diff --git a/.gitignore b/.gitignore index 101d69721..9e5d33ae8 100644 --- a/.gitignore +++ b/.gitignore @@ -11,3 +11,5 @@ tmp-* .hack/devnet/generated-** .hack/devnet/custom-** +cmd/statetransition-test/ + diff --git a/.hack/devnet/run.sh b/.hack/devnet/run.sh index 330c8d115..9ffab3fc4 100755 --- a/.hack/devnet/run.sh +++ b/.hack/devnet/run.sh @@ -147,6 +147,10 @@ indexer: cachePersistenceDelay: 8 disableIndexWriter: false syncEpochCooldown: 1 + stateCache: + enabled: true + path: "${__dir}/generated-state-cache" + maxStates: 5 executionIndexer: enabled: true retention: 4368h diff --git a/blockdb/blockdb.go b/blockdb/blockdb.go index 96a379863..ba8e74c69 100644 --- a/blockdb/blockdb.go +++ b/blockdb/blockdb.go @@ -4,23 +4,25 @@ import ( "context" "fmt" + "github.com/sirupsen/logrus" + "github.com/ethpandaops/dora/blockdb/pebble" "github.com/ethpandaops/dora/blockdb/s3" + "github.com/ethpandaops/dora/blockdb/tiered" "github.com/ethpandaops/dora/blockdb/types" dtypes "github.com/ethpandaops/dora/types" ) -// BlockDb wraps the underlying storage engine for both beacon block data -// and execution data. +// BlockDb is the main wrapper for block database operations. type BlockDb struct { engine types.BlockDbEngine execEngine types.ExecDataEngine // nil if engine doesn't support exec data } -// GlobalBlockDb is the global singleton BlockDb instance. +// GlobalBlockDb is the global block database instance. var GlobalBlockDb *BlockDb -// InitWithPebble initializes the global BlockDb with a Pebble backend. +// InitWithPebble initializes the block database with Pebble (local) storage. func InitWithPebble(config dtypes.PebbleBlockDBConfig) error { engine, err := pebble.NewPebbleEngine(config) if err != nil { @@ -41,7 +43,7 @@ func InitWithPebble(config dtypes.PebbleBlockDBConfig) error { return nil } -// InitWithS3 initializes the global BlockDb with an S3 backend. +// InitWithS3 initializes the block database with S3 (remote) storage. 
func InitWithS3(config dtypes.S3BlockDBConfig) error { engine, err := s3.NewS3Engine(config) if err != nil { @@ -62,6 +64,27 @@ func InitWithS3(config dtypes.S3BlockDBConfig) error { return nil } +// InitWithTiered initializes the block database with tiered storage (Pebble cache + S3 backend). +func InitWithTiered(config dtypes.TieredBlockDBConfig, logger logrus.FieldLogger) error { + engine, err := tiered.NewTieredEngine(config, logger) + if err != nil { + return err + } + + db := &BlockDb{ + engine: engine, + } + + // Check if tiered engine supports exec data + if execEngine, ok := engine.(types.ExecDataEngine); ok { + db.execEngine = execEngine + } + + GlobalBlockDb = db + + return nil +} + // GetEngine returns the underlying storage engine. func (db *BlockDb) GetEngine() types.BlockDbEngine { return db.engine @@ -71,22 +94,59 @@ func (db *BlockDb) Close() error { return db.engine.Close() } -func (db *BlockDb) GetBlock(ctx context.Context, slot uint64, root []byte, parseBlock func(uint64, []byte) (interface{}, error)) (*types.BlockData, error) { - return db.engine.GetBlock(ctx, slot, root, parseBlock) -} - -func (db *BlockDb) AddBlock(ctx context.Context, slot uint64, root []byte, header_ver uint64, header_data []byte, body_ver uint64, body_data []byte) (bool, error) { +// GetBlock retrieves block data with selective loading based on flags. +func (db *BlockDb) GetBlock( + ctx context.Context, + slot uint64, + root []byte, + flags types.BlockDataFlags, + parseBlock func(uint64, []byte) (any, error), + parsePayload func(uint64, []byte) (any, error), +) (*types.BlockData, error) { + return db.engine.GetBlock(ctx, slot, root, flags, parseBlock, parsePayload) +} + +// GetStoredComponents returns which components exist for a block. +func (db *BlockDb) GetStoredComponents(ctx context.Context, slot uint64, root []byte) (types.BlockDataFlags, error) { + return db.engine.GetStoredComponents(ctx, slot, root) +} + +// AddBlock stores block data. 
Returns (added, updated, error). +func (db *BlockDb) AddBlock( + ctx context.Context, + slot uint64, + root []byte, + headerVer uint64, + headerData []byte, + bodyVer uint64, + bodyData []byte, + payloadVer uint64, + payloadData []byte, + balVer uint64, + balData []byte, +) (bool, bool, error) { return db.engine.AddBlock(ctx, slot, root, func() (*types.BlockData, error) { return &types.BlockData{ - HeaderVersion: header_ver, - HeaderData: header_data, - BodyVersion: body_ver, - BodyData: body_data, + HeaderVersion: headerVer, + HeaderData: headerData, + BodyVersion: bodyVer, + BodyData: bodyData, + PayloadVersion: payloadVer, + PayloadData: payloadData, + BalVersion: balVer, + BalData: balData, }, nil }) } -func (db *BlockDb) AddBlockWithCallback(ctx context.Context, slot uint64, root []byte, dataCb func() (*types.BlockData, error)) (bool, error) { +// AddBlockWithCallback stores block data using a callback for deferred data loading. +// Returns (added, updated, error). +func (db *BlockDb) AddBlockWithCallback( + ctx context.Context, + slot uint64, + root []byte, + dataCb func() (*types.BlockData, error), +) (bool, bool, error) { return db.engine.AddBlock(ctx, slot, root, dataCb) } diff --git a/blockdb/pebble/cleanup.go b/blockdb/pebble/cleanup.go new file mode 100644 index 000000000..5a3cf787e --- /dev/null +++ b/blockdb/pebble/cleanup.go @@ -0,0 +1,439 @@ +package pebble + +import ( + "context" + "encoding/binary" + "sort" + "sync" + "time" + + "github.com/cockroachdb/pebble" + "github.com/sirupsen/logrus" + + "github.com/ethpandaops/dora/blockdb/types" + dtypes "github.com/ethpandaops/dora/types" +) + +const ( + // KeyNamespaceLRU is the namespace for LRU tracking data. + KeyNamespaceLRU uint16 = 2 + + // LRU value format: [headerAccess (8B)] [bodyAccess (8B)] [payloadAccess (8B)] [balAccess (8B)] + // Each access time is a Unix nanosecond timestamp, 0 means never accessed. 
+ lruValueSize = 32 + + // Maximum number of LRU updates to buffer before forcing a flush. + maxLRUBufferSize = 1000 +) + +// CacheCleanup manages background cleanup of cached data. +type CacheCleanup struct { + engine *PebbleEngine + config dtypes.PebbleBlockDBConfig + logger logrus.FieldLogger + + ctx context.Context + cancel context.CancelFunc + wg sync.WaitGroup + + // LRU update buffer + lruMu sync.Mutex + lruBuffer map[string]*lruUpdate // root hex -> update +} + +// lruUpdate holds pending LRU timestamp updates for a block. +type lruUpdate struct { + root []byte + headerAccess int64 // Unix nano, 0 = no update + bodyAccess int64 + payloadAccess int64 + balAccess int64 +} + +// NewCacheCleanup creates a new cache cleanup manager. +func NewCacheCleanup(engine *PebbleEngine, logger logrus.FieldLogger) *CacheCleanup { + ctx, cancel := context.WithCancel(context.Background()) + + return &CacheCleanup{ + engine: engine, + config: engine.GetConfig(), + logger: logger.WithField("component", "pebble-cleanup"), + ctx: ctx, + cancel: cancel, + lruBuffer: make(map[string]*lruUpdate, 100), + } +} + +// Start begins the background cleanup loop. +func (c *CacheCleanup) Start() { + if c.config.CleanupInterval == 0 { + c.logger.Info("cleanup disabled (interval is 0)") + return + } + + c.wg.Add(1) + go c.runCleanupLoop() +} + +// Stop stops the background cleanup loop. +func (c *CacheCleanup) Stop() { + c.cancel() + c.wg.Wait() + + // Final flush of LRU buffer + c.FlushLRU() +} + +// runCleanupLoop runs the periodic cleanup. +func (c *CacheCleanup) runCleanupLoop() { + defer c.wg.Done() + + ticker := time.NewTicker(c.config.CleanupInterval) + defer ticker.Stop() + + for { + select { + case <-c.ctx.Done(): + return + case <-ticker.C: + c.FlushLRU() + c.runCleanup() + } + } +} + +// RecordAccess records an access for LRU tracking. Buffered until flush. 
+func (c *CacheCleanup) RecordAccess(root []byte, flags types.BlockDataFlags) { + c.lruMu.Lock() + defer c.lruMu.Unlock() + + key := string(root) + now := time.Now().UnixNano() + + update, exists := c.lruBuffer[key] + if !exists { + rootCopy := make([]byte, len(root)) + copy(rootCopy, root) + update = &lruUpdate{root: rootCopy} + c.lruBuffer[key] = update + } + + if flags.Has(types.BlockDataFlagHeader) { + update.headerAccess = now + } + if flags.Has(types.BlockDataFlagBody) { + update.bodyAccess = now + } + if flags.Has(types.BlockDataFlagPayload) { + update.payloadAccess = now + } + if flags.Has(types.BlockDataFlagBal) { + update.balAccess = now + } + + // Force flush if buffer is too large + if len(c.lruBuffer) >= maxLRUBufferSize { + c.flushLRULocked() + } +} + +// FlushLRU flushes buffered LRU updates to Pebble. +func (c *CacheCleanup) FlushLRU() { + c.lruMu.Lock() + defer c.lruMu.Unlock() + c.flushLRULocked() +} + +// flushLRULocked flushes LRU buffer (must hold lruMu). +func (c *CacheCleanup) flushLRULocked() { + if len(c.lruBuffer) == 0 { + return + } + + db := c.engine.GetDB() + batch := db.NewBatch() + + for _, update := range c.lruBuffer { + key := makeLRUKey(update.root) + + // Read existing LRU data + existing := make([]byte, lruValueSize) + if res, closer, err := db.Get(key); err == nil { + if len(res) >= lruValueSize { + copy(existing, res) + } + closer.Close() + } + + // Merge updates (only update non-zero values) + value := make([]byte, lruValueSize) + copy(value, existing) + + if update.headerAccess > 0 { + binary.BigEndian.PutUint64(value[0:8], uint64(update.headerAccess)) + } + if update.bodyAccess > 0 { + binary.BigEndian.PutUint64(value[8:16], uint64(update.bodyAccess)) + } + if update.payloadAccess > 0 { + binary.BigEndian.PutUint64(value[16:24], uint64(update.payloadAccess)) + } + if update.balAccess > 0 { + binary.BigEndian.PutUint64(value[24:32], uint64(update.balAccess)) + } + + batch.Set(key, value, nil) + } + + if err := 
batch.Commit(nil); err != nil { + c.logger.Errorf("failed to flush LRU updates: %v", err) + } + batch.Close() + + // Clear buffer + c.lruBuffer = make(map[string]*lruUpdate, 100) +} + +// makeLRUKey creates the key for LRU data. +func makeLRUKey(root []byte) []byte { + key := make([]byte, 2+len(root)) + binary.BigEndian.PutUint16(key[:2], KeyNamespaceLRU) + copy(key[2:], root) + return key +} + +// runCleanup performs cleanup for all configured component types. +func (c *CacheCleanup) runCleanup() { + c.logger.Debug("starting cache cleanup") + + componentConfigs := map[uint16]*dtypes.BlockDbRetentionConfig{ + BlockTypeHeader: &c.config.HeaderRetention, + BlockTypeBody: &c.config.BodyRetention, + BlockTypePayload: &c.config.PayloadRetention, + BlockTypeBal: &c.config.BalRetention, + } + + for blockType, config := range componentConfigs { + if config == nil || !config.Enabled { + continue + } + + switch config.CleanupMode { + case "age": + c.cleanupByAge(blockType, config.RetentionTime) + case "lru": + c.cleanupByLRU(blockType, config.MaxSize*1024*1024) // Convert MB to bytes + } + } +} + +// cleanupByAge removes entries older than the retention time based on storage timestamp. 
+func (c *CacheCleanup) cleanupByAge(blockType uint16, retention time.Duration) { + if retention == 0 { + return + } + + cutoff := time.Now().Add(-retention) + deleted := 0 + + db := c.engine.GetDB() + iter, err := db.NewIter(&pebble.IterOptions{}) + if err != nil { + c.logger.Errorf("failed to create iterator: %v", err) + return + } + defer iter.Close() + + batch := db.NewBatch() + defer batch.Close() + + for iter.First(); iter.Valid(); iter.Next() { + key := iter.Key() + + // Check if this key is in the block namespace + if len(key) < 36 { // 2 (namespace) + 32 (root) + 2 (type) + continue + } + + namespace := binary.BigEndian.Uint16(key[:2]) + if namespace != KeyNamespaceBlock { + continue + } + + keyType := binary.BigEndian.Uint16(key[len(key)-2:]) + if keyType != blockType { + continue + } + + // Check timestamp from value (stored at offset 8) + value := iter.Value() + if len(value) < valueHeaderSize { + continue + } + + timestamp := time.Unix(0, int64(binary.BigEndian.Uint64(value[8:16]))) + if timestamp.Before(cutoff) { + keyCopy := make([]byte, len(key)) + copy(keyCopy, key) + batch.Delete(keyCopy, nil) + deleted++ + } + } + + if deleted > 0 { + if err := batch.Commit(nil); err != nil { + c.logger.Errorf("failed to commit age cleanup batch: %v", err) + } else { + c.logger.Infof("cleaned up %d entries for block type %d (age-based)", deleted, blockType) + } + } +} + +// lruEntry represents an entry for LRU cleanup sorting. +type lruEntry struct { + root []byte + key []byte + size int64 + lastAccess int64 +} + +// cleanupByLRU removes least recently used entries when size exceeds limit. 
+func (c *CacheCleanup) cleanupByLRU(blockType uint16, maxSize int64) { + if maxSize == 0 { + return + } + + db := c.engine.GetDB() + + // First pass: collect all entries with their sizes and LRU timestamps + entries := make([]*lruEntry, 0, 1000) + var totalSize int64 + + iter, err := db.NewIter(&pebble.IterOptions{}) + if err != nil { + c.logger.Errorf("failed to create iterator: %v", err) + return + } + + // Scan block entries + for iter.First(); iter.Valid(); iter.Next() { + key := iter.Key() + + if len(key) < 36 { + continue + } + + namespace := binary.BigEndian.Uint16(key[:2]) + if namespace != KeyNamespaceBlock { + continue + } + + keyType := binary.BigEndian.Uint16(key[len(key)-2:]) + if keyType != blockType { + continue + } + + // Extract root from key + root := key[2 : len(key)-2] + value := iter.Value() + size := int64(len(value)) + totalSize += size + + // Get LRU timestamp for this entry + lastAccess := c.getLRUTimestamp(db, root, blockType) + + keyCopy := make([]byte, len(key)) + copy(keyCopy, key) + rootCopy := make([]byte, len(root)) + copy(rootCopy, root) + + entries = append(entries, &lruEntry{ + root: rootCopy, + key: keyCopy, + size: size, + lastAccess: lastAccess, + }) + } + iter.Close() + + // Check if we need to clean up + if totalSize <= maxSize { + return + } + + // Sort by last access time (oldest first, 0 = never accessed = oldest) + sort.Slice(entries, func(i, j int) bool { + return entries[i].lastAccess < entries[j].lastAccess + }) + + // Delete oldest entries until we're under the limit + batch := db.NewBatch() + defer batch.Close() + + deleted := 0 + freedSize := int64(0) + targetFree := totalSize - maxSize + + for _, entry := range entries { + if freedSize >= targetFree { + break + } + + batch.Delete(entry.key, nil) + freedSize += entry.size + deleted++ + } + + if deleted > 0 { + if err := batch.Commit(nil); err != nil { + c.logger.Errorf("failed to commit LRU cleanup batch: %v", err) + } else { + c.logger.Infof("cleaned up %d entries 
for block type %d (LRU-based, freed %d bytes)", + deleted, blockType, freedSize) + } + } +} + +// getLRUTimestamp retrieves the LRU timestamp for a specific component. +func (c *CacheCleanup) getLRUTimestamp(db *pebble.DB, root []byte, blockType uint16) int64 { + key := makeLRUKey(root) + + res, closer, err := db.Get(key) + if err != nil { + return 0 // Never accessed + } + defer closer.Close() + + if len(res) < lruValueSize { + return 0 + } + + // Extract timestamp based on block type + var offset int + switch blockType { + case BlockTypeHeader: + offset = 0 + case BlockTypeBody: + offset = 8 + case BlockTypePayload: + offset = 16 + case BlockTypeBal: + offset = 24 + default: + return 0 + } + + return int64(binary.BigEndian.Uint64(res[offset : offset+8])) +} + +// DeleteLRU removes LRU data for a block (call when deleting block data). +func (c *CacheCleanup) DeleteLRU(root []byte) { + db := c.engine.GetDB() + key := makeLRUKey(root) + db.Delete(key, nil) + + // Also remove from buffer + c.lruMu.Lock() + delete(c.lruBuffer, string(root)) + c.lruMu.Unlock() +} diff --git a/blockdb/pebble/pebble.go b/blockdb/pebble/pebble.go index 3418912a9..985cc9e1f 100644 --- a/blockdb/pebble/pebble.go +++ b/blockdb/pebble/pebble.go @@ -3,6 +3,8 @@ package pebble import ( "context" "encoding/binary" + "fmt" + "time" "github.com/cockroachdb/pebble" "github.com/ethpandaops/dora/blockdb/types" @@ -14,12 +16,18 @@ const ( ) const ( - BlockTypeHeader uint16 = 1 - BlockTypeBody uint16 = 2 + BlockTypeHeader uint16 = 1 + BlockTypeBody uint16 = 2 + BlockTypePayload uint16 = 3 + BlockTypeBal uint16 = 4 ) +// Value format: [version (8 bytes)] [timestamp (8 bytes)] [data] +const valueHeaderSize = 16 + type PebbleEngine struct { - db *pebble.DB + db *pebble.DB + config dtypes.PebbleBlockDBConfig } func NewPebbleEngine(config dtypes.PebbleBlockDBConfig) (types.BlockDbEngine, error) { @@ -34,7 +42,8 @@ func NewPebbleEngine(config dtypes.PebbleBlockDBConfig) (types.BlockDbEngine, er } return 
&PebbleEngine{ - db: db, + db: db, + config: config, }, nil } @@ -44,148 +53,248 @@ func (e *PebbleEngine) GetDB() *pebble.DB { } func (e *PebbleEngine) Close() error { - err := e.db.Close() - if err != nil { - return err - } - - return nil + return e.db.Close() } -func (e *PebbleEngine) getBlockHeader(root []byte) ([]byte, uint64, error) { +// makeKey creates a key for the given root and block type. +func makeKey(root []byte, blockType uint16) []byte { key := make([]byte, 2+len(root)+2) binary.BigEndian.PutUint16(key[:2], KeyNamespaceBlock) copy(key[2:], root) - binary.BigEndian.PutUint16(key[2+len(root):], BlockTypeHeader) + binary.BigEndian.PutUint16(key[2+len(root):], blockType) + return key +} + +// getComponent retrieves a single component from the database. +// Returns (data, version, timestamp, error). Returns nil data if not found. +func (e *PebbleEngine) getComponent(root []byte, blockType uint16) ([]byte, uint64, time.Time, error) { + key := makeKey(root, blockType) res, closer, err := e.db.Get(key) - if err != nil && err != pebble.ErrNotFound { - return nil, 0, err + if err == pebble.ErrNotFound { + return nil, 0, time.Time{}, nil + } + if err != nil { + return nil, 0, time.Time{}, err } defer closer.Close() - if err == pebble.ErrNotFound || len(res) == 0 { - return nil, 0, nil + if len(res) < valueHeaderSize { + return nil, 0, time.Time{}, nil } version := binary.BigEndian.Uint64(res[:8]) - header := make([]byte, len(res)-8) - copy(header, res[8:]) + timestamp := time.Unix(0, int64(binary.BigEndian.Uint64(res[8:16]))) - return header, version, nil + data := make([]byte, len(res)-valueHeaderSize) + copy(data, res[valueHeaderSize:]) + + return data, version, timestamp, nil } -func (e *PebbleEngine) getBlockBody(root []byte, parser func(uint64, []byte) (interface{}, error)) (interface{}, uint64, error) { - key := make([]byte, 2+len(root)+2) - binary.BigEndian.PutUint16(key[:2], KeyNamespaceBlock) - copy(key[2:], root) - 
binary.BigEndian.PutUint16(key[2+len(root):], BlockTypeBody) +// setComponent stores a single component in the database. +func (e *PebbleEngine) setComponent(root []byte, blockType uint16, version uint64, data []byte) error { + key := makeKey(root, blockType) - res, closer, err := e.db.Get(key) - if err != nil && err != pebble.ErrNotFound { - return nil, 0, err - } - defer closer.Close() + value := make([]byte, valueHeaderSize+len(data)) + binary.BigEndian.PutUint64(value[:8], version) + binary.BigEndian.PutUint64(value[8:16], uint64(time.Now().UnixNano())) + copy(value[valueHeaderSize:], data) - if err == pebble.ErrNotFound || len(res) == 0 { - return nil, 0, nil + return e.db.Set(key, value, nil) +} + +// componentExists checks if a component exists in the database. +func (e *PebbleEngine) componentExists(root []byte, blockType uint16) bool { + key := makeKey(root, blockType) + + res, closer, err := e.db.Get(key) + if err == nil && len(res) >= valueHeaderSize { + closer.Close() + return true } + return false +} - version := binary.BigEndian.Uint64(res[:8]) - block := res[8:] +// GetStoredComponents returns which components exist for a block. 
+func (e *PebbleEngine) GetStoredComponents(_ context.Context, _ uint64, root []byte) (types.BlockDataFlags, error) { + var flags types.BlockDataFlags - body, err := parser(version, block) - if err != nil { - return nil, 0, err + if e.componentExists(root, BlockTypeHeader) { + flags |= types.BlockDataFlagHeader + } + if e.componentExists(root, BlockTypeBody) { + flags |= types.BlockDataFlagBody + } + if e.componentExists(root, BlockTypePayload) { + flags |= types.BlockDataFlagPayload + } + if e.componentExists(root, BlockTypeBal) { + flags |= types.BlockDataFlagBal } - return body, version, nil + return flags, nil } -func (e *PebbleEngine) GetBlock(_ context.Context, _ uint64, root []byte, parseBlock func(uint64, []byte) (interface{}, error)) (*types.BlockData, error) { - header, header_ver, err := e.getBlockHeader(root) - if err != nil { - return nil, err +// GetBlock retrieves block data with selective loading based on flags. +// Note: LRU access tracking should be done by the caller via CacheCleanup.RecordAccess() +// to avoid expensive read-modify-write operations on every access. 
+func (e *PebbleEngine) GetBlock( + _ context.Context, + _ uint64, + root []byte, + flags types.BlockDataFlags, + parseBlock func(uint64, []byte) (any, error), + parsePayload func(uint64, []byte) (any, error), +) (*types.BlockData, error) { + blockData := &types.BlockData{} + + // Load header if requested + if flags.Has(types.BlockDataFlagHeader) { + data, version, _, err := e.getComponent(root, BlockTypeHeader) + if err != nil { + return nil, fmt.Errorf("failed to get header: %w", err) + } + if data != nil { + blockData.HeaderVersion = version + blockData.HeaderData = data + } } - blockData := &types.BlockData{ - HeaderVersion: header_ver, - HeaderData: header, - } + // Load body if requested + if flags.Has(types.BlockDataFlagBody) { + data, version, _, err := e.getComponent(root, BlockTypeBody) + if err != nil { + return nil, fmt.Errorf("failed to get body: %w", err) + } - if parseBlock == nil { - parseBlock = func(version uint64, block []byte) (interface{}, error) { - blockData.BodyData = make([]byte, len(block)) - copy(blockData.BodyData, block) - return nil, nil + if data != nil { + blockData.BodyVersion = version + if parseBlock != nil { + body, err := parseBlock(version, data) + if err != nil { + return nil, fmt.Errorf("failed to parse body: %w", err) + } + blockData.Body = body + } else { + blockData.BodyData = data + } } } - body, body_ver, err := e.getBlockBody(root, parseBlock) - if err != nil { - return nil, err - } + // Load payload if requested + if flags.Has(types.BlockDataFlagPayload) { + data, version, _, err := e.getComponent(root, BlockTypePayload) + if err != nil { + return nil, fmt.Errorf("failed to get payload: %w", err) + } - blockData.Body = body - blockData.BodyVersion = body_ver + if data != nil { + blockData.PayloadVersion = version + if parsePayload != nil { + payload, err := parsePayload(version, data) + if err != nil { + return nil, fmt.Errorf("failed to parse payload: %w", err) + } + blockData.Payload = payload + } else { + 
blockData.PayloadData = data + } + } + } - return blockData, nil -} + // Load BAL if requested + if flags.Has(types.BlockDataFlagBal) { + data, version, _, err := e.getComponent(root, BlockTypeBal) + if err != nil { + return nil, fmt.Errorf("failed to get BAL: %w", err) + } -func (e *PebbleEngine) checkBlock(key []byte) bool { - res, closer, err := e.db.Get(key) - if err == nil && len(res) > 0 { - closer.Close() - return true + if data != nil { + blockData.BalVersion = version + blockData.BalData = data + } } - return false + return blockData, nil } -func (e *PebbleEngine) addBlockHeader(key []byte, version uint64, header []byte) error { - data := make([]byte, 8+len(header)) - binary.BigEndian.PutUint64(data[:8], version) +// AddBlock stores block data. Returns (added, updated, error). +// - added: true if a new block was created +// - updated: true if an existing block was updated with new components +func (e *PebbleEngine) AddBlock( + _ context.Context, + _ uint64, + root []byte, + dataCb func() (*types.BlockData, error), +) (bool, bool, error) { + // Check what components already exist + existingFlags, err := e.GetStoredComponents(context.Background(), 0, root) + if err != nil { + return false, false, fmt.Errorf("failed to check existing components: %w", err) + } - return e.db.Set(key, data, nil) -} + // Get the new data + blockData, err := dataCb() + if err != nil { + return false, false, err + } -func (e *PebbleEngine) addBlockBody(root []byte, version uint64, block []byte) error { - key := make([]byte, 2+len(root)+2) - binary.BigEndian.PutUint16(key[:2], KeyNamespaceBlock) - copy(key[2:], root) - binary.BigEndian.PutUint16(key[2+len(root):], BlockTypeBody) + // Determine what new components we have + var newFlags types.BlockDataFlags + if len(blockData.HeaderData) > 0 { + newFlags |= types.BlockDataFlagHeader + } + if len(blockData.BodyData) > 0 { + newFlags |= types.BlockDataFlagBody + } + if blockData.PayloadVersion != 0 && len(blockData.PayloadData) > 0 { 
+ newFlags |= types.BlockDataFlagPayload + } + if blockData.BalVersion != 0 && len(blockData.BalData) > 0 { + newFlags |= types.BlockDataFlagBal + } - data := make([]byte, 8+len(block)) - binary.BigEndian.PutUint64(data[:8], version) - copy(data[8:], block) + // Calculate components to add (new components not in existing) + toAdd := newFlags &^ existingFlags - return e.db.Set(key, data, nil) -} + if toAdd == 0 { + // Nothing new to add + return false, false, nil + } -func (e *PebbleEngine) AddBlock(_ context.Context, _ uint64, root []byte, dataCb func() (*types.BlockData, error)) (bool, error) { - key := make([]byte, 2+len(root)+2) - binary.BigEndian.PutUint16(key[:2], KeyNamespaceBlock) - copy(key[2:], root) - binary.BigEndian.PutUint16(key[2+len(root):], BlockTypeHeader) + isNew := existingFlags == 0 + isUpdated := !isNew - if e.checkBlock(key) { - return false, nil + // Store new components + if toAdd.Has(types.BlockDataFlagHeader) { + if err := e.setComponent(root, BlockTypeHeader, blockData.HeaderVersion, blockData.HeaderData); err != nil { + return false, false, fmt.Errorf("failed to store header: %w", err) + } } - blockData, err := dataCb() - if err != nil { - return false, err + if toAdd.Has(types.BlockDataFlagBody) { + if err := e.setComponent(root, BlockTypeBody, blockData.BodyVersion, blockData.BodyData); err != nil { + return false, false, fmt.Errorf("failed to store body: %w", err) + } } - err = e.addBlockHeader(key, blockData.HeaderVersion, blockData.HeaderData) - if err != nil { - return false, err + if toAdd.Has(types.BlockDataFlagPayload) { + if err := e.setComponent(root, BlockTypePayload, blockData.PayloadVersion, blockData.PayloadData); err != nil { + return false, false, fmt.Errorf("failed to store payload: %w", err) + } } - err = e.addBlockBody(root, blockData.BodyVersion, blockData.BodyData) - if err != nil { - return false, err + if toAdd.Has(types.BlockDataFlagBal) { + if err := e.setComponent(root, BlockTypeBal, blockData.BalVersion, 
blockData.BalData); err != nil { + return false, false, fmt.Errorf("failed to store BAL: %w", err) + } } - return true, nil + return isNew, isUpdated, nil +} + +// GetConfig returns the engine configuration. +func (e *PebbleEngine) GetConfig() dtypes.PebbleBlockDBConfig { + return e.config } diff --git a/blockdb/s3/format.go b/blockdb/s3/format.go new file mode 100644 index 000000000..378fc5e18 --- /dev/null +++ b/blockdb/s3/format.go @@ -0,0 +1,202 @@ +package s3 + +import ( + "encoding/binary" + "fmt" + + "github.com/ethpandaops/go-eth2-client/spec" + + "github.com/ethpandaops/dora/blockdb/types" +) + +// Object format versions: +// v1: header + body (pre-gloas blocks) +// v2: header + body + payload + bal (gloas+ blocks, payload/BAL introduced in same fork) +// +// Note: Both payload and BAL may be empty (length 0), but body is always required. + +// Metadata sizes by version +const ( + metadataSizeV1 = 16 // 4 (version) + 4 (headerLen) + 4 (bodyVer) + 4 (bodyLen) + metadataSizeV2 = 32 // v1 + 4 (payloadVer) + 4 (payloadLen) + 4 (balVer) + 4 (balLen) + + // Maximum metadata size for initial read + maxMetadataSize = 64 +) + +// objectMetadata represents the metadata for all format versions. +type objectMetadata struct { + ObjVersion uint32 + + // Header (always present) + HeaderLength uint32 + + // Body (always required) + BodyVersion uint32 + BodyLength uint32 + + // Payload (v2+, may be empty) + PayloadVersion uint32 + PayloadLength uint32 + + // BAL (v2+, may be empty) + BalVersion uint32 + BalLength uint32 +} + +// metadataSize returns the metadata size for this object. +func (m *objectMetadata) metadataSize() int { + switch m.ObjVersion { + case 1: + return metadataSizeV1 + case 2: + return metadataSizeV2 + default: + return metadataSizeV2 + } +} + +// headerOffset returns the byte offset of the header data. +func (m *objectMetadata) headerOffset() int { + return m.metadataSize() +} + +// bodyOffset returns the byte offset of the body data. 
+func (m *objectMetadata) bodyOffset() int { + return m.metadataSize() + int(m.HeaderLength) +} + +// payloadOffset returns the byte offset of the payload data. +func (m *objectMetadata) payloadOffset() int { + return m.metadataSize() + int(m.HeaderLength) + int(m.BodyLength) +} + +// balOffset returns the byte offset of the BAL data. +func (m *objectMetadata) balOffset() int { + return m.metadataSize() + int(m.HeaderLength) + int(m.BodyLength) + int(m.PayloadLength) +} + +// storedFlags returns which components are stored in this object. +func (m *objectMetadata) storedFlags() types.BlockDataFlags { + var flags types.BlockDataFlags + + if m.HeaderLength > 0 { + flags |= types.BlockDataFlagHeader + } + if m.BodyLength > 0 { + flags |= types.BlockDataFlagBody + } + if m.PayloadLength > 0 && m.ObjVersion >= 2 { + flags |= types.BlockDataFlagPayload + } + if m.BalLength > 0 && m.ObjVersion >= 2 { + flags |= types.BlockDataFlagBal + } + + return flags +} + +// readObjectMetadata reads metadata from any format version. 
+func readObjectMetadata(data []byte) (*objectMetadata, error) { + if len(data) < 4 { + return nil, fmt.Errorf("data too short for metadata version") + } + + version := binary.BigEndian.Uint32(data[:4]) + meta := &objectMetadata{ObjVersion: version} + + switch version { + case 1: + if len(data) < metadataSizeV1 { + return nil, fmt.Errorf("data too short for v1 metadata: need %d, got %d", metadataSizeV1, len(data)) + } + meta.HeaderLength = binary.BigEndian.Uint32(data[4:8]) + meta.BodyVersion = binary.BigEndian.Uint32(data[8:12]) + meta.BodyLength = binary.BigEndian.Uint32(data[12:16]) + + case 2: + if len(data) < metadataSizeV2 { + return nil, fmt.Errorf("data too short for v2 metadata: need %d, got %d", metadataSizeV2, len(data)) + } + meta.HeaderLength = binary.BigEndian.Uint32(data[4:8]) + meta.BodyVersion = binary.BigEndian.Uint32(data[8:12]) + meta.BodyLength = binary.BigEndian.Uint32(data[12:16]) + meta.PayloadVersion = binary.BigEndian.Uint32(data[16:20]) + meta.PayloadLength = binary.BigEndian.Uint32(data[20:24]) + meta.BalVersion = binary.BigEndian.Uint32(data[24:28]) + meta.BalLength = binary.BigEndian.Uint32(data[28:32]) + + default: + return nil, fmt.Errorf("unsupported object version: %d", version) + } + + return meta, nil +} + +// writeObjectMetadata creates metadata bytes for the given BlockData. +// Uses v1 format for pre-gloas blocks, v2 for gloas+ blocks. 
+func writeObjectMetadata(data *types.BlockData) []byte { + // Use v2 format only for gloas+ blocks (which can have payload/BAL) + if data.BodyVersion >= uint64(spec.DataVersionGloas) { + meta := make([]byte, metadataSizeV2) + binary.BigEndian.PutUint32(meta[0:4], 2) + binary.BigEndian.PutUint32(meta[4:8], uint32(len(data.HeaderData))) + binary.BigEndian.PutUint32(meta[8:12], uint32(data.BodyVersion)) + binary.BigEndian.PutUint32(meta[12:16], uint32(len(data.BodyData))) + binary.BigEndian.PutUint32(meta[16:20], uint32(data.PayloadVersion)) + binary.BigEndian.PutUint32(meta[20:24], uint32(len(data.PayloadData))) + binary.BigEndian.PutUint32(meta[24:28], uint32(data.BalVersion)) + binary.BigEndian.PutUint32(meta[28:32], uint32(len(data.BalData))) + return meta + } + + // Use v1 format for pre-gloas blocks + meta := make([]byte, metadataSizeV1) + binary.BigEndian.PutUint32(meta[0:4], 1) + binary.BigEndian.PutUint32(meta[4:8], uint32(len(data.HeaderData))) + binary.BigEndian.PutUint32(meta[8:12], uint32(data.BodyVersion)) + binary.BigEndian.PutUint32(meta[12:16], uint32(len(data.BodyData))) + return meta +} + +// getDataRange calculates the single byte range spanning all requested components. +// Returns (start, end) where end is inclusive. Returns (-1, -1) if no data to fetch. 
+func (m *objectMetadata) getDataRange(flags types.BlockDataFlags) (int64, int64) { + var start int64 = -1 + var end int64 = -1 + + // Check each component in order (they're stored sequentially) + if flags.Has(types.BlockDataFlagHeader) && m.HeaderLength > 0 { + start = int64(m.headerOffset()) + end = start + int64(m.HeaderLength) - 1 + } + + if flags.Has(types.BlockDataFlagBody) && m.BodyLength > 0 { + bodyStart := int64(m.bodyOffset()) + bodyEnd := bodyStart + int64(m.BodyLength) - 1 + if start < 0 { + start = bodyStart + } + end = bodyEnd + } + + if flags.Has(types.BlockDataFlagPayload) && m.PayloadLength > 0 && m.ObjVersion >= 2 { + payloadStart := int64(m.payloadOffset()) + payloadEnd := payloadStart + int64(m.PayloadLength) - 1 + if start < 0 { + start = payloadStart + } + end = payloadEnd + } + + if flags.Has(types.BlockDataFlagBal) && m.BalLength > 0 && m.ObjVersion >= 2 { + balStart := int64(m.balOffset()) + balEnd := balStart + int64(m.BalLength) - 1 + if start < 0 { + start = balStart + } + end = balEnd + } + + return start, end +} diff --git a/blockdb/s3/s3store.go b/blockdb/s3/s3store.go index 69e240c93..8fea67501 100644 --- a/blockdb/s3/s3store.go +++ b/blockdb/s3/s3store.go @@ -3,7 +3,6 @@ package s3 import ( "bytes" "context" - "encoding/binary" "encoding/hex" "fmt" "io" @@ -13,6 +12,7 @@ import ( "github.com/ethpandaops/dora/blockdb/types" dtypes "github.com/ethpandaops/dora/types" + "github.com/ethpandaops/go-eth2-client/spec" "github.com/minio/minio-go/v7" "github.com/minio/minio-go/v7/pkg/credentials" ) @@ -21,6 +21,10 @@ type S3Engine struct { client *minio.Client bucket string pathPrefix string + config dtypes.S3BlockDBConfig + + // Range request support (configured via EnableRangeRequests) + rangeRequestsEnabled bool // Operation counters getCount atomic.Int64 @@ -74,9 +78,11 @@ func NewS3Engine(config dtypes.S3BlockDBConfig) (types.BlockDbEngine, error) { } engine := &S3Engine{ - client: client, - bucket: config.Bucket, - pathPrefix: 
strings.TrimPrefix(config.Path, "/"), + client: client, + bucket: config.Bucket, + pathPrefix: strings.TrimPrefix(config.Path, "/"), + config: config, + rangeRequestsEnabled: config.EnableRangeRequests, } return engine, nil @@ -91,149 +97,406 @@ func (e *S3Engine) getObjectKey(root []byte, slot uint64) string { return path.Join(e.pathPrefix, fmt.Sprintf("%06d", slot/10000), fmt.Sprintf("%010d_%s", slot, rootHex)) } -type objectMetadata struct { - objVersion uint32 - headerLength uint32 - bodyVersion uint32 - bodyLength uint32 +// GetStoredComponents returns which components exist for a block by reading metadata. +func (e *S3Engine) GetStoredComponents(ctx context.Context, slot uint64, root []byte) (types.BlockDataFlags, error) { + key := e.getObjectKey(root, slot) + e.getCount.Add(1) + + // Read just the metadata + meta, err := e.readMetadata(ctx, key) + if err != nil { + return 0, err + } + if meta == nil { + return 0, nil + } + + return meta.storedFlags(), nil } -func (e *S3Engine) readObjectMetadata(data []byte) (*objectMetadata, int, error) { - metadataLength := 4 - metadata := &objectMetadata{ - objVersion: binary.BigEndian.Uint32(data[:4]), +// readMetadata reads object metadata using range request if enabled, otherwise full read. 
+func (e *S3Engine) readMetadata(ctx context.Context, key string) (*objectMetadata, error) { + if e.config.EnableRangeRequests { + meta, err := e.readMetadataWithRange(ctx, key) + if err == nil { + return meta, nil + } + // Fall through to full read on error } - switch metadata.objVersion { - case 1: - metadata.headerLength = binary.BigEndian.Uint32(data[4:8]) - metadata.bodyVersion = binary.BigEndian.Uint32(data[8:12]) - metadata.bodyLength = binary.BigEndian.Uint32(data[12:16]) - metadataLength += 12 + // Full read fallback + obj, err := e.client.GetObject(ctx, e.bucket, key, minio.GetObjectOptions{}) + if err != nil { + errResp := minio.ToErrorResponse(err) + if errResp.Code == "NoSuchKey" { + return nil, nil + } + return nil, fmt.Errorf("failed to get object: %w", err) + } + defer obj.Close() + + buf := make([]byte, maxMetadataSize) + n, err := obj.Read(buf) + if (err != nil && err != io.EOF) || n == 0 { + return nil, fmt.Errorf("failed to read metadata: %w", err) } - return metadata, metadataLength, nil + return readObjectMetadata(buf[:n]) } -func (e *S3Engine) writeObjectMetadata(metadata *objectMetadata) []byte { - data := make([]byte, 4, 16) - binary.BigEndian.PutUint32(data, metadata.objVersion) +// readMetadataWithRange reads metadata using HTTP Range request. 
+func (e *S3Engine) readMetadataWithRange(ctx context.Context, key string) (*objectMetadata, error) { + opts := minio.GetObjectOptions{} + if err := opts.SetRange(0, int64(maxMetadataSize-1)); err != nil { + return nil, err + } - switch metadata.objVersion { - case 1: - data = binary.BigEndian.AppendUint32(data, metadata.headerLength) - data = binary.BigEndian.AppendUint32(data, metadata.bodyVersion) - data = binary.BigEndian.AppendUint32(data, metadata.bodyLength) + obj, err := e.client.GetObject(ctx, e.bucket, key, opts) + if err != nil { + errResp := minio.ToErrorResponse(err) + if errResp.Code == "NoSuchKey" { + return nil, nil + } + return nil, fmt.Errorf("failed to get object with range: %w", err) + } + defer obj.Close() + + buf := make([]byte, maxMetadataSize) + n, err := obj.Read(buf) + if err != nil && err != io.EOF { + return nil, fmt.Errorf("failed to read range: %w", err) } - return data + return readObjectMetadata(buf[:n]) } -func (e *S3Engine) GetBlock(ctx context.Context, slot uint64, root []byte, parseBlock func(uint64, []byte) (interface{}, error)) (*types.BlockData, error) { +// GetBlock retrieves block data with selective loading based on flags. +func (e *S3Engine) GetBlock( + ctx context.Context, + slot uint64, + root []byte, + flags types.BlockDataFlags, + parseBlock func(uint64, []byte) (any, error), + parsePayload func(uint64, []byte) (any, error), +) (*types.BlockData, error) { key := e.getObjectKey(root, slot) - e.getCount.Add(1) + // Try range-based loading if enabled + if e.config.EnableRangeRequests && e.rangeRequestsEnabled { + data, err := e.getBlockWithRanges(ctx, key, flags, parseBlock, parsePayload) + if err == nil { + return data, nil + } + // Fall through to full read on error + } + + // Full read fallback + return e.getBlockFull(ctx, key, flags, parseBlock, parsePayload) +} + +// getBlockWithRanges uses a single range request for selective loading. +// Makes exactly 2 GET requests: one for metadata, one for all requested data. 
+func (e *S3Engine) getBlockWithRanges( + ctx context.Context, + key string, + flags types.BlockDataFlags, + parseBlock func(uint64, []byte) (any, error), + parsePayload func(uint64, []byte) (any, error), +) (*types.BlockData, error) { + // First, get metadata (1 GET request) + meta, err := e.readMetadataWithRange(ctx, key) + if err != nil { + return nil, err + } + if meta == nil { + return nil, nil + } + + // Calculate the single byte range spanning all requested components + rangeStart, rangeEnd := meta.getDataRange(flags) + if rangeStart < 0 { + // No data to fetch + return &types.BlockData{ + HeaderVersion: uint64(meta.ObjVersion), + BodyVersion: uint64(meta.BodyVersion), + PayloadVersion: uint64(meta.PayloadVersion), + BalVersion: uint64(meta.BalVersion), + }, nil + } + + // Fetch all requested data in a single GET request + opts := minio.GetObjectOptions{} + if err := opts.SetRange(rangeStart, rangeEnd); err != nil { + return nil, err + } + + obj, err := e.client.GetObject(ctx, e.bucket, key, opts) + if err != nil { + return nil, fmt.Errorf("failed to get object range: %w", err) + } + defer obj.Close() + + data, err := io.ReadAll(obj) + if err != nil { + return nil, fmt.Errorf("failed to read object range: %w", err) + } + + // Extract requested components from the fetched data + return e.extractComponents(meta, flags, data, rangeStart, parseBlock, parsePayload) +} + +// extractComponents extracts requested components from fetched data. 
+func (e *S3Engine) extractComponents( + meta *objectMetadata, + flags types.BlockDataFlags, + data []byte, + dataStartOffset int64, + parseBlock func(uint64, []byte) (any, error), + parsePayload func(uint64, []byte) (any, error), +) (*types.BlockData, error) { + blockData := &types.BlockData{ + HeaderVersion: uint64(meta.ObjVersion), + BodyVersion: uint64(meta.BodyVersion), + PayloadVersion: uint64(meta.PayloadVersion), + BalVersion: uint64(meta.BalVersion), + } + + // Extract header if requested + if flags.Has(types.BlockDataFlagHeader) && meta.HeaderLength > 0 { + start := int64(meta.headerOffset()) - dataStartOffset + end := start + int64(meta.HeaderLength) + if start >= 0 && end <= int64(len(data)) { + blockData.HeaderData = data[start:end] + } + } + + // Extract body if requested + if flags.Has(types.BlockDataFlagBody) && meta.BodyLength > 0 { + start := int64(meta.bodyOffset()) - dataStartOffset + end := start + int64(meta.BodyLength) + if start >= 0 && end <= int64(len(data)) { + bodyData := data[start:end] + if parseBlock != nil { + body, err := parseBlock(uint64(meta.BodyVersion), bodyData) + if err != nil { + return nil, fmt.Errorf("failed to parse body: %w", err) + } + blockData.Body = body + } else { + blockData.BodyData = bodyData + } + } + } + + // Extract payload if requested (v2+) + if flags.Has(types.BlockDataFlagPayload) && meta.PayloadLength > 0 && meta.ObjVersion >= 2 { + start := int64(meta.payloadOffset()) - dataStartOffset + end := start + int64(meta.PayloadLength) + if start >= 0 && end <= int64(len(data)) { + payloadData := data[start:end] + if parsePayload != nil { + payload, err := parsePayload(uint64(meta.PayloadVersion), payloadData) + if err != nil { + return nil, fmt.Errorf("failed to parse payload: %w", err) + } + blockData.Payload = payload + } else { + blockData.PayloadData = payloadData + } + } + } + + // Extract BAL if requested (v2+) + if flags.Has(types.BlockDataFlagBal) && meta.BalLength > 0 && meta.ObjVersion >= 2 { + start 
:= int64(meta.balOffset()) - dataStartOffset + end := start + int64(meta.BalLength) + if start >= 0 && end <= int64(len(data)) { + blockData.BalData = data[start:end] + } + } + + return blockData, nil +} + +// getBlockFull performs a full object read (fallback when range requests fail). +func (e *S3Engine) getBlockFull( + ctx context.Context, + key string, + flags types.BlockDataFlags, + parseBlock func(uint64, []byte) (any, error), + parsePayload func(uint64, []byte) (any, error), +) (*types.BlockData, error) { obj, err := e.client.GetObject(ctx, e.bucket, key, minio.GetObjectOptions{}) if err != nil { - if minio.ToErrorResponse(err).Code == "NoSuchKey" { + errResp := minio.ToErrorResponse(err) + if errResp.Code == "NoSuchKey" { return nil, nil } return nil, fmt.Errorf("failed to get object: %w", err) } defer obj.Close() - // read metadata - buf := make([]byte, 1024) - buflen, err := obj.Read(buf) - if (err != nil && err != io.EOF) || buflen == 0 { - return nil, fmt.Errorf("failed to read metadata: %w", err) + // Read entire object + data, err := io.ReadAll(obj) + if err != nil { + return nil, fmt.Errorf("failed to read object: %w", err) } - metadata, metadataLength, err := e.readObjectMetadata(buf) + // Parse metadata + meta, err := readObjectMetadata(data) if err != nil { return nil, fmt.Errorf("failed to read metadata: %w", err) } - headerData := make([]byte, metadata.headerLength) - headerOffset := 0 - if buflen > metadataLength { - copy(headerData, buf[metadataLength:buflen]) - headerOffset = buflen - metadataLength + blockData := &types.BlockData{ + HeaderVersion: uint64(meta.ObjVersion), + BodyVersion: uint64(meta.BodyVersion), + PayloadVersion: uint64(meta.PayloadVersion), + BalVersion: uint64(meta.BalVersion), } - if buflen < int(metadataLength)+int(metadata.headerLength) { - _, err = obj.Read(headerData[headerOffset:]) - if err != nil { - return nil, fmt.Errorf("failed to read header data: %w", err) - } - } + metaSize := meta.metadataSize() - bodyData := 
make([]byte, metadata.bodyLength) - bodyOffset := 0 - if buflen > int(metadataLength)+int(metadata.headerLength) { - copy(bodyData, buf[int(metadataLength)+int(metadata.headerLength):buflen]) - bodyOffset = buflen - int(metadataLength) - int(metadata.headerLength) + // Extract header if requested + if flags.Has(types.BlockDataFlagHeader) && meta.HeaderLength > 0 { + headerEnd := metaSize + int(meta.HeaderLength) + if headerEnd <= len(data) { + blockData.HeaderData = data[metaSize:headerEnd] + } } - if buflen < int(metadataLength)+int(metadata.headerLength)+int(metadata.bodyLength) { - _, err = obj.Read(bodyData[bodyOffset:]) - if err != nil { - return nil, fmt.Errorf("failed to read body data: %w", err) + // Extract body if requested + if flags.Has(types.BlockDataFlagBody) && meta.BodyLength > 0 { + bodyStart := metaSize + int(meta.HeaderLength) + bodyEnd := bodyStart + int(meta.BodyLength) + if bodyEnd <= len(data) { + bodyData := data[bodyStart:bodyEnd] + if parseBlock != nil { + body, err := parseBlock(uint64(meta.BodyVersion), bodyData) + if err != nil { + return nil, fmt.Errorf("failed to parse body: %w", err) + } + blockData.Body = body + } else { + blockData.BodyData = bodyData + } } } - blockData := &types.BlockData{ - HeaderVersion: uint64(metadata.objVersion), - HeaderData: headerData, - BodyVersion: uint64(metadata.bodyVersion), + // Extract payload if requested (v2+) + if flags.Has(types.BlockDataFlagPayload) && meta.PayloadLength > 0 && meta.ObjVersion >= 2 { + payloadStart := metaSize + int(meta.HeaderLength) + int(meta.BodyLength) + payloadEnd := payloadStart + int(meta.PayloadLength) + if payloadEnd <= len(data) { + payloadData := data[payloadStart:payloadEnd] + if parsePayload != nil { + payload, err := parsePayload(uint64(meta.PayloadVersion), payloadData) + if err != nil { + return nil, fmt.Errorf("failed to parse payload: %w", err) + } + blockData.Payload = payload + } else { + blockData.PayloadData = payloadData + } + } } - 
e.getBytes.Add(int64(metadataLength) + int64(metadata.headerLength) + int64(metadata.bodyLength)) - - if parseBlock != nil { - body, err := parseBlock(uint64(metadata.bodyVersion), bodyData) - if err != nil { - return nil, fmt.Errorf("failed to parse body: %w", err) + // Extract BAL if requested (v2+) + if flags.Has(types.BlockDataFlagBal) && meta.BalLength > 0 && meta.ObjVersion >= 2 { + balStart := metaSize + int(meta.HeaderLength) + int(meta.BodyLength) + int(meta.PayloadLength) + balEnd := balStart + int(meta.BalLength) + if balEnd <= len(data) { + blockData.BalData = data[balStart:balEnd] } - - blockData.Body = body - } else { - blockData.BodyData = bodyData } return blockData, nil } -func (e *S3Engine) AddBlock(ctx context.Context, slot uint64, root []byte, dataCb func() (*types.BlockData, error)) (bool, error) { +// AddBlock stores block data. Returns (added, updated, error). +func (e *S3Engine) AddBlock( + ctx context.Context, + slot uint64, + root []byte, + dataCb func() (*types.BlockData, error), +) (bool, bool, error) { key := e.getObjectKey(root, slot) e.statCount.Add(1) - // Check if object already exists - stat, err := e.client.StatObject(ctx, e.bucket, key, minio.StatObjectOptions{}) - if err == nil && stat.Size > 0 { - return false, nil + // Check what components already exist + existingMeta, err := e.readMetadata(ctx, key) + if err != nil && err.Error() != "failed to get object: NoSuchKey" { + // NOTE(review): this branch runs only for errors OTHER than not-found, yet retries the lookup — confirm the condition is not inverted + existingFlags, _ := e.GetStoredComponents(ctx, slot, root) + if existingFlags == 0 { + existingMeta = nil + } } + // Get the new data blockData, err := dataCb() if err != nil { - return false, fmt.Errorf("failed to get block data: %w", err) + return false, false, fmt.Errorf("failed to get block data: %w", err) + } + + // Calculate what we already have + var existingFlags types.BlockDataFlags + if existingMeta != nil { + existingFlags = existingMeta.storedFlags() + } + + // Calculate what the new data provides + var newFlags 
types.BlockDataFlags + if len(blockData.HeaderData) > 0 { + newFlags |= types.BlockDataFlagHeader + } + if len(blockData.BodyData) > 0 { + newFlags |= types.BlockDataFlagBody + } + if blockData.PayloadVersion != 0 && len(blockData.PayloadData) > 0 { + newFlags |= types.BlockDataFlagPayload + } + if blockData.BalVersion != 0 && len(blockData.BalData) > 0 { + newFlags |= types.BlockDataFlagBal + } + + // Check if we need to update (new data has more components) + needsUpdate := (newFlags &^ existingFlags) != 0 + isNew := existingFlags == 0 + + if !isNew && !needsUpdate { + // Already have all the data + return false, false, nil } - metadata := &objectMetadata{ - objVersion: uint32(blockData.HeaderVersion), - headerLength: uint32(len(blockData.HeaderData)), - bodyVersion: uint32(blockData.BodyVersion), - bodyLength: uint32(len(blockData.BodyData)), + // If updating, merge with existing data + finalData := blockData + if !isNew && needsUpdate { + // Fetch existing data and merge + existingData, err := e.GetBlock(ctx, slot, root, types.BlockDataFlagAll, nil, nil) + if err == nil && existingData != nil { + finalData = mergeBlockData(existingData, blockData) + } } - metadataBytes := e.writeObjectMetadata(metadata) - metadataLength := len(metadataBytes) + // Write object (v1 for pre-gloas, v2 for gloas+) + metaBytes := writeObjectMetadata(finalData) + + // Calculate total size and build reader chain (avoids copying to concatenated buffer) + totalSize := int64(len(metaBytes) + len(finalData.HeaderData) + len(finalData.BodyData)) + readers := []io.Reader{ + bytes.NewReader(metaBytes), + bytes.NewReader(finalData.HeaderData), + bytes.NewReader(finalData.BodyData), + } - // Prepare data with header and body versions and lengths - data := make([]byte, metadataLength+int(metadata.headerLength)+int(metadata.bodyLength)) - copy(data[:metadataLength], metadataBytes) - copy(data[metadataLength:metadataLength+int(metadata.headerLength)], blockData.HeaderData) - 
copy(data[metadataLength+int(metadata.headerLength):], blockData.BodyData) + if finalData.BodyVersion >= uint64(spec.DataVersionGloas) { + totalSize += int64(len(finalData.PayloadData) + len(finalData.BalData)) + readers = append(readers, + bytes.NewReader(finalData.PayloadData), + bytes.NewReader(finalData.BalData), + ) + } // Upload object e.putCount.Add(1) @@ -241,14 +504,55 @@ func (e *S3Engine) AddBlock(ctx context.Context, slot uint64, root []byte, dataC ctx, e.bucket, key, - bytes.NewReader(data), - int64(len(data)), + io.MultiReader(readers...), + totalSize, minio.PutObjectOptions{ContentType: "application/octet-stream"}, ) if err != nil { - return false, fmt.Errorf("failed to upload block: %w", err) + return false, false, fmt.Errorf("failed to upload block: %w", err) + } + + e.putBytes.Add(totalSize) + + return isNew, !isNew && needsUpdate, nil +} + +// mergeBlockData merges existing data with new data (new takes precedence for non-empty fields). +func mergeBlockData(existing, new *types.BlockData) *types.BlockData { + result := &types.BlockData{} + + // Use new data if available, otherwise keep existing + if len(new.HeaderData) > 0 { + result.HeaderVersion = new.HeaderVersion + result.HeaderData = new.HeaderData + } else { + result.HeaderVersion = existing.HeaderVersion + result.HeaderData = existing.HeaderData + } + + if len(new.BodyData) > 0 { + result.BodyVersion = new.BodyVersion + result.BodyData = new.BodyData + } else { + result.BodyVersion = existing.BodyVersion + result.BodyData = existing.BodyData + } + + if new.PayloadVersion != 0 && len(new.PayloadData) > 0 { + result.PayloadVersion = new.PayloadVersion + result.PayloadData = new.PayloadData + } else { + result.PayloadVersion = existing.PayloadVersion + result.PayloadData = existing.PayloadData + } + + if new.BalVersion != 0 && len(new.BalData) > 0 { + result.BalVersion = new.BalVersion + result.BalData = new.BalData + } else { + result.BalVersion = existing.BalVersion + result.BalData = 
existing.BalData } - e.putBytes.Add(int64(len(data))) - return true, nil + return result } diff --git a/blockdb/tiered/tiered.go b/blockdb/tiered/tiered.go new file mode 100644 index 000000000..04f05a16f --- /dev/null +++ b/blockdb/tiered/tiered.go @@ -0,0 +1,278 @@ +package tiered + +import ( + "context" + "fmt" + + "github.com/sirupsen/logrus" + + "github.com/ethpandaops/dora/blockdb/pebble" + "github.com/ethpandaops/dora/blockdb/s3" + "github.com/ethpandaops/dora/blockdb/types" + dtypes "github.com/ethpandaops/dora/types" +) + +// TieredEngine combines Pebble (cache) and S3 (primary storage) in a tiered architecture. +// Reads check cache first, then fall back to S3. +// Writes go to both (write-through). +type TieredEngine struct { + cache *pebble.PebbleEngine + primary *s3.S3Engine + cleanup *pebble.CacheCleanup + logger logrus.FieldLogger +} + +// NewTieredEngine creates a new tiered storage engine. +func NewTieredEngine(config dtypes.TieredBlockDBConfig, logger logrus.FieldLogger) (types.BlockDbEngine, error) { + // Initialize Pebble cache + cacheEngine, err := pebble.NewPebbleEngine(config.Pebble) + if err != nil { + return nil, fmt.Errorf("failed to initialize pebble cache: %w", err) + } + + pebbleEngine, ok := cacheEngine.(*pebble.PebbleEngine) + if !ok { + return nil, fmt.Errorf("unexpected pebble engine type") + } + + // Initialize S3 primary storage + primaryEngine, err := s3.NewS3Engine(config.S3) + if err != nil { + cacheEngine.Close() + return nil, fmt.Errorf("failed to initialize s3 primary storage: %w", err) + } + + s3Engine, ok := primaryEngine.(*s3.S3Engine) + if !ok { + cacheEngine.Close() + return nil, fmt.Errorf("unexpected s3 engine type") + } + + // Initialize cache cleanup + cleanup := pebble.NewCacheCleanup(pebbleEngine, logger) + cleanup.Start() + + return &TieredEngine{ + cache: pebbleEngine, + primary: s3Engine, + cleanup: cleanup, + logger: logger.WithField("component", "tiered-blockdb"), + }, nil +} + +// Close closes both storage 
engines. +func (e *TieredEngine) Close() error { + if e.cleanup != nil { + e.cleanup.Stop() + } + + var errs []error + if err := e.cache.Close(); err != nil { + errs = append(errs, fmt.Errorf("cache close: %w", err)) + } + if err := e.primary.Close(); err != nil { + errs = append(errs, fmt.Errorf("primary close: %w", err)) + } + + if len(errs) > 0 { + return errs[0] + } + return nil +} + +// GetStoredComponents returns which components exist for a block. +// Checks cache first, then S3. +func (e *TieredEngine) GetStoredComponents(ctx context.Context, slot uint64, root []byte) (types.BlockDataFlags, error) { + // Check cache first + cacheFlags, err := e.cache.GetStoredComponents(ctx, slot, root) + if err != nil { + e.logger.Debugf("cache GetStoredComponents error: %v", err) + } + + // If cache has all components, return early + if cacheFlags == types.BlockDataFlagAll { + return cacheFlags, nil + } + + // Check S3 for additional components + s3Flags, err := e.primary.GetStoredComponents(ctx, slot, root) + if err != nil { + return cacheFlags, nil // Return cache result on S3 error + } + + return cacheFlags | s3Flags, nil +} + +// GetBlock retrieves block data with selective loading. +// Checks cache first, fetches missing components from S3. 
+func (e *TieredEngine) GetBlock( + ctx context.Context, + slot uint64, + root []byte, + flags types.BlockDataFlags, + parseBlock func(uint64, []byte) (any, error), + parsePayload func(uint64, []byte) (any, error), +) (*types.BlockData, error) { + // Check what's in cache + cacheFlags, _ := e.cache.GetStoredComponents(ctx, slot, root) + + // Determine what we can get from cache vs S3 + cacheRequestFlags := flags & cacheFlags + s3RequestFlags := flags &^ cacheFlags + + result := &types.BlockData{} + + // Get from cache + if cacheRequestFlags != 0 { + cacheData, err := e.cache.GetBlock(ctx, slot, root, cacheRequestFlags, parseBlock, parsePayload) + if err != nil { + e.logger.Debugf("cache GetBlock error: %v", err) + } else if cacheData != nil { + mergeBlockDataInto(result, cacheData) + + // Record LRU access + if e.cleanup != nil { + e.cleanup.RecordAccess(root, cacheRequestFlags) + } + } + } + + // Get missing components from S3 + if s3RequestFlags != 0 { + s3Data, err := e.primary.GetBlock(ctx, slot, root, s3RequestFlags, parseBlock, parsePayload) + if err != nil { + e.logger.Debugf("s3 GetBlock error: %v", err) + } else if s3Data != nil { + mergeBlockDataInto(result, s3Data) + + // Cache the S3 data for future reads + e.cacheS3Data(ctx, slot, root, s3Data, s3RequestFlags) + } + } + + return result, nil +} + +// cacheS3Data stores S3 data in the cache for future reads. 
+func (e *TieredEngine) cacheS3Data(ctx context.Context, slot uint64, root []byte, data *types.BlockData, flags types.BlockDataFlags) { + // Build cache data with only the components we fetched from S3 + cacheData := &types.BlockData{} + + if flags.Has(types.BlockDataFlagHeader) && len(data.HeaderData) > 0 { + cacheData.HeaderVersion = data.HeaderVersion + cacheData.HeaderData = data.HeaderData + } + if flags.Has(types.BlockDataFlagBody) && len(data.BodyData) > 0 { + cacheData.BodyVersion = data.BodyVersion + cacheData.BodyData = data.BodyData + } + if flags.Has(types.BlockDataFlagPayload) && len(data.PayloadData) > 0 { + cacheData.PayloadVersion = data.PayloadVersion + cacheData.PayloadData = data.PayloadData + } + if flags.Has(types.BlockDataFlagBal) && len(data.BalData) > 0 { + cacheData.BalVersion = data.BalVersion + cacheData.BalData = data.BalData + } + + // Add to cache (ignore errors - caching is best effort) + _, _, err := e.cache.AddBlock(ctx, slot, root, func() (*types.BlockData, error) { + return cacheData, nil + }) + if err != nil { + e.logger.Debugf("failed to cache S3 data: %v", err) + } + + // Flush LRU updates since we did a write + if e.cleanup != nil { + e.cleanup.FlushLRU() + } +} + +// AddBlock stores block data using write-through to both cache and S3. +// Returns (added, updated, error). 
+func (e *TieredEngine) AddBlock( + ctx context.Context, + slot uint64, + root []byte, + dataCb func() (*types.BlockData, error), +) (bool, bool, error) { + // Get the data once + data, err := dataCb() + if err != nil { + return false, false, err + } + + // Check what components already exist (in cache or S3) + existingFlags, _ := e.GetStoredComponents(ctx, slot, root) + + // Determine what new data provides + var newFlags types.BlockDataFlags + if len(data.HeaderData) > 0 { + newFlags |= types.BlockDataFlagHeader + } + if len(data.BodyData) > 0 { + newFlags |= types.BlockDataFlagBody + } + if data.PayloadVersion != 0 && len(data.PayloadData) > 0 { + newFlags |= types.BlockDataFlagPayload + } + if data.BalVersion != 0 && len(data.BalData) > 0 { + newFlags |= types.BlockDataFlagBal + } + + // Check if we need to update + needsUpdate := (newFlags &^ existingFlags) != 0 + isNew := existingFlags == 0 + + if !isNew && !needsUpdate { + return false, false, nil + } + + // Write-through: write to S3 first (primary), then cache + // S3 handles merging with existing data + s3Added, s3Updated, err := e.primary.AddBlock(ctx, slot, root, func() (*types.BlockData, error) { + return data, nil + }) + if err != nil { + return false, false, fmt.Errorf("failed to write to S3: %w", err) + } + + // Write to cache + _, _, err = e.cache.AddBlock(ctx, slot, root, func() (*types.BlockData, error) { + return data, nil + }) + if err != nil { + e.logger.Warnf("failed to write to cache: %v", err) + // Don't fail - S3 write succeeded + } + + // Flush LRU updates after write + if e.cleanup != nil { + e.cleanup.FlushLRU() + } + + return s3Added, s3Updated, nil +} + +// mergeBlockDataInto merges source data into target (source values take precedence for non-empty fields). 
+func mergeBlockDataInto(target, source *types.BlockData) { + if source.HeaderVersion != 0 || len(source.HeaderData) > 0 { + target.HeaderVersion = source.HeaderVersion + target.HeaderData = source.HeaderData + } + if source.BodyVersion != 0 || len(source.BodyData) > 0 { + target.BodyVersion = source.BodyVersion + target.BodyData = source.BodyData + target.Body = source.Body + } + if source.PayloadVersion != 0 || len(source.PayloadData) > 0 { + target.PayloadVersion = source.PayloadVersion + target.PayloadData = source.PayloadData + target.Payload = source.Payload + } + if source.BalVersion != 0 || len(source.BalData) > 0 { + target.BalVersion = source.BalVersion + target.BalData = source.BalData + } +} diff --git a/blockdb/types/engine.go b/blockdb/types/engine.go index 80db81f67..86b352ddf 100644 --- a/blockdb/types/engine.go +++ b/blockdb/types/engine.go @@ -2,13 +2,25 @@ package types import "context" -// BlockData holds beacon block header and body data. +// BlockData contains all data components for a block. type BlockData struct { + // Header data HeaderVersion uint64 HeaderData []byte - BodyVersion uint64 - BodyData []byte - Body interface{} + + // Body data + BodyVersion uint64 + BodyData []byte + Body any // Parsed body (optional) + + // Execution payload data (ePBS) + PayloadVersion uint64 + PayloadData []byte + Payload any // Parsed payload (optional) + + // Block access list data + BalVersion uint64 + BalData []byte } // ExecDataTxSections holds all compressed section data for a single @@ -22,11 +34,35 @@ type ExecDataTxSections struct { StateChangeData []byte // snappy-compressed, nil if section not present } -// BlockDbEngine is the interface for beacon block storage. +// BlockDbEngine defines the interface for block database engines. type BlockDbEngine interface { + // Close closes the database engine. 
Close() error - GetBlock(ctx context.Context, slot uint64, root []byte, parseBlock func(uint64, []byte) (interface{}, error)) (*BlockData, error) - AddBlock(ctx context.Context, slot uint64, root []byte, dataCb func() (*BlockData, error)) (bool, error) + + // GetBlock retrieves block data with selective loading based on flags. + // If parseBlock is nil, raw body data is stored in BlockData.BodyData. + // If parsePayload is nil, raw payload data is stored in BlockData.PayloadData. + GetBlock( + ctx context.Context, + slot uint64, + root []byte, + flags BlockDataFlags, + parseBlock func(uint64, []byte) (any, error), + parsePayload func(uint64, []byte) (any, error), + ) (*BlockData, error) + + // AddBlock stores block data. Returns: + // - added: true if a new block was created + // - updated: true if an existing block was updated with new components + AddBlock( + ctx context.Context, + slot uint64, + root []byte, + dataCb func() (*BlockData, error), + ) (added bool, updated bool, err error) + + // GetStoredComponents returns which components exist for a block. + GetStoredComponents(ctx context.Context, slot uint64, root []byte) (BlockDataFlags, error) } // ExecDataEngine is the interface for per-block execution data storage. diff --git a/blockdb/types/execdata_sections_ssz.go b/blockdb/types/execdata_sections_ssz.go index b679cac9e..0517875ae 100644 --- a/blockdb/types/execdata_sections_ssz.go +++ b/blockdb/types/execdata_sections_ssz.go @@ -1,6 +1,6 @@ // Code generated by dynamic-ssz. DO NOT EDIT. 
// Hash: d3acce19a9e9c0afc0038f9dfd949d804d4b82beafef97cc4d618d03c202d4af -// Version: v1.2.1 (https://github.com/pk910/dynamic-ssz) +// Version: v1.2.2 (https://github.com/pk910/dynamic-ssz) package types import ( @@ -21,43 +21,41 @@ func (t *ReceiptMetaData) MarshalSSZTo(buf []byte) (dst []byte, err error) { if t == nil { t = new(ReceiptMetaData) } - { // Field #0 'Version' + { // Static Field #0 'Version' dst = binary.LittleEndian.AppendUint16(dst, t.Version) } - { // Field #1 'Status' + { // Static Field #1 'Status' dst = append(dst, byte(t.Status)) } - { // Field #2 'TxType' + { // Static Field #2 'TxType' dst = append(dst, byte(t.TxType)) } - { // Field #3 'CumulativeGasUsed' + { // Static Field #3 'CumulativeGasUsed' dst = binary.LittleEndian.AppendUint64(dst, t.CumulativeGasUsed) } - { // Field #4 'GasUsed' + { // Static Field #4 'GasUsed' dst = binary.LittleEndian.AppendUint64(dst, t.GasUsed) } - { // Field #5 'EffectiveGasPrice' + { // Static Field #5 'EffectiveGasPrice' t := &t.EffectiveGasPrice - for i := range 4 { - dst = binary.LittleEndian.AppendUint64(dst, t[i]) - } + dst = sszutils.MarshalUint64Slice(dst, t[:4]) } - { // Field #6 'BlobGasUsed' + { // Static Field #6 'BlobGasUsed' dst = binary.LittleEndian.AppendUint64(dst, t.BlobGasUsed) } - { // Field #7 'LogsBloom' + { // Static Field #7 'LogsBloom' dst = append(dst, t.LogsBloom[:256]...) } - { // Field #8 'From' + { // Static Field #8 'From' dst = append(dst, t.From[:20]...) } - { // Field #9 'To' + { // Static Field #9 'To' dst = append(dst, t.To[:20]...) } - { // Field #10 'ContractAddress' + { // Static Field #10 'ContractAddress' dst = append(dst, t.ContractAddress[:20]...) 
} - { // Field #11 'HasContractAddr' + { // Static Field #11 'HasContractAddr' dst = sszutils.MarshalBool(dst, t.HasContractAddr) } return dst, nil @@ -66,7 +64,7 @@ func (t *ReceiptMetaData) MarshalSSZTo(buf []byte) (dst []byte, err error) { func (t *ReceiptMetaData) UnmarshalSSZ(buf []byte) (err error) { buflen := len(buf) if buflen < 377 { - return sszutils.ErrUnexpectedEOF + return sszutils.NewSszErrorf(sszutils.ErrUnexpectedEOF, "not enough data for fixed fields (have %d, needed %d)", buflen, 377) } { // Field #0 'Version' (static) buf := buf[0:2] @@ -91,10 +89,7 @@ func (t *ReceiptMetaData) UnmarshalSSZ(buf []byte) (err error) { { // Field #5 'EffectiveGasPrice' (static) buf := buf[20:52] val1 := t.EffectiveGasPrice - for i := range 4 { - buf := buf[8*i : 8*(i+1)] - val1[i] = binary.LittleEndian.Uint64(buf) - } + sszutils.UnmarshalUint64Slice(val1[:4], buf) t.EffectiveGasPrice = val1 } { // Field #6 'BlobGasUsed' (static) @@ -120,7 +115,7 @@ func (t *ReceiptMetaData) UnmarshalSSZ(buf []byte) (err error) { { // Field #11 'HasContractAddr' (static) buf := buf[376:377] if buf[0] != 1 && buf[0] != 0 { - return sszutils.ErrInvalidValueRange + return sszutils.NewSszError(sszutils.ErrInvalidValueRange, "bool value must be 0 or 1") } t.HasContractAddr = buf[0] == 1 } @@ -164,7 +159,7 @@ func (t *ReceiptMetaData) HashTreeRootWith(hh sszutils.HashWalker) error { { // Field #5 'EffectiveGasPrice' t := &t.EffectiveGasPrice if root, err := t.HashTreeRoot(); err != nil { - return err + return sszutils.ErrorWithPath(err, "EffectiveGasPrice") } else { hh.AppendBytes32(root[:]) } @@ -199,10 +194,10 @@ func (t *BlockReceiptMeta) MarshalSSZTo(buf []byte) (dst []byte, err error) { if t == nil { t = new(BlockReceiptMeta) } - { // Field #0 'Version' + { // Static Field #0 'Version' dst = binary.LittleEndian.AppendUint16(dst, t.Version) } - { // Field #1 'BlobGasPrice' + { // Static Field #1 'BlobGasPrice' dst = binary.LittleEndian.AppendUint64(dst, t.BlobGasPrice) } return dst, 
nil @@ -211,7 +206,7 @@ func (t *BlockReceiptMeta) MarshalSSZTo(buf []byte) (dst []byte, err error) { func (t *BlockReceiptMeta) UnmarshalSSZ(buf []byte) (err error) { buflen := len(buf) if buflen < 10 { - return sszutils.ErrUnexpectedEOF + return sszutils.NewSszErrorf(sszutils.ErrUnexpectedEOF, "not enough data for fixed fields (have %d, needed %d)", buflen, 10) } { // Field #0 'Version' (static) buf := buf[0:2] @@ -258,64 +253,56 @@ func (t *StateChangeAccount) MarshalSSZ() ([]byte, error) { } func (t *StateChangeAccount) MarshalSSZTo(buf []byte) (dst []byte, err error) { dst = buf + zeroBytes := sszutils.ZeroBytes() if t == nil { t = new(StateChangeAccount) } dstlen := len(dst) - { // Field #0 'Address' + { // Static Field #0 'Address' dst = append(dst, t.Address[:20]...) } - { // Field #1 'Flags' + { // Static Field #1 'Flags' dst = append(dst, byte(t.Flags)) } - { // Field #2 'PreBalance' + { // Static Field #2 'PreBalance' t := &t.PreBalance - for i := range 4 { - dst = binary.LittleEndian.AppendUint64(dst, t[i]) - } + dst = sszutils.MarshalUint64Slice(dst, t[:4]) } - { // Field #3 'PostBalance' + { // Static Field #3 'PostBalance' t := &t.PostBalance - for i := range 4 { - dst = binary.LittleEndian.AppendUint64(dst, t[i]) - } + dst = sszutils.MarshalUint64Slice(dst, t[:4]) } - { // Field #4 'PreNonce' + { // Static Field #4 'PreNonce' dst = binary.LittleEndian.AppendUint64(dst, t.PreNonce) } - { // Field #5 'PostNonce' + { // Static Field #5 'PostNonce' dst = binary.LittleEndian.AppendUint64(dst, t.PostNonce) } - // Offset #6 'PreCode' - offset6 := len(dst) - dst = append(dst, 0, 0, 0, 0) - // Offset #7 'PostCode' - offset7 := len(dst) - dst = append(dst, 0, 0, 0, 0) - // Offset #8 'Slots' - offset8 := len(dst) - dst = append(dst, 0, 0, 0, 0) + // Offset Field #6 'PreCode' + // Offset Field #7 'PostCode' + // Offset Field #8 'Slots' + dst = append(dst, zeroBytes[:12]...) 
{ // Dynamic Field #6 'PreCode' - binary.LittleEndian.PutUint32(dst[offset6:], uint32(len(dst)-dstlen)) + binary.LittleEndian.PutUint32(dst[dstlen+101:], uint32(len(dst)-dstlen)) dst = append(dst, t.PreCode[:]...) } { // Dynamic Field #7 'PostCode' - binary.LittleEndian.PutUint32(dst[offset7:], uint32(len(dst)-dstlen)) + binary.LittleEndian.PutUint32(dst[dstlen+105:], uint32(len(dst)-dstlen)) dst = append(dst, t.PostCode[:]...) } { // Dynamic Field #8 'Slots' - binary.LittleEndian.PutUint32(dst[offset8:], uint32(len(dst)-dstlen)) + binary.LittleEndian.PutUint32(dst[dstlen+109:], uint32(len(dst)-dstlen)) t := t.Slots vlen := len(t) - for i := range vlen { - t := &t[i] - { // Field #0 'Slot' + for idx1 := range vlen { + t := &t[idx1] + { // Static Field #0 'Slot' dst = append(dst, t.Slot[:32]...) } - { // Field #1 'PreValue' + { // Static Field #1 'PreValue' dst = append(dst, t.PreValue[:32]...) } - { // Field #2 'PostValue' + { // Static Field #2 'PostValue' dst = append(dst, t.PostValue[:32]...) 
} } @@ -326,7 +313,7 @@ func (t *StateChangeAccount) MarshalSSZTo(buf []byte) (dst []byte, err error) { func (t *StateChangeAccount) UnmarshalSSZ(buf []byte) (err error) { buflen := len(buf) if buflen < 113 { - return sszutils.ErrUnexpectedEOF + return sszutils.NewSszErrorf(sszutils.ErrUnexpectedEOF, "not enough data for fixed fields (have %d, needed %d)", buflen, 113) } { // Field #0 'Address' (static) buf := buf[0:20] @@ -339,19 +326,13 @@ func (t *StateChangeAccount) UnmarshalSSZ(buf []byte) (err error) { { // Field #2 'PreBalance' (static) buf := buf[21:53] val1 := t.PreBalance - for i := range 4 { - buf := buf[8*i : 8*(i+1)] - val1[i] = binary.LittleEndian.Uint64(buf) - } + sszutils.UnmarshalUint64Slice(val1[:4], buf) t.PreBalance = val1 } { // Field #3 'PostBalance' (static) buf := buf[53:85] val2 := t.PostBalance - for i := range 4 { - buf := buf[8*i : 8*(i+1)] - val2[i] = binary.LittleEndian.Uint64(buf) - } + sszutils.UnmarshalUint64Slice(val2[:4], buf) t.PostBalance = val2 } { // Field #4 'PreNonce' (static) @@ -365,17 +346,17 @@ func (t *StateChangeAccount) UnmarshalSSZ(buf []byte) (err error) { // Field #6 'PreCode' (offset) offset6 := int(binary.LittleEndian.Uint32(buf[101:105])) if offset6 != 113 { - return sszutils.ErrOffset + return sszutils.ErrorWithPath(sszutils.NewSszErrorf(sszutils.ErrOffset, "first offset does not match (offset: %d, static len: %d)", offset6, 113), "PreCode:o") } // Field #7 'PostCode' (offset) offset7 := int(binary.LittleEndian.Uint32(buf[105:109])) if offset7 < offset6 || offset7 > buflen { - return sszutils.ErrOffset + return sszutils.ErrorWithPath(sszutils.NewSszErrorf(sszutils.ErrOffset, "offset out of range (offset=%d, prev=%d, buflen=%d)", offset7, offset6, buflen), "PostCode:o") } // Field #8 'Slots' (offset) offset8 := int(binary.LittleEndian.Uint32(buf[109:113])) if offset8 < offset7 || offset8 > buflen { - return sszutils.ErrOffset + return sszutils.ErrorWithPath(sszutils.NewSszErrorf(sszutils.ErrOffset, "offset out 
of range (offset=%d, prev=%d, buflen=%d)", offset8, offset7, buflen), "Slots:o") } { // Field #6 'PreCode' (dynamic) buf := buf[offset6:offset7] @@ -392,15 +373,15 @@ func (t *StateChangeAccount) UnmarshalSSZ(buf []byte) (err error) { val3 := t.Slots itemCount := len(buf) / 96 if len(buf)%96 != 0 { - return sszutils.ErrUnexpectedEOF + return sszutils.ErrorWithPath(sszutils.NewSszErrorf(sszutils.ErrUnexpectedEOF, "list length %d not a multiple of element size %d", len(buf), 96), "Slots") } val3 = sszutils.ExpandSlice(val3, itemCount) - for i := range itemCount { - val4 := val3[i] - buf := buf[96*i : 96*(i+1)] + for idx1 := range itemCount { + val4 := val3[idx1] + buf := buf[96*idx1 : 96*(idx1+1)] buflen := len(buf) if buflen < 96 { - return sszutils.ErrUnexpectedEOF + return sszutils.ErrorWithPathf(sszutils.NewSszErrorf(sszutils.ErrUnexpectedEOF, "not enough data for fixed fields (have %d, needed %d)", buflen, 96), "Slots[%d]", idx1) } { // Field #0 'Slot' (static) buf := buf[0:32] @@ -414,7 +395,7 @@ func (t *StateChangeAccount) UnmarshalSSZ(buf []byte) (err error) { buf := buf[64:96] copy(val4.PostValue[:], buf) } - val3[i] = val4 + val3[idx1] = val4 } t.Slots = val3 } @@ -471,7 +452,7 @@ func (t *StateChangeAccount) HashTreeRootWith(hh sszutils.HashWalker) error { { // Field #2 'PreBalance' t := &t.PreBalance if root, err := t.HashTreeRoot(); err != nil { - return err + return sszutils.ErrorWithPath(err, "PreBalance") } else { hh.AppendBytes32(root[:]) } @@ -479,7 +460,7 @@ func (t *StateChangeAccount) HashTreeRootWith(hh sszutils.HashWalker) error { { // Field #3 'PostBalance' t := &t.PostBalance if root, err := t.HashTreeRoot(); err != nil { - return err + return sszutils.ErrorWithPath(err, "PostBalance") } else { hh.AppendBytes32(root[:]) } @@ -504,8 +485,8 @@ func (t *StateChangeAccount) HashTreeRootWith(hh sszutils.HashWalker) error { t := t.Slots idx := hh.Index() vlen := uint64(len(t)) - for i := range int(vlen) { - t := &t[i] + for idx1 := range int(vlen) 
{ + t := &t[idx1] idx := hh.Index() { // Field #0 'Slot' hh.PutBytes(t.Slot[:32]) @@ -529,56 +510,50 @@ func (t *FlatCallFrame) MarshalSSZ() ([]byte, error) { } func (t *FlatCallFrame) MarshalSSZTo(buf []byte) (dst []byte, err error) { dst = buf + zeroBytes := sszutils.ZeroBytes() if t == nil { t = new(FlatCallFrame) } dstlen := len(dst) - { // Field #0 'Depth' + { // Static Field #0 'Depth' dst = binary.LittleEndian.AppendUint16(dst, t.Depth) } - { // Field #1 'Type' + { // Static Field #1 'Type' dst = append(dst, byte(t.Type)) } - { // Field #2 'From' + { // Static Field #2 'From' dst = append(dst, t.From[:20]...) } - { // Field #3 'To' + { // Static Field #3 'To' dst = append(dst, t.To[:20]...) } - { // Field #4 'Value' + { // Static Field #4 'Value' t := &t.Value - for i := range 4 { - dst = binary.LittleEndian.AppendUint64(dst, t[i]) - } + dst = sszutils.MarshalUint64Slice(dst, t[:4]) } - { // Field #5 'Gas' + { // Static Field #5 'Gas' dst = binary.LittleEndian.AppendUint64(dst, t.Gas) } - { // Field #6 'GasUsed' + { // Static Field #6 'GasUsed' dst = binary.LittleEndian.AppendUint64(dst, t.GasUsed) } - { // Field #7 'Status' + { // Static Field #7 'Status' dst = append(dst, byte(t.Status)) } - // Offset #8 'Input' - offset8 := len(dst) - dst = append(dst, 0, 0, 0, 0) - // Offset #9 'Output' - offset9 := len(dst) - dst = append(dst, 0, 0, 0, 0) - // Offset #10 'Error' - offset10 := len(dst) - dst = append(dst, 0, 0, 0, 0) + // Offset Field #8 'Input' + // Offset Field #9 'Output' + // Offset Field #10 'Error' + dst = append(dst, zeroBytes[:12]...) { // Dynamic Field #8 'Input' - binary.LittleEndian.PutUint32(dst[offset8:], uint32(len(dst)-dstlen)) + binary.LittleEndian.PutUint32(dst[dstlen+92:], uint32(len(dst)-dstlen)) dst = append(dst, t.Input[:]...) 
} { // Dynamic Field #9 'Output' - binary.LittleEndian.PutUint32(dst[offset9:], uint32(len(dst)-dstlen)) + binary.LittleEndian.PutUint32(dst[dstlen+96:], uint32(len(dst)-dstlen)) dst = append(dst, t.Output[:]...) } { // Dynamic Field #10 'Error' - binary.LittleEndian.PutUint32(dst[offset10:], uint32(len(dst)-dstlen)) + binary.LittleEndian.PutUint32(dst[dstlen+100:], uint32(len(dst)-dstlen)) dst = append(dst, t.Error[:]...) } return dst, nil @@ -587,7 +562,7 @@ func (t *FlatCallFrame) MarshalSSZTo(buf []byte) (dst []byte, err error) { func (t *FlatCallFrame) UnmarshalSSZ(buf []byte) (err error) { buflen := len(buf) if buflen < 104 { - return sszutils.ErrUnexpectedEOF + return sszutils.NewSszErrorf(sszutils.ErrUnexpectedEOF, "not enough data for fixed fields (have %d, needed %d)", buflen, 104) } { // Field #0 'Depth' (static) buf := buf[0:2] @@ -608,10 +583,7 @@ func (t *FlatCallFrame) UnmarshalSSZ(buf []byte) (err error) { { // Field #4 'Value' (static) buf := buf[43:75] val1 := t.Value - for i := range 4 { - buf := buf[8*i : 8*(i+1)] - val1[i] = binary.LittleEndian.Uint64(buf) - } + sszutils.UnmarshalUint64Slice(val1[:4], buf) t.Value = val1 } { // Field #5 'Gas' (static) @@ -629,17 +601,17 @@ func (t *FlatCallFrame) UnmarshalSSZ(buf []byte) (err error) { // Field #8 'Input' (offset) offset8 := int(binary.LittleEndian.Uint32(buf[92:96])) if offset8 != 104 { - return sszutils.ErrOffset + return sszutils.ErrorWithPath(sszutils.NewSszErrorf(sszutils.ErrOffset, "first offset does not match (offset: %d, static len: %d)", offset8, 104), "Input:o") } // Field #9 'Output' (offset) offset9 := int(binary.LittleEndian.Uint32(buf[96:100])) if offset9 < offset8 || offset9 > buflen { - return sszutils.ErrOffset + return sszutils.ErrorWithPath(sszutils.NewSszErrorf(sszutils.ErrOffset, "offset out of range (offset=%d, prev=%d, buflen=%d)", offset9, offset8, buflen), "Output:o") } // Field #10 'Error' (offset) offset10 := int(binary.LittleEndian.Uint32(buf[100:104])) if offset10 < 
offset9 || offset10 > buflen { - return sszutils.ErrOffset + return sszutils.ErrorWithPath(sszutils.NewSszErrorf(sszutils.ErrOffset, "offset out of range (offset=%d, prev=%d, buflen=%d)", offset10, offset9, buflen), "Error:o") } { // Field #8 'Input' (dynamic) buf := buf[offset8:offset9] @@ -716,7 +688,7 @@ func (t *FlatCallFrame) HashTreeRootWith(hh sszutils.HashWalker) error { { // Field #4 'Value' t := &t.Value if root, err := t.HashTreeRoot(); err != nil { - return err + return sszutils.ErrorWithPath(err, "Value") } else { hh.AppendBytes32(root[:]) } @@ -758,38 +730,35 @@ func (t *EventData) MarshalSSZTo(buf []byte) (dst []byte, err error) { t = new(EventData) } dstlen := len(dst) - { // Field #0 'EventIndex' + { // Static Field #0 'EventIndex' dst = binary.LittleEndian.AppendUint32(dst, t.EventIndex) } - { // Field #1 'Source' + { // Static Field #1 'Source' dst = append(dst, t.Source[:20]...) } - // Offset #2 'Topics' - offset2 := len(dst) - dst = append(dst, 0, 0, 0, 0) - // Offset #3 'Data' - offset3 := len(dst) - dst = append(dst, 0, 0, 0, 0) + // Offset Field #2 'Topics' + // Offset Field #3 'Data' + dst = append(dst, 0, 0, 0, 0, 0, 0, 0, 0) { // Dynamic Field #2 'Topics' - binary.LittleEndian.PutUint32(dst[offset2:], uint32(len(dst)-dstlen)) + binary.LittleEndian.PutUint32(dst[dstlen+24:], uint32(len(dst)-dstlen)) t := t.Topics vlen := len(t) if vlen > 5 { - return nil, sszutils.ErrListTooBig + return nil, sszutils.ErrorWithPath(sszutils.NewSszErrorf(sszutils.ErrListTooBig, "list length %d exceeds maximum %d", vlen, 5), "Topics") } - for i := range vlen { - vlen := len(t[i]) + for idx1 := range vlen { + vlen := len(t[idx1]) if vlen > 32 { - return nil, sszutils.ErrVectorLength + return nil, sszutils.ErrorWithPathf(sszutils.NewSszErrorf(sszutils.ErrVectorLength, "vector length %d exceeds limit %d", vlen, 32), "Topics[%d]", idx1) } - dst = append(dst, t[i][:vlen]...) + dst = append(dst, t[idx1][:vlen]...) 
if vlen < 32 { dst = sszutils.AppendZeroPadding(dst, (32-vlen)*1) } } } { // Dynamic Field #3 'Data' - binary.LittleEndian.PutUint32(dst[offset3:], uint32(len(dst)-dstlen)) + binary.LittleEndian.PutUint32(dst[dstlen+28:], uint32(len(dst)-dstlen)) dst = append(dst, t.Data[:]...) } return dst, nil @@ -798,7 +767,7 @@ func (t *EventData) MarshalSSZTo(buf []byte) (dst []byte, err error) { func (t *EventData) UnmarshalSSZ(buf []byte) (err error) { buflen := len(buf) if buflen < 32 { - return sszutils.ErrUnexpectedEOF + return sszutils.NewSszErrorf(sszutils.ErrUnexpectedEOF, "not enough data for fixed fields (have %d, needed %d)", buflen, 32) } { // Field #0 'EventIndex' (static) buf := buf[0:4] @@ -811,25 +780,25 @@ func (t *EventData) UnmarshalSSZ(buf []byte) (err error) { // Field #2 'Topics' (offset) offset2 := int(binary.LittleEndian.Uint32(buf[24:28])) if offset2 != 32 { - return sszutils.ErrOffset + return sszutils.ErrorWithPath(sszutils.NewSszErrorf(sszutils.ErrOffset, "first offset does not match (offset: %d, static len: %d)", offset2, 32), "Topics:o") } // Field #3 'Data' (offset) offset3 := int(binary.LittleEndian.Uint32(buf[28:32])) if offset3 < offset2 || offset3 > buflen { - return sszutils.ErrOffset + return sszutils.ErrorWithPath(sszutils.NewSszErrorf(sszutils.ErrOffset, "offset out of range (offset=%d, prev=%d, buflen=%d)", offset3, offset2, buflen), "Data:o") } { // Field #2 'Topics' (dynamic) buf := buf[offset2:offset3] val1 := t.Topics itemCount := len(buf) / 32 if len(buf)%32 != 0 { - return sszutils.ErrUnexpectedEOF + return sszutils.ErrorWithPath(sszutils.NewSszErrorf(sszutils.ErrUnexpectedEOF, "list length %d not a multiple of element size %d", len(buf), 32), "Topics") } val1 = sszutils.ExpandSlice(val1, itemCount) - for i := range itemCount { - buf := buf[32*i : 32*(i+1)] - val1[i] = sszutils.ExpandSlice(val1[i], 32) - copy(val1[i][:], buf) + for idx1 := range itemCount { + buf := buf[32*idx1 : 32*(idx1+1)] + val1[idx1] = 
sszutils.ExpandSlice(val1[idx1], 32) + copy(val1[idx1][:], buf) } t.Topics = val1 } @@ -884,15 +853,15 @@ func (t *EventData) HashTreeRootWith(hh sszutils.HashWalker) error { t := t.Topics vlen := uint64(len(t)) if vlen > 5 { - return sszutils.ErrListTooBig + return sszutils.ErrorWithPath(sszutils.NewSszErrorf(sszutils.ErrListTooBig, "list length %d exceeds maximum %d", vlen, 5), "Topics") } idx := hh.Index() - for i := range int(vlen) { - vlen := len(t[i]) + for idx1 := range int(vlen) { + vlen := len(t[idx1]) if vlen > 32 { - return sszutils.ErrVectorLength + return sszutils.ErrorWithPathf(sszutils.NewSszErrorf(sszutils.ErrVectorLength, "vector length %d exceeds limit %d", vlen, 32), "Topics[%d]", idx1) } - val := t[i][:] + val := t[idx1][:] if vlen < 32 { val = sszutils.AppendZeroPadding(val, (32-vlen)*1) } diff --git a/blockdb/types/flags.go b/blockdb/types/flags.go new file mode 100644 index 000000000..34aff4dbb --- /dev/null +++ b/blockdb/types/flags.go @@ -0,0 +1,38 @@ +package types + +// BlockDataFlags specifies which components to load from storage. +type BlockDataFlags uint8 + +const ( + // BlockDataFlagHeader requests the block header data. + BlockDataFlagHeader BlockDataFlags = 1 << iota // 0x01 + // BlockDataFlagBody requests the block body data. + BlockDataFlagBody // 0x02 + // BlockDataFlagPayload requests the execution payload data. + BlockDataFlagPayload // 0x04 + // BlockDataFlagBal requests the block access list data. + BlockDataFlagBal // 0x08 + + // BlockDataFlagAll requests all block components. + BlockDataFlagAll = BlockDataFlagHeader | BlockDataFlagBody | BlockDataFlagPayload | BlockDataFlagBal +) + +// Has returns true if the flag set contains the specified flag. +func (f BlockDataFlags) Has(flag BlockDataFlags) bool { + return f&flag == flag +} + +// HasAny returns true if the flag set contains any of the specified flags. 
+func (f BlockDataFlags) HasAny(flags BlockDataFlags) bool { + return f&flags != 0 +} + +// Add returns a new flag set with the specified flag added. +func (f BlockDataFlags) Add(flag BlockDataFlags) BlockDataFlags { + return f | flag +} + +// Remove returns a new flag set with the specified flag removed. +func (f BlockDataFlags) Remove(flag BlockDataFlags) BlockDataFlags { + return f &^ flag +} diff --git a/clients/consensus/chainspec.go b/clients/consensus/chainspec.go index 8272c9531..212b42cc4 100644 --- a/clients/consensus/chainspec.go +++ b/clients/consensus/chainspec.go @@ -8,7 +8,7 @@ import ( "sort" "sync" - "github.com/attestantio/go-eth2-client/spec/phase0" + "github.com/ethpandaops/go-eth2-client/spec/phase0" "gopkg.in/Knetic/govaluate.v3" "gopkg.in/yaml.v2" ) @@ -53,6 +53,8 @@ type ChainSpecConfig struct { ElectraForkEpoch *uint64 `yaml:"ELECTRA_FORK_EPOCH" check-if-fork:"ElectraForkEpoch"` FuluForkVersion phase0.Version `yaml:"FULU_FORK_VERSION" check-if-fork:"FuluForkEpoch"` FuluForkEpoch *uint64 `yaml:"FULU_FORK_EPOCH" check-if-fork:"FuluForkEpoch"` + GloasForkVersion phase0.Version `yaml:"GLOAS_FORK_VERSION" check-if-fork:"GloasForkEpoch"` + GloasForkEpoch *uint64 `yaml:"GLOAS_FORK_EPOCH" check-if-fork:"GloasForkEpoch"` // Time parameters SlotDurationMs uint64 `yaml:"SLOT_DURATION_MS"` @@ -84,7 +86,6 @@ type ChainSpecConfig struct { MaxPayloadSize uint64 `yaml:"MAX_PAYLOAD_SIZE"` MaxRequestBlocks uint64 `yaml:"MAX_REQUEST_BLOCKS"` EpochsPerSubnetSubscription uint64 `yaml:"EPOCHS_PER_SUBNET_SUBSCRIPTION"` - MinEpochsForBlockRequests uint64 `yaml:"MIN_EPOCHS_FOR_BLOCK_REQUESTS"` AttestationPropoagationSlotRange uint64 `yaml:"ATTESTATION_PROPAGATION_SLOT_RANGE"` MaximumGossipClockDisparity uint64 `yaml:"MAXIMUM_GOSSIP_CLOCK_DISPARITY"` MessageDomainInvalidSnappy phase0.DomainType `yaml:"MESSAGE_DOMAIN_INVALID_SNAPPY"` @@ -92,32 +93,31 @@ type ChainSpecConfig struct { SubnetsPerNode uint64 `yaml:"SUBNETS_PER_NODE"` AttestationSubnetCount uint64 
`yaml:"ATTESTATION_SUBNET_COUNT"` AttestationSubnetExtraBits uint64 `yaml:"ATTESTATION_SUBNET_EXTRA_BITS"` - AttestationSubnetPrefixBits uint64 `yaml:"ATTESTATION_SUBNET_PREFIX_BITS"` // Deneb MaxRequestBlocksDeneb uint64 `yaml:"MAX_REQUEST_BLOCKS_DENEB" check-if-fork:"DenebForkEpoch"` MinEpochsForBlobSidecarsRequests uint64 `yaml:"MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS" check-if-fork:"DenebForkEpoch"` BlobSidecarSubnetCount uint64 `yaml:"BLOB_SIDECAR_SUBNET_COUNT" check-if-fork:"DenebForkEpoch"` MaxBlobsPerBlock uint64 `yaml:"MAX_BLOBS_PER_BLOCK" check-if-fork:"DenebForkEpoch"` - MaxRequestBlobSidecars uint64 `yaml:"MAX_REQUEST_BLOB_SIDECARS" check-if-fork:"DenebForkEpoch"` // Electra MinPerEpochChurnLimitElectra uint64 `yaml:"MIN_PER_EPOCH_CHURN_LIMIT_ELECTRA" check-if-fork:"ElectraForkEpoch"` MaxPerEpochActivationExitChurnLimit uint64 `yaml:"MAX_PER_EPOCH_ACTIVATION_EXIT_CHURN_LIMIT" check-if-fork:"ElectraForkEpoch"` BlobSidecarSubnetCountElectra uint64 `yaml:"BLOB_SIDECAR_SUBNET_COUNT_ELECTRA" check-if-fork:"ElectraForkEpoch"` MaxBlobsPerBlockElectra uint64 `yaml:"MAX_BLOBS_PER_BLOCK_ELECTRA" check-if-fork:"ElectraForkEpoch"` - MaxRequestBlobSidecarsElectra uint64 `yaml:"MAX_REQUEST_BLOB_SIDECARS_ELECTRA" check-if-fork:"ElectraForkEpoch"` // Fulu MinEpochsForDataColumnSidecars uint64 `yaml:"MIN_EPOCHS_FOR_DATA_COLUMN_SIDECARS_REQUESTS" check-if-fork:"FuluForkEpoch"` NumberOfCustodyGroups *uint64 `yaml:"NUMBER_OF_CUSTODY_GROUPS" check-if-fork:"FuluForkEpoch"` DataColumnSidecarSubnetCount *uint64 `yaml:"DATA_COLUMN_SIDECAR_SUBNET_COUNT" check-if-fork:"FuluForkEpoch"` - MaxRequestDataColumnSidecars uint64 `yaml:"MAX_REQUEST_DATA_COLUMN_SIDECARS" check-if-fork:"FuluForkEpoch"` SamplesPerSlot uint64 `yaml:"SAMPLES_PER_SLOT" check-if-fork:"FuluForkEpoch"` CustodyRequirement *uint64 `yaml:"CUSTODY_REQUIREMENT" check-if-fork:"FuluForkEpoch"` ValidatorCustodyRequirement *uint64 `yaml:"VALIDATOR_CUSTODY_REQUIREMENT" check-if-fork:"FuluForkEpoch"` 
BalancePerAdditionalCustodyGroup *uint64 `yaml:"BALANCE_PER_ADDITIONAL_CUSTODY_GROUP" check-if-fork:"FuluForkEpoch"` BlobSchedule []BlobScheduleEntry `yaml:"BLOB_SCHEDULE" check-if-fork:"FuluForkEpoch"` + + // Gloas + MinBuilderWithdrawabilityDelay uint64 `yaml:"MIN_BUILDER_WITHDRAWABILITY_DELAY" check-if-fork:"GloasForkEpoch"` } type ChainSpecPreset struct { @@ -205,6 +205,13 @@ type ChainSpecPreset struct { FieldElementsPerExtBlob uint64 `yaml:"FIELD_ELEMENTS_PER_EXT_BLOB" check-if-fork:"FuluForkEpoch"` CellsPerExtBlob uint64 `yaml:"CELLS_PER_EXT_BLOB" check-if-fork:"FuluForkEpoch"` NumberOfColumns *uint64 `yaml:"NUMBER_OF_COLUMNS" check-if-fork:"FuluForkEpoch"` + + // Gloas + PtcSize uint64 `yaml:"PTC_SIZE" check-if-fork:"GloasForkEpoch"` + MaxPayloadAttestations uint64 `yaml:"MAX_PAYLOAD_ATTESTATIONS" check-if-fork:"GloasForkEpoch"` + BuilderRegistryLimit uint64 `yaml:"BUILDER_REGISTRY_LIMIT" check-if-fork:"GloasForkEpoch"` + BuilderPendingWithdrawalsLimit uint64 `yaml:"BUILDER_PENDING_WITHDRAWALS_LIMIT" check-if-fork:"GloasForkEpoch"` + MaxBuildersPerWithdrawalsSweep uint64 `yaml:"MAX_BUILDERS_PER_WITHDRAWALS_SWEEP" check-if-fork:"GloasForkEpoch"` } type ChainSpecDomainTypes struct { @@ -219,6 +226,9 @@ type ChainSpecDomainTypes struct { DomainSyncCommitteeSelectionProof phase0.DomainType `yaml:"DOMAIN_SYNC_COMMITTEE_SELECTION_PROOF"` DomainContributionAndProof phase0.DomainType `yaml:"DOMAIN_CONTRIBUTION_AND_PROOF"` DomainBlsToExecutionChange phase0.DomainType `yaml:"DOMAIN_BLS_TO_EXECUTION_CHANGE"` + DomainBeaconBuilder phase0.DomainType `yaml:"DOMAIN_BEACON_BUILDER" check-if-fork:"GloasForkEpoch"` + DomainPtcAttester phase0.DomainType `yaml:"DOMAIN_PTC_ATTESTER" check-if-fork:"GloasForkEpoch"` + DomainProposerPreferences phase0.DomainType `yaml:"DOMAIN_PROPOSER_PREFERENCES" check-if-fork:"GloasForkEpoch"` } type ChainSpec struct { diff --git a/clients/consensus/chainstate.go b/clients/consensus/chainstate.go index 0a1eb2230..28295d3af 100644 --- 
a/clients/consensus/chainstate.go +++ b/clients/consensus/chainstate.go @@ -8,10 +8,10 @@ import ( "sync" "time" - v1 "github.com/attestantio/go-eth2-client/api/v1" - "github.com/attestantio/go-eth2-client/spec/phase0" "github.com/ethpandaops/dora/utils" "github.com/ethpandaops/ethwallclock" + v1 "github.com/ethpandaops/go-eth2-client/api/v1" + "github.com/ethpandaops/go-eth2-client/spec/phase0" ) type ChainState struct { @@ -361,6 +361,34 @@ func (cs *ChainState) GetForkDigestForEpoch(epoch phase0.Epoch) phase0.ForkDiges return cs.GetForkDigest(currentForkVersion, currentBlobParams) } +func (cs *ChainState) GetBlobScheduleForEpoch(epoch phase0.Epoch) *BlobScheduleEntry { + if cs.specs == nil { + return nil + } + + var blobSchedule *BlobScheduleEntry + + if cs.specs.ElectraForkEpoch != nil && epoch >= phase0.Epoch(*cs.specs.ElectraForkEpoch) { + blobSchedule = &BlobScheduleEntry{ + Epoch: *cs.specs.ElectraForkEpoch, + MaxBlobsPerBlock: cs.specs.MaxBlobsPerBlockElectra, + } + } else if cs.specs.DenebForkEpoch != nil && epoch >= phase0.Epoch(*cs.specs.DenebForkEpoch) { + blobSchedule = &BlobScheduleEntry{ + Epoch: *cs.specs.DenebForkEpoch, + MaxBlobsPerBlock: cs.specs.MaxBlobsPerBlock, + } + } + + for i, blobScheduleEntry := range cs.specs.BlobSchedule { + if blobScheduleEntry.Epoch <= uint64(epoch) { + blobSchedule = &cs.specs.BlobSchedule[i] + } + } + + return blobSchedule +} + func (cs *ChainState) GetForkDigest(forkVersion phase0.Version, blobParams *BlobScheduleEntry) phase0.ForkDigest { if cs.specs == nil || cs.genesis == nil { return phase0.ForkDigest{} @@ -444,6 +472,22 @@ func (cs *ChainState) GetValidatorChurnLimit(validatorCount uint64) uint64 { return adaptable } +func (cs *ChainState) IsEip7732Enabled(epoch phase0.Epoch) bool { + if cs.specs == nil { + return false + } + + return cs.specs.GloasForkEpoch != nil && phase0.Epoch(*cs.specs.GloasForkEpoch) <= epoch +} + +func (cs *ChainState) IsFuluEnabled(epoch phase0.Epoch) bool { + if cs.specs == nil { + 
return false + } + + return cs.specs.FuluForkEpoch != nil && phase0.Epoch(*cs.specs.FuluForkEpoch) <= epoch +} + func (cs *ChainState) GetBalanceChurnLimit(totalActiveBalance uint64) uint64 { if cs.specs == nil { return 0 diff --git a/clients/consensus/client.go b/clients/consensus/client.go index 234c48eeb..4b790aeb2 100644 --- a/clients/consensus/client.go +++ b/clients/consensus/client.go @@ -5,8 +5,9 @@ import ( "sync" "time" - v1 "github.com/attestantio/go-eth2-client/api/v1" - "github.com/attestantio/go-eth2-client/spec/phase0" + v1 "github.com/ethpandaops/go-eth2-client/api/v1" + "github.com/ethpandaops/go-eth2-client/spec/gloas" + "github.com/ethpandaops/go-eth2-client/spec/phase0" "github.com/sirupsen/logrus" "github.com/ethpandaops/dora/clients/consensus/rpc" @@ -23,36 +24,38 @@ type ClientConfig struct { } type Client struct { - pool *Pool - clientIdx uint16 - endpointConfig *ClientConfig - clientCtx context.Context - clientCtxCancel context.CancelFunc - rpcClient *rpc.BeaconClient - logger *logrus.Entry - isOnline bool - isSyncing bool - isOptimistic bool - versionStr string - nodeIdentity *rpc.NodeIdentity - clientType ClientType - lastEvent time.Time - retryCounter uint64 - lastError error - headMutex sync.RWMutex - headRoot phase0.Root - headSlot phase0.Slot - justifiedRoot phase0.Root - justifiedEpoch phase0.Epoch - finalizedRoot phase0.Root - finalizedEpoch phase0.Epoch - lastFinalityUpdateEpoch phase0.Epoch - lastMetadataUpdateEpoch phase0.Epoch - lastMetadataUpdateTime time.Time - lastSyncUpdateEpoch phase0.Epoch - peers []*v1.Peer - streamDispatcher utils.Dispatcher[*rpc.BeaconStreamEvent] - checkpointDispatcher utils.Dispatcher[*v1.Finality] + pool *Pool + clientIdx uint16 + endpointConfig *ClientConfig + clientCtx context.Context + clientCtxCancel context.CancelFunc + rpcClient *rpc.BeaconClient + logger *logrus.Entry + isOnline bool + isSyncing bool + isOptimistic bool + versionStr string + nodeIdentity *rpc.NodeIdentity + clientType 
ClientType + lastEvent time.Time + retryCounter uint64 + lastError error + headMutex sync.RWMutex + headRoot phase0.Root + headSlot phase0.Slot + justifiedRoot phase0.Root + justifiedEpoch phase0.Epoch + finalizedRoot phase0.Root + finalizedEpoch phase0.Epoch + lastFinalityUpdateEpoch phase0.Epoch + lastMetadataUpdateEpoch phase0.Epoch + lastMetadataUpdateTime time.Time + lastSyncUpdateEpoch phase0.Epoch + peers []*v1.Peer + streamDispatcher utils.Dispatcher[*rpc.BeaconStreamEvent] + checkpointDispatcher utils.Dispatcher[*v1.Finality] + executionPayloadDispatcher utils.Dispatcher[*v1.ExecutionPayloadAvailableEvent] + executionPayloadBidDispatcher utils.Dispatcher[*gloas.SignedExecutionPayloadBid] specWarnings []string // warnings from incomplete spec checks specs map[string]interface{} @@ -99,6 +102,14 @@ func (client *Client) SubscribeFinalizedEvent(capacity int) *utils.Subscription[ return client.checkpointDispatcher.Subscribe(capacity, false) } +func (client *Client) SubscribeExecutionPayloadAvailableEvent(capacity int, blocking bool) *utils.Subscription[*v1.ExecutionPayloadAvailableEvent] { + return client.executionPayloadDispatcher.Subscribe(capacity, blocking) +} + +func (client *Client) SubscribeExecutionPayloadBidEvent(capacity int, blocking bool) *utils.Subscription[*gloas.SignedExecutionPayloadBid] { + return client.executionPayloadBidDispatcher.Subscribe(capacity, blocking) +} + func (client *Client) GetPool() *Pool { return client.pool } diff --git a/clients/consensus/clientlogic.go b/clients/consensus/clientlogic.go index 146af4072..edb5c7917 100644 --- a/clients/consensus/clientlogic.go +++ b/clients/consensus/clientlogic.go @@ -7,8 +7,9 @@ import ( "runtime/debug" "time" - v1 "github.com/attestantio/go-eth2-client/api/v1" - "github.com/attestantio/go-eth2-client/spec/phase0" + v1 "github.com/ethpandaops/go-eth2-client/api/v1" + "github.com/ethpandaops/go-eth2-client/spec/gloas" + "github.com/ethpandaops/go-eth2-client/spec/phase0" 
"github.com/sirupsen/logrus" "github.com/ethpandaops/dora/clients/consensus/rpc" @@ -59,7 +60,7 @@ func (client *Client) checkClient() error { err := client.rpcClient.Initialize(ctx) if err != nil { - return fmt.Errorf("initialization of attestantio/go-eth2-client failed: %w", err) + return fmt.Errorf("initialization of ethpandaops/go-eth2-client failed: %w", err) } // update node metadata @@ -133,7 +134,11 @@ func (client *Client) runClientLogic() error { } // start event stream - blockStream := client.rpcClient.NewBlockStream(client.clientCtx, client.logger, rpc.StreamBlockEvent|rpc.StreamHeadEvent|rpc.StreamFinalizedEvent) + blockStream := client.rpcClient.NewBlockStream( + client.clientCtx, + client.logger, + rpc.StreamBlockEvent|rpc.StreamHeadEvent|rpc.StreamFinalizedEvent|rpc.StreamExecutionPayloadEvent|rpc.StreamExecutionPayloadBidEvent, + ) defer blockStream.Close() // process events @@ -165,6 +170,12 @@ func (client *Client) runClientLogic() error { if err != nil { client.logger.Warnf("failed processing finalized event: %v", err) } + + case rpc.StreamExecutionPayloadEvent: + client.executionPayloadDispatcher.Fire(evt.Data.(*v1.ExecutionPayloadAvailableEvent)) + + case rpc.StreamExecutionPayloadBidEvent: + client.executionPayloadBidDispatcher.Fire(evt.Data.(*gloas.SignedExecutionPayloadBid)) } // fire through stream dispatcher first to preserve SSE ordering diff --git a/clients/consensus/const.go b/clients/consensus/const.go index b36b45ded..16f7b3966 100644 --- a/clients/consensus/const.go +++ b/clients/consensus/const.go @@ -1,5 +1,5 @@ package consensus -import "github.com/attestantio/go-eth2-client/spec/phase0" +import "github.com/ethpandaops/go-eth2-client/spec/phase0" var NullRoot phase0.Root = phase0.Root{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} diff --git a/clients/consensus/pool.go b/clients/consensus/pool.go index eb900ad79..a0616954e 100644 --- a/clients/consensus/pool.go +++ 
b/clients/consensus/pool.go @@ -6,9 +6,9 @@ import ( "math/rand/v2" - v1 "github.com/attestantio/go-eth2-client/api/v1" "github.com/ethpandaops/dora/utils" "github.com/ethpandaops/ethwallclock" + v1 "github.com/ethpandaops/go-eth2-client/api/v1" "github.com/sirupsen/logrus" ) diff --git a/clients/consensus/rpc/beaconapi.go b/clients/consensus/rpc/beaconapi.go index 6768091bc..e09a794d0 100644 --- a/clients/consensus/rpc/beaconapi.go +++ b/clients/consensus/rpc/beaconapi.go @@ -12,14 +12,15 @@ import ( "strings" "time" - eth2client "github.com/attestantio/go-eth2-client" - "github.com/attestantio/go-eth2-client/api" - v1 "github.com/attestantio/go-eth2-client/api/v1" - "github.com/attestantio/go-eth2-client/http" - "github.com/attestantio/go-eth2-client/spec" - "github.com/attestantio/go-eth2-client/spec/capella" - "github.com/attestantio/go-eth2-client/spec/deneb" - "github.com/attestantio/go-eth2-client/spec/phase0" + eth2client "github.com/ethpandaops/go-eth2-client" + "github.com/ethpandaops/go-eth2-client/api" + v1 "github.com/ethpandaops/go-eth2-client/api/v1" + "github.com/ethpandaops/go-eth2-client/http" + "github.com/ethpandaops/go-eth2-client/spec" + "github.com/ethpandaops/go-eth2-client/spec/capella" + "github.com/ethpandaops/go-eth2-client/spec/deneb" + "github.com/ethpandaops/go-eth2-client/spec/gloas" + "github.com/ethpandaops/go-eth2-client/spec/phase0" "github.com/rs/zerolog" "github.com/sirupsen/logrus" "golang.org/x/crypto/ssh" @@ -406,6 +407,22 @@ func (bc *BeaconClient) GetBlockBodyByBlockroot(ctx context.Context, blockroot p return result.Data, nil } +func (bc *BeaconClient) GetExecutionPayloadByBlockroot(ctx context.Context, blockroot phase0.Root) (*gloas.SignedExecutionPayloadEnvelope, error) { + provider, isProvider := bc.clientSvc.(eth2client.ExecutionPayloadProvider) + if !isProvider { + return nil, fmt.Errorf("get execution payload not supported") + } + + result, err := provider.SignedExecutionPayloadEnvelope(ctx, 
&api.SignedExecutionPayloadEnvelopeOpts{ + Block: fmt.Sprintf("0x%x", blockroot), + }) + if err != nil { + return nil, err + } + + return result.Data, nil +} + func (bc *BeaconClient) GetState(ctx context.Context, stateRef string) (*spec.VersionedBeaconState, error) { provider, isProvider := bc.clientSvc.(eth2client.BeaconStateProvider) if !isProvider { diff --git a/clients/consensus/rpc/beaconstream.go b/clients/consensus/rpc/beaconstream.go index be6fd92c9..5bac7bf3f 100644 --- a/clients/consensus/rpc/beaconstream.go +++ b/clients/consensus/rpc/beaconstream.go @@ -9,17 +9,19 @@ import ( "strings" "time" - v1 "github.com/attestantio/go-eth2-client/api/v1" - "github.com/donovanhide/eventsource" + v1 "github.com/ethpandaops/go-eth2-client/api/v1" + "github.com/ethpandaops/go-eth2-client/spec/gloas" "github.com/sirupsen/logrus" "github.com/ethpandaops/dora/clients/consensus/rpc/eventstream" ) const ( - StreamBlockEvent uint16 = 0x01 - StreamHeadEvent uint16 = 0x02 - StreamFinalizedEvent uint16 = 0x04 + StreamBlockEvent uint16 = 0x01 + StreamHeadEvent uint16 = 0x02 + StreamFinalizedEvent uint16 = 0x04 + StreamExecutionPayloadEvent uint16 = 0x08 + StreamExecutionPayloadBidEvent uint16 = 0x10 ) type BeaconStreamEvent struct { @@ -71,48 +73,127 @@ func (bs *BeaconStream) startStream() { bs.running = false }() - stream := bs.subscribeStream(bs.client.endpoint, bs.events) - if stream != nil { - defer stream.Close() - - for { + // Subscribe to basic events (block, head, finalized_checkpoint) + basicEvents := bs.events & (StreamBlockEvent | StreamHeadEvent | StreamFinalizedEvent) + basicStream := bs.subscribeStream(bs.client.endpoint, basicEvents) + if basicStream == nil { + return + } + defer basicStream.Close() + + // Subscribe to advanced events (execution_payload_available, execution_payload_bid) + // These are in a separate stream because clients may not support them yet, + // and subscribing to unsupported topics can cause the entire subscription to fail. 
+ // Run in a separate goroutine so it doesn't block the basic stream. + advancedEvents := bs.events & (StreamExecutionPayloadEvent | StreamExecutionPayloadBidEvent) + advancedStreamChan := make(chan *eventstream.Stream, 1) + if advancedEvents > 0 { + go func() { + stream := bs.subscribeStream(bs.client.endpoint, advancedEvents) select { + case advancedStreamChan <- stream: case <-bs.ctx.Done(): - return - case evt := <-stream.Events: - switch evt.Event() { - case "block": - bs.processBlockEvent(evt) - case "head": - bs.processHeadEvent(evt) - case "finalized_checkpoint": - bs.processFinalizedEvent(evt) - } - case <-stream.Ready: - bs.ReadyChan <- &BeaconStreamStatus{ - Ready: true, - } - case err := <-stream.Errors: - if strings.Contains(err.Error(), "INTERNAL_ERROR; received from peer") { - // this seems to be a go bug, silently reconnect to the stream - time.Sleep(10 * time.Millisecond) - stream.RetryNow() - } else { - bs.logger.Warnf("beacon block stream error: %v", err) + if stream != nil { + stream.Close() } + } + }() + } - select { - case bs.ReadyChan <- &BeaconStreamStatus{ - Ready: false, - Error: err, - }: - case <-bs.ctx.Done(): - } + var advancedStream *eventstream.Stream + defer func() { + if advancedStream != nil { + advancedStream.Close() + } + }() + + for { + select { + case <-bs.ctx.Done(): + return + + // Basic stream events + case evt := <-basicStream.Events: + switch evt.Event() { + case "block": + bs.processBlockEvent(evt) + case "head": + bs.processHeadEvent(evt) + case "finalized_checkpoint": + bs.processFinalizedEvent(evt) + } + case <-basicStream.Ready: + bs.ReadyChan <- &BeaconStreamStatus{ + Ready: true, } + case err := <-basicStream.Errors: + bs.handleStreamError(basicStream, err) + + // Advanced stream connection established + case stream := <-advancedStreamChan: + advancedStream = stream + + // Advanced stream events (no Ready/Error forwarding) + case evt := <-bs.getAdvancedStreamEvents(advancedStream): + switch evt.Event() { + case 
"execution_payload_available": + bs.processExecutionPayloadAvailableEvent(evt) + case "execution_payload_bid": + bs.processExecutionPayloadBidEvent(evt) + } + case <-bs.getAdvancedStreamReady(advancedStream): + // Don't forward ready events from advanced stream + case <-bs.getAdvancedStreamErrors(advancedStream): + // Silently retry - clients may not support these events yet + time.Sleep(10 * time.Millisecond) + advancedStream.RetryNow() } } } +// getAdvancedStreamEvents returns the events channel or a nil channel if stream is nil. +func (bs *BeaconStream) getAdvancedStreamEvents(stream *eventstream.Stream) chan eventstream.StreamEvent { + if stream == nil { + return nil + } + return stream.Events +} + +// getAdvancedStreamReady returns the ready channel or a nil channel if stream is nil. +func (bs *BeaconStream) getAdvancedStreamReady(stream *eventstream.Stream) chan bool { + if stream == nil { + return nil + } + return stream.Ready +} + +// getAdvancedStreamErrors returns the errors channel or a nil channel if stream is nil. +func (bs *BeaconStream) getAdvancedStreamErrors(stream *eventstream.Stream) chan error { + if stream == nil { + return nil + } + return stream.Errors +} + +// handleStreamError handles stream errors and forwards them to the ReadyChan. 
+func (bs *BeaconStream) handleStreamError(stream *eventstream.Stream, err error) { + if strings.Contains(err.Error(), "INTERNAL_ERROR; received from peer") { + // this seems to be a go bug, silently reconnect to the stream + time.Sleep(10 * time.Millisecond) + stream.RetryNow() + } else { + bs.logger.Warnf("beacon block stream error: %v", err) + } + + select { + case bs.ReadyChan <- &BeaconStreamStatus{ + Ready: false, + Error: err, + }: + case <-bs.ctx.Done(): + } +} + func (bs *BeaconStream) subscribeStream(endpoint string, events uint16) *eventstream.Stream { var topics strings.Builder @@ -148,6 +229,26 @@ func (bs *BeaconStream) subscribeStream(endpoint string, events uint16) *eventst topicsCount++ } + if events&StreamExecutionPayloadEvent > 0 { + if topicsCount > 0 { + fmt.Fprintf(&topics, ",") + } + + fmt.Fprintf(&topics, "execution_payload_available") + + topicsCount++ + } + + if events&StreamExecutionPayloadBidEvent > 0 { + if topicsCount > 0 { + fmt.Fprintf(&topics, ",") + } + + fmt.Fprintf(&topics, "execution_payload_bid") + + topicsCount++ + } + if topicsCount == 0 { return nil } @@ -179,7 +280,7 @@ func (bs *BeaconStream) subscribeStream(endpoint string, events uint16) *eventst } } -func (bs *BeaconStream) processBlockEvent(evt eventsource.Event) { +func (bs *BeaconStream) processBlockEvent(evt eventstream.StreamEvent) { var parsed v1.BlockEvent err := json.Unmarshal([]byte(evt.Data()), &parsed) @@ -194,7 +295,7 @@ func (bs *BeaconStream) processBlockEvent(evt eventsource.Event) { } } -func (bs *BeaconStream) processHeadEvent(evt eventsource.Event) { +func (bs *BeaconStream) processHeadEvent(evt eventstream.StreamEvent) { var parsed v1.HeadEvent err := json.Unmarshal([]byte(evt.Data()), &parsed) @@ -210,7 +311,7 @@ func (bs *BeaconStream) processHeadEvent(evt eventsource.Event) { } } -func (bs *BeaconStream) processFinalizedEvent(evt eventsource.Event) { +func (bs *BeaconStream) processFinalizedEvent(evt eventstream.StreamEvent) { var parsed 
v1.FinalizedCheckpointEvent err := json.Unmarshal([]byte(evt.Data()), &parsed) @@ -225,6 +326,36 @@ func (bs *BeaconStream) processFinalizedEvent(evt eventsource.Event) { } } +func (bs *BeaconStream) processExecutionPayloadAvailableEvent(evt eventstream.StreamEvent) { + var parsed v1.ExecutionPayloadAvailableEvent + + err := json.Unmarshal([]byte(evt.Data()), &parsed) + if err != nil { + bs.logger.Warnf("beacon block stream failed to decode execution_payload event: %v", err) + return + } + + bs.EventChan <- &BeaconStreamEvent{ + Event: StreamExecutionPayloadEvent, + Data: &parsed, + } +} + +func (bs *BeaconStream) processExecutionPayloadBidEvent(evt eventstream.StreamEvent) { + var parsed gloas.SignedExecutionPayloadBid + + err := json.Unmarshal([]byte(evt.Data()), &parsed) + if err != nil { + bs.logger.Warnf("beacon block stream failed to decode execution_payload_bid event: %v", err) + return + } + + bs.EventChan <- &BeaconStreamEvent{ + Event: StreamExecutionPayloadBidEvent, + Data: &parsed, + } +} + func getRedactedURL(requrl string) string { var logurl string diff --git a/clients/consensus/rpc/syncstatus.go b/clients/consensus/rpc/syncstatus.go index f52052674..9842166e7 100644 --- a/clients/consensus/rpc/syncstatus.go +++ b/clients/consensus/rpc/syncstatus.go @@ -1,6 +1,6 @@ package rpc -import v1 "github.com/attestantio/go-eth2-client/api/v1" +import v1 "github.com/ethpandaops/go-eth2-client/api/v1" type SyncStatus struct { IsSyncing bool diff --git a/cmd/dora-explorer/main.go b/cmd/dora-explorer/main.go index 1e935e885..52c6e0f8c 100644 --- a/cmd/dora-explorer/main.go +++ b/cmd/dora-explorer/main.go @@ -234,6 +234,8 @@ func startFrontend(router *mux.Router) { router.HandleFunc("/validators/submit_withdrawals", handlers.SubmitWithdrawal).Methods("GET") router.HandleFunc("/validator/{idxOrPubKey}", handlers.Validator).Methods("GET") router.HandleFunc("/validator/{index}/slots", handlers.ValidatorSlots).Methods("GET") + router.HandleFunc("/builders", 
handlers.Builders).Methods("GET") + router.HandleFunc("/builder/{idxOrPubKey}", handlers.BuilderDetail).Methods("GET") if utils.Config.Frontend.Pprof { // add pprof handler diff --git a/cmd/dora-utils/blockdb_sync.go b/cmd/dora-utils/blockdb_sync.go index fdebcb81a..d0a483685 100644 --- a/cmd/dora-utils/blockdb_sync.go +++ b/cmd/dora-utils/blockdb_sync.go @@ -5,7 +5,6 @@ import ( "fmt" "time" - "github.com/attestantio/go-eth2-client/spec/phase0" "github.com/ethpandaops/dora/blockdb" btypes "github.com/ethpandaops/dora/blockdb/types" "github.com/ethpandaops/dora/clients/consensus" @@ -13,6 +12,7 @@ import ( "github.com/ethpandaops/dora/indexer/beacon" "github.com/ethpandaops/dora/types" "github.com/ethpandaops/dora/utils" + "github.com/ethpandaops/go-eth2-client/spec/phase0" dynssz "github.com/pk910/dynamic-ssz" "github.com/sirupsen/logrus" "github.com/spf13/cobra" @@ -271,7 +271,7 @@ func processSlot(ctx context.Context, pool *consensus.Pool, dynSsz *dynssz.DynSs return slotResult{slot: slot, err: fmt.Errorf("failed to marshal block header for slot %d: %v", slot, err), time: time.Since(t1)} } - added, err := blockdb.GlobalBlockDb.AddBlockWithCallback(ctx, slot, blockHeader.Root[:], func() (*btypes.BlockData, error) { + added, _, err := blockdb.GlobalBlockDb.AddBlockWithCallback(ctx, slot, blockHeader.Root[:], func() (*btypes.BlockData, error) { blockBody, err := client.GetRPCClient().GetBlockBodyByBlockroot(ctx, blockHeader.Root) if err != nil { return nil, fmt.Errorf("failed to get block body for slot %d: %v", slot, err) @@ -282,11 +282,29 @@ func processSlot(ctx context.Context, pool *consensus.Pool, dynSsz *dynssz.DynSs return nil, fmt.Errorf("failed to marshal block body for slot %d: %v", slot, err) } + var payloadVersion uint64 + var payloadBytes []byte + + chainState := pool.GetChainState() + if chainState.IsEip7732Enabled(chainState.EpochOfSlot(phase0.Slot(slot))) { + blockPayload, err := client.GetRPCClient().GetExecutionPayloadByBlockroot(ctx, 
blockHeader.Root) + if err != nil { + return nil, fmt.Errorf("failed to get block execution payload for slot %d: %v", slot, err) + } + + payloadVersion, payloadBytes, err = beacon.MarshalVersionedSignedExecutionPayloadEnvelopeSSZ(dynSsz, blockPayload, true) + if err != nil { + return nil, fmt.Errorf("failed to marshal block execution payload for slot %d: %v", slot, err) + } + } + return &btypes.BlockData{ - HeaderVersion: 1, - HeaderData: headerBytes, - BodyVersion: version, - BodyData: bodyBytes, + HeaderVersion: 1, + HeaderData: headerBytes, + BodyVersion: version, + BodyData: bodyBytes, + PayloadVersion: payloadVersion, + PayloadData: payloadBytes, }, nil }) if err != nil { diff --git a/db/block_bids.go b/db/block_bids.go new file mode 100644 index 000000000..4e44772d5 --- /dev/null +++ b/db/block_bids.go @@ -0,0 +1,237 @@ +package db + +import ( + "context" + "fmt" + "strings" + + "github.com/ethpandaops/dora/dbtypes" + "github.com/jmoiron/sqlx" +) + +func InsertBids(bids []*dbtypes.BlockBid, tx *sqlx.Tx) error { + var sql strings.Builder + fmt.Fprint(&sql, + EngineQuery(map[dbtypes.DBEngineType]string{ + dbtypes.DBEnginePgsql: "INSERT INTO block_bids ", + dbtypes.DBEngineSqlite: "INSERT OR REPLACE INTO block_bids ", + }), + "(parent_root, parent_hash, block_hash, fee_recipient, gas_limit, builder_index, slot, value, el_payment)", + " VALUES ", + ) + argIdx := 0 + fieldCount := 9 + + args := make([]any, len(bids)*fieldCount) + for i, bid := range bids { + if i > 0 { + fmt.Fprintf(&sql, ", ") + } + fmt.Fprintf(&sql, "(") + for f := 0; f < fieldCount; f++ { + if f > 0 { + fmt.Fprintf(&sql, ", ") + } + fmt.Fprintf(&sql, "$%v", argIdx+f+1) + } + fmt.Fprintf(&sql, ")") + + args[argIdx+0] = bid.ParentRoot + args[argIdx+1] = bid.ParentHash + args[argIdx+2] = bid.BlockHash + args[argIdx+3] = bid.FeeRecipient + args[argIdx+4] = bid.GasLimit + args[argIdx+5] = bid.BuilderIndex + args[argIdx+6] = bid.Slot + args[argIdx+7] = bid.Value + args[argIdx+8] = bid.ElPayment + 
argIdx += fieldCount + } + fmt.Fprint(&sql, EngineQuery(map[dbtypes.DBEngineType]string{ + dbtypes.DBEnginePgsql: " ON CONFLICT (parent_root, parent_hash, block_hash, builder_index) DO UPDATE SET " + + "fee_recipient = excluded.fee_recipient, " + + "gas_limit = excluded.gas_limit, " + + "slot = excluded.slot, " + + "value = excluded.value, " + + "el_payment = excluded.el_payment", + dbtypes.DBEngineSqlite: "", + })) + + _, err := tx.Exec(sql.String(), args...) + if err != nil { + return err + } + return nil +} + +func GetBidsForBlockRoot(ctx context.Context, blockRoot []byte) []*dbtypes.BlockBid { + var sql strings.Builder + args := []any{ + blockRoot, + } + fmt.Fprint(&sql, ` + SELECT + parent_root, parent_hash, block_hash, fee_recipient, gas_limit, builder_index, slot, value, el_payment + FROM block_bids + WHERE parent_root = $1 + ORDER BY value DESC + `) + + bids := []*dbtypes.BlockBid{} + err := ReaderDb.SelectContext(ctx, &bids, sql.String(), args...) + if err != nil { + logger.Errorf("Error while fetching bids for block root: %v", err) + return nil + } + return bids +} + +func GetBidsForSlotRange(ctx context.Context, minSlot uint64) []*dbtypes.BlockBid { + var sql strings.Builder + args := []any{ + minSlot, + } + fmt.Fprint(&sql, ` + SELECT + parent_root, parent_hash, block_hash, fee_recipient, gas_limit, builder_index, slot, value, el_payment + FROM block_bids + WHERE slot >= $1 + ORDER BY slot DESC, value DESC + `) + + bids := []*dbtypes.BlockBid{} + err := ReaderDb.SelectContext(ctx, &bids, sql.String(), args...) 
+ if err != nil { + logger.Errorf("Error while fetching bids for slot range: %v", err) + return nil + } + return bids +} + +func DeleteBidsBeforeSlot(minSlot uint64, tx *sqlx.Tx) error { + _, err := tx.Exec(`DELETE FROM block_bids WHERE slot < $1`, minSlot) + return err +} + +// GetBidsByBlockHashes returns bids for multiple block hashes and a specific builder index +// Returns a map keyed by block hash (hex string) for easy lookup +func GetBidsByBlockHashes(ctx context.Context, blockHashes [][]byte, builderIndex uint64) map[string]*dbtypes.BlockBid { + result := make(map[string]*dbtypes.BlockBid, len(blockHashes)) + if len(blockHashes) == 0 { + return result + } + + var sql strings.Builder + args := make([]any, 0, len(blockHashes)+1) + + fmt.Fprint(&sql, ` + SELECT + parent_root, parent_hash, block_hash, fee_recipient, gas_limit, builder_index, slot, value, el_payment + FROM block_bids + WHERE builder_index = $1 AND block_hash IN (`) + + args = append(args, builderIndex) + for i, hash := range blockHashes { + if i > 0 { + fmt.Fprint(&sql, ", ") + } + fmt.Fprintf(&sql, "$%d", i+2) + args = append(args, hash) + } + fmt.Fprint(&sql, ")") + + bids := []*dbtypes.BlockBid{} + err := ReaderDb.SelectContext(ctx, &bids, sql.String(), args...) + if err != nil { + logger.Errorf("Error while fetching bids by block hashes: %v", err) + return result + } + + for _, bid := range bids { + key := fmt.Sprintf("%x", bid.BlockHash) + result[key] = bid + } + + return result +} + +// GetBidsBySlots returns the highest-value bid for each slot for a specific builder. +// Returns a map keyed by slot number for easy lookup. 
+func GetBidsBySlots(ctx context.Context, slots []uint64, builderIndex int64) map[uint64]*dbtypes.BlockBid { + result := make(map[uint64]*dbtypes.BlockBid, len(slots)) + if len(slots) == 0 { + return result + } + + var sql strings.Builder + args := make([]any, 0, len(slots)+1) + + fmt.Fprint(&sql, ` + SELECT + parent_root, parent_hash, block_hash, fee_recipient, gas_limit, builder_index, slot, value, el_payment + FROM block_bids + WHERE builder_index = $1 AND slot IN (`) + + args = append(args, builderIndex) + for i, slot := range slots { + if i > 0 { + fmt.Fprint(&sql, ", ") + } + fmt.Fprintf(&sql, "$%d", i+2) + args = append(args, slot) + } + fmt.Fprint(&sql, ") ORDER BY value DESC") + + bids := []*dbtypes.BlockBid{} + err := ReaderDb.SelectContext(ctx, &bids, sql.String(), args...) + if err != nil { + logger.Errorf("Error while fetching bids by slots: %v", err) + return result + } + + for _, bid := range bids { + // Keep only the highest-value bid per slot + if _, exists := result[bid.Slot]; !exists { + result[bid.Slot] = bid + } + } + + return result +} + +// GetBidsByBuilderIndex returns bids submitted by a specific builder, ordered by slot descending +func GetBidsByBuilderIndex(ctx context.Context, builderIndex uint64, offset uint64, limit uint32) ([]*dbtypes.BlockBid, uint64) { + var sql strings.Builder + args := []any{ + builderIndex, + } + fmt.Fprint(&sql, ` + SELECT + parent_root, parent_hash, block_hash, fee_recipient, gas_limit, builder_index, slot, value, el_payment + FROM block_bids + WHERE builder_index = $1 + ORDER BY slot DESC, value DESC + `) + + if limit > 0 { + fmt.Fprintf(&sql, " LIMIT $%d OFFSET $%d", len(args)+1, len(args)+2) + args = append(args, limit, offset) + } + + bids := []*dbtypes.BlockBid{} + err := ReaderDb.SelectContext(ctx, &bids, sql.String(), args...) 
+ if err != nil { + logger.Errorf("Error while fetching bids for builder index %d: %v", builderIndex, err) + return nil, 0 + } + + // Get total count + var totalCount uint64 + err = ReaderDb.GetContext(ctx, &totalCount, `SELECT COUNT(*) FROM block_bids WHERE builder_index = $1`, builderIndex) + if err != nil { + logger.Errorf("Error while counting bids for builder index %d: %v", builderIndex, err) + return bids, 0 + } + + return bids, totalCount +} diff --git a/db/builders.go b/db/builders.go new file mode 100644 index 000000000..26d67bfa1 --- /dev/null +++ b/db/builders.go @@ -0,0 +1,450 @@ +package db + +import ( + "context" + "fmt" + "strings" + + "github.com/ethpandaops/dora/dbtypes" + "github.com/jmoiron/sqlx" +) + +// InsertBuilder inserts a single builder into the database +func InsertBuilder(builder *dbtypes.Builder, tx *sqlx.Tx) error { + _, err := tx.Exec(EngineQuery(map[dbtypes.DBEngineType]string{ + dbtypes.DBEnginePgsql: ` + INSERT INTO builders ( + pubkey, builder_index, version, execution_address, + deposit_epoch, withdrawable_epoch, superseded + ) VALUES ($1, $2, $3, $4, $5, $6, $7) + ON CONFLICT (pubkey) DO UPDATE SET + builder_index = excluded.builder_index, + version = excluded.version, + execution_address = excluded.execution_address, + deposit_epoch = excluded.deposit_epoch, + withdrawable_epoch = excluded.withdrawable_epoch, + superseded = excluded.superseded`, + dbtypes.DBEngineSqlite: ` + INSERT OR REPLACE INTO builders ( + pubkey, builder_index, version, execution_address, + deposit_epoch, withdrawable_epoch, superseded + ) VALUES ($1, $2, $3, $4, $5, $6, $7)`, + }), + builder.Pubkey, + builder.BuilderIndex, + builder.Version, + builder.ExecutionAddress, + builder.DepositEpoch, + builder.WithdrawableEpoch, + builder.Superseded) + + if err != nil { + return fmt.Errorf("error inserting builder: %w", err) + } + return nil +} + +// InsertBuilderBatch inserts multiple builders in a batch +func InsertBuilderBatch(builders []*dbtypes.Builder, tx 
*sqlx.Tx) error { + if len(builders) == 0 { + return nil + } + + valueStrings := make([]string, len(builders)) + valueArgs := make([]any, 0, len(builders)*7) + for i, b := range builders { + valueStrings[i] = fmt.Sprintf("($%v, $%v, $%v, $%v, $%v, $%v, $%v)", + i*7+1, i*7+2, i*7+3, i*7+4, i*7+5, i*7+6, i*7+7) + valueArgs = append(valueArgs, + b.Pubkey, + b.BuilderIndex, + b.Version, + b.ExecutionAddress, + b.DepositEpoch, + b.WithdrawableEpoch, + b.Superseded) + } + + stmt := fmt.Sprintf(EngineQuery(map[dbtypes.DBEngineType]string{ + dbtypes.DBEnginePgsql: ` + INSERT INTO builders ( + pubkey, builder_index, version, execution_address, + deposit_epoch, withdrawable_epoch, superseded + ) VALUES %s + ON CONFLICT (pubkey) DO UPDATE SET + builder_index = excluded.builder_index, + version = excluded.version, + execution_address = excluded.execution_address, + deposit_epoch = excluded.deposit_epoch, + withdrawable_epoch = excluded.withdrawable_epoch, + superseded = excluded.superseded`, + dbtypes.DBEngineSqlite: ` + INSERT OR REPLACE INTO builders ( + pubkey, builder_index, version, execution_address, + deposit_epoch, withdrawable_epoch, superseded + ) VALUES %s`, + }), strings.Join(valueStrings, ",")) + + _, err := tx.Exec(stmt, valueArgs...) 
+ if err != nil { + return fmt.Errorf("error inserting builder batch: %w", err) + } + + return nil +} + +// GetBuilderByPubkey returns a builder by pubkey (primary key) +func GetBuilderByPubkey(ctx context.Context, pubkey []byte) *dbtypes.Builder { + builder := dbtypes.Builder{} + err := ReaderDb.GetContext(ctx, &builder, ` + SELECT * FROM builders WHERE pubkey = $1 + `, pubkey) + if err != nil { + return nil + } + return &builder +} + +// GetActiveBuilderByIndex returns the active (non-superseded) builder for a given index +func GetActiveBuilderByIndex(ctx context.Context, index uint64) *dbtypes.Builder { + builder := dbtypes.Builder{} + err := ReaderDb.GetContext(ctx, &builder, ` + SELECT * FROM builders WHERE builder_index = $1 AND superseded = false + `, index) + if err != nil { + return nil + } + return &builder +} + +// GetBuildersByIndex returns all builders (including superseded) for a given index +func GetBuildersByIndex(ctx context.Context, index uint64) []*dbtypes.Builder { + builders := []*dbtypes.Builder{} + err := ReaderDb.SelectContext(ctx, &builders, ` + SELECT * FROM builders WHERE builder_index = $1 ORDER BY superseded ASC + `, index) + if err != nil { + logger.Errorf("Error while fetching builders by index: %v", err) + return nil + } + return builders +} + +// GetBuilderRange returns builders in a given index range (only active builders) +func GetBuilderRange(ctx context.Context, startIndex uint64, endIndex uint64) []*dbtypes.Builder { + builders := []*dbtypes.Builder{} + err := ReaderDb.SelectContext(ctx, &builders, ` + SELECT * FROM builders + WHERE builder_index >= $1 AND builder_index <= $2 AND superseded = false + ORDER BY builder_index ASC + `, startIndex, endIndex) + if err != nil { + logger.Errorf("Error while fetching builder range: %v", err) + return nil + } + return builders +} + +// GetMaxBuilderIndex returns the highest builder index in the database +func GetMaxBuilderIndex(ctx context.Context) (uint64, error) { + var maxIndex uint64 
+ err := ReaderDb.GetContext(ctx, &maxIndex, "SELECT COALESCE(MAX(builder_index), 0) FROM builders") + if err != nil { + return 0, fmt.Errorf("error getting max builder index: %w", err) + } + return maxIndex, nil +} + +// GetBuilderCount returns the count of builders (optionally only active) +func GetBuilderCount(ctx context.Context, activeOnly bool) (uint64, error) { + var count uint64 + var err error + if activeOnly { + err = ReaderDb.GetContext(ctx, &count, "SELECT COUNT(*) FROM builders WHERE superseded = false") + } else { + err = ReaderDb.GetContext(ctx, &count, "SELECT COUNT(*) FROM builders") + } + if err != nil { + return 0, fmt.Errorf("error getting builder count: %w", err) + } + return count, nil +} + +// SetBuilderSuperseded marks a builder as superseded +func SetBuilderSuperseded(pubkey []byte, tx *sqlx.Tx) error { + _, err := tx.Exec(` + UPDATE builders SET superseded = true WHERE pubkey = $1 + `, pubkey) + if err != nil { + return fmt.Errorf("error setting builder superseded: %w", err) + } + return nil +} + +// SetBuildersSuperseded marks multiple builders as superseded in a batch +func SetBuildersSuperseded(pubkeys [][]byte, tx *sqlx.Tx) error { + if len(pubkeys) == 0 { + return nil + } + + var sql strings.Builder + sql.WriteString("UPDATE builders SET superseded = true WHERE pubkey IN (") + + args := make([]any, len(pubkeys)) + for i, pk := range pubkeys { + if i > 0 { + sql.WriteString(", ") + } + fmt.Fprintf(&sql, "$%d", i+1) + args[i] = pk + } + sql.WriteString(")") + + _, err := tx.Exec(sql.String(), args...) 
+ if err != nil { + return fmt.Errorf("error setting builders superseded: %w", err) + } + return nil +} + +// StreamBuildersByPubkeys streams builders by pubkeys in batches +func StreamBuildersByPubkeys(ctx context.Context, pubkeys [][]byte, cb func(builder *dbtypes.Builder) bool) error { + const batchSize = 1000 + + for i := 0; i < len(pubkeys); i += batchSize { + end := min(i+batchSize, len(pubkeys)) + batch := pubkeys[i:end] + + var sql strings.Builder + fmt.Fprintf(&sql, ` + SELECT + pubkey, builder_index, version, execution_address, + deposit_epoch, withdrawable_epoch, superseded + FROM builders + WHERE pubkey in (`) + + args := make([]any, len(batch)) + for j, pk := range batch { + if j > 0 { + fmt.Fprintf(&sql, ", ") + } + fmt.Fprintf(&sql, "$%v", j+1) + args[j] = pk + } + fmt.Fprintf(&sql, ")") + + // Create pubkey map for ordering + pubkeyMap := make(map[string]int, len(batch)) + for pos, pk := range batch { + pubkeyMap[string(pk)] = pos + } + + // Fetch all builders for this batch + builders := make([]*dbtypes.Builder, len(batch)) + rows, err := ReaderDb.QueryContext(ctx, sql.String(), args...) 
+ if err != nil { + return fmt.Errorf("error querying builders: %w", err) + } + defer rows.Close() + + for rows.Next() { + builder := &dbtypes.Builder{} + err := rows.Scan( + &builder.Pubkey, + &builder.BuilderIndex, + &builder.Version, + &builder.ExecutionAddress, + &builder.DepositEpoch, + &builder.WithdrawableEpoch, + &builder.Superseded, + ) + if err != nil { + return fmt.Errorf("error scanning builder: %w", err) + } + pos := pubkeyMap[string(builder.Pubkey)] + builders[pos] = builder + } + + if err = rows.Err(); err != nil { + return fmt.Errorf("error iterating rows: %w", err) + } + + // Stream in original order + for _, b := range builders { + if b != nil && !cb(b) { + return nil + } + } + } + + return nil +} + +// GetBuildersByExecutionAddress returns builders with a specific execution address +func GetBuildersByExecutionAddress(ctx context.Context, address []byte) []*dbtypes.Builder { + builders := []*dbtypes.Builder{} + err := ReaderDb.SelectContext(ctx, &builders, ` + SELECT * FROM builders WHERE execution_address = $1 ORDER BY builder_index ASC + `, address) + if err != nil { + logger.Errorf("Error while fetching builders by execution address: %v", err) + return nil + } + return builders +} + +// GetBuilderIndexesByFilter returns builder indexes matching a filter +func GetBuilderIndexesByFilter(ctx context.Context, filter dbtypes.BuilderFilter, currentEpoch uint64) ([]uint64, error) { + var sql strings.Builder + args := []interface{}{} + fmt.Fprint(&sql, ` + SELECT + builder_index + FROM builders + `) + + args = buildBuilderFilterSql(filter, currentEpoch, &sql, args) + + switch filter.OrderBy { + case dbtypes.BuilderOrderIndexAsc: + fmt.Fprint(&sql, " ORDER BY builder_index ASC") + case dbtypes.BuilderOrderIndexDesc: + fmt.Fprint(&sql, " ORDER BY builder_index DESC") + case dbtypes.BuilderOrderPubKeyAsc: + fmt.Fprint(&sql, " ORDER BY pubkey ASC") + case dbtypes.BuilderOrderPubKeyDesc: + fmt.Fprint(&sql, " ORDER BY pubkey DESC") + case 
dbtypes.BuilderOrderDepositEpochAsc: + fmt.Fprint(&sql, " ORDER BY deposit_epoch ASC") + case dbtypes.BuilderOrderDepositEpochDesc: + fmt.Fprint(&sql, " ORDER BY deposit_epoch DESC") + case dbtypes.BuilderOrderWithdrawableEpochAsc: + fmt.Fprint(&sql, " ORDER BY withdrawable_epoch ASC") + case dbtypes.BuilderOrderWithdrawableEpochDesc: + fmt.Fprint(&sql, " ORDER BY withdrawable_epoch DESC") + } + + builderIds := []uint64{} + err := ReaderDb.SelectContext(ctx, &builderIds, sql.String(), args...) + if err != nil { + logger.Errorf("Error while fetching builders by filter: %v", err) + return nil, err + } + + return builderIds, nil +} + +func buildBuilderFilterSql(filter dbtypes.BuilderFilter, currentEpoch uint64, sql *strings.Builder, args []interface{}) []interface{} { + filterOp := "WHERE" + + if filter.MinIndex != nil { + fmt.Fprintf(sql, " %v builder_index >= $%v", filterOp, len(args)+1) + args = append(args, *filter.MinIndex) + filterOp = "AND" + } + if filter.MaxIndex != nil { + fmt.Fprintf(sql, " %v builder_index <= $%v", filterOp, len(args)+1) + args = append(args, *filter.MaxIndex) + filterOp = "AND" + } + if len(filter.PubKey) > 0 { + fmt.Fprintf(sql, " %v pubkey LIKE $%v", filterOp, len(args)+1) + args = append(args, append(filter.PubKey, '%')) + filterOp = "AND" + } + if len(filter.ExecutionAddress) > 0 { + fmt.Fprintf(sql, " %v execution_address = $%v", filterOp, len(args)+1) + args = append(args, filter.ExecutionAddress) + filterOp = "AND" + } + if len(filter.Status) > 0 { + statusConditions := make([]string, 0, len(filter.Status)) + for _, status := range filter.Status { + switch status { + case dbtypes.BuilderStatusActiveFilter: + statusConditions = append(statusConditions, fmt.Sprintf("(superseded = false AND withdrawable_epoch > $%v)", len(args)+1)) + args = append(args, ConvertUint64ToInt64(currentEpoch)) + case dbtypes.BuilderStatusExitedFilter: + statusConditions = append(statusConditions, fmt.Sprintf("(superseded = false AND withdrawable_epoch <= 
$%v)", len(args)+1)) + args = append(args, ConvertUint64ToInt64(currentEpoch)) + case dbtypes.BuilderStatusSupersededFilter: + statusConditions = append(statusConditions, "superseded = true") + } + } + if len(statusConditions) > 0 { + fmt.Fprintf(sql, " %v (%v)", filterOp, strings.Join(statusConditions, " OR ")) + } + } + + return args +} + +// StreamBuildersByIndexes streams builders by indexes +func StreamBuildersByIndexes(ctx context.Context, indexes []uint64, cb func(builder *dbtypes.Builder) bool) { + const batchSize = 1000 + + for i := 0; i < len(indexes); i += batchSize { + end := min(i+batchSize, len(indexes)) + batch := indexes[i:end] + + var sql strings.Builder + fmt.Fprint(&sql, ` + SELECT + pubkey, builder_index, version, execution_address, + deposit_epoch, withdrawable_epoch, superseded + FROM builders + WHERE builder_index IN (`) + + args := make([]any, len(batch)) + for j, idx := range batch { + if j > 0 { + fmt.Fprint(&sql, ", ") + } + fmt.Fprintf(&sql, "$%v", j+1) + args[j] = idx + } + fmt.Fprint(&sql, ")") + + // Create index map for ordering + indexMap := make(map[uint64]int, len(batch)) + for pos, idx := range batch { + indexMap[idx] = pos + } + + // Fetch all builders for this batch + builders := make([]*dbtypes.Builder, len(batch)) + rows, err := ReaderDb.QueryContext(ctx, sql.String(), args...) 
+ if err != nil { + logger.Errorf("Error querying builders: %v", err) + return + } + + for rows.Next() { + builder := &dbtypes.Builder{} + err := rows.Scan( + &builder.Pubkey, + &builder.BuilderIndex, + &builder.Version, + &builder.ExecutionAddress, + &builder.DepositEpoch, + &builder.WithdrawableEpoch, + &builder.Superseded, + ) + if err != nil { + logger.Errorf("Error scanning builder: %v", err) + rows.Close() + return + } + pos := indexMap[builder.BuilderIndex] + builders[pos] = builder + } + rows.Close() + + // Stream in original order + for _, b := range builders { + if b != nil && !cb(b) { + return + } + } + } +} diff --git a/db/deposits.go b/db/deposits.go index c54a2c3a6..9adc88f1d 100644 --- a/db/deposits.go +++ b/db/deposits.go @@ -139,14 +139,18 @@ func GetDepositsFiltered(ctx context.Context, offset uint64, limit uint32, canon } if len(txFilter.WithdrawalAddress) > 0 { + // 0x01 = ETH1, 0x02 = compounding, 0x03 = builder deposit wdcreds1 := make([]byte, 32) wdcreds1[0] = 0x01 copy(wdcreds1[12:], txFilter.WithdrawalAddress) wdcreds2 := make([]byte, 32) wdcreds2[0] = 0x02 copy(wdcreds2[12:], txFilter.WithdrawalAddress) - args = append(args, wdcreds1, wdcreds2) - fmt.Fprintf(&sql, " %v (deposits.withdrawalcredentials = $%v OR deposits.withdrawalcredentials = $%v)", filterOp, len(args)-1, len(args)) + wdcreds3 := make([]byte, 32) + wdcreds3[0] = 0x03 + copy(wdcreds3[12:], txFilter.WithdrawalAddress) + args = append(args, wdcreds1, wdcreds2, wdcreds3) + fmt.Fprintf(&sql, " %v (deposits.withdrawalcredentials = $%v OR deposits.withdrawalcredentials = $%v OR deposits.withdrawalcredentials = $%v)", filterOp, len(args)-2, len(args)-1, len(args)) filterOp = "AND" } diff --git a/db/epochs.go b/db/epochs.go index aafce0f59..b003009f6 100644 --- a/db/epochs.go +++ b/db/epochs.go @@ -14,8 +14,8 @@ func InsertEpoch(ctx context.Context, tx *sqlx.Tx, epoch *dbtypes.Epoch) error { epoch, validator_count, validator_balance, eligible, voted_target, voted_head, voted_total, 
block_count, orphaned_count, attestation_count, deposit_count, exit_count, withdraw_count, withdraw_amount, attester_slashing_count, proposer_slashing_count, bls_change_count, eth_transaction_count, sync_participation, blob_count, - eth_gas_used, eth_gas_limit - ) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, $17, $18, $19, $20, $21, $22) + eth_gas_used, eth_gas_limit, payload_count + ) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, $17, $18, $19, $20, $21, $22, $23) ON CONFLICT (epoch) DO UPDATE SET validator_count = excluded.validator_count, validator_balance = excluded.validator_balance, @@ -37,18 +37,19 @@ func InsertEpoch(ctx context.Context, tx *sqlx.Tx, epoch *dbtypes.Epoch) error { sync_participation = excluded.sync_participation, blob_count = excluded.blob_count, eth_gas_used = excluded.eth_gas_used, - eth_gas_limit = excluded.eth_gas_limit`, + eth_gas_limit = excluded.eth_gas_limit, + payload_count = excluded.payload_count`, dbtypes.DBEngineSqlite: ` INSERT OR REPLACE INTO epochs ( epoch, validator_count, validator_balance, eligible, voted_target, voted_head, voted_total, block_count, orphaned_count, attestation_count, deposit_count, exit_count, withdraw_count, withdraw_amount, attester_slashing_count, proposer_slashing_count, bls_change_count, eth_transaction_count, sync_participation, blob_count, - eth_gas_used, eth_gas_limit - ) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, $17, $18, $19, $20, $21, $22)`, + eth_gas_used, eth_gas_limit, payload_count + ) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, $17, $18, $19, $20, $21, $22, $23)`, }), epoch.Epoch, epoch.ValidatorCount, epoch.ValidatorBalance, epoch.Eligible, epoch.VotedTarget, epoch.VotedHead, epoch.VotedTotal, epoch.BlockCount, epoch.OrphanedCount, epoch.AttestationCount, epoch.DepositCount, epoch.ExitCount, epoch.WithdrawCount, epoch.WithdrawAmount, 
epoch.AttesterSlashingCount, epoch.ProposerSlashingCount, - epoch.BLSChangeCount, epoch.EthTransactionCount, epoch.SyncParticipation, epoch.BlobCount, epoch.EthGasUsed, epoch.EthGasLimit) + epoch.BLSChangeCount, epoch.EthTransactionCount, epoch.SyncParticipation, epoch.BlobCount, epoch.EthGasUsed, epoch.EthGasLimit, epoch.PayloadCount) if err != nil { return err } @@ -71,7 +72,7 @@ func GetEpochs(ctx context.Context, firstEpoch uint64, limit uint32) []*dbtypes. epoch, validator_count, validator_balance, eligible, voted_target, voted_head, voted_total, block_count, orphaned_count, attestation_count, deposit_count, exit_count, withdraw_count, withdraw_amount, attester_slashing_count, proposer_slashing_count, bls_change_count, eth_transaction_count, sync_participation, blob_count, - eth_gas_used, eth_gas_limit + eth_gas_used, eth_gas_limit, payload_count FROM epochs WHERE epoch <= $1 ORDER BY epoch DESC diff --git a/db/orphaned_blocks.go b/db/orphaned_blocks.go index a027b860a..3f6eddd4d 100644 --- a/db/orphaned_blocks.go +++ b/db/orphaned_blocks.go @@ -11,15 +11,15 @@ func InsertOrphanedBlock(ctx context.Context, tx *sqlx.Tx, block *dbtypes.Orphan _, err := tx.ExecContext(ctx, EngineQuery(map[dbtypes.DBEngineType]string{ dbtypes.DBEnginePgsql: ` INSERT INTO orphaned_blocks ( - root, header_ver, header_ssz, block_ver, block_ssz, block_uid - ) VALUES ($1, $2, $3, $4, $5, $6) + root, header_ver, header_ssz, block_ver, block_ssz, block_uid, payload_ver, payload_ssz + ) VALUES ($1, $2, $3, $4, $5, $6, $7, $8) ON CONFLICT (root) DO NOTHING`, dbtypes.DBEngineSqlite: ` INSERT OR IGNORE INTO orphaned_blocks ( - root, header_ver, header_ssz, block_ver, block_ssz, block_uid - ) VALUES ($1, $2, $3, $4, $5, $6)`, + root, header_ver, header_ssz, block_ver, block_ssz, block_uid, payload_ver, payload_ssz + ) VALUES ($1, $2, $3, $4, $5, $6, $7, $8)`, }), - block.Root, block.HeaderVer, block.HeaderSSZ, block.BlockVer, block.BlockSSZ, block.BlockUid) + block.Root, block.HeaderVer, 
block.HeaderSSZ, block.BlockVer, block.BlockSSZ, block.BlockUid, block.PayloadVer, block.PayloadSSZ) if err != nil { return err } @@ -29,7 +29,7 @@ func InsertOrphanedBlock(ctx context.Context, tx *sqlx.Tx, block *dbtypes.Orphan func GetOrphanedBlock(ctx context.Context, root []byte) *dbtypes.OrphanedBlock { block := dbtypes.OrphanedBlock{} err := ReaderDb.GetContext(ctx, &block, ` - SELECT root, header_ver, header_ssz, block_ver, block_ssz, block_uid + SELECT root, header_ver, header_ssz, block_ver, block_ssz, block_uid, payload_ver, payload_ssz FROM orphaned_blocks WHERE root = $1 `, root) diff --git a/db/schema/pgsql/20260108202212_epbs-payload.sql b/db/schema/pgsql/20260108202212_epbs-payload.sql new file mode 100644 index 000000000..4a9eaf95c --- /dev/null +++ b/db/schema/pgsql/20260108202212_epbs-payload.sql @@ -0,0 +1,83 @@ +-- +goose Up +-- +goose StatementBegin + +ALTER TABLE public."unfinalized_blocks" + ADD COLUMN "payload_ver" int NOT NULL DEFAULT 0, + ADD COLUMN "payload_ssz" bytea NULL; + +ALTER TABLE public."orphaned_blocks" + ADD COLUMN "payload_ver" int NOT NULL DEFAULT 0, + ADD COLUMN "payload_ssz" bytea NULL; + +ALTER TABLE public."slots" + ADD COLUMN "payload_status" smallint NOT NULL DEFAULT 0, + ADD COLUMN "builder_index" bigint NOT NULL DEFAULT -1, + ADD COLUMN "eth_block_parent_hash" bytea NULL; + +CREATE INDEX IF NOT EXISTS "slots_payload_status_idx" + ON public."slots" + ("payload_status" ASC NULLS LAST); + +CREATE INDEX IF NOT EXISTS "slots_eth_block_parent_hash_idx" + ON public."slots" + ("eth_block_parent_hash" ASC NULLS LAST); + +CREATE INDEX IF NOT EXISTS "slots_builder_index_idx" + ON public."slots" + ("builder_index" ASC NULLS LAST); + +ALTER TABLE public."epochs" + ADD COLUMN "payload_count" int NOT NULL DEFAULT 0; + +ALTER TABLE public."unfinalized_epochs" + ADD COLUMN "payload_count" int NOT NULL DEFAULT 0; + +CREATE TABLE IF NOT EXISTS public."block_bids" ( + "parent_root" bytea NOT NULL, + "parent_hash" bytea NOT NULL, + 
"block_hash" bytea NOT NULL, + "fee_recipient" bytea NOT NULL, + "gas_limit" bigint NOT NULL, + "builder_index" bigint NOT NULL, + "slot" bigint NOT NULL, + "value" bigint NOT NULL, + "el_payment" bigint NOT NULL, + CONSTRAINT block_bids_pkey PRIMARY KEY (parent_root, parent_hash, block_hash, builder_index) +); + +CREATE INDEX IF NOT EXISTS "block_bids_parent_root_idx" + ON public."block_bids" + ("parent_root" ASC NULLS LAST); + +CREATE INDEX IF NOT EXISTS "block_bids_builder_index_idx" + ON public."block_bids" + ("builder_index" ASC NULLS LAST); + +CREATE INDEX IF NOT EXISTS "block_bids_slot_idx" + ON public."block_bids" + ("slot" ASC NULLS LAST); + +CREATE TABLE IF NOT EXISTS public."builders" ( + "pubkey" bytea NOT NULL, + "builder_index" bigint NOT NULL, + "version" smallint NOT NULL, + "execution_address" bytea NOT NULL, + "deposit_epoch" bigint NOT NULL, + "withdrawable_epoch" bigint NOT NULL, + "superseded" boolean NOT NULL DEFAULT false, + CONSTRAINT builders_pkey PRIMARY KEY (pubkey) +); + +CREATE INDEX IF NOT EXISTS "builders_builder_index_idx" + ON public."builders" + ("builder_index" ASC NULLS LAST); + +CREATE INDEX IF NOT EXISTS "builders_execution_address_idx" + ON public."builders" + ("execution_address" ASC NULLS LAST); + +-- +goose StatementEnd +-- +goose Down +-- +goose StatementBegin +SELECT 'NOT SUPPORTED'; +-- +goose StatementEnd \ No newline at end of file diff --git a/db/schema/pgsql/20260403000000_withdrawal-ref-slot.sql b/db/schema/pgsql/20260403000000_withdrawal-ref-slot.sql new file mode 100644 index 000000000..fb064d32d --- /dev/null +++ b/db/schema/pgsql/20260403000000_withdrawal-ref-slot.sql @@ -0,0 +1,8 @@ +-- +goose Up +-- +goose StatementBegin +ALTER TABLE public."withdrawals" ADD COLUMN IF NOT EXISTS ref_slot BIGINT NULL; +-- +goose StatementEnd +-- +goose Down +-- +goose StatementBegin +SELECT 1; +-- +goose StatementEnd diff --git a/db/schema/sqlite/20260108202212_epbs-payload.sql b/db/schema/sqlite/20260108202212_epbs-payload.sql 
new file mode 100644 index 000000000..2bf22624a --- /dev/null +++ b/db/schema/sqlite/20260108202212_epbs-payload.sql @@ -0,0 +1,60 @@ +-- +goose Up +-- +goose StatementBegin + +ALTER TABLE "unfinalized_blocks" ADD "payload_ver" int NOT NULL DEFAULT 0; +ALTER TABLE "unfinalized_blocks" ADD "payload_ssz" BLOB NULL; + +ALTER TABLE "orphaned_blocks" ADD "payload_ver" int NOT NULL DEFAULT 0; +ALTER TABLE "orphaned_blocks" ADD "payload_ssz" BLOB NULL; + +ALTER TABLE "slots" ADD "payload_status" smallint NOT NULL DEFAULT 0; +ALTER TABLE "slots" ADD "builder_index" BIGINT NOT NULL DEFAULT -1; +ALTER TABLE "slots" ADD "eth_block_parent_hash" BLOB NULL; + +CREATE INDEX IF NOT EXISTS "slots_payload_status_idx" ON "slots" ("payload_status" ASC); +CREATE INDEX IF NOT EXISTS "slots_eth_block_parent_hash_idx" ON "slots" ("eth_block_parent_hash" ASC); +CREATE INDEX IF NOT EXISTS "slots_builder_index_idx" ON "slots" ("builder_index" ASC); + +ALTER TABLE "epochs" ADD "payload_count" int NOT NULL DEFAULT 0; + +ALTER TABLE "unfinalized_epochs" ADD "payload_count" int NOT NULL DEFAULT 0; + +CREATE TABLE IF NOT EXISTS "block_bids" ( + "parent_root" BLOB NOT NULL, + "parent_hash" BLOB NOT NULL, + "block_hash" BLOB NOT NULL, + "fee_recipient" BLOB NOT NULL, + "gas_limit" BIGINT NOT NULL, + "builder_index" BIGINT NOT NULL, + "slot" BIGINT NOT NULL, + "value" BIGINT NOT NULL, + "el_payment" BIGINT NOT NULL, + CONSTRAINT block_bids_pkey PRIMARY KEY (parent_root, parent_hash, block_hash, builder_index) +); + +CREATE INDEX IF NOT EXISTS "block_bids_parent_root_idx" ON "block_bids" ("parent_root" ASC); + +CREATE INDEX IF NOT EXISTS "block_bids_builder_index_idx" ON "block_bids" ("builder_index" ASC); + +CREATE INDEX IF NOT EXISTS "block_bids_slot_idx" ON "block_bids" ("slot" ASC); + +CREATE TABLE IF NOT EXISTS "builders" ( + "pubkey" BLOB NOT NULL, + "builder_index" BIGINT NOT NULL, + "version" SMALLINT NOT NULL, + "execution_address" BLOB NOT NULL, + "deposit_epoch" BIGINT NOT NULL, + 
"withdrawable_epoch" BIGINT NOT NULL, + "superseded" BOOLEAN NOT NULL DEFAULT false, + PRIMARY KEY (pubkey) +); + +CREATE INDEX IF NOT EXISTS "builders_builder_index_idx" ON "builders" ("builder_index" ASC); + +CREATE INDEX IF NOT EXISTS "builders_execution_address_idx" ON "builders" ("execution_address" ASC); + +-- +goose StatementEnd +-- +goose Down +-- +goose StatementBegin +SELECT 'NOT SUPPORTED'; +-- +goose StatementEnd \ No newline at end of file diff --git a/db/schema/sqlite/20260403000000_withdrawal-ref-slot.sql b/db/schema/sqlite/20260403000000_withdrawal-ref-slot.sql new file mode 100644 index 000000000..9d9d4a7a5 --- /dev/null +++ b/db/schema/sqlite/20260403000000_withdrawal-ref-slot.sql @@ -0,0 +1,8 @@ +-- +goose Up +-- +goose StatementBegin +ALTER TABLE "withdrawals" ADD COLUMN ref_slot BIGINT NULL; +-- +goose StatementEnd +-- +goose Down +-- +goose StatementBegin +SELECT 1; +-- +goose StatementEnd diff --git a/db/slots.go b/db/slots.go index d9d689bdd..574d3d56e 100644 --- a/db/slots.go +++ b/db/slots.go @@ -7,8 +7,8 @@ import ( "math" "strings" - "github.com/attestantio/go-eth2-client/spec/phase0" "github.com/ethpandaops/dora/dbtypes" + "github.com/ethpandaops/go-eth2-client/spec/phase0" "github.com/jmoiron/sqlx" "github.com/mitchellh/mapstructure" ) @@ -20,31 +20,32 @@ func InsertSlot(ctx context.Context, tx *sqlx.Tx, slot *dbtypes.Slot) error { slot, proposer, status, root, parent_root, state_root, graffiti, graffiti_text, attestation_count, deposit_count, exit_count, withdraw_count, withdraw_amount, attester_slashing_count, proposer_slashing_count, bls_change_count, eth_transaction_count, eth_block_number, eth_block_hash, - eth_block_extra, eth_block_extra_text, sync_participation, fork_id, blob_count, eth_gas_used, - eth_gas_limit, eth_base_fee, eth_fee_recipient, block_size, recv_delay, min_exec_time, max_exec_time, exec_times, - block_uid - ) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, $17, $18, $19, $20, $21, 
$22, $23, $24, $25, $26, $27, $28, $29, $30, $31, $32, $33, $34) + eth_block_parent_hash, eth_block_extra, eth_block_extra_text, sync_participation, fork_id, blob_count, + eth_gas_used, eth_gas_limit, eth_base_fee, eth_fee_recipient, block_size, recv_delay, min_exec_time, + max_exec_time, exec_times, block_uid, payload_status, builder_index + ) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, $17, $18, $19, $20, $21, $22, $23, $24, $25, $26, $27, $28, $29, $30, $31, $32, $33, $34, $35, $36, $37) ON CONFLICT (slot, root) DO UPDATE SET status = excluded.status, eth_block_extra = excluded.eth_block_extra, eth_block_extra_text = excluded.eth_block_extra_text, - fork_id = excluded.fork_id`, + fork_id = excluded.fork_id, + payload_status = excluded.payload_status`, dbtypes.DBEngineSqlite: ` INSERT OR REPLACE INTO slots ( slot, proposer, status, root, parent_root, state_root, graffiti, graffiti_text, attestation_count, deposit_count, exit_count, withdraw_count, withdraw_amount, attester_slashing_count, proposer_slashing_count, bls_change_count, eth_transaction_count, eth_block_number, eth_block_hash, - eth_block_extra, eth_block_extra_text, sync_participation, fork_id, blob_count, eth_gas_used, - eth_gas_limit, eth_base_fee, eth_fee_recipient, block_size, recv_delay, min_exec_time, max_exec_time, exec_times, - block_uid - ) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, $17, $18, $19, $20, $21, $22, $23, $24, $25, $26, $27, $28, $29, $30, $31, $32, $33, $34)`, + eth_block_parent_hash, eth_block_extra, eth_block_extra_text, sync_participation, fork_id, blob_count, + eth_gas_used, eth_gas_limit, eth_base_fee, eth_fee_recipient, block_size, recv_delay, min_exec_time, + max_exec_time, exec_times, block_uid, payload_status, builder_index + ) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, $17, $18, $19, $20, $21, $22, $23, $24, $25, $26, $27, $28, $29, $30, $31, $32, $33, $34, $35, 
$36, $37)`, }), slot.Slot, slot.Proposer, slot.Status, slot.Root, slot.ParentRoot, slot.StateRoot, slot.Graffiti, slot.GraffitiText, slot.AttestationCount, slot.DepositCount, slot.ExitCount, slot.WithdrawCount, slot.WithdrawAmount, slot.AttesterSlashingCount, slot.ProposerSlashingCount, slot.BLSChangeCount, slot.EthTransactionCount, slot.EthBlockNumber, slot.EthBlockHash, - slot.EthBlockExtra, slot.EthBlockExtraText, slot.SyncParticipation, slot.ForkId, slot.BlobCount, slot.EthGasUsed, - slot.EthGasLimit, slot.EthBaseFee, slot.EthFeeRecipient, slot.BlockSize, slot.RecvDelay, slot.MinExecTime, slot.MaxExecTime, - slot.ExecTimes, slot.BlockUid) + slot.EthBlockParentHash, slot.EthBlockExtra, slot.EthBlockExtraText, slot.SyncParticipation, slot.ForkId, slot.BlobCount, + slot.EthGasUsed, slot.EthGasLimit, slot.EthBaseFee, slot.EthFeeRecipient, slot.BlockSize, slot.RecvDelay, slot.MinExecTime, + slot.MaxExecTime, slot.ExecTimes, slot.BlockUid, slot.PayloadStatus, slot.BuilderIndex) if err != nil { return err } @@ -99,9 +100,9 @@ func GetSlotsRange(ctx context.Context, firstSlot uint64, lastSlot uint64, withM "state_root", "root", "slot", "proposer", "status", "parent_root", "graffiti", "graffiti_text", "attestation_count", "deposit_count", "exit_count", "withdraw_count", "withdraw_amount", "attester_slashing_count", "proposer_slashing_count", "bls_change_count", "eth_transaction_count", "eth_block_number", "eth_block_hash", - "eth_block_extra", "eth_block_extra_text", "sync_participation", "fork_id", "blob_count", "eth_gas_used", - "eth_gas_limit", "eth_base_fee", "eth_fee_recipient", "block_size", "recv_delay", "min_exec_time", "max_exec_time", "exec_times", - "block_uid", + "eth_block_parent_hash", "eth_block_extra", "eth_block_extra_text", "sync_participation", "fork_id", "blob_count", + "eth_gas_used", "eth_gas_limit", "eth_base_fee", "eth_fee_recipient", "block_size", "recv_delay", "min_exec_time", + "max_exec_time", "exec_times", "block_uid", "payload_status", 
"builder_index", } for _, blockField := range blockFields { fmt.Fprintf(&sql, ", slots.%v AS \"block.%v\"", blockField, blockField) @@ -133,9 +134,9 @@ func GetSlotsByParentRoot(ctx context.Context, parentRoot []byte) []*dbtypes.Slo slot, proposer, status, root, parent_root, state_root, graffiti, graffiti_text, attestation_count, deposit_count, exit_count, withdraw_count, withdraw_amount, attester_slashing_count, proposer_slashing_count, bls_change_count, eth_transaction_count, eth_block_number, eth_block_hash, - eth_block_extra, eth_block_extra_text, sync_participation, fork_id, blob_count, eth_gas_used, - eth_gas_limit, eth_base_fee, eth_fee_recipient, block_size, recv_delay, min_exec_time, max_exec_time, exec_times, - block_uid + eth_block_parent_hash, eth_block_extra, eth_block_extra_text, sync_participation, fork_id, blob_count, + eth_gas_used, eth_gas_limit, eth_base_fee, eth_fee_recipient, block_size, recv_delay, min_exec_time, + max_exec_time, exec_times, block_uid, payload_status, builder_index FROM slots WHERE parent_root = $1 ORDER BY slot DESC @@ -154,9 +155,9 @@ func GetSlotByRoot(ctx context.Context, root []byte) *dbtypes.Slot { root, slot, parent_root, state_root, status, proposer, graffiti, graffiti_text, attestation_count, deposit_count, exit_count, withdraw_count, withdraw_amount, attester_slashing_count, proposer_slashing_count, bls_change_count, eth_transaction_count, eth_block_number, eth_block_hash, - eth_block_extra, eth_block_extra_text, sync_participation, fork_id, blob_count, eth_gas_used, - eth_gas_limit, eth_base_fee, eth_fee_recipient, block_size, recv_delay, min_exec_time, max_exec_time, exec_times, - block_uid + eth_block_parent_hash, eth_block_extra, eth_block_extra_text, sync_participation, fork_id, blob_count, + eth_gas_used, eth_gas_limit, eth_base_fee, eth_fee_recipient, block_size, recv_delay, min_exec_time, + max_exec_time, exec_times, block_uid, payload_status, builder_index FROM slots WHERE root = $1 `, root) @@ -182,9 +183,9 
@@ func GetSlotsByRoots(ctx context.Context, roots [][]byte) map[phase0.Root]*dbtyp root, slot, parent_root, state_root, status, proposer, graffiti, graffiti_text, attestation_count, deposit_count, exit_count, withdraw_count, withdraw_amount, attester_slashing_count, proposer_slashing_count, bls_change_count, eth_transaction_count, eth_block_number, eth_block_hash, - eth_block_extra, eth_block_extra_text, sync_participation, fork_id, blob_count, eth_gas_used, - eth_gas_limit, eth_base_fee, eth_fee_recipient, block_size, recv_delay, min_exec_time, max_exec_time, exec_times, - block_uid + eth_block_parent_hash, eth_block_extra, eth_block_extra_text, sync_participation, fork_id, blob_count, + eth_gas_used, eth_gas_limit, eth_base_fee, eth_fee_recipient, block_size, recv_delay, min_exec_time, + max_exec_time, exec_times, block_uid, payload_status, builder_index FROM slots WHERE root IN (%v) ORDER BY slot DESC`, @@ -258,9 +259,9 @@ func GetSlotsByBlockHash(ctx context.Context, blockHash []byte) []*dbtypes.Slot slot, proposer, status, root, parent_root, state_root, graffiti, graffiti_text, attestation_count, deposit_count, exit_count, withdraw_count, withdraw_amount, attester_slashing_count, proposer_slashing_count, bls_change_count, eth_transaction_count, eth_block_number, eth_block_hash, - eth_block_extra, eth_block_extra_text, sync_participation, fork_id, blob_count, eth_gas_used, - eth_gas_limit, eth_base_fee, eth_fee_recipient, block_size, recv_delay, min_exec_time, max_exec_time, exec_times, - block_uid + eth_block_parent_hash, eth_block_extra, eth_block_extra_text, sync_participation, fork_id, blob_count, + eth_gas_used, eth_gas_limit, eth_base_fee, eth_fee_recipient, block_size, recv_delay, min_exec_time, + max_exec_time, exec_times, block_uid, payload_status, builder_index FROM slots WHERE eth_block_hash = $1 ORDER BY slot DESC @@ -320,9 +321,9 @@ func GetFilteredSlots(ctx context.Context, filter *dbtypes.BlockFilter, firstSlo "state_root", "root", "slot", 
"proposer", "status", "parent_root", "graffiti", "graffiti_text", "attestation_count", "deposit_count", "exit_count", "withdraw_count", "withdraw_amount", "attester_slashing_count", "proposer_slashing_count", "bls_change_count", "eth_transaction_count", "eth_block_number", "eth_block_hash", - "eth_block_extra", "eth_block_extra_text", "sync_participation", "fork_id", "blob_count", "eth_gas_used", - "eth_gas_limit", "eth_base_fee", "eth_fee_recipient", "block_size", "recv_delay", "min_exec_time", "max_exec_time", "exec_times", - "block_uid", + "eth_block_parent_hash", "eth_block_extra", "eth_block_extra_text", "sync_participation", "fork_id", "blob_count", + "eth_gas_used", "eth_gas_limit", "eth_base_fee", "eth_fee_recipient", "block_size", "recv_delay", "min_exec_time", + "max_exec_time", "exec_times", "block_uid", "payload_status", "builder_index", } for _, blockField := range blockFields { fmt.Fprintf(&sql, ", slots.%v AS \"block.%v\"", blockField, blockField) @@ -477,6 +478,37 @@ func GetFilteredSlots(ctx context.Context, filter *dbtypes.BlockFilter, firstSlo fmt.Fprintf(&sql, ` AND slots.eth_block_hash = $%v `, argIdx) args = append(args, filter.EthBlockHash) } + if filter.BuilderIndex != nil { + argIdx++ + fmt.Fprintf(&sql, ` AND slots.builder_index = $%v `, argIdx) + args = append(args, *filter.BuilderIndex) + } + + if filter.WithPayloadMask != dbtypes.PayloadStatusMaskAll { + allowedPayloadStatuses := []dbtypes.PayloadStatus{} + if filter.WithPayloadMask&dbtypes.PayloadStatusMaskMissing != 0 { + allowedPayloadStatuses = append(allowedPayloadStatuses, dbtypes.PayloadStatusMissing) + } + if filter.WithPayloadMask&dbtypes.PayloadStatusMaskCanonical != 0 { + allowedPayloadStatuses = append(allowedPayloadStatuses, dbtypes.PayloadStatusCanonical) + } + if filter.WithPayloadMask&dbtypes.PayloadStatusMaskOrphaned != 0 { + allowedPayloadStatuses = append(allowedPayloadStatuses, dbtypes.PayloadStatusOrphaned) + } + + if len(allowedPayloadStatuses) > 0 { + 
allowedPayloadStatusesPlaceholders := make([]string, len(allowedPayloadStatuses)) + for i, payloadStatus := range allowedPayloadStatuses { + allowedPayloadStatusesPlaceholders[i] = fmt.Sprintf("%v", payloadStatus) + } + fmt.Fprintf(&sql, ` AND slots.payload_status IN (%s) `, strings.Join(allowedPayloadStatusesPlaceholders, ", ")) + } + } + if len(filter.EthBlockParentHash) > 0 { + argIdx++ + fmt.Fprintf(&sql, ` AND slots.eth_block_parent_hash = $%v `, argIdx) + args = append(args, filter.EthBlockParentHash) + } if filter.MinGasUsed != nil { argIdx++ fmt.Fprintf(&sql, ` AND slots.eth_gas_used >= $%v `, argIdx) diff --git a/db/unfinalized_blocks.go b/db/unfinalized_blocks.go index 914173fb5..668797776 100644 --- a/db/unfinalized_blocks.go +++ b/db/unfinalized_blocks.go @@ -13,18 +13,16 @@ func InsertUnfinalizedBlock(ctx context.Context, tx *sqlx.Tx, block *dbtypes.Unf _, err := tx.ExecContext(ctx, EngineQuery(map[dbtypes.DBEngineType]string{ dbtypes.DBEnginePgsql: ` INSERT INTO unfinalized_blocks ( - root, slot, header_ver, header_ssz, block_ver, block_ssz, status, fork_id, recv_delay, min_exec_time, max_exec_time, exec_times, - block_uid - ) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13) + root, slot, header_ver, header_ssz, block_ver, block_ssz, payload_ver, payload_ssz, status, fork_id, recv_delay, min_exec_time, max_exec_time, exec_times, block_uid + ) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15) ON CONFLICT (root) DO NOTHING`, dbtypes.DBEngineSqlite: ` INSERT OR IGNORE INTO unfinalized_blocks ( - root, slot, header_ver, header_ssz, block_ver, block_ssz, status, fork_id, recv_delay, min_exec_time, max_exec_time, exec_times, - block_uid - ) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13)`, + root, slot, header_ver, header_ssz, block_ver, block_ssz, payload_ver, payload_ssz, status, fork_id, recv_delay, min_exec_time, max_exec_time, exec_times, block_uid + ) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, 
$10, $11, $12, $13, $14, $15)`, }), - block.Root, block.Slot, block.HeaderVer, block.HeaderSSZ, block.BlockVer, block.BlockSSZ, block.Status, block.ForkId, block.RecvDelay, block.MinExecTime, block.MaxExecTime, - block.ExecTimes, block.BlockUid, + block.Root, block.Slot, block.HeaderVer, block.HeaderSSZ, block.BlockVer, block.BlockSSZ, block.PayloadVer, block.PayloadSSZ, block.Status, block.ForkId, block.RecvDelay, + block.MinExecTime, block.MaxExecTime, block.ExecTimes, block.BlockUid, ) if err != nil { return err @@ -90,6 +88,14 @@ func UpdateUnfinalizedBlockForkId(ctx context.Context, tx *sqlx.Tx, roots [][]by return nil } +func UpdateUnfinalizedBlockPayload(ctx context.Context, tx *sqlx.Tx, root []byte, payloadVer uint64, payloadSSZ []byte) error { + _, err := tx.ExecContext(ctx, `UPDATE unfinalized_blocks SET payload_ver = $1, payload_ssz = $2 WHERE root = $3`, payloadVer, payloadSSZ, root) + if err != nil { + return err + } + return nil +} + func UpdateUnfinalizedBlockExecutionTimes(ctx context.Context, tx *sqlx.Tx, root []byte, minExecTime uint32, maxExecTime uint32, execTimes []byte) error { _, err := tx.ExecContext(ctx, `UPDATE unfinalized_blocks SET min_exec_time = $1, max_exec_time = $2, exec_times = $3 WHERE root = $4`, minExecTime, maxExecTime, execTimes, root) if err != nil { @@ -141,7 +147,7 @@ func StreamUnfinalizedBlocks(ctx context.Context, slot uint64, cb func(block *db var sql strings.Builder args := []any{slot} - fmt.Fprint(&sql, `SELECT root, slot, header_ver, header_ssz, block_ver, block_ssz, status, fork_id, recv_delay, min_exec_time, max_exec_time, exec_times, block_uid FROM unfinalized_blocks WHERE slot >= $1`) + fmt.Fprint(&sql, `SELECT root, slot, header_ver, header_ssz, block_ver, block_ssz, payload_ver, payload_ssz, status, fork_id, recv_delay, min_exec_time, max_exec_time, exec_times, block_uid FROM unfinalized_blocks WHERE slot >= $1`) rows, err := ReaderDb.QueryContext(ctx, sql.String(), args...) 
if err != nil { @@ -152,7 +158,7 @@ func StreamUnfinalizedBlocks(ctx context.Context, slot uint64, cb func(block *db for rows.Next() { block := dbtypes.UnfinalizedBlock{} err := rows.Scan( - &block.Root, &block.Slot, &block.HeaderVer, &block.HeaderSSZ, &block.BlockVer, &block.BlockSSZ, &block.Status, &block.ForkId, &block.RecvDelay, + &block.Root, &block.Slot, &block.HeaderVer, &block.HeaderSSZ, &block.BlockVer, &block.BlockSSZ, &block.PayloadVer, &block.PayloadSSZ, &block.Status, &block.ForkId, &block.RecvDelay, &block.MinExecTime, &block.MaxExecTime, &block.ExecTimes, &block.BlockUid, ) if err != nil { @@ -165,13 +171,28 @@ func StreamUnfinalizedBlocks(ctx context.Context, slot uint64, cb func(block *db return nil } -func GetUnfinalizedBlock(ctx context.Context, root []byte) *dbtypes.UnfinalizedBlock { +func GetUnfinalizedBlock(ctx context.Context, root []byte, withHeader bool, withBody bool, withPayload bool) *dbtypes.UnfinalizedBlock { + var sql strings.Builder + fmt.Fprint(&sql, `SELECT root, slot`) + + if withHeader { + fmt.Fprint(&sql, `, header_ver, header_ssz`) + } + + if withBody { + fmt.Fprint(&sql, `, block_ver, block_ssz`) + } + + if withPayload { + fmt.Fprint(&sql, `, payload_ver, payload_ssz`) + } + + fmt.Fprint(&sql, `, status, fork_id, recv_delay, min_exec_time, max_exec_time, exec_times, block_uid`) + + fmt.Fprint(&sql, ` FROM unfinalized_blocks WHERE root = $1`) + block := dbtypes.UnfinalizedBlock{} - err := ReaderDb.GetContext(ctx, &block, ` - SELECT root, slot, header_ver, header_ssz, block_ver, block_ssz, status, fork_id, recv_delay, min_exec_time, max_exec_time, exec_times, block_uid - FROM unfinalized_blocks - WHERE root = $1 - `, root) + err := ReaderDb.GetContext(ctx, &block, sql.String(), root) if err != nil { logger.Errorf("Error while fetching unfinalized block 0x%x: %v", root, err) return nil diff --git a/db/unfinalized_epochs.go b/db/unfinalized_epochs.go index 960fde0d4..c5a452591 100644 --- a/db/unfinalized_epochs.go +++ 
b/db/unfinalized_epochs.go @@ -14,8 +14,8 @@ func InsertUnfinalizedEpoch(ctx context.Context, tx *sqlx.Tx, epoch *dbtypes.Unf epoch, dependent_root, epoch_head_root, epoch_head_fork_id, validator_count, validator_balance, eligible, voted_target, voted_head, voted_total, block_count, orphaned_count, attestation_count, deposit_count, exit_count, withdraw_count, withdraw_amount, attester_slashing_count, proposer_slashing_count, bls_change_count, eth_transaction_count, sync_participation, - blob_count, eth_gas_used, eth_gas_limit - ) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, $17, $18, $19, $20, $21, $22, $23, $24, $25) + blob_count, eth_gas_used, eth_gas_limit, payload_count + ) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, $17, $18, $19, $20, $21, $22, $23, $24, $25, $26) ON CONFLICT (epoch, dependent_root, epoch_head_root) DO UPDATE SET epoch_head_fork_id = excluded.epoch_head_fork_id, validator_count = excluded.validator_count, @@ -38,19 +38,20 @@ func InsertUnfinalizedEpoch(ctx context.Context, tx *sqlx.Tx, epoch *dbtypes.Unf sync_participation = excluded.sync_participation, blob_count = excluded.blob_count, eth_gas_used = excluded.eth_gas_used, - eth_gas_limit = excluded.eth_gas_limit`, + eth_gas_limit = excluded.eth_gas_limit, + payload_count = excluded.payload_count`, dbtypes.DBEngineSqlite: ` INSERT OR REPLACE INTO unfinalized_epochs ( epoch, dependent_root, epoch_head_root, epoch_head_fork_id, validator_count, validator_balance, eligible, voted_target, voted_head, voted_total, block_count, orphaned_count, attestation_count, deposit_count, exit_count, withdraw_count, withdraw_amount, attester_slashing_count, proposer_slashing_count, bls_change_count, eth_transaction_count, sync_participation, - blob_count, eth_gas_used, eth_gas_limit - ) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, $17, $18, $19, $20, $21, $22, $23, $24, $25)`, + blob_count, eth_gas_used, 
eth_gas_limit, payload_count + ) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, $17, $18, $19, $20, $21, $22, $23, $24, $25, $26)`, }), epoch.Epoch, epoch.DependentRoot, epoch.EpochHeadRoot, epoch.EpochHeadForkId, epoch.ValidatorCount, epoch.ValidatorBalance, epoch.Eligible, epoch.VotedTarget, epoch.VotedHead, epoch.VotedTotal, epoch.BlockCount, epoch.OrphanedCount, epoch.AttestationCount, epoch.DepositCount, epoch.ExitCount, epoch.WithdrawCount, epoch.WithdrawAmount, epoch.AttesterSlashingCount, epoch.ProposerSlashingCount, epoch.BLSChangeCount, epoch.EthTransactionCount, epoch.SyncParticipation, - epoch.BlobCount, epoch.EthGasUsed, epoch.EthGasLimit, + epoch.BlobCount, epoch.EthGasUsed, epoch.EthGasLimit, epoch.PayloadCount, ) if err != nil { return err @@ -64,7 +65,7 @@ func StreamUnfinalizedEpochs(ctx context.Context, epoch uint64, cb func(duty *db epoch, dependent_root, epoch_head_root, epoch_head_fork_id, validator_count, validator_balance, eligible, voted_target, voted_head, voted_total, block_count, orphaned_count, attestation_count, deposit_count, exit_count, withdraw_count, withdraw_amount, attester_slashing_count, proposer_slashing_count, bls_change_count, eth_transaction_count, sync_participation, - blob_count, eth_gas_used, eth_gas_limit + blob_count, eth_gas_used, eth_gas_limit, payload_count FROM unfinalized_epochs WHERE epoch >= $1`, epoch) if err != nil { @@ -78,7 +79,7 @@ func StreamUnfinalizedEpochs(ctx context.Context, epoch uint64, cb func(duty *db &e.Epoch, &e.DependentRoot, &e.EpochHeadRoot, &e.EpochHeadForkId, &e.ValidatorCount, &e.ValidatorBalance, &e.Eligible, &e.VotedTarget, &e.VotedHead, &e.VotedTotal, &e.BlockCount, &e.OrphanedCount, &e.AttestationCount, &e.DepositCount, &e.ExitCount, &e.WithdrawCount, &e.WithdrawAmount, &e.AttesterSlashingCount, &e.ProposerSlashingCount, &e.BLSChangeCount, &e.EthTransactionCount, &e.SyncParticipation, - &e.BlobCount, &e.EthGasUsed, &e.EthGasLimit, + &e.BlobCount, 
&e.EthGasUsed, &e.EthGasLimit, &e.PayloadCount, ) if err != nil { logger.Errorf("Error while scanning unfinalized epoch: %v", err) @@ -97,7 +98,7 @@ func GetUnfinalizedEpoch(ctx context.Context, epoch uint64, headRoot []byte) *db epoch, dependent_root, epoch_head_root, epoch_head_fork_id, validator_count, validator_balance, eligible, voted_target, voted_head, voted_total, block_count, orphaned_count, attestation_count, deposit_count, exit_count, withdraw_count, withdraw_amount, attester_slashing_count, proposer_slashing_count, bls_change_count, eth_transaction_count, sync_participation, - blob_count, eth_gas_used, eth_gas_limit + blob_count, eth_gas_used, eth_gas_limit, payload_count FROM unfinalized_epochs WHERE epoch = $1 AND epoch_head_root = $2 `, epoch, headRoot) diff --git a/db/validators.go b/db/validators.go index 0c988b293..ade9bdc7b 100644 --- a/db/validators.go +++ b/db/validators.go @@ -6,8 +6,8 @@ import ( "math" "strings" - "github.com/attestantio/go-eth2-client/spec/phase0" "github.com/ethpandaops/dora/dbtypes" + "github.com/ethpandaops/go-eth2-client/spec/phase0" "github.com/jmoiron/sqlx" ) diff --git a/db/withdrawals.go b/db/withdrawals.go index 3f20995e5..dff8f8a54 100644 --- a/db/withdrawals.go +++ b/db/withdrawals.go @@ -20,11 +20,11 @@ func InsertWithdrawals(ctx context.Context, dbTx *sqlx.Tx, withdrawals []*dbtype dbtypes.DBEnginePgsql: "INSERT INTO withdrawals ", dbtypes.DBEngineSqlite: "INSERT OR REPLACE INTO withdrawals ", }), - "(block_uid, block_idx, type, orphaned, fork_id, validator, account_id, amount)", + "(block_uid, block_idx, type, orphaned, fork_id, validator, account_id, ref_slot, amount)", " VALUES ", ) argIdx := 0 - fieldCount := 8 + fieldCount := 9 args := make([]any, len(withdrawals)*fieldCount) for i, w := range withdrawals { @@ -47,7 +47,8 @@ func InsertWithdrawals(ctx context.Context, dbTx *sqlx.Tx, withdrawals []*dbtype args[argIdx+4] = w.ForkId args[argIdx+5] = w.Validator args[argIdx+6] = w.AccountID - args[argIdx+7] = 
w.Amount + args[argIdx+7] = w.RefSlot + args[argIdx+8] = w.Amount argIdx += fieldCount } fmt.Fprint(&sql, EngineQuery(map[dbtypes.DBEngineType]string{ @@ -57,6 +58,7 @@ func InsertWithdrawals(ctx context.Context, dbTx *sqlx.Tx, withdrawals []*dbtype " fork_id = excluded.fork_id," + " validator = excluded.validator," + " account_id = excluded.account_id," + + " ref_slot = excluded.ref_slot," + " amount = excluded.amount", dbtypes.DBEngineSqlite: "", })) @@ -71,7 +73,7 @@ func InsertWithdrawals(ctx context.Context, dbTx *sqlx.Tx, withdrawals []*dbtype func GetWithdrawalsByBlockUid(ctx context.Context, blockUid uint64) ([]*dbtypes.Withdrawal, error) { withdrawals := []*dbtypes.Withdrawal{} err := ReaderDb.SelectContext(ctx, &withdrawals, - "SELECT block_uid, block_idx, type, orphaned, fork_id, validator, account_id, amount"+ + "SELECT block_uid, block_idx, type, orphaned, fork_id, validator, account_id, ref_slot, amount"+ " FROM withdrawals WHERE block_uid = $1 ORDER BY block_idx ASC", blockUid) if err != nil { @@ -87,7 +89,7 @@ func GetWithdrawalsFiltered(ctx context.Context, offset uint64, limit uint32, fi fmt.Fprint(&sql, ` WITH cte AS ( SELECT - block_uid, block_idx, type, orphaned, fork_id, validator, account_id, amount + block_uid, block_idx, type, orphaned, fork_id, validator, account_id, ref_slot, amount FROM withdrawals `) @@ -98,9 +100,14 @@ func GetWithdrawalsFiltered(ctx context.Context, offset uint64, limit uint32, fi } filterOp := "WHERE" - if filter.Validator != nil { - args = append(args, *filter.Validator) - fmt.Fprintf(&sql, " %v validator = $%v", filterOp, len(args)) + if filter.MinIndex > 0 { + args = append(args, filter.MinIndex) + fmt.Fprintf(&sql, " %v validator >= $%v", filterOp, len(args)) + filterOp = "AND" + } + if filter.MaxIndex > 0 { + args = append(args, filter.MaxIndex) + fmt.Fprintf(&sql, " %v validator <= $%v", filterOp, len(args)) filterOp = "AND" } if filter.AccountID != nil { @@ -161,6 +168,7 @@ func GetWithdrawalsFiltered(ctx 
context.Context, offset uint64, limit uint32, fi 0 AS fork_id, 0 AS validator, 0 AS account_id, + null AS ref_slot, 0 AS amount FROM cte UNION ALL SELECT * FROM ( @@ -197,7 +205,7 @@ func GetWithdrawalsByAccountID(ctx context.Context, accountID uint64, offset uin args := []any{accountID} fmt.Fprint(&sql, ` - SELECT block_uid, block_idx, type, orphaned, fork_id, validator, account_id, amount + SELECT block_uid, block_idx, type, orphaned, fork_id, validator, account_id, ref_slot, amount FROM withdrawals WHERE account_id = $1 ORDER BY block_uid DESC NULLS LAST, block_idx ASC @@ -252,7 +260,7 @@ func GetWithdrawalsByBlockUidRange(ctx context.Context, minBlockUid uint64, maxB args := []any{minBlockUid, maxBlockUid} fmt.Fprint(&sql, - "SELECT block_uid, block_idx, type, orphaned, fork_id, validator, account_id, amount"+ + "SELECT block_uid, block_idx, type, orphaned, fork_id, validator, account_id, ref_slot, amount"+ " FROM withdrawals WHERE block_uid >= $1 AND block_uid < $2") if typeFilter != nil { diff --git a/dbtypes/dbtypes.go b/dbtypes/dbtypes.go index 3ee89bca3..21128e0ac 100644 --- a/dbtypes/dbtypes.go +++ b/dbtypes/dbtypes.go @@ -18,6 +18,14 @@ const ( Orphaned ) +type PayloadStatus uint8 + +const ( + PayloadStatusMissing PayloadStatus = iota + PayloadStatusCanonical + PayloadStatusOrphaned +) + type SlotHeader struct { Slot uint64 `db:"slot"` Proposer uint64 `db:"proposer"` @@ -25,40 +33,43 @@ type SlotHeader struct { } type Slot struct { - Slot uint64 `db:"slot"` - Proposer uint64 `db:"proposer"` - Status SlotStatus `db:"status"` - Root []byte `db:"root"` - ParentRoot []byte `db:"parent_root"` - StateRoot []byte `db:"state_root"` - Graffiti []byte `db:"graffiti"` - GraffitiText string `db:"graffiti_text"` - AttestationCount uint64 `db:"attestation_count"` - DepositCount uint64 `db:"deposit_count"` - ExitCount uint64 `db:"exit_count"` - WithdrawCount uint64 `db:"withdraw_count"` - WithdrawAmount uint64 `db:"withdraw_amount"` - AttesterSlashingCount uint64 
`db:"attester_slashing_count"` - ProposerSlashingCount uint64 `db:"proposer_slashing_count"` - BLSChangeCount uint64 `db:"bls_change_count"` - EthTransactionCount uint64 `db:"eth_transaction_count"` - BlobCount uint64 `db:"blob_count"` - EthGasUsed uint64 `db:"eth_gas_used"` - EthGasLimit uint64 `db:"eth_gas_limit"` - EthBaseFee uint64 `db:"eth_base_fee"` - EthFeeRecipient []byte `db:"eth_fee_recipient"` - EthBlockNumber *uint64 `db:"eth_block_number"` - EthBlockHash []byte `db:"eth_block_hash"` - EthBlockExtra []byte `db:"eth_block_extra"` - EthBlockExtraText string `db:"eth_block_extra_text"` - SyncParticipation float32 `db:"sync_participation"` - ForkId uint64 `db:"fork_id"` - BlockSize uint64 `db:"block_size"` - RecvDelay int32 `db:"recv_delay"` - MinExecTime uint32 `db:"min_exec_time"` - MaxExecTime uint32 `db:"max_exec_time"` - ExecTimes []byte `db:"exec_times"` - BlockUid uint64 `db:"block_uid"` + Slot uint64 `db:"slot"` + Proposer uint64 `db:"proposer"` + Status SlotStatus `db:"status"` + Root []byte `db:"root"` + ParentRoot []byte `db:"parent_root"` + StateRoot []byte `db:"state_root"` + Graffiti []byte `db:"graffiti"` + GraffitiText string `db:"graffiti_text"` + AttestationCount uint64 `db:"attestation_count"` + DepositCount uint64 `db:"deposit_count"` + ExitCount uint64 `db:"exit_count"` + WithdrawCount uint64 `db:"withdraw_count"` + WithdrawAmount uint64 `db:"withdraw_amount"` + AttesterSlashingCount uint64 `db:"attester_slashing_count"` + ProposerSlashingCount uint64 `db:"proposer_slashing_count"` + BLSChangeCount uint64 `db:"bls_change_count"` + EthTransactionCount uint64 `db:"eth_transaction_count"` + BlobCount uint64 `db:"blob_count"` + EthGasUsed uint64 `db:"eth_gas_used"` + EthGasLimit uint64 `db:"eth_gas_limit"` + EthBaseFee uint64 `db:"eth_base_fee"` + EthFeeRecipient []byte `db:"eth_fee_recipient"` + EthBlockNumber *uint64 `db:"eth_block_number"` + EthBlockHash []byte `db:"eth_block_hash"` + EthBlockParentHash []byte 
`db:"eth_block_parent_hash"` + EthBlockExtra []byte `db:"eth_block_extra"` + EthBlockExtraText string `db:"eth_block_extra_text"` + SyncParticipation float32 `db:"sync_participation"` + ForkId uint64 `db:"fork_id"` + BlockSize uint64 `db:"block_size"` + RecvDelay int32 `db:"recv_delay"` + MinExecTime uint32 `db:"min_exec_time"` + MaxExecTime uint32 `db:"max_exec_time"` + ExecTimes []byte `db:"exec_times"` + PayloadStatus PayloadStatus `db:"payload_status"` + BlockUid uint64 `db:"block_uid"` + BuilderIndex int64 `db:"builder_index"` // Builder index, -1 for self-built blocks (MaxUint64) } type Epoch struct { @@ -84,15 +95,18 @@ type Epoch struct { EthGasUsed uint64 `db:"eth_gas_used"` EthGasLimit uint64 `db:"eth_gas_limit"` SyncParticipation float32 `db:"sync_participation"` + PayloadCount uint64 `db:"payload_count"` } type OrphanedBlock struct { - Root []byte `db:"root"` - HeaderVer uint64 `db:"header_ver"` - HeaderSSZ []byte `db:"header_ssz"` - BlockVer uint64 `db:"block_ver"` - BlockSSZ []byte `db:"block_ssz"` - BlockUid uint64 `db:"block_uid"` + Root []byte `db:"root"` + HeaderVer uint64 `db:"header_ver"` + HeaderSSZ []byte `db:"header_ssz"` + BlockVer uint64 `db:"block_ver"` + BlockSSZ []byte `db:"block_ssz"` + PayloadVer uint64 `db:"payload_ver"` + PayloadSSZ []byte `db:"payload_ssz"` + BlockUid uint64 `db:"block_uid"` } type SlotAssignment struct { @@ -121,6 +135,8 @@ type UnfinalizedBlock struct { HeaderSSZ []byte `db:"header_ssz"` BlockVer uint64 `db:"block_ver"` BlockSSZ []byte `db:"block_ssz"` + PayloadVer uint64 `db:"payload_ver"` + PayloadSSZ []byte `db:"payload_ssz"` Status UnfinalizedBlockStatus `db:"status"` ForkId uint64 `db:"fork_id"` RecvDelay int32 `db:"recv_delay"` @@ -156,6 +172,7 @@ type UnfinalizedEpoch struct { EthGasUsed uint64 `db:"eth_gas_used"` EthGasLimit uint64 `db:"eth_gas_limit"` SyncParticipation float32 `db:"sync_participation"` + PayloadCount uint64 `db:"payload_count"` } type OrphanedEpoch struct { @@ -551,21 +568,49 @@ type 
ElTokenTransfer struct { AmountRaw []byte `db:"amount_raw"` } +// ePBS types + +type BlockBid struct { + ParentRoot []byte `db:"parent_root"` + ParentHash []byte `db:"parent_hash"` + BlockHash []byte `db:"block_hash"` + FeeRecipient []byte `db:"fee_recipient"` + GasLimit uint64 `db:"gas_limit"` + BuilderIndex int64 `db:"builder_index"` + Slot uint64 `db:"slot"` + Value uint64 `db:"value"` + ElPayment uint64 `db:"el_payment"` +} + +type Builder struct { + Pubkey []byte `db:"pubkey"` + BuilderIndex uint64 `db:"builder_index"` + Version uint8 `db:"version"` + ExecutionAddress []byte `db:"execution_address"` + DepositEpoch int64 `db:"deposit_epoch"` + WithdrawableEpoch int64 `db:"withdrawable_epoch"` + Superseded bool `db:"superseded"` +} + // Withdrawal types const ( - WithdrawalTypeFullWithdrawal = 1 // Full withdrawal after validator exit - WithdrawalTypeSweepWithdrawal = 2 // Regular scheduled sweep (excess balance) - WithdrawalTypeRequestedWithdrawal = 3 // EIP-7002 requested partial withdrawal + WithdrawalTypeFullWithdrawal = 1 // Full withdrawal after validator exit + WithdrawalTypeSweepWithdrawal = 2 // Regular scheduled sweep (excess balance) + WithdrawalTypeRequestedWithdrawal = 3 // EIP-7002 requested partial withdrawal + WithdrawalTypeBuilderFullWithdrawal = 4 // Builder sweep (full balance withdrawal) + WithdrawalTypeBuilderPayment = 5 // Builder direct payment (payload delivered) + WithdrawalTypeBuilderDelayedPayment = 6 // Builder delayed/quorum payment (missed payload) ) type Withdrawal struct { BlockUid uint64 `db:"block_uid"` BlockIdx int16 `db:"block_idx"` - Type uint8 `db:"type"` // 1=full, 2=sweep, 3=requested + Type uint8 `db:"type"` // 1=full, 2=sweep, 3=requested, 4=builder_payment, 5=builder_full, 6=builder_delayed Orphaned bool `db:"orphaned"` ForkId uint64 `db:"fork_id"` Validator uint64 `db:"validator"` AccountID uint64 `db:"account_id"` Address []byte - Amount uint64 `db:"amount"` // Gwei + RefSlot *uint64 `db:"ref_slot"` // Reference slot 
(for builder payments: the slot the payment is for) + Amount uint64 `db:"amount"` // Gwei } diff --git a/dbtypes/other.go b/dbtypes/other.go index 082c20c69..b9f054e1a 100644 --- a/dbtypes/other.go +++ b/dbtypes/other.go @@ -1,8 +1,8 @@ package dbtypes import ( - v1 "github.com/attestantio/go-eth2-client/api/v1" - "github.com/attestantio/go-eth2-client/spec/phase0" + v1 "github.com/ethpandaops/go-eth2-client/api/v1" + "github.com/ethpandaops/go-eth2-client/spec/phase0" ) type AssignedSlot struct { @@ -43,6 +43,16 @@ type UnfinalizedBlockFilter struct { WithBody bool } +type PayloadStatusMask uint8 + +const ( + PayloadStatusMaskMissing PayloadStatusMask = 0x01 + PayloadStatusMaskCanonical PayloadStatusMask = 0x02 + PayloadStatusMaskOrphaned PayloadStatusMask = 0x04 + + PayloadStatusMaskAll PayloadStatusMask = 0x07 +) + type BlockFilter struct { Graffiti string InvertGraffiti bool @@ -53,6 +63,7 @@ type BlockFilter struct { InvertProposer bool WithOrphaned uint8 WithMissing uint8 + WithPayloadMask PayloadStatusMask MinSyncParticipation *float32 MaxSyncParticipation *float32 MinExecTime *uint32 @@ -67,6 +78,8 @@ type BlockFilter struct { ForkIds []uint64 // Filter by fork IDs EthBlockNumber *uint64 // Filter by EL block number EthBlockHash []byte // Filter by EL block hash + EthBlockParentHash []byte // Filter by EL block parent hash + BuilderIndex *int64 // Filter by builder index (-1 for self-built blocks) MinGasUsed *uint64 // Filter by minimum gas used MaxGasUsed *uint64 // Filter by maximum gas used MinGasLimit *uint64 // Filter by minimum gas limit @@ -222,6 +235,43 @@ type ValidatorFilter struct { Offset uint64 } +// Builder filter types + +type BuilderOrder uint8 + +const ( + BuilderOrderIndexAsc BuilderOrder = iota + BuilderOrderIndexDesc + BuilderOrderPubKeyAsc + BuilderOrderPubKeyDesc + BuilderOrderBalanceAsc + BuilderOrderBalanceDesc + BuilderOrderDepositEpochAsc + BuilderOrderDepositEpochDesc + BuilderOrderWithdrawableEpochAsc + 
BuilderOrderWithdrawableEpochDesc +) + +type BuilderStatus uint8 + +const ( + BuilderStatusActiveFilter BuilderStatus = iota + BuilderStatusExitedFilter + BuilderStatusSupersededFilter +) + +type BuilderFilter struct { + MinIndex *uint64 + MaxIndex *uint64 + PubKey []byte + ExecutionAddress []byte + Status []BuilderStatus + + OrderBy BuilderOrder + Limit uint64 + Offset uint64 +} + // EL Explorer filters type ElTransactionFilter struct { @@ -270,7 +320,8 @@ type ElTokenTransferFilter struct { } type WithdrawalFilter struct { - Validator *uint64 + MinIndex uint64 + MaxIndex uint64 ValidatorName string AccountID *uint64 Types []uint8 diff --git a/go.mod b/go.mod index fe57d9268..35f064ee6 100644 --- a/go.mod +++ b/go.mod @@ -5,11 +5,12 @@ go 1.25.1 require ( github.com/Masterminds/sprig/v3 v3.3.0 github.com/allegro/bigcache/v3 v3.1.0 - github.com/attestantio/go-eth2-client v0.28.0 github.com/cockroachdb/pebble v1.1.5 github.com/ethereum/go-ethereum v1.17.2 + github.com/ethpandaops/eth-das-guardian v0.1.0 github.com/ethpandaops/ethcore v0.0.0-20260320045412-9cdd5d70a29c github.com/ethpandaops/ethwallclock v0.4.0 + github.com/ethpandaops/go-eth2-client v0.0.1 github.com/glebarez/go-sqlite v1.22.0 github.com/go-redis/redis/v8 v8.11.5 github.com/golang-jwt/jwt/v5 v5.3.1 @@ -22,7 +23,7 @@ require ( github.com/mashingan/smapping v0.1.19 github.com/minio/minio-go/v7 v7.0.100 github.com/mitchellh/mapstructure v1.5.0 - github.com/pk910/dynamic-ssz v1.3.0 + github.com/pk910/dynamic-ssz v1.3.1-0.20260407212738-e97de623fd84 github.com/pressly/goose/v3 v3.27.0 github.com/probe-lab/eth-das-guardian v0.2.2 github.com/protolambda/bls12-381-util v0.1.0 @@ -52,6 +53,7 @@ require ( github.com/OffchainLabs/go-bitfield v0.0.0-20251031151322-f427d04d8506 // indirect github.com/ProjectZKM/Ziren/crates/go-runtime/zkvm_runtime v0.0.0-20251001021608-1fe7b43fc4d6 // indirect github.com/VictoriaMetrics/fastcache v1.13.0 // indirect + github.com/attestantio/go-eth2-client v0.27.1 // indirect 
github.com/benbjohnson/clock v1.3.5 // indirect github.com/casbin/govaluate v1.10.0 // indirect github.com/chuckpreslar/emission v0.0.0-20170206194824-a7ddd980baf9 // indirect diff --git a/go.sum b/go.sum index f13b098a0..7b82f8fc1 100644 --- a/go.sum +++ b/go.sum @@ -37,8 +37,8 @@ github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156/go.mod h1:Cb/ax github.com/allegro/bigcache/v3 v3.1.0 h1:H2Vp8VOvxcrB91o86fUSVJFqeuz8kpyyB02eH3bSzwk= github.com/allegro/bigcache/v3 v3.1.0/go.mod h1:aPyh7jEvrog9zAwx5N7+JUQX5dZTSGpxF1LAR4dr35I= github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= -github.com/attestantio/go-eth2-client v0.28.0 h1:2zIIIMPvSD+g6h3TgVXsoda/Yw3e+wjo1e8CZEanORU= -github.com/attestantio/go-eth2-client v0.28.0/go.mod h1:PO9sHFCq+1RiG+Eh3eOR2GYvYV64Qzg7idM3kLgCs5k= +github.com/attestantio/go-eth2-client v0.27.1 h1:g7bm+gG/p+gfzYdEuxuAepVWYb8EO+2KojV5/Lo2BxM= +github.com/attestantio/go-eth2-client v0.27.1/go.mod h1:fvULSL9WtNskkOB4i+Yyr6BKpNHXvmpGZj9969fCrfY= github.com/benbjohnson/clock v1.3.5 h1:VvXlSJBzZpA/zum6Sj74hxwYI2DIxRWuNIoXAzHZz5o= github.com/benbjohnson/clock v1.3.5/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= @@ -116,10 +116,14 @@ github.com/ethereum/go-bigmodexpfix v0.0.0-20250911101455-f9e208c548ab h1:rvv6MJ github.com/ethereum/go-bigmodexpfix v0.0.0-20250911101455-f9e208c548ab/go.mod h1:IuLm4IsPipXKF7CW5Lzf68PIbZ5yl7FFd74l/E0o9A8= github.com/ethereum/go-ethereum v1.17.2 h1:ag6geu0kn8Hv5FLKTpH+Hm2DHD+iuFtuqKxEuwUsDOI= github.com/ethereum/go-ethereum v1.17.2/go.mod h1:KHcRXfGOUfUmKg51IhQ0IowiqZ6PqZf08CMtk0g5K1o= +github.com/ethpandaops/eth-das-guardian v0.1.0 h1:pEiRvtzPdF2xMjhiA/i+zmxIKd4uCsOTWxESa55zaJk= +github.com/ethpandaops/eth-das-guardian v0.1.0/go.mod h1:7amdK4bN9N9Zp0b9Y9FcbDm1YrpeGBm8ix8qpiX0WuY= github.com/ethpandaops/ethcore 
v0.0.0-20260320045412-9cdd5d70a29c h1:uBRIitwcuCJlRGioqm0jQRIojiH8DSyLRFSTCCBxN6o= github.com/ethpandaops/ethcore v0.0.0-20260320045412-9cdd5d70a29c/go.mod h1:QsmYTdesob+vQ6pW4KtRVvxLZUNop3cdtd/DgD30hJU= github.com/ethpandaops/ethwallclock v0.4.0 h1:+sgnhf4pk6hLPukP076VxkiLloE4L0Yk1yat+ZyHh1g= github.com/ethpandaops/ethwallclock v0.4.0/go.mod h1:y0Cu+mhGLlem19vnAV2x0hpFS5KZ7oOi2SWYayv9l24= +github.com/ethpandaops/go-eth2-client v0.0.1 h1:Xifvb7RF24tguA6HxEaE2vIN1BsY44SOSH/B+CBSFPk= +github.com/ethpandaops/go-eth2-client v0.0.1/go.mod h1:9BBd/XIw1egZTkxtFGMvgXnsxX6ypKHKNKD7itqjmNQ= github.com/ferranbt/fastssz v1.0.0 h1:9EXXYsracSqQRBQiHeaVsG/KQeYblPf40hsQPb9Dzk8= github.com/ferranbt/fastssz v1.0.0/go.mod h1:Ea3+oeoRGGLGm5shYAeDgu6PGUlcvQhE2fILyD9+tGg= github.com/filecoin-project/go-clock v0.1.0 h1:SFbYIM75M8NnFm1yMHhN9Ahy3W5bEZV9gd6MPfXbKVU= @@ -555,8 +559,8 @@ github.com/pion/turn/v4 v4.1.1 h1:9UnY2HB99tpDyz3cVVZguSxcqkJ1DsTSZ+8TGruh4fc= github.com/pion/turn/v4 v4.1.1/go.mod h1:2123tHk1O++vmjI5VSD0awT50NywDAq5A2NNNU4Jjs8= github.com/pion/webrtc/v4 v4.1.4 h1:/gK1ACGHXQmtyVVbJFQDxNoODg4eSRiFLB7t9r9pg8M= github.com/pion/webrtc/v4 v4.1.4/go.mod h1:Oab9npu1iZtQRMic3K3toYq5zFPvToe/QBw7dMI2ok4= -github.com/pk910/dynamic-ssz v1.3.0 h1:b6v5v3HWAmdxSVKWk4GS7Y/lhURZPAHRQ7EEMUparQk= -github.com/pk910/dynamic-ssz v1.3.0/go.mod h1:NmeFF4jxzVwWC8cnEhUB7xMI++8hd/0OZvZHFrUvFfs= +github.com/pk910/dynamic-ssz v1.3.1-0.20260407212738-e97de623fd84 h1:J3H3PiaO4+ej5HTK/nG/wnAj0jx+Ek2+0s8o+zujI4I= +github.com/pk910/dynamic-ssz v1.3.1-0.20260407212738-e97de623fd84/go.mod h1:NmeFF4jxzVwWC8cnEhUB7xMI++8hd/0OZvZHFrUvFfs= github.com/pk910/hashtree-bindings v0.1.0 h1:w7NyRWFi2OaYEFvo9ADcE/QU6PMuVLl3hBgx92KiH9c= github.com/pk910/hashtree-bindings v0.1.0/go.mod h1:zrWt88783JmhBfcgni6kkIMYRdXTZi/FL//OyI5T/l4= github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= diff --git a/handlers/address.go b/handlers/address.go index f5eded061..0992d597c 
100644 --- a/handlers/address.go +++ b/handlers/address.go @@ -11,8 +11,8 @@ import ( "strings" "time" - "github.com/attestantio/go-eth2-client/spec/phase0" "github.com/ethereum/go-ethereum/common" + "github.com/ethpandaops/go-eth2-client/spec/phase0" "github.com/gorilla/mux" "github.com/sirupsen/logrus" @@ -878,12 +878,17 @@ func loadWithdrawalsTab(ctx context.Context, pageData *models.AddressPageData, a for _, w := range dbWithdrawals { slot := w.BlockUid >> 16 entry := &models.AddressPageDataWithdrawal{ - BlockUid: w.BlockUid, - BlockTime: chainState.SlotToTime(phase0.Slot(slot)), - Type: w.Type, - Amount: w.Amount, - ValidatorIndex: w.Validator, - ValidatorName: services.GlobalBeaconService.GetValidatorName(w.Validator), + BlockUid: w.BlockUid, + BlockTime: chainState.SlotToTime(phase0.Slot(slot)), + Type: w.Type, + Amount: w.Amount, + ValidatorName: services.GlobalBeaconService.GetValidatorName(w.Validator), + } + if w.Validator&services.BuilderIndexFlag != 0 { + entry.IsBuilder = true + entry.ValidatorIndex = w.Validator &^ services.BuilderIndexFlag + } else { + entry.ValidatorIndex = w.Validator } if blockInfo, ok := blockMap[w.BlockUid]; ok && blockInfo.Block != nil { diff --git a/handlers/api/api_das_guardian.go b/handlers/api/api_das_guardian.go index 8d85bfea1..ab52ca91f 100644 --- a/handlers/api/api_das_guardian.go +++ b/handlers/api/api_das_guardian.go @@ -8,11 +8,11 @@ import ( "net/http" "time" - "github.com/attestantio/go-eth2-client/spec/phase0" "github.com/ethpandaops/dora/dbtypes" "github.com/ethpandaops/dora/services" "github.com/ethpandaops/dora/utils" - dasguardian "github.com/probe-lab/eth-das-guardian" + dasguardian "github.com/ethpandaops/eth-das-guardian" + "github.com/ethpandaops/go-eth2-client/spec/phase0" "github.com/sirupsen/logrus" ) diff --git a/handlers/api/api_das_guardian_mass.go b/handlers/api/api_das_guardian_mass.go index d0afbd0b3..ec84bab86 100644 --- a/handlers/api/api_das_guardian_mass.go +++ 
b/handlers/api/api_das_guardian_mass.go @@ -9,11 +9,11 @@ import ( "sync" "time" - "github.com/attestantio/go-eth2-client/spec/phase0" "github.com/ethpandaops/dora/dbtypes" "github.com/ethpandaops/dora/services" "github.com/ethpandaops/dora/utils" - dasguardian "github.com/probe-lab/eth-das-guardian" + dasguardian "github.com/ethpandaops/eth-das-guardian" + "github.com/ethpandaops/go-eth2-client/spec/phase0" "github.com/sirupsen/logrus" ) diff --git a/handlers/api/consolidation_requests_v1.go b/handlers/api/consolidation_requests_v1.go index 607c3e2c8..bb0de78db 100644 --- a/handlers/api/consolidation_requests_v1.go +++ b/handlers/api/consolidation_requests_v1.go @@ -6,10 +6,10 @@ import ( "net/http" "strconv" - "github.com/attestantio/go-eth2-client/spec/phase0" "github.com/ethereum/go-ethereum/common" "github.com/ethpandaops/dora/dbtypes" "github.com/ethpandaops/dora/services" + "github.com/ethpandaops/go-eth2-client/spec/phase0" "github.com/sirupsen/logrus" ) diff --git a/handlers/api/deposits_included_v1.go b/handlers/api/deposits_included_v1.go index 7da4200a0..18c811d15 100644 --- a/handlers/api/deposits_included_v1.go +++ b/handlers/api/deposits_included_v1.go @@ -7,10 +7,10 @@ import ( "strconv" "strings" - "github.com/attestantio/go-eth2-client/spec/phase0" "github.com/ethereum/go-ethereum/common" "github.com/ethpandaops/dora/dbtypes" "github.com/ethpandaops/dora/services" + "github.com/ethpandaops/go-eth2-client/spec/phase0" "github.com/sirupsen/logrus" ) diff --git a/handlers/api/deposits_queue_v1.go b/handlers/api/deposits_queue_v1.go index 2e7c5676d..754b29687 100644 --- a/handlers/api/deposits_queue_v1.go +++ b/handlers/api/deposits_queue_v1.go @@ -8,12 +8,12 @@ import ( "strconv" "strings" - v1 "github.com/attestantio/go-eth2-client/api/v1" - "github.com/attestantio/go-eth2-client/spec/phase0" "github.com/ethereum/go-ethereum/common" "github.com/ethpandaops/dora/db" "github.com/ethpandaops/dora/dbtypes" "github.com/ethpandaops/dora/services" + v1 
"github.com/ethpandaops/go-eth2-client/api/v1" + "github.com/ethpandaops/go-eth2-client/spec/phase0" "github.com/sirupsen/logrus" ) diff --git a/handlers/api/deposits_transactions_v1.go b/handlers/api/deposits_transactions_v1.go index fc5bc3922..d147c4983 100644 --- a/handlers/api/deposits_transactions_v1.go +++ b/handlers/api/deposits_transactions_v1.go @@ -7,12 +7,12 @@ import ( "strconv" "strings" - v1 "github.com/attestantio/go-eth2-client/api/v1" - "github.com/attestantio/go-eth2-client/spec/phase0" "github.com/ethereum/go-ethereum/common" "github.com/ethpandaops/dora/db" "github.com/ethpandaops/dora/dbtypes" "github.com/ethpandaops/dora/services" + v1 "github.com/ethpandaops/go-eth2-client/api/v1" + "github.com/ethpandaops/go-eth2-client/spec/phase0" "github.com/sirupsen/logrus" ) diff --git a/handlers/api/epoch_v1.go b/handlers/api/epoch_v1.go index 4670be711..b1fe7e095 100644 --- a/handlers/api/epoch_v1.go +++ b/handlers/api/epoch_v1.go @@ -6,8 +6,8 @@ import ( "net/http" "strconv" - "github.com/attestantio/go-eth2-client/spec/phase0" "github.com/ethpandaops/dora/services" + "github.com/ethpandaops/go-eth2-client/spec/phase0" "github.com/gorilla/mux" "github.com/sirupsen/logrus" ) diff --git a/handlers/api/epochs_v1.go b/handlers/api/epochs_v1.go index a0e6a7716..e02871cb5 100644 --- a/handlers/api/epochs_v1.go +++ b/handlers/api/epochs_v1.go @@ -5,9 +5,9 @@ import ( "net/http" "strconv" - "github.com/attestantio/go-eth2-client/spec/phase0" "github.com/ethpandaops/dora/dbtypes" "github.com/ethpandaops/dora/services" + "github.com/ethpandaops/go-eth2-client/spec/phase0" "github.com/sirupsen/logrus" ) diff --git a/handlers/api/general.go b/handlers/api/general.go index b11b0be4c..2e9de798c 100644 --- a/handlers/api/general.go +++ b/handlers/api/general.go @@ -10,10 +10,10 @@ import ( "strconv" "strings" - v1 "github.com/attestantio/go-eth2-client/api/v1" - "github.com/attestantio/go-eth2-client/spec/phase0" "github.com/ethpandaops/dora/dbtypes" 
"github.com/ethpandaops/dora/services" + v1 "github.com/ethpandaops/go-eth2-client/api/v1" + "github.com/ethpandaops/go-eth2-client/spec/phase0" "github.com/sirupsen/logrus" ) diff --git a/handlers/api/mev_blocks_v1.go b/handlers/api/mev_blocks_v1.go index 5f6102fa4..3bc948e7c 100644 --- a/handlers/api/mev_blocks_v1.go +++ b/handlers/api/mev_blocks_v1.go @@ -7,11 +7,11 @@ import ( "strconv" "strings" - "github.com/attestantio/go-eth2-client/spec/phase0" "github.com/ethpandaops/dora/db" "github.com/ethpandaops/dora/dbtypes" "github.com/ethpandaops/dora/services" "github.com/ethpandaops/dora/utils" + "github.com/ethpandaops/go-eth2-client/spec/phase0" "github.com/sirupsen/logrus" ) diff --git a/handlers/api/network_forks_v1.go b/handlers/api/network_forks_v1.go index 7a2c82299..ebafd5ea7 100644 --- a/handlers/api/network_forks_v1.go +++ b/handlers/api/network_forks_v1.go @@ -6,9 +6,9 @@ import ( "net/http" "sort" - "github.com/attestantio/go-eth2-client/spec/phase0" "github.com/ethpandaops/dora/clients/consensus" "github.com/ethpandaops/dora/services" + "github.com/ethpandaops/go-eth2-client/spec/phase0" "github.com/sirupsen/logrus" ) @@ -112,7 +112,8 @@ func buildNetworkForks(chainState *consensus.ChainState) []*APINetworkForkInfo { // Helper function to add consensus fork addConsensusFork := func(name string, forkEpoch *uint64, forkVersion phase0.Version) { if forkEpoch != nil && *forkEpoch < uint64(18446744073709551615) { - forkDigest := chainState.GetForkDigest(forkVersion, nil) + blobParams := chainState.GetBlobScheduleForEpoch(phase0.Epoch(*forkEpoch)) + forkDigest := chainState.GetForkDigest(forkVersion, blobParams) version := fmt.Sprintf("0x%x", forkVersion) epoch := *forkEpoch forks = append(forks, &APINetworkForkInfo{ @@ -135,6 +136,7 @@ func buildNetworkForks(chainState *consensus.ChainState) []*APINetworkForkInfo { addConsensusFork("Deneb", specs.DenebForkEpoch, specs.DenebForkVersion) addConsensusFork("Electra", specs.ElectraForkEpoch, 
specs.ElectraForkVersion) addConsensusFork("Fulu", specs.FuluForkEpoch, specs.FuluForkVersion) + addConsensusFork("Gloas", specs.GloasForkEpoch, specs.GloasForkVersion) // Add BPO forks from BLOB_SCHEDULE for i, blobSchedule := range specs.BlobSchedule { diff --git a/handlers/api/network_overview_v1.go b/handlers/api/network_overview_v1.go index e26929e30..582ce75ef 100644 --- a/handlers/api/network_overview_v1.go +++ b/handlers/api/network_overview_v1.go @@ -7,7 +7,6 @@ import ( "net/http" "time" - "github.com/attestantio/go-eth2-client/spec/phase0" "github.com/ethereum/go-ethereum/common" "github.com/ethpandaops/dora/clients/consensus" "github.com/ethpandaops/dora/clients/execution/rpc" @@ -15,6 +14,7 @@ import ( "github.com/ethpandaops/dora/dbtypes" "github.com/ethpandaops/dora/services" "github.com/ethpandaops/dora/utils" + "github.com/ethpandaops/go-eth2-client/spec/phase0" "github.com/sirupsen/logrus" ) diff --git a/handlers/api/slashings_v1.go b/handlers/api/slashings_v1.go index d613c3e7c..2b83f6ef9 100644 --- a/handlers/api/slashings_v1.go +++ b/handlers/api/slashings_v1.go @@ -7,10 +7,10 @@ import ( "strconv" "strings" - v1 "github.com/attestantio/go-eth2-client/api/v1" - "github.com/attestantio/go-eth2-client/spec/phase0" "github.com/ethpandaops/dora/dbtypes" "github.com/ethpandaops/dora/services" + v1 "github.com/ethpandaops/go-eth2-client/api/v1" + "github.com/ethpandaops/go-eth2-client/spec/phase0" "github.com/sirupsen/logrus" ) diff --git a/handlers/api/slots_v1.go b/handlers/api/slots_v1.go index 0945f0a8a..cc14abed9 100644 --- a/handlers/api/slots_v1.go +++ b/handlers/api/slots_v1.go @@ -8,7 +8,6 @@ import ( "strings" "time" - "github.com/attestantio/go-eth2-client/spec/phase0" "github.com/ethpandaops/dora/clients/execution" "github.com/ethpandaops/dora/db" "github.com/ethpandaops/dora/dbtypes" @@ -16,6 +15,7 @@ import ( "github.com/ethpandaops/dora/services" "github.com/ethpandaops/dora/types/models" "github.com/ethpandaops/dora/utils" + 
"github.com/ethpandaops/go-eth2-client/spec/phase0" "github.com/sirupsen/logrus" ) diff --git a/handlers/api/validator_eth1_v1.go b/handlers/api/validator_eth1_v1.go index 100f460db..013ebd8e2 100644 --- a/handlers/api/validator_eth1_v1.go +++ b/handlers/api/validator_eth1_v1.go @@ -8,10 +8,10 @@ import ( "strconv" "strings" - "github.com/attestantio/go-eth2-client/spec/phase0" "github.com/ethpandaops/dora/db" "github.com/ethpandaops/dora/dbtypes" "github.com/ethpandaops/dora/services" + "github.com/ethpandaops/go-eth2-client/spec/phase0" "github.com/gorilla/mux" "github.com/sirupsen/logrus" ) diff --git a/handlers/api/validator_names_v1.go b/handlers/api/validator_names_v1.go index 0a09243ed..db09482e4 100644 --- a/handlers/api/validator_names_v1.go +++ b/handlers/api/validator_names_v1.go @@ -9,8 +9,8 @@ import ( "strconv" "strings" - "github.com/attestantio/go-eth2-client/spec/phase0" "github.com/ethpandaops/dora/services" + "github.com/ethpandaops/go-eth2-client/spec/phase0" "github.com/sirupsen/logrus" ) diff --git a/handlers/api/validators_activity_v1.go b/handlers/api/validators_activity_v1.go index c6fd20be8..48d63370d 100644 --- a/handlers/api/validators_activity_v1.go +++ b/handlers/api/validators_activity_v1.go @@ -10,10 +10,10 @@ import ( "strings" "time" - "github.com/attestantio/go-eth2-client/spec/phase0" "github.com/ethpandaops/dora/indexer/beacon" "github.com/ethpandaops/dora/services" "github.com/ethpandaops/dora/types/models" + "github.com/ethpandaops/go-eth2-client/spec/phase0" "github.com/sirupsen/logrus" ) diff --git a/handlers/api/validators_v1.go b/handlers/api/validators_v1.go index 78ed1e51e..abebf5240 100644 --- a/handlers/api/validators_v1.go +++ b/handlers/api/validators_v1.go @@ -8,9 +8,9 @@ import ( "strconv" "strings" - v1 "github.com/attestantio/go-eth2-client/api/v1" "github.com/ethpandaops/dora/dbtypes" "github.com/ethpandaops/dora/services" + v1 "github.com/ethpandaops/go-eth2-client/api/v1" "github.com/sirupsen/logrus" ) diff 
--git a/handlers/api/voluntary_exits_v1.go b/handlers/api/voluntary_exits_v1.go index 60e53a665..bff45babe 100644 --- a/handlers/api/voluntary_exits_v1.go +++ b/handlers/api/voluntary_exits_v1.go @@ -7,10 +7,10 @@ import ( "strconv" "strings" - v1 "github.com/attestantio/go-eth2-client/api/v1" - "github.com/attestantio/go-eth2-client/spec/phase0" "github.com/ethpandaops/dora/dbtypes" "github.com/ethpandaops/dora/services" + v1 "github.com/ethpandaops/go-eth2-client/api/v1" + "github.com/ethpandaops/go-eth2-client/spec/phase0" "github.com/sirupsen/logrus" ) diff --git a/handlers/api/withdrawal_requests_v1.go b/handlers/api/withdrawal_requests_v1.go index 6daed46fe..18e133972 100644 --- a/handlers/api/withdrawal_requests_v1.go +++ b/handlers/api/withdrawal_requests_v1.go @@ -6,11 +6,11 @@ import ( "net/http" "strconv" - "github.com/attestantio/go-eth2-client/spec/phase0" "github.com/ethereum/go-ethereum/common" "github.com/ethpandaops/dora/clients/consensus" "github.com/ethpandaops/dora/dbtypes" "github.com/ethpandaops/dora/services" + "github.com/ethpandaops/go-eth2-client/spec/phase0" "github.com/sirupsen/logrus" ) diff --git a/handlers/blobs.go b/handlers/blobs.go index aca3c16d7..a49d73074 100644 --- a/handlers/blobs.go +++ b/handlers/blobs.go @@ -5,12 +5,12 @@ import ( "net/http" "time" - "github.com/attestantio/go-eth2-client/spec/phase0" "github.com/ethpandaops/dora/db" "github.com/ethpandaops/dora/dbtypes" "github.com/ethpandaops/dora/services" "github.com/ethpandaops/dora/templates" "github.com/ethpandaops/dora/types/models" + "github.com/ethpandaops/go-eth2-client/spec/phase0" "github.com/sirupsen/logrus" ) diff --git a/handlers/blocks.go b/handlers/blocks.go index f6d998425..0fc8a2c54 100644 --- a/handlers/blocks.go +++ b/handlers/blocks.go @@ -11,7 +11,6 @@ import ( "strings" "time" - "github.com/attestantio/go-eth2-client/spec/phase0" "github.com/ethpandaops/dora/clients/execution" "github.com/ethpandaops/dora/db" "github.com/ethpandaops/dora/dbtypes" @@ 
-20,6 +19,7 @@ import ( "github.com/ethpandaops/dora/templates" "github.com/ethpandaops/dora/types/models" "github.com/ethpandaops/dora/utils" + "github.com/ethpandaops/go-eth2-client/spec/phase0" "github.com/sirupsen/logrus" ) @@ -114,6 +114,7 @@ func buildBlocksPageData(ctx context.Context, firstSlot uint64, pageSize uint64, 17: true, 18: false, 19: false, + 20: false, // Builder (hidden by default) } } @@ -148,6 +149,7 @@ func buildBlocksPageData(ctx context.Context, firstSlot uint64, pageSize uint64, pageData.DisplayBlockSize = displayMap[17] pageData.DisplayRecvDelay = displayMap[18] pageData.DisplayExecTime = displayMap[19] + pageData.DisplayBuilder = displayMap[20] pageData.DisplayColCount = uint64(len(displayMap)) chainState := services.GlobalBeaconService.GetChainState() @@ -298,6 +300,18 @@ func buildBlocksPageData(ctx context.Context, firstSlot uint64, pageSize uint64, } } + // Add builder info + if pageData.DisplayBuilder { + if dbSlot.BuilderIndex == -1 { + slotData.HasBuilder = true + slotData.BuilderIndex = math.MaxUint64 + } else if dbSlot.BuilderIndex >= 0 { + slotData.HasBuilder = true + slotData.BuilderIndex = uint64(dbSlot.BuilderIndex) + slotData.BuilderName = services.GlobalBeaconService.GetValidatorName(uint64(dbSlot.BuilderIndex) | services.BuilderIndexFlag) + } + } + // Add execution times if available if pageData.DisplayExecTime && dbSlot.MinExecTime > 0 && dbSlot.MaxExecTime > 0 { slotData.MinExecTime = dbSlot.MinExecTime diff --git a/handlers/blocks_filtered.go b/handlers/blocks_filtered.go index ddadef785..5838dbd25 100644 --- a/handlers/blocks_filtered.go +++ b/handlers/blocks_filtered.go @@ -8,13 +8,13 @@ import ( "strconv" "strings" - "github.com/attestantio/go-eth2-client/spec/phase0" "github.com/ethpandaops/dora/db" "github.com/ethpandaops/dora/dbtypes" "github.com/ethpandaops/dora/services" "github.com/ethpandaops/dora/templates" "github.com/ethpandaops/dora/types/models" "github.com/ethpandaops/dora/utils" + 
"github.com/ethpandaops/go-eth2-client/spec/phase0" "github.com/sirupsen/logrus" ) diff --git a/handlers/builder.go b/handlers/builder.go new file mode 100644 index 000000000..2eec185a2 --- /dev/null +++ b/handlers/builder.go @@ -0,0 +1,505 @@ +package handlers + +import ( + "context" + "encoding/hex" + "errors" + "fmt" + "net/http" + "strconv" + "strings" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethpandaops/go-eth2-client/spec/gloas" + "github.com/ethpandaops/go-eth2-client/spec/phase0" + "github.com/gorilla/mux" + "github.com/sirupsen/logrus" + + "github.com/ethpandaops/dora/clients/consensus" + "github.com/ethpandaops/dora/db" + "github.com/ethpandaops/dora/dbtypes" + "github.com/ethpandaops/dora/indexer/beacon" + "github.com/ethpandaops/dora/services" + "github.com/ethpandaops/dora/templates" + "github.com/ethpandaops/dora/types/models" +) + +// BuilderDetail will return the main "builder" page using a go template +func BuilderDetail(w http.ResponseWriter, r *http.Request) { + var builderTemplateFiles = append(layoutTemplateFiles, + "builder/builder.html", + "builder/recentBlocks.html", + "builder/recentBids.html", + "builder/recentDeposits.html", + "builder/withdrawals.html", + "_shared/txDetailsModal.html", + "_svg/timeline.html", + ) + var notfoundTemplateFiles = append(layoutTemplateFiles, + "builder/notfound.html", + ) + + var pageTemplate = templates.GetTemplate(builderTemplateFiles...) 
+ data := InitPageData(w, r, "builders", "/builder", "Builder", builderTemplateFiles) + + var builder *gloas.Builder + var builderIndex uint64 + var superseded bool + + vars := mux.Vars(r) + idxOrPubKey := strings.Replace(vars["idxOrPubKey"], "0x", "", -1) + builderPubKey, err := hex.DecodeString(idxOrPubKey) + if err != nil || len(builderPubKey) != 48 { + // search by index + idx, err := strconv.ParseUint(vars["idxOrPubKey"], 10, 64) + if err == nil { + builderIndex = idx + builder = services.GlobalBeaconService.GetBuilderByIndex(gloas.BuilderIndex(idx)) + if builder == nil { + // Try from DB + dbBuilder := db.GetActiveBuilderByIndex(r.Context(), idx) + if dbBuilder != nil { + builder = beacon.UnwrapDbBuilder(dbBuilder) + superseded = dbBuilder.Superseded + } + } + } + } else { + // search by pubkey - check cache first (more accurate), then fall back to DB + var pubkey phase0.BLSPubKey + copy(pubkey[:], builderPubKey) + if validatorIdx, found := services.GlobalBeaconService.GetValidatorIndexByPubkey(pubkey); found { + idx := uint64(validatorIdx) + if idx&services.BuilderIndexFlag != 0 { + builderIndex = idx &^ services.BuilderIndexFlag + builder = services.GlobalBeaconService.GetBuilderByIndex(gloas.BuilderIndex(builderIndex)) + } + } + + if builder == nil { + // Fall back to DB + dbBuilder := db.GetBuilderByPubkey(r.Context(), builderPubKey) + if dbBuilder != nil { + builderIndex = dbBuilder.BuilderIndex + superseded = dbBuilder.Superseded + builder = services.GlobalBeaconService.GetBuilderByIndex(gloas.BuilderIndex(dbBuilder.BuilderIndex)) + if builder == nil { + builder = beacon.UnwrapDbBuilder(dbBuilder) + } + } + } + } + + if builder == nil { + data := InitPageData(w, r, "builders", "/builder", "Builder not found", notfoundTemplateFiles) + w.Header().Set("Content-Type", "text/html") + handleTemplateError(w, r, "builder.go", "BuilderDetail", "", templates.GetTemplate(notfoundTemplateFiles...).ExecuteTemplate(w, "layout", data)) + return + } + + tabView := 
"blocks" + if r.URL.Query().Has("v") { + tabView = r.URL.Query().Get("v") + } + + var pageError error + pageError = services.GlobalCallRateLimiter.CheckCallLimit(r, 1) + if pageError == nil { + data.Data, pageError = getBuilderPageData(builderIndex, superseded, tabView) + } + if data.Data == nil { + pageError = errors.New("builder not found") + } + if pageError != nil { + handlePageError(w, r, pageError) + return + } + w.Header().Set("Content-Type", "text/html") + + if r.URL.Query().Has("lazy") { + // return the selected tab content only (lazy loaded) + handleTemplateError(w, r, "builder.go", "BuilderDetail", "", pageTemplate.ExecuteTemplate(w, "lazyPage", data.Data)) + } else { + handleTemplateError(w, r, "builder.go", "BuilderDetail", "", pageTemplate.ExecuteTemplate(w, "layout", data)) + } +} + +func getBuilderPageData(builderIndex uint64, superseded bool, tabView string) (*models.BuilderPageData, error) { + pageData := &models.BuilderPageData{} + pageCacheKey := fmt.Sprintf("builder:%v:%v", builderIndex, tabView) + pageRes, pageErr := services.GlobalFrontendCache.ProcessCachedPage(pageCacheKey, true, pageData, func(pageCall *services.FrontendCacheProcessingPage) interface{} { + pageData, cacheTimeout := buildBuilderPageData(pageCall.CallCtx, builderIndex, superseded, tabView) + pageCall.CacheTimeout = cacheTimeout + return pageData + }) + if pageErr == nil && pageRes != nil { + resData, resOk := pageRes.(*models.BuilderPageData) + if !resOk { + return nil, ErrInvalidPageModel + } + pageData = resData + } + return pageData, pageErr +} + +func buildBuilderPageData(ctx context.Context, builderIndex uint64, superseded bool, tabView string) (*models.BuilderPageData, time.Duration) { + logrus.Debugf("builder page called: %v", builderIndex) + + chainState := services.GlobalBeaconService.GetChainState() + specs := chainState.GetSpecs() + currentEpoch := chainState.CurrentEpoch() + + // Get builder data + builder := 
services.GlobalBeaconService.GetBuilderByIndex(gloas.BuilderIndex(builderIndex)) + if builder == nil { + // Try from DB + dbBuilder := db.GetActiveBuilderByIndex(ctx, builderIndex) + if dbBuilder != nil { + builder = beacon.UnwrapDbBuilder(dbBuilder) + superseded = dbBuilder.Superseded + } + } + if builder == nil { + return nil, 0 + } + + // Override balance from the latest epoch state (builder cache doesn't track balance changes within epochs). + balances := services.GlobalBeaconService.GetBuilderBalances() + if int(builderIndex) < len(balances) { + builder.Balance = balances[builderIndex] + } + + // Determine state + finalizedEpoch, _ := chainState.GetFinalizedCheckpoint() + state := "Active" + if superseded { + state = "Superseded" + } else if builder.WithdrawableEpoch <= currentEpoch { + state = "Exited" + } else if builder.DepositEpoch > finalizedEpoch { + state = "Pending" + } + + pageData := &models.BuilderPageData{ + CurrentEpoch: uint64(currentEpoch), + Index: builderIndex, + Name: services.GlobalBeaconService.GetValidatorName(builderIndex | services.BuilderIndexFlag), + PublicKey: builder.PublicKey[:], + Balance: uint64(builder.Balance), + ExecutionAddress: builder.ExecutionAddress[:], + Version: builder.Version, + State: state, + IsSuperseded: superseded, + TabView: tabView, + GloasIsActive: specs.GloasForkEpoch != nil && uint64(currentEpoch) >= *specs.GloasForkEpoch, + } + + // Deposit epoch + if builder.DepositEpoch < 18446744073709551615 { + pageData.ShowDeposit = true + pageData.DepositEpoch = uint64(builder.DepositEpoch) + pageData.DepositTs = chainState.EpochToTime(builder.DepositEpoch) + } + + // Withdrawable epoch + if builder.WithdrawableEpoch < 18446744073709551615 { + pageData.ShowWithdrawable = true + pageData.WithdrawableEpoch = uint64(builder.WithdrawableEpoch) + pageData.WithdrawableTs = chainState.EpochToTime(builder.WithdrawableEpoch) + } + + // Check for exit reason if builder has exited or is exiting + if pageData.ShowWithdrawable { + 
builderIndexWithFlag := builderIndex | services.BuilderIndexFlag + + // Check for voluntary exit + if exits, totalExits := services.GlobalBeaconService.GetVoluntaryExitsByFilter(ctx, &dbtypes.VoluntaryExitFilter{ + MinIndex: builderIndexWithFlag, + MaxIndex: builderIndexWithFlag, + }, 0, 1); totalExits > 0 && len(exits) > 0 { + pageData.ExitReason = "Builder submitted a voluntary exit request" + pageData.ExitReasonVoluntaryExit = true + pageData.ExitReasonSlot = exits[0].SlotNumber + + // Check for EL-triggered withdrawal request (full exit with amount=0) + } else { + zeroAmount := uint64(0) + if withdrawals, totalPendingTxs, totalReqs := services.GlobalBeaconService.GetWithdrawalRequestsByFilter(ctx, &services.CombinedWithdrawalRequestFilter{ + Filter: &dbtypes.WithdrawalRequestFilter{ + PublicKey: builder.PublicKey[:], + MaxAmount: &zeroAmount, + }, + }, 0, 1); totalPendingTxs+totalReqs > 0 && len(withdrawals) > 0 { + withdrawal := withdrawals[0] + pageData.ExitReason = "Builder submitted a full withdrawal request" + pageData.ExitReasonWithdrawal = true + if withdrawal.Request != nil { + pageData.ExitReasonSlot = withdrawal.Request.SlotNumber + } + + if withdrawal.Transaction != nil { + pageData.ExitReasonTxHash = withdrawal.Transaction.TxHash + pageData.ExitReasonTxDetails = &models.BuilderPageDataExitTxDetails{ + BlockNumber: withdrawal.Transaction.BlockNumber, + BlockHash: fmt.Sprintf("%#x", withdrawal.Transaction.BlockRoot), + BlockTime: withdrawal.Transaction.BlockTime, + TxOrigin: common.Address(withdrawal.Transaction.TxSender).Hex(), + TxTarget: common.Address(withdrawal.Transaction.TxTarget).Hex(), + TxHash: fmt.Sprintf("%#x", withdrawal.Transaction.TxHash), + } + } + } + } + } + + // Load tab-specific data + switch tabView { + case "blocks": + pageData.RecentBlocks = buildBuilderRecentBlocks(ctx, builderIndex, chainState) + if len(pageData.RecentBlocks) >= 20 { + pageData.HasMoreBlocks = true + } + case "bids": + pageData.RecentBids = 
buildBuilderRecentBids(ctx, builderIndex, chainState) + case "deposits": + pageData.RecentDeposits = buildBuilderRecentDeposits(ctx, builder.PublicKey[:], chainState) + case "withdrawals": + builderValidatorIndex := builderIndex | services.BuilderIndexFlag + withdrawalFilter := &dbtypes.WithdrawalFilter{ + MinIndex: builderValidatorIndex, + MaxIndex: builderValidatorIndex, + WithOrphaned: 1, + } + dbWithdrawals, totalRows := services.GlobalBeaconService.GetWithdrawalsByFilter(ctx, withdrawalFilter, 0, 10) + if totalRows > 10 { + pageData.AdditionalWithdrawalCount = totalRows - 10 + } + + // Batch resolve blocks (including ref slot blocks) + blockUids := make([]uint64, 0, len(dbWithdrawals)*2) + blockUidSet := make(map[uint64]bool, len(dbWithdrawals)*2) + for _, w := range dbWithdrawals { + if !blockUidSet[w.BlockUid] { + blockUidSet[w.BlockUid] = true + blockUids = append(blockUids, w.BlockUid) + } + if w.RefSlot != nil && !blockUidSet[*w.RefSlot] { + blockUidSet[*w.RefSlot] = true + blockUids = append(blockUids, *w.RefSlot) + } + } + blockMap := make(map[uint64]*dbtypes.AssignedSlot, len(blockUids)) + if len(blockUids) > 0 { + blockFilter := &dbtypes.BlockFilter{ + BlockUids: blockUids, + WithOrphaned: 1, + } + blocks := services.GlobalBeaconService.GetDbBlocksByFilter(ctx, blockFilter, 0, uint32(len(blockUids)), 0) + for _, b := range blocks { + if b.Block != nil { + blockMap[b.Block.BlockUid] = b + } + } + } + + for _, w := range dbWithdrawals { + slot := w.BlockUid >> 16 + wd := &models.BuilderPageDataWithdrawal{ + SlotNumber: slot, + Time: chainState.SlotToTime(phase0.Slot(slot)), + Orphaned: w.Orphaned, + Type: w.Type, + Amount: w.Amount, + } + + if blockInfo, ok := blockMap[w.BlockUid]; ok && blockInfo.Block != nil { + wd.BlockRoot = blockInfo.Block.Root + } + + if w.RefSlot != nil { + wd.RefSlot = *w.RefSlot >> 16 + if refBlock, ok := blockMap[*w.RefSlot]; ok && refBlock.Block != nil { + wd.RefSlotRoot = refBlock.Block.Root + } + } + + pageData.Withdrawals 
= append(pageData.Withdrawals, wd) + } + pageData.WithdrawalCount = uint64(len(pageData.Withdrawals)) + } + + return pageData, 10 * time.Minute +} + +func buildBuilderRecentBlocks(ctx context.Context, builderIndex uint64, chainState *consensus.ChainState) []*models.BuilderPageDataBlock { + // Filter blocks by builder index using the DB filter + builderIndexInt64 := int64(builderIndex) + filter := &dbtypes.BlockFilter{ + BuilderIndex: &builderIndexInt64, + WithOrphaned: 1, // Include both canonical and orphaned + WithMissing: 0, // Exclude missing blocks + } + + // Get blocks built by this builder via chainservice (cache + DB) + dbBlocks := services.GlobalBeaconService.GetDbBlocksByFilter(ctx, filter, 0, 20, 0) + + validBlocks := make([]*dbtypes.Slot, 0, len(dbBlocks)) + for _, assignedSlot := range dbBlocks { + if assignedSlot.Block == nil { + continue + } + slot := assignedSlot.Block + + // Only include blocks with actual payloads + if slot.PayloadStatus != dbtypes.PayloadStatusCanonical && slot.PayloadStatus != dbtypes.PayloadStatusOrphaned { + continue + } + + if len(slot.EthBlockHash) > 0 { + validBlocks = append(validBlocks, slot) + } + } + + // Look up bids via the indexer's bid accessor (checks in-memory cache first, then DB). + // Bids are keyed by parent block root, so we look up per block and match by block hash + builder. 
+ indexer := services.GlobalBeaconService.GetBeaconIndexer() + + // Build result + blocks := make([]*models.BuilderPageDataBlock, 0, len(validBlocks)) + for _, slot := range validBlocks { + block := &models.BuilderPageDataBlock{ + Epoch: uint64(chainState.EpochOfSlot(phase0.Slot(slot.Slot))), + Slot: slot.Slot, + Ts: chainState.SlotToTime(phase0.Slot(slot.Slot)), + BlockRoot: slot.Root, + BlockHash: slot.EthBlockHash, + Status: uint16(slot.PayloadStatus), + FeeRecipient: slot.EthFeeRecipient, + GasLimit: slot.EthGasLimit, + } + + // Look up bid by parent root, then match by block hash and builder index + var parentRoot phase0.Root + copy(parentRoot[:], slot.ParentRoot) + bids := indexer.GetBlockBids(parentRoot) + for _, bid := range bids { + if bid.BuilderIndex == builderIndexInt64 && fmt.Sprintf("%x", bid.BlockHash) == fmt.Sprintf("%x", slot.EthBlockHash) { + block.Value = bid.Value + block.ElPayment = bid.ElPayment + break + } + } + + blocks = append(blocks, block) + } + + return blocks +} + +func buildBuilderRecentBids(ctx context.Context, builderIndex uint64, chainState *consensus.ChainState) []*models.BuilderPageDataBid { + bids, _ := db.GetBidsByBuilderIndex(ctx, builderIndex, 0, 20) + if len(bids) == 0 { + return nil + } + + // Collect block hashes and determine slot range for batch lookup + bidBlockHashes := make(map[string]bool, len(bids)) + var minSlot, maxSlot uint64 + for i, bid := range bids { + bidBlockHashes[fmt.Sprintf("%x", bid.BlockHash)] = true + if i == 0 || bid.Slot > maxSlot { + maxSlot = bid.Slot + } + if i == 0 || bid.Slot < minSlot { + minSlot = bid.Slot + } + } + + // Batch fetch blocks for the slot range via chainservice (covers cache + DB) + canonicalBlockHashes := make(map[string]bool, len(bids)) + builderIndexInt64 := int64(builderIndex) + blockFilter := &dbtypes.BlockFilter{ + BuilderIndex: &builderIndexInt64, + MinSlot: &minSlot, + MaxSlot: &maxSlot, + WithOrphaned: 1, + WithMissing: 0, + } + dbBlocks := 
services.GlobalBeaconService.GetDbBlocksByFilter(ctx, blockFilter, 0, uint32(len(bids)*2), 0) + for _, assignedSlot := range dbBlocks { + if assignedSlot.Block == nil { + continue + } + hashKey := fmt.Sprintf("%x", assignedSlot.Block.EthBlockHash) + if bidBlockHashes[hashKey] && assignedSlot.Block.PayloadStatus == dbtypes.PayloadStatusCanonical { + canonicalBlockHashes[hashKey] = true + } + } + + result := make([]*models.BuilderPageDataBid, 0, len(bids)) + for _, bid := range bids { + bidData := &models.BuilderPageDataBid{ + Slot: bid.Slot, + Ts: chainState.SlotToTime(phase0.Slot(bid.Slot)), + ParentRoot: bid.ParentRoot, + ParentHash: bid.ParentHash, + BlockHash: bid.BlockHash, + FeeRecipient: bid.FeeRecipient, + GasLimit: bid.GasLimit, + Value: bid.Value, + ElPayment: bid.ElPayment, + IsWinning: canonicalBlockHashes[fmt.Sprintf("%x", bid.BlockHash)], + } + + result = append(result, bidData) + } + + return result +} + +func buildBuilderRecentDeposits(ctx context.Context, pubkey []byte, chainState *consensus.ChainState) []*models.BuilderPageDataDeposit { + result := make([]*models.BuilderPageDataDeposit, 0) + + // Query deposit requests by builder pubkey + depositFilter := &services.CombinedDepositRequestFilter{ + Filter: &dbtypes.DepositTxFilter{ + PublicKey: pubkey, + WithOrphaned: 1, + }, + } + deposits, _ := services.GlobalBeaconService.GetDepositRequestsByFilter(ctx, depositFilter, 0, 20) + for _, deposit := range deposits { + entry := &models.BuilderPageDataDeposit{ + Type: "deposit", + Amount: deposit.Amount(), + DepositorAddress: deposit.SourceAddress(), + } + if deposit.Request != nil { + entry.SlotNumber = deposit.Request.SlotNumber + entry.SlotRoot = deposit.Request.SlotRoot + entry.Time = chainState.SlotToTime(phase0.Slot(deposit.Request.SlotNumber)) + entry.Orphaned = deposit.RequestOrphaned + } else if deposit.Transaction != nil { + entry.Time = chainState.SlotToTime(phase0.Slot(deposit.Transaction.BlockTime)) + } + + // Add transaction details if 
available + if deposit.Transaction != nil { + entry.HasTransaction = true + entry.TransactionHash = deposit.Transaction.TxHash + entry.TransactionDetails = &models.BuilderPageDataDepositTxDetails{ + BlockNumber: deposit.Transaction.BlockNumber, + BlockHash: fmt.Sprintf("%#x", deposit.Transaction.BlockRoot), + BlockTime: deposit.Transaction.BlockTime, + TxOrigin: common.Address(deposit.Transaction.TxSender).Hex(), + TxTarget: common.Address(deposit.Transaction.TxTarget).Hex(), + TxHash: fmt.Sprintf("%#x", deposit.Transaction.TxHash), + } + } + + result = append(result, entry) + } + + return result +} diff --git a/handlers/builders.go b/handlers/builders.go new file mode 100644 index 000000000..e2d3ce73e --- /dev/null +++ b/handlers/builders.go @@ -0,0 +1,298 @@ +package handlers + +import ( + "context" + "encoding/hex" + "encoding/json" + "fmt" + "net/http" + "net/url" + "sort" + "strconv" + "strings" + "time" + + "github.com/ethpandaops/dora/dbtypes" + "github.com/ethpandaops/dora/services" + "github.com/ethpandaops/dora/templates" + "github.com/ethpandaops/dora/types/models" + "github.com/sirupsen/logrus" +) + +// Builders will return the main "builders" page using a go template +func Builders(w http.ResponseWriter, r *http.Request) { + var buildersTemplateFiles = append(layoutTemplateFiles, + "builders/builders.html", + "_svg/professor.html", + ) + + var pageTemplate = templates.GetTemplate(buildersTemplateFiles...) 
+ data := InitPageData(w, r, "builders", "/builders", "Builders", buildersTemplateFiles) + + urlArgs := r.URL.Query() + var pageNumber uint64 = 1 + if urlArgs.Has("p") { + pageNumber, _ = strconv.ParseUint(urlArgs.Get("p"), 10, 64) + } + var pageSize uint64 = 50 + if urlArgs.Has("c") { + pageSize, _ = strconv.ParseUint(urlArgs.Get("c"), 10, 64) + } + if urlArgs.Has("json") && pageSize > 10000 { + pageSize = 10000 + } else if !urlArgs.Has("json") && pageSize > 1000 { + pageSize = 1000 + } + + var filterPubKey string + var filterIndex string + var filterExecutionAddr string + var filterStatus string + if urlArgs.Has("f") { + if urlArgs.Has("f.pubkey") { + filterPubKey = urlArgs.Get("f.pubkey") + } + if urlArgs.Has("f.index") { + filterIndex = urlArgs.Get("f.index") + } + if urlArgs.Has("f.execution_addr") { + filterExecutionAddr = urlArgs.Get("f.execution_addr") + } + if urlArgs.Has("f.status") { + filterStatus = strings.Join(urlArgs["f.status"], ",") + } + } + var sortOrder string + if urlArgs.Has("o") { + sortOrder = urlArgs.Get("o") + } + + var pageError error + pageError = services.GlobalCallRateLimiter.CheckCallLimit(r, 1) + if pageError == nil { + data.Data, pageError = getBuildersPageData(pageNumber, pageSize, sortOrder, filterPubKey, filterIndex, filterExecutionAddr, filterStatus) + } + if pageError != nil { + handlePageError(w, r, pageError) + return + } + + if urlArgs.Has("json") { + w.Header().Set("Content-Type", "application/json") + err := json.NewEncoder(w).Encode(data.Data) + if err != nil { + logrus.WithError(err).Error("error encoding builders data") + http.Error(w, "Internal server error", http.StatusServiceUnavailable) + } + return + } + + w.Header().Set("Content-Type", "text/html") + if handleTemplateError(w, r, "builders.go", "Builders", "", pageTemplate.ExecuteTemplate(w, "layout", data)) != nil { + return // an error has occurred and was processed + } +} + +func getBuildersPageData(pageNumber uint64, pageSize uint64, sortOrder string, 
filterPubKey string, filterIndex string, filterExecutionAddr string, filterStatus string) (*models.BuildersPageData, error) { + pageData := &models.BuildersPageData{} + pageCacheKey := fmt.Sprintf("builders:%v:%v:%v:%v:%v:%v:%v", pageNumber, pageSize, sortOrder, filterPubKey, filterIndex, filterExecutionAddr, filterStatus) + pageRes, pageErr := services.GlobalFrontendCache.ProcessCachedPage(pageCacheKey, true, pageData, func(pageCall *services.FrontendCacheProcessingPage) interface{} { + pageData, cacheTimeout := buildBuildersPageData(pageCall.CallCtx, pageNumber, pageSize, sortOrder, filterPubKey, filterIndex, filterExecutionAddr, filterStatus) + pageCall.CacheTimeout = cacheTimeout + return pageData + }) + if pageErr == nil && pageRes != nil { + resData, resOk := pageRes.(*models.BuildersPageData) + if !resOk { + return nil, ErrInvalidPageModel + } + pageData = resData + } + return pageData, pageErr +} + +func buildBuildersPageData(ctx context.Context, pageNumber uint64, pageSize uint64, sortOrder string, filterPubKey string, filterIndex string, filterExecutionAddr string, filterStatus string) (*models.BuildersPageData, time.Duration) { + logrus.Debugf("builders page called: %v:%v:%v:%v:%v:%v:%v", pageNumber, pageSize, sortOrder, filterPubKey, filterIndex, filterExecutionAddr, filterStatus) + pageData := &models.BuildersPageData{} + cacheTime := 10 * time.Minute + + chainState := services.GlobalBeaconService.GetChainState() + + builderFilter := dbtypes.BuilderFilter{ + Limit: pageSize, + Offset: (pageNumber - 1) * pageSize, + } + + filterArgs := url.Values{} + if filterPubKey != "" || filterIndex != "" || filterExecutionAddr != "" || filterStatus != "" { + if filterPubKey != "" { + pageData.FilterPubKey = filterPubKey + filterArgs.Add("f.pubkey", filterPubKey) + filterPubKeyVal, _ := hex.DecodeString(strings.Replace(filterPubKey, "0x", "", -1)) + builderFilter.PubKey = filterPubKeyVal + } + if filterIndex != "" { + pageData.FilterIndex = filterIndex + 
filterArgs.Add("f.index", filterIndex) + filterIndexVal, _ := strconv.ParseUint(filterIndex, 10, 64) + builderFilter.MinIndex = &filterIndexVal + builderFilter.MaxIndex = &filterIndexVal + } + if filterExecutionAddr != "" { + pageData.FilterExecutionAddr = filterExecutionAddr + filterArgs.Add("f.execution_addr", filterExecutionAddr) + filterExecutionAddrVal, _ := hex.DecodeString(strings.Replace(filterExecutionAddr, "0x", "", -1)) + builderFilter.ExecutionAddress = filterExecutionAddrVal + } + if filterStatus != "" { + pageData.FilterStatus = filterStatus + filterArgs.Add("f.status", filterStatus) + filterStatusVal := strings.Split(filterStatus, ",") + builderFilter.Status = make([]dbtypes.BuilderStatus, 0, len(filterStatusVal)) + for _, status := range filterStatusVal { + switch status { + case "active": + builderFilter.Status = append(builderFilter.Status, dbtypes.BuilderStatusActiveFilter) + case "exited": + builderFilter.Status = append(builderFilter.Status, dbtypes.BuilderStatusExitedFilter) + case "superseded": + builderFilter.Status = append(builderFilter.Status, dbtypes.BuilderStatusSupersededFilter) + } + } + } + } + + // apply sort order + switch sortOrder { + case "index-d": + builderFilter.OrderBy = dbtypes.BuilderOrderIndexDesc + case "pubkey": + builderFilter.OrderBy = dbtypes.BuilderOrderPubKeyAsc + case "pubkey-d": + builderFilter.OrderBy = dbtypes.BuilderOrderPubKeyDesc + case "balance": + builderFilter.OrderBy = dbtypes.BuilderOrderBalanceAsc + case "balance-d": + builderFilter.OrderBy = dbtypes.BuilderOrderBalanceDesc + case "deposit": + builderFilter.OrderBy = dbtypes.BuilderOrderDepositEpochAsc + case "deposit-d": + builderFilter.OrderBy = dbtypes.BuilderOrderDepositEpochDesc + case "withdrawable": + builderFilter.OrderBy = dbtypes.BuilderOrderWithdrawableEpochAsc + case "withdrawable-d": + builderFilter.OrderBy = dbtypes.BuilderOrderWithdrawableEpochDesc + default: + builderFilter.OrderBy = dbtypes.BuilderOrderIndexAsc + 
pageData.IsDefaultSorting = true + sortOrder = "index" + } + pageData.Sorting = sortOrder + + // get latest builder set + builderSetRsp, builderSetLen := services.GlobalBeaconService.GetFilteredBuilderSet(ctx, &builderFilter, true) + if len(builderSetRsp) == 0 { + cacheTime = 5 * time.Minute + } + + currentEpoch := chainState.CurrentEpoch() + finalizedEpoch, _ := chainState.GetFinalizedCheckpoint() + + // get status options + pageData.FilterStatusOpts = []models.BuildersPageDataStatusOption{ + {Status: "active", Count: 0}, + {Status: "exited", Count: 0}, + {Status: "superseded", Count: 0}, + } + + totalPages := builderSetLen / pageSize + if (builderSetLen % pageSize) > 0 { + totalPages++ + } + if pageNumber == 0 { + pageData.IsDefaultPage = true + } else if pageNumber >= totalPages { + if totalPages == 0 { + pageNumber = 0 + } else { + pageNumber = totalPages + } + } + + pageData.PageSize = pageSize + pageData.TotalPages = totalPages + pageData.CurrentPageIndex = pageNumber + if pageNumber > 1 { + pageData.PrevPageIndex = pageNumber - 1 + } + if pageNumber < totalPages { + pageData.NextPageIndex = pageNumber + 1 + } + if totalPages > 1 { + pageData.LastPageIndex = totalPages + } + + // get builders + pageData.Builders = make([]*models.BuildersPageDataBuilder, 0, len(builderSetRsp)) + + for _, builder := range builderSetRsp { + if builder.Builder == nil { + continue + } + + builderData := &models.BuildersPageDataBuilder{ + Index: uint64(builder.Index), + PublicKey: builder.Builder.PublicKey[:], + ExecutionAddress: builder.Builder.ExecutionAddress[:], + Balance: uint64(builder.Builder.Balance), + } + + // Determine state + if builder.Superseded { + builderData.State = "Superseded" + } else if builder.Builder.WithdrawableEpoch <= currentEpoch { + builderData.State = "Exited" + } else if builder.Builder.DepositEpoch > finalizedEpoch { + builderData.State = "Pending" + } else { + builderData.State = "Active" + } + + // Deposit epoch + if builder.Builder.DepositEpoch < 
18446744073709551615 { + builderData.ShowDeposit = true + builderData.DepositEpoch = uint64(builder.Builder.DepositEpoch) + builderData.DepositTs = chainState.EpochToTime(builder.Builder.DepositEpoch) + } + + // Withdrawable epoch + if builder.Builder.WithdrawableEpoch < 18446744073709551615 { + builderData.ShowWithdrawable = true + builderData.WithdrawableEpoch = uint64(builder.Builder.WithdrawableEpoch) + builderData.WithdrawableTs = chainState.EpochToTime(builder.Builder.WithdrawableEpoch) + } + + pageData.Builders = append(pageData.Builders, builderData) + } + pageData.BuilderCount = builderSetLen + if pageNumber > 1 { pageData.FirstBuilder = (pageNumber - 1) * pageSize } + pageData.LastBuilder = pageData.FirstBuilder + uint64(len(pageData.Builders)) + + // Populate UrlParams for page jump functionality + pageData.UrlParams = make(map[string]string) + for key, values := range filterArgs { + if len(values) > 0 { + pageData.UrlParams[key] = values[0] + } + } + pageData.UrlParams["c"] = fmt.Sprintf("%v", pageData.PageSize) + + pageData.FilteredPageLink = fmt.Sprintf("/builders?f&%v&c=%v", filterArgs.Encode(), pageData.PageSize) + + // Sort status options alphabetically + sort.Slice(pageData.FilterStatusOpts, func(a, b int) bool { + return strings.Compare(pageData.FilterStatusOpts[a].Status, pageData.FilterStatusOpts[b].Status) < 0 + }) + + return pageData, cacheTime +} diff --git a/handlers/chain_forks.go b/handlers/chain_forks.go index 6eae95cff..e218fb955 100644 --- a/handlers/chain_forks.go +++ b/handlers/chain_forks.go @@ -9,13 +9,13 @@ import ( "strconv" "time" - "github.com/attestantio/go-eth2-client/spec/phase0" "github.com/ethpandaops/dora/db" "github.com/ethpandaops/dora/dbtypes" "github.com/ethpandaops/dora/indexer/beacon" "github.com/ethpandaops/dora/services" "github.com/ethpandaops/dora/templates" "github.com/ethpandaops/dora/types/models" + "github.com/ethpandaops/go-eth2-client/spec/phase0" "github.com/sirupsen/logrus" ) diff --git a/handlers/clients_cl.go 
b/handlers/clients_cl.go index c4ec6acf7..f31773f7e 100644 --- a/handlers/clients_cl.go +++ b/handlers/clients_cl.go @@ -9,7 +9,6 @@ import ( "strings" "time" - v1 "github.com/attestantio/go-eth2-client/api/v1" "github.com/ethereum/go-ethereum/p2p/enr" "github.com/ethpandaops/dora/clients/consensus" "github.com/ethpandaops/dora/clients/consensus/rpc" @@ -17,6 +16,7 @@ import ( "github.com/ethpandaops/dora/templates" "github.com/ethpandaops/dora/types/models" "github.com/ethpandaops/dora/utils" + v1 "github.com/ethpandaops/go-eth2-client/api/v1" "github.com/sirupsen/logrus" ) diff --git a/handlers/consolidations.go b/handlers/consolidations.go index 36b880569..56c20f358 100644 --- a/handlers/consolidations.go +++ b/handlers/consolidations.go @@ -9,12 +9,12 @@ import ( "strings" "time" - v1 "github.com/attestantio/go-eth2-client/api/v1" - "github.com/attestantio/go-eth2-client/spec/phase0" "github.com/ethpandaops/dora/dbtypes" "github.com/ethpandaops/dora/services" "github.com/ethpandaops/dora/templates" "github.com/ethpandaops/dora/types/models" + v1 "github.com/ethpandaops/go-eth2-client/api/v1" + "github.com/ethpandaops/go-eth2-client/spec/phase0" "github.com/sirupsen/logrus" ) diff --git a/handlers/deposits.go b/handlers/deposits.go index d83310b4f..e662b3aa0 100644 --- a/handlers/deposits.go +++ b/handlers/deposits.go @@ -9,8 +9,6 @@ import ( "strings" "time" - v1 "github.com/attestantio/go-eth2-client/api/v1" - "github.com/attestantio/go-eth2-client/spec/phase0" "github.com/ethereum/go-ethereum/common" "github.com/ethpandaops/dora/db" "github.com/ethpandaops/dora/dbtypes" @@ -18,6 +16,9 @@ import ( "github.com/ethpandaops/dora/services" "github.com/ethpandaops/dora/templates" "github.com/ethpandaops/dora/types/models" + v1 "github.com/ethpandaops/go-eth2-client/api/v1" + "github.com/ethpandaops/go-eth2-client/spec/gloas" + "github.com/ethpandaops/go-eth2-client/spec/phase0" "github.com/sirupsen/logrus" ) @@ -155,6 +156,9 @@ func buildDepositsPageData(ctx 
context.Context, firstEpoch uint64, pageSize uint // load initiated deposits dbDepositTxs := db.GetDepositTxs(ctx, 0, 20) for _, depositTx := range dbDepositTxs { + // Check if this is a builder deposit (0x03 withdrawal credentials) + isBuilder := len(depositTx.WithdrawalCredentials) > 0 && depositTx.WithdrawalCredentials[0] == 0x03 + depositTxData := &models.DepositsPageDataInitiatedDeposit{ Index: depositTx.Index, Address: depositTx.TxSender, @@ -166,12 +170,31 @@ func buildDepositsPageData(ctx context.Context, firstEpoch uint64, pageSize uint Block: depositTx.BlockNumber, Orphaned: depositTx.Orphaned, Valid: depositTx.ValidSignature == 1 || depositTx.ValidSignature == 2, + IsBuilder: isBuilder, } validatorIndex, found := services.GlobalBeaconService.GetValidatorIndexByPubkey(phase0.BLSPubKey(depositTx.PublicKey)) if !found { depositTxData.ValidatorStatus = "Deposited" depositTxData.ValidatorExists = false + } else if uint64(validatorIndex)&services.BuilderIndexFlag != 0 { + builderIndex := uint64(validatorIndex) &^ services.BuilderIndexFlag + depositTxData.IsBuilder = true + depositTxData.ValidatorExists = true + depositTxData.ValidatorIndex = builderIndex + depositTxData.ValidatorName = services.GlobalBeaconService.GetValidatorName(uint64(validatorIndex)) + + builder := services.GlobalBeaconService.GetBuilderByIndex(gloas.BuilderIndex(builderIndex)) + if builder == nil { + depositTxData.ValidatorStatus = "Deposited" + } else { + currentEpoch := chainState.CurrentEpoch() + if builder.WithdrawableEpoch <= currentEpoch { + depositTxData.ValidatorStatus = "Exited" + } else { + depositTxData.ValidatorStatus = "Active" + } + } } else { depositTxData.ValidatorExists = true depositTxData.ValidatorIndex = uint64(validatorIndex) @@ -218,15 +241,20 @@ func buildDepositsPageData(ctx context.Context, firstEpoch uint64, pageSize uint dbDeposits, _ := services.GlobalBeaconService.GetDepositRequestsByFilter(ctx, depositFilter, 0, uint32(20)) for _, deposit := range dbDeposits 
{ + // Check if this is a builder deposit (0x03 withdrawal credentials) + wdCreds := deposit.WithdrawalCredentials() + isBuilder := len(wdCreds) > 0 && wdCreds[0] == 0x03 + depositData := &models.DepositsPageDataIncludedDeposit{ PublicKey: deposit.PublicKey(), - Withdrawalcredentials: deposit.WithdrawalCredentials(), + Withdrawalcredentials: wdCreds, Amount: deposit.Amount(), Time: chainState.SlotToTime(phase0.Slot(deposit.Request.SlotNumber)), SlotNumber: deposit.Request.SlotNumber, SlotRoot: deposit.Request.SlotRoot, Orphaned: deposit.RequestOrphaned, DepositorAddress: deposit.SourceAddress(), + IsBuilder: isBuilder, } if deposit.IsQueued { @@ -258,6 +286,24 @@ func buildDepositsPageData(ctx context.Context, firstEpoch uint64, pageSize uint validatorIndex, found := services.GlobalBeaconService.GetValidatorIndexByPubkey(phase0.BLSPubKey(deposit.PublicKey())) if !found { depositData.ValidatorStatus = "Deposited" + } else if uint64(validatorIndex)&services.BuilderIndexFlag != 0 { + builderIndex := uint64(validatorIndex) &^ services.BuilderIndexFlag + depositData.IsBuilder = true + depositData.ValidatorExists = true + depositData.ValidatorIndex = builderIndex + depositData.ValidatorName = services.GlobalBeaconService.GetValidatorName(uint64(validatorIndex)) + + builder := services.GlobalBeaconService.GetBuilderByIndex(gloas.BuilderIndex(builderIndex)) + if builder == nil { + depositData.ValidatorStatus = "Deposited" + } else { + currentEpoch := chainState.CurrentEpoch() + if builder.WithdrawableEpoch <= currentEpoch { + depositData.ValidatorStatus = "Exited" + } else { + depositData.ValidatorStatus = "Active" + } + } } else { depositData.ValidatorExists = true depositData.ValidatorIndex = uint64(validatorIndex) @@ -318,16 +364,39 @@ func buildDepositsPageData(ctx context.Context, firstEpoch uint64, pageSize uint } for _, queueEntry := range queuedDeposits.Queue[:limit] { + // Check if this is a builder deposit (0x03 withdrawal credentials) + wdCreds := 
queueEntry.PendingDeposit.WithdrawalCredentials[:] + isBuilder := len(wdCreds) > 0 && wdCreds[0] == 0x03 + depositData := &models.DepositsPageDataQueuedDeposit{ QueuePosition: queueEntry.QueuePos, EstimatedTime: chainState.EpochToTime(queueEntry.EpochEstimate), PublicKey: queueEntry.PendingDeposit.Pubkey[:], - Withdrawalcredentials: queueEntry.PendingDeposit.WithdrawalCredentials[:], + Withdrawalcredentials: wdCreds, Amount: uint64(queueEntry.PendingDeposit.Amount), + IsBuilder: isBuilder, } if validatorIdx, found := services.GlobalBeaconService.GetValidatorIndexByPubkey(phase0.BLSPubKey(depositData.PublicKey)); !found { depositData.ValidatorStatus = "Deposited" + } else if uint64(validatorIdx)&services.BuilderIndexFlag != 0 { + builderIndex := uint64(validatorIdx) &^ services.BuilderIndexFlag + depositData.IsBuilder = true + depositData.ValidatorExists = true + depositData.ValidatorIndex = builderIndex + depositData.ValidatorName = services.GlobalBeaconService.GetValidatorName(uint64(validatorIdx)) + + builder := services.GlobalBeaconService.GetBuilderByIndex(gloas.BuilderIndex(builderIndex)) + if builder == nil { + depositData.ValidatorStatus = "Deposited" + } else { + currentEpoch := chainState.CurrentEpoch() + if builder.WithdrawableEpoch <= currentEpoch { + depositData.ValidatorStatus = "Exited" + } else { + depositData.ValidatorStatus = "Active" + } + } } else { depositData.ValidatorExists = true depositData.ValidatorIndex = uint64(validatorIdx) diff --git a/handlers/el_consolidations.go b/handlers/el_consolidations.go index 85f7f7073..6c92d1336 100644 --- a/handlers/el_consolidations.go +++ b/handlers/el_consolidations.go @@ -8,7 +8,6 @@ import ( "strconv" "time" - "github.com/attestantio/go-eth2-client/spec/phase0" "github.com/ethereum/go-ethereum/common" "github.com/ethpandaops/dora/clients/consensus" "github.com/ethpandaops/dora/dbtypes" @@ -16,6 +15,7 @@ import ( "github.com/ethpandaops/dora/services" "github.com/ethpandaops/dora/templates" 
"github.com/ethpandaops/dora/types/models" + "github.com/ethpandaops/go-eth2-client/spec/phase0" "github.com/sirupsen/logrus" ) diff --git a/handlers/el_withdrawals.go b/handlers/el_withdrawals.go index 68149dca4..42b130012 100644 --- a/handlers/el_withdrawals.go +++ b/handlers/el_withdrawals.go @@ -8,13 +8,13 @@ import ( "strconv" "time" - "github.com/attestantio/go-eth2-client/spec/phase0" "github.com/ethereum/go-ethereum/common" "github.com/ethpandaops/dora/clients/consensus" "github.com/ethpandaops/dora/dbtypes" "github.com/ethpandaops/dora/services" "github.com/ethpandaops/dora/templates" "github.com/ethpandaops/dora/types/models" + "github.com/ethpandaops/go-eth2-client/spec/phase0" "github.com/sirupsen/logrus" ) @@ -41,6 +41,7 @@ func ElWithdrawals(w http.ResponseWriter, r *http.Request) { } } + var entity string var minSlot uint64 var maxSlot uint64 var sourceAddr string @@ -52,6 +53,9 @@ func ElWithdrawals(w http.ResponseWriter, r *http.Request) { var pubkey string if urlArgs.Has("f") { + if urlArgs.Has("f.entity") { + entity = urlArgs.Get("f.entity") + } if urlArgs.Has("f.mins") { minSlot, _ = strconv.ParseUint(urlArgs.Get("f.mins"), 10, 64) } @@ -82,10 +86,21 @@ func ElWithdrawals(w http.ResponseWriter, r *http.Request) { } else { withOrphaned = 1 } + + // Apply builder flag to index filters when entity=builder + if entity == "builder" { + if minIndex > 0 { + minIndex |= services.BuilderIndexFlag + } + if maxIndex > 0 { + maxIndex |= services.BuilderIndexFlag + } + } + var pageError error pageError = services.GlobalCallRateLimiter.CheckCallLimit(r, 2) if pageError == nil { - data.Data, pageError = getFilteredElWithdrawalsPageData(pageIdx, pageSize, minSlot, maxSlot, sourceAddr, minIndex, maxIndex, vname, uint8(withOrphaned), uint8(withType), pubkey) + data.Data, pageError = getFilteredElWithdrawalsPageData(pageIdx, pageSize, entity, minSlot, maxSlot, sourceAddr, minIndex, maxIndex, vname, uint8(withOrphaned), uint8(withType), pubkey) } if pageError != 
nil { handlePageError(w, r, pageError) @@ -97,11 +112,11 @@ func ElWithdrawals(w http.ResponseWriter, r *http.Request) { } } -func getFilteredElWithdrawalsPageData(pageIdx uint64, pageSize uint64, minSlot uint64, maxSlot uint64, sourceAddr string, minIndex uint64, maxIndex uint64, vname string, withOrphaned uint8, withType uint8, pubkey string) (*models.ElWithdrawalsPageData, error) { +func getFilteredElWithdrawalsPageData(pageIdx uint64, pageSize uint64, entity string, minSlot uint64, maxSlot uint64, sourceAddr string, minIndex uint64, maxIndex uint64, vname string, withOrphaned uint8, withType uint8, pubkey string) (*models.ElWithdrawalsPageData, error) { pageData := &models.ElWithdrawalsPageData{} - pageCacheKey := fmt.Sprintf("el_withdrawals:%v:%v:%v:%v:%v:%v:%v:%v:%v:%v:%v", pageIdx, pageSize, minSlot, maxSlot, sourceAddr, minIndex, maxIndex, vname, withOrphaned, withType, pubkey) + pageCacheKey := fmt.Sprintf("el_withdrawals:%v:%v:%v:%v:%v:%v:%v:%v:%v:%v:%v:%v", pageIdx, pageSize, entity, minSlot, maxSlot, sourceAddr, minIndex, maxIndex, vname, withOrphaned, withType, pubkey) pageRes, pageErr := services.GlobalFrontendCache.ProcessCachedPage(pageCacheKey, true, pageData, func(pageCall *services.FrontendCacheProcessingPage) interface{} { - pageData, cacheTimeout := buildFilteredElWithdrawalsPageData(pageCall.CallCtx, pageIdx, pageSize, minSlot, maxSlot, sourceAddr, minIndex, maxIndex, vname, withOrphaned, withType, pubkey) + pageData, cacheTimeout := buildFilteredElWithdrawalsPageData(pageCall.CallCtx, pageIdx, pageSize, entity, minSlot, maxSlot, sourceAddr, minIndex, maxIndex, vname, withOrphaned, withType, pubkey) pageCall.CacheTimeout = cacheTimeout return pageData }) @@ -115,8 +130,15 @@ func getFilteredElWithdrawalsPageData(pageIdx uint64, pageSize uint64, minSlot u return pageData, pageErr } -func buildFilteredElWithdrawalsPageData(ctx context.Context, pageIdx uint64, pageSize uint64, minSlot uint64, maxSlot uint64, sourceAddr string, minIndex uint64, 
maxIndex uint64, vname string, withOrphaned uint8, withType uint8, pubkey string) (*models.ElWithdrawalsPageData, time.Duration) { +func buildFilteredElWithdrawalsPageData(ctx context.Context, pageIdx uint64, pageSize uint64, entity string, minSlot uint64, maxSlot uint64, sourceAddr string, minIndex uint64, maxIndex uint64, vname string, withOrphaned uint8, withType uint8, pubkey string) (*models.ElWithdrawalsPageData, time.Duration) { + if entity == "" { + entity = "all" + } + filterArgs := url.Values{} + if entity != "all" { + filterArgs.Add("f.entity", entity) + } if minSlot != 0 { filterArgs.Add("f.mins", fmt.Sprintf("%v", minSlot)) } @@ -145,12 +167,21 @@ func buildFilteredElWithdrawalsPageData(ctx context.Context, pageIdx uint64, pag filterArgs.Add("f.pubkey", pubkey) } + // Display indices without the builder flag for the filter UI + displayMinIndex := minIndex + displayMaxIndex := maxIndex + if entity == "builder" { + displayMinIndex = minIndex &^ services.BuilderIndexFlag + displayMaxIndex = maxIndex &^ services.BuilderIndexFlag + } + pageData := &models.ElWithdrawalsPageData{ + FilterEntity: entity, FilterAddress: sourceAddr, FilterMinSlot: minSlot, FilterMaxSlot: maxSlot, - FilterMinIndex: minIndex, - FilterMaxIndex: maxIndex, + FilterMinIndex: displayMinIndex, + FilterMaxIndex: displayMaxIndex, FilterValidatorName: vname, FilterWithOrphaned: withOrphaned, FilterWithType: withType, @@ -213,7 +244,12 @@ func buildFilteredElWithdrawalsPageData(ctx context.Context, pageIdx uint64, pag } if validatorIndex := elWithdrawal.ValidatorIndex(); validatorIndex != nil { - elWithdrawalData.ValidatorIndex = *validatorIndex + if *validatorIndex&services.BuilderIndexFlag != 0 { + elWithdrawalData.IsBuilder = true + elWithdrawalData.ValidatorIndex = *validatorIndex &^ services.BuilderIndexFlag + } else { + elWithdrawalData.ValidatorIndex = *validatorIndex + } elWithdrawalData.ValidatorName = services.GlobalBeaconService.GetValidatorName(*validatorIndex) 
elWithdrawalData.ValidatorValid = true } diff --git a/handlers/epoch.go b/handlers/epoch.go index 807975d47..9fb2e9b1d 100644 --- a/handlers/epoch.go +++ b/handlers/epoch.go @@ -7,7 +7,7 @@ import ( "strconv" "time" - "github.com/attestantio/go-eth2-client/spec/phase0" + "github.com/ethpandaops/go-eth2-client/spec/phase0" "github.com/gorilla/mux" "github.com/sirupsen/logrus" @@ -91,7 +91,7 @@ func buildEpochPageData(ctx context.Context, epoch uint64) (*models.EpochPageDat specs := chainState.GetSpecs() currentSlot := chainState.CurrentSlot() currentEpoch := chainState.EpochOfSlot(currentSlot) - if epoch > uint64(currentEpoch) { + if epoch > uint64(currentEpoch)+1 { return nil, -1 } @@ -105,7 +105,7 @@ func buildEpochPageData(ctx context.Context, epoch uint64) (*models.EpochPageDat } nextEpoch := epoch + 1 - if nextEpoch > uint64(currentEpoch) { + if nextEpoch > uint64(currentEpoch)+1 { nextEpoch = 0 } firstSlot := chainState.EpochToSlot(phase0.Epoch(epoch)) @@ -170,12 +170,18 @@ func buildEpochPageData(ctx context.Context, epoch uint64) (*models.EpochPageDat pageData.MissedCount++ } + payloadStatus := dbSlot.PayloadStatus + if !chainState.IsEip7732Enabled(phase0.Epoch(epoch)) { + payloadStatus = dbtypes.PayloadStatusCanonical + } + slotData := &models.EpochPageDataSlot{ Slot: slot, Epoch: uint64(chainState.EpochOfSlot(phase0.Slot(slot))), Ts: chainState.SlotToTime(phase0.Slot(slot)), Scheduled: slot >= uint64(currentSlot) && dbSlot.Status == dbtypes.Missing, Status: uint8(dbSlot.Status), + PayloadStatus: uint8(payloadStatus), Proposer: dbSlot.Proposer, ProposerName: services.GlobalBeaconService.GetValidatorName(dbSlot.Proposer), AttestationCount: dbSlot.AttestationCount, diff --git a/handlers/epochs.go b/handlers/epochs.go index af56d8ae2..61463bd4f 100644 --- a/handlers/epochs.go +++ b/handlers/epochs.go @@ -8,10 +8,10 @@ import ( "strconv" "time" - "github.com/attestantio/go-eth2-client/spec/phase0" "github.com/ethpandaops/dora/services" 
"github.com/ethpandaops/dora/templates" "github.com/ethpandaops/dora/types/models" + "github.com/ethpandaops/go-eth2-client/spec/phase0" "github.com/sirupsen/logrus" ) diff --git a/handlers/exits.go b/handlers/exits.go index 82944b078..a512da4a7 100644 --- a/handlers/exits.go +++ b/handlers/exits.go @@ -9,12 +9,13 @@ import ( "strings" "time" - v1 "github.com/attestantio/go-eth2-client/api/v1" - "github.com/attestantio/go-eth2-client/spec/phase0" "github.com/ethpandaops/dora/dbtypes" "github.com/ethpandaops/dora/services" "github.com/ethpandaops/dora/templates" "github.com/ethpandaops/dora/types/models" + v1 "github.com/ethpandaops/go-eth2-client/api/v1" + "github.com/ethpandaops/go-eth2-client/spec/gloas" + "github.com/ethpandaops/go-eth2-client/spec/phase0" "github.com/sirupsen/logrus" ) @@ -159,43 +160,70 @@ func buildExitsPageData(ctx context.Context, firstEpoch uint64, pageSize uint64, dbVoluntaryExits, _ := services.GlobalBeaconService.GetVoluntaryExitsByFilter(ctx, voluntaryExitFilter, 0, uint32(20)) for _, voluntaryExit := range dbVoluntaryExits { exitData := &models.ExitsPageDataRecentExit{ - SlotNumber: voluntaryExit.SlotNumber, - SlotRoot: voluntaryExit.SlotRoot, - Time: chainState.SlotToTime(phase0.Slot(voluntaryExit.SlotNumber)), - Orphaned: voluntaryExit.Orphaned, - ValidatorIndex: voluntaryExit.ValidatorIndex, - ValidatorName: services.GlobalBeaconService.GetValidatorName(voluntaryExit.ValidatorIndex), + SlotNumber: voluntaryExit.SlotNumber, + SlotRoot: voluntaryExit.SlotRoot, + Time: chainState.SlotToTime(phase0.Slot(voluntaryExit.SlotNumber)), + Orphaned: voluntaryExit.Orphaned, } - validator := services.GlobalBeaconService.GetValidatorByIndex(phase0.ValidatorIndex(voluntaryExit.ValidatorIndex), false) - if validator == nil { - exitData.ValidatorStatus = "Unknown" - } else { - exitData.PublicKey = validator.Validator.PublicKey[:] - exitData.WithdrawalCreds = validator.Validator.WithdrawalCredentials - - if 
strings.HasPrefix(validator.Status.String(), "pending") { - exitData.ValidatorStatus = "Pending" - } else if validator.Status == v1.ValidatorStateActiveOngoing { - exitData.ValidatorStatus = "Active" - exitData.ShowUpcheck = true - } else if validator.Status == v1.ValidatorStateActiveExiting { - exitData.ValidatorStatus = "Exiting" - exitData.ShowUpcheck = true - } else if validator.Status == v1.ValidatorStateActiveSlashed { - exitData.ValidatorStatus = "Slashed" - exitData.ShowUpcheck = true - } else if validator.Status == v1.ValidatorStateExitedUnslashed { - exitData.ValidatorStatus = "Exited" - } else if validator.Status == v1.ValidatorStateExitedSlashed { - exitData.ValidatorStatus = "Slashed" + // Check if this is a builder exit (validator index has BuilderIndexFlag set) + if voluntaryExit.ValidatorIndex&services.BuilderIndexFlag != 0 { + builderIndex := voluntaryExit.ValidatorIndex &^ services.BuilderIndexFlag + exitData.IsBuilder = true + exitData.ValidatorIndex = builderIndex + + // Resolve builder name via validatornames service (with BuilderIndexFlag) + exitData.ValidatorName = services.GlobalBeaconService.GetValidatorName(voluntaryExit.ValidatorIndex) + + builder := services.GlobalBeaconService.GetBuilderByIndex(gloas.BuilderIndex(builderIndex)) + if builder == nil { + exitData.ValidatorStatus = "Unknown" } else { - exitData.ValidatorStatus = validator.Status.String() + exitData.PublicKey = builder.PublicKey[:] + + // Determine builder status + currentEpoch := chainState.CurrentEpoch() + if builder.WithdrawableEpoch <= currentEpoch { + exitData.ValidatorStatus = "Exited" + } else { + exitData.ValidatorStatus = "Exiting" + } } + } else { + // Regular validator exit + exitData.ValidatorIndex = voluntaryExit.ValidatorIndex + exitData.ValidatorName = services.GlobalBeaconService.GetValidatorName(voluntaryExit.ValidatorIndex) - if exitData.ShowUpcheck { - exitData.UpcheckActivity = uint8(services.GlobalBeaconService.GetValidatorLiveness(validator.Index, 3)) - 
exitData.UpcheckMaximum = uint8(3) + validator := services.GlobalBeaconService.GetValidatorByIndex(phase0.ValidatorIndex(voluntaryExit.ValidatorIndex), false) + if validator == nil { + exitData.ValidatorStatus = "Unknown" + } else { + exitData.PublicKey = validator.Validator.PublicKey[:] + exitData.WithdrawalCreds = validator.Validator.WithdrawalCredentials + + if strings.HasPrefix(validator.Status.String(), "pending") { + exitData.ValidatorStatus = "Pending" + } else if validator.Status == v1.ValidatorStateActiveOngoing { + exitData.ValidatorStatus = "Active" + exitData.ShowUpcheck = true + } else if validator.Status == v1.ValidatorStateActiveExiting { + exitData.ValidatorStatus = "Exiting" + exitData.ShowUpcheck = true + } else if validator.Status == v1.ValidatorStateActiveSlashed { + exitData.ValidatorStatus = "Slashed" + exitData.ShowUpcheck = true + } else if validator.Status == v1.ValidatorStateExitedUnslashed { + exitData.ValidatorStatus = "Exited" + } else if validator.Status == v1.ValidatorStateExitedSlashed { + exitData.ValidatorStatus = "Slashed" + } else { + exitData.ValidatorStatus = validator.Status.String() + } + + if exitData.ShowUpcheck { + exitData.UpcheckActivity = uint8(services.GlobalBeaconService.GetValidatorLiveness(validator.Index, 3)) + exitData.UpcheckMaximum = uint8(3) + } } } @@ -220,9 +248,14 @@ func buildExitsPageData(ctx context.Context, firstEpoch uint64, pageSize uint64, } if validatorIndex := exitReq.ValidatorIndex(); validatorIndex != nil { - exitReqData.ValidatorIndex = *validatorIndex - exitReqData.ValidatorName = services.GlobalBeaconService.GetValidatorName(*validatorIndex) exitReqData.ValidatorValid = true + exitReqData.ValidatorName = services.GlobalBeaconService.GetValidatorName(*validatorIndex) + if *validatorIndex&services.BuilderIndexFlag != 0 { + exitReqData.IsBuilder = true + exitReqData.ValidatorIndex = *validatorIndex &^ services.BuilderIndexFlag + } else { + exitReqData.ValidatorIndex = *validatorIndex + } } if 
request := exitReq.Request; request != nil { diff --git a/handlers/included_deposits.go b/handlers/included_deposits.go index 428540fa0..afd89b5ca 100644 --- a/handlers/included_deposits.go +++ b/handlers/included_deposits.go @@ -10,13 +10,13 @@ import ( "strings" "time" - v1 "github.com/attestantio/go-eth2-client/api/v1" - "github.com/attestantio/go-eth2-client/spec/phase0" "github.com/ethereum/go-ethereum/common" "github.com/ethpandaops/dora/dbtypes" "github.com/ethpandaops/dora/services" "github.com/ethpandaops/dora/templates" "github.com/ethpandaops/dora/types/models" + v1 "github.com/ethpandaops/go-eth2-client/api/v1" + "github.com/ethpandaops/go-eth2-client/spec/phase0" "github.com/sirupsen/logrus" ) @@ -201,15 +201,19 @@ func buildFilteredIncludedDepositsPageData(ctx context.Context, pageIdx uint64, chainState := services.GlobalBeaconService.GetChainState() for _, deposit := range dbDeposits { + wdCreds := deposit.WithdrawalCredentials() + isBuilder := len(wdCreds) > 0 && wdCreds[0] == 0x03 + depositData := &models.IncludedDepositsPageDataDeposit{ PublicKey: deposit.PublicKey(), - Withdrawalcredentials: deposit.WithdrawalCredentials(), + Withdrawalcredentials: wdCreds, Amount: deposit.Amount(), Time: chainState.SlotToTime(phase0.Slot(deposit.Request.SlotNumber)), SlotNumber: deposit.Request.SlotNumber, SlotRoot: deposit.Request.SlotRoot, Orphaned: deposit.RequestOrphaned, DepositorAddress: deposit.SourceAddress(), + IsBuilder: isBuilder, } if deposit.Request != nil { diff --git a/handlers/index.go b/handlers/index.go index 18faa8af6..f84b28738 100644 --- a/handlers/index.go +++ b/handlers/index.go @@ -13,7 +13,6 @@ import ( "strconv" "time" - "github.com/attestantio/go-eth2-client/spec/phase0" "github.com/ethereum/go-ethereum/common" "github.com/ethpandaops/dora/clients/consensus" "github.com/ethpandaops/dora/db" @@ -22,6 +21,7 @@ import ( "github.com/ethpandaops/dora/templates" "github.com/ethpandaops/dora/types/models" "github.com/ethpandaops/dora/utils" + 
"github.com/ethpandaops/go-eth2-client/spec/phase0" "github.com/sirupsen/logrus" ) @@ -291,6 +291,19 @@ func buildIndexPageData(ctx context.Context) (*models.IndexPageData, time.Durati ForkDigest: forkDigest[:], }) } + if specs.GloasForkEpoch != nil && *specs.GloasForkEpoch < uint64(18446744073709551615) { + blobParams := chainState.GetBlobScheduleForEpoch(phase0.Epoch(*specs.GloasForkEpoch)) + forkDigest := chainState.GetForkDigest(specs.GloasForkVersion, blobParams) + pageData.NetworkForks = append(pageData.NetworkForks, &models.IndexPageDataForks{ + Name: "Gloas", + Epoch: *specs.GloasForkEpoch, + Version: specs.GloasForkVersion[:], + Time: uint64(chainState.EpochToTime(phase0.Epoch(*specs.GloasForkEpoch)).Unix()), + Active: uint64(currentEpoch) >= *specs.GloasForkEpoch, + Type: "consensus", + ForkDigest: forkDigest[:], + }) + } // Add BPO forks from BLOB_SCHEDULE elBlobSchedule := services.GlobalBeaconService.GetExecutionChainState().GetFullBlobSchedule() @@ -426,14 +439,23 @@ func buildIndexPageRecentBlocksData(ctx context.Context, pageData *models.IndexP if blockData == nil { continue } + + epoch := chainState.EpochOfSlot(phase0.Slot(blockData.Slot)) + + payloadStatus := blockData.PayloadStatus + if !chainState.IsEip7732Enabled(epoch) { + payloadStatus = dbtypes.PayloadStatusCanonical + } + blockModel := &models.IndexPageDataBlocks{ - Epoch: uint64(chainState.EpochOfSlot(phase0.Slot(blockData.Slot))), - Slot: blockData.Slot, - Ts: chainState.SlotToTime(phase0.Slot(blockData.Slot)), - Proposer: blockData.Proposer, - ProposerName: services.GlobalBeaconService.GetValidatorName(blockData.Proposer), - Status: uint64(blockData.Status), - BlockRoot: blockData.Root, + Epoch: uint64(epoch), + Slot: blockData.Slot, + Ts: chainState.SlotToTime(phase0.Slot(blockData.Slot)), + Proposer: blockData.Proposer, + ProposerName: services.GlobalBeaconService.GetValidatorName(blockData.Proposer), + Status: uint64(blockData.Status), + PayloadStatus: uint8(payloadStatus), + 
BlockRoot: blockData.Root, } if blockData.EthBlockNumber != nil { blockModel.WithEthBlock = true @@ -471,16 +493,24 @@ func buildIndexPageRecentSlotsData(ctx context.Context, pageData *models.IndexPa dbSlot := dbSlots[dbIdx] dbIdx++ + epoch := chainState.EpochOfSlot(phase0.Slot(dbSlot.Slot)) + + payloadStatus := dbSlot.PayloadStatus + if !chainState.IsEip7732Enabled(phase0.Epoch(epoch)) { + payloadStatus = dbtypes.PayloadStatusCanonical + } + slotData := &models.IndexPageDataSlots{ - Slot: slot, - Epoch: uint64(chainState.EpochOfSlot(phase0.Slot(dbSlot.Slot))), - Ts: chainState.SlotToTime(phase0.Slot(slot)), - Status: uint64(dbSlot.Status), - Proposer: dbSlot.Proposer, - ProposerName: services.GlobalBeaconService.GetValidatorName(dbSlot.Proposer), - BlockRoot: dbSlot.Root, - ParentRoot: dbSlot.ParentRoot, - ForkGraph: make([]*models.IndexPageDataForkGraph, 0), + Slot: slot, + Epoch: uint64(epoch), + Ts: chainState.SlotToTime(phase0.Slot(slot)), + Status: uint64(dbSlot.Status), + PayloadStatus: uint8(payloadStatus), + Proposer: dbSlot.Proposer, + ProposerName: services.GlobalBeaconService.GetValidatorName(dbSlot.Proposer), + BlockRoot: dbSlot.Root, + ParentRoot: dbSlot.ParentRoot, + ForkGraph: make([]*models.IndexPageDataForkGraph, 0), } pageData.RecentSlots = append(pageData.RecentSlots, slotData) blockCount++ diff --git a/handlers/initiated_deposits.go b/handlers/initiated_deposits.go index 173d72491..1e365f59a 100644 --- a/handlers/initiated_deposits.go +++ b/handlers/initiated_deposits.go @@ -9,14 +9,14 @@ import ( "strings" "time" - v1 "github.com/attestantio/go-eth2-client/api/v1" - "github.com/attestantio/go-eth2-client/spec/phase0" "github.com/ethereum/go-ethereum/common" "github.com/ethpandaops/dora/db" "github.com/ethpandaops/dora/dbtypes" "github.com/ethpandaops/dora/services" "github.com/ethpandaops/dora/templates" "github.com/ethpandaops/dora/types/models" + v1 "github.com/ethpandaops/go-eth2-client/api/v1" + 
"github.com/ethpandaops/go-eth2-client/spec/phase0" "github.com/sirupsen/logrus" ) @@ -181,6 +181,8 @@ func buildFilteredInitiatedDepositsPageData(ctx context.Context, pageIdx uint64, } for _, depositTx := range dbDepositTxs { + isBuilder := len(depositTx.WithdrawalCredentials) > 0 && depositTx.WithdrawalCredentials[0] == 0x03 + depositTxData := &models.InitiatedDepositsPageDataDeposit{ Index: depositTx.Index, Address: depositTx.TxSender, @@ -193,6 +195,7 @@ func buildFilteredInitiatedDepositsPageData(ctx context.Context, pageIdx uint64, Orphaned: depositTx.Orphaned, Valid: depositTx.ValidSignature == 1 || depositTx.ValidSignature == 2, ValidatorStatus: "", + IsBuilder: isBuilder, } if validatorIdx, found := services.GlobalBeaconService.GetValidatorIndexByPubkey(phase0.BLSPubKey(depositTx.PublicKey)); !found { diff --git a/handlers/mev_blocks.go b/handlers/mev_blocks.go index 539356410..017d6fa05 100644 --- a/handlers/mev_blocks.go +++ b/handlers/mev_blocks.go @@ -8,13 +8,13 @@ import ( "strconv" "strings" - "github.com/attestantio/go-eth2-client/spec/phase0" "github.com/ethpandaops/dora/db" "github.com/ethpandaops/dora/dbtypes" "github.com/ethpandaops/dora/services" "github.com/ethpandaops/dora/templates" "github.com/ethpandaops/dora/types/models" "github.com/ethpandaops/dora/utils" + "github.com/ethpandaops/go-eth2-client/spec/phase0" "github.com/sirupsen/logrus" ) diff --git a/handlers/pageData.go b/handlers/pageData.go index 7af25ce41..2b9c95240 100644 --- a/handlers/pageData.go +++ b/handlers/pageData.go @@ -90,6 +90,8 @@ func InitPageData(w http.ResponseWriter, r *http.Request, active, path, title st } func createMenuItems(active string) []types.MainMenuItem { + chainState := services.GlobalBeaconService.GetChainState() + specs := chainState.GetSpecs() hiddenFor := []string{"confirmation", "login", "register"} if utils.SliceContains(hiddenFor, active) { @@ -204,6 +206,19 @@ func createMenuItems(active string) []types.MainMenuItem { Links: validatorMenuLinks, 
}) + if specs != nil && specs.GloasForkEpoch != nil && uint64(chainState.CurrentEpoch()) >= *specs.GloasForkEpoch { + builderMenu := []types.NavigationLink{ + { + Label: "Builders", + Path: "/builders", + Icon: "fa-building", + }, + } + validatorMenu = append(validatorMenu, types.NavigationGroup{ + Links: builderMenu, + }) + } + validatorActionsGroup := types.NavigationGroup{ Links: []types.NavigationLink{ { @@ -219,8 +234,6 @@ func createMenuItems(active string) []types.MainMenuItem { }, } - chainState := services.GlobalBeaconService.GetChainState() - specs := chainState.GetSpecs() if specs != nil && specs.ElectraForkEpoch != nil && uint64(chainState.CurrentEpoch()) >= *specs.ElectraForkEpoch { validatorActionsGroup.Links = append( validatorActionsGroup.Links, diff --git a/handlers/queued_consolidations.go b/handlers/queued_consolidations.go index a7c65ccab..9f03d51f3 100644 --- a/handlers/queued_consolidations.go +++ b/handlers/queued_consolidations.go @@ -10,11 +10,11 @@ import ( "strings" "time" - v1 "github.com/attestantio/go-eth2-client/api/v1" "github.com/ethereum/go-ethereum/common" "github.com/ethpandaops/dora/services" "github.com/ethpandaops/dora/templates" "github.com/ethpandaops/dora/types/models" + v1 "github.com/ethpandaops/go-eth2-client/api/v1" "github.com/sirupsen/logrus" ) diff --git a/handlers/queued_deposits.go b/handlers/queued_deposits.go index 3b9f2ce67..bd71ee482 100644 --- a/handlers/queued_deposits.go +++ b/handlers/queued_deposits.go @@ -10,14 +10,14 @@ import ( "strings" "time" - v1 "github.com/attestantio/go-eth2-client/api/v1" - "github.com/attestantio/go-eth2-client/spec/phase0" "github.com/ethereum/go-ethereum/common" "github.com/ethpandaops/dora/db" "github.com/ethpandaops/dora/dbtypes" "github.com/ethpandaops/dora/services" "github.com/ethpandaops/dora/templates" "github.com/ethpandaops/dora/types/models" + v1 "github.com/ethpandaops/go-eth2-client/api/v1" + "github.com/ethpandaops/go-eth2-client/spec/phase0" ) // QueuedDeposits 
will return the "queued_deposits" page using a go template @@ -211,12 +211,16 @@ func buildQueuedDepositsPageData(ctx context.Context, pageIdx uint64, pageSize u for i := start; i < end; i++ { queueEntry := filteredQueue[i] + wdCreds := queueEntry.PendingDeposit.WithdrawalCredentials[:] + isBuilder := len(wdCreds) > 0 && wdCreds[0] == 0x03 + depositData := &models.QueuedDepositsPageDataDeposit{ QueuePosition: queueEntry.QueuePos, EstimatedTime: chainState.EpochToTime(queueEntry.EpochEstimate), PublicKey: queueEntry.PendingDeposit.Pubkey[:], Amount: uint64(queueEntry.PendingDeposit.Amount), - Withdrawalcredentials: queueEntry.PendingDeposit.WithdrawalCredentials[:], + Withdrawalcredentials: wdCreds, + IsBuilder: isBuilder, } // Get validator status if exists diff --git a/handlers/queued_withdrawals.go b/handlers/queued_withdrawals.go index 29c732712..cd6b5ceb8 100644 --- a/handlers/queued_withdrawals.go +++ b/handlers/queued_withdrawals.go @@ -9,11 +9,11 @@ import ( "strings" "time" - v1 "github.com/attestantio/go-eth2-client/api/v1" "github.com/ethereum/go-ethereum/common" "github.com/ethpandaops/dora/services" "github.com/ethpandaops/dora/templates" "github.com/ethpandaops/dora/types/models" + v1 "github.com/ethpandaops/go-eth2-client/api/v1" "github.com/sirupsen/logrus" ) diff --git a/handlers/search.go b/handlers/search.go index a0e9e7818..f941084d1 100644 --- a/handlers/search.go +++ b/handlers/search.go @@ -11,7 +11,7 @@ import ( "strings" "time" - "github.com/attestantio/go-eth2-client/spec/phase0" + "github.com/ethpandaops/go-eth2-client/spec/phase0" "github.com/gorilla/mux" "github.com/sirupsen/logrus" @@ -92,9 +92,9 @@ func buildSearchResolverResult(ctx context.Context, searchQuery string) (searchR } blockResult := &dbtypes.SearchBlockResult{} - err = db.ReaderDb.Get(blockResult, ` - SELECT slot, root, status - FROM slots + err = db.ReaderDb.GetContext(ctx, blockResult, ` + SELECT slot, root, status + FROM slots WHERE slot = $1 AND status != 0 LIMIT 1`, 
searchQuery) if err == nil { @@ -128,9 +128,9 @@ func buildSearchResolverResult(ctx context.Context, searchQuery string) (searchR blockHash, err := hex.DecodeString(hashQuery) if err == nil { blockResult := &dbtypes.SearchBlockResult{} - err = db.ReaderDb.Get(blockResult, ` - SELECT slot, root, orphaned - FROM slots + err = db.ReaderDb.GetContext(ctx, blockResult, ` + SELECT slot, root, orphaned + FROM slots WHERE root = $1 OR state_root = $1 LIMIT 1`, blockHash) @@ -149,7 +149,7 @@ func buildSearchResolverResult(ctx context.Context, searchQuery string) (searchR } names := &dbtypes.SearchNameResult{} - err = db.ReaderDb.Get(names, db.EngineQuery(map[dbtypes.DBEngineType]string{ + err = db.ReaderDb.GetContext(ctx, names, db.EngineQuery(map[dbtypes.DBEngineType]string{ dbtypes.DBEnginePgsql: ` SELECT name FROM validator_names @@ -166,7 +166,7 @@ func buildSearchResolverResult(ctx context.Context, searchQuery string) (searchR } graffiti := &dbtypes.SearchGraffitiResult{} - err = db.ReaderDb.Get(graffiti, db.EngineQuery(map[dbtypes.DBEngineType]string{ + err = db.ReaderDb.GetContext(ctx, graffiti, db.EngineQuery(map[dbtypes.DBEngineType]string{ dbtypes.DBEnginePgsql: ` SELECT graffiti FROM slots @@ -261,7 +261,7 @@ func buildSearchAheadResult(ctx context.Context, searchType, search string) (*se switch searchType { case "epochs": dbres := &dbtypes.SearchAheadEpochsResult{} - err = db.ReaderDb.Select(dbres, "SELECT epoch FROM epochs WHERE CAST(epoch AS text) LIKE $1 ORDER BY epoch LIMIT 10", search+"%") + err = db.ReaderDb.SelectContext(ctx, dbres, "SELECT epoch FROM epochs WHERE CAST(epoch AS text) LIKE $1 ORDER BY epoch LIMIT 10", search+"%") if err == nil { model := make([]models.SearchAheadEpochsResult, len(*dbres)) for idx, entry := range *dbres { @@ -299,9 +299,9 @@ func buildSearchAheadResult(ctx context.Context, searchType, search string) (*se } } else { dbres := &dbtypes.SearchAheadSlotsResult{} - err = db.ReaderDb.Select(dbres, ` - SELECT slot, root, status - 
FROM slots + err = db.ReaderDb.SelectContext(ctx, dbres, ` + SELECT slot, root, status + FROM slots WHERE slot < $1 AND (root = $2 OR state_root = $2) ORDER BY slot LIMIT 1`, minSlotIdx, blockHash) if err != nil { @@ -335,9 +335,9 @@ func buildSearchAheadResult(ctx context.Context, searchType, search string) (*se result = res } else { dbres := &dbtypes.SearchAheadSlotsResult{} - err = db.ReaderDb.Select(dbres, ` - SELECT slot, root, status - FROM slots + err = db.ReaderDb.SelectContext(ctx, dbres, ` + SELECT slot, root, status + FROM slots WHERE slot = $1 AND status != 0 ORDER BY slot LIMIT 10`, blockNumber) if err == nil { @@ -383,9 +383,9 @@ func buildSearchAheadResult(ctx context.Context, searchType, search string) (*se result = res } else { dbres := &dbtypes.SearchAheadExecBlocksResult{} - err = db.ReaderDb.Select(dbres, ` - SELECT slot, root, eth_block_hash, eth_block_number, status - FROM slots + err = db.ReaderDb.SelectContext(ctx, dbres, ` + SELECT slot, root, eth_block_hash, eth_block_number, status + FROM slots WHERE slot < $1 AND eth_block_hash = $2 ORDER BY slot LIMIT 10`, minSlotIdx, blockHash) if err != nil { @@ -422,9 +422,9 @@ func buildSearchAheadResult(ctx context.Context, searchType, search string) (*se result = res } else { dbres := &dbtypes.SearchAheadExecBlocksResult{} - err = db.ReaderDb.Select(dbres, ` - SELECT slot, root, eth_block_hash, eth_block_number, status - FROM slots + err = db.ReaderDb.SelectContext(ctx, dbres, ` + SELECT slot, root, eth_block_hash, eth_block_number, status + FROM slots WHERE slot < $1 AND eth_block_number = $2 ORDER BY slot LIMIT 10`, minSlotIdx, blockNumber) if err == nil { @@ -444,7 +444,7 @@ func buildSearchAheadResult(ctx context.Context, searchType, search string) (*se } case "graffiti": graffiti := &dbtypes.SearchAheadGraffitiResult{} - err = db.ReaderDb.Select(graffiti, db.EngineQuery(map[dbtypes.DBEngineType]string{ + err = db.ReaderDb.SelectContext(ctx, graffiti, 
db.EngineQuery(map[dbtypes.DBEngineType]string{ dbtypes.DBEnginePgsql: ` SELECT graffiti, count(*) as count FROM slots @@ -472,7 +472,7 @@ func buildSearchAheadResult(ctx context.Context, searchType, search string) (*se } case "valname": names := &dbtypes.SearchAheadValidatorNameResult{} - err = db.ReaderDb.Select(names, db.EngineQuery(map[dbtypes.DBEngineType]string{ + err = db.ReaderDb.SelectContext(ctx, names, db.EngineQuery(map[dbtypes.DBEngineType]string{ dbtypes.DBEnginePgsql: ` SELECT name, count(*) as count FROM validator_names @@ -523,7 +523,7 @@ func buildSearchAheadResult(ctx context.Context, searchType, search string) (*se } else if len(search) >= 2 && len(search) <= 96 { // Search by pubkey prefix validators := &dbtypes.SearchAheadValidatorResult{} - err = db.ReaderDb.Select(validators, db.EngineQuery(map[dbtypes.DBEngineType]string{ + err = db.ReaderDb.SelectContext(ctx, validators, db.EngineQuery(map[dbtypes.DBEngineType]string{ dbtypes.DBEnginePgsql: ` SELECT v.validator_index, v.pubkey FROM validators v @@ -573,7 +573,7 @@ func buildSearchAheadResult(ctx context.Context, searchType, search string) (*se } else if len(search) >= 2 && len(search) < 40 { // Search by address prefix in DB addresses := &dbtypes.SearchAheadAddressResult{} - err = db.ReaderDb.Select(addresses, db.EngineQuery(map[dbtypes.DBEngineType]string{ + err = db.ReaderDb.SelectContext(ctx, addresses, db.EngineQuery(map[dbtypes.DBEngineType]string{ dbtypes.DBEnginePgsql: ` SELECT address, is_contract FROM el_accounts @@ -626,7 +626,7 @@ func buildSearchAheadResult(ctx context.Context, searchType, search string) (*se } else if len(search) >= 2 && len(search) < 64 { // Search by transaction hash prefix in DB transactions := &dbtypes.SearchAheadTransactionResult{} - err = db.ReaderDb.Select(transactions, db.EngineQuery(map[dbtypes.DBEngineType]string{ + err = db.ReaderDb.SelectContext(ctx, transactions, db.EngineQuery(map[dbtypes.DBEngineType]string{ dbtypes.DBEnginePgsql: ` SELECT 
DISTINCT ON (tx_hash) tx_hash, block_number, reverted FROM el_transactions diff --git a/handlers/slashings.go b/handlers/slashings.go index 48ab4e1c0..9e4fa322f 100644 --- a/handlers/slashings.go +++ b/handlers/slashings.go @@ -9,12 +9,12 @@ import ( "strings" "time" - v1 "github.com/attestantio/go-eth2-client/api/v1" - "github.com/attestantio/go-eth2-client/spec/phase0" "github.com/ethpandaops/dora/dbtypes" "github.com/ethpandaops/dora/services" "github.com/ethpandaops/dora/templates" "github.com/ethpandaops/dora/types/models" + v1 "github.com/ethpandaops/go-eth2-client/api/v1" + "github.com/ethpandaops/go-eth2-client/spec/phase0" "github.com/sirupsen/logrus" ) diff --git a/handlers/slot.go b/handlers/slot.go index a99a4c3c9..b12938eea 100644 --- a/handlers/slot.go +++ b/handlers/slot.go @@ -1,6 +1,7 @@ package handlers import ( + "bytes" "context" "encoding/hex" "encoding/json" @@ -8,15 +9,16 @@ import ( "math" "math/big" "net/http" + "slices" "strconv" "strings" "time" - "github.com/attestantio/go-eth2-client/spec" - "github.com/attestantio/go-eth2-client/spec/bellatrix" - "github.com/attestantio/go-eth2-client/spec/electra" - "github.com/attestantio/go-eth2-client/spec/phase0" ethtypes "github.com/ethereum/go-ethereum/core/types" + "github.com/ethpandaops/go-eth2-client/spec" + "github.com/ethpandaops/go-eth2-client/spec/bellatrix" + "github.com/ethpandaops/go-eth2-client/spec/electra" + "github.com/ethpandaops/go-eth2-client/spec/phase0" "github.com/gorilla/mux" "github.com/sirupsen/logrus" @@ -46,6 +48,8 @@ func Slot(w http.ResponseWriter, r *http.Request) { "slot/deposit_requests.html", "slot/withdrawal_requests.html", "slot/consolidation_requests.html", + "slot/bids.html", + "slot/ptc_votes.html", ) var notfoundTemplateFiles = append(layoutTemplateFiles, "slot/notfound.html", @@ -530,6 +534,11 @@ func getSlotPageBlockData(ctx context.Context, blockData *services.CombinedBlock includedValidators := []uint64{} attEpochStatsValues := assignmentsMap[attEpoch] + 
if attVersioned.Version >= spec.DataVersionGloas { + payloadStatus := uint64(attData.Index) + attPageData.PayloadStatus = &payloadStatus + } + if attVersioned.Version >= spec.DataVersionElectra { // EIP-7549 attestation attAssignments = []uint64{} @@ -614,9 +623,16 @@ func getSlotPageBlockData(ctx context.Context, blockData *services.CombinedBlock pageData.VoluntaryExits = make([]*models.SlotPageVoluntaryExit, pageData.VoluntaryExitsCount) for i, exit := range voluntaryExits { + validatorIndex := uint64(exit.Message.ValidatorIndex) + isBuilder := validatorIndex&services.BuilderIndexFlag != 0 + displayIndex := validatorIndex + if isBuilder { + displayIndex = validatorIndex &^ services.BuilderIndexFlag + } pageData.VoluntaryExits[i] = &models.SlotPageVoluntaryExit{ - ValidatorIndex: uint64(exit.Message.ValidatorIndex), - ValidatorName: services.GlobalBeaconService.GetValidatorName(uint64(exit.Message.ValidatorIndex)), + ValidatorIndex: displayIndex, + ValidatorName: services.GlobalBeaconService.GetValidatorName(validatorIndex), + IsBuilder: isBuilder, Epoch: uint64(exit.Message.Epoch), Signature: exit.Signature[:], } @@ -732,7 +748,73 @@ func getSlotPageBlockData(ctx context.Context, blockData *services.CombinedBlock pageData.SyncAggParticipation = utils.SyncCommitteeParticipation(pageData.SyncAggregateBits, specs.SyncCommitteeSize) } - if executionPayload, _ := blockData.Block.ExecutionPayload(); executionPayload != nil { + if payloadBid, err := blockData.Block.SignedExecutionPayloadBid(); err == nil { + blobKzgCommitments, _ := payloadBid.BlobKZGCommitments() + parentBlockHash, _ := payloadBid.ParentBlockHash() + parentBlockRoot, _ := payloadBid.ParentBlockRoot() + blockHash, _ := payloadBid.BlockHash() + gasLimit, _ := payloadBid.GasLimit() + builderIndex, _ := payloadBid.BuilderIndex() + slot, _ := payloadBid.Slot() + value, _ := payloadBid.Value() + signature, _ := payloadBid.Signature() + + commitments := make([][]byte, len(blobKzgCommitments)) + for i := range 
blobKzgCommitments { + commitments[i] = blobKzgCommitments[i][:] + } + + pageData.PayloadHeader = &models.SlotPagePayloadHeader{ + PayloadStatus: uint16(0), + ParentBlockHash: parentBlockHash[:], + ParentBlockRoot: parentBlockRoot[:], + BlockHash: blockHash[:], + GasLimit: uint64(gasLimit), + BuilderIndex: uint64(builderIndex), + BuilderName: services.GlobalBeaconService.GetValidatorName(uint64(builderIndex) | services.BuilderIndexFlag), + Slot: uint64(slot), + Value: uint64(value), + BlobKZGCommitments: commitments, + Signature: signature[:], + } + } + + var executionPayload *spec.VersionedExecutionPayload + if blockData.Block.Version >= spec.DataVersionGloas && blockData.Payload != nil { + executionPayload = &spec.VersionedExecutionPayload{ + Version: spec.DataVersionGloas, + Gloas: blockData.Payload.Message.Payload, + } + + // Determine payload status by checking if any canonical child + // builds on this block's execution payload. + pageData.PayloadHeader.PayloadStatus = uint16(dbtypes.PayloadStatusCanonical) + childSlots := services.GlobalBeaconService.GetDbBlocksByParentRoot(ctx, blockData.Root) + hasCanonicalChild := false + payloadIncluded := false + + for _, child := range childSlots { + if child.Status != dbtypes.Canonical { + continue + } + + hasCanonicalChild = true + + if bytes.Equal(child.EthBlockParentHash, pageData.PayloadHeader.BlockHash) { + payloadIncluded = true + + break + } + } + + if hasCanonicalChild && !payloadIncluded { + pageData.PayloadHeader.PayloadStatus = uint16(dbtypes.PayloadStatusOrphaned) + } + } else { + executionPayload, _ = blockData.Block.ExecutionPayload() + } + + if executionPayload != nil { pageData.ExecutionData = &models.SlotPageExecutionData{} if parentHash, err := executionPayload.ParentHash(); err == nil { @@ -834,6 +916,36 @@ func getSlotPageBlockData(ctx context.Context, blockData *services.CombinedBlock } } + if specs.DenebForkEpoch != nil && uint64(epoch) >= *specs.DenebForkEpoch { + pageData.BlobsCount = 
uint64(len(blobKzgCommitments)) + pageData.Blobs = make([]*models.SlotPageBlob, pageData.BlobsCount) + for i := range blobKzgCommitments { + blobData := &models.SlotPageBlob{ + Index: uint64(i), + KzgCommitment: blobKzgCommitments[i][:], + } + pageData.Blobs[i] = blobData + } + } + + if specs.ElectraForkEpoch != nil && uint64(epoch) >= *specs.ElectraForkEpoch { + var requests *electra.ExecutionRequests + if blockData.Block.Version >= spec.DataVersionGloas { + if blockData.Payload != nil { + requests = blockData.Payload.Message.ExecutionRequests + executionWithdrawals = blockData.Payload.Message.Payload.Withdrawals + } + } else { + requests, _ = blockData.Block.ExecutionRequests() + } + + if requests != nil { + getSlotPageDepositRequests(pageData, requests.Deposits) + getSlotPageWithdrawalRequests(pageData, requests.Withdrawals) + getSlotPageConsolidationRequests(pageData, requests.Consolidations) + } + } + if specs.CapellaForkEpoch != nil && uint64(epoch) >= *specs.CapellaForkEpoch { pageData.BLSChangesCount = uint64(len(blsToExecChanges)) pageData.BLSChanges = make([]*models.SlotPageBLSChange, pageData.BLSChangesCount) @@ -849,33 +961,83 @@ func getSlotPageBlockData(ctx context.Context, blockData *services.CombinedBlock pageData.WithdrawalsCount = uint64(len(executionWithdrawals)) pageData.Withdrawals = make([]*models.SlotPageWithdrawal, pageData.WithdrawalsCount) + + // Try to get enriched withdrawal data (type + ref slot) from the chain service. + // This works for both cached (unfinalized) and DB (finalized) blocks. 
+ var enrichedWithdrawals []*dbtypes.Withdrawal + if cacheBlock := services.GlobalBeaconService.GetBeaconIndexer().GetBlockByRoot(blockData.Root); cacheBlock != nil { + isCanonical := slices.Contains(services.GlobalBeaconService.GetCanonicalForkKeys(), cacheBlock.GetForkId()) + enrichedWithdrawals = cacheBlock.GetDbWithdrawals(services.GlobalBeaconService.GetBeaconIndexer(), isCanonical) + } + if len(enrichedWithdrawals) == 0 { + dbWithdrawals, _ := db.GetWithdrawalsByBlockUid(ctx, blockUid) + enrichedWithdrawals = dbWithdrawals + } + + // Build a lookup map by block index for enrichment + enrichedMap := make(map[int16]*dbtypes.Withdrawal, len(enrichedWithdrawals)) + for _, ew := range enrichedWithdrawals { + enrichedMap[ew.BlockIdx] = ew + } + + // Batch resolve ref slot block roots + refBlockUids := make([]uint64, 0) + refBlockUidSet := make(map[uint64]bool) + for _, ew := range enrichedWithdrawals { + if ew.RefSlot != nil && !refBlockUidSet[*ew.RefSlot] { + refBlockUidSet[*ew.RefSlot] = true + refBlockUids = append(refBlockUids, *ew.RefSlot) + } + } + refBlockMap := make(map[uint64]*dbtypes.AssignedSlot, len(refBlockUids)) + if len(refBlockUids) > 0 { + refFilter := &dbtypes.BlockFilter{ + BlockUids: refBlockUids, + WithOrphaned: 1, + } + refBlocks := services.GlobalBeaconService.GetDbBlocksByFilter(ctx, refFilter, 0, uint32(len(refBlockUids)), 0) + for _, b := range refBlocks { + if b.Block != nil { + refBlockMap[b.Block.BlockUid] = b + } + } + } + for i, withdrawal := range executionWithdrawals { - pageData.Withdrawals[i] = &models.SlotPageWithdrawal{ + validatorIndex := uint64(withdrawal.ValidatorIndex) + isBuilder := validatorIndex&services.BuilderIndexFlag != 0 + displayIndex := validatorIndex + if isBuilder { + displayIndex = validatorIndex &^ services.BuilderIndexFlag + } + wd := &models.SlotPageWithdrawal{ Index: uint64(withdrawal.Index), - ValidatorIndex: uint64(withdrawal.ValidatorIndex), - ValidatorName: 
services.GlobalBeaconService.GetValidatorName(uint64(withdrawal.ValidatorIndex)), + ValidatorIndex: displayIndex, + ValidatorName: services.GlobalBeaconService.GetValidatorName(validatorIndex), + IsBuilder: isBuilder, Address: withdrawal.Address[:], Amount: uint64(withdrawal.Amount), } - } - } - if specs.DenebForkEpoch != nil && uint64(epoch) >= *specs.DenebForkEpoch { - pageData.BlobsCount = uint64(len(blobKzgCommitments)) - pageData.Blobs = make([]*models.SlotPageBlob, pageData.BlobsCount) - for i := range blobKzgCommitments { - blobData := &models.SlotPageBlob{ - Index: uint64(i), - KzgCommitment: blobKzgCommitments[i][:], + // Enrich with type and ref slot from chain service data + if enriched, ok := enrichedMap[int16(i)]; ok { + wd.Type = enriched.Type + if enriched.RefSlot != nil { + wd.RefSlot = *enriched.RefSlot >> 16 + if refBlock, ok := refBlockMap[*enriched.RefSlot]; ok && refBlock.Block != nil { + wd.RefSlotRoot = refBlock.Block.Root + } + } } - pageData.Blobs[i] = blobData + + pageData.Withdrawals[i] = wd } } - if requests, err := blockData.Block.ExecutionRequests(); err == nil && requests != nil { - getSlotPageDepositRequests(pageData, requests.Deposits) - getSlotPageWithdrawalRequests(pageData, requests.Withdrawals) - getSlotPageConsolidationRequests(pageData, requests.Consolidations) + // Load execution payload bids for ePBS (gloas+) blocks + if blockData.Block.Version >= spec.DataVersionGloas { + getSlotPageBids(pageData) + getSlotPagePtcVotes(pageData, blockData, blockData.Header.Message.Slot) } return pageData @@ -1028,8 +1190,14 @@ func getSlotPageDepositRequests(pageData *models.SlotPageBlockData, depositReque if validatorIdx, found := services.GlobalBeaconService.GetValidatorIndexByPubkey(phase0.BLSPubKey(depositRequest.Pubkey)); found { receiptData.Exists = true - receiptData.ValidatorIndex = uint64(validatorIdx) - receiptData.ValidatorName = services.GlobalBeaconService.GetValidatorName(receiptData.ValidatorIndex) + rawIndex := 
uint64(validatorIdx) + if rawIndex&services.BuilderIndexFlag != 0 { + receiptData.IsBuilder = true + receiptData.ValidatorIndex = rawIndex &^ services.BuilderIndexFlag + } else { + receiptData.ValidatorIndex = rawIndex + } + receiptData.ValidatorName = services.GlobalBeaconService.GetValidatorName(rawIndex) } pageData.DepositRequests = append(pageData.DepositRequests, receiptData) @@ -1050,8 +1218,14 @@ func getSlotPageWithdrawalRequests(pageData *models.SlotPageBlockData, withdrawa if validatorIdx, found := services.GlobalBeaconService.GetValidatorIndexByPubkey(phase0.BLSPubKey(withdrawalRequest.ValidatorPubkey)); found { requestData.Exists = true - requestData.ValidatorIndex = uint64(validatorIdx) - requestData.ValidatorName = services.GlobalBeaconService.GetValidatorName(requestData.ValidatorIndex) + fullIndex := uint64(validatorIdx) + if fullIndex&services.BuilderIndexFlag != 0 { + requestData.IsBuilder = true + requestData.ValidatorIndex = fullIndex &^ services.BuilderIndexFlag + } else { + requestData.ValidatorIndex = fullIndex + } + requestData.ValidatorName = services.GlobalBeaconService.GetValidatorName(fullIndex) } pageData.WithdrawalRequests = append(pageData.WithdrawalRequests, requestData) @@ -1072,14 +1246,26 @@ func getSlotPageConsolidationRequests(pageData *models.SlotPageBlockData, consol if sourceValidatorIdx, found := services.GlobalBeaconService.GetValidatorIndexByPubkey(phase0.BLSPubKey(consolidationRequest.SourcePubkey)); found { requestData.SourceFound = true - requestData.SourceIndex = uint64(sourceValidatorIdx) - requestData.SourceName = services.GlobalBeaconService.GetValidatorName(requestData.SourceIndex) + fullIndex := uint64(sourceValidatorIdx) + if fullIndex&services.BuilderIndexFlag != 0 { + requestData.SourceIsBuilder = true + requestData.SourceIndex = fullIndex &^ services.BuilderIndexFlag + } else { + requestData.SourceIndex = fullIndex + } + requestData.SourceName = services.GlobalBeaconService.GetValidatorName(fullIndex) } if 
targetValidatorIdx, found := services.GlobalBeaconService.GetValidatorIndexByPubkey(phase0.BLSPubKey(consolidationRequest.TargetPubkey)); found { requestData.TargetFound = true - requestData.TargetIndex = uint64(targetValidatorIdx) - requestData.TargetName = services.GlobalBeaconService.GetValidatorName(requestData.TargetIndex) + fullIndex := uint64(targetValidatorIdx) + if fullIndex&services.BuilderIndexFlag != 0 { + requestData.TargetIsBuilder = true + requestData.TargetIndex = fullIndex &^ services.BuilderIndexFlag + } else { + requestData.TargetIndex = fullIndex + } + requestData.TargetName = services.GlobalBeaconService.GetValidatorName(fullIndex) } pageData.ConsolidationRequests = append(pageData.ConsolidationRequests, requestData) @@ -1087,3 +1273,219 @@ func getSlotPageConsolidationRequests(pageData *models.SlotPageBlockData, consol pageData.ConsolidationRequestsCount = uint64(len(pageData.ConsolidationRequests)) } + +func getSlotPageBids(pageData *models.SlotPageBlockData) { + beaconIndexer := services.GlobalBeaconService.GetBeaconIndexer() + bids := beaconIndexer.GetBlockBids(phase0.Root(pageData.ParentRoot)) + + pageData.Bids = make([]*models.SlotPageBid, 0, len(bids)) + + // Get the winning block hash for comparison + var winningBlockHash []byte + if pageData.ExecutionData != nil { + winningBlockHash = pageData.ExecutionData.BlockHash + } + + for _, bid := range bids { + bidData := &models.SlotPageBid{ + ParentRoot: bid.ParentRoot, + ParentHash: bid.ParentHash, + BlockHash: bid.BlockHash, + FeeRecipient: bid.FeeRecipient, + GasLimit: bid.GasLimit, + BuilderIndex: uint64(bid.BuilderIndex), + BuilderName: services.GlobalBeaconService.GetValidatorName(uint64(bid.BuilderIndex)), + IsSelfBuilt: bid.BuilderIndex < 0, + Slot: bid.Slot, + Value: bid.Value, + ElPayment: bid.ElPayment, + TotalValue: bid.Value + bid.ElPayment, + } + + // Check if this is the winning bid + if winningBlockHash != nil && len(bid.BlockHash) == len(winningBlockHash) { + isWinning := 
true + for i := range bid.BlockHash { + if bid.BlockHash[i] != winningBlockHash[i] { + isWinning = false + break + } + } + bidData.IsWinning = isWinning + } + + pageData.Bids = append(pageData.Bids, bidData) + } + + // Sort by total value (value + el_payment) descending + for i := 0; i < len(pageData.Bids)-1; i++ { + for j := i + 1; j < len(pageData.Bids); j++ { + if pageData.Bids[j].TotalValue > pageData.Bids[i].TotalValue { + pageData.Bids[i], pageData.Bids[j] = pageData.Bids[j], pageData.Bids[i] + } + } + } + + pageData.BidsCount = uint64(len(pageData.Bids)) +} + +// getSlotPagePtcVotes extracts PTC (Payload Timeliness Committee) votes from a Gloas block. +// PTC votes are included in blocks as payload attestations for the PREVIOUS slot. +func getSlotPagePtcVotes(pageData *models.SlotPageBlockData, blockData *services.CombinedBlockResponse, blockSlot phase0.Slot) { + // Only Gloas+ blocks have payload attestations + if blockData.Block.Version < spec.DataVersionGloas || blockData.Block.Gloas == nil { + return + } + + payloadAttestations := blockData.Block.Gloas.Message.Body.PayloadAttestations + if len(payloadAttestations) == 0 { + return + } + + chainState := services.GlobalBeaconService.GetChainState() + specs := chainState.GetSpecs() + + // PTC votes are for the previous slot + votedSlot := blockSlot - 1 + votedEpoch := chainState.EpochOfSlot(votedSlot) + + // Get epoch stats for the voted slot to retrieve PTC duties + var ptcDuties []phase0.ValidatorIndex + beaconIndexer := services.GlobalBeaconService.GetBeaconIndexer() + epochStats := beaconIndexer.GetEpochStatsByEpoch(votedEpoch) + for _, es := range epochStats { + values := es.GetValues(true) + if values != nil && values.PtcDuties != nil { + slotInEpoch := uint64(votedSlot) % specs.SlotsPerEpoch + if slotInEpoch < uint64(len(values.PtcDuties)) && values.PtcDuties[slotInEpoch] != nil { + // Convert from active indice indices to validator indices + ptcDuties = make([]phase0.ValidatorIndex, 
len(values.PtcDuties[slotInEpoch])) + for i, activeIdx := range values.PtcDuties[slotInEpoch] { + if int(activeIdx) < len(values.ActiveIndices) { + ptcDuties[i] = values.ActiveIndices[activeIdx] + } + } + break + } + } + } + + // PTC_SIZE is a spec constant (512). The Bitvector is always PTC_SIZE bits. + // On small validator sets, validators appear multiple times in PTC duties + // via weighted selection, but voting is tracked by bit position. + ptcSize := specs.PtcSize + + // Build PTC votes structure + ptcVotes := &models.SlotPagePtcVotes{ + VotedSlot: uint64(votedSlot), + TotalPtcSize: ptcSize, + Aggregates: make([]*models.SlotPagePtcAggregate, 0, len(payloadAttestations)), + } + + // Track voted bit positions across all aggregates + votedPositions := make(map[uint64]bool, ptcSize) + totalVotes := uint64(0) + + for _, pa := range payloadAttestations { + if pa == nil || pa.Data == nil { + continue + } + + // Set voted block root from first attestation + if ptcVotes.VotedBlockRoot == nil { + ptcVotes.VotedBlockRoot = pa.Data.BeaconBlockRoot[:] + } + + aggregate := &models.SlotPagePtcAggregate{ + PayloadPresent: pa.Data.PayloadPresent, + BlobDataAvailable: pa.Data.BlobDataAvailable, + AggregationBits: pa.AggregationBits, + Signature: pa.Signature[:], + Validators: make([]types.NamedValidator, 0), + } + + // Count votes from aggregation bits and map to unique validators + bitCount := uint64(len(pa.AggregationBits)) * 8 + if bitCount > ptcSize { + bitCount = ptcSize + } + aggValidatorSet := make(map[uint64]bool) + for i := uint64(0); i < bitCount; i++ { + if (pa.AggregationBits[i/8]>>(i%8))&1 == 1 { + votedPositions[i] = true + if int(i) < len(ptcDuties) { + vidx := uint64(ptcDuties[i]) + if !aggValidatorSet[vidx] { + aggValidatorSet[vidx] = true + aggregate.Validators = append(aggregate.Validators, types.NamedValidator{ + Index: vidx, + Name: services.GlobalBeaconService.GetValidatorName(vidx), + }) + } + } + } + } + + aggregate.VoteCount = 
uint64(len(aggregate.Validators)) + totalVotes += aggregate.VoteCount + + ptcVotes.Aggregates = append(ptcVotes.Aggregates, aggregate) + } + + // Calculate participation by unique validators, not bit positions. + // On small validator sets, the same validator occupies multiple PTC positions. + // A validator votes at their first PTC position only (ptc.index()), leaving + // duplicate positions unset. Count unique voters/non-voters for display. + voterSet := make(map[uint64]bool) + if len(ptcDuties) > 0 { + for i := range ptcDuties { + if votedPositions[uint64(i)] { + voterSet[uint64(ptcDuties[i])] = true + } + } + + // Non-voters: unique validators with NO voted position at all + nonVoterSet := make(map[uint64]bool) + for _, vidx := range ptcDuties { + v := uint64(vidx) + if !voterSet[v] { + nonVoterSet[v] = true + } + } + nonVoters := make([]types.NamedValidator, 0, len(nonVoterSet)) + for vidx := range nonVoterSet { + nonVoters = append(nonVoters, types.NamedValidator{ + Index: vidx, + Name: services.GlobalBeaconService.GetValidatorName(vidx), + }) + } + ptcVotes.NonVoters = nonVoters + ptcVotes.NonVoterCount = uint64(len(nonVoters)) + } + + // Calculate participation rate based on unique validators + totalUniqueValidators := uint64(len(voterSet)) + ptcVotes.NonVoterCount + if totalUniqueValidators > 0 { + ptcVotes.TotalPtcSize = totalUniqueValidators + ptcVotes.Participation = float64(len(voterSet)) / float64(totalUniqueValidators) + ptcVotes.NonVoterPercent = float64(ptcVotes.NonVoterCount) / float64(totalUniqueValidators) * 100 + + // Recalculate aggregate vote percentages based on unique validators + for _, agg := range ptcVotes.Aggregates { + agg.VotePercent = float64(agg.VoteCount) / float64(totalUniqueValidators) * 100 + } + } else if ptcSize > 0 { + // No duties available, use bit positions as approximation + totalVoted := uint64(len(votedPositions)) + ptcVotes.NonVoterCount = ptcSize - totalVoted + ptcVotes.Participation = float64(totalVoted) / 
float64(ptcSize) + ptcVotes.NonVoterPercent = float64(ptcVotes.NonVoterCount) / float64(ptcSize) * 100 + + for _, agg := range ptcVotes.Aggregates { + agg.VotePercent = float64(agg.VoteCount) / float64(ptcSize) * 100 + } + } + + pageData.PtcVotes = ptcVotes + pageData.PtcVotesCount = totalVotes +} diff --git a/handlers/slot_download.go b/handlers/slot_download.go index ec6b9288b..a905732e1 100644 --- a/handlers/slot_download.go +++ b/handlers/slot_download.go @@ -6,13 +6,13 @@ import ( "fmt" "net/http" - "github.com/attestantio/go-eth2-client/spec/bellatrix" - "github.com/attestantio/go-eth2-client/spec/phase0" ethtypes "github.com/ethereum/go-ethereum/core/types" "github.com/ethpandaops/dora/blockdb" bdbtypes "github.com/ethpandaops/dora/blockdb/types" "github.com/ethpandaops/dora/indexer/beacon" "github.com/ethpandaops/dora/services" + "github.com/ethpandaops/go-eth2-client/spec/bellatrix" + "github.com/ethpandaops/go-eth2-client/spec/phase0" "github.com/golang/snappy" dynssz "github.com/pk910/dynamic-ssz" ) diff --git a/handlers/slots.go b/handlers/slots.go index 8daa368fb..155d27458 100644 --- a/handlers/slots.go +++ b/handlers/slots.go @@ -11,7 +11,6 @@ import ( "strings" "time" - "github.com/attestantio/go-eth2-client/spec/phase0" "github.com/ethpandaops/dora/db" "github.com/ethpandaops/dora/dbtypes" "github.com/ethpandaops/dora/indexer/beacon" @@ -19,6 +18,7 @@ import ( "github.com/ethpandaops/dora/templates" "github.com/ethpandaops/dora/types/models" "github.com/ethpandaops/dora/utils" + "github.com/ethpandaops/go-eth2-client/spec/phase0" "github.com/sirupsen/logrus" ) @@ -119,6 +119,7 @@ func buildSlotsPageData(ctx context.Context, firstSlot uint64, pageSize uint64, 17: false, 18: !hasSnooperClients, // Disable receive delay if snooper clients exist 19: hasSnooperClients, // Enable exec time if snooper clients exist + 20: false, // Builder (hidden by default) } } @@ -153,15 +154,13 @@ func buildSlotsPageData(ctx context.Context, firstSlot uint64, pageSize 
uint64, pageData.DisplayBlockSize = displayMap[17] pageData.DisplayRecvDelay = displayMap[18] pageData.DisplayExecTime = displayMap[19] + pageData.DisplayBuilder = displayMap[20] pageData.DisplayColCount = uint64(len(displayMap)) chainState := services.GlobalBeaconService.GetChainState() currentSlot := chainState.CurrentSlot() currentEpoch := chainState.EpochOfSlot(currentSlot) maxSlot := currentSlot + 8 - if maxSlot >= chainState.EpochToSlot(currentEpoch+1) { - maxSlot = chainState.EpochToSlot(currentEpoch+1) - 1 - } if firstSlot > uint64(maxSlot) { pageData.IsDefaultPage = true firstSlot = uint64(maxSlot) @@ -255,12 +254,19 @@ func buildSlotsPageData(ctx context.Context, firstSlot uint64, pageSize uint64, dbSlot := dbSlots[dbIdx] dbIdx++ + epoch := chainState.EpochOfSlot(phase0.Slot(slot)) + payloadStatus := dbSlot.PayloadStatus + if !chainState.IsEip7732Enabled(phase0.Epoch(epoch)) { + payloadStatus = dbtypes.PayloadStatusCanonical + } + slotData := &models.SlotsPageDataSlot{ Slot: slot, - Epoch: uint64(chainState.EpochOfSlot(phase0.Slot(slot))), + Epoch: uint64(epoch), Ts: chainState.SlotToTime(phase0.Slot(slot)), Finalized: finalized, Status: uint8(dbSlot.Status), + PayloadStatus: uint8(payloadStatus), Scheduled: slot >= uint64(currentSlot) && dbSlot.Status == dbtypes.Missing, Synchronized: dbSlot.SyncParticipation != -1, Proposer: dbSlot.Proposer, @@ -303,6 +309,18 @@ func buildSlotsPageData(ctx context.Context, firstSlot uint64, pageSize uint64, } } + // Add builder info + if pageData.DisplayBuilder { + if dbSlot.BuilderIndex == -1 { + slotData.HasBuilder = true + slotData.BuilderIndex = math.MaxUint64 + } else if dbSlot.BuilderIndex >= 0 { + slotData.HasBuilder = true + slotData.BuilderIndex = uint64(dbSlot.BuilderIndex) + slotData.BuilderName = services.GlobalBeaconService.GetValidatorName(uint64(dbSlot.BuilderIndex) | services.BuilderIndexFlag) + } + } + // Add execution times if available if pageData.DisplayExecTime && dbSlot.MinExecTime > 0 && 
dbSlot.MaxExecTime > 0 { slotData.MinExecTime = dbSlot.MinExecTime diff --git a/handlers/slots_filtered.go b/handlers/slots_filtered.go index 2cdcf8a07..a466c753e 100644 --- a/handlers/slots_filtered.go +++ b/handlers/slots_filtered.go @@ -3,12 +3,12 @@ package handlers import ( "context" "fmt" + "math" "net/http" "net/url" "strconv" "strings" - "github.com/attestantio/go-eth2-client/spec/phase0" "github.com/ethpandaops/dora/db" "github.com/ethpandaops/dora/dbtypes" "github.com/ethpandaops/dora/indexer/beacon" @@ -16,6 +16,7 @@ import ( "github.com/ethpandaops/dora/templates" "github.com/ethpandaops/dora/types/models" "github.com/ethpandaops/dora/utils" + "github.com/ethpandaops/go-eth2-client/spec/phase0" "github.com/sirupsen/logrus" ) @@ -50,8 +51,8 @@ func SlotsFiltered(w http.ResponseWriter, r *http.Request) { var invertgraffiti bool var invertextradata bool var invertproposer bool - var withOrphaned uint64 - var withMissing uint64 + var statusMask uint64 = 0x07 + var payloadMask uint64 = 0x07 var minSyncAgg string var maxSyncAgg string var minExecTime string @@ -63,6 +64,7 @@ func SlotsFiltered(w http.ResponseWriter, r *http.Request) { var forkIds string var minEpoch string var maxEpoch string + var builder string if urlArgs.Has("f") { if urlArgs.Has("f.graffiti") { @@ -86,11 +88,11 @@ func SlotsFiltered(w http.ResponseWriter, r *http.Request) { if urlArgs.Has("f.pinvert") { invertproposer = urlArgs.Get("f.pinvert") == "on" } - if urlArgs.Has("f.orphaned") { - withOrphaned, _ = strconv.ParseUint(urlArgs.Get("f.orphaned"), 10, 64) + if urlArgs.Has("f.status") { + statusMask, _ = strconv.ParseUint(urlArgs.Get("f.status"), 0, 64) } - if urlArgs.Has("f.missing") { - withMissing, _ = strconv.ParseUint(urlArgs.Get("f.missing"), 10, 64) + if urlArgs.Has("f.pstatus") { + payloadMask, _ = strconv.ParseUint(urlArgs.Get("f.pstatus"), 0, 64) } if urlArgs.Has("f.minsync") { minSyncAgg = urlArgs.Get("f.minsync") @@ -125,14 +127,14 @@ func SlotsFiltered(w 
http.ResponseWriter, r *http.Request) { if urlArgs.Has("f.maxepoch") { maxEpoch = urlArgs.Get("f.maxepoch") } - } else { - withOrphaned = 1 - withMissing = 1 + if urlArgs.Has("f.builder") { + builder = urlArgs.Get("f.builder") + } } var pageError error pageError = services.GlobalCallRateLimiter.CheckCallLimit(r, 2) if pageError == nil { - data.Data, pageError = getFilteredSlotsPageData(pageIdx, pageSize, graffiti, invertgraffiti, extradata, invertextradata, proposer, pname, invertproposer, uint8(withOrphaned), uint8(withMissing), minSyncAgg, maxSyncAgg, minExecTime, maxExecTime, minTxCount, maxTxCount, minBlobCount, maxBlobCount, forkIds, minEpoch, maxEpoch, displayColumns) + data.Data, pageError = getFilteredSlotsPageData(pageIdx, pageSize, graffiti, invertgraffiti, extradata, invertextradata, proposer, pname, invertproposer, uint8(statusMask), uint8(payloadMask), minSyncAgg, maxSyncAgg, minExecTime, maxExecTime, minTxCount, maxTxCount, minBlobCount, maxBlobCount, forkIds, minEpoch, maxEpoch, builder, displayColumns) } if pageError != nil { handlePageError(w, r, pageError) @@ -144,11 +146,11 @@ func SlotsFiltered(w http.ResponseWriter, r *http.Request) { } } -func getFilteredSlotsPageData(pageIdx uint64, pageSize uint64, graffiti string, invertgraffiti bool, extradata string, invertextradata bool, proposer string, pname string, invertproposer bool, withOrphaned uint8, withMissing uint8, minSyncAgg string, maxSyncAgg string, minExecTime string, maxExecTime string, minTxCount string, maxTxCount string, minBlobCount string, maxBlobCount string, forkIds string, minEpoch string, maxEpoch string, displayColumns uint64) (*models.SlotsFilteredPageData, error) { +func getFilteredSlotsPageData(pageIdx uint64, pageSize uint64, graffiti string, invertgraffiti bool, extradata string, invertextradata bool, proposer string, pname string, invertproposer bool, statusMask uint8, payloadMask uint8, minSyncAgg string, maxSyncAgg string, minExecTime string, maxExecTime string, 
minTxCount string, maxTxCount string, minBlobCount string, maxBlobCount string, forkIds string, minEpoch string, maxEpoch string, builder string, displayColumns uint64) (*models.SlotsFilteredPageData, error) { pageData := &models.SlotsFilteredPageData{} - pageCacheKey := fmt.Sprintf("slots_filtered:%v:%v:%v:%v:%v:%v:%v:%v:%v:%v:%v:%v:%v:%v:%v:%v:%v:%v:%v:%v:%v:%v:%v", pageIdx, pageSize, graffiti, invertgraffiti, extradata, invertextradata, proposer, pname, invertproposer, withOrphaned, withMissing, minSyncAgg, maxSyncAgg, minExecTime, maxExecTime, minTxCount, maxTxCount, minBlobCount, maxBlobCount, forkIds, minEpoch, maxEpoch, displayColumns) + pageCacheKey := fmt.Sprintf("slots_filtered:%v:%v:%v:%v:%v:%v:%v:%v:%v:%v:%v:%v:%v:%v:%v:%v:%v:%v:%v:%v:%v:%v:%v:%v", pageIdx, pageSize, graffiti, invertgraffiti, extradata, invertextradata, proposer, pname, invertproposer, statusMask, payloadMask, minSyncAgg, maxSyncAgg, minExecTime, maxExecTime, minTxCount, maxTxCount, minBlobCount, maxBlobCount, forkIds, minEpoch, maxEpoch, builder, displayColumns) pageRes, pageErr := services.GlobalFrontendCache.ProcessCachedPage(pageCacheKey, true, pageData, func(pageCall *services.FrontendCacheProcessingPage) interface{} { - return buildFilteredSlotsPageData(pageCall.CallCtx, pageIdx, pageSize, graffiti, invertgraffiti, extradata, invertextradata, proposer, pname, invertproposer, withOrphaned, withMissing, minSyncAgg, maxSyncAgg, minExecTime, maxExecTime, minTxCount, maxTxCount, minBlobCount, maxBlobCount, forkIds, minEpoch, maxEpoch, displayColumns) + return buildFilteredSlotsPageData(pageCall.CallCtx, pageIdx, pageSize, graffiti, invertgraffiti, extradata, invertextradata, proposer, pname, invertproposer, statusMask, payloadMask, minSyncAgg, maxSyncAgg, minExecTime, maxExecTime, minTxCount, maxTxCount, minBlobCount, maxBlobCount, forkIds, minEpoch, maxEpoch, builder, displayColumns) }) if pageErr == nil && pageRes != nil { resData, resOk := pageRes.(*models.SlotsFilteredPageData) @@ 
-160,7 +162,7 @@ func getFilteredSlotsPageData(pageIdx uint64, pageSize uint64, graffiti string, return pageData, pageErr } -func buildFilteredSlotsPageData(ctx context.Context, pageIdx uint64, pageSize uint64, graffiti string, invertgraffiti bool, extradata string, invertextradata bool, proposer string, pname string, invertproposer bool, withOrphaned uint8, withMissing uint8, minSyncAgg string, maxSyncAgg string, minExecTime string, maxExecTime string, minTxCount string, maxTxCount string, minBlobCount string, maxBlobCount string, forkIds string, minEpoch string, maxEpoch string, displayColumns uint64) *models.SlotsFilteredPageData { +func buildFilteredSlotsPageData(ctx context.Context, pageIdx uint64, pageSize uint64, graffiti string, invertgraffiti bool, extradata string, invertextradata bool, proposer string, pname string, invertproposer bool, statusMask uint8, payloadMask uint8, minSyncAgg string, maxSyncAgg string, minExecTime string, maxExecTime string, minTxCount string, maxTxCount string, minBlobCount string, maxBlobCount string, forkIds string, minEpoch string, maxEpoch string, builder string, displayColumns uint64) *models.SlotsFilteredPageData { chainState := services.GlobalBeaconService.GetChainState() filterArgs := url.Values{} if graffiti != "" { @@ -184,11 +186,11 @@ func buildFilteredSlotsPageData(ctx context.Context, pageIdx uint64, pageSize ui if invertproposer { filterArgs.Add("f.pinvert", "on") } - if withOrphaned != 0 { - filterArgs.Add("f.orphaned", fmt.Sprintf("%v", withOrphaned)) + if statusMask != 0x07 { + filterArgs.Add("f.status", fmt.Sprintf("0x%x", statusMask)) } - if withMissing != 0 { - filterArgs.Add("f.missing", fmt.Sprintf("%v", withMissing)) + if payloadMask != 0x07 { + filterArgs.Add("f.pstatus", fmt.Sprintf("0x%x", payloadMask)) } if minSyncAgg != "" { filterArgs.Add("f.minsync", minSyncAgg) @@ -223,6 +225,9 @@ func buildFilteredSlotsPageData(ctx context.Context, pageIdx uint64, pageSize ui if maxEpoch != "" { 
filterArgs.Add("f.maxepoch", maxEpoch) } + if builder != "" { + filterArgs.Add("f.builder", builder) + } // Check if snooper clients are configured hasSnooperClients := false @@ -270,27 +275,53 @@ func buildFilteredSlotsPageData(ctx context.Context, pageIdx uint64, pageSize ui filterArgs.Add("d", fmt.Sprintf("0x%x", displayMask)) } + // Map statusMask to WithOrphaned/WithMissing for DB filter + hasMissing := statusMask&0x01 != 0 + hasCanonical := statusMask&0x02 != 0 + hasOrphaned := statusMask&0x04 != 0 + + var withMissing, withOrphaned uint8 + if !hasMissing { + withMissing = 0 + } else if !hasCanonical && !hasOrphaned { + withMissing = 2 + } else { + withMissing = 1 + } + if !hasOrphaned { + withOrphaned = 0 + } else if !hasCanonical && !hasMissing { + withOrphaned = 2 + } else { + withOrphaned = 1 + } + pageData := &models.SlotsFilteredPageData{ - FilterGraffiti: graffiti, - FilterExtraData: extradata, - FilterProposer: proposer, - FilterProposerName: pname, - FilterInvertGraffiti: invertgraffiti, - FilterInvertExtraData: invertextradata, - FilterInvertProposer: invertproposer, - FilterWithOrphaned: withOrphaned, - FilterWithMissing: withMissing, - FilterMinSyncAgg: minSyncAgg, - FilterMaxSyncAgg: maxSyncAgg, - FilterMinExecTime: minExecTime, - FilterMaxExecTime: maxExecTime, - FilterMinTxCount: minTxCount, - FilterMaxTxCount: maxTxCount, - FilterMinBlobCount: minBlobCount, - FilterMaxBlobCount: maxBlobCount, - FilterForkIds: forkIds, - FilterMinEpoch: minEpoch, - FilterMaxEpoch: maxEpoch, + FilterGraffiti: graffiti, + FilterExtraData: extradata, + FilterProposer: proposer, + FilterProposerName: pname, + FilterInvertGraffiti: invertgraffiti, + FilterInvertExtraData: invertextradata, + FilterInvertProposer: invertproposer, + FilterStatusMissing: hasMissing, + FilterStatusCanonical: hasCanonical, + FilterStatusOrphaned: hasOrphaned, + FilterPayloadMissing: payloadMask&0x01 != 0, + FilterPayloadCanonical: payloadMask&0x02 != 0, + FilterPayloadOrphaned: 
payloadMask&0x04 != 0, + FilterMinSyncAgg: minSyncAgg, + FilterMaxSyncAgg: maxSyncAgg, + FilterMinExecTime: minExecTime, + FilterMaxExecTime: maxExecTime, + FilterMinTxCount: minTxCount, + FilterMaxTxCount: maxTxCount, + FilterMinBlobCount: minBlobCount, + FilterMaxBlobCount: maxBlobCount, + FilterForkIds: forkIds, + FilterMinEpoch: minEpoch, + FilterMaxEpoch: maxEpoch, + FilterBuilder: builder, DisplayEpoch: displayMap[1], DisplaySlot: displayMap[2], @@ -310,6 +341,7 @@ func buildFilteredSlotsPageData(ctx context.Context, pageIdx uint64, pageSize ui DisplayBlockSize: displayMap[16], DisplayRecvDelay: displayMap[17], DisplayExecTime: displayMap[18], + DisplayBuilder: displayMap[19], DisplayColCount: uint64(len(displayMap)), HasSnooperClients: hasSnooperClients, @@ -346,6 +378,7 @@ func buildFilteredSlotsPageData(ctx context.Context, pageIdx uint64, pageSize ui InvertProposer: invertproposer, WithOrphaned: withOrphaned, WithMissing: withMissing, + WithPayloadMask: dbtypes.PayloadStatusMask(payloadMask), } if proposer != "" { pidx, _ := strconv.ParseUint(proposer, 10, 64) @@ -435,13 +468,14 @@ func buildFilteredSlotsPageData(ctx context.Context, pageIdx uint64, pageSize ui blockFilter.MaxEpoch = &maxEp } } - - withScheduledCount := chainState.GetSpecs().SlotsPerEpoch - uint64(chainState.SlotToSlotIndex(currentSlot)) - 1 - if withScheduledCount > 16 { - withScheduledCount = 16 + if builder != "" { + builderIdx, err := strconv.ParseInt(builder, 10, 64) + if err == nil { + blockFilter.BuilderIndex = &builderIdx + } } - dbBlocks := services.GlobalBeaconService.GetDbBlocksByFilter(ctx, blockFilter, pageIdx, uint32(pageSize), withScheduledCount) + dbBlocks := services.GlobalBeaconService.GetDbBlocksByFilter(ctx, blockFilter, pageIdx, uint32(pageSize), 16) mevBlocksMap := make(map[string]*dbtypes.MevBlock) if pageData.DisplayMevBlock { @@ -465,12 +499,13 @@ func buildFilteredSlotsPageData(ctx context.Context, pageIdx uint64, pageSize ui break } slot := 
phase0.Slot(dbBlock.Slot) + epoch := chainState.EpochOfSlot(slot) slotData := &models.SlotsFilteredPageDataSlot{ Slot: uint64(slot), - Epoch: uint64(chainState.EpochOfSlot(slot)), + Epoch: uint64(epoch), Ts: chainState.SlotToTime(slot), - Finalized: finalizedEpoch >= chainState.EpochOfSlot(slot), + Finalized: finalizedEpoch >= epoch, Synchronized: true, Scheduled: slot >= currentSlot, Proposer: dbBlock.Proposer, @@ -502,6 +537,12 @@ func buildFilteredSlotsPageData(ctx context.Context, pageIdx uint64, pageSize ui slotData.EthBlockNumber = *dbBlock.Block.EthBlockNumber } + payloadStatus := dbBlock.Block.PayloadStatus + if !chainState.IsEip7732Enabled(epoch) { + payloadStatus = dbtypes.PayloadStatusCanonical + } + slotData.PayloadStatus = uint8(payloadStatus) + if pageData.DisplayMevBlock && dbBlock.Block.EthBlockHash != nil { if mevBlock, exists := mevBlocksMap[fmt.Sprintf("%x", dbBlock.Block.EthBlockHash)]; exists { slotData.IsMevBlock = true @@ -517,6 +558,18 @@ func buildFilteredSlotsPageData(ctx context.Context, pageIdx uint64, pageSize ui } } + // Add builder info + if pageData.DisplayBuilder { + if dbBlock.Block.BuilderIndex == -1 { + slotData.HasBuilder = true + slotData.BuilderIndex = math.MaxUint64 // Self-built sentinel + } else if dbBlock.Block.BuilderIndex >= 0 { + slotData.HasBuilder = true + slotData.BuilderIndex = uint64(dbBlock.Block.BuilderIndex) + slotData.BuilderName = services.GlobalBeaconService.GetValidatorName(uint64(dbBlock.Block.BuilderIndex) | services.BuilderIndexFlag) + } + } + // Add execution times if available if pageData.DisplayExecTime && dbBlock.Block.MinExecTime > 0 && dbBlock.Block.MaxExecTime > 0 { slotData.MinExecTime = dbBlock.Block.MinExecTime diff --git a/handlers/submit_consolidation.go b/handlers/submit_consolidation.go index be07a297e..f5669465a 100644 --- a/handlers/submit_consolidation.go +++ b/handlers/submit_consolidation.go @@ -12,9 +12,9 @@ import ( "strings" "time" - v1 "github.com/attestantio/go-eth2-client/api/v1" 
- "github.com/attestantio/go-eth2-client/spec/phase0" "github.com/ethereum/go-ethereum/common" + v1 "github.com/ethpandaops/go-eth2-client/api/v1" + "github.com/ethpandaops/go-eth2-client/spec/phase0" "github.com/sirupsen/logrus" "github.com/ethpandaops/dora/clients/consensus" diff --git a/handlers/submit_deposit.go b/handlers/submit_deposit.go index 72fc8eef4..7b2932b83 100644 --- a/handlers/submit_deposit.go +++ b/handlers/submit_deposit.go @@ -13,8 +13,8 @@ import ( "strings" "time" - v1 "github.com/attestantio/go-eth2-client/api/v1" "github.com/ethereum/go-ethereum/common" + v1 "github.com/ethpandaops/go-eth2-client/api/v1" "github.com/sirupsen/logrus" "github.com/ethpandaops/dora/db" diff --git a/handlers/submit_withdrawal.go b/handlers/submit_withdrawal.go index 8f958ddd1..9cbd473ff 100644 --- a/handlers/submit_withdrawal.go +++ b/handlers/submit_withdrawal.go @@ -9,9 +9,9 @@ import ( "strings" "time" - v1 "github.com/attestantio/go-eth2-client/api/v1" - "github.com/attestantio/go-eth2-client/spec/phase0" "github.com/ethereum/go-ethereum/common" + v1 "github.com/ethpandaops/go-eth2-client/api/v1" + "github.com/ethpandaops/go-eth2-client/spec/phase0" "github.com/sirupsen/logrus" "github.com/ethpandaops/dora/clients/execution/rpc" diff --git a/handlers/transaction.go b/handlers/transaction.go index 5865b69e1..eb5a86d58 100644 --- a/handlers/transaction.go +++ b/handlers/transaction.go @@ -12,9 +12,9 @@ import ( "strings" "time" - "github.com/attestantio/go-eth2-client/spec/phase0" "github.com/ethereum/go-ethereum/common" ethtypes "github.com/ethereum/go-ethereum/core/types" + "github.com/ethpandaops/go-eth2-client/spec/phase0" "github.com/golang/snappy" "github.com/gorilla/mux" dynssz "github.com/pk910/dynamic-ssz" diff --git a/handlers/validator.go b/handlers/validator.go index aa4976152..0f0999c70 100644 --- a/handlers/validator.go +++ b/handlers/validator.go @@ -11,9 +11,9 @@ import ( "strings" "time" - v1 "github.com/attestantio/go-eth2-client/api/v1" - 
"github.com/attestantio/go-eth2-client/spec/phase0" "github.com/ethereum/go-ethereum/common" + v1 "github.com/ethpandaops/go-eth2-client/api/v1" + "github.com/ethpandaops/go-eth2-client/spec/phase0" "github.com/gorilla/mux" "github.com/sirupsen/logrus" @@ -35,7 +35,7 @@ func Validator(w http.ResponseWriter, r *http.Request) { "validator/withdrawalRequests.html", "validator/consolidationRequests.html", "validator/withdrawals.html", - "validator/txDetails.html", + "_shared/txDetailsModal.html", "_svg/timeline.html", ) var notfoundTemplateFiles = append(layoutTemplateFiles, @@ -597,7 +597,8 @@ func buildValidatorPageData(ctx context.Context, validatorIndex uint64, tabView // load recent withdrawals (beacon chain withdrawals) if pageData.TabView == "withdrawals" { withdrawalFilter := &dbtypes.WithdrawalFilter{ - Validator: &validatorIndex, + MinIndex: validatorIndex, + MaxIndex: validatorIndex, WithOrphaned: 1, } dbWithdrawals, totalRows := services.GlobalBeaconService.GetWithdrawalsByFilter(ctx, withdrawalFilter, 0, 10) @@ -624,14 +625,18 @@ func buildValidatorPageData(ctx context.Context, validatorIndex uint64, tabView } } - // Batch resolve blocks - blockUids := make([]uint64, 0, len(dbWithdrawals)) - blockUidSet := make(map[uint64]bool, len(dbWithdrawals)) + // Batch resolve blocks (including ref slot blocks) + blockUids := make([]uint64, 0, len(dbWithdrawals)*2) + blockUidSet := make(map[uint64]bool, len(dbWithdrawals)*2) for _, w := range dbWithdrawals { if !blockUidSet[w.BlockUid] { blockUidSet[w.BlockUid] = true blockUids = append(blockUids, w.BlockUid) } + if w.RefSlot != nil && !blockUidSet[*w.RefSlot] { + blockUidSet[*w.RefSlot] = true + blockUids = append(blockUids, *w.RefSlot) + } } blockMap := make(map[uint64]*dbtypes.AssignedSlot, len(blockUids)) if len(blockUids) > 0 { @@ -675,6 +680,13 @@ func buildValidatorPageData(ctx context.Context, validatorIndex uint64, tabView } } + if withdrawal.RefSlot != nil { + withdrawalData.RefSlot = *withdrawal.RefSlot >> 
16 + if refBlock, ok := blockMap[*withdrawal.RefSlot]; ok && refBlock.Block != nil { + withdrawalData.RefSlotRoot = refBlock.Block.Root + } + } + pageData.Withdrawals = append(pageData.Withdrawals, withdrawalData) } diff --git a/handlers/validator_slots.go b/handlers/validator_slots.go index 3cf97d9a4..19106ee5a 100644 --- a/handlers/validator_slots.go +++ b/handlers/validator_slots.go @@ -7,7 +7,7 @@ import ( "strconv" "time" - "github.com/attestantio/go-eth2-client/spec/phase0" + "github.com/ethpandaops/go-eth2-client/spec/phase0" "github.com/gorilla/mux" "github.com/sirupsen/logrus" @@ -113,12 +113,13 @@ func buildValidatorSlotsPageData(ctx context.Context, validator uint64, pageIdx break } slot := blockAssignment.Slot + epoch := chainState.EpochOfSlot(phase0.Slot(slot)) slotData := &models.ValidatorSlotsPageDataSlot{ Slot: slot, - Epoch: uint64(chainState.EpochOfSlot(phase0.Slot(slot))), + Epoch: uint64(epoch), Ts: chainState.SlotToTime(phase0.Slot(slot)), - Finalized: finalizedEpoch >= chainState.EpochOfSlot(phase0.Slot(slot)), + Finalized: finalizedEpoch >= epoch, Status: uint8(0), Proposer: validator, ProposerName: pageData.Name, @@ -141,6 +142,12 @@ func buildValidatorSlotsPageData(ctx context.Context, validator uint64, pageIdx slotData.WithEthBlock = true slotData.EthBlockNumber = *dbBlock.EthBlockNumber } + + payloadStatus := dbBlock.PayloadStatus + if !chainState.IsEip7732Enabled(epoch) { + payloadStatus = dbtypes.PayloadStatusCanonical + } + slotData.PayloadStatus = uint8(payloadStatus) } pageData.Slots = append(pageData.Slots, slotData) } diff --git a/handlers/validators.go b/handlers/validators.go index e23f94b41..c9c5c9e57 100644 --- a/handlers/validators.go +++ b/handlers/validators.go @@ -12,7 +12,7 @@ import ( "strings" "time" - v1 "github.com/attestantio/go-eth2-client/api/v1" + v1 "github.com/ethpandaops/go-eth2-client/api/v1" "github.com/ethpandaops/dora/dbtypes" "github.com/ethpandaops/dora/services" diff --git 
a/handlers/validators_activity.go b/handlers/validators_activity.go index e58e65e4a..6487e536f 100644 --- a/handlers/validators_activity.go +++ b/handlers/validators_activity.go @@ -10,11 +10,11 @@ import ( "strings" "time" - "github.com/attestantio/go-eth2-client/spec/phase0" "github.com/ethpandaops/dora/indexer/beacon" "github.com/ethpandaops/dora/services" "github.com/ethpandaops/dora/templates" "github.com/ethpandaops/dora/types/models" + "github.com/ethpandaops/go-eth2-client/spec/phase0" "github.com/sirupsen/logrus" ) diff --git a/handlers/validators_offline.go b/handlers/validators_offline.go index 94a8e2f0e..e058db87b 100644 --- a/handlers/validators_offline.go +++ b/handlers/validators_offline.go @@ -9,13 +9,13 @@ import ( "strings" "time" - v1 "github.com/attestantio/go-eth2-client/api/v1" - "github.com/attestantio/go-eth2-client/spec/phase0" "github.com/ethpandaops/dora/dbtypes" "github.com/ethpandaops/dora/indexer/beacon" "github.com/ethpandaops/dora/services" "github.com/ethpandaops/dora/templates" "github.com/ethpandaops/dora/types/models" + v1 "github.com/ethpandaops/go-eth2-client/api/v1" + "github.com/ethpandaops/go-eth2-client/spec/phase0" "github.com/sirupsen/logrus" ) diff --git a/handlers/validators_summary.go b/handlers/validators_summary.go index 21625694e..351735efe 100644 --- a/handlers/validators_summary.go +++ b/handlers/validators_summary.go @@ -9,7 +9,7 @@ import ( "strings" "time" - v1 "github.com/attestantio/go-eth2-client/api/v1" + v1 "github.com/ethpandaops/go-eth2-client/api/v1" "github.com/ethpandaops/dora/clients/consensus" "github.com/ethpandaops/dora/clients/execution" diff --git a/handlers/voluntary_exits.go b/handlers/voluntary_exits.go index 9b8c883b7..067249140 100644 --- a/handlers/voluntary_exits.go +++ b/handlers/voluntary_exits.go @@ -8,12 +8,13 @@ import ( "strconv" "strings" - v1 "github.com/attestantio/go-eth2-client/api/v1" - "github.com/attestantio/go-eth2-client/spec/phase0" "github.com/ethpandaops/dora/dbtypes" 
"github.com/ethpandaops/dora/services" "github.com/ethpandaops/dora/templates" "github.com/ethpandaops/dora/types/models" + v1 "github.com/ethpandaops/go-eth2-client/api/v1" + "github.com/ethpandaops/go-eth2-client/spec/gloas" + "github.com/ethpandaops/go-eth2-client/spec/phase0" "github.com/sirupsen/logrus" ) @@ -40,6 +41,7 @@ func VoluntaryExits(w http.ResponseWriter, r *http.Request) { } } + var entity string var minSlot uint64 var maxSlot uint64 var minIndex uint64 @@ -48,6 +50,9 @@ func VoluntaryExits(w http.ResponseWriter, r *http.Request) { var withOrphaned uint64 if urlArgs.Has("f") { + if urlArgs.Has("f.entity") { + entity = urlArgs.Get("f.entity") + } if urlArgs.Has("f.mins") { minSlot, _ = strconv.ParseUint(urlArgs.Get("f.mins"), 10, 64) } @@ -69,10 +74,21 @@ func VoluntaryExits(w http.ResponseWriter, r *http.Request) { } else { withOrphaned = 1 } + + // Apply builder flag to index filters when entity=builder + if entity == "builder" { + if minIndex > 0 { + minIndex |= services.BuilderIndexFlag + } + if maxIndex > 0 { + maxIndex |= services.BuilderIndexFlag + } + } + var pageError error pageError = services.GlobalCallRateLimiter.CheckCallLimit(r, 2) if pageError == nil { - data.Data, pageError = getFilteredVoluntaryExitsPageData(pageIdx, pageSize, minSlot, maxSlot, minIndex, maxIndex, vname, uint8(withOrphaned)) + data.Data, pageError = getFilteredVoluntaryExitsPageData(pageIdx, pageSize, entity, minSlot, maxSlot, minIndex, maxIndex, vname, uint8(withOrphaned)) } if pageError != nil { handlePageError(w, r, pageError) @@ -84,11 +100,11 @@ func VoluntaryExits(w http.ResponseWriter, r *http.Request) { } } -func getFilteredVoluntaryExitsPageData(pageIdx uint64, pageSize uint64, minSlot uint64, maxSlot uint64, minIndex uint64, maxIndex uint64, vname string, withOrphaned uint8) (*models.VoluntaryExitsPageData, error) { +func getFilteredVoluntaryExitsPageData(pageIdx uint64, pageSize uint64, entity string, minSlot uint64, maxSlot uint64, minIndex uint64, 
maxIndex uint64, vname string, withOrphaned uint8) (*models.VoluntaryExitsPageData, error) { pageData := &models.VoluntaryExitsPageData{} - pageCacheKey := fmt.Sprintf("voluntary_exits:%v:%v:%v:%v:%v:%v:%v:%v", pageIdx, pageSize, minSlot, maxSlot, minIndex, maxIndex, vname, withOrphaned) + pageCacheKey := fmt.Sprintf("voluntary_exits:%v:%v:%v:%v:%v:%v:%v:%v:%v", pageIdx, pageSize, entity, minSlot, maxSlot, minIndex, maxIndex, vname, withOrphaned) pageRes, pageErr := services.GlobalFrontendCache.ProcessCachedPage(pageCacheKey, true, pageData, func(pageCall *services.FrontendCacheProcessingPage) interface{} { - return buildFilteredVoluntaryExitsPageData(pageCall.CallCtx, pageIdx, pageSize, minSlot, maxSlot, minIndex, maxIndex, vname, withOrphaned) + return buildFilteredVoluntaryExitsPageData(pageCall.CallCtx, pageIdx, pageSize, entity, minSlot, maxSlot, minIndex, maxIndex, vname, withOrphaned) }) if pageErr == nil && pageRes != nil { resData, resOk := pageRes.(*models.VoluntaryExitsPageData) @@ -100,8 +116,15 @@ func getFilteredVoluntaryExitsPageData(pageIdx uint64, pageSize uint64, minSlot return pageData, pageErr } -func buildFilteredVoluntaryExitsPageData(ctx context.Context, pageIdx uint64, pageSize uint64, minSlot uint64, maxSlot uint64, minIndex uint64, maxIndex uint64, vname string, withOrphaned uint8) *models.VoluntaryExitsPageData { +func buildFilteredVoluntaryExitsPageData(ctx context.Context, pageIdx uint64, pageSize uint64, entity string, minSlot uint64, maxSlot uint64, minIndex uint64, maxIndex uint64, vname string, withOrphaned uint8) *models.VoluntaryExitsPageData { + if entity == "" { + entity = "all" + } + filterArgs := url.Values{} + if entity != "all" { + filterArgs.Add("f.entity", entity) + } if minSlot != 0 { filterArgs.Add("f.mins", fmt.Sprintf("%v", minSlot)) } @@ -121,11 +144,20 @@ func buildFilteredVoluntaryExitsPageData(ctx context.Context, pageIdx uint64, pa filterArgs.Add("f.orphaned", fmt.Sprintf("%v", withOrphaned)) } + // Display 
indices without the builder flag for the filter UI + displayMinIndex := minIndex + displayMaxIndex := maxIndex + if entity == "builder" { + displayMinIndex = minIndex &^ services.BuilderIndexFlag + displayMaxIndex = maxIndex &^ services.BuilderIndexFlag + } + pageData := &models.VoluntaryExitsPageData{ + FilterEntity: entity, FilterMinSlot: minSlot, FilterMaxSlot: maxSlot, - FilterMinIndex: minIndex, - FilterMaxIndex: maxIndex, + FilterMinIndex: displayMinIndex, + FilterMaxIndex: displayMaxIndex, FilterValidatorName: vname, FilterWithOrphaned: withOrphaned, } @@ -164,40 +196,67 @@ func buildFilteredVoluntaryExitsPageData(ctx context.Context, pageIdx uint64, pa SlotRoot: voluntaryExit.SlotRoot, Time: chainState.SlotToTime(phase0.Slot(voluntaryExit.SlotNumber)), Orphaned: voluntaryExit.Orphaned, - ValidatorIndex: voluntaryExit.ValidatorIndex, - ValidatorName: services.GlobalBeaconService.GetValidatorName(voluntaryExit.ValidatorIndex), ValidatorStatus: "", } - validator := services.GlobalBeaconService.GetValidatorByIndex(phase0.ValidatorIndex(voluntaryExit.ValidatorIndex), false) - if validator == nil { - voluntaryExitData.ValidatorStatus = "Unknown" - } else { - voluntaryExitData.PublicKey = validator.Validator.PublicKey[:] - voluntaryExitData.WithdrawalCreds = validator.Validator.WithdrawalCredentials - - if strings.HasPrefix(validator.Status.String(), "pending") { - voluntaryExitData.ValidatorStatus = "Pending" - } else if validator.Status == v1.ValidatorStateActiveOngoing { - voluntaryExitData.ValidatorStatus = "Active" - voluntaryExitData.ShowUpcheck = true - } else if validator.Status == v1.ValidatorStateActiveExiting { - voluntaryExitData.ValidatorStatus = "Exiting" - voluntaryExitData.ShowUpcheck = true - } else if validator.Status == v1.ValidatorStateActiveSlashed { - voluntaryExitData.ValidatorStatus = "Slashed" - voluntaryExitData.ShowUpcheck = true - } else if validator.Status == v1.ValidatorStateExitedUnslashed { - voluntaryExitData.ValidatorStatus = 
"Exited" - } else if validator.Status == v1.ValidatorStateExitedSlashed { - voluntaryExitData.ValidatorStatus = "Slashed" + // Check if this is a builder exit (validator index has BuilderIndexFlag set) + if voluntaryExit.ValidatorIndex&services.BuilderIndexFlag != 0 { + builderIndex := voluntaryExit.ValidatorIndex &^ services.BuilderIndexFlag + voluntaryExitData.IsBuilder = true + voluntaryExitData.ValidatorIndex = builderIndex + + // Resolve builder name via validatornames service (with BuilderIndexFlag) + voluntaryExitData.ValidatorName = services.GlobalBeaconService.GetValidatorName(voluntaryExit.ValidatorIndex) + + builder := services.GlobalBeaconService.GetBuilderByIndex(gloas.BuilderIndex(builderIndex)) + if builder == nil { + voluntaryExitData.ValidatorStatus = "Unknown" } else { - voluntaryExitData.ValidatorStatus = validator.Status.String() + voluntaryExitData.PublicKey = builder.PublicKey[:] + + // Determine builder status + currentEpoch := chainState.CurrentEpoch() + if builder.WithdrawableEpoch <= currentEpoch { + voluntaryExitData.ValidatorStatus = "Exited" + } else { + voluntaryExitData.ValidatorStatus = "Exiting" + } } + } else { + // Regular validator exit + voluntaryExitData.ValidatorIndex = voluntaryExit.ValidatorIndex + voluntaryExitData.ValidatorName = services.GlobalBeaconService.GetValidatorName(voluntaryExit.ValidatorIndex) + + validator := services.GlobalBeaconService.GetValidatorByIndex(phase0.ValidatorIndex(voluntaryExit.ValidatorIndex), false) + if validator == nil { + voluntaryExitData.ValidatorStatus = "Unknown" + } else { + voluntaryExitData.PublicKey = validator.Validator.PublicKey[:] + voluntaryExitData.WithdrawalCreds = validator.Validator.WithdrawalCredentials + + if strings.HasPrefix(validator.Status.String(), "pending") { + voluntaryExitData.ValidatorStatus = "Pending" + } else if validator.Status == v1.ValidatorStateActiveOngoing { + voluntaryExitData.ValidatorStatus = "Active" + voluntaryExitData.ShowUpcheck = true + } else if 
validator.Status == v1.ValidatorStateActiveExiting { + voluntaryExitData.ValidatorStatus = "Exiting" + voluntaryExitData.ShowUpcheck = true + } else if validator.Status == v1.ValidatorStateActiveSlashed { + voluntaryExitData.ValidatorStatus = "Slashed" + voluntaryExitData.ShowUpcheck = true + } else if validator.Status == v1.ValidatorStateExitedUnslashed { + voluntaryExitData.ValidatorStatus = "Exited" + } else if validator.Status == v1.ValidatorStateExitedSlashed { + voluntaryExitData.ValidatorStatus = "Slashed" + } else { + voluntaryExitData.ValidatorStatus = validator.Status.String() + } - if voluntaryExitData.ShowUpcheck { - voluntaryExitData.UpcheckActivity = uint8(services.GlobalBeaconService.GetValidatorLiveness(validator.Index, 3)) - voluntaryExitData.UpcheckMaximum = uint8(3) + if voluntaryExitData.ShowUpcheck { + voluntaryExitData.UpcheckActivity = uint8(services.GlobalBeaconService.GetValidatorLiveness(validator.Index, 3)) + voluntaryExitData.UpcheckMaximum = uint8(3) + } } } diff --git a/handlers/withdrawals.go b/handlers/withdrawals.go index f7ccdf9b8..8937f61e1 100644 --- a/handlers/withdrawals.go +++ b/handlers/withdrawals.go @@ -9,13 +9,13 @@ import ( "strings" "time" - v1 "github.com/attestantio/go-eth2-client/api/v1" - "github.com/attestantio/go-eth2-client/spec/phase0" "github.com/ethpandaops/dora/db" "github.com/ethpandaops/dora/dbtypes" "github.com/ethpandaops/dora/services" "github.com/ethpandaops/dora/templates" "github.com/ethpandaops/dora/types/models" + v1 "github.com/ethpandaops/go-eth2-client/api/v1" + "github.com/ethpandaops/go-eth2-client/spec/phase0" "github.com/sirupsen/logrus" ) @@ -144,7 +144,12 @@ func buildWithdrawalsPageData(ctx context.Context, firstEpoch uint64, pageSize u } if validatorIndex := withdrawal.ValidatorIndex(); validatorIndex != nil { - withdrawalData.ValidatorIndex = *validatorIndex + if *validatorIndex&services.BuilderIndexFlag != 0 { + withdrawalData.IsBuilder = true + withdrawalData.ValidatorIndex = 
*validatorIndex &^ services.BuilderIndexFlag + } else { + withdrawalData.ValidatorIndex = *validatorIndex + } withdrawalData.ValidatorName = services.GlobalBeaconService.GetValidatorName(*validatorIndex) withdrawalData.ValidatorValid = true } @@ -198,14 +203,18 @@ func buildWithdrawalsPageData(ctx context.Context, firstEpoch uint64, pageSize u } } - // Batch resolve blocks - blockUids := make([]uint64, 0, len(dbWithdrawals)) - blockUidSet := make(map[uint64]bool, len(dbWithdrawals)) + // Batch resolve blocks (including ref slot blocks) + blockUids := make([]uint64, 0, len(dbWithdrawals)*2) + blockUidSet := make(map[uint64]bool, len(dbWithdrawals)*2) for _, w := range dbWithdrawals { if !blockUidSet[w.BlockUid] { blockUidSet[w.BlockUid] = true blockUids = append(blockUids, w.BlockUid) } + if w.RefSlot != nil && !blockUidSet[*w.RefSlot] { + blockUidSet[*w.RefSlot] = true + blockUids = append(blockUids, *w.RefSlot) + } } blockMap := make(map[uint64]*dbtypes.AssignedSlot, len(blockUids)) if len(blockUids) > 0 { @@ -232,7 +241,12 @@ func buildWithdrawalsPageData(ctx context.Context, firstEpoch uint64, pageSize u } withdrawalData.HasValidator = true - withdrawalData.ValidatorIndex = withdrawal.Validator + if withdrawal.Validator&services.BuilderIndexFlag != 0 { + withdrawalData.IsBuilder = true + withdrawalData.ValidatorIndex = withdrawal.Validator &^ services.BuilderIndexFlag + } else { + withdrawalData.ValidatorIndex = withdrawal.Validator + } withdrawalData.ValidatorName = services.GlobalBeaconService.GetValidatorName(withdrawal.Validator) if withdrawal.AccountID > 0 { @@ -250,6 +264,13 @@ func buildWithdrawalsPageData(ctx context.Context, firstEpoch uint64, pageSize u } } + if withdrawal.RefSlot != nil { + withdrawalData.RefSlot = *withdrawal.RefSlot >> 16 + if refBlock, ok := blockMap[*withdrawal.RefSlot]; ok && refBlock.Block != nil { + withdrawalData.RefSlotRoot = refBlock.Block.Root + } + } + pageData.BeaconWithdrawals = append(pageData.BeaconWithdrawals, 
withdrawalData) } pageData.BeaconWithdrawalCount = uint64(len(pageData.BeaconWithdrawals)) diff --git a/handlers/withdrawals_list.go b/handlers/withdrawals_list.go index 2c672ecd0..8860ac179 100644 --- a/handlers/withdrawals_list.go +++ b/handlers/withdrawals_list.go @@ -10,7 +10,7 @@ import ( "strings" "time" - "github.com/attestantio/go-eth2-client/spec/phase0" + "github.com/ethpandaops/go-eth2-client/spec/phase0" "github.com/sirupsen/logrus" "github.com/ethpandaops/dora/db" @@ -44,7 +44,10 @@ func WithdrawalsList(w http.ResponseWriter, r *http.Request) { } } - var validator string + var entity string + var minIndex uint64 + var maxIndex uint64 + var vname string var address string var minAmount string var maxAmount string @@ -52,8 +55,17 @@ func WithdrawalsList(w http.ResponseWriter, r *http.Request) { var withOrphaned uint64 if urlArgs.Has("f") { - if urlArgs.Has("f.validator") { - validator = urlArgs.Get("f.validator") + if urlArgs.Has("f.entity") { + entity = urlArgs.Get("f.entity") + } + if urlArgs.Has("f.mini") { + minIndex, _ = strconv.ParseUint(urlArgs.Get("f.mini"), 10, 64) + } + if urlArgs.Has("f.maxi") { + maxIndex, _ = strconv.ParseUint(urlArgs.Get("f.maxi"), 10, 64) + } + if urlArgs.Has("f.vname") { + vname = urlArgs.Get("f.vname") } if urlArgs.Has("f.address") { address = urlArgs.Get("f.address") @@ -74,10 +86,20 @@ func WithdrawalsList(w http.ResponseWriter, r *http.Request) { withOrphaned = 1 } + // Apply builder flag to index filters when entity=builder + if entity == "builder" { + if minIndex > 0 { + minIndex |= services.BuilderIndexFlag + } + if maxIndex > 0 { + maxIndex |= services.BuilderIndexFlag + } + } + var pageError error pageError = services.GlobalCallRateLimiter.CheckCallLimit(r, 2) if pageError == nil { - data.Data, pageError = getFilteredWithdrawalsListPageData(pageIdx, pageSize, validator, address, withType, minAmount, maxAmount, uint8(withOrphaned)) + data.Data, pageError = getFilteredWithdrawalsListPageData(pageIdx, pageSize, 
entity, minIndex, maxIndex, vname, address, withType, minAmount, maxAmount, uint8(withOrphaned)) } if pageError != nil { handlePageError(w, r, pageError) @@ -89,11 +111,11 @@ func WithdrawalsList(w http.ResponseWriter, r *http.Request) { } } -func getFilteredWithdrawalsListPageData(pageIdx uint64, pageSize uint64, validator string, address string, withType string, minAmount string, maxAmount string, withOrphaned uint8) (*models.WithdrawalsListPageData, error) { +func getFilteredWithdrawalsListPageData(pageIdx uint64, pageSize uint64, entity string, minIndex uint64, maxIndex uint64, vname string, address string, withType string, minAmount string, maxAmount string, withOrphaned uint8) (*models.WithdrawalsListPageData, error) { pageData := &models.WithdrawalsListPageData{} - pageCacheKey := fmt.Sprintf("withdrawals_list:%v:%v:%v:%v:%v:%v:%v:%v", pageIdx, pageSize, validator, address, withType, minAmount, maxAmount, withOrphaned) + pageCacheKey := fmt.Sprintf("withdrawals_list:%v:%v:%v:%v:%v:%v:%v:%v:%v:%v:%v", pageIdx, pageSize, entity, minIndex, maxIndex, vname, address, withType, minAmount, maxAmount, withOrphaned) pageRes, pageErr := services.GlobalFrontendCache.ProcessCachedPage(pageCacheKey, true, pageData, func(pageCall *services.FrontendCacheProcessingPage) interface{} { - pageData, cacheTimeout := buildFilteredWithdrawalsListPageData(pageCall.CallCtx, pageIdx, pageSize, validator, address, withType, minAmount, maxAmount, withOrphaned) + pageData, cacheTimeout := buildFilteredWithdrawalsListPageData(pageCall.CallCtx, pageIdx, pageSize, entity, minIndex, maxIndex, vname, address, withType, minAmount, maxAmount, withOrphaned) pageCall.CacheTimeout = cacheTimeout return pageData }) @@ -107,10 +129,23 @@ func getFilteredWithdrawalsListPageData(pageIdx uint64, pageSize uint64, validat return pageData, pageErr } -func buildFilteredWithdrawalsListPageData(ctx context.Context, pageIdx uint64, pageSize uint64, validator string, address string, withType string, minAmount 
string, maxAmount string, withOrphaned uint8) (*models.WithdrawalsListPageData, time.Duration) { +func buildFilteredWithdrawalsListPageData(ctx context.Context, pageIdx uint64, pageSize uint64, entity string, minIndex uint64, maxIndex uint64, vname string, address string, withType string, minAmount string, maxAmount string, withOrphaned uint8) (*models.WithdrawalsListPageData, time.Duration) { + if entity == "" { + entity = "all" + } + filterArgs := url.Values{} - if validator != "" { - filterArgs.Add("f.validator", validator) + if entity != "all" { + filterArgs.Add("f.entity", entity) + } + if minIndex != 0 { + filterArgs.Add("f.mini", fmt.Sprintf("%v", minIndex)) + } + if maxIndex != 0 { + filterArgs.Add("f.maxi", fmt.Sprintf("%v", maxIndex)) + } + if vname != "" { + filterArgs.Add("f.vname", vname) } if address != "" { filterArgs.Add("f.address", address) @@ -128,16 +163,27 @@ func buildFilteredWithdrawalsListPageData(ctx context.Context, pageIdx uint64, p filterArgs.Add("f.orphaned", fmt.Sprintf("%v", withOrphaned)) } + // Display indices without the builder flag for the filter UI + displayMinIndex := minIndex + displayMaxIndex := maxIndex + if entity == "builder" { + displayMinIndex = minIndex &^ services.BuilderIndexFlag + displayMaxIndex = maxIndex &^ services.BuilderIndexFlag + } + pageData := &models.WithdrawalsListPageData{ - FilterValidator: validator, - FilterAddress: address, - FilterWithType: withType, - FilterMinAmount: minAmount, - FilterMaxAmount: maxAmount, - FilterWithOrphaned: withOrphaned, + FilterEntity: entity, + FilterMinIndex: displayMinIndex, + FilterMaxIndex: displayMaxIndex, + FilterValidatorName: vname, + FilterAddress: address, + FilterWithType: withType, + FilterMinAmount: minAmount, + FilterMaxAmount: maxAmount, + FilterWithOrphaned: withOrphaned, } cacheTimeout := 5 * time.Minute - logrus.Debugf("withdrawals_list page called: %v:%v [%v,%v,%v,%v,%v,%v]", pageIdx, pageSize, validator, address, withType, minAmount, maxAmount, 
withOrphaned) + logrus.Debugf("withdrawals_list page called: %v:%v [%v,%v,%v,%v,%v,%v,%v,%v]", pageIdx, pageSize, entity, minIndex, maxIndex, vname, address, withType, minAmount, maxAmount) if pageIdx == 1 { pageData.IsDefaultPage = true } else { @@ -156,17 +202,14 @@ func buildFilteredWithdrawalsListPageData(ctx context.Context, pageIdx uint64, p // Build filter withdrawalFilter := &dbtypes.WithdrawalFilter{ + MinIndex: minIndex, + MaxIndex: maxIndex, WithOrphaned: withOrphaned, } - // Parse validator filter (index or name) - if validator != "" { - validatorIndex, err := strconv.ParseUint(validator, 10, 64) - if err == nil { - withdrawalFilter.Validator = &validatorIndex - } else { - withdrawalFilter.ValidatorName = validator - } + // Only apply name filter when a specific entity is selected + if entity != "all" && vname != "" { + withdrawalFilter.ValidatorName = vname } // Parse address filter -> resolve to account_id @@ -183,7 +226,7 @@ func buildFilteredWithdrawalsListPageData(ctx context.Context, pageIdx uint64, p // Parse type filter (comma-separated list of type values 1-3) if withType != "" { for _, t := range strings.Split(withType, ",") { - if v, err := strconv.ParseUint(strings.TrimSpace(t), 10, 8); err == nil && v >= 1 && v <= 3 { + if v, err := strconv.ParseUint(strings.TrimSpace(t), 10, 8); err == nil && v >= 1 && v <= 6 { withdrawalFilter.Types = append(withdrawalFilter.Types, uint8(v)) } } @@ -226,14 +269,18 @@ func buildFilteredWithdrawalsListPageData(ctx context.Context, pageIdx uint64, p } } - // Batch resolve blocks for block root and number - blockUids := make([]uint64, 0, len(dbWithdrawals)) - blockUidSet := make(map[uint64]bool, len(dbWithdrawals)) + // Batch resolve blocks for block root, number, and ref slot + blockUids := make([]uint64, 0, len(dbWithdrawals)*2) + blockUidSet := make(map[uint64]bool, len(dbWithdrawals)*2) for _, w := range dbWithdrawals { if !blockUidSet[w.BlockUid] { blockUidSet[w.BlockUid] = true blockUids = 
append(blockUids, w.BlockUid) } + if w.RefSlot != nil && !blockUidSet[*w.RefSlot] { + blockUidSet[*w.RefSlot] = true + blockUids = append(blockUids, *w.RefSlot) + } } blockMap := make(map[uint64]*dbtypes.AssignedSlot, len(blockUids)) if len(blockUids) > 0 { @@ -261,7 +308,12 @@ func buildFilteredWithdrawalsListPageData(ctx context.Context, pageIdx uint64, p } withdrawalData.HasValidator = true - withdrawalData.ValidatorIndex = withdrawal.Validator + if withdrawal.Validator&services.BuilderIndexFlag != 0 { + withdrawalData.IsBuilder = true + withdrawalData.ValidatorIndex = withdrawal.Validator &^ services.BuilderIndexFlag + } else { + withdrawalData.ValidatorIndex = withdrawal.Validator + } withdrawalData.ValidatorName = services.GlobalBeaconService.GetValidatorName(withdrawal.Validator) // Resolve address from account_id @@ -281,6 +333,14 @@ func buildFilteredWithdrawalsListPageData(ctx context.Context, pageIdx uint64, p } } + // Resolve ref slot (block UID of the slot this builder payment refers to) + if withdrawal.RefSlot != nil { + withdrawalData.RefSlot = *withdrawal.RefSlot >> 16 + if refBlock, ok := blockMap[*withdrawal.RefSlot]; ok && refBlock.Block != nil { + withdrawalData.RefSlotRoot = refBlock.Block.Root + } + } + pageData.Withdrawals = append(pageData.Withdrawals, withdrawalData) } pageData.WithdrawalCount = uint64(len(pageData.Withdrawals)) diff --git a/indexer/beacon/bidcache.go b/indexer/beacon/bidcache.go new file mode 100644 index 000000000..d3e1c1329 --- /dev/null +++ b/indexer/beacon/bidcache.go @@ -0,0 +1,211 @@ +package beacon + +import ( + "sync" + + "github.com/ethpandaops/dora/db" + "github.com/ethpandaops/dora/dbtypes" + "github.com/ethpandaops/go-eth2-client/spec/phase0" + "github.com/jmoiron/sqlx" +) + +const ( + // bidCacheMaxSlots is the maximum number of slots to keep in the cache + bidCacheMaxSlots = 15 + // bidCacheFlushThreshold is the slot span that triggers a flush + bidCacheFlushThreshold = 15 + // bidCacheRetainSlots is the 
number of slots to retain after a flush + bidCacheRetainSlots = 10 +) + +// bidCacheKey uniquely identifies a bid in the cache +type bidCacheKey struct { + ParentRoot phase0.Root + ParentHash phase0.Hash32 + BlockHash phase0.Hash32 + BuilderIndex int64 +} + +// blockBidCache caches execution payload bids for recent blocks. +// Bids for older slots are ignored. The cache is flushed to DB on shutdown +// or when the slot span exceeds the threshold. +type blockBidCache struct { + indexer *Indexer + cacheMutex sync.RWMutex + bids map[bidCacheKey]*dbtypes.BlockBid + minSlot phase0.Slot + maxSlot phase0.Slot +} + +// newBlockBidCache creates a new instance of blockBidCache. +func newBlockBidCache(indexer *Indexer) *blockBidCache { + return &blockBidCache{ + indexer: indexer, + bids: make(map[bidCacheKey]*dbtypes.BlockBid, 64), + } +} + +// loadFromDB loads bids from the last N slots from the database. +func (cache *blockBidCache) loadFromDB(currentSlot phase0.Slot) { + cache.cacheMutex.Lock() + defer cache.cacheMutex.Unlock() + + minSlot := phase0.Slot(0) + if currentSlot > bidCacheRetainSlots { + minSlot = currentSlot - bidCacheRetainSlots + } + + dbBids := db.GetBidsForSlotRange(cache.indexer.ctx, uint64(minSlot)) + for _, bid := range dbBids { + key := bidCacheKey{ + ParentRoot: phase0.Root(bid.ParentRoot), + ParentHash: phase0.Hash32(bid.ParentHash), + BlockHash: phase0.Hash32(bid.BlockHash), + BuilderIndex: bid.BuilderIndex, + } + cache.bids[key] = bid + + slot := phase0.Slot(bid.Slot) + if cache.minSlot == 0 || slot < cache.minSlot { + cache.minSlot = slot + } + if slot > cache.maxSlot { + cache.maxSlot = slot + } + } + + if len(dbBids) > 0 { + cache.indexer.logger.Infof("loaded %d bids from DB (slots %d-%d)", len(dbBids), cache.minSlot, cache.maxSlot) + } +} + +// AddBid adds a bid to the cache. Returns true if the bid was added, +// false if it was ignored (too old) or already exists. 
+func (cache *blockBidCache) AddBid(bid *dbtypes.BlockBid) bool { + cache.cacheMutex.Lock() + defer cache.cacheMutex.Unlock() + + slot := phase0.Slot(bid.Slot) + + // Ignore bids for slots that are too old + if cache.maxSlot > 0 && slot+bidCacheMaxSlots < cache.maxSlot { + return false + } + + key := bidCacheKey{ + ParentRoot: phase0.Root(bid.ParentRoot), + ParentHash: phase0.Hash32(bid.ParentHash), + BlockHash: phase0.Hash32(bid.BlockHash), + BuilderIndex: bid.BuilderIndex, + } + + // Check if bid already exists + if _, exists := cache.bids[key]; exists { + return false + } + + cache.bids[key] = bid + + // Update slot bounds + if cache.minSlot == 0 || slot < cache.minSlot { + cache.minSlot = slot + } + if slot > cache.maxSlot { + cache.maxSlot = slot + } + + return true +} + +// GetBidsForBlockRoot returns all bids for a given parent block root. +func (cache *blockBidCache) GetBidsForBlockRoot(blockRoot phase0.Root) []*dbtypes.BlockBid { + cache.cacheMutex.RLock() + defer cache.cacheMutex.RUnlock() + + result := make([]*dbtypes.BlockBid, 0) + for key, bid := range cache.bids { + if key.ParentRoot == blockRoot { + result = append(result, bid) + } + } + + return result +} + +// checkAndFlush checks if the cache needs to be flushed and performs the flush if necessary. +// This should be called periodically (e.g., on each new block). 
+func (cache *blockBidCache) checkAndFlush() error { + cache.cacheMutex.Lock() + + // Check if we need to flush + if cache.maxSlot == 0 || cache.maxSlot-cache.minSlot < bidCacheFlushThreshold { + cache.cacheMutex.Unlock() + return nil + } + + // Calculate the cutoff slot - we'll flush bids older than this + cutoffSlot := cache.maxSlot - bidCacheRetainSlots + + // Collect bids to flush (from minSlot to cutoffSlot) + bidsToFlush := make([]*dbtypes.BlockBid, 0) + for key, bid := range cache.bids { + if phase0.Slot(bid.Slot) < cutoffSlot { + bidsToFlush = append(bidsToFlush, bid) + delete(cache.bids, key) + } + } + + // Update minSlot + cache.minSlot = cutoffSlot + + cache.cacheMutex.Unlock() + + // Write to DB outside of lock + if len(bidsToFlush) > 0 { + err := db.RunDBTransaction(func(tx *sqlx.Tx) error { + return db.InsertBids(bidsToFlush, tx) + }) + if err != nil { + cache.indexer.logger.Errorf("error flushing bids to db: %v", err) + return err + } + cache.indexer.logger.Debugf("flushed %d bids to DB (slots < %d)", len(bidsToFlush), cutoffSlot) + } + + return nil +} + +// flushAll flushes all cached bids to the database. +// This should be called on shutdown. 
+func (cache *blockBidCache) flushAll() error { + cache.cacheMutex.Lock() + + if len(cache.bids) == 0 { + cache.cacheMutex.Unlock() + return nil + } + + bidsToFlush := make([]*dbtypes.BlockBid, 0, len(cache.bids)) + for _, bid := range cache.bids { + bidsToFlush = append(bidsToFlush, bid) + } + + // Clear the cache + cache.bids = make(map[bidCacheKey]*dbtypes.BlockBid, 64) + cache.minSlot = 0 + cache.maxSlot = 0 + + cache.cacheMutex.Unlock() + + // Write to DB outside of lock + err := db.RunDBTransaction(func(tx *sqlx.Tx) error { + return db.InsertBids(bidsToFlush, tx) + }) + if err != nil { + cache.indexer.logger.Errorf("error flushing all bids to db: %v", err) + return err + } + + cache.indexer.logger.Infof("flushed %d bids to DB on shutdown", len(bidsToFlush)) + return nil +} diff --git a/indexer/beacon/block.go b/indexer/beacon/block.go index 2e5b5a36c..e66c40696 100644 --- a/indexer/beacon/block.go +++ b/indexer/beacon/block.go @@ -3,53 +3,60 @@ package beacon import ( "context" "fmt" + "math" "math/rand/v2" "sync" "time" - "github.com/attestantio/go-eth2-client/spec" - "github.com/attestantio/go-eth2-client/spec/phase0" "github.com/ethpandaops/dora/blockdb" btypes "github.com/ethpandaops/dora/blockdb/types" "github.com/ethpandaops/dora/db" "github.com/ethpandaops/dora/dbtypes" "github.com/ethpandaops/dora/utils" + "github.com/ethpandaops/go-eth2-client/spec" + "github.com/ethpandaops/go-eth2-client/spec/gloas" + "github.com/ethpandaops/go-eth2-client/spec/phase0" "github.com/jmoiron/sqlx" dynssz "github.com/pk910/dynamic-ssz" ) // Block represents a beacon block. 
type Block struct { - Root phase0.Root - Slot phase0.Slot - BlockUID uint64 - dynSsz *dynssz.DynSsz - parentRoot *phase0.Root - dependentRoot *phase0.Root - forkId ForkKey - forkChecked bool - headerMutex sync.Mutex - headerChan chan bool - header *phase0.SignedBeaconBlockHeader - blockMutex sync.Mutex - blockChan chan bool - block *spec.VersionedSignedBeaconBlock - blockIndex *BlockBodyIndex - recvDelay int32 - executionTimes []ExecutionTime // execution times from snooper clients - minExecutionTime uint16 - maxExecutionTime uint16 - execTimeUpdate *time.Ticker - executionTimesMux sync.RWMutex - isInFinalizedDb bool // block is in finalized table (slots) - isInUnfinalizedDb bool // block is in unfinalized table (unfinalized_blocks) - isDisposed bool // block is disposed - processingStatus dbtypes.UnfinalizedBlockStatus - seenMutex sync.RWMutex - seenMap map[uint16]*Client - processedActivity uint8 - blockResults [][]uint8 - blockResultsMutex sync.Mutex + Root phase0.Root + Slot phase0.Slot + BlockUID uint64 + dynSsz *dynssz.DynSsz + parentRoot *phase0.Root + dependentRoot *phase0.Root + forkId ForkKey + forkChecked bool + headerMutex sync.Mutex + headerChan chan bool + header *phase0.SignedBeaconBlockHeader + blockMutex sync.Mutex + blockChan chan bool + block *spec.VersionedSignedBeaconBlock + executionPayloadMutex sync.Mutex + executionPayloadChan chan bool + executionPayload *gloas.SignedExecutionPayloadEnvelope + blockIndex *BlockBodyIndex + recvDelay int32 + executionTimes []ExecutionTime // execution times from snooper clients + minExecutionTime uint16 + maxExecutionTime uint16 + execTimeUpdate *time.Ticker + executionTimesMux sync.RWMutex + isInFinalizedDb bool // block is in finalized table (slots) + isInUnfinalizedDb bool // block is in unfinalized table (unfinalized_blocks) + hasExecutionPayload bool // block has an execution payload (either in cache or db) + isPayloadOrphaned bool // payload is orphaned (next block doesn't build on it) + isDisposed bool 
// block is disposed + processingStatus dbtypes.UnfinalizedBlockStatus + seenMutex sync.RWMutex + seenMap map[uint16]*Client + processedActivity uint8 + blockResults [][]uint8 + blockResultsMutex sync.Mutex } // BlockBodyIndex holds important block properties that are used as index for cache lookups. @@ -58,10 +65,12 @@ type BlockBodyIndex struct { Graffiti [32]byte ExecutionExtraData []byte ExecutionHash phase0.Hash32 + ExecutionParentHash phase0.Hash32 ExecutionNumber uint64 SyncParticipation float32 EthTransactionCount uint64 BlobCount uint64 + BuilderIndex uint64 GasUsed uint64 GasLimit uint64 BlockSize uint64 @@ -69,21 +78,16 @@ type BlockBodyIndex struct { // newBlock creates a new Block instance. func newBlock(dynSsz *dynssz.DynSsz, root phase0.Root, slot phase0.Slot, blockUID uint64) *Block { - if blockUID == 0 { - // use highest possible block UID as default - blockUID = (uint64(slot) << 16) | 0xffff + return &Block{ + Root: root, + Slot: slot, + BlockUID: blockUID, + dynSsz: dynSsz, + seenMap: make(map[uint16]*Client), + headerChan: make(chan bool), + blockChan: make(chan bool), + executionPayloadChan: make(chan bool), } - block := &Block{ - Root: root, - Slot: slot, - BlockUID: blockUID, - dynSsz: dynSsz, - seenMap: make(map[uint16]*Client), - headerChan: make(chan bool), - blockChan: make(chan bool), - } - - return block } func (block *Block) Dispose() { @@ -170,7 +174,7 @@ func (block *Block) GetBlock(ctx context.Context) *spec.VersionedSignedBeaconBlo } if block.isInUnfinalizedDb { - dbBlock := db.GetUnfinalizedBlock(ctx, block.Root[:]) + dbBlock := db.GetUnfinalizedBlock(ctx, block.Root[:], false, true, false) if dbBlock != nil { blockBody, err := UnmarshalVersionedSignedBeaconBlockSSZ(block.dynSsz, dbBlock.BlockVer, dbBlock.BlockSSZ) if err == nil { @@ -188,6 +192,10 @@ func (block *Block) AwaitBlock(ctx context.Context, timeout time.Duration) *spec return nil } + if block.block != nil { + return block.block + } + if ctx == nil { ctx = 
context.Background() } @@ -201,6 +209,45 @@ func (block *Block) AwaitBlock(ctx context.Context, timeout time.Duration) *spec return block.block } +// GetExecutionPayload returns the execution payload of this block. +func (block *Block) GetExecutionPayload(ctx context.Context) *gloas.SignedExecutionPayloadEnvelope { + if block.executionPayload != nil { + return block.executionPayload + } + + if block.hasExecutionPayload && block.isInUnfinalizedDb { + dbBlock := db.GetUnfinalizedBlock(ctx, block.Root[:], false, false, true) + if dbBlock != nil { + payload, err := UnmarshalVersionedSignedExecutionPayloadEnvelopeSSZ(block.dynSsz, dbBlock.PayloadVer, dbBlock.PayloadSSZ) + if err == nil { + return payload + } + } + } + + return nil +} + +// AwaitExecutionPayload waits for the execution payload of this block to be available. +func (block *Block) AwaitExecutionPayload(ctx context.Context, timeout time.Duration) *gloas.SignedExecutionPayloadEnvelope { + if ctx == nil { + ctx = context.Background() + } + + select { + case <-block.executionPayloadChan: + case <-time.After(timeout): + case <-ctx.Done(): + } + + return block.executionPayload +} + +// HasExecutionPayload returns true if this block has an execution payload. +func (block *Block) HasExecutionPayload() bool { + return block.hasExecutionPayload +} + // GetParentRoot returns the parent root of this block. 
func (block *Block) GetParentRoot() *phase0.Root { if block.isDisposed { @@ -264,7 +311,7 @@ func (block *Block) SetBlock(body *spec.VersionedSignedBeaconBlock) { return } - block.setBlockIndex(body) + block.setBlockIndex(body, nil) block.block = body if block.blockChan != nil { @@ -295,7 +342,7 @@ func (block *Block) EnsureBlock(loadBlock func() (*spec.VersionedSignedBeaconBlo return false, err } - block.setBlockIndex(blockBody) + block.setBlockIndex(blockBody, nil) block.block = blockBody if block.blockChan != nil { close(block.blockChan) @@ -305,35 +352,106 @@ func (block *Block) EnsureBlock(loadBlock func() (*spec.VersionedSignedBeaconBlo return true, nil } +// SetExecutionPayload sets the execution payload of this block. +func (block *Block) SetExecutionPayload(payload *gloas.SignedExecutionPayloadEnvelope) { + block.setBlockIndex(block.block, payload) + block.executionPayload = payload + block.hasExecutionPayload = true + + if block.executionPayloadChan != nil { + close(block.executionPayloadChan) + block.executionPayloadChan = nil + } +} + +// EnsureExecutionPayload ensures that the execution payload of this block is available. +func (block *Block) EnsureExecutionPayload(loadExecutionPayload func() (*gloas.SignedExecutionPayloadEnvelope, error)) (bool, error) { + if block.executionPayload != nil { + return false, nil + } + + if block.hasExecutionPayload { + return false, nil + } + + block.executionPayloadMutex.Lock() + defer block.executionPayloadMutex.Unlock() + + if block.executionPayload != nil { + return false, nil + } + + payload, err := loadExecutionPayload() + if err != nil { + return false, err + } + + if payload == nil { + return false, nil + } + + block.setBlockIndex(block.block, payload) + block.executionPayload = payload + block.hasExecutionPayload = true + if block.executionPayloadChan != nil { + close(block.executionPayloadChan) + block.executionPayloadChan = nil + } + + return true, nil +} + // setBlockIndex sets the block index of this block. 
-func (block *Block) setBlockIndex(body *spec.VersionedSignedBeaconBlock) { +func (block *Block) setBlockIndex(body *spec.VersionedSignedBeaconBlock, payload *gloas.SignedExecutionPayloadEnvelope) { if body == nil { return } - blockIndex := &BlockBodyIndex{} - blockIndex.Graffiti, _ = body.Graffiti() + blockIndex := block.blockIndex + if blockIndex == nil { + blockIndex = &BlockBodyIndex{} + } + + if body != nil { + blockIndex.Graffiti, _ = body.Graffiti() + blockIndex.ExecutionExtraData, _ = getBlockExecutionExtraData(body) + blockIndex.ExecutionHash, _ = body.ExecutionBlockHash() + if execNumber, err := body.ExecutionBlockNumber(); err == nil { + blockIndex.ExecutionNumber = uint64(execNumber) + } + if transactions, err := body.ExecutionTransactions(); err == nil { + blockIndex.EthTransactionCount = uint64(len(transactions)) + } + if blobKzgCommitments, err := body.BlobKZGCommitments(); err == nil { + blockIndex.BlobCount = uint64(len(blobKzgCommitments)) + } + if builderIndex, err := getBlockPayloadBuilderIndex(body); err == nil { + blockIndex.BuilderIndex = uint64(builderIndex) + } else { + blockIndex.BuilderIndex = math.MaxUint64 + } + if parentHash, err := getBlockExecutionParentHash(body); err == nil { + blockIndex.ExecutionParentHash = parentHash + } + if executionPayload, err := body.ExecutionPayload(); err == nil { + gasUsed, _ := executionPayload.GasUsed() + blockIndex.GasUsed = gasUsed - executionPayload, _ := body.ExecutionPayload() - if executionPayload != nil { - blockIndex.ExecutionExtraData, _ = executionPayload.ExtraData() - blockIndex.ExecutionHash, _ = executionPayload.BlockHash() - blockIndex.ExecutionNumber, _ = executionPayload.BlockNumber() + gasLimit, _ := executionPayload.GasLimit() + blockIndex.GasLimit = gasLimit + } + } + if payload != nil { + blockIndex.ExecutionNumber = uint64(payload.Message.Payload.BlockNumber) + blockIndex.ExecutionParentHash = payload.Message.Payload.ParentHash // Calculate transaction count - 
executionTransactions, _ := executionPayload.Transactions() + executionTransactions := payload.Message.Payload.Transactions blockIndex.EthTransactionCount = uint64(len(executionTransactions)) - // Calculate blob count - blobKzgCommitments, _ := body.BlobKZGCommitments() - blockIndex.BlobCount = uint64(len(blobKzgCommitments)) - // Get gas used and gas limit - gasUsed, _ := executionPayload.GasUsed() - blockIndex.GasUsed = gasUsed - - gasLimit, _ := executionPayload.GasLimit() - blockIndex.GasLimit = gasLimit + blockIndex.GasUsed = payload.Message.Payload.GasUsed + blockIndex.GasLimit = payload.Message.Payload.GasLimit } // Calculate block size @@ -372,7 +490,7 @@ func (block *Block) GetBlockIndex(ctx context.Context) *BlockBodyIndex { blockBody := block.GetBlock(ctx) if blockBody != nil { - block.setBlockIndex(blockBody) + block.setBlockIndex(blockBody, block.GetExecutionPayload(ctx)) } return block.blockIndex @@ -399,7 +517,7 @@ func (block *Block) buildUnfinalizedBlock(ctx context.Context, compress bool) (* return nil, fmt.Errorf("marshal exec times ssz failed: %v", err) } - return &dbtypes.UnfinalizedBlock{ + unfinalizedBlock := &dbtypes.UnfinalizedBlock{ Root: block.Root[:], Slot: uint64(block.Slot), HeaderVer: 1, @@ -413,7 +531,18 @@ func (block *Block) buildUnfinalizedBlock(ctx context.Context, compress bool) (* MaxExecTime: uint32(block.maxExecutionTime), ExecTimes: execTimesSSZ, BlockUid: block.BlockUID, - }, nil + } + + if block.executionPayload != nil { + payloadVer, payloadSSZ, err := MarshalVersionedSignedExecutionPayloadEnvelopeSSZ(block.dynSsz, block.executionPayload, compress) + if err != nil { + return nil, fmt.Errorf("marshal execution payload ssz failed: %v", err) + } + unfinalizedBlock.PayloadVer = payloadVer + unfinalizedBlock.PayloadSSZ = payloadSSZ + } + + return unfinalizedBlock, nil } // buildOrphanedBlock builds an orphaned block from the block data. 
@@ -432,14 +561,25 @@ func (block *Block) buildOrphanedBlock(ctx context.Context, compress bool) (*dbt return nil, fmt.Errorf("marshal block ssz failed: %v", err) } - return &dbtypes.OrphanedBlock{ + orphanedBlock := &dbtypes.OrphanedBlock{ Root: block.Root[:], HeaderVer: 1, HeaderSSZ: headerSSZ, BlockVer: blockVer, BlockSSZ: blockSSZ, BlockUid: block.BlockUID, - }, nil + } + + if block.executionPayload != nil { + payloadVer, payloadSSZ, err := MarshalVersionedSignedExecutionPayloadEnvelopeSSZ(block.dynSsz, block.executionPayload, compress) + if err != nil { + return nil, fmt.Errorf("marshal execution payload ssz failed: %v", err) + } + orphanedBlock.PayloadVer = payloadVer + orphanedBlock.PayloadSSZ = payloadSSZ + } + + return orphanedBlock, nil } func (block *Block) writeToBlockDb(ctx context.Context) error { @@ -447,7 +587,7 @@ func (block *Block) writeToBlockDb(ctx context.Context) error { return nil } - _, err := blockdb.GlobalBlockDb.AddBlockWithCallback(ctx, uint64(block.Slot), block.Root[:], func() (*btypes.BlockData, error) { + _, _, err := blockdb.GlobalBlockDb.AddBlockWithCallback(ctx, uint64(block.Slot), block.Root[:], func() (*btypes.BlockData, error) { headerSSZ, err := block.header.MarshalSSZ() if err != nil { return nil, fmt.Errorf("marshal header ssz failed: %v", err) @@ -478,9 +618,12 @@ func (block *Block) unpruneBlockBody(ctx context.Context) { return } - dbBlock := db.GetUnfinalizedBlock(ctx, block.Root[:]) + dbBlock := db.GetUnfinalizedBlock(ctx, block.Root[:], false, true, true) if dbBlock != nil { block.block, _ = UnmarshalVersionedSignedBeaconBlockSSZ(block.dynSsz, dbBlock.BlockVer, dbBlock.BlockSSZ) + if len(dbBlock.PayloadSSZ) > 0 { + block.executionPayload, _ = UnmarshalVersionedSignedExecutionPayloadEnvelopeSSZ(block.dynSsz, dbBlock.PayloadVer, dbBlock.PayloadSSZ) + } } } diff --git a/indexer/beacon/block_helper.go b/indexer/beacon/block_helper.go index c943ede1c..24be5efec 100644 --- a/indexer/beacon/block_helper.go +++ 
b/indexer/beacon/block_helper.go @@ -4,14 +4,15 @@ import ( "errors" "fmt" - "github.com/attestantio/go-eth2-client/spec" - "github.com/attestantio/go-eth2-client/spec/altair" - "github.com/attestantio/go-eth2-client/spec/bellatrix" - "github.com/attestantio/go-eth2-client/spec/capella" - "github.com/attestantio/go-eth2-client/spec/deneb" - "github.com/attestantio/go-eth2-client/spec/electra" - "github.com/attestantio/go-eth2-client/spec/phase0" "github.com/ethpandaops/dora/utils" + "github.com/ethpandaops/go-eth2-client/spec" + "github.com/ethpandaops/go-eth2-client/spec/altair" + "github.com/ethpandaops/go-eth2-client/spec/bellatrix" + "github.com/ethpandaops/go-eth2-client/spec/capella" + "github.com/ethpandaops/go-eth2-client/spec/deneb" + "github.com/ethpandaops/go-eth2-client/spec/electra" + "github.com/ethpandaops/go-eth2-client/spec/gloas" + "github.com/ethpandaops/go-eth2-client/spec/phase0" dynssz "github.com/pk910/dynamic-ssz" ) @@ -47,6 +48,9 @@ func MarshalVersionedSignedBeaconBlockSSZ(dynSsz *dynssz.DynSsz, block *spec.Ver case spec.DataVersionFulu: version = uint64(block.Version) ssz, err = dynSsz.MarshalSSZ(block.Fulu) + case spec.DataVersionGloas: + version = uint64(block.Version) + ssz, err = dynSsz.MarshalSSZ(block.Gloas) default: err = fmt.Errorf("unknown block version") } @@ -118,6 +122,11 @@ func UnmarshalVersionedSignedBeaconBlockSSZ(dynSsz *dynssz.DynSsz, version uint6 if err := dynSsz.UnmarshalSSZ(block.Fulu, ssz); err != nil { return nil, fmt.Errorf("failed to decode fulu signed beacon block: %v", err) } + case spec.DataVersionGloas: + block.Gloas = &gloas.SignedBeaconBlock{} + if err := dynSsz.UnmarshalSSZ(block.Gloas, ssz); err != nil { + return nil, fmt.Errorf("failed to decode gloas signed beacon block: %v", err) + } default: return nil, fmt.Errorf("unknown block version") } @@ -148,6 +157,9 @@ func MarshalVersionedSignedBeaconBlockJson(block *spec.VersionedSignedBeaconBloc case spec.DataVersionFulu: version = uint64(block.Version) 
jsonRes, err = block.Fulu.MarshalJSON() + case spec.DataVersionGloas: + version = uint64(block.Version) + jsonRes, err = block.Gloas.MarshalJSON() default: err = fmt.Errorf("unknown block version") } @@ -201,12 +213,195 @@ func unmarshalVersionedSignedBeaconBlockJson(version uint64, ssz []byte) (*spec. if err := block.Fulu.UnmarshalJSON(ssz); err != nil { return nil, fmt.Errorf("failed to decode fulu signed beacon block: %v", err) } + case spec.DataVersionGloas: + block.Gloas = &gloas.SignedBeaconBlock{} + if err := block.Gloas.UnmarshalJSON(ssz); err != nil { + return nil, fmt.Errorf("failed to decode gloas signed beacon block: %v", err) + } default: return nil, fmt.Errorf("unknown block version") } return block, nil } +// MarshalVersionedSignedExecutionPayloadEnvelopeSSZ marshals a signed execution payload envelope using SSZ encoding. +func MarshalVersionedSignedExecutionPayloadEnvelopeSSZ(dynSsz *dynssz.DynSsz, payload *gloas.SignedExecutionPayloadEnvelope, compress bool) (version uint64, ssz []byte, err error) { + if utils.Config.KillSwitch.DisableSSZEncoding { + // SSZ encoding disabled, use json instead + version, ssz, err = marshalVersionedSignedExecutionPayloadEnvelopeJson(payload) + } else { + // SSZ encoding + version = uint64(spec.DataVersionGloas) + ssz, err = dynSsz.MarshalSSZ(payload) + } + + if compress { + ssz = compressBytes(ssz) + version |= compressionFlag + } + + return +} + +// UnmarshalVersionedSignedExecutionPayloadEnvelopeSSZ unmarshals a versioned signed execution payload envelope using SSZ encoding. 
+func UnmarshalVersionedSignedExecutionPayloadEnvelopeSSZ(dynSsz *dynssz.DynSsz, version uint64, ssz []byte) (*gloas.SignedExecutionPayloadEnvelope, error) { + if (version & compressionFlag) != 0 { + // decompress + if d, err := decompressBytes(ssz); err != nil { + return nil, fmt.Errorf("failed to decompress: %v", err) + } else { + ssz = d + version &= ^compressionFlag + } + } + + if (version & jsonVersionFlag) != 0 { + // JSON encoding + return unmarshalVersionedSignedExecutionPayloadEnvelopeJson(version, ssz) + } + + if version != uint64(spec.DataVersionGloas) { + return nil, fmt.Errorf("unknown version") + } + + // SSZ encoding + payload := &gloas.SignedExecutionPayloadEnvelope{} + if err := dynSsz.UnmarshalSSZ(payload, ssz); err != nil { + return nil, fmt.Errorf("failed to decode gloas signed execution payload envelope: %v", err) + } + + return payload, nil +} + +// marshalVersionedSignedExecutionPayloadEnvelopeJson marshals a versioned signed execution payload envelope using JSON encoding. +func marshalVersionedSignedExecutionPayloadEnvelopeJson(payload *gloas.SignedExecutionPayloadEnvelope) (version uint64, jsonRes []byte, err error) { + version = uint64(spec.DataVersionGloas) + jsonRes, err = payload.MarshalJSON() + + version |= jsonVersionFlag + + return +} + +// unmarshalVersionedSignedExecutionPayloadEnvelopeJson unmarshals a versioned signed execution payload envelope using JSON encoding. 
+func unmarshalVersionedSignedExecutionPayloadEnvelopeJson(version uint64, ssz []byte) (*gloas.SignedExecutionPayloadEnvelope, error) { + if version&jsonVersionFlag == 0 { + return nil, fmt.Errorf("no json encoding") + } + + if version-jsonVersionFlag != uint64(spec.DataVersionGloas) { + return nil, fmt.Errorf("unknown version") + } + + payload := &gloas.SignedExecutionPayloadEnvelope{} + if err := payload.UnmarshalJSON(ssz); err != nil { + return nil, fmt.Errorf("failed to decode gloas signed execution payload envelope: %v", err) + } + return payload, nil +} + +// getBlockExecutionExtraData returns the extra data from the execution payload of a versioned signed beacon block. +func getBlockExecutionExtraData(v *spec.VersionedSignedBeaconBlock) ([]byte, error) { + switch v.Version { + case spec.DataVersionBellatrix: + if v.Bellatrix == nil || v.Bellatrix.Message == nil || v.Bellatrix.Message.Body == nil || v.Bellatrix.Message.Body.ExecutionPayload == nil { + return nil, errors.New("no bellatrix block") + } + + return v.Bellatrix.Message.Body.ExecutionPayload.ExtraData, nil + case spec.DataVersionCapella: + if v.Capella == nil || v.Capella.Message == nil || v.Capella.Message.Body == nil || v.Capella.Message.Body.ExecutionPayload == nil { + return nil, errors.New("no capella block") + } + + return v.Capella.Message.Body.ExecutionPayload.ExtraData, nil + case spec.DataVersionDeneb: + if v.Deneb == nil || v.Deneb.Message == nil || v.Deneb.Message.Body == nil || v.Deneb.Message.Body.ExecutionPayload == nil { + return nil, errors.New("no deneb block") + } + + return v.Deneb.Message.Body.ExecutionPayload.ExtraData, nil + case spec.DataVersionElectra: + if v.Electra == nil || v.Electra.Message == nil || v.Electra.Message.Body == nil || v.Electra.Message.Body.ExecutionPayload == nil { + return nil, errors.New("no electra block") + } + + return v.Electra.Message.Body.ExecutionPayload.ExtraData, nil + case spec.DataVersionGloas: + return nil, nil + default: + return nil, 
errors.New("unknown version") + } +} + +// getBlockPayloadBuilderIndex returns the builder index from the execution payload of a versioned signed beacon block. +func getBlockPayloadBuilderIndex(v *spec.VersionedSignedBeaconBlock) (gloas.BuilderIndex, error) { + switch v.Version { + case spec.DataVersionPhase0: + return 0, errors.New("no builder index in phase0 block") + case spec.DataVersionAltair: + return 0, errors.New("no builder index in altair block") + case spec.DataVersionBellatrix: + return 0, errors.New("no builder index in bellatrix block") + case spec.DataVersionCapella: + return 0, errors.New("no builder index in capella block") + case spec.DataVersionDeneb: + return 0, errors.New("no builder index in deneb block") + case spec.DataVersionElectra: + return 0, errors.New("no builder index in electra block") + case spec.DataVersionGloas: + if v.Gloas == nil || v.Gloas.Message == nil || v.Gloas.Message.Body == nil || v.Gloas.Message.Body.SignedExecutionPayloadBid == nil || v.Gloas.Message.Body.SignedExecutionPayloadBid.Message == nil { + return 0, errors.New("no gloas block") + } + + return v.Gloas.Message.Body.SignedExecutionPayloadBid.Message.BuilderIndex, nil + default: + return 0, errors.New("unknown version") + } +} + +// getBlockExecutionParentHash returns the parent hash from the execution payload of a versioned signed beacon block. 
+func getBlockExecutionParentHash(v *spec.VersionedSignedBeaconBlock) (phase0.Hash32, error) { + switch v.Version { + case spec.DataVersionPhase0: + return phase0.Hash32{}, errors.New("no parent hash in phase0 block") + case spec.DataVersionAltair: + return phase0.Hash32{}, errors.New("no parent hash in altair block") + case spec.DataVersionBellatrix: + if v.Bellatrix == nil || v.Bellatrix.Message == nil || v.Bellatrix.Message.Body == nil || v.Bellatrix.Message.Body.ExecutionPayload == nil { + return phase0.Hash32{}, errors.New("no bellatrix block") + } + + return v.Bellatrix.Message.Body.ExecutionPayload.ParentHash, nil + case spec.DataVersionCapella: + if v.Capella == nil || v.Capella.Message == nil || v.Capella.Message.Body == nil || v.Capella.Message.Body.ExecutionPayload == nil { + return phase0.Hash32{}, errors.New("no capella block") + } + + return v.Capella.Message.Body.ExecutionPayload.ParentHash, nil + case spec.DataVersionDeneb: + if v.Deneb == nil || v.Deneb.Message == nil || v.Deneb.Message.Body == nil || v.Deneb.Message.Body.ExecutionPayload == nil { + return phase0.Hash32{}, errors.New("no deneb block") + } + + return v.Deneb.Message.Body.ExecutionPayload.ParentHash, nil + case spec.DataVersionElectra: + if v.Electra == nil || v.Electra.Message == nil || v.Electra.Message.Body == nil || v.Electra.Message.Body.ExecutionPayload == nil { + return phase0.Hash32{}, errors.New("no electra block") + } + + return v.Electra.Message.Body.ExecutionPayload.ParentHash, nil + case spec.DataVersionGloas: + if v.Gloas == nil || v.Gloas.Message == nil || v.Gloas.Message.Body == nil || v.Gloas.Message.Body.SignedExecutionPayloadBid == nil || v.Gloas.Message.Body.SignedExecutionPayloadBid.Message == nil { + return phase0.Hash32{}, errors.New("no gloas block") + } + + return v.Gloas.Message.Body.SignedExecutionPayloadBid.Message.ParentBlockHash, nil + default: + return phase0.Hash32{}, errors.New("unknown version") + } +} + // getStateRandaoMixes returns the RANDAO 
mixes from a versioned beacon state. func getStateRandaoMixes(v *spec.VersionedBeaconState) ([]phase0.Root, error) { switch v.Version { @@ -252,6 +447,12 @@ func getStateRandaoMixes(v *spec.VersionedBeaconState) ([]phase0.Root, error) { } return v.Fulu.RANDAOMixes, nil + case spec.DataVersionGloas: + if v.Gloas == nil || v.Gloas.RANDAOMixes == nil { + return nil, errors.New("no gloas block") + } + + return v.Gloas.RANDAOMixes, nil default: return nil, errors.New("unknown version") } @@ -274,6 +475,8 @@ func getStateDepositIndex(state *spec.VersionedBeaconState) uint64 { return state.Electra.ETH1DepositIndex case spec.DataVersionFulu: return state.Fulu.ETH1DepositIndex + case spec.DataVersionGloas: + return state.Gloas.ETH1DepositIndex } return 0 } @@ -319,6 +522,12 @@ func getStateCurrentSyncCommittee(v *spec.VersionedBeaconState) ([]phase0.BLSPub } return v.Fulu.CurrentSyncCommittee.Pubkeys, nil + case spec.DataVersionGloas: + if v.Gloas == nil || v.Gloas.CurrentSyncCommittee == nil { + return nil, errors.New("no gloas block") + } + + return v.Gloas.CurrentSyncCommittee.Pubkeys, nil default: return nil, errors.New("unknown version") } @@ -349,6 +558,12 @@ func getStateDepositBalanceToConsume(v *spec.VersionedBeaconState) (phase0.Gwei, } return v.Fulu.DepositBalanceToConsume, nil + case spec.DataVersionGloas: + if v.Gloas == nil { + return 0, errors.New("no gloas block") + } + + return v.Gloas.DepositBalanceToConsume, nil default: return 0, errors.New("unknown version") } @@ -368,17 +583,23 @@ func getStatePendingDeposits(v *spec.VersionedBeaconState) ([]*electra.PendingDe case spec.DataVersionDeneb: return nil, errors.New("no pending deposits in deneb") case spec.DataVersionElectra: - if v.Electra == nil || v.Electra.PendingDeposits == nil { + if v.Electra == nil { return nil, errors.New("no electra block") } return v.Electra.PendingDeposits, nil case spec.DataVersionFulu: - if v.Fulu == nil || v.Fulu.PendingDeposits == nil { + if v.Fulu == nil { return nil, 
errors.New("no fulu block") } return v.Fulu.PendingDeposits, nil + case spec.DataVersionGloas: + if v.Gloas == nil { + return nil, errors.New("no gloas block") + } + + return v.Gloas.PendingDeposits, nil default: return nil, errors.New("unknown version") } @@ -398,22 +619,40 @@ func getStatePendingWithdrawals(v *spec.VersionedBeaconState) ([]*electra.Pendin case spec.DataVersionDeneb: return nil, errors.New("no pending withdrawals in deneb") case spec.DataVersionElectra: - if v.Electra == nil || v.Electra.PendingPartialWithdrawals == nil { + if v.Electra == nil { return nil, errors.New("no electra block") } return v.Electra.PendingPartialWithdrawals, nil case spec.DataVersionFulu: - if v.Fulu == nil || v.Fulu.PendingPartialWithdrawals == nil { + if v.Fulu == nil { return nil, errors.New("no fulu block") } return v.Fulu.PendingPartialWithdrawals, nil + case spec.DataVersionGloas: + if v.Gloas == nil { + return nil, errors.New("no gloas block") + } + + return v.Gloas.PendingPartialWithdrawals, nil default: return nil, errors.New("unknown version") } } +// getStateBuilderPendingWithdrawals returns the builder pending withdrawals from a versioned beacon state. +func getStateBuilderPendingWithdrawals(v *spec.VersionedBeaconState) ([]*gloas.BuilderPendingWithdrawal, error) { + if v.Version < spec.DataVersionGloas { + return nil, nil // no builder pending withdrawals before gloas + } + if v.Gloas == nil { + return nil, errors.New("no gloas state") + } + + return v.Gloas.BuilderPendingWithdrawals, nil +} + // getStatePendingConsolidations returns the pending consolidations from a versioned beacon state. 
func getStatePendingConsolidations(v *spec.VersionedBeaconState) ([]*electra.PendingConsolidation, error) { switch v.Version { @@ -428,17 +667,23 @@ func getStatePendingConsolidations(v *spec.VersionedBeaconState) ([]*electra.Pen case spec.DataVersionDeneb: return nil, errors.New("no pending consolidations in deneb") case spec.DataVersionElectra: - if v.Electra == nil || v.Electra.PendingConsolidations == nil { + if v.Electra == nil { return nil, errors.New("no electra block") } return v.Electra.PendingConsolidations, nil case spec.DataVersionFulu: - if v.Fulu == nil || v.Fulu.PendingConsolidations == nil { + if v.Fulu == nil { return nil, errors.New("no fulu block") } return v.Fulu.PendingConsolidations, nil + case spec.DataVersionGloas: + if v.Gloas == nil { + return nil, errors.New("no gloas block") + } + + return v.Gloas.PendingConsolidations, nil default: return nil, errors.New("unknown version") } @@ -460,11 +705,17 @@ func getStateProposerLookahead(v *spec.VersionedBeaconState) ([]phase0.Validator case spec.DataVersionElectra: return nil, errors.New("no proposer lookahead in electra") case spec.DataVersionFulu: - if v.Fulu == nil || v.Fulu.ProposerLookahead == nil { + if v.Fulu == nil { return nil, errors.New("no fulu block") } return v.Fulu.ProposerLookahead, nil + case spec.DataVersionGloas: + if v.Gloas == nil { + return nil, errors.New("no gloas block") + } + + return v.Gloas.ProposerLookahead, nil default: return nil, errors.New("unknown version") } @@ -487,6 +738,8 @@ func getBlockSize(dynSsz *dynssz.DynSsz, block *spec.VersionedSignedBeaconBlock) return dynSsz.SizeSSZ(block.Electra) case spec.DataVersionFulu: return dynSsz.SizeSSZ(block.Fulu) + case spec.DataVersionGloas: + return dynSsz.SizeSSZ(block.Gloas) default: return 0, errors.New("unknown version") } diff --git a/indexer/beacon/blockcache.go b/indexer/beacon/blockcache.go index f59cc4898..f5c79ac17 100644 --- a/indexer/beacon/blockcache.go +++ b/indexer/beacon/blockcache.go @@ -5,9 +5,9 @@ 
import ( "sort" "sync" - "github.com/attestantio/go-eth2-client/spec/phase0" "github.com/ethpandaops/dora/clients/consensus" "github.com/ethpandaops/dora/db" + "github.com/ethpandaops/go-eth2-client/spec/phase0" ) var zeroHash = phase0.Hash32{} diff --git a/indexer/beacon/buildercache.go b/indexer/beacon/buildercache.go new file mode 100644 index 000000000..0a6bb62e9 --- /dev/null +++ b/indexer/beacon/buildercache.go @@ -0,0 +1,740 @@ +package beacon + +import ( + "bytes" + "fmt" + "hash/crc64" + "math" + "runtime/debug" + "sync" + "time" + + "github.com/ethpandaops/go-eth2-client/spec/gloas" + "github.com/ethpandaops/go-eth2-client/spec/phase0" + "github.com/jmoiron/sqlx" + + "github.com/ethpandaops/dora/db" + "github.com/ethpandaops/dora/dbtypes" +) + +// BuilderIndexFlag separates builder indices from validator indices in the pubkey cache +const BuilderIndexFlag = uint64(1 << 40) + +// Builder status flag constants representing different builder states +const ( + BuilderStatusExited uint16 = 1 << iota // Builder has exited (withdrawable_epoch reached) + BuilderStatusSuperseded // Builder index was reused, this pubkey is no longer active +) + +// builderCache manages the in-memory cache of builder states and handles updates +type builderCache struct { + indexer *Indexer + builderSetCache []*builderEntry + cacheMutex sync.RWMutex + triggerDbUpdate chan bool +} + +// builderEntry represents a single builder's state in the cache +type builderEntry struct { + builderDiffs []*builderDiff + finalChecksum uint64 + finalBuilder *gloas.Builder + activeData *BuilderData + statusFlags uint16 +} + +// BuilderData contains the essential builder state information for active builders. +// Only WithdrawableEpoch can change during a builder's lifetime; all other fields are static. +type BuilderData struct { + WithdrawableEpoch phase0.Epoch +} + +// builderDiff represents an updated builder entry in the builder set cache. 
+type builderDiff struct { + epoch phase0.Epoch + dependentRoot phase0.Root + builder *gloas.Builder +} + +// newBuilderCache initializes a new builder cache instance and starts the persist loop +func newBuilderCache(indexer *Indexer) *builderCache { + cache := &builderCache{ + indexer: indexer, + triggerDbUpdate: make(chan bool, 1), + } + + go cache.runPersistLoop() + + return cache +} + +// updateBuilderSet processes builder set updates and maintains the cache state +func (cache *builderCache) updateBuilderSet(slot phase0.Slot, dependentRoot phase0.Root, builders []*gloas.Builder) { + chainState := cache.indexer.consensusPool.GetChainState() + epoch := chainState.EpochOfSlot(slot) + currentEpoch := chainState.CurrentEpoch() + finalizedEpoch, finalizedRoot := chainState.GetFinalizedCheckpoint() + cutOffEpoch := phase0.Epoch(0) + if currentEpoch > phase0.Epoch(cache.indexer.inMemoryEpochs) { + cutOffEpoch = currentEpoch - phase0.Epoch(cache.indexer.inMemoryEpochs) + } + if cutOffEpoch > finalizedEpoch { + cutOffEpoch = finalizedEpoch + } + + if epoch < cutOffEpoch { + cache.indexer.logger.Infof("ignoring old builder set update for epoch %d", epoch) + return + } + + isFinalizedBuilderSet := false + if slot == 0 { + isFinalizedBuilderSet = true // genesis + } else if epoch <= finalizedEpoch { + finalizedBlock := cache.indexer.blockCache.getBlockByRoot(finalizedRoot) + if finalizedBlock != nil { + finalizedDependentBlock := cache.indexer.blockCache.getDependentBlock(chainState, finalizedBlock, nil) + if finalizedDependentBlock != nil && bytes.Equal(finalizedDependentBlock.Root[:], dependentRoot[:]) { + isFinalizedBuilderSet = true + } + } + } + + cache.cacheMutex.Lock() + defer cache.cacheMutex.Unlock() + + t1 := time.Now() + + if len(cache.builderSetCache) < len(builders) { + if len(builders) > cap(cache.builderSetCache) { + newCache := make([]*builderEntry, len(builders), len(builders)+1000) + copy(newCache, cache.builderSetCache) + cache.builderSetCache = newCache 
+ } else { + cache.builderSetCache = cache.builderSetCache[:len(builders)] + } + } + + isParentMap := map[phase0.Root]bool{} + isAheadMap := map[phase0.Root]bool{} + updatedCount := uint64(0) + + for i := range builders { + var parentChecksum uint64 + var parentBuilder *gloas.Builder + parentEpoch := phase0.Epoch(0) + + aheadDiffIdx := 0 + foundAhead := false + aheadEpoch := phase0.Epoch(math.MaxInt64) + + cachedBuilder := cache.builderSetCache[i] + if cachedBuilder == nil { + cachedBuilder = &builderEntry{} + cache.builderSetCache[i] = cachedBuilder + + cache.indexer.pubkeyCache.Add(builders[i].PublicKey, phase0.ValidatorIndex(uint64(i)|BuilderIndexFlag)) + } else { + parentBuilder = cachedBuilder.finalBuilder + parentChecksum = cachedBuilder.finalChecksum + } + + deleteKeys := []int{} + + if !isFinalizedBuilderSet { + for diffkey, diff := range cachedBuilder.builderDiffs { + if diff.epoch < cutOffEpoch { + deleteKeys = append(deleteKeys, diffkey) + continue + } + + if diff.epoch < epoch { + isParent, checkedParent := isParentMap[diff.dependentRoot] + if !checkedParent { + isParent = cache.indexer.blockCache.isCanonicalBlock(diff.dependentRoot, dependentRoot) + isParentMap[diff.dependentRoot] = isParent + } + + if isParent && diff.epoch > parentEpoch { + parentBuilder = diff.builder + parentEpoch = diff.epoch + } + } + + if diff.epoch > epoch { + isAhead, checkedAhead := isAheadMap[diff.dependentRoot] + if !checkedAhead { + isAhead = cache.indexer.blockCache.isCanonicalBlock(dependentRoot, diff.dependentRoot) + isAheadMap[diff.dependentRoot] = isAhead + } + + if isAhead && diff.epoch < aheadEpoch { + aheadDiffIdx = diffkey + aheadEpoch = diff.epoch + foundAhead = true + } + } + } + + if parentBuilder != nil { + parentChecksum = calculateBuilderChecksum(parentBuilder) + } + } + + checksum := calculateBuilderChecksum(builders[i]) + if checksum == parentChecksum { + continue + } + + if isFinalizedBuilderSet { + cachedBuilder.finalBuilder = builders[i] + 
cachedBuilder.finalChecksum = checksum + cachedBuilder.statusFlags = GetBuilderStatusFlags(builders[i]) + updatedCount++ + + activeData := &BuilderData{ + WithdrawableEpoch: builders[i].WithdrawableEpoch, + } + if cache.isActiveBuilder(activeData) { + cachedBuilder.activeData = activeData + } + } + + if foundAhead && cache.checkBuilderEqual(cachedBuilder.builderDiffs[aheadDiffIdx].builder, builders[i]) { + if isFinalizedBuilderSet { + deleteKeys = append(deleteKeys, aheadDiffIdx) + } else { + diff := cachedBuilder.builderDiffs[aheadDiffIdx] + diff.epoch = epoch + diff.dependentRoot = dependentRoot + cachedBuilder.builderDiffs[aheadDiffIdx] = diff + } + } else if isFinalizedBuilderSet { + } else if len(deleteKeys) == 0 { + cachedBuilder.builderDiffs = append(cachedBuilder.builderDiffs, &builderDiff{ + epoch: epoch, + dependentRoot: dependentRoot, + builder: builders[i], + }) + } else { + cachedBuilder.builderDiffs[deleteKeys[0]] = &builderDiff{ + epoch: epoch, + dependentRoot: dependentRoot, + builder: builders[i], + } + deleteKeys = deleteKeys[1:] + } + + if len(deleteKeys) > 0 { + lastIdx := len(cachedBuilder.builderDiffs) - 1 + delLen := len(deleteKeys) + for delIdx := 0; delIdx < delLen; delIdx++ { + for delLen > 0 && deleteKeys[delLen-1] == lastIdx { + lastIdx-- + delLen-- + } + if delLen == 0 { + break + } + cachedBuilder.builderDiffs[deleteKeys[delIdx]] = cachedBuilder.builderDiffs[lastIdx] + lastIdx-- + } + + cachedBuilder.builderDiffs = cachedBuilder.builderDiffs[:lastIdx+1] + } + } + + if updatedCount > 0 { + select { + case cache.triggerDbUpdate <- true: + default: + } + } + + isFinalizedStr := "" + if isFinalizedBuilderSet { + isFinalizedStr = "finalized " + } + cache.indexer.logger.Infof("processed %vbuilder set update for epoch %d in %v", isFinalizedStr, epoch, time.Since(t1)) +} + +// checkBuilderEqual compares two builder states for equality +func (cache *builderCache) checkBuilderEqual(builder1 *gloas.Builder, builder2 *gloas.Builder) bool { + if 
builder1 == nil && builder2 == nil { + return true + } + if builder1 == nil || builder2 == nil { + return false + } + return bytes.Equal(builder1.PublicKey[:], builder2.PublicKey[:]) && + builder1.Version == builder2.Version && + bytes.Equal(builder1.ExecutionAddress[:], builder2.ExecutionAddress[:]) && + builder1.DepositEpoch == builder2.DepositEpoch && + builder1.WithdrawableEpoch == builder2.WithdrawableEpoch +} + +// GetBuilderStatusFlags calculates the status flags for a builder +func GetBuilderStatusFlags(builder *gloas.Builder) uint16 { + flags := uint16(0) + if builder.WithdrawableEpoch != FarFutureEpoch { + flags |= BuilderStatusExited + } + return flags +} + +// getBuilderSetSize returns the current number of builders in the builder set +func (cache *builderCache) getBuilderSetSize() uint64 { + cache.cacheMutex.RLock() + defer cache.cacheMutex.RUnlock() + + return uint64(len(cache.builderSetCache)) +} + +// setFinalizedEpoch updates the builder cache when a new epoch is finalized. +// dependentRoot is the dependent root of the finalized epoch (last block of the parent epoch). 
+func (cache *builderCache) setFinalizedEpoch(epoch phase0.Epoch, dependentRoot phase0.Root) { + cache.cacheMutex.Lock() + defer cache.cacheMutex.Unlock() + + updatedCount := uint64(0) + + for _, cachedBuilder := range cache.builderSetCache { + if cachedBuilder == nil { + continue + } + + // Find the finalized builder state + for _, diff := range cachedBuilder.builderDiffs { + if diff.dependentRoot == dependentRoot { + cachedBuilder.finalBuilder = diff.builder + cachedBuilder.finalChecksum = calculateBuilderChecksum(diff.builder) + cachedBuilder.statusFlags = GetBuilderStatusFlags(diff.builder) + updatedCount++ + + cachedBuilder.activeData = &BuilderData{ + WithdrawableEpoch: diff.builder.WithdrawableEpoch, + } + break + } + } + + // Clean up old diffs + newDiffs := make([]*builderDiff, 0) + for _, diff := range cachedBuilder.builderDiffs { + if diff.epoch > epoch { + newDiffs = append(newDiffs, diff) + } + } + cachedBuilder.builderDiffs = newDiffs + + // Clear old active data + if cachedBuilder.activeData != nil { + if !cache.isActiveBuilder(cachedBuilder.activeData) { + cachedBuilder.activeData = nil + } + } + } + + if updatedCount > 0 { + select { + case cache.triggerDbUpdate <- true: + default: + } + } +} + +// BuilderSetStreamer is a callback for streaming builder data +type BuilderSetStreamer func(index gloas.BuilderIndex, flags uint16, activeData *BuilderData, builder *gloas.Builder) error + +// streamBuilderSetForRoot streams the builder set for a given blockRoot +func (cache *builderCache) streamBuilderSetForRoot(blockRoot phase0.Root, onlyActive bool, epoch *phase0.Epoch, cb BuilderSetStreamer) error { + cache.cacheMutex.RLock() + defer cache.cacheMutex.RUnlock() + + isParentMap := map[phase0.Root]bool{} + isAheadMap := map[phase0.Root]bool{} + + for index, cachedBuilder := range cache.builderSetCache { + if cachedBuilder == nil { + continue + } + + latestBuilder := cachedBuilder.finalBuilder + builderData := cachedBuilder.activeData + builderEpoch := 
phase0.Epoch(0) + + var aheadBuilder *gloas.Builder + aheadEpoch := phase0.Epoch(math.MaxInt64) + + for _, diff := range cachedBuilder.builderDiffs { + isParent, checkedParent := isParentMap[diff.dependentRoot] + if !checkedParent { + isParent = cache.indexer.blockCache.isCanonicalBlock(diff.dependentRoot, blockRoot) + isParentMap[diff.dependentRoot] = isParent + } + + if isParent && diff.epoch >= builderEpoch { + builderData = &BuilderData{ + WithdrawableEpoch: diff.builder.WithdrawableEpoch, + } + builderEpoch = diff.epoch + latestBuilder = diff.builder + } + + if !isParent && builderData == nil { + isAhead, checkedAhead := isAheadMap[diff.dependentRoot] + if !checkedAhead { + isAhead = cache.indexer.blockCache.isCanonicalBlock(blockRoot, diff.dependentRoot) + isAheadMap[diff.dependentRoot] = isAhead + } + + if isAhead && diff.epoch < aheadEpoch { + aheadBuilder = diff.builder + aheadEpoch = diff.epoch + } + } + } + + if builderData == nil && aheadBuilder != nil { + builderData = &BuilderData{ + WithdrawableEpoch: aheadBuilder.WithdrawableEpoch, + } + latestBuilder = aheadBuilder + } + + if onlyActive && (builderData == nil || (epoch != nil && builderData.WithdrawableEpoch <= *epoch)) { + continue + } + + builderFlags := cachedBuilder.statusFlags + if latestBuilder != nil { + builderFlags = GetBuilderStatusFlags(latestBuilder) + } + + err := cb(gloas.BuilderIndex(index), builderFlags, builderData, latestBuilder) + if err != nil { + return err + } + } + + return nil +} + +// UnwrapDbBuilder converts a dbtypes.Builder to a gloas.Builder +func UnwrapDbBuilder(dbBuilder *dbtypes.Builder) *gloas.Builder { + builder := &gloas.Builder{ + Version: dbBuilder.Version, + Balance: 0, // Balance not persisted + DepositEpoch: phase0.Epoch(db.ConvertInt64ToUint64(dbBuilder.DepositEpoch)), + WithdrawableEpoch: phase0.Epoch(db.ConvertInt64ToUint64(dbBuilder.WithdrawableEpoch)), + } + copy(builder.PublicKey[:], dbBuilder.Pubkey) + copy(builder.ExecutionAddress[:], 
dbBuilder.ExecutionAddress) + return builder +} + +// isActiveBuilder determines if a builder is currently active +func (cache *builderCache) isActiveBuilder(builder *BuilderData) bool { + currentEpoch := cache.indexer.consensusPool.GetChainState().CurrentEpoch() + cutOffEpoch := phase0.Epoch(0) + if currentEpoch > 10 { + cutOffEpoch = currentEpoch - 10 + } + + return builder.WithdrawableEpoch > cutOffEpoch +} + +// getBuilderByIndex returns the builder by index for a given forkId +func (cache *builderCache) getBuilderByIndex(index gloas.BuilderIndex, overrideForkId *ForkKey) *gloas.Builder { + canonicalHead := cache.indexer.GetCanonicalHead(overrideForkId) + if canonicalHead == nil { + return nil + } + + return cache.getBuilderByIndexAndRoot(index, canonicalHead.Root) +} + +// getBuilderByIndexAndRoot returns the builder by index for a given blockRoot +func (cache *builderCache) getBuilderByIndexAndRoot(index gloas.BuilderIndex, blockRoot phase0.Root) *gloas.Builder { + cache.cacheMutex.RLock() + defer cache.cacheMutex.RUnlock() + + if uint64(index) >= uint64(len(cache.builderSetCache)) { + return nil + } + + cachedBuilder := cache.builderSetCache[index] + if cachedBuilder == nil { + return nil + } + + builder := cachedBuilder.finalBuilder + builderEpoch := phase0.Epoch(0) + + // Find the latest valid diff + for _, diff := range cachedBuilder.builderDiffs { + if cache.indexer.blockCache.isCanonicalBlock(diff.dependentRoot, blockRoot) && diff.epoch >= builderEpoch { + builder = diff.builder + builderEpoch = diff.epoch + } + } + + // Fallback to db if builder is not found in cache + if builder == nil { + if dbBuilder := db.GetActiveBuilderByIndex(cache.indexer.ctx, uint64(index)); dbBuilder != nil { + builder = UnwrapDbBuilder(dbBuilder) + } + } else { + // Return a copy + builder = &gloas.Builder{ + PublicKey: builder.PublicKey, + Version: builder.Version, + ExecutionAddress: builder.ExecutionAddress, + Balance: builder.Balance, + DepositEpoch: 
builder.DepositEpoch, + WithdrawableEpoch: builder.WithdrawableEpoch, + } + } + + return builder +} + +// calculateBuilderChecksum generates a CRC64 checksum of all builder fields (except balance) +func calculateBuilderChecksum(b *gloas.Builder) uint64 { + if b == nil { + return 0 + } + + data := make([]byte, 0, 80) + data = append(data, b.PublicKey[:]...) + data = append(data, b.Version) + data = append(data, b.ExecutionAddress[:]...) + data = append(data, uint64ToBytes(uint64(b.DepositEpoch))...) + data = append(data, uint64ToBytes(uint64(b.WithdrawableEpoch))...) + + return crc64.Checksum(data, crc64Table) +} + +// prepopulateFromDB pre-populates the builder set cache from the database +func (cache *builderCache) prepopulateFromDB() (uint64, error) { + cache.cacheMutex.Lock() + defer cache.cacheMutex.Unlock() + + maxIndex, err := db.GetMaxBuilderIndex(cache.indexer.ctx) + if err != nil { + return 0, fmt.Errorf("error getting max builder index: %w", err) + } + + if maxIndex == 0 { + return 0, nil + } + + cache.builderSetCache = make([]*builderEntry, maxIndex+1, maxIndex+1+1000) + + restoreCount := uint64(0) + + batchSize := uint64(10000) + for start := uint64(0); start <= maxIndex; start += batchSize { + end := min(start+batchSize, maxIndex) + + builders := db.GetBuilderRange(cache.indexer.ctx, start, end) + for _, dbBuilder := range builders { + if dbBuilder.Superseded { + continue + } + + builder := UnwrapDbBuilder(dbBuilder) + builderEntry := &builderEntry{ + finalChecksum: calculateBuilderChecksum(builder), + } + builderData := &BuilderData{ + WithdrawableEpoch: phase0.Epoch(db.ConvertInt64ToUint64(dbBuilder.WithdrawableEpoch)), + } + if cache.isActiveBuilder(builderData) { + builderEntry.activeData = builderData + } + builderEntry.statusFlags = GetBuilderStatusFlags(builder) + + cache.builderSetCache[dbBuilder.BuilderIndex] = builderEntry + + cache.indexer.pubkeyCache.Add(builder.PublicKey, phase0.ValidatorIndex(dbBuilder.BuilderIndex|BuilderIndexFlag)) + + 
restoreCount++ + } + } + + return restoreCount, nil +} + +// runPersistLoop handles the background persistence of builder states to the database +func (cache *builderCache) runPersistLoop() { + defer func() { + if err := recover(); err != nil { + cache.indexer.logger.WithError(err.(error)).Errorf( + "uncaught panic in indexer.beacon.builderCache.runPersistLoop subroutine: %v, stack: %v", + err, string(debug.Stack())) + time.Sleep(10 * time.Second) + + go cache.runPersistLoop() + } + }() + + for range cache.triggerDbUpdate { + time.Sleep(2 * time.Second) + err := db.RunDBTransaction(func(tx *sqlx.Tx) error { + hasMore, err := cache.persistBuilders(tx) + if hasMore { + select { + case cache.triggerDbUpdate <- true: + default: + } + } + return err + }) + if err != nil { + cache.indexer.logger.WithError(err).Errorf("error persisting builders") + } + } +} + +// persistBuilders writes a batch of builder states to the database +func (cache *builderCache) persistBuilders(tx *sqlx.Tx) (bool, error) { + cache.cacheMutex.RLock() + defer cache.cacheMutex.RUnlock() + + const batchSize = 1000 + const maxPerRun = 10000 + + batch := make([]*dbtypes.Builder, 0, batchSize) + batchIndices := make([]uint64, 0, batchSize) + supersededPubkeys := make([][]byte, 0) + persisted := 0 + firstIndex := uint64(0) + lastIndex := uint64(0) + hasMore := false + + for index, entry := range cache.builderSetCache { + if entry == nil || entry.finalBuilder == nil { + continue + } + + if persisted == 0 && len(batch) == 0 { + firstIndex = uint64(index) + } + lastIndex = uint64(index) + + dbBuilder := &dbtypes.Builder{ + Pubkey: entry.finalBuilder.PublicKey[:], + BuilderIndex: uint64(index), + Version: entry.finalBuilder.Version, + ExecutionAddress: entry.finalBuilder.ExecutionAddress[:], + DepositEpoch: db.ConvertUint64ToInt64(uint64(entry.finalBuilder.DepositEpoch)), + WithdrawableEpoch: db.ConvertUint64ToInt64(uint64(entry.finalBuilder.WithdrawableEpoch)), + Superseded: false, + } + + batch = 
append(batch, dbBuilder) + batchIndices = append(batchIndices, uint64(index)) + + if len(batch) >= batchSize { + superseded, err := cache.persistBuilderBatch(tx, batch, batchIndices) + if err != nil { + return false, err + } + supersededPubkeys = append(supersededPubkeys, superseded...) + + // Clear finalBuilder for persisted entries + for _, idx := range batchIndices { + if cache.builderSetCache[idx] != nil { + cache.builderSetCache[idx].finalBuilder = nil + } + } + + batch = batch[:0] + batchIndices = batchIndices[:0] + persisted += batchSize + + if persisted >= maxPerRun { + hasMore = true + break + } + } + } + + // Persist remaining batch + if len(batch) > 0 { + superseded, err := cache.persistBuilderBatch(tx, batch, batchIndices) + if err != nil { + return false, err + } + supersededPubkeys = append(supersededPubkeys, superseded...) + + // Clear finalBuilder for persisted entries + for _, idx := range batchIndices { + if cache.builderSetCache[idx] != nil { + cache.builderSetCache[idx].finalBuilder = nil + } + } + + persisted += len(batch) + } + + // Batch mark superseded builders + if len(supersededPubkeys) > 0 { + err := db.SetBuildersSuperseded(supersededPubkeys, tx) + if err != nil { + return false, fmt.Errorf("error marking builders as superseded: %w", err) + } + } + + if persisted > 0 || len(supersededPubkeys) > 0 { + cache.indexer.logger.Infof("persisted %d builders to db [%d-%d], marked %d as superseded", + persisted, firstIndex, lastIndex, len(supersededPubkeys)) + } + + return hasMore, nil +} + +// persistBuilderBatch persists a batch of builders and returns pubkeys that were superseded +func (cache *builderCache) persistBuilderBatch(tx *sqlx.Tx, batch []*dbtypes.Builder, indices []uint64) ([][]byte, error) { + if len(batch) == 0 { + return nil, nil + } + + // Get range for this batch + minIndex := indices[0] + maxIndex := indices[0] + for _, idx := range indices[1:] { + if idx < minIndex { + minIndex = idx + } + if idx > maxIndex { + maxIndex = idx + 
} + } + + // Fetch existing builders in this batch's range + existingBuilders := db.GetBuilderRange(cache.indexer.ctx, minIndex, maxIndex) + existingByIndex := make(map[uint64]*dbtypes.Builder, len(existingBuilders)) + for _, b := range existingBuilders { + existingByIndex[b.BuilderIndex] = b + } + + // Find superseded pubkeys + supersededPubkeys := make([][]byte, 0) + for i, dbBuilder := range batch { + if existing, ok := existingByIndex[indices[i]]; ok { + if !bytes.Equal(existing.Pubkey, dbBuilder.Pubkey) { + supersededPubkeys = append(supersededPubkeys, existing.Pubkey) + } + } + } + + // Insert batch + err := db.InsertBuilderBatch(batch, tx) + if err != nil { + return nil, fmt.Errorf("error persisting builder batch: %w", err) + } + + return supersededPubkeys, nil +} diff --git a/indexer/beacon/canonical.go b/indexer/beacon/canonical.go index c0d51dde0..aae2b11f8 100644 --- a/indexer/beacon/canonical.go +++ b/indexer/beacon/canonical.go @@ -8,7 +8,7 @@ import ( "strings" "time" - "github.com/attestantio/go-eth2-client/spec/phase0" + "github.com/ethpandaops/go-eth2-client/spec/phase0" ) const FarFutureEpoch = phase0.Epoch(math.MaxUint64) @@ -61,6 +61,10 @@ func (indexer *Indexer) IsCanonicalBlockByHead(block *Block, headBlock *Block) b return false } + if block == headBlock { + return true + } + if block.forkChecked && headBlock.forkChecked { parentForkIds := indexer.forkCache.getParentForkIds(headBlock.forkId) return slices.Contains(parentForkIds, block.forkId) diff --git a/indexer/beacon/client.go b/indexer/beacon/client.go index 7176d925a..957e68b79 100644 --- a/indexer/beacon/client.go +++ b/indexer/beacon/client.go @@ -8,15 +8,16 @@ import ( "strings" "time" - v1 "github.com/attestantio/go-eth2-client/api/v1" - "github.com/attestantio/go-eth2-client/spec" - "github.com/attestantio/go-eth2-client/spec/phase0" "github.com/ethereum/go-ethereum/common" "github.com/ethpandaops/dora/clients/consensus" "github.com/ethpandaops/dora/clients/consensus/rpc" 
"github.com/ethpandaops/dora/db" "github.com/ethpandaops/dora/dbtypes" "github.com/ethpandaops/dora/utils" + v1 "github.com/ethpandaops/go-eth2-client/api/v1" + "github.com/ethpandaops/go-eth2-client/spec" + "github.com/ethpandaops/go-eth2-client/spec/gloas" + "github.com/ethpandaops/go-eth2-client/spec/phase0" "github.com/jmoiron/sqlx" "github.com/sirupsen/logrus" ) @@ -33,7 +34,9 @@ type Client struct { archive bool skipValidators bool - streamSubscription *utils.Subscription[*rpc.BeaconStreamEvent] + streamSubscription *utils.Subscription[*rpc.BeaconStreamEvent] + executionPayloadSubscription *utils.Subscription[*v1.ExecutionPayloadAvailableEvent] + executionPayloadBidSubscription *utils.Subscription[*gloas.SignedExecutionPayloadBid] headRoot phase0.Root } @@ -80,6 +83,8 @@ func (c *Client) startIndexing() { // single ordered subscription for block & head events to preserve SSE ordering c.streamSubscription = c.client.SubscribeStreamEvent(100, true) + c.executionPayloadSubscription = c.client.SubscribeExecutionPayloadAvailableEvent(100, true) + c.executionPayloadBidSubscription = c.client.SubscribeExecutionPayloadBidEvent(100, true) go c.startClientLoop() } @@ -144,7 +149,7 @@ func (c *Client) runClientLoop() error { c.headRoot = headRoot - headBlock, isNew, processingTimes, err := c.processBlock(headSlot, headRoot, nil, false) + headBlock, isNew, processingTimes, err := c.processBlock(headSlot, headRoot, nil, false, true) if err != nil { return fmt.Errorf("failed processing head block: %v", err) } @@ -185,6 +190,16 @@ func (c *Client) runClientLoop() error { headEvent.Slot, headEvent.Block.String(), err) } } + case executionPayloadEvent := <-c.executionPayloadSubscription.Channel(): + err := c.processExecutionPayloadAvailableEvent(executionPayloadEvent) + if err != nil { + c.logger.Errorf("failed processing execution payload %v (%v): %v", executionPayloadEvent.Slot, executionPayloadEvent.BlockRoot.String(), err) + } + case executionPayloadBidEvent := 
<-c.executionPayloadBidSubscription.Channel(): + err := c.processExecutionPayloadBidEvent(executionPayloadBidEvent) + if err != nil { + c.logger.Errorf("failed processing execution payload bid %v (%v): %v", executionPayloadBidEvent.Message.Slot, executionPayloadBidEvent.Message.ParentBlockRoot.String(), err) + } } } @@ -245,50 +260,59 @@ func (c *Client) processHeadEvent(headEvent *v1.HeadEvent) error { chainState := c.client.GetPool().GetChainState() dependentRoot := headEvent.CurrentDutyDependentRoot - - var dependentBlock *Block if !bytes.Equal(dependentRoot[:], consensus.NullRoot[:]) { block.dependentRoot = &dependentRoot - - dependentBlock = c.indexer.blockCache.getBlockByRoot(dependentRoot) - if dependentBlock == nil { - c.logger.Warnf("dependent block (%v) not found after backfilling", dependentRoot.String()) - } - } else { - dependentBlock = c.indexer.blockCache.getDependentBlock(chainState, block, c) } // walk back the chain of epoch stats to ensure we have all duties & epoch specific data for the clients chain currentBlock := block - currentEpoch := chainState.EpochOfSlot(currentBlock.Slot) + headEpoch := chainState.EpochOfSlot(currentBlock.Slot) + currentEpoch := headEpoch minInMemorySlot := c.indexer.getMinInMemorySlot() absoluteMinInMemoryEpoch := c.indexer.getAbsoluteMinInMemoryEpoch() for { - if dependentBlock != nil && currentBlock.Slot >= minInMemorySlot { - epoch := chainState.EpochOfSlot(currentBlock.Slot) + parentRoot := currentBlock.GetParentRoot() + if parentRoot == nil { + break + } - // only request state for epochs that are allowed in memory by configuration - // we accept some gaps here, these will be fixed by the pruning/finalization process - requestState := epoch >= absoluteMinInMemoryEpoch + isEpochStart := false + parentBlock := c.indexer.blockCache.getBlockByRoot(*parentRoot) + + if currentBlock.Slot == 0 { + isEpochStart = true + } else if currentBlock.dependentRoot != nil && *parentRoot == *currentBlock.dependentRoot && 
(parentBlock == nil || parentBlock.Slot > 0) { + isEpochStart = true + } else if parentBlock != nil && chainState.EpochOfSlot(parentBlock.Slot) < currentEpoch { + isEpochStart = true + } else if parentBlock == nil && chainState.EpochOfSlot(currentBlock.Slot) == currentEpoch { + // parent block is not in cache, but we're still in currentEpoch. + // this block is the oldest block in cache for this epoch, so treat its + // parent root as the dependent root for epoch boundary detection. + isEpochStart = true + } + + if isEpochStart { + epoch := chainState.EpochOfSlot(currentBlock.Slot) + dependentRoot := *parentRoot // ensure epoch stats for the epoch - epochStats := c.indexer.epochCache.createOrGetEpochStats(epoch, dependentBlock.Root, requestState) + epochStats := c.indexer.epochCache.createOrGetEpochStats(epoch, dependentRoot) + + if epoch >= absoluteMinInMemoryEpoch { + c.indexer.epochCache.ensureEpochDependentState(epochStats, currentBlock.Root) + } if !epochStats.addRequestedBy(c) { break } - if epochStats.dependentState == nil && epoch == currentEpoch { - // always load most recent dependent state to ensure we have the latest validator set - c.indexer.epochCache.addEpochStateRequest(epochStats) - } - } else { - if dependentBlock == nil { - c.logger.Debugf("epoch stats check failed: dependent block for %v:%v (%v) not found", currentBlock.Slot, chainState.EpochOfSlot(currentBlock.Slot), currentBlock.Root.String()) - } + } + + if parentBlock == nil || parentBlock.Slot < minInMemorySlot { break } - currentBlock = dependentBlock - dependentBlock = c.indexer.blockCache.getDependentBlock(chainState, currentBlock, c) + currentBlock = parentBlock + currentEpoch = chainState.EpochOfSlot(currentBlock.Slot) } c.headRoot = block.Root @@ -297,7 +321,7 @@ func (c *Client) processHeadEvent(headEvent *v1.HeadEvent) error { // processStreamBlock processes a block received from the stream (either via block or head events). 
func (c *Client) processStreamBlock(slot phase0.Slot, root phase0.Root) (*Block, error) { - block, isNew, processingTimes, err := c.processBlock(slot, root, nil, true) + block, isNew, processingTimes, err := c.processBlock(slot, root, nil, true, false) if err != nil { return nil, err } @@ -351,7 +375,7 @@ func (c *Client) processReorg(oldHead *Block, newHead *Block) error { } // processBlock processes a block (from stream & polling). -func (c *Client) processBlock(slot phase0.Slot, root phase0.Root, header *phase0.SignedBeaconBlockHeader, trackRecvDelay bool) (block *Block, isNew bool, processingTimes []time.Duration, err error) { +func (c *Client) processBlock(slot phase0.Slot, root phase0.Root, header *phase0.SignedBeaconBlockHeader, trackRecvDelay bool, loadPayload bool) (block *Block, isNew bool, processingTimes []time.Duration, err error) { chainState := c.client.GetPool().GetChainState() finalizedSlot := chainState.GetFinalizedSlot() processingTimes = make([]time.Duration, 3) @@ -409,6 +433,25 @@ func (c *Client) processBlock(slot phase0.Slot, root phase0.Root, header *phase0 return } + if loadPayload { + newPayload, _ := block.EnsureExecutionPayload(func() (*gloas.SignedExecutionPayloadEnvelope, error) { + t1 := time.Now() + defer func() { + processingTimes[0] += time.Since(t1) + }() + + return LoadExecutionPayload(c.getContext(), c, root) + }) + + if !isNew && newPayload { + // write payload to db + err = c.persistExecutionPayload(block) + if err != nil { + return + } + } + } + if slot >= finalizedSlot && isNew { c.indexer.blockCache.addBlockToParentMap(block) c.indexer.blockCache.addBlockToExecBlockMap(block) @@ -532,7 +575,7 @@ func (c *Client) backfillParentBlocks(headBlock *Block) error { if parentBlock == nil { var err error - parentBlock, isNewBlock, processingTimes, err = c.processBlock(parentSlot, parentRoot, parentHead, false) + parentBlock, isNewBlock, processingTimes, err = c.processBlock(parentSlot, parentRoot, parentHead, false, true) if err != 
nil { return fmt.Errorf("could not process block [0x%x]: %v", parentRoot, err) } @@ -559,3 +602,87 @@ func (c *Client) backfillParentBlocks(headBlock *Block) error { } return nil } + +// processExecutionPayloadEvent processes an execution payload event from the event stream. +func (c *Client) processExecutionPayloadAvailableEvent(executionPayloadEvent *v1.ExecutionPayloadAvailableEvent) error { + if c.client.GetStatus() != consensus.ClientStatusOnline && c.client.GetStatus() != consensus.ClientStatusOptimistic { + // client is not ready, skip + return nil + } + + chainState := c.client.GetPool().GetChainState() + finalizedSlot := chainState.GetFinalizedSlot() + + var block *Block + + if executionPayloadEvent.Slot < finalizedSlot { + // block is in finalized epoch + // known block or a new orphaned block + + // don't add to cache, process this block right after loading the details + block = newBlock(c.indexer.dynSsz, executionPayloadEvent.BlockRoot, executionPayloadEvent.Slot, 0) + + dbBlockHead := db.GetBlockHeadByRoot(c.getContext(), executionPayloadEvent.BlockRoot[:]) + if dbBlockHead != nil { + block.isInFinalizedDb = true + block.parentRoot = (*phase0.Root)(dbBlockHead.ParentRoot) + } + + } else { + block, _ = c.indexer.blockCache.createOrGetBlock(executionPayloadEvent.BlockRoot, executionPayloadEvent.Slot) + } + + if block == nil { + c.logger.Warnf("execution payload event for unknown block %v:%v [0x%x]", chainState.EpochOfSlot(executionPayloadEvent.Slot), executionPayloadEvent.Slot, executionPayloadEvent.BlockRoot) + return nil + } + + newPayload, err := block.EnsureExecutionPayload(func() (*gloas.SignedExecutionPayloadEnvelope, error) { + return LoadExecutionPayload(c.getContext(), c, executionPayloadEvent.BlockRoot) + }) + if err != nil { + return err + } + + if newPayload { + // write payload to db + err = c.persistExecutionPayload(block) + if err != nil { + return err + } + } + + return nil +} + +func (c *Client) persistExecutionPayload(block *Block) 
error { + payloadVer, payloadSSZ, err := MarshalVersionedSignedExecutionPayloadEnvelopeSSZ(block.dynSsz, block.executionPayload, c.indexer.blockCompression) + if err != nil { + return fmt.Errorf("marshal execution payload ssz failed: %v", err) + } + + return db.RunDBTransaction(func(tx *sqlx.Tx) error { + err := db.UpdateUnfinalizedBlockPayload(c.getContext(), tx, block.Root[:], payloadVer, payloadSSZ) + if err != nil { + return err + } + + return nil + }) +} + +func (c *Client) processExecutionPayloadBidEvent(executionPayloadBidEvent *gloas.SignedExecutionPayloadBid) error { + bid := &dbtypes.BlockBid{ + ParentRoot: executionPayloadBidEvent.Message.ParentBlockRoot[:], + ParentHash: executionPayloadBidEvent.Message.ParentBlockHash[:], + BlockHash: executionPayloadBidEvent.Message.BlockHash[:], + FeeRecipient: executionPayloadBidEvent.Message.FeeRecipient[:], + GasLimit: uint64(executionPayloadBidEvent.Message.GasLimit), + BuilderIndex: int64(executionPayloadBidEvent.Message.BuilderIndex), + Slot: uint64(executionPayloadBidEvent.Message.Slot), + Value: uint64(executionPayloadBidEvent.Message.Value), + ElPayment: uint64(executionPayloadBidEvent.Message.ExecutionPayment), + } + c.indexer.blockBidCache.AddBid(bid) + return nil +} diff --git a/indexer/beacon/debug.go b/indexer/beacon/debug.go index a23a306a9..7bfc5248c 100644 --- a/indexer/beacon/debug.go +++ b/indexer/beacon/debug.go @@ -4,7 +4,7 @@ import ( "reflect" "unsafe" - "github.com/attestantio/go-eth2-client/spec/phase0" + "github.com/ethpandaops/go-eth2-client/spec/phase0" ) type CacheDebugStats struct { diff --git a/indexer/beacon/duties/duties.go b/indexer/beacon/duties/duties.go index 9a6ee6402..ee313aeed 100644 --- a/indexer/beacon/duties/duties.go +++ b/indexer/beacon/duties/duties.go @@ -6,8 +6,8 @@ import ( "errors" "fmt" - "github.com/attestantio/go-eth2-client/spec/phase0" "github.com/ethpandaops/dora/clients/consensus" + "github.com/ethpandaops/go-eth2-client/spec/phase0" ) const seedSize = int8(32) 
@@ -357,3 +357,62 @@ func swapOrNot(buf []byte, byteV byte, i ActiveIndiceIndex, input []ActiveIndice } return byteV, source } + +// GetPtcDuties returns the Payload Timeliness Committee (PTC) members for a given slot. +// The PTC is selected from the concatenated attestation committees for the slot using +// balance-weighted selection without shuffling. +func GetPtcDuties( + spec *consensus.ChainSpec, + state *BeaconState, + attesterDuties [][]ActiveIndiceIndex, + slot phase0.Slot, +) ([]ActiveIndiceIndex, error) { + if spec.PtcSize == 0 { + return nil, nil + } + + epoch := phase0.Epoch(slot / phase0.Slot(spec.SlotsPerEpoch)) + + // Derive PTC seed: hash(get_seed(state, epoch, DOMAIN_PTC_ATTESTER) + uint_to_bytes(slot)) + seedData := []byte{} + seedHash := GetSeed(spec, state, epoch, spec.DomainPtcAttester) + seedData = append(seedData, seedHash[:]...) + seedData = append(seedData, UintToBytes(uint64(slot))...) + seed := Hash(seedData) + + // Concatenate all committee indices for the slot (in order) + indices := make([]ActiveIndiceIndex, 0) + for _, committee := range attesterDuties { + indices = append(indices, committee...) + } + + if len(indices) == 0 { + return nil, errors.New("empty committee indices") + } + + // Balance-weighted selection without shuffling (shuffle_indices=false) + // Uses same acceptance logic as GetProposerIndex (Electra-style 16-bit random values) + maxRandomValue := uint64(1<<16 - 1) + total := uint64(len(indices)) + selected := make([]ActiveIndiceIndex, 0, spec.PtcSize) + + for i := uint64(0); uint64(len(selected)) < spec.PtcSize; i++ { + // No shuffling - traverse indices in order + nextIndex := i % total + candidateIndex := indices[nextIndex] + + // Balance-weighted acceptance check (same as proposer selection) + b := append(seed[:], UintToBytes(i/16)...) 
+ offset := (i % 16) * 2 + hash := Hash(b) + randomValue := BytesToUint(hash[offset : offset+2]) + + effectiveBal := uint64(state.GetEffectiveBalance(candidateIndex)) + + if effectiveBal*maxRandomValue >= spec.MaxEffectiveBalanceElectra*randomValue { + selected = append(selected, candidateIndex) + } + } + + return selected, nil +} diff --git a/indexer/beacon/epochcache.go b/indexer/beacon/epochcache.go index 2ed805ef6..7e455e5f4 100644 --- a/indexer/beacon/epochcache.go +++ b/indexer/beacon/epochcache.go @@ -10,10 +10,11 @@ import ( "sync" "time" - "github.com/attestantio/go-eth2-client/spec/phase0" "github.com/ethereum/go-ethereum/common/lru" + "github.com/ethpandaops/go-eth2-client/spec/phase0" "github.com/ethpandaops/dora/clients/consensus" + "github.com/ethpandaops/dora/indexer/beacon/statetransition" ) // epochStatsKey is the primary key for EpochStats entries in cache. @@ -30,12 +31,18 @@ func getEpochStatsKey(epoch phase0.Epoch, dependentRoot phase0.Root) epochStatsK return key } +// epochStateKey is the key for epochState entries in cache. +// Includes targetEpoch because the same dependentRoot may serve multiple epochs +// (when all slots in intermediate epochs are missed), and each epoch needs its +// own post-epoch-transition state values. +type epochStateKey = epochStatsKey + // epochCache is the cache for EpochStats (epoch status) and epochState (beacon state) structures. 
type epochCache struct { indexer *Indexer cacheMutex sync.RWMutex // mutex to protect statsMap & stateMap for concurrent read/write statsMap map[epochStatsKey]*EpochStats // epoch status cache by epochStatsKey - stateMap map[phase0.Root]*epochState // beacon state cache by dependentRoot + stateMap map[epochStateKey]*epochState // beacon state cache by (dependentRoot, targetEpoch) loadingChan chan bool // limits concurrent state calls by channel capacity syncMutex sync.Mutex // mutex to protect syncCache for concurrent access syncCache []phase0.ValidatorIndex // global sync committee cache for reuse if matching @@ -52,7 +59,7 @@ func newEpochCache(indexer *Indexer) *epochCache { cache := &epochCache{ indexer: indexer, statsMap: map[epochStatsKey]*EpochStats{}, - stateMap: map[phase0.Root]*epochState{}, + stateMap: map[epochStateKey]*epochState{}, loadingChan: make(chan bool, indexer.maxParallelStateCalls), votesCache: lru.NewCache[epochVotesKey, *EpochVotes](500), @@ -65,7 +72,7 @@ func newEpochCache(indexer *Indexer) *epochCache { } // createOrGetEpochStats gets an existing EpochStats entry for the given epoch and dependentRoot or creates a new instance if not found. 
-func (cache *epochCache) createOrGetEpochStats(epoch phase0.Epoch, dependentRoot phase0.Root, createStateRequest bool) *EpochStats { +func (cache *epochCache) createOrGetEpochStats(epoch phase0.Epoch, dependentRoot phase0.Root) *EpochStats { cache.cacheMutex.Lock() defer cache.cacheMutex.Unlock() @@ -77,43 +84,37 @@ func (cache *epochCache) createOrGetEpochStats(epoch phase0.Epoch, dependentRoot cache.statsMap[statsKey] = epochStats } - // get or create beacon state which the epoch status depends on (dependentRoot beacon state) - epochState := cache.stateMap[dependentRoot] - if epochState == nil && !epochStats.ready && createStateRequest { - epochState = newEpochState(dependentRoot) - cache.stateMap[dependentRoot] = epochState - - cache.indexer.logger.Infof("added epoch state request for epoch %v (%v) to queue", epoch, dependentRoot.String()) - } - - if epochState != nil { - epochStats.dependentState = epochState - - if epochState.loadingStatus == 2 && !epochStats.ready { - // dependent state is already loaded, process it - go epochStats.processState(cache.indexer, nil) - } - } - return epochStats } -func (cache *epochCache) addEpochStateRequest(epochStats *EpochStats) { +func (cache *epochCache) ensureEpochDependentState(epochStats *EpochStats, firstBlockRoot phase0.Root) { + cache.cacheMutex.Lock() + defer cache.cacheMutex.Unlock() + if epochStats.dependentState != nil { return } - cache.cacheMutex.Lock() - defer cache.cacheMutex.Unlock() - - epochState := cache.stateMap[epochStats.dependentRoot] - if epochState == nil { - epochState = newEpochState(epochStats.dependentRoot) - cache.stateMap[epochStats.dependentRoot] = epochState + // get or create beacon state for this (dependentRoot, epoch) combination. + // Always loads the post-state of the dependent root (last block of parent epoch). + // For Fulu+, the epoch transition is applied after loading via statetransition.PrepareEpochPreState. 
+ stateKey := getEpochStatsKey(epochStats.epoch, epochStats.dependentRoot) + epochState := cache.stateMap[stateKey] + if epochState == nil && !epochStats.ready { + epochState = newEpochState(epochStats.dependentRoot, epochStats.epoch) + cache.stateMap[stateKey] = epochState cache.indexer.logger.Infof("added epoch state request for epoch %v (%v) to queue", epochStats.epoch, epochStats.dependentRoot.String()) } - epochStats.dependentState = epochState + + if epochState != nil { + epochStats.dependentState = epochState + + if epochState.loadingStatus == 2 && !epochStats.ready { + // dependent state is already loaded, process it + go epochStats.processState(cache.indexer, nil, 0) + } + } } func (cache *epochCache) getEpochStats(epoch phase0.Epoch, dependentRoot phase0.Root) *EpochStats { @@ -126,13 +127,27 @@ func (cache *epochCache) getEpochStats(epoch phase0.Epoch, dependentRoot phase0. } // getPendingEpochStats gets all EpochStats with unloaded epochStates. +// Skips stats where another epochState with the same dependentRoot is already loading, +// since those will be processed via the dedup path in loadEpochStats after the +// first load completes. func (cache *epochCache) getPendingEpochStats() []*EpochStats { cache.cacheMutex.Lock() defer cache.cacheMutex.Unlock() + // Collect roots that are currently being loaded. 
+ loadingRoots := make(map[phase0.Root]bool) + for _, state := range cache.stateMap { + if state.loadingStatus == 1 { + loadingRoots[state.slotRoot] = true + } + } + pendingStats := make([]*EpochStats, 0) for _, stats := range cache.statsMap { if stats.dependentState != nil && stats.dependentState.loadingStatus == 0 { + if loadingRoots[stats.dependentState.slotRoot] { + continue // another epochState with same root is already loading + } pendingStats = append(pendingStats, stats) } } @@ -224,19 +239,9 @@ func (cache *epochCache) removeEpochStats(epochStats *EpochStats) { delete(cache.statsMap, statsKey) if epochStats.dependentState != nil { - foundOtherStats := false - for _, stats := range cache.statsMap { - if bytes.Equal(stats.dependentRoot[:], epochStats.dependentRoot[:]) { - foundOtherStats = true - break - } - } - - if !foundOtherStats { - // no other epoch status depends on this beacon state - epochStats.dependentState.dispose() - delete(cache.stateMap, epochStats.dependentRoot) - } + stateKey := getEpochStatsKey(epochStats.epoch, epochStats.dependentRoot) + epochStats.dependentState.dispose() + delete(cache.stateMap, stateKey) } } @@ -251,7 +256,7 @@ func (cache *epochCache) removeUnreferencedEpochStates() uint64 { defer cache.cacheMutex.Unlock() removed := uint64(0) - for _, state := range cache.stateMap { + for root, state := range cache.stateMap { found := false for _, stats := range cache.statsMap { if stats.dependentState == state { @@ -262,7 +267,7 @@ func (cache *epochCache) removeUnreferencedEpochStates() uint64 { if !found { state.dispose() - delete(cache.stateMap, state.slotRoot) + delete(cache.stateMap, root) removed++ } } @@ -468,11 +473,14 @@ func (cache *epochCache) loadEpochStats(epochStats *EpochStats) bool { log.Infof("loading epoch %v stats (dep: %v, req: %v)", epochStats.epoch, epochStats.dependentRoot.String(), len(epochStats.requestedBy)) + t1 := time.Now() state, err := epochStats.dependentState.loadState(client.getContext(), client, 
cache) if err != nil && epochStats.dependentState.loadingStatus == 0 { client.logger.Warnf("failed loading epoch %v stats (dep: %v): %v", epochStats.epoch, epochStats.dependentRoot.String(), err) } + loadDuration := time.Since(t1) + if epochStats.dependentState.loadingStatus != 2 { // epoch state could not be loaded epochStats.dependentState.retryCount++ @@ -487,17 +495,82 @@ func (cache *epochCache) loadEpochStats(epochStats *EpochStats) bool { } } - dependentStats := []*EpochStats{} - cache.cacheMutex.Lock() - for _, stats := range cache.statsMap { - if stats.dependentState == epochStats.dependentState { - dependentStats = append(dependentStats, stats) + // Process the triggering EpochStats + go epochStats.processState(cache.indexer, validatorSet, loadDuration) + + // Find other pending epochStates with the same dependentRoot (different target epochs). + // These share the same raw state but need their own epoch transition applied. + // We advance the state sequentially through each epoch in order. + if state != nil { + type pendingEntry struct { + epochState *epochState + stats *EpochStats } - } - cache.cacheMutex.Unlock() - for _, stats := range dependentStats { - go stats.processState(cache.indexer, validatorSet) + var pendingOthers []pendingEntry + cache.cacheMutex.RLock() + for _, stats := range cache.statsMap { + if stats == epochStats { + continue + } + if stats.dependentState == nil || stats.dependentState.loadingStatus != 0 { + continue + } + if stats.dependentState.slotRoot != epochStats.dependentState.slotRoot { + continue + } + pendingOthers = append(pendingOthers, pendingEntry{ + epochState: stats.dependentState, + stats: stats, + }) + } + cache.cacheMutex.RUnlock() + + if len(pendingOthers) > 0 { + specs := client.indexer.consensusPool.GetChainState().GetSpecs() + + // Sort by target epoch so we advance the state forward incrementally. 
+ sort.Slice(pendingOthers, func(i, j int) bool { + return pendingOthers[i].epochState.targetEpoch < pendingOthers[j].epochState.targetEpoch + }) + + for _, entry := range pendingOthers { + // Advance the already-loaded state to the next target epoch. + // Payload is nil since it was already applied on the first PrepareEpochPreState call. + var transitionInfo statetransition.TransitionInfo + if err := statetransition.NewStateTransition(specs, cache.indexer.dynSsz).PrepareEpochPreState(state, entry.epochState.targetEpoch, nil, &transitionInfo); err != nil { + cache.indexer.logger.Errorf("error advancing state to epoch %v: %v", entry.epochState.targetEpoch, err) + continue + } + entry.epochState.delayedBuilderPaymentCount = transitionInfo.DelayedBuilderPayments + + // Extract values from the advanced state. + if err := entry.epochState.processState(state, cache, specs); err != nil { + cache.indexer.logger.Errorf("error processing state for epoch %v: %v", entry.epochState.targetEpoch, err) + continue + } + + entry.epochState.loadingStatus = 2 + + // Store in state cache. + if sc := cache.indexer.stateCache; sc != nil { + if err := sc.Store(entry.epochState.slotRoot, entry.epochState.targetEpoch, state); err != nil { + cache.indexer.logger.Warnf("failed to cache state for epoch %v: %v", entry.epochState.targetEpoch, err) + } + } + + // Signal ready. + entry.epochState.readyChanMutex.Lock() + if entry.epochState.readyChan != nil { + close(entry.epochState.readyChan) + entry.epochState.readyChan = nil + } + entry.epochState.readyChanMutex.Unlock() + + // Trigger EpochStats processing. 
+ go entry.stats.processState(cache.indexer, validatorSet, loadDuration) + } + } } return true diff --git a/indexer/beacon/epochstate.go b/indexer/beacon/epochstate.go index 4d696f480..1477f22eb 100644 --- a/indexer/beacon/epochstate.go +++ b/indexer/beacon/epochstate.go @@ -6,15 +6,20 @@ import ( "sync" "time" - "github.com/attestantio/go-eth2-client/spec" - "github.com/attestantio/go-eth2-client/spec/electra" - "github.com/attestantio/go-eth2-client/spec/phase0" + "github.com/ethpandaops/dora/clients/consensus" + "github.com/ethpandaops/dora/indexer/beacon/statetransition" + "github.com/ethpandaops/dora/statecache" + "github.com/ethpandaops/go-eth2-client/spec" + "github.com/ethpandaops/go-eth2-client/spec/electra" + "github.com/ethpandaops/go-eth2-client/spec/gloas" + "github.com/ethpandaops/go-eth2-client/spec/phase0" ) // epochState represents a beacon state which a epoch status depends on. type epochState struct { - slotRoot phase0.Root - stateRoot phase0.Root + slotRoot phase0.Root + stateRoot phase0.Root + targetEpoch phase0.Epoch // the epoch this state is being prepared for loadingCancel context.CancelFunc loadingStatus uint8 @@ -23,22 +28,27 @@ type epochState struct { readyChan chan bool highPriority bool - stateSlot phase0.Slot - validatorBalances []phase0.Gwei - randaoMixes []phase0.Root - depositIndex uint64 - syncCommittee []phase0.ValidatorIndex - depositBalanceToConsume phase0.Gwei - pendingDeposits []*electra.PendingDeposit - pendingPartialWithdrawals []*electra.PendingPartialWithdrawal - pendingConsolidations []*electra.PendingConsolidation - proposerLookahead []phase0.ValidatorIndex + stateSlot phase0.Slot + sourceBlockUid uint64 // block UID of the source block (before epoch transition) + validatorBalances []phase0.Gwei + builderBalances []phase0.Gwei + randaoMixes []phase0.Root + depositIndex uint64 + syncCommittee []phase0.ValidatorIndex + depositBalanceToConsume phase0.Gwei + pendingDeposits []*electra.PendingDeposit + 
pendingPartialWithdrawals []*electra.PendingPartialWithdrawal + builderPendingWithdrawals []*gloas.BuilderPendingWithdrawal + delayedBuilderPaymentCount uint32 // number of delayed payments at the tail of builderPendingWithdrawals + pendingConsolidations []*electra.PendingConsolidation + proposerLookahead []phase0.ValidatorIndex } // newEpochState creates a new epochState instance with the root of the state to be loaded. -func newEpochState(slotRoot phase0.Root) *epochState { +func newEpochState(slotRoot phase0.Root, targetEpoch phase0.Epoch) *epochState { return &epochState{ - slotRoot: slotRoot, + slotRoot: slotRoot, + targetEpoch: targetEpoch, } } @@ -90,7 +100,6 @@ func (s *epochState) loadState(ctx context.Context, client *Client, cache *epoch } s.loadingStatus = 1 - client.logger.Debugf("loading state for slot %v", s.slotRoot.String()) ctx, cancel := context.WithTimeout(ctx, beaconStateRequestTimeout+(beaconHeaderRequestTimeout*2)) s.loadingCancel = cancel @@ -104,30 +113,109 @@ func (s *epochState) loadState(ctx context.Context, client *Client, cache *epoch } }() - var blockHeader *phase0.SignedBeaconBlockHeader + var beaconBlock *spec.VersionedSignedBeaconBlock block := client.indexer.blockCache.getBlockByRoot(s.slotRoot) if block != nil { - blockHeader = block.AwaitHeader(ctx, beaconHeaderRequestTimeout) + beaconBlock = block.AwaitBlock(ctx, beaconHeaderRequestTimeout) } - if blockHeader == nil { + if beaconBlock == nil { var err error - blockHeader, err = LoadBeaconHeader(ctx, client, s.slotRoot) + beaconBlock, err = LoadBeaconBlock(ctx, client, s.slotRoot) if err != nil { return nil, err } } - s.stateRoot = blockHeader.Message.StateRoot + if beaconBlock != nil { + var err error + s.stateRoot, err = beaconBlock.StateRoot() + if err != nil { + return nil, fmt.Errorf("error getting state root from beacon block %v: %v", s.slotRoot.String(), err) + } + } - resState, err := LoadBeaconState(ctx, client, blockHeader.Message.StateRoot) - if err != nil { - return 
nil, err + specs := client.indexer.consensusPool.GetChainState().GetSpecs() + + // Save the source block UID before epoch transition (needed for ref slot of + // direct builder payments from the parent epoch's last block). + if block != nil { + s.sourceBlockUid = block.BlockUID + } else if beaconBlock != nil { + slot, _ := beaconBlock.Slot() + s.sourceBlockUid = uint64(slot) << 16 } - err = s.processState(resState, cache) - if err != nil { + // Try loading from state cache first (post-epoch-transition state). + var resState *spec.VersionedBeaconState + sc := client.indexer.stateCache + if sc != nil && sc.Check(s.slotRoot, s.targetEpoch) { + resState = sc.Load(s.slotRoot, s.targetEpoch) + if resState != nil { + client.logger.Infof("loaded epoch %v state from cache (dep: %v)", s.targetEpoch, s.slotRoot.String()) + } + } + + // Try replaying from parent epoch's cached state + blocks. This is much + // cheaper than loading the full state from the beacon API (which can be + // hundreds of MB on mainnet). On any failure, falls through to API load. + if resState == nil && sc != nil && s.targetEpoch > 0 { + if replayed := s.tryReplayFromParentState(ctx, client, block, beaconBlock, specs, sc); replayed != nil { + resState = replayed + } + } + + if resState == nil { + // Fall back to loading the full state from the beacon API. + apiStart := time.Now() + var err error + resState, err = LoadBeaconState(ctx, client, s.stateRoot) + if err != nil { + return nil, err + } + apiLoadDur := time.Since(apiStart) + + // For Fulu+: apply epoch transition to advance the state from the post-block state + // of the parent epoch's last block to the pre-state of the target epoch. + // Skip for genesis (epoch 0) — the genesis state is already the correct pre-state. 
+ var epochTransitionDur time.Duration + if resState.Version >= spec.DataVersionFulu && s.targetEpoch > 0 { + var payloadEnvelope *gloas.ExecutionPayloadEnvelope + if resState.Version >= spec.DataVersionGloas { + var executionPayload *gloas.SignedExecutionPayloadEnvelope + if block != nil { + executionPayload = block.GetExecutionPayload(ctx) + } + if executionPayload == nil { + executionPayload, _ = LoadExecutionPayload(ctx, client, s.slotRoot) + } + if executionPayload != nil { + payloadEnvelope = executionPayload.Message + } + } + + epochStart := time.Now() + var transitionInfo statetransition.TransitionInfo + if err := statetransition.NewStateTransition(specs, client.indexer.dynSsz).PrepareEpochPreState(resState, s.targetEpoch, payloadEnvelope, &transitionInfo); err != nil { + return nil, fmt.Errorf("error applying epoch transition for epoch %v: %w", s.targetEpoch, err) + } + epochTransitionDur = time.Since(epochStart) + s.delayedBuilderPaymentCount = transitionInfo.DelayedBuilderPayments + } + + client.logger.Infof("loaded epoch %v state from beacon API in %v + epoch transition %v", + s.targetEpoch, apiLoadDur.Round(time.Millisecond), epochTransitionDur.Round(time.Millisecond)) + + // Store in state cache for future use. + if sc != nil { + if err := sc.Store(s.slotRoot, s.targetEpoch, resState); err != nil { + client.logger.Warnf("failed to cache state for epoch %v: %v", s.targetEpoch, err) + } + } + } + + if err := s.processState(resState, cache, specs); err != nil { return nil, err } @@ -144,13 +232,14 @@ func (s *epochState) loadState(ctx context.Context, client *Client, cache *epoch // processState processes the state and updates the epochState instance. // the function extracts and unifies all relevant information from the beacon state, so the full beacon state can be dropped from memory afterwards. 
-func (s *epochState) processState(state *spec.VersionedBeaconState, cache *epochCache) error { +func (s *epochState) processState(state *spec.VersionedBeaconState, cache *epochCache, specs *consensus.ChainSpec) error { slot, err := state.Slot() if err != nil { return fmt.Errorf("error getting slot from state %v: %v", s.slotRoot.String(), err) } s.stateSlot = slot + dependentRoot := s.slotRoot validatorList, err := state.Validators() if err != nil { @@ -158,7 +247,20 @@ func (s *epochState) processState(state *spec.VersionedBeaconState, cache *epoch } if cache != nil { - cache.indexer.validatorCache.updateValidatorSet(slot, s.slotRoot, validatorList) + cache.indexer.validatorCache.updateValidatorSet(slot, dependentRoot, validatorList) + } + + // Process builder set for Gloas + if state.Version >= spec.DataVersionGloas && state.Gloas != nil { + if cache != nil { + cache.indexer.builderCache.updateBuilderSet(slot, dependentRoot, state.Gloas.Builders) + } + + builderBalances := make([]phase0.Gwei, len(state.Gloas.Builders)) + for i, builder := range state.Gloas.Builders { + builderBalances[i] = builder.Balance + } + s.builderBalances = builderBalances } validatorPubkeyMap := make(map[phase0.BLSPubKey]phase0.ValidatorIndex) @@ -218,12 +320,15 @@ func (s *epochState) processState(state *spec.VersionedBeaconState, cache *epoch } s.pendingPartialWithdrawals = pendingPartialWithdrawals + builderPendingWithdrawals, err := getStateBuilderPendingWithdrawals(state) + if err == nil { + s.builderPendingWithdrawals = builderPendingWithdrawals + } + pendingConsolidations, err := getStatePendingConsolidations(state) if err != nil { return fmt.Errorf("error getting pending consolidation indices from state %v: %v", s.slotRoot.String(), err) } - - // apply epoch transition to get remaining pending consolidations s.pendingConsolidations = pendingConsolidations } @@ -232,3 +337,196 @@ func (s *epochState) processState(state *spec.VersionedBeaconState, cache *epoch return nil } + +// 
tryReplayFromParentState attempts to reconstruct the dependent block's post-state +// by loading the parent epoch's pre-state from cache and replaying all parent epoch +// blocks using the state transition. Returns the post-epoch-transition state ready +// for the target epoch, or nil if replay is not possible or verification fails. +// On any failure (missing inputs, ApplyBlock error, HTR mismatch, epoch transition +// error) the function returns nil and the caller falls back to loading the state +// from the beacon API. +func (s *epochState) tryReplayFromParentState( + ctx context.Context, + client *Client, + depBlock *Block, + depBeaconBlock *spec.VersionedSignedBeaconBlock, + specs *consensus.ChainSpec, + sc *statecache.StateCache, +) *spec.VersionedBeaconState { + if depBlock == nil || depBeaconBlock == nil { + return nil + } + + parentEpoch := s.targetEpoch - 1 + slotsPerEpoch := specs.SlotsPerEpoch + + // Walk back from depBlock to find the dependent root for the parent epoch + // (the last block before parentEpoch's first slot). + parentEpochFirstSlot := phase0.Slot(uint64(parentEpoch) * slotsPerEpoch) + walkBlock := depBlock + for walkBlock != nil && walkBlock.Slot >= parentEpochFirstSlot { + parentRoot := walkBlock.GetParentRoot() + if parentRoot == nil { + return nil + } + walkBlock = client.indexer.blockCache.getBlockByRoot(*parentRoot) + } + if walkBlock == nil { + return nil + } + parentDepRoot := walkBlock.Root + + // Parent epoch's pre-state must be in cache. + if !sc.Check(parentDepRoot, parentEpoch) { + return nil + } + parentState := sc.Load(parentDepRoot, parentEpoch) + if parentState == nil { + return nil + } + + // Skip replay across fork boundaries — the state version must match the + // dependent block's version (fork upgrades during state transition are not + // yet implemented). + if depBeaconBlock.Version != parentState.Version { + return nil + } + + // Collect all blocks in the parent epoch in slot order. 
+ var epochBlocks []*Block + walkBlock = depBlock + for walkBlock != nil && walkBlock.Slot >= parentEpochFirstSlot { + epochBlocks = append(epochBlocks, walkBlock) + parentRoot := walkBlock.GetParentRoot() + if parentRoot == nil { + break + } + walkBlock = client.indexer.blockCache.getBlockByRoot(*parentRoot) + } + for i, j := 0, len(epochBlocks)-1; i < j; i, j = i+1, j-1 { + epochBlocks[i], epochBlocks[j] = epochBlocks[j], epochBlocks[i] + } + + // Reusable state transition; caches persist across all blocks in the epoch + // and the trailing epoch transition. + st := statetransition.NewStateTransition(specs, client.indexer.dynSsz) + + // prevStateRoot is the verified post-block HTR from the previous iteration — + // the same value as the next block's pre-state HTR — passed as a hint to + // skip the expensive HTR computation in the first process_slot. + var prevStateRoot phase0.Root + + replayStart := time.Now() + var blockApplyTotal time.Duration + for i, blk := range epochBlocks { + beaconBlock := blk.GetBlock(ctx) + if beaconBlock == nil { + return nil + } + + blockStart := time.Now() + if err := st.ApplyBlockWithStateRoot(parentState, beaconBlock, prevStateRoot); err != nil { + client.logger.Warnf("replay: ApplyBlock failed at slot %v: %v", blk.Slot, err) + return nil + } + + // Verify post-block state root matches the block header (post-block, + // pre-payload for Gloas). Catches state transition implementation bugs. 
+ expectedStateRoot, _ := beaconBlock.StateRoot() + var gotStateRoot phase0.Root + var htrErr error + switch parentState.Version { + case spec.DataVersionFulu: + gotStateRoot, htrErr = parentState.Fulu.HashTreeRoot() + case spec.DataVersionGloas: + gotStateRoot, htrErr = parentState.Gloas.HashTreeRoot() + } + if htrErr != nil { + client.logger.Warnf("replay: HTR failed at slot %v: %v", blk.Slot, htrErr) + return nil + } + if gotStateRoot != expectedStateRoot { + client.logger.Warnf("replay: state root mismatch at slot %v (got %v, expected %v), falling back to API", + blk.Slot, gotStateRoot.String(), expectedStateRoot.String()) + return nil + } + // Default: post-block HTR is the correct pre-state HTR for the next block + // (correct for Fulu and for Gloas when no payload is applied). + prevStateRoot = gotStateRoot + blockApplyTotal += time.Since(blockStart) + + // For Gloas: apply execution payload if delivered AND accepted. + // A payload is "accepted" iff the next block in the canonical chain + // references it via bid.parent_block_hash; otherwise it was orphaned + // (the next block built on the parent payload instead) and must NOT + // be applied to the state. + // Skip the LAST block's payload — its acceptance is determined by the + // first block of the target epoch, which we don't have here. The + // PrepareEpochPreState call below handles it via the dep block payload. 
+ isLastBlock := (i == len(epochBlocks)-1) + if parentState.Version >= spec.DataVersionGloas && !isLastBlock { + payload := blk.GetExecutionPayload(ctx) + if payload != nil && payload.Message != nil && payload.Message.Payload != nil { + nextBeaconBlock := epochBlocks[i+1].GetBlock(ctx) + if nextBeaconBlock == nil { + return nil + } + nextParentBlockHash, err := getBlockExecutionParentHash(nextBeaconBlock) + if err != nil { + client.logger.Warnf("replay: failed to read next bid parent hash at slot %v: %v", epochBlocks[i+1].Slot, err) + return nil + } + + if payload.Message.Payload.BlockHash == nextParentBlockHash { + // Payload accepted by the next block — apply it. + if err := st.ApplyExecutionPayload(parentState, payload); err != nil { + client.logger.Warnf("replay: ApplyExecutionPayload failed at slot %v: %v", blk.Slot, err) + return nil + } + // Post-payload state HTR is recorded in the envelope itself. + prevStateRoot = payload.Message.StateRoot + } + // else: payload was orphaned (next block built on parent payload). + // Leave state unchanged; gotStateRoot is the correct hint. + } + } + } + blockReplayDur := time.Since(replayStart) + + // Apply epoch transition to advance the state from the post-block state of + // the parent epoch's last block to the pre-state of the target epoch. 
+ var epochTransitionDur time.Duration + if parentState.Version >= spec.DataVersionFulu { + var payloadEnvelope *gloas.ExecutionPayloadEnvelope + if parentState.Version >= spec.DataVersionGloas { + payload := depBlock.GetExecutionPayload(ctx) + if payload != nil { + payloadEnvelope = payload.Message + } + } + + epochStart := time.Now() + var transitionInfo statetransition.TransitionInfo + if err := st.PrepareEpochPreState(parentState, s.targetEpoch, payloadEnvelope, &transitionInfo); err != nil { + client.logger.Warnf("replay: epoch transition failed for epoch %v: %v", s.targetEpoch, err) + return nil + } + epochTransitionDur = time.Since(epochStart) + s.delayedBuilderPaymentCount = transitionInfo.DelayedBuilderPayments + } + + client.logger.Infof( + "replayed epoch %v: %d blocks in %v (apply %v) + epoch transition %v", + parentEpoch, len(epochBlocks), + blockReplayDur.Round(time.Millisecond), + blockApplyTotal.Round(time.Millisecond), + epochTransitionDur.Round(time.Millisecond), + ) + + // Cache the post-epoch-transition state for the target epoch. 
+ if err := sc.Store(s.slotRoot, s.targetEpoch, parentState); err != nil { + client.logger.Warnf("failed to cache replayed state for epoch %v: %v", s.targetEpoch, err) + } + + return parentState +} diff --git a/indexer/beacon/epochstats.go b/indexer/beacon/epochstats.go index 7f1fbc9cb..9ca945128 100644 --- a/indexer/beacon/epochstats.go +++ b/indexer/beacon/epochstats.go @@ -9,12 +9,13 @@ import ( "sync" "time" - "github.com/attestantio/go-eth2-client/spec/electra" - "github.com/attestantio/go-eth2-client/spec/phase0" "github.com/ethpandaops/dora/clients/consensus" "github.com/ethpandaops/dora/db" "github.com/ethpandaops/dora/dbtypes" "github.com/ethpandaops/dora/indexer/beacon/duties" + "github.com/ethpandaops/go-eth2-client/spec/electra" + "github.com/ethpandaops/go-eth2-client/spec/gloas" + "github.com/ethpandaops/go-eth2-client/spec/phase0" "github.com/jmoiron/sqlx" "github.com/mashingan/smapping" ) @@ -44,39 +45,46 @@ type EpochStats struct { // EpochStatsValues holds the values for the epoch-specific information. 
type EpochStatsValues struct { - RandaoMix phase0.Hash32 - NextRandaoMix phase0.Hash32 - ActiveIndices []phase0.ValidatorIndex - EffectiveBalances []uint32 - ProposerDuties []phase0.ValidatorIndex - AttesterDuties [][][]duties.ActiveIndiceIndex - SyncCommitteeDuties []phase0.ValidatorIndex - ActiveValidators uint64 - TotalBalance phase0.Gwei - ActiveBalance phase0.Gwei - EffectiveBalance phase0.Gwei - FirstDepositIndex uint64 - PendingWithdrawals []electra.PendingPartialWithdrawal - PendingConsolidations []electra.PendingConsolidation - ConsolidatingBalance phase0.Gwei + RandaoMix phase0.Hash32 + NextRandaoMix phase0.Hash32 + ActiveIndices []phase0.ValidatorIndex + EffectiveBalances []uint32 // effective balance in full ETH of last epoch for pre-fulu stats, effective balance in full ETH of current epoch for fulu+ stats + ProposerDuties []phase0.ValidatorIndex + AttesterDuties [][][]duties.ActiveIndiceIndex + SyncCommitteeDuties []phase0.ValidatorIndex + PtcDuties [][]duties.ActiveIndiceIndex // [slot_index][ptc_member_index] - PTC duties for Gloas+ epochs + ActiveValidators uint64 + TotalBalance phase0.Gwei + ActiveBalance phase0.Gwei + EffectiveBalance phase0.Gwei + FirstDepositIndex uint64 + PendingWithdrawals []electra.PendingPartialWithdrawal + BuilderPendingWithdrawals []gloas.BuilderPendingWithdrawal + DelayedBuilderPaymentCount uint32 // number of delayed payments at the tail of BuilderPendingWithdrawals + SourceBlockUid uint64 // block UID of the source block (last block of parent epoch) + PendingConsolidations []electra.PendingConsolidation + ConsolidatingBalance phase0.Gwei } // EpochStatsPacked holds the packed values for the epoch-specific information. // // generate ssz: (this is really ugly, needs path patching and post-fixing to work) -// sszgen --suffix ssz --path . 
--include $GOPATH/pkg/mod/github.com/attestantio/go-eth2-client\@v0.26.0/spec/phase0,$GOPATH/pkg/mod/github.com/attestantio/go-eth2-client\@v0.26.0/spec/electra --objs EpochStatsPacked +// sszgen --suffix ssz --path . --include $GOPATH/pkg/mod/github.com/ethpandaops/go-eth2-client\@v0.26.0/spec/phase0,$GOPATH/pkg/mod/github.com/ethpandaops/go-eth2-client\@v0.26.0/spec/electra --objs EpochStatsPacked type EpochStatsPacked struct { - ActiveValidators []EpochStatsPackedValidator `ssz-max:"10000000"` - ProposerDuties []phase0.ValidatorIndex `ssz-max:"100"` - SyncCommitteeDuties []phase0.ValidatorIndex `ssz-max:"10000"` - RandaoMix phase0.Hash32 `ssz-size:"32"` - NextRandaoMix phase0.Hash32 `ssz-size:"32"` - TotalBalance phase0.Gwei - ActiveBalance phase0.Gwei - FirstDepositIndex uint64 - PendingWithdrawals []electra.PendingPartialWithdrawal `ssz-max:"10000000"` - PendingConsolidations []electra.PendingConsolidation `ssz-max:"10000000"` - ConsolidatingBalance phase0.Gwei + ActiveValidators []EpochStatsPackedValidator `ssz-max:"10000000"` + ProposerDuties []phase0.ValidatorIndex `ssz-max:"100"` + SyncCommitteeDuties []phase0.ValidatorIndex `ssz-max:"10000"` + RandaoMix phase0.Hash32 `ssz-size:"32"` + NextRandaoMix phase0.Hash32 `ssz-size:"32"` + TotalBalance phase0.Gwei + ActiveBalance phase0.Gwei + FirstDepositIndex uint64 + PendingWithdrawals []electra.PendingPartialWithdrawal `ssz-max:"10000000"` + BuilderPendingWithdrawals []gloas.BuilderPendingWithdrawal `ssz-max:"10000000"` + DelayedBuilderPaymentCount uint32 + SourceBlockUid uint64 + PendingConsolidations []electra.PendingConsolidation `ssz-max:"10000000"` + ConsolidatingBalance phase0.Gwei } // EpochStatsPackedValidator holds the packed values for an active validator. 
@@ -164,17 +172,20 @@ func (es *EpochStats) buildPackedSSZ() ([]byte, error) { } packedValues := &EpochStatsPacked{ - ActiveValidators: make([]EpochStatsPackedValidator, es.values.ActiveValidators), - ProposerDuties: es.values.ProposerDuties, - SyncCommitteeDuties: es.values.SyncCommitteeDuties, - RandaoMix: es.values.RandaoMix, - NextRandaoMix: es.values.NextRandaoMix, - TotalBalance: es.values.TotalBalance, - ActiveBalance: es.values.ActiveBalance, - FirstDepositIndex: es.values.FirstDepositIndex, - PendingWithdrawals: es.values.PendingWithdrawals, - PendingConsolidations: es.values.PendingConsolidations, - ConsolidatingBalance: es.values.ConsolidatingBalance, + ActiveValidators: make([]EpochStatsPackedValidator, es.values.ActiveValidators), + ProposerDuties: es.values.ProposerDuties, + SyncCommitteeDuties: es.values.SyncCommitteeDuties, + RandaoMix: es.values.RandaoMix, + NextRandaoMix: es.values.NextRandaoMix, + TotalBalance: es.values.TotalBalance, + ActiveBalance: es.values.ActiveBalance, + FirstDepositIndex: es.values.FirstDepositIndex, + PendingWithdrawals: es.values.PendingWithdrawals, + PendingConsolidations: es.values.PendingConsolidations, + BuilderPendingWithdrawals: es.values.BuilderPendingWithdrawals, + DelayedBuilderPaymentCount: es.values.DelayedBuilderPaymentCount, + SourceBlockUid: es.values.SourceBlockUid, + ConsolidatingBalance: es.values.ConsolidatingBalance, } lastValidatorIndex := phase0.ValidatorIndex(0) @@ -215,19 +226,22 @@ func (es *EpochStats) parsePackedSSZ(chainState *consensus.ChainState, ssz []byt } values := &EpochStatsValues{ - RandaoMix: packedValues.RandaoMix, - NextRandaoMix: packedValues.NextRandaoMix, - ActiveIndices: make([]phase0.ValidatorIndex, len(packedValues.ActiveValidators)), - EffectiveBalances: make([]uint32, len(packedValues.ActiveValidators)), - ProposerDuties: packedValues.ProposerDuties, - SyncCommitteeDuties: packedValues.SyncCommitteeDuties, - TotalBalance: packedValues.TotalBalance, - ActiveBalance: 
packedValues.ActiveBalance, - EffectiveBalance: 0, - FirstDepositIndex: packedValues.FirstDepositIndex, - PendingWithdrawals: packedValues.PendingWithdrawals, - PendingConsolidations: packedValues.PendingConsolidations, - ConsolidatingBalance: packedValues.ConsolidatingBalance, + RandaoMix: packedValues.RandaoMix, + NextRandaoMix: packedValues.NextRandaoMix, + ActiveIndices: make([]phase0.ValidatorIndex, len(packedValues.ActiveValidators)), + EffectiveBalances: make([]uint32, len(packedValues.ActiveValidators)), + ProposerDuties: packedValues.ProposerDuties, + SyncCommitteeDuties: packedValues.SyncCommitteeDuties, + TotalBalance: packedValues.TotalBalance, + ActiveBalance: packedValues.ActiveBalance, + EffectiveBalance: 0, + FirstDepositIndex: packedValues.FirstDepositIndex, + PendingWithdrawals: packedValues.PendingWithdrawals, + PendingConsolidations: packedValues.PendingConsolidations, + BuilderPendingWithdrawals: packedValues.BuilderPendingWithdrawals, + DelayedBuilderPaymentCount: packedValues.DelayedBuilderPaymentCount, + SourceBlockUid: packedValues.SourceBlockUid, + ConsolidatingBalance: packedValues.ConsolidatingBalance, } lastValidatorIndex := phase0.ValidatorIndex(0) @@ -265,7 +279,9 @@ func (es *EpochStats) parsePackedSSZ(chainState *consensus.ChainState, ssz []byt proposerDuties = append(proposerDuties, proposerIndex) } - values.ProposerDuties = proposerDuties + if len(values.ProposerDuties) == 0 { + values.ProposerDuties = proposerDuties + } if beaconState.RandaoMix != nil { values.RandaoMix = *beaconState.RandaoMix } @@ -273,6 +289,17 @@ func (es *EpochStats) parsePackedSSZ(chainState *consensus.ChainState, ssz []byt // compute committees attesterDuties, _ := duties.GetAttesterDuties(chainState.GetSpecs(), beaconState, es.epoch) values.AttesterDuties = attesterDuties + + // compute PTC duties (Gloas+ only) + if chainState.IsEip7732Enabled(es.epoch) && attesterDuties != nil { + ptcDuties := make([][]duties.ActiveIndiceIndex, 
chainState.GetSpecs().SlotsPerEpoch) + for slotIndex := uint64(0); slotIndex < chainState.GetSpecs().SlotsPerEpoch; slotIndex++ { + slot := chainState.EpochToSlot(es.epoch) + phase0.Slot(slotIndex) + ptc, _ := duties.GetPtcDuties(chainState.GetSpecs(), beaconState, attesterDuties[slotIndex], slot) + ptcDuties[slotIndex] = ptc + } + values.PtcDuties = ptcDuties + } } return values, nil @@ -291,6 +318,7 @@ func (es *EpochStats) pruneValues() { ProposerDuties: es.values.ProposerDuties, AttesterDuties: nil, // prune SyncCommitteeDuties: es.values.SyncCommitteeDuties, + PtcDuties: nil, // prune - only needed for recent epochs ActiveValidators: es.values.ActiveValidators, TotalBalance: es.values.TotalBalance, ActiveBalance: es.values.ActiveBalance, @@ -322,7 +350,7 @@ func (es *EpochStats) loadValuesFromDb(ctx context.Context, chainState *consensu } // processState processes the epoch state and computes proposer and attester duties. -func (es *EpochStats) processState(indexer *Indexer, validatorSet []*phase0.Validator) { +func (es *EpochStats) processState(indexer *Indexer, validatorSet []*phase0.Validator, loadDuration time.Duration) { if es.dependentState == nil || es.dependentState.loadingStatus != 2 { return } @@ -363,6 +391,15 @@ func (es *EpochStats) processState(indexer *Indexer, validatorSet []*phase0.Vali values.PendingWithdrawals[i] = *pendingPartialWithdrawal } + if len(dependentState.builderPendingWithdrawals) > 0 { + values.BuilderPendingWithdrawals = make([]gloas.BuilderPendingWithdrawal, len(dependentState.builderPendingWithdrawals)) + for i, bpw := range dependentState.builderPendingWithdrawals { + values.BuilderPendingWithdrawals[i] = *bpw + } + } + values.DelayedBuilderPaymentCount = dependentState.delayedBuilderPaymentCount + values.SourceBlockUid = dependentState.sourceBlockUid + for i, pendingConsolidation := range dependentState.pendingConsolidations { srcIndicee := pendingConsolidation.SourceIndex srcValidator := validatorSet[srcIndicee] @@ -424,7 
+461,7 @@ func (es *EpochStats) processState(indexer *Indexer, validatorSet []*phase0.Vali offset = slotsPerEpoch } - values.ProposerDuties = dependentState.proposerLookahead[offset : offset+slotsPerEpoch] + values.ProposerDuties = dependentState.proposerLookahead[offset:] } else { proposerDuties := []phase0.ValidatorIndex{} for slot := chainState.EpochToSlot(es.epoch); slot < chainState.EpochToSlot(es.epoch+1); slot++ { @@ -450,6 +487,20 @@ func (es *EpochStats) processState(indexer *Indexer, validatorSet []*phase0.Vali } values.AttesterDuties = attesterDuties + // compute PTC duties (Gloas+ only) + if chainState.IsEip7732Enabled(es.epoch) && attesterDuties != nil { + ptcDuties := make([][]duties.ActiveIndiceIndex, chainState.GetSpecs().SlotsPerEpoch) + for slotIndex := uint64(0); slotIndex < chainState.GetSpecs().SlotsPerEpoch; slotIndex++ { + slot := chainState.EpochToSlot(es.epoch) + phase0.Slot(slotIndex) + ptc, ptcErr := duties.GetPtcDuties(chainState.GetSpecs(), beaconState, attesterDuties[slotIndex], slot) + if ptcErr != nil { + indexer.logger.Warnf("failed computing PTC duties for slot %v: %v", slot, ptcErr) + } + ptcDuties[slotIndex] = ptc + } + values.PtcDuties = ptcDuties + } + if beaconState.RandaoMix != nil { values.RandaoMix = *beaconState.RandaoMix values.NextRandaoMix = *beaconState.NextRandaoMix @@ -475,12 +526,13 @@ func (es *EpochStats) processState(indexer *Indexer, validatorSet []*phase0.Vali es.isInDb = true indexer.logger.Infof( - "processed epoch %v stats (root: %v / state: %v, validators: %v/%v, %v ms), %v bytes", + "processed epoch %v stats (root: %v / state: %v, validators: %v/%v, load: %v ms, process: %v ms), %v bytes", es.epoch, es.dependentRoot.String(), dependentState.stateRoot.String(), values.ActiveValidators, len(validatorSet), + loadDuration.Milliseconds(), time.Since(t1).Milliseconds(), len(packedSsz), ) @@ -555,14 +607,20 @@ func (es *EpochStats) precomputeFromParentState(indexer *Indexer, parentState *E // compute proposers 
proposerDuties := []phase0.ValidatorIndex{} - for slot := chainState.EpochToSlot(es.epoch); slot < chainState.EpochToSlot(es.epoch+1); slot++ { - proposer, err := duties.GetProposerIndex(chainState.GetSpecs(), beaconState, slot) - proposerIndex := phase0.ValidatorIndex(math.MaxInt64) - if err == nil { - proposerIndex = values.ActiveIndices[proposer] - } - proposerDuties = append(proposerDuties, proposerIndex) + specs := chainState.GetSpecs() + if parentState.dependentState != nil && uint64(len(parentState.dependentState.proposerLookahead)) > specs.SlotsPerEpoch { + proposerDuties = parentState.dependentState.proposerLookahead[specs.SlotsPerEpoch:] + } else { + for slot := chainState.EpochToSlot(es.epoch); slot < chainState.EpochToSlot(es.epoch+1); slot++ { + proposer, err := duties.GetProposerIndex(chainState.GetSpecs(), beaconState, slot) + proposerIndex := phase0.ValidatorIndex(math.MaxInt64) + if err == nil { + proposerIndex = values.ActiveIndices[proposer] + } + + proposerDuties = append(proposerDuties, proposerIndex) + } } values.ProposerDuties = proposerDuties @@ -571,6 +629,17 @@ func (es *EpochStats) precomputeFromParentState(indexer *Indexer, parentState *E attesterDuties, _ := duties.GetAttesterDuties(chainState.GetSpecs(), beaconState, es.epoch) values.AttesterDuties = attesterDuties + // compute PTC duties (Gloas+ only) + if chainState.IsEip7732Enabled(es.epoch) && attesterDuties != nil { + ptcDuties := make([][]duties.ActiveIndiceIndex, chainState.GetSpecs().SlotsPerEpoch) + for slotIndex := uint64(0); slotIndex < chainState.GetSpecs().SlotsPerEpoch; slotIndex++ { + slot := chainState.EpochToSlot(es.epoch) + phase0.Slot(slotIndex) + ptc, _ := duties.GetPtcDuties(chainState.GetSpecs(), beaconState, attesterDuties[slotIndex], slot) + ptcDuties[slotIndex] = ptc + } + values.PtcDuties = ptcDuties + } + es.precalcValues = values indexer.logger.Infof( diff --git a/indexer/beacon/epochstats_ssz.go b/indexer/beacon/epochstats_ssz.go index a28e4b984..68e97cff5 
100644 --- a/indexer/beacon/epochstats_ssz.go +++ b/indexer/beacon/epochstats_ssz.go @@ -1,12 +1,12 @@ // Code generated by dynamic-ssz. DO NOT EDIT. -// Hash: a6f4f377206be4aef1242896821b07617571dd21a37f76322c8872bc7c3cf3f3 -// Version: v1.2.1 (https://github.com/pk910/dynamic-ssz) +// Hash: 23cc3d81625bca9f5609a71ab4097c8e2e896f2fb49ddcc1567f532b4e30ec00 +// Version: v1.2.2 (https://github.com/pk910/dynamic-ssz) package beacon import ( "encoding/binary" - "github.com/attestantio/go-eth2-client/spec/phase0" + "github.com/ethpandaops/go-eth2-client/spec/phase0" dynssz "github.com/pk910/dynamic-ssz" "github.com/pk910/dynamic-ssz/hasher" "github.com/pk910/dynamic-ssz/sszutils" @@ -19,107 +19,115 @@ func (t *EpochStatsPacked) MarshalSSZ() ([]byte, error) { } func (t *EpochStatsPacked) MarshalSSZTo(buf []byte) (dst []byte, err error) { dst = buf + zeroBytes := sszutils.ZeroBytes() if t == nil { t = new(EpochStatsPacked) } dstlen := len(dst) - // Offset #0 'ActiveValidators' - offset0 := len(dst) - dst = append(dst, 0, 0, 0, 0) - // Offset #1 'ProposerDuties' - offset1 := len(dst) - dst = append(dst, 0, 0, 0, 0) - // Offset #2 'SyncCommitteeDuties' - offset2 := len(dst) - dst = append(dst, 0, 0, 0, 0) - { // Field #3 'RandaoMix' + // Offset Field #0 'ActiveValidators' + // Offset Field #1 'ProposerDuties' + // Offset Field #2 'SyncCommitteeDuties' + dst = append(dst, zeroBytes[:12]...) + { // Static Field #3 'RandaoMix' dst = append(dst, t.RandaoMix[:32]...) } - { // Field #4 'NextRandaoMix' + { // Static Field #4 'NextRandaoMix' dst = append(dst, t.NextRandaoMix[:32]...) 
} - { // Field #5 'TotalBalance' + { // Static Field #5 'TotalBalance' dst = binary.LittleEndian.AppendUint64(dst, uint64(t.TotalBalance)) } - { // Field #6 'ActiveBalance' + { // Static Field #6 'ActiveBalance' dst = binary.LittleEndian.AppendUint64(dst, uint64(t.ActiveBalance)) } - { // Field #7 'FirstDepositIndex' + { // Static Field #7 'FirstDepositIndex' dst = binary.LittleEndian.AppendUint64(dst, t.FirstDepositIndex) } - // Offset #8 'PendingWithdrawals' - offset8 := len(dst) - dst = append(dst, 0, 0, 0, 0) - // Offset #9 'PendingConsolidations' - offset9 := len(dst) + // Offset Field #8 'PendingWithdrawals' + // Offset Field #9 'BuilderPendingWithdrawals' + dst = append(dst, 0, 0, 0, 0, 0, 0, 0, 0) + { // Static Field #10 'DelayedBuilderPaymentCount' + dst = binary.LittleEndian.AppendUint32(dst, t.DelayedBuilderPaymentCount) + } + // Offset Field #11 'PendingConsolidations' dst = append(dst, 0, 0, 0, 0) - { // Field #10 'ConsolidatingBalance' + { // Static Field #12 'ConsolidatingBalance' dst = binary.LittleEndian.AppendUint64(dst, uint64(t.ConsolidatingBalance)) } { // Dynamic Field #0 'ActiveValidators' - binary.LittleEndian.PutUint32(dst[offset0:], uint32(len(dst)-dstlen)) + binary.LittleEndian.PutUint32(dst[dstlen:], uint32(len(dst)-dstlen)) t := t.ActiveValidators vlen := len(t) if vlen > 10000000 { - return nil, sszutils.ErrListTooBig + return nil, sszutils.ErrorWithPath(sszutils.NewSszErrorf(sszutils.ErrListTooBig, "list length %d exceeds maximum %d", vlen, 10000000), "ActiveValidators") } - for i := range vlen { - t := &t[i] - { // Field #0 'ValidatorIndexOffset' + for idx1 := range vlen { + t := &t[idx1] + { // Static Field #0 'ValidatorIndexOffset' dst = binary.LittleEndian.AppendUint32(dst, t.ValidatorIndexOffset) } - { // Field #1 'EffectiveBalanceEth' + { // Static Field #1 'EffectiveBalanceEth' dst = binary.LittleEndian.AppendUint32(dst, t.EffectiveBalanceEth) } } } { // Dynamic Field #1 'ProposerDuties' - 
binary.LittleEndian.PutUint32(dst[offset1:], uint32(len(dst)-dstlen)) + binary.LittleEndian.PutUint32(dst[dstlen+4:], uint32(len(dst)-dstlen)) t := t.ProposerDuties vlen := len(t) if vlen > 100 { - return nil, sszutils.ErrListTooBig - } - for i := range vlen { - dst = binary.LittleEndian.AppendUint64(dst, uint64(t[i])) + return nil, sszutils.ErrorWithPath(sszutils.NewSszErrorf(sszutils.ErrListTooBig, "list length %d exceeds maximum %d", vlen, 100), "ProposerDuties") } + dst = sszutils.MarshalUint64Slice(dst, t[:vlen]) } { // Dynamic Field #2 'SyncCommitteeDuties' - binary.LittleEndian.PutUint32(dst[offset2:], uint32(len(dst)-dstlen)) + binary.LittleEndian.PutUint32(dst[dstlen+8:], uint32(len(dst)-dstlen)) t := t.SyncCommitteeDuties vlen := len(t) if vlen > 10000 { - return nil, sszutils.ErrListTooBig - } - for i := range vlen { - dst = binary.LittleEndian.AppendUint64(dst, uint64(t[i])) + return nil, sszutils.ErrorWithPath(sszutils.NewSszErrorf(sszutils.ErrListTooBig, "list length %d exceeds maximum %d", vlen, 10000), "SyncCommitteeDuties") } + dst = sszutils.MarshalUint64Slice(dst, t[:vlen]) } { // Dynamic Field #8 'PendingWithdrawals' - binary.LittleEndian.PutUint32(dst[offset8:], uint32(len(dst)-dstlen)) + binary.LittleEndian.PutUint32(dst[dstlen+100:], uint32(len(dst)-dstlen)) t := t.PendingWithdrawals vlen := len(t) if vlen > 10000000 { - return nil, sszutils.ErrListTooBig + return nil, sszutils.ErrorWithPath(sszutils.NewSszErrorf(sszutils.ErrListTooBig, "list length %d exceeds maximum %d", vlen, 10000000), "PendingWithdrawals") } - for i := range vlen { - t := &t[i] + for idx1 := range vlen { + t := &t[idx1] if dst, err = t.MarshalSSZTo(dst); err != nil { - return nil, err + return nil, sszutils.ErrorWithPathf(err, "PendingWithdrawals[%d]", idx1) } } } - { // Dynamic Field #9 'PendingConsolidations' - binary.LittleEndian.PutUint32(dst[offset9:], uint32(len(dst)-dstlen)) + { // Dynamic Field #9 'BuilderPendingWithdrawals' + 
binary.LittleEndian.PutUint32(dst[dstlen+104:], uint32(len(dst)-dstlen)) + t := t.BuilderPendingWithdrawals + vlen := len(t) + if vlen > 10000000 { + return nil, sszutils.ErrorWithPath(sszutils.NewSszErrorf(sszutils.ErrListTooBig, "list length %d exceeds maximum %d", vlen, 10000000), "BuilderPendingWithdrawals") + } + for idx1 := range vlen { + t := &t[idx1] + if dst, err = t.MarshalSSZTo(dst); err != nil { + return nil, sszutils.ErrorWithPathf(err, "BuilderPendingWithdrawals[%d]", idx1) + } + } + } + { // Dynamic Field #11 'PendingConsolidations' + binary.LittleEndian.PutUint32(dst[dstlen+112:], uint32(len(dst)-dstlen)) t := t.PendingConsolidations vlen := len(t) if vlen > 10000000 { - return nil, sszutils.ErrListTooBig + return nil, sszutils.ErrorWithPath(sszutils.NewSszErrorf(sszutils.ErrListTooBig, "list length %d exceeds maximum %d", vlen, 10000000), "PendingConsolidations") } - for i := range vlen { - t := &t[i] + for idx1 := range vlen { + t := &t[idx1] if dst, err = t.MarshalSSZTo(dst); err != nil { - return nil, err + return nil, sszutils.ErrorWithPathf(err, "PendingConsolidations[%d]", idx1) } } } @@ -128,23 +136,23 @@ func (t *EpochStatsPacked) MarshalSSZTo(buf []byte) (dst []byte, err error) { func (t *EpochStatsPacked) UnmarshalSSZ(buf []byte) (err error) { buflen := len(buf) - if buflen < 116 { - return sszutils.ErrUnexpectedEOF + if buflen < 124 { + return sszutils.NewSszErrorf(sszutils.ErrUnexpectedEOF, "not enough data for fixed fields (have %d, needed %d)", buflen, 124) } // Field #0 'ActiveValidators' (offset) offset0 := int(binary.LittleEndian.Uint32(buf[0:4])) - if offset0 != 116 { - return sszutils.ErrOffset + if offset0 != 124 { + return sszutils.ErrorWithPath(sszutils.NewSszErrorf(sszutils.ErrOffset, "first offset does not match (offset: %d, static len: %d)", offset0, 124), "ActiveValidators:o") } // Field #1 'ProposerDuties' (offset) offset1 := int(binary.LittleEndian.Uint32(buf[4:8])) if offset1 < offset0 || offset1 > buflen { - return 
sszutils.ErrOffset + return sszutils.ErrorWithPath(sszutils.NewSszErrorf(sszutils.ErrOffset, "offset out of range (offset=%d, prev=%d, buflen=%d)", offset1, offset0, buflen), "ProposerDuties:o") } // Field #2 'SyncCommitteeDuties' (offset) offset2 := int(binary.LittleEndian.Uint32(buf[8:12])) if offset2 < offset1 || offset2 > buflen { - return sszutils.ErrOffset + return sszutils.ErrorWithPath(sszutils.NewSszErrorf(sszutils.ErrOffset, "offset out of range (offset=%d, prev=%d, buflen=%d)", offset2, offset1, buflen), "SyncCommitteeDuties:o") } { // Field #3 'RandaoMix' (static) buf := buf[12:44] @@ -169,15 +177,24 @@ func (t *EpochStatsPacked) UnmarshalSSZ(buf []byte) (err error) { // Field #8 'PendingWithdrawals' (offset) offset8 := int(binary.LittleEndian.Uint32(buf[100:104])) if offset8 < offset2 || offset8 > buflen { - return sszutils.ErrOffset + return sszutils.ErrorWithPath(sszutils.NewSszErrorf(sszutils.ErrOffset, "offset out of range (offset=%d, prev=%d, buflen=%d)", offset8, offset2, buflen), "PendingWithdrawals:o") } - // Field #9 'PendingConsolidations' (offset) + // Field #9 'BuilderPendingWithdrawals' (offset) offset9 := int(binary.LittleEndian.Uint32(buf[104:108])) if offset9 < offset8 || offset9 > buflen { - return sszutils.ErrOffset + return sszutils.ErrorWithPath(sszutils.NewSszErrorf(sszutils.ErrOffset, "offset out of range (offset=%d, prev=%d, buflen=%d)", offset9, offset8, buflen), "BuilderPendingWithdrawals:o") + } + { // Field #10 'DelayedBuilderPaymentCount' (static) + buf := buf[108:112] + t.DelayedBuilderPaymentCount = binary.LittleEndian.Uint32(buf) } - { // Field #10 'ConsolidatingBalance' (static) - buf := buf[108:116] + // Field #11 'PendingConsolidations' (offset) + offset11 := int(binary.LittleEndian.Uint32(buf[112:116])) + if offset11 < offset9 || offset11 > buflen { + return sszutils.ErrorWithPath(sszutils.NewSszErrorf(sszutils.ErrOffset, "offset out of range (offset=%d, prev=%d, buflen=%d)", offset11, offset9, buflen), 
"PendingConsolidations:o") + } + { // Field #12 'ConsolidatingBalance' (static) + buf := buf[116:124] t.ConsolidatingBalance = phase0.Gwei(binary.LittleEndian.Uint64(buf)) } { // Field #0 'ActiveValidators' (dynamic) @@ -185,15 +202,15 @@ func (t *EpochStatsPacked) UnmarshalSSZ(buf []byte) (err error) { val1 := t.ActiveValidators itemCount := len(buf) / 8 if len(buf)%8 != 0 { - return sszutils.ErrUnexpectedEOF + return sszutils.ErrorWithPath(sszutils.NewSszErrorf(sszutils.ErrUnexpectedEOF, "list length %d not a multiple of element size %d", len(buf), 8), "ActiveValidators") } val1 = sszutils.ExpandSlice(val1, itemCount) - for i := range itemCount { - val2 := val1[i] - buf := buf[8*i : 8*(i+1)] + for idx1 := range itemCount { + val2 := val1[idx1] + buf := buf[8*idx1 : 8*(idx1+1)] buflen := len(buf) if buflen < 8 { - return sszutils.ErrUnexpectedEOF + return sszutils.ErrorWithPathf(sszutils.NewSszErrorf(sszutils.ErrUnexpectedEOF, "not enough data for fixed fields (have %d, needed %d)", buflen, 8), "ActiveValidators[%d]", idx1) } { // Field #0 'ValidatorIndexOffset' (static) buf := buf[0:4] @@ -203,7 +220,7 @@ func (t *EpochStatsPacked) UnmarshalSSZ(buf []byte) (err error) { buf := buf[4:8] val2.EffectiveBalanceEth = binary.LittleEndian.Uint32(buf) } - val1[i] = val2 + val1[idx1] = val2 } t.ActiveValidators = val1 } @@ -212,13 +229,10 @@ func (t *EpochStatsPacked) UnmarshalSSZ(buf []byte) (err error) { val3 := t.ProposerDuties itemCount := len(buf) / 8 if len(buf)%8 != 0 { - return sszutils.ErrUnexpectedEOF + return sszutils.ErrorWithPath(sszutils.NewSszError(sszutils.ErrUnexpectedEOF, "uint64 list length not a multiple of 8"), "ProposerDuties") } val3 = sszutils.ExpandSlice(val3, itemCount) - for i := range itemCount { - buf := buf[8*i : 8*(i+1)] - val3[i] = phase0.ValidatorIndex(binary.LittleEndian.Uint64(buf)) - } + sszutils.UnmarshalUint64Slice(val3, buf) t.ProposerDuties = val3 } { // Field #2 'SyncCommitteeDuties' (dynamic) @@ -226,13 +240,10 @@ func (t 
*EpochStatsPacked) UnmarshalSSZ(buf []byte) (err error) { val4 := t.SyncCommitteeDuties itemCount := len(buf) / 8 if len(buf)%8 != 0 { - return sszutils.ErrUnexpectedEOF + return sszutils.ErrorWithPath(sszutils.NewSszError(sszutils.ErrUnexpectedEOF, "uint64 list length not a multiple of 8"), "SyncCommitteeDuties") } val4 = sszutils.ExpandSlice(val4, itemCount) - for i := range itemCount { - buf := buf[8*i : 8*(i+1)] - val4[i] = phase0.ValidatorIndex(binary.LittleEndian.Uint64(buf)) - } + sszutils.UnmarshalUint64Slice(val4, buf) t.SyncCommitteeDuties = val4 } { // Field #8 'PendingWithdrawals' (dynamic) @@ -240,32 +251,48 @@ func (t *EpochStatsPacked) UnmarshalSSZ(buf []byte) (err error) { val5 := t.PendingWithdrawals itemCount := len(buf) / 24 if len(buf)%24 != 0 { - return sszutils.ErrUnexpectedEOF + return sszutils.ErrorWithPath(sszutils.NewSszErrorf(sszutils.ErrUnexpectedEOF, "list length %d not a multiple of element size %d", len(buf), 24), "PendingWithdrawals") } val5 = sszutils.ExpandSlice(val5, itemCount) - for i := range itemCount { - buf := buf[24*i : 24*(i+1)] - if err = val5[i].UnmarshalSSZ(buf); err != nil { - return err + for idx1 := range itemCount { + buf := buf[24*idx1 : 24*(idx1+1)] + if err = val5[idx1].UnmarshalSSZ(buf); err != nil { + return sszutils.ErrorWithPathf(err, "PendingWithdrawals[%d]", idx1) } } t.PendingWithdrawals = val5 } - { // Field #9 'PendingConsolidations' (dynamic) - buf := buf[offset9:] - val6 := t.PendingConsolidations + { // Field #9 'BuilderPendingWithdrawals' (dynamic) + buf := buf[offset9:offset11] + val6 := t.BuilderPendingWithdrawals + itemCount := len(buf) / 36 + if len(buf)%36 != 0 { + return sszutils.ErrorWithPath(sszutils.NewSszErrorf(sszutils.ErrUnexpectedEOF, "list length %d not a multiple of element size %d", len(buf), 36), "BuilderPendingWithdrawals") + } + val6 = sszutils.ExpandSlice(val6, itemCount) + for idx1 := range itemCount { + buf := buf[36*idx1 : 36*(idx1+1)] + if err = val6[idx1].UnmarshalSSZ(buf); 
err != nil { + return sszutils.ErrorWithPathf(err, "BuilderPendingWithdrawals[%d]", idx1) + } + } + t.BuilderPendingWithdrawals = val6 + } + { // Field #11 'PendingConsolidations' (dynamic) + buf := buf[offset11:] + val7 := t.PendingConsolidations itemCount := len(buf) / 16 if len(buf)%16 != 0 { - return sszutils.ErrUnexpectedEOF + return sszutils.ErrorWithPath(sszutils.NewSszErrorf(sszutils.ErrUnexpectedEOF, "list length %d not a multiple of element size %d", len(buf), 16), "PendingConsolidations") } - val6 = sszutils.ExpandSlice(val6, itemCount) - for i := range itemCount { - buf := buf[16*i : 16*(i+1)] - if err = val6[i].UnmarshalSSZ(buf); err != nil { - return err + val7 = sszutils.ExpandSlice(val7, itemCount) + for idx1 := range itemCount { + buf := buf[16*idx1 : 16*(idx1+1)] + if err = val7[idx1].UnmarshalSSZ(buf); err != nil { + return sszutils.ErrorWithPathf(err, "PendingConsolidations[%d]", idx1) } } - t.PendingConsolidations = val6 + t.PendingConsolidations = val7 } return nil } @@ -283,9 +310,11 @@ func (t *EpochStatsPacked) SizeSSZ() (size int) { // Field #6 'ActiveBalance' static (8 bytes) // Field #7 'FirstDepositIndex' static (8 bytes) // Field #8 'PendingWithdrawals' offset (4 bytes) - // Field #9 'PendingConsolidations' offset (4 bytes) - // Field #10 'ConsolidatingBalance' static (8 bytes) - size += 116 + // Field #9 'BuilderPendingWithdrawals' offset (4 bytes) + // Field #10 'DelayedBuilderPaymentCount' static (4 bytes) + // Field #11 'PendingConsolidations' offset (4 bytes) + // Field #12 'ConsolidatingBalance' static (8 bytes) + size += 124 { // Dynamic field #0 'ActiveValidators' size += len(t.ActiveValidators) * 8 } @@ -298,7 +327,10 @@ func (t *EpochStatsPacked) SizeSSZ() (size int) { { // Dynamic field #8 'PendingWithdrawals' size += len(t.PendingWithdrawals) * 24 } - { // Dynamic field #9 'PendingConsolidations' + { // Dynamic field #9 'BuilderPendingWithdrawals' + size += len(t.BuilderPendingWithdrawals) * 36 + } + { // Dynamic field #11 
'PendingConsolidations' size += len(t.PendingConsolidations) * 16 } return size @@ -323,11 +355,11 @@ func (t *EpochStatsPacked) HashTreeRootWith(hh sszutils.HashWalker) error { t := t.ActiveValidators vlen := uint64(len(t)) if vlen > 10000000 { - return sszutils.ErrListTooBig + return sszutils.ErrorWithPath(sszutils.NewSszErrorf(sszutils.ErrListTooBig, "list length %d exceeds maximum %d", vlen, 10000000), "ActiveValidators") } idx := hh.Index() - for i := range int(vlen) { - t := &t[i] + for idx1 := range int(vlen) { + t := &t[idx1] idx := hh.Index() { // Field #0 'ValidatorIndexOffset' hh.PutUint32(t.ValidatorIndexOffset) @@ -343,12 +375,10 @@ func (t *EpochStatsPacked) HashTreeRootWith(hh sszutils.HashWalker) error { t := t.ProposerDuties vlen := uint64(len(t)) if vlen > 100 { - return sszutils.ErrListTooBig + return sszutils.ErrorWithPath(sszutils.NewSszErrorf(sszutils.ErrListTooBig, "list length %d exceeds maximum %d", vlen, 100), "ProposerDuties") } idx := hh.Index() - for i := range int(vlen) { - hh.AppendUint64(uint64(t[i])) - } + sszutils.HashUint64Slice(hh, t) hh.FillUpTo32() hh.MerkleizeWithMixin(idx, vlen, sszutils.CalculateLimit(100, vlen, 8)) } @@ -356,12 +386,10 @@ func (t *EpochStatsPacked) HashTreeRootWith(hh sszutils.HashWalker) error { t := t.SyncCommitteeDuties vlen := uint64(len(t)) if vlen > 10000 { - return sszutils.ErrListTooBig + return sszutils.ErrorWithPath(sszutils.NewSszErrorf(sszutils.ErrListTooBig, "list length %d exceeds maximum %d", vlen, 10000), "SyncCommitteeDuties") } idx := hh.Index() - for i := range int(vlen) { - hh.AppendUint64(uint64(t[i])) - } + sszutils.HashUint64Slice(hh, t) hh.FillUpTo32() hh.MerkleizeWithMixin(idx, vlen, sszutils.CalculateLimit(10000, vlen, 8)) } @@ -384,33 +412,51 @@ func (t *EpochStatsPacked) HashTreeRootWith(hh sszutils.HashWalker) error { t := t.PendingWithdrawals vlen := uint64(len(t)) if vlen > 10000000 { - return sszutils.ErrListTooBig + return 
sszutils.ErrorWithPath(sszutils.NewSszErrorf(sszutils.ErrListTooBig, "list length %d exceeds maximum %d", vlen, 10000000), "PendingWithdrawals") } idx := hh.Index() - for i := range int(vlen) { - t := &t[i] + for idx1 := range int(vlen) { + t := &t[idx1] if err := t.HashTreeRootWith(hh); err != nil { - return err + return sszutils.ErrorWithPathf(err, "PendingWithdrawals[%d]", idx1) } } hh.MerkleizeWithMixin(idx, vlen, sszutils.CalculateLimit(10000000, vlen, 32)) } - { // Field #9 'PendingConsolidations' + { // Field #9 'BuilderPendingWithdrawals' + t := t.BuilderPendingWithdrawals + vlen := uint64(len(t)) + if vlen > 10000000 { + return sszutils.ErrorWithPath(sszutils.NewSszErrorf(sszutils.ErrListTooBig, "list length %d exceeds maximum %d", vlen, 10000000), "BuilderPendingWithdrawals") + } + idx := hh.Index() + for idx1 := range int(vlen) { + t := &t[idx1] + if err := t.HashTreeRootWith(hh); err != nil { + return sszutils.ErrorWithPathf(err, "BuilderPendingWithdrawals[%d]", idx1) + } + } + hh.MerkleizeWithMixin(idx, vlen, sszutils.CalculateLimit(10000000, vlen, 32)) + } + { // Field #10 'DelayedBuilderPaymentCount' + hh.PutUint32(t.DelayedBuilderPaymentCount) + } + { // Field #11 'PendingConsolidations' t := t.PendingConsolidations vlen := uint64(len(t)) if vlen > 10000000 { - return sszutils.ErrListTooBig + return sszutils.ErrorWithPath(sszutils.NewSszErrorf(sszutils.ErrListTooBig, "list length %d exceeds maximum %d", vlen, 10000000), "PendingConsolidations") } idx := hh.Index() - for i := range int(vlen) { - t := &t[i] + for idx1 := range int(vlen) { + t := &t[idx1] if err := t.HashTreeRootWith(hh); err != nil { - return err + return sszutils.ErrorWithPathf(err, "PendingConsolidations[%d]", idx1) } } hh.MerkleizeWithMixin(idx, vlen, sszutils.CalculateLimit(10000000, vlen, 32)) } - { // Field #10 'ConsolidatingBalance' + { // Field #12 'ConsolidatingBalance' hh.PutUint64(uint64(t.ConsolidatingBalance)) } hh.Merkleize(idx) diff --git 
a/indexer/beacon/epochstats_test.go b/indexer/beacon/epochstats_test.go index d5a38781b..53839c957 100644 --- a/indexer/beacon/epochstats_test.go +++ b/indexer/beacon/epochstats_test.go @@ -3,7 +3,7 @@ package beacon import ( "testing" - "github.com/attestantio/go-eth2-client/spec/phase0" + "github.com/ethpandaops/go-eth2-client/spec/phase0" "github.com/sirupsen/logrus" "github.com/sirupsen/logrus/hooks/test" ) diff --git a/indexer/beacon/epochvotes.go b/indexer/beacon/epochvotes.go index 4f4fe599c..73964f56a 100644 --- a/indexer/beacon/epochvotes.go +++ b/indexer/beacon/epochvotes.go @@ -5,9 +5,9 @@ import ( "encoding/binary" "time" - "github.com/attestantio/go-eth2-client/spec" - "github.com/attestantio/go-eth2-client/spec/phase0" "github.com/ethpandaops/dora/clients/consensus" + "github.com/ethpandaops/go-eth2-client/spec" + "github.com/ethpandaops/go-eth2-client/spec/phase0" "github.com/prysmaticlabs/go-bitfield" ) diff --git a/indexer/beacon/finalization.go b/indexer/beacon/finalization.go index 2bb70ba17..734e279ff 100644 --- a/indexer/beacon/finalization.go +++ b/indexer/beacon/finalization.go @@ -7,12 +7,13 @@ import ( "sync" "time" - v1 "github.com/attestantio/go-eth2-client/api/v1" - "github.com/attestantio/go-eth2-client/spec" - "github.com/attestantio/go-eth2-client/spec/phase0" "github.com/ethpandaops/dora/blockdb" "github.com/ethpandaops/dora/db" "github.com/ethpandaops/dora/dbtypes" + v1 "github.com/ethpandaops/go-eth2-client/api/v1" + "github.com/ethpandaops/go-eth2-client/spec" + "github.com/ethpandaops/go-eth2-client/spec/gloas" + "github.com/ethpandaops/go-eth2-client/spec/phase0" "github.com/jmoiron/sqlx" "github.com/mashingan/smapping" ) @@ -149,6 +150,15 @@ func (indexer *Indexer) finalizeEpoch(epoch phase0.Epoch, justifiedRoot phase0.R if block.block == nil { return true, fmt.Errorf("missing block body for canonical block %v (%v)", block.Slot, block.Root.String()) } + + if chainState.IsEip7732Enabled(chainState.EpochOfSlot(block.Slot)) { + if 
_, err := block.EnsureExecutionPayload(func() (*gloas.SignedExecutionPayloadEnvelope, error) { + return LoadExecutionPayload(client.getContext(), client, block.Root) + }); err != nil { + client.logger.Warnf("failed loading finalized execution payload %v (%v): %v", block.Slot, block.Root.String(), err) + } + } + canonicalBlocks = append(canonicalBlocks, block) } else { if block.block == nil { @@ -206,6 +216,10 @@ func (indexer *Indexer) finalizeEpoch(epoch phase0.Epoch, justifiedRoot phase0.R } } + if firstBlock.Slot == 0 { + dependentRoot = phase0.Root{} + } + if !isValid { return false, fmt.Errorf("first canonical block %v (%v) is not the first block of epoch %v", firstBlock.Slot, firstBlock.Root.String(), epoch) } @@ -268,7 +282,7 @@ func (indexer *Indexer) finalizeEpoch(epoch phase0.Epoch, justifiedRoot phase0.R // if the state is not yet loaded, we set it to high priority and wait for it to be loaded if !epochStats.ready { if epochStats.dependentState == nil { - indexer.epochCache.addEpochStateRequest(epochStats) + indexer.epochCache.ensureEpochDependentState(epochStats, canonicalBlocks[0].Root) } if epochStats.dependentState != nil && epochStats.dependentState.loadingStatus != 2 && epochStats.dependentState.retryCount < 10 { indexer.logger.Infof("epoch %d state (%v) not yet loaded, waiting for state to be loaded", epoch, dependentRoot.String()) @@ -314,6 +328,38 @@ func (indexer *Indexer) finalizeEpoch(epoch phase0.Epoch, justifiedRoot phase0.R finalizedForkIds[block.GetForkId()] = true } + // Determine payload status for canonical blocks (ePBS only) + // A payload is orphaned if the next canonical block doesn't build on it + allCanonicalBlocks := append(canonicalBlocks, nextEpochCanonicalBlocks...) 
+ if chainState.IsEip7732Enabled(epoch) { + for i, block := range canonicalBlocks { + blockIndex := block.GetBlockIndex(indexer.ctx) + if blockIndex == nil || blockIndex.ExecutionNumber == 0 { + continue // no execution payload + } + + // Find the next canonical block + if i+1 >= len(allCanonicalBlocks) { + continue + } + + nextBlock := allCanonicalBlocks[i+1] + if nextBlock == nil { + continue + } + + nextBlockIndex := nextBlock.GetBlockIndex(indexer.ctx) + if nextBlockIndex == nil { + continue + } + + // Check if next block builds on this block's payload + if !bytes.Equal(nextBlockIndex.ExecutionParentHash[:], blockIndex.ExecutionHash[:]) { + block.isPayloadOrphaned = true + } + } + } + dependentGroups := map[phase0.Root][]*Block{} for _, block := range orphanedBlocks { var dependentRoot phase0.Root @@ -383,6 +429,36 @@ func (indexer *Indexer) finalizeEpoch(epoch phase0.Epoch, justifiedRoot phase0.R } } + // Determine payload status for orphaned chain blocks (ePBS only) + // A payload is orphaned if the next block in the chain doesn't build on it + allChainBlocks := append(chain, nextBlocks...) 
+ for i, block := range chain { + if !chainState.IsEip7732Enabled(chainState.EpochOfSlot(block.Slot)) { + continue + } + + blockIndex := block.GetBlockIndex(indexer.ctx) + if blockIndex == nil || blockIndex.ExecutionNumber == 0 { + continue // no execution payload + } + + // Find the next block in this orphaned chain + var nextBlock *Block + if i+1 < len(allChainBlocks) { + nextBlock = allChainBlocks[i+1] + } + + if nextBlock != nil { + nextBlockIndex := nextBlock.GetBlockIndex(indexer.ctx) + if nextBlockIndex != nil { + // Check if next block builds on this block's payload + if !bytes.Equal(nextBlockIndex.ExecutionParentHash[:], blockIndex.ExecutionHash[:]) { + block.isPayloadOrphaned = true + } + } + } + } + // compute votes for canonical blocks votingBlocks := make([]*Block, len(chain)+len(nextBlocks)) copy(votingBlocks, chain) @@ -528,10 +604,9 @@ func (indexer *Indexer) finalizeEpoch(epoch phase0.Epoch, justifiedRoot phase0.R t1 = time.Now() - // update validator cache - if len(canonicalBlocks) > 0 { - indexer.validatorCache.setFinalizedEpoch(epoch, canonicalBlocks[len(canonicalBlocks)-1].Root) - } + // update validator & builder cache with the epoch's dependent root (last block of parent epoch) + indexer.validatorCache.setFinalizedEpoch(epoch, dependentRoot) + indexer.builderCache.setFinalizedEpoch(epoch, dependentRoot) // clean fork cache indexer.forkCache.setFinalizedEpoch(deleteBeforeSlot, justifiedRoot) diff --git a/indexer/beacon/fork.go b/indexer/beacon/fork.go index 5c3651f49..5f2a80b95 100644 --- a/indexer/beacon/fork.go +++ b/indexer/beacon/fork.go @@ -1,8 +1,8 @@ package beacon import ( - "github.com/attestantio/go-eth2-client/spec/phase0" "github.com/ethpandaops/dora/dbtypes" + "github.com/ethpandaops/go-eth2-client/spec/phase0" ) // ForkKey represents a key used for indexing forks. 
diff --git a/indexer/beacon/forkcache.go b/indexer/beacon/forkcache.go index 93cebee11..70f551506 100644 --- a/indexer/beacon/forkcache.go +++ b/indexer/beacon/forkcache.go @@ -7,10 +7,10 @@ import ( "sort" "sync" - "github.com/attestantio/go-eth2-client/spec/phase0" "github.com/ethereum/go-ethereum/common/lru" "github.com/ethpandaops/dora/db" "github.com/ethpandaops/dora/dbtypes" + "github.com/ethpandaops/go-eth2-client/spec/phase0" "github.com/jmoiron/sqlx" ) diff --git a/indexer/beacon/forkdetection.go b/indexer/beacon/forkdetection.go index 49c98eb24..1001d25cc 100644 --- a/indexer/beacon/forkdetection.go +++ b/indexer/beacon/forkdetection.go @@ -5,7 +5,7 @@ import ( "fmt" "strings" - "github.com/attestantio/go-eth2-client/spec/phase0" + "github.com/ethpandaops/go-eth2-client/spec/phase0" "github.com/jmoiron/sqlx" "github.com/ethpandaops/dora/db" diff --git a/indexer/beacon/indexer.go b/indexer/beacon/indexer.go index b49bc187e..3706e3619 100644 --- a/indexer/beacon/indexer.go +++ b/indexer/beacon/indexer.go @@ -7,8 +7,8 @@ import ( "sync" "time" - v1 "github.com/attestantio/go-eth2-client/api/v1" - "github.com/attestantio/go-eth2-client/spec/phase0" + v1 "github.com/ethpandaops/go-eth2-client/api/v1" + "github.com/ethpandaops/go-eth2-client/spec/phase0" "github.com/jmoiron/sqlx" dynssz "github.com/pk910/dynamic-ssz" "github.com/sirupsen/logrus" @@ -17,6 +17,7 @@ import ( "github.com/ethpandaops/dora/clients/consensus" "github.com/ethpandaops/dora/db" "github.com/ethpandaops/dora/dbtypes" + "github.com/ethpandaops/dora/statecache" "github.com/ethpandaops/dora/utils" "github.com/ethpandaops/ethwallclock" ) @@ -41,12 +42,15 @@ type Indexer struct { maxParallelStateCalls uint16 // caches + stateCache *statecache.StateCache blockCache *blockCache epochCache *epochCache forkCache *forkCache pubkeyCache *pubkeyCache validatorCache *validatorCache validatorActivity *validatorActivityCache + blockBidCache *blockBidCache + builderCache *builderCache // indexer state 
clients []*Client @@ -112,12 +116,15 @@ func NewIndexer(ctx context.Context, logger logrus.FieldLogger, consensusPool *c blockDispatcher: &utils.Dispatcher[*Block]{}, } + indexer.stateCache = statecache.New(utils.Config, indexer.dynSsz) indexer.blockCache = newBlockCache(indexer) indexer.epochCache = newEpochCache(indexer) indexer.forkCache = newForkCache(indexer) indexer.pubkeyCache = newPubkeyCache(indexer, utils.Config.Indexer.PubkeyCachePath) indexer.validatorCache = newValidatorCache(indexer) indexer.validatorActivity = newValidatorActivityCache(indexer) + indexer.blockBidCache = newBlockBidCache(indexer) + indexer.builderCache = newBuilderCache(indexer) indexer.dbWriter = newDbWriter(indexer) badChainRoots := utils.Config.Indexer.BadChainRoots @@ -275,6 +282,14 @@ func (indexer *Indexer) StartIndexer() { indexer.logger.Infof("restored %v validators from DB (%.3f sec)", validatorCount, time.Since(t1).Seconds()) } + // restore finalized builder set from db + t1 = time.Now() + if builderCount, err := indexer.builderCache.prepopulateFromDB(); err != nil { + indexer.logger.WithError(err).Errorf("failed loading builder set") + } else if builderCount > 0 { + indexer.logger.Infof("restored %v builders from DB (%.3f sec)", builderCount, time.Since(t1).Seconds()) + } + // restore unfinalized epoch stats from db restoredEpochStats := 0 t1 = time.Now() @@ -292,7 +307,7 @@ func (indexer *Indexer) StartIndexer() { processingWaitGroup.Done() }() - epochStats := indexer.epochCache.createOrGetEpochStats(phase0.Epoch(dbDuty.Epoch), phase0.Root(dbDuty.DependentRoot), false) + epochStats := indexer.epochCache.createOrGetEpochStats(phase0.Epoch(dbDuty.Epoch), phase0.Root(dbDuty.DependentRoot)) pruneStats := dbDuty.Epoch < uint64(indexer.lastPrunedEpoch) err := epochStats.restoreFromDb(dbDuty, chainState, !pruneStats) @@ -340,6 +355,7 @@ func (indexer *Indexer) StartIndexer() { // restore unfinalized blocks from db restoredBlockCount := 0 restoredBodyCount := 0 + 
restoredPayloadCount := 0 t1 = time.Now() err = db.StreamUnfinalizedBlocks(indexer.ctx, uint64(finalizedSlot), func(dbBlock *dbtypes.UnfinalizedBlock) { block, _ := indexer.blockCache.createOrGetBlock(phase0.Root(dbBlock.Root), phase0.Slot(dbBlock.Slot)) @@ -377,10 +393,23 @@ func (indexer *Indexer) StartIndexer() { block.SetBlock(blockBody) restoredBodyCount++ } else { - block.setBlockIndex(blockBody) + block.setBlockIndex(blockBody, nil) block.isInFinalizedDb = true } + if len(dbBlock.PayloadSSZ) > 0 { + blockPayload, err := UnmarshalVersionedSignedExecutionPayloadEnvelopeSSZ(indexer.dynSsz, dbBlock.PayloadVer, dbBlock.PayloadSSZ) + if err != nil { + indexer.logger.Warnf("could not restore unfinalized block payload %v [%x] from db: %v", dbBlock.Slot, dbBlock.Root, err) + } else if block.processingStatus == 0 { + block.SetExecutionPayload(blockPayload) + restoredPayloadCount++ + } else { + block.setBlockIndex(blockBody, blockPayload) + block.hasExecutionPayload = true + } + } + indexer.blockCache.addBlockToExecBlockMap(block) blockFork := indexer.forkCache.getForkById(block.forkId) @@ -404,6 +433,9 @@ func (indexer *Indexer) StartIndexer() { indexer.logger.Infof("restored %v unfinalized blocks from DB (%v with bodies, %.3f sec)", restoredBlockCount, restoredBodyCount, time.Since(t1).Seconds()) } + // restore block bids from db + indexer.blockBidCache.loadFromDB(chainState.CurrentSlot()) + // start indexing for all clients for _, client := range indexer.clients { client.startIndexing() @@ -424,7 +456,8 @@ func (indexer *Indexer) StartIndexer() { if len(genesisBlock) == 0 { indexer.logger.Warnf("genesis block not found in cache") } else { - indexer.epochCache.createOrGetEpochStats(0, genesisBlock[0].Root, true) + epochStats := indexer.epochCache.createOrGetEpochStats(0, genesisBlock[0].Root) + indexer.epochCache.ensureEpochDependentState(epochStats, genesisBlock[0].Root) } } @@ -438,6 +471,11 @@ func (indexer *Indexer) StartIndexer() { } func (indexer *Indexer) 
StopIndexer() { + // flush block bids to db before shutdown + if err := indexer.blockBidCache.flushAll(); err != nil { + indexer.logger.WithError(err).Errorf("error flushing block bids on shutdown") + } + indexer.pubkeyCache.Close() } @@ -489,6 +527,11 @@ func (indexer *Indexer) runIndexerLoop() { slotIndex := chainState.SlotToSlotIndex(phase0.Slot(slotEvent.Number())) slotProgress := uint8(100 / chainState.GetSpecs().SlotsPerEpoch * uint64(slotIndex)) + // flush old block bids if needed + if err := indexer.blockBidCache.checkAndFlush(); err != nil { + indexer.logger.WithError(err).Errorf("failed flushing block bids") + } + // precalc next canonical duties on epoch start if epoch >= indexer.lastPrecalcRunEpoch { err := indexer.precalcNextEpochStats(epoch) diff --git a/indexer/beacon/indexer_getter.go b/indexer/beacon/indexer_getter.go index 46817c0b5..a8200d73a 100644 --- a/indexer/beacon/indexer_getter.go +++ b/indexer/beacon/indexer_getter.go @@ -7,11 +7,13 @@ import ( "slices" "sort" - v1 "github.com/attestantio/go-eth2-client/api/v1" - "github.com/attestantio/go-eth2-client/spec/electra" - "github.com/attestantio/go-eth2-client/spec/phase0" "github.com/ethpandaops/dora/clients/consensus" "github.com/ethpandaops/dora/db" + "github.com/ethpandaops/dora/dbtypes" + v1 "github.com/ethpandaops/go-eth2-client/api/v1" + "github.com/ethpandaops/go-eth2-client/spec/electra" + "github.com/ethpandaops/go-eth2-client/spec/gloas" + "github.com/ethpandaops/go-eth2-client/spec/phase0" dynssz "github.com/pk910/dynamic-ssz" ) @@ -222,6 +224,14 @@ func (indexer *Indexer) GetOrphanedBlockByRoot(blockRoot phase0.Root) (*Block, e block.SetHeader(header) block.SetBlock(blockBody) + if len(orphanedBlock.PayloadSSZ) > 0 { + payload, err := UnmarshalVersionedSignedExecutionPayloadEnvelopeSSZ(indexer.dynSsz, orphanedBlock.PayloadVer, orphanedBlock.PayloadSSZ) + if err != nil { + return nil, fmt.Errorf("could not restore orphaned block payload %v [%x] from db: %v", header.Message.Slot, 
orphanedBlock.Root, err) + } + block.SetExecutionPayload(payload) + } + return block, nil } @@ -505,3 +515,178 @@ func (indexer *Indexer) GetFullValidatorByIndex(validatorIndex phase0.ValidatorI return validatorData } + +// GetBlockBids returns the execution payload bids for a given parent block root. +// It first checks the in-memory cache, then falls back to the database. +func (indexer *Indexer) GetBlockBids(parentBlockRoot phase0.Root) []*dbtypes.BlockBid { + // First check the in-memory cache + bids := indexer.blockBidCache.GetBidsForBlockRoot(parentBlockRoot) + if len(bids) > 0 { + return bids + } + + // Fall back to database + return db.GetBidsForBlockRoot(indexer.ctx, parentBlockRoot[:]) +} + +// StreamActiveBuilderDataForRoot streams the available builder set data for a given blockRoot. +func (indexer *Indexer) StreamActiveBuilderDataForRoot(blockRoot phase0.Root, activeOnly bool, epoch *phase0.Epoch, cb BuilderSetStreamer) error { + return indexer.builderCache.streamBuilderSetForRoot(blockRoot, activeOnly, epoch, cb) +} + +// GetBuilderSetSize returns the size of the builder set cache. +func (indexer *Indexer) GetBuilderSetSize() uint64 { + return indexer.builderCache.getBuilderSetSize() +} + +// GetBuilderByIndex returns the builder by index for the canonical head. +func (indexer *Indexer) GetBuilderByIndex(index gloas.BuilderIndex, overrideForkId *ForkKey) *gloas.Builder { + return indexer.builderCache.getBuilderByIndex(index, overrideForkId) +} + +// GetRecentBuilderBalances returns the most recent builder balances for the given fork. +// Starts with epoch-boundary balances and replays in-epoch blocks to reflect live state: +// builder withdrawals/payments are deducted and builder deposits are credited. 
+func (indexer *Indexer) GetRecentBuilderBalances(overrideForkId *ForkKey) []phase0.Gwei { + chainState := indexer.consensusPool.GetChainState() + + canonicalHead := indexer.GetCanonicalHead(overrideForkId) + if canonicalHead == nil { + return nil + } + + headEpoch := chainState.EpochOfSlot(canonicalHead.Slot) + + var epochStats *EpochStats + var statsEpoch phase0.Epoch + for { + cEpoch := chainState.EpochOfSlot(canonicalHead.Slot) + if headEpoch-cEpoch > 2 { + return nil + } + + dependentBlock := indexer.blockCache.getDependentBlock(chainState, canonicalHead, nil) + if dependentBlock == nil { + return nil + } + canonicalHead = dependentBlock + + stats := indexer.epochCache.getEpochStats(cEpoch, dependentBlock.Root) + if cEpoch > 0 && (stats == nil || stats.dependentState == nil || stats.dependentState.loadingStatus != 2) { + continue // retry previous state + } + + epochStats = stats + statsEpoch = cEpoch + break + } + + if epochStats == nil || epochStats.dependentState == nil { + return nil + } + + // Copy epoch-boundary balances so we can mutate them. + src := epochStats.dependentState.builderBalances + balances := make([]phase0.Gwei, len(src)) + copy(balances, src) + + // Walk the canonical chain from head back to epoch start, collecting blocks to replay. + epochStartSlot := chainState.EpochToSlot(statsEpoch) + head := indexer.GetCanonicalHead(overrideForkId) + + var epochBlocks []*Block + for block := head; block != nil && block.Slot >= epochStartSlot; { + epochBlocks = append(epochBlocks, block) + parentRoot := block.GetParentRoot() + if parentRoot == nil { + break + } + block = indexer.blockCache.getBlockByRoot(*parentRoot) + } + + // Replay in slot order (reverse the collected list). 
+ isEip7732 := chainState.IsEip7732Enabled(chainState.EpochOfSlot(epochStartSlot)) + for i := len(epochBlocks) - 1; i >= 0; i-- { + block := epochBlocks[i] + indexer.applyBuilderBalanceChanges(block, balances, isEip7732) + } + + return balances +} + +// applyBuilderBalanceChanges extracts withdrawals and deposit requests from a +// cached block body and applies the corresponding builder balance changes. +func (indexer *Indexer) applyBuilderBalanceChanges(block *Block, balances []phase0.Gwei, isEip7732 bool) { + // Apply withdrawals (decrease builder balances). + if isEip7732 { + payload := block.GetExecutionPayload(indexer.ctx) + if payload != nil && payload.Message != nil && payload.Message.Payload != nil { + for _, w := range payload.Message.Payload.Withdrawals { + if uint64(w.ValidatorIndex)&BuilderIndexFlag == 0 { + continue + } + builderIdx := uint64(w.ValidatorIndex) &^ BuilderIndexFlag + if builderIdx < uint64(len(balances)) { + if balances[builderIdx] >= w.Amount { + balances[builderIdx] -= w.Amount + } else { + balances[builderIdx] = 0 + } + } + } + + // Apply deposit requests (increase builder balances). 
+ if payload.Message.ExecutionRequests != nil { + for _, deposit := range payload.Message.ExecutionRequests.Deposits { + if validatorIdx, found := indexer.pubkeyCache.Get(deposit.Pubkey); found { + idx := uint64(validatorIdx) + if idx&BuilderIndexFlag != 0 { + builderIdx := idx &^ BuilderIndexFlag + if builderIdx < uint64(len(balances)) { + balances[builderIdx] += phase0.Gwei(deposit.Amount) + } + } + } + } + } + } + } else { + blockBody := block.GetBlock(indexer.ctx) + if blockBody == nil { + return + } + + if execPayload, err := blockBody.ExecutionPayload(); err == nil && execPayload != nil { + if withdrawals, err := execPayload.Withdrawals(); err == nil { + for _, w := range withdrawals { + if uint64(w.ValidatorIndex)&BuilderIndexFlag == 0 { + continue + } + builderIdx := uint64(w.ValidatorIndex) &^ BuilderIndexFlag + if builderIdx < uint64(len(balances)) { + if balances[builderIdx] >= w.Amount { + balances[builderIdx] -= w.Amount + } else { + balances[builderIdx] = 0 + } + } + } + } + } + + // Apply deposit requests (increase builder balances). 
+ if requests, err := blockBody.ExecutionRequests(); err == nil && requests != nil { + for _, deposit := range requests.Deposits { + if validatorIdx, found := indexer.pubkeyCache.Get(deposit.Pubkey); found { + idx := uint64(validatorIdx) + if idx&BuilderIndexFlag != 0 { + builderIdx := idx &^ BuilderIndexFlag + if builderIdx < uint64(len(balances)) { + balances[builderIdx] += phase0.Gwei(deposit.Amount) + } + } + } + } + } + } +} diff --git a/indexer/beacon/precalc.go b/indexer/beacon/precalc.go index 078370eda..209b04409 100644 --- a/indexer/beacon/precalc.go +++ b/indexer/beacon/precalc.go @@ -3,7 +3,7 @@ package beacon import ( "fmt" - "github.com/attestantio/go-eth2-client/spec/phase0" + "github.com/ethpandaops/go-eth2-client/spec/phase0" ) func (indexer *Indexer) precalcNextEpochStats(epoch phase0.Epoch) error { @@ -32,7 +32,7 @@ func (indexer *Indexer) precalcNextEpochStats(epoch phase0.Epoch) error { } // precompute epoch stats for the epoch if we have the parent epoch stats ready - epochStats := indexer.epochCache.createOrGetEpochStats(epoch, dependentBlock.Root, false) + epochStats := indexer.epochCache.createOrGetEpochStats(epoch, dependentBlock.Root) if !epochStats.ready { var parentDependentBlock *Block if chainState.EpochOfSlot(dependentBlock.Slot) == epoch-1 { diff --git a/indexer/beacon/pruning.go b/indexer/beacon/pruning.go index ff64840ad..d94fb9813 100644 --- a/indexer/beacon/pruning.go +++ b/indexer/beacon/pruning.go @@ -7,9 +7,9 @@ import ( "sort" "time" - "github.com/attestantio/go-eth2-client/spec/phase0" "github.com/ethpandaops/dora/db" "github.com/ethpandaops/dora/dbtypes" + "github.com/ethpandaops/go-eth2-client/spec/phase0" "github.com/jmoiron/sqlx" "github.com/mashingan/smapping" ) @@ -117,7 +117,7 @@ func (indexer *Indexer) processEpochPruning(pruneEpoch phase0.Epoch) (uint64, ui // if the state is not yet loaded, we set it to high priority and wait for it to be loaded if epochStats != nil && !epochStats.ready { if 
epochStats.dependentState == nil { - indexer.epochCache.addEpochStateRequest(epochStats) + indexer.epochCache.ensureEpochDependentState(epochStats, blocks[0].Root) } if epochStats.dependentState != nil && epochStats.dependentState.loadingStatus != 2 && epochStats.dependentState.retryCount < 10 { indexer.logger.Infof("epoch %d state (%v) not yet loaded, waiting for state to be loaded", pruneEpoch, dependentRoot.String()) @@ -169,6 +169,36 @@ func (indexer *Indexer) processEpochPruning(pruneEpoch phase0.Epoch) (uint64, ui } } + // Determine payload status for chain blocks (ePBS only) + // A payload is orphaned if the next block in the chain doesn't build on it + allChainBlocks := append(chain, nextBlocks...) + for i, block := range chain { + if !chainState.IsEip7732Enabled(chainState.EpochOfSlot(block.Slot)) { + continue + } + + blockIndex := block.GetBlockIndex(indexer.ctx) + if blockIndex == nil || blockIndex.ExecutionNumber == 0 { + continue // no execution payload + } + + // Find the next block in this chain + var nextBlock *Block + if i+1 < len(allChainBlocks) { + nextBlock = allChainBlocks[i+1] + } + + if nextBlock != nil { + nextBlockIndex := nextBlock.GetBlockIndex(indexer.ctx) + if nextBlockIndex != nil { + // Check if next block builds on this block's payload + if !bytes.Equal(nextBlockIndex.ExecutionParentHash[:], blockIndex.ExecutionHash[:]) { + block.isPayloadOrphaned = true + } + } + } + } + // compute votes for canonical blocks votingBlocks := make([]*Block, len(chain)+len(nextBlocks)) copy(votingBlocks, chain) @@ -257,8 +287,9 @@ func (indexer *Indexer) processEpochPruning(pruneEpoch phase0.Epoch) (uint64, ui for _, block := range pruningBlocks { block.isInFinalizedDb = true block.processingStatus = dbtypes.UnfinalizedBlockStatusPruned - block.setBlockIndex(block.block) + block.setBlockIndex(block.block, block.executionPayload) block.block = nil + block.executionPayload = nil block.blockResults = nil } diff --git a/indexer/beacon/pubkeycache.go 
b/indexer/beacon/pubkeycache.go index 67cfc7ec0..573bed4af 100644 --- a/indexer/beacon/pubkeycache.go +++ b/indexer/beacon/pubkeycache.go @@ -4,7 +4,7 @@ import ( "strconv" "sync" - "github.com/attestantio/go-eth2-client/spec/phase0" + "github.com/ethpandaops/go-eth2-client/spec/phase0" "github.com/syndtr/goleveldb/leveldb" ) diff --git a/indexer/beacon/requests.go b/indexer/beacon/requests.go index 604033f8f..97251a7be 100644 --- a/indexer/beacon/requests.go +++ b/indexer/beacon/requests.go @@ -5,8 +5,9 @@ import ( "fmt" "time" - "github.com/attestantio/go-eth2-client/spec" - "github.com/attestantio/go-eth2-client/spec/phase0" + "github.com/ethpandaops/go-eth2-client/spec" + "github.com/ethpandaops/go-eth2-client/spec/gloas" + "github.com/ethpandaops/go-eth2-client/spec/phase0" ) // BeaconHeaderRequestTimeout is the timeout duration for beacon header requests. @@ -18,6 +19,9 @@ const beaconBodyRequestTimeout time.Duration = 30 * time.Second // BeaconStateRequestTimeout is the timeout duration for beacon state requests. const beaconStateRequestTimeout time.Duration = 600 * time.Second +// ExecutionPayloadRequestTimeout is the timeout duration for execution payload requests. +const executionPayloadRequestTimeout time.Duration = 30 * time.Second + const beaconStateRetryCount = 10 const beaconStateHighPriorityRetryCount uint64 = 3 @@ -69,10 +73,29 @@ func LoadBeaconState(ctx context.Context, client *Client, root phase0.Root) (*sp ctx, cancel := context.WithTimeout(ctx, beaconStateRequestTimeout) defer cancel() - resState, err := client.client.GetRPCClient().GetState(ctx, fmt.Sprintf("0x%x", root[:])) + stateRef := fmt.Sprintf("0x%x", root[:]) + nullRoot := phase0.Root{} + if root == nullRoot { + stateRef = "genesis" + } + + resState, err := client.client.GetRPCClient().GetState(ctx, stateRef) if err != nil { return nil, err } return resState, nil } + +// LoadExecutionPayload loads the execution payload from the client. 
+func LoadExecutionPayload(ctx context.Context, client *Client, root phase0.Root) (*gloas.SignedExecutionPayloadEnvelope, error) { + ctx, cancel := context.WithTimeout(ctx, executionPayloadRequestTimeout) + defer cancel() + + payload, err := client.client.GetRPCClient().GetExecutionPayloadByBlockroot(ctx, root) + if err != nil { + return nil, err + } + + return payload, nil +} diff --git a/indexer/beacon/state_sim.go b/indexer/beacon/state_sim.go index 7c83d367c..8293f5c02 100644 --- a/indexer/beacon/state_sim.go +++ b/indexer/beacon/state_sim.go @@ -2,12 +2,15 @@ package beacon import ( "bytes" + "math" "slices" - "github.com/attestantio/go-eth2-client/spec/electra" - "github.com/attestantio/go-eth2-client/spec/phase0" + "github.com/ethpandaops/dora/db" "github.com/ethpandaops/dora/dbtypes" "github.com/ethpandaops/dora/utils" + "github.com/ethpandaops/go-eth2-client/spec/electra" + "github.com/ethpandaops/go-eth2-client/spec/gloas" + "github.com/ethpandaops/go-eth2-client/spec/phase0" ) type stateSimulator struct { @@ -18,10 +21,21 @@ type stateSimulator struct { validatorSet []*phase0.Validator } +// trackedBuilderWithdrawal pairs a builder pending withdrawal with the optional +// BlockUID of the block whose payload delivery created it. Entries loaded from +// the epoch boundary state have RefBlockUID == nil; entries added during replay +// carry the source block's UID. 
+type trackedBuilderWithdrawal struct { + gloas.BuilderPendingWithdrawal + RefBlockUID *uint64 +} + type stateSimulatorState struct { epochRoot phase0.Root block *Block pendingWithdrawals []electra.PendingPartialWithdrawal + builderPendingWithdrawals []trackedBuilderWithdrawal + builderDelayedCount uint32 // how many entries in builderPendingWithdrawals are delayed/quorum payments additionalWithdrawals []phase0.ValidatorIndex pendingConsolidationCount uint64 validatorMap map[phase0.ValidatorIndex]*phase0.Validator @@ -43,9 +57,16 @@ func newStateSimulator(indexer *Indexer, epochStats *EpochStats) *stateSimulator return sim } +// getParentBlocks returns blocks that need to be replayed before the target block. +// Uses the sim's epoch as the boundary — all blocks from the epoch start up to +// (but not including) the target block are returned. +// The epoch state is always the pre-state of the first slot (post-epoch-transition), +// so all blocks in the epoch need to be replayed. func (sim *stateSimulator) getParentBlocks(block *Block) []*Block { chainState := sim.indexer.consensusPool.GetChainState() - minSlot := chainState.EpochToSlot(chainState.EpochOfSlot(block.Slot)) + simEpoch := sim.epochStats.epoch + minSlot := chainState.EpochToSlot(simEpoch) + parentBlocks := []*Block{} for { @@ -88,10 +109,21 @@ func (sim *stateSimulator) resetState(block *Block) *stateSimulatorState { } } + rawBuilderWithdrawals := sim.epochStatsValues.BuilderPendingWithdrawals + if rawBuilderWithdrawals == nil { + rawBuilderWithdrawals = []gloas.BuilderPendingWithdrawal{} + } + trackedWithdrawals := make([]trackedBuilderWithdrawal, len(rawBuilderWithdrawals)) + for i := range rawBuilderWithdrawals { + trackedWithdrawals[i] = trackedBuilderWithdrawal{BuilderPendingWithdrawal: rawBuilderWithdrawals[i]} + } + state := &stateSimulatorState{ block: nil, epochRoot: epochRoot, pendingWithdrawals: pendingWithdrawals, + builderPendingWithdrawals: trackedWithdrawals, + builderDelayedCount: 
sim.epochStatsValues.DelayedBuilderPaymentCount, // delayed payments from epoch transition are at the tail pendingConsolidationCount: 0, additionalWithdrawals: []phase0.ValidatorIndex{}, validatorMap: map[phase0.ValidatorIndex]*phase0.Validator{}, @@ -118,9 +150,67 @@ func (sim *stateSimulator) resetState(block *Block) *stateSimulatorState { // get pending withdrawals from state state.pendingWithdrawals = sim.epochStatsValues.PendingWithdrawals + // Resolve RefBlockUIDs for initial direct entries by matching them to + // blocks with delivered payloads in the previous epoch (FIFO order). + directCount := len(trackedWithdrawals) - int(state.builderDelayedCount) + if directCount > 0 { + sim.resolveInitialDirectRefs(state, directCount) + } + return state } +// resolveInitialDirectRefs populates RefBlockUID for the first directCount entries +// in the builder pending withdrawals queue. These are direct payments from the +// previous epoch's delivered payloads, loaded from the epoch boundary state without +// source block information. We scan the previous epoch's blocks in slot order and +// match delivered payloads to queue entries by FIFO position. 
+func (sim *stateSimulator) resolveInitialDirectRefs(state *stateSimulatorState, directCount int) { + chainState := sim.indexer.consensusPool.GetChainState() + + if sim.epochStats.epoch == 0 { + return + } + prevEpoch := sim.epochStats.epoch - 1 + prevStart := chainState.EpochToSlot(prevEpoch) + prevEnd := chainState.EpochToSlot(prevEpoch + 1) + + resolved := 0 + _, prunedEpoch := sim.indexer.GetBlockCacheState() + if prevEpoch >= prunedEpoch { + // Previous epoch is in cache + for slot := prevStart; slot < prevEnd && resolved < directCount; slot++ { + blocks := sim.indexer.GetBlocksBySlot(slot) + for _, b := range blocks { + if b.HasExecutionPayload() && !b.isPayloadOrphaned { + uid := b.BlockUID + state.builderPendingWithdrawals[resolved].RefBlockUID = &uid + resolved++ + if resolved >= directCount { + break + } + } + } + } + } else { + // Previous epoch is finalized/pruned — query DB + dbSlots := db.GetSlotsRange(sim.indexer.ctx, uint64(prevEnd-1), uint64(prevStart), false, false) + for _, assignedSlot := range dbSlots { + if resolved >= directCount { + break + } + if assignedSlot.Block == nil { + continue + } + if assignedSlot.Block.PayloadStatus == dbtypes.PayloadStatusCanonical { + uid := assignedSlot.Block.BlockUid + state.builderPendingWithdrawals[resolved].RefBlockUID = &uid + resolved++ + } + } + } +} + func (sim *stateSimulator) getValidator(index phase0.ValidatorIndex) *phase0.Validator { if validator, ok := sim.prevState.validatorMap[index]; ok { return validator @@ -337,9 +427,44 @@ func (sim *stateSimulator) applyBlock(block *Block) [][]uint8 { return nil } - // process pending withdrawals + // process builder pending withdrawals (come first in the spec) chainState := sim.indexer.consensusPool.GetChainState() chainSpec := chainState.GetSpecs() + processedBuilderWithdrawals := uint32(len(sim.prevState.builderPendingWithdrawals)) + if processedBuilderWithdrawals > 0 { + // Delayed entries are at the tail. 
When consuming N entries from the front, + // the delayed count decreases by however many delayed entries were in that batch. + // directCount = total - delayed; consumed from front = min(total, processed). + // If we consume all: delayed consumed = delayed count. + // If we consume partial: delayed consumed = max(0, processed - (total - delayed)). + directCount := uint32(0) + if processedBuilderWithdrawals > sim.prevState.builderDelayedCount { + directCount = processedBuilderWithdrawals - sim.prevState.builderDelayedCount + } + delayedConsumed := processedBuilderWithdrawals - directCount + if delayedConsumed > sim.prevState.builderDelayedCount { + delayedConsumed = sim.prevState.builderDelayedCount + } + sim.prevState.builderDelayedCount -= delayedConsumed + sim.prevState.builderPendingWithdrawals = sim.prevState.builderPendingWithdrawals[processedBuilderWithdrawals:] + } + + // After processing withdrawals, check if this block has a full payload (direct payment added to queue) + if block.HasExecutionPayload() && !block.isPayloadOrphaned { + blockIndex := block.GetBlockIndex(sim.indexer.ctx) + if blockIndex != nil && blockIndex.BuilderIndex != math.MaxUint64 { + uid := block.BlockUID + sim.prevState.builderPendingWithdrawals = append(sim.prevState.builderPendingWithdrawals, trackedBuilderWithdrawal{ + BuilderPendingWithdrawal: gloas.BuilderPendingWithdrawal{ + BuilderIndex: gloas.BuilderIndex(blockIndex.BuilderIndex), + }, + RefBlockUID: &uid, + }) + // builderDelayedCount stays the same — the new entry is direct, not delayed + } + } + + // process pending partial withdrawals processedWithdrawals := uint64(0) skippedWithdrawals := uint64(0) for _, pendingWithdrawal := range sim.prevState.pendingWithdrawals { @@ -519,3 +644,211 @@ func (sim *stateSimulator) replayBlockResults(block *Block) [][]uint8 { return results } + +// builderPaymentClassification holds the type and reference slot for a single builder payment withdrawal. 
+type builderPaymentClassification struct { + Type uint8 + RefSlot *uint64 +} + +// withdrawalSimResult holds the result of simulating pending withdrawals for a block. +type withdrawalSimResult struct { + BuilderPaymentCount int + BuilderPayments []builderPaymentClassification // one per builder payment (first BuilderPaymentCount entries) + PartialCount int +} + +// replayWithdrawalState simulates the pending withdrawal queue for the given block +// and returns classification info for builder payments and counts of each category. +// The epoch state is always the pre-state of the first slot (post-epoch-transition), +// so all blocks from the epoch start are replayed uniformly — no first-slot special casing. +func (sim *stateSimulator) replayWithdrawalState(block *Block) *withdrawalSimResult { + result := &withdrawalSimResult{} + + chainState := sim.indexer.consensusPool.GetChainState() + chainSpec := chainState.GetSpecs() + if chainSpec.MaxPendingPartialsPerWithdrawalsSweep == 0 { + return result + } + + // Replay state up to (but not including) target block + parentBlocks := sim.getParentBlocks(block) + state := sim.resetState(block) + if state == nil { + return result + } + for _, parentBlock := range parentBlocks { + sim.applyBlock(parentBlock) + } + + // Builder payment classification + builderCount := len(sim.prevState.builderPendingWithdrawals) + result.BuilderPaymentCount = builderCount + + if builderCount > 0 { + result.BuilderPayments = sim.classifyBuilderPayments(block, builderCount) + } + + // Count pending partial withdrawals + for _, pw := range sim.prevState.pendingWithdrawals { + if pw.WithdrawableEpoch > sim.epochStats.epoch { + break + } + validator := sim.getValidator(pw.ValidatorIndex) + if validator == nil { + break + } + if validator.ExitEpoch != FarFutureEpoch || validator.EffectiveBalance < phase0.Gwei(chainSpec.MinActivationBalance) { + continue + } + result.PartialCount++ + if uint64(result.PartialCount) >= 
chainSpec.MaxPendingPartialsPerWithdrawalsSweep { + break + } + } + + return result +} + +// classifyBuilderPayments determines the type and reference slot for each builder payment +// in the pending queue. The epoch pre-state queue layout is: +// +// [direct_from_prev_payloads..., delayed_0, ..., delayed_N] +// +// Direct entries (from delivered payloads) are at the front — the payload transition +// (process_execution_payload) runs before the epoch transition. Delayed entries +// (from process_builder_pending_payments during the epoch transition) are at the tail. +// The number of delayed entries is known from DelayedBuilderPaymentCount. +// +// During block replay, each block consumes all entries from the front and may append +// a new direct entry at the back if it has a delivered payload. +func (sim *stateSimulator) classifyBuilderPayments(block *Block, builderCount int) []builderPaymentClassification { + delayedCount := sim.prevState.builderDelayedCount + + payments := make([]builderPaymentClassification, builderCount) + + // Delayed entries are at the tail: positions [builderCount - delayedCount, builderCount) + delayedStart := builderCount - int(delayedCount) + + // Resolve delayed entries by matching against blocks with missed/orphaned payloads + // from the source epoch (2 epochs back). The delayed entries are generated in slot + // order by process_builder_pending_payments, so we collect all candidate blocks + // and assign them to delayed entries in order. 
+ delayedRefs := sim.resolveDelayedPaymentRefSlots(builderCount, block) + + for i := range payments { + if i >= delayedStart && delayedStart >= 0 { + // Delayed entry (tail of queue, from epoch transition) + payments[i].Type = dbtypes.WithdrawalTypeBuilderDelayedPayment + delayedOff := i - delayedStart + if delayedOff < len(delayedRefs) { + payments[i].RefSlot = delayedRefs[delayedOff] + } + } else { + // Direct entry (from a delivered payload) + payments[i].Type = dbtypes.WithdrawalTypeBuilderPayment + // Use the tracked source block UID if available (set during replay). + if i < len(sim.prevState.builderPendingWithdrawals) { + payments[i].RefSlot = sim.prevState.builderPendingWithdrawals[i].RefBlockUID + } + } + } + + return payments +} + +// resolveDelayedPaymentRefSlots resolves reference block UIDs for all delayed entries +// in the builder pending withdrawals queue. Delayed payments originate from +// process_builder_pending_payments during the epoch transition, which processes +// BuilderPendingPayments entries from 2 epochs ago in slot order. Each delayed +// entry corresponds to a block where the builder's payload was missed/orphaned. +// +// We scan the source epoch's blocks in slot order, collecting those with missed +// payloads, and assign them to delayed entries in FIFO order. 
+func (sim *stateSimulator) resolveDelayedPaymentRefSlots(builderCount int, block *Block) []*uint64 { + delayedCount := int(sim.prevState.builderDelayedCount) + if delayedCount == 0 { + return nil + } + + chainState := sim.indexer.consensusPool.GetChainState() + blockEpoch := chainState.EpochOfSlot(block.Slot) + if blockEpoch < 2 { + return make([]*uint64, delayedCount) + } + + // Delayed payments from epoch K-2 are evaluated at epoch K boundary + sourceEpoch := blockEpoch - 2 + sourceEpochFirstSlot := chainState.EpochToSlot(sourceEpoch) + sourceEpochEndSlot := chainState.EpochToSlot(sourceEpoch + 1) + + // Collect all blocks with missed/orphaned payloads from the source epoch, in slot order. + type missedBlock struct { + builderIndex uint64 + blockUID uint64 + } + var missedBlocks []missedBlock + + _, prunedEpoch := sim.indexer.GetBlockCacheState() + if sourceEpoch >= prunedEpoch { + for slot := sourceEpochFirstSlot; slot < sourceEpochEndSlot; slot++ { + blocks := sim.indexer.GetBlocksBySlot(slot) + for _, b := range blocks { + blockIndex := b.GetBlockIndex(sim.indexer.ctx) + if blockIndex == nil || blockIndex.BuilderIndex == math.MaxUint64 { + continue + } + if !b.HasExecutionPayload() || b.isPayloadOrphaned { + missedBlocks = append(missedBlocks, missedBlock{ + builderIndex: blockIndex.BuilderIndex, + blockUID: b.BlockUID, + }) + } + } + } + } else { + dbSlots := db.GetSlotsRange(sim.indexer.ctx, uint64(sourceEpochEndSlot-1), uint64(sourceEpochFirstSlot), false, false) + for _, assignedSlot := range dbSlots { + if assignedSlot.Block == nil { + continue + } + if assignedSlot.Block.BuilderIndex < 0 { + continue + } + if assignedSlot.Block.PayloadStatus == dbtypes.PayloadStatusMissing || assignedSlot.Block.PayloadStatus == dbtypes.PayloadStatusOrphaned { + missedBlocks = append(missedBlocks, missedBlock{ + builderIndex: uint64(assignedSlot.Block.BuilderIndex), + blockUID: assignedSlot.Block.BlockUid, + }) + } + } + } + + // Match delayed entries to missed blocks in 
order. Each delayed entry's builder + // index must match the missed block's builder index. Multiple delayed entries for + // the same builder consume successive missed blocks for that builder. + delayedStart := builderCount - delayedCount + refs := make([]*uint64, delayedCount) + consumed := make([]bool, len(missedBlocks)) + + for i := range delayedCount { + queueIdx := delayedStart + i + if queueIdx < 0 || queueIdx >= len(sim.prevState.builderPendingWithdrawals) { + continue + } + wantBuilder := sim.prevState.builderPendingWithdrawals[queueIdx].BuilderIndex + for j, mb := range missedBlocks { + if consumed[j] { + continue + } + if mb.builderIndex == uint64(wantBuilder) { + uid := mb.blockUID + refs[i] = &uid + consumed[j] = true + break + } + } + } + + return refs +} diff --git a/indexer/beacon/statetransition/balance.go b/indexer/beacon/statetransition/balance.go new file mode 100644 index 000000000..dbb746f97 --- /dev/null +++ b/indexer/beacon/statetransition/balance.go @@ -0,0 +1,28 @@ +package statetransition + +import ( + "github.com/ethpandaops/go-eth2-client/spec/phase0" +) + +// processEffectiveBalanceUpdates implements the Electra+ version of +// process_effective_balance_updates. 
+// Modified in Electra: https://github.com/ethereum/consensus-specs/blob/master/specs/electra/beacon-chain.md#modified-process_effective_balance_updates +func processEffectiveBalanceUpdates(s *stateAccessor) { + hysteresisIncrement := s.specs.EffectiveBalanceIncrement / s.specs.HysteresisQuotient + downwardThreshold := hysteresisIncrement * s.specs.HysteresisDownwardMultiplier + upwardThreshold := hysteresisIncrement * s.specs.HysteresisUpwardMultiplier + + for i, v := range s.Validators { + balance := uint64(s.Balances[i]) + maxEB := uint64(s.getMaxEffectiveBalance(v)) + eb := uint64(v.EffectiveBalance) + + if balance+downwardThreshold < eb || eb+upwardThreshold < balance { + newEB := balance - balance%s.specs.EffectiveBalanceIncrement + if newEB > maxEB { + newEB = maxEB + } + v.EffectiveBalance = phase0.Gwei(newEB) + } + } +} diff --git a/indexer/beacon/statetransition/block.go b/indexer/beacon/statetransition/block.go new file mode 100644 index 000000000..c7250640d --- /dev/null +++ b/indexer/beacon/statetransition/block.go @@ -0,0 +1,290 @@ +package statetransition + +import ( + "bytes" + "crypto/sha256" + "fmt" + "time" + + "github.com/ethpandaops/go-eth2-client/spec" + "github.com/ethpandaops/go-eth2-client/spec/bellatrix" + "github.com/ethpandaops/go-eth2-client/spec/capella" + "github.com/ethpandaops/go-eth2-client/spec/deneb" + "github.com/ethpandaops/go-eth2-client/spec/phase0" + "github.com/pk910/dynamic-ssz/sszutils" +) + +// ApplyBlock applies a beacon block to the state in-place (process_block). +// The state must be at the block's slot (call PrepareEpochPreState or ProcessSlots first). +// After this call, the state matches the block's post-state (pre-payload for Gloas). +// +// If parentStateRoot is non-zero, it is used as the hint for the first +// process_slot's state-root caching, skipping the expensive HTR computation. 
+// This is safe when the caller already knows the HTR of the current state +// (e.g., from the parent block's state_root field). Subsequent process_slot +// calls (when there are skipped slots) still compute HTR normally. +// +// Modified in Gloas: https://github.com/ethereum/consensus-specs/blob/master/specs/gloas/beacon-chain.md#modified-block-processing +func (st *StateTransition) applyBlock(state *spec.VersionedBeaconState, block *spec.VersionedSignedBeaconBlock, parentStateRoot phase0.Root, info *ApplyInfo) error { + if state.Version < spec.DataVersionFulu { + return nil + } + + s, err := st.newAccessor(state) + if err != nil { + return fmt.Errorf("failed to create state accessor: %w", err) + } + + blockSlot, err := block.Slot() + if err != nil { + return fmt.Errorf("failed to get block slot: %w", err) + } + + // Advance state to the block's slot via process_slots. + // This implements the spec's process_slots exactly: + // while state.slot < slot: + // process_slot(state) + // if (state.slot + 1) % SLOTS_PER_EPOCH == 0: + // process_epoch(state) + // state.slot += 1 + // https://github.com/ethereum/consensus-specs/blob/master/specs/phase0/beacon-chain.md#process_slots + slotsPerEpoch := st.specs.SlotsPerEpoch + stateRootHint := parentStateRoot + for s.Slot < blockSlot { + if err := processSlotRootCaching(s, stateRootHint); err != nil { + return fmt.Errorf("process_slot at slot %d: %w", s.Slot, err) + } + // Hint only applies to the first iteration; subsequent slots have + // mutated state and need fresh HTR. 
+ stateRootHint = phase0.Root{} + if (uint64(s.Slot)+1)%slotsPerEpoch == 0 { + epochStart := time.Now() + if err := processEpochInternal(s, nil); err != nil { + return fmt.Errorf("process_epoch at slot %d: %w", s.Slot, err) + } + if info != nil { + info.EpochTransitionDur += time.Since(epochStart) + } + } + s.Slot++ + s.writeBack() + } + + proposerIndex, err := block.ProposerIndex() + if err != nil { + return fmt.Errorf("failed to get proposer index: %w", err) + } + + parentRoot, err := block.ParentRoot() + if err != nil { + return fmt.Errorf("failed to get parent root: %w", err) + } + + bodyRoot, err := getBlockBodyRoot(block) + if err != nil { + return fmt.Errorf("failed to get body root: %w", err) + } + + // process_block_header + processBlockHeader(s, blockSlot, proposerIndex, parentRoot, bodyRoot) + + // process_withdrawals + processWithdrawals(s) + + // process_execution_payload (Fulu) — caches the execution payload header + if state.Version == spec.DataVersionFulu { + processFuluExecutionPayload(s, block) + } + + // process_execution_payload_bid (Gloas) — records the builder's bid + if state.Version >= spec.DataVersionGloas { + processExecutionPayloadBid(s, block) + } + + // process_randao + processRandao(s, block) + + // process_eth1_data + processEth1Data(s, block) + + // process_operations + processOperations(s, block) + + // process_sync_aggregate + processSyncAggregate(s, block) + + s.writeBack() + + return nil +} + +// processSlotRootCaching implements process_slot: caches state root and block root. +// If stateRootHint is non-zero it is used directly, skipping the expensive HTR +// computation. The caller is responsible for ensuring the hint matches the +// current state's HTR (e.g., by sourcing it from the parent block's state_root). 
+// https://github.com/ethereum/consensus-specs/blob/master/specs/phase0/beacon-chain.md#process_slot +func processSlotRootCaching(s *stateAccessor, stateRootHint phase0.Root) error { + // Cache state root — use hint if provided, otherwise compute HTR. + var stateRoot phase0.Root + if stateRootHint != (phase0.Root{}) { + stateRoot = stateRootHint + } else { + var err error + stateRoot, err = s.computeStateHTR() + if err != nil { + return fmt.Errorf("failed to compute state root: %w", err) + } + } + + idx := uint64(s.Slot) % s.specs.SlotsPerHistoricalRoot + s.StateRoots[idx] = stateRoot + + // Fill in latest block header's state root if it's the default zero value + if s.LatestBlockHeader != nil && s.LatestBlockHeader.StateRoot == (phase0.Root{}) { + s.LatestBlockHeader.StateRoot = stateRoot + } + + // Cache block root + blockRoot, err := s.computeLatestBlockHeaderHTR() + if err != nil { + return fmt.Errorf("failed to compute block root: %w", err) + } + + s.BlockRoots[idx] = blockRoot + + // Gloas: clear the next slot's execution payload availability bit. + s.clearNextSlotAvailabilityBit() + + return nil +} + +// processBlockHeader implements process_block_header. +// https://github.com/ethereum/consensus-specs/blob/master/specs/phase0/beacon-chain.md#block-header +func processBlockHeader(s *stateAccessor, slot phase0.Slot, proposerIndex phase0.ValidatorIndex, parentRoot phase0.Root, bodyRoot phase0.Root) { + header := &phase0.BeaconBlockHeader{ + Slot: slot, + ProposerIndex: proposerIndex, + ParentRoot: parentRoot, + StateRoot: phase0.Root{}, // filled in by next process_slot + BodyRoot: bodyRoot, + } + + s.LatestBlockHeader = header +} + +// processRandao mixes in the block's RANDAO reveal. 
+// https://github.com/ethereum/consensus-specs/blob/master/specs/phase0/beacon-chain.md#randao +func processRandao(s *stateAccessor, block *spec.VersionedSignedBeaconBlock) { + randaoReveal, err := block.RandaoReveal() + if err != nil { + return + } + + epoch := s.currentEpoch() + idx := uint64(epoch) % s.specs.EpochsPerHistoricalVector + + // Mix in: XOR current mix with hash of the reveal + revealHash := sha256.Sum256(randaoReveal[:]) + var mixed phase0.Root + for i := 0; i < 32; i++ { + mixed[i] = s.RANDAOMixes[idx][i] ^ revealHash[i] + } + s.RANDAOMixes[idx] = mixed +} + +// processEth1Data adds the block's ETH1 vote. +// https://github.com/ethereum/consensus-specs/blob/master/specs/phase0/beacon-chain.md#eth1-data +func processEth1Data(s *stateAccessor, block *spec.VersionedSignedBeaconBlock) { + eth1Data, err := block.ETH1Data() + if err != nil || eth1Data == nil { + return + } + + s.ETH1DataVotes = append(s.ETH1DataVotes, eth1Data) + + // Check if majority vote + voteCount := 0 + for _, vote := range s.ETH1DataVotes { + if bytes.Equal(vote.BlockHash, eth1Data.BlockHash) && + vote.DepositRoot == eth1Data.DepositRoot && + vote.DepositCount == eth1Data.DepositCount { + voteCount++ + } + } + + threshold := s.specs.EpochsPerEth1VotingPeriod * s.specs.SlotsPerEpoch / 2 + if uint64(voteCount) > threshold { + s.ETH1Data = eth1Data + } +} + +// Transaction/Withdrawal list types for SSZ hash_tree_root computation via dynamic-ssz. +type transactionList []bellatrix.Transaction + +var _ = sszutils.Annotate[transactionList](`ssz-max:"1048576,1073741824" ssz-size:"?,?"`) + +type withdrawalList []*capella.Withdrawal + +var _ = sszutils.Annotate[withdrawalList](`ssz-max:"16"`) + +// processFuluExecutionPayload caches the execution payload header for Fulu blocks. 
+// https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/beacon-chain.md#modified-process_execution_payload +func processFuluExecutionPayload(s *stateAccessor, block *spec.VersionedSignedBeaconBlock) { + if block.Fulu == nil || block.Fulu.Message == nil || block.Fulu.Message.Body == nil { + return + } + + payload := block.Fulu.Message.Body.ExecutionPayload + if payload == nil { + return + } + + txs := make(transactionList, len(payload.Transactions)) + copy(txs, payload.Transactions) + txRoot, err := s.dynSsz.HashTreeRoot(txs) + if err != nil { + return + } + + wds := make(withdrawalList, len(payload.Withdrawals)) + copy(wds, payload.Withdrawals) + wdRoot, err := s.dynSsz.HashTreeRoot(wds) + if err != nil { + return + } + + s.LatestExecutionPayloadHeader = &deneb.ExecutionPayloadHeader{ + ParentHash: payload.ParentHash, + FeeRecipient: payload.FeeRecipient, + StateRoot: payload.StateRoot, + ReceiptsRoot: payload.ReceiptsRoot, + LogsBloom: payload.LogsBloom, + PrevRandao: payload.PrevRandao, + BlockNumber: payload.BlockNumber, + GasLimit: payload.GasLimit, + GasUsed: payload.GasUsed, + Timestamp: payload.Timestamp, + ExtraData: payload.ExtraData, + BaseFeePerGas: payload.BaseFeePerGas, + BlockHash: payload.BlockHash, + TransactionsRoot: phase0.Root(txRoot), + WithdrawalsRoot: phase0.Root(wdRoot), + BlobGasUsed: payload.BlobGasUsed, + ExcessBlobGas: payload.ExcessBlobGas, + } +} + +// getBlockBodyRoot computes the body root of a signed beacon block. 
+func getBlockBodyRoot(block *spec.VersionedSignedBeaconBlock) (phase0.Root, error) { + switch block.Version { + case spec.DataVersionFulu: + if block.Fulu != nil && block.Fulu.Message != nil && block.Fulu.Message.Body != nil { + return block.Fulu.Message.Body.HashTreeRoot() + } + case spec.DataVersionGloas: + if block.Gloas != nil && block.Gloas.Message != nil && block.Gloas.Message.Body != nil { + return block.Gloas.Message.Body.HashTreeRoot() + } + } + return phase0.Root{}, fmt.Errorf("unsupported block version: %v", block.Version) +} diff --git a/indexer/beacon/statetransition/builder.go b/indexer/beacon/statetransition/builder.go new file mode 100644 index 000000000..6905504db --- /dev/null +++ b/indexer/beacon/statetransition/builder.go @@ -0,0 +1,199 @@ +package statetransition + +import ( + "github.com/ethpandaops/go-eth2-client/spec" + "github.com/ethpandaops/go-eth2-client/spec/gloas" + "github.com/ethpandaops/go-eth2-client/spec/phase0" +) + +// Gloas-specific spec constants for builder payment quorum. +const ( + BuilderPaymentThresholdNumerator = 6 + BuilderPaymentThresholdDenominator = 10 +) + +// processBuilderPendingPayments implements process_builder_pending_payments (Gloas). +// Evaluates the first SLOTS_PER_EPOCH entries of BuilderPendingPayments against +// the quorum threshold. Qualifying payments are promoted to BuilderPendingWithdrawals. +// Then the 2-epoch window shifts forward. +// New in Gloas: https://github.com/ethereum/consensus-specs/blob/master/specs/gloas/beacon-chain.md#new-process_builder_pending_payments +// processBuilderPendingPayments returns the number of delayed payments appended to +// BuilderPendingWithdrawals. 
+func processBuilderPendingPayments(s *stateAccessor) uint32 { + slotsPerEpoch := s.specs.SlotsPerEpoch + quorum := getBuilderPaymentQuorumThreshold(s) + + // Evaluate first half (epoch K-1 payments) + limit := slotsPerEpoch + if limit > uint64(len(s.BuilderPendingPayments)) { + limit = uint64(len(s.BuilderPendingPayments)) + } + + count := uint32(0) + for i := uint64(0); i < limit; i++ { + payment := s.BuilderPendingPayments[i] + if payment == nil || payment.Withdrawal == nil { + continue + } + if uint64(payment.Weight) >= quorum { + s.BuilderPendingWithdrawals = append(s.BuilderPendingWithdrawals, payment.Withdrawal) + count++ + } + } + + // Shift window: move second half to first, fill second with empty entries + if uint64(len(s.BuilderPendingPayments)) >= 2*slotsPerEpoch { + copy(s.BuilderPendingPayments[:slotsPerEpoch], s.BuilderPendingPayments[slotsPerEpoch:2*slotsPerEpoch]) + for i := slotsPerEpoch; i < 2*slotsPerEpoch; i++ { + s.BuilderPendingPayments[i] = &gloas.BuilderPendingPayment{} + } + } + + return count +} + +// getBuilderPaymentQuorumThreshold computes the quorum threshold for builder payments. +// New in Gloas: https://github.com/ethereum/consensus-specs/blob/master/specs/gloas/beacon-chain.md#new-get_builder_payment_quorum_threshold +func getBuilderPaymentQuorumThreshold(s *stateAccessor) uint64 { + totalActiveBalance := uint64(s.getTotalActiveBalance()) + perSlotBalance := totalActiveBalance / s.specs.SlotsPerEpoch + return perSlotBalance * BuilderPaymentThresholdNumerator / BuilderPaymentThresholdDenominator +} + +// processPtcWindow implements process_ptc_window (Gloas). +// Slides the PTC assignment window and computes new assignments for the lookahead epoch. +// New in Gloas: https://github.com/ethereum/consensus-specs/blob/master/specs/gloas/beacon-chain.md#new-process_ptc_window +func processPtcWindow(s *stateAccessor) { + // PTC window is Gloas-only. 
+ if s.version < spec.DataVersionGloas { + return + } + + if len(s.PTCWindow) == 0 { + return + } + + slotsPerEpoch := s.specs.SlotsPerEpoch + + // Slide window: remove first SLOTS_PER_EPOCH entries + windowLen := uint64(len(s.PTCWindow)) + if windowLen <= slotsPerEpoch { + return + } + + copy(s.PTCWindow, s.PTCWindow[slotsPerEpoch:]) + + // Compute new PTC assignments for the last SLOTS_PER_EPOCH entries + nextEpoch := s.currentEpoch() + phase0.Epoch(s.specs.MinSeedLookahead) + 1 + startSlot := uint64(nextEpoch) * slotsPerEpoch + + lastStart := windowLen - slotsPerEpoch + for i := uint64(0); i < slotsPerEpoch && lastStart+i < windowLen; i++ { + s.PTCWindow[lastStart+i] = computePtc(s, phase0.Slot(startSlot+i)) + } +} + +// computePtc computes the PTC (Payload Timeliness Committee) for a given slot. +// Concatenates all beacon committees for the slot, then uses balance-weighted +// selection (without shuffling) to pick PTC_SIZE members. +// New in Gloas: https://github.com/ethereum/consensus-specs/blob/master/specs/gloas/beacon-chain.md#new-compute_ptc +func computePtc(s *stateAccessor, slot phase0.Slot) []phase0.ValidatorIndex { + epoch := phase0.Epoch(uint64(slot) / s.specs.SlotsPerEpoch) + ptcSize := s.specs.PtcSize + if ptcSize == 0 { + return nil + } + + // seed = hash(get_seed(state, epoch, DOMAIN_PTC_ATTESTER) + uint_to_bytes(slot)) + epochSeed := getSeed(s, epoch, phase0.DomainType(s.specs.DomainPtcAttester)) + var buf [40]byte + copy(buf[:32], epochSeed[:]) + buf[32] = byte(slot) + buf[33] = byte(slot >> 8) + buf[34] = byte(slot >> 16) + buf[35] = byte(slot >> 24) + buf[36] = byte(slot >> 32) + buf[37] = byte(slot >> 40) + buf[38] = byte(slot >> 48) + buf[39] = byte(slot >> 56) + seed := hash256(buf[:]) + + // Concatenate all committees for this slot + cc := newCommitteeCache() + committeesPerSlot := s.getCommitteeCountPerSlot(epoch) + var indices []phase0.ValidatorIndex + for ci := uint64(0); ci < committeesPerSlot; ci++ { + committee := 
s.getBeaconCommittee(slot, ci, cc) + indices = append(indices, committee...) + } + + if len(indices) == 0 { + return nil + } + + // compute_balance_weighted_selection(state, indices, seed, size=PTC_SIZE, shuffle_indices=False) + return computeBalanceWeightedSelection(s, indices, seed, ptcSize, false) +} + +// computeBalanceWeightedSelection implements compute_balance_weighted_selection. +// Selects `size` validators from `indices` using balance-weighted rejection sampling. +// If shuffleIndices is true, candidates are sampled via compute_shuffled_index; +// otherwise they are traversed in order. +// https://github.com/ethereum/consensus-specs/blob/master/specs/gloas/beacon-chain.md#new-compute_balance_weighted_selection +func computeBalanceWeightedSelection(s *stateAccessor, indices []phase0.ValidatorIndex, seed phase0.Root, size uint64, shuffleIndices bool) []phase0.ValidatorIndex { + const maxRandomValue = 65535 // 2^16 - 1 + total := uint64(len(indices)) + if total == 0 { + return nil + } + + maxEB := uint64(s.specs.MaxEffectiveBalanceElectra) + if maxEB == 0 { + maxEB = uint64(s.specs.MaxEffectiveBalance) + } + + // Pre-compute effective balances for the candidate indices + effectiveBalances := make([]uint64, total) + for j, idx := range indices { + effectiveBalances[j] = uint64(s.Validators[idx].EffectiveBalance) + } + + selected := make([]phase0.ValidatorIndex, 0, size) + var randomBytes phase0.Root + i := uint64(0) + + for uint64(len(selected)) < size { + offset := (i % 16) * 2 + if offset == 0 { + // random_bytes = hash(seed + uint_to_bytes(i // 16)) + var rbuf [40]byte + copy(rbuf[:32], seed[:]) + quotient := i / 16 + rbuf[32] = byte(quotient) + rbuf[33] = byte(quotient >> 8) + rbuf[34] = byte(quotient >> 16) + rbuf[35] = byte(quotient >> 24) + rbuf[36] = byte(quotient >> 32) + rbuf[37] = byte(quotient >> 40) + rbuf[38] = byte(quotient >> 48) + rbuf[39] = byte(quotient >> 56) + randomBytes = hash256(rbuf[:]) + } + + nextIndex := i % total + if 
shuffleIndices { + nextIndex = computeShuffledIndex(nextIndex, total, seed, s.specs) + } + + weight := effectiveBalances[nextIndex] * maxRandomValue + randomValue := uint64(randomBytes[offset]) | uint64(randomBytes[offset+1])<<8 + threshold := maxEB * randomValue + + if weight >= threshold { + selected = append(selected, indices[nextIndex]) + } + i++ + } + + return selected +} diff --git a/indexer/beacon/statetransition/committees.go b/indexer/beacon/statetransition/committees.go new file mode 100644 index 000000000..bb9ce6cdc --- /dev/null +++ b/indexer/beacon/statetransition/committees.go @@ -0,0 +1,299 @@ +package statetransition + +import ( + "encoding/binary" + "fmt" + + "github.com/ethpandaops/dora/clients/consensus" + "github.com/ethpandaops/go-eth2-client/spec/altair" + "github.com/ethpandaops/go-eth2-client/spec/phase0" + blsu "github.com/protolambda/bls12-381-util" +) + +// committeeKey uniquely identifies a beacon committee by slot and index. +type committeeKey struct { + slot phase0.Slot + index uint64 +} + +// committeeCache caches computed beacon committees +// to avoid recomputation across multiple attestations. +type committeeCache struct { + cache map[committeeKey][]phase0.ValidatorIndex +} + +func newCommitteeCache() *committeeCache { + return &committeeCache{ + cache: make(map[committeeKey][]phase0.ValidatorIndex, 64), + } +} + +// get returns the cached committee, or nil if not cached. +func (c *committeeCache) get(slot phase0.Slot, index uint64) []phase0.ValidatorIndex { + return c.cache[committeeKey{slot: slot, index: index}] +} + +// put stores a committee in the cache. +func (c *committeeCache) put(slot phase0.Slot, index uint64, committee []phase0.ValidatorIndex) { + c.cache[committeeKey{slot: slot, index: index}] = committee +} + +// getCommitteeCountPerSlot returns the number of committees per slot for the given epoch. 
+// https://github.com/ethereum/consensus-specs/blob/master/specs/phase0/beacon-chain.md#get_committee_count_per_slot +func (s *stateAccessor) getCommitteeCountPerSlot(epoch phase0.Epoch) uint64 { + activeCount := uint64(len(s.getActiveValidatorIndices(epoch))) + committeesPerSlot := activeCount / s.specs.SlotsPerEpoch / s.specs.TargetCommitteeSize + if committeesPerSlot > s.specs.MaxCommitteesPerSlot { + committeesPerSlot = s.specs.MaxCommitteesPerSlot + } + if committeesPerSlot < 1 { + committeesPerSlot = 1 + } + return committeesPerSlot +} + +// getBeaconCommittee returns the beacon committee for the given slot and committee index. +// Uses the provided cache to avoid recomputation. +// https://github.com/ethereum/consensus-specs/blob/master/specs/phase0/beacon-chain.md#get_beacon_committee +func (s *stateAccessor) getBeaconCommittee(slot phase0.Slot, committeeIndex uint64, cc *committeeCache) []phase0.ValidatorIndex { + if cc != nil { + if cached := cc.get(slot, committeeIndex); cached != nil { + return cached + } + } + + epoch := phase0.Epoch(uint64(slot) / s.specs.SlotsPerEpoch) + committeesPerSlot := s.getCommitteeCountPerSlot(epoch) + activeIndices := s.getActiveValidatorIndices(epoch) + seed := getSeed(s, epoch, phase0.DomainType(s.specs.DomainBeaconAttester)) + + slotIndex := uint64(slot) % s.specs.SlotsPerEpoch + index := slotIndex*committeesPerSlot + committeeIndex + count := committeesPerSlot * s.specs.SlotsPerEpoch + + committee := computeCommittee(activeIndices, seed, index, count, s.specs) + + if cc != nil { + cc.put(slot, committeeIndex, committee) + } + + return committee +} + +// computeCommittee computes a committee from the given parameters (no cache). 
+// https://github.com/ethereum/consensus-specs/blob/master/specs/phase0/beacon-chain.md#compute_committee +func computeCommittee(indices []phase0.ValidatorIndex, seed phase0.Root, index, count uint64, specs *consensus.ChainSpec) []phase0.ValidatorIndex { + if count == 0 { + return nil + } + + indexCount := uint64(len(indices)) + start := (indexCount * index) / count + end := (indexCount * (index + 1)) / count + + shuffledIndices := computeShuffledBatch(start, end, indexCount, seed, specs) + committee := make([]phase0.ValidatorIndex, len(shuffledIndices)) + for i, shuffled := range shuffledIndices { + committee[i] = indices[shuffled] + } + + return committee +} + +// computeShuffledBatch computes shuffled indices for a contiguous range [start, end) +// using the swap-or-not shuffle, with per-round pivot caching. +// Much faster than calling computeShuffledIndex individually for each index. +func computeShuffledBatch(start, end, indexCount uint64, seed phase0.Root, specs *consensus.ChainSpec) []uint64 { + n := end - start + result := make([]uint64, n) + for i := uint64(0); i < n; i++ { + result[i] = start + i + } + + for currentRound := uint64(0); currentRound < specs.ShuffleRoundCount; currentRound++ { + // Compute pivot once per round (depends only on seed + round) + var buf [33]byte + copy(buf[0:32], seed[:]) + buf[32] = byte(currentRound) + pivotHash := hash256(buf[:]) + pivot := binary.LittleEndian.Uint64(pivotHash[:8]) % indexCount + + // Pre-compute the seed+round prefix for source hashes + var srcPrefix [33]byte + copy(srcPrefix[0:32], seed[:]) + srcPrefix[32] = byte(currentRound) + + // Cache source hashes by position/256 bucket + sourceCache := make(map[uint32]phase0.Root) + + for i := uint64(0); i < n; i++ { + index := result[i] + flip := (pivot + indexCount - index) % indexCount + position := index + if flip > index { + position = flip + } + + bucket := uint32(position / 256) + source, ok := sourceCache[bucket] + if !ok { + var buf2 [37]byte + 
copy(buf2[0:33], srcPrefix[:]) + binary.LittleEndian.PutUint32(buf2[33:37], bucket) + source = hash256(buf2[:]) + sourceCache[bucket] = source + } + + byteIdx := (position % 256) / 8 + bitIdx := position % 8 + bit := (source[byteIdx] >> bitIdx) & 1 + + if bit == 1 { + result[i] = flip + } + } + } + + return result +} + +// getAttestingIndices returns the set of attesting indices for an Electra+ attestation. +// https://github.com/ethereum/consensus-specs/blob/master/specs/electra/beacon-chain.md#modified-get_attesting_indices +func (s *stateAccessor) getAttestingIndices(slot phase0.Slot, committeeBits []byte, aggregationBits []byte, cc *committeeCache) []phase0.ValidatorIndex { + committeeIndices := getCommitteeIndicesFromBits(committeeBits) + + attestingSet := make(map[phase0.ValidatorIndex]struct{}) + committeeOffset := 0 + + for _, ci := range committeeIndices { + committee := s.getBeaconCommittee(slot, ci, cc) + for i, validatorIndex := range committee { + bitPos := committeeOffset + i + byteIdx := bitPos / 8 + bitIdx := bitPos % 8 + if byteIdx < len(aggregationBits) && aggregationBits[byteIdx]&(1<= maxEB*randomValue { + pubkeys = append(pubkeys, s.Validators[candidateIndex].PublicKey) + } + i++ + } + + aggregate, err := aggregateBLSPubkeys(pubkeys) + if err != nil { + // An aggregation failure means a malformed pubkey ended up in the + // committee, which is impossible for a valid chain — surface it loudly + // rather than silently producing a wrong state root. + panic(fmt.Errorf("failed to aggregate sync committee pubkeys: %w", err)) + } + + return &altair.SyncCommittee{ + Pubkeys: pubkeys, + AggregatePubkey: aggregate, + } +} + +// aggregateBLSPubkeys computes the BLS G1 aggregate of the given pubkeys. 
+// https://github.com/ethereum/consensus-specs/blob/master/specs/phase0/beacon-chain.md#bls-signatures +func aggregateBLSPubkeys(pubkeys []phase0.BLSPubKey) (phase0.BLSPubKey, error) { + if len(pubkeys) == 0 { + return phase0.BLSPubKey{}, fmt.Errorf("cannot aggregate empty pubkey set") + } + parsed := make([]*blsu.Pubkey, len(pubkeys)) + for i, pk := range pubkeys { + var raw [48]byte = pk + p := new(blsu.Pubkey) + if err := p.Deserialize(&raw); err != nil { + return phase0.BLSPubKey{}, fmt.Errorf("invalid pubkey at index %d: %w", i, err) + } + parsed[i] = p + } + agg, err := blsu.AggregatePubkeys(parsed) + if err != nil { + return phase0.BLSPubKey{}, err + } + out := agg.Serialize() + return phase0.BLSPubKey(out), nil +} diff --git a/indexer/beacon/statetransition/helpers.go b/indexer/beacon/statetransition/helpers.go new file mode 100644 index 000000000..e3b4b64d0 --- /dev/null +++ b/indexer/beacon/statetransition/helpers.go @@ -0,0 +1,110 @@ +package statetransition + +import ( + "crypto/sha256" + "encoding/binary" + + "github.com/ethpandaops/dora/clients/consensus" + "github.com/ethpandaops/go-eth2-client/spec/phase0" +) + +// hash256 returns the SHA-256 hash of data. +func hash256(data []byte) phase0.Root { + return phase0.Root(sha256.Sum256(data)) +} + +// hashTreeRoot computes the SSZ hash_tree_root of a fixed-size vector of +// roots (e.g., state.block_roots / state.state_roots, both +// Vector[Root, SLOTS_PER_HISTORICAL_ROOT]). Each root is already a 32-byte +// chunk, so no chunkification or length mixin is needed — just the binary +// Merkle tree over the leaves padded to the next power of two. 
+func hashTreeRoot(roots []phase0.Root) phase0.Root { + if len(roots) == 0 { + return phase0.Root{} + } + + // Build Merkle tree bottom-up + leaves := make([]phase0.Root, len(roots)) + copy(leaves, roots) + + // Pad to next power of 2 + size := uint64(1) + for size < uint64(len(leaves)) { + size *= 2 + } + for uint64(len(leaves)) < size { + leaves = append(leaves, phase0.Root{}) + } + + for len(leaves) > 1 { + next := make([]phase0.Root, len(leaves)/2) + for i := 0; i < len(next); i++ { + var buf [64]byte + copy(buf[:32], leaves[2*i][:]) + copy(buf[32:], leaves[2*i+1][:]) + next[i] = hash256(buf[:]) + } + leaves = next + } + + return leaves[0] +} + +// getSeed computes the seed for the given epoch and domain type. +// https://github.com/ethereum/consensus-specs/blob/master/specs/phase0/beacon-chain.md#get_seed +func getSeed(s *stateAccessor, epoch phase0.Epoch, domainType phase0.DomainType) phase0.Root { + mixEpoch := epoch + phase0.Epoch(s.specs.EpochsPerHistoricalVector) - phase0.Epoch(s.specs.MinSeedLookahead) - 1 + mix := s.RANDAOMixes[uint64(mixEpoch)%s.specs.EpochsPerHistoricalVector] + + var buf [4 + 8 + 32]byte + copy(buf[0:4], domainType[:]) + binary.LittleEndian.PutUint64(buf[4:12], uint64(epoch)) + copy(buf[12:44], mix[:]) + + result := hash256(buf[:]) + return result +} + +// computeShuffledIndex implements the swap-or-not shuffle. 
+// https://github.com/ethereum/consensus-specs/blob/master/specs/phase0/beacon-chain.md#compute_shuffled_index +func computeShuffledIndex(index, indexCount uint64, seed phase0.Root, specs *consensus.ChainSpec) uint64 { + if indexCount == 0 { + return 0 + } + + for currentRound := uint64(0); currentRound < specs.ShuffleRoundCount; currentRound++ { + var buf [33]byte + copy(buf[0:32], seed[:]) + buf[32] = byte(currentRound) + pivotHash := hash256(buf[:]) + pivot := binary.LittleEndian.Uint64(pivotHash[:8]) % indexCount + + flip := (pivot + indexCount - index) % indexCount + position := index + if flip > index { + position = flip + } + + var buf2 [33 + 4]byte + copy(buf2[0:32], seed[:]) + buf2[32] = byte(currentRound) + binary.LittleEndian.PutUint32(buf2[33:37], uint32(position/256)) + source := hash256(buf2[:]) + + byteIdx := (position % 256) / 8 + bitIdx := position % 8 + bit := (source[byteIdx] >> bitIdx) & 1 + + if bit == 1 { + index = flip + } + } + + return index +} + +// computeActivationExitEpoch returns the epoch at which a validator activation/exit takes effect. +// https://github.com/ethereum/consensus-specs/blob/master/specs/phase0/beacon-chain.md#compute_activation_exit_epoch +func computeActivationExitEpoch(epoch phase0.Epoch, specs *consensus.ChainSpec) phase0.Epoch { + return epoch + 1 + phase0.Epoch(specs.MaxSeedLookahead) +} diff --git a/indexer/beacon/statetransition/justification.go b/indexer/beacon/statetransition/justification.go new file mode 100644 index 000000000..ff7fe0535 --- /dev/null +++ b/indexer/beacon/statetransition/justification.go @@ -0,0 +1,85 @@ +package statetransition + +import ( + "github.com/ethpandaops/go-eth2-client/spec/phase0" +) + +// processJustificationAndFinalization implements the Altair+ version of +// process_justification_and_finalization. 
+// https://github.com/ethereum/consensus-specs/blob/master/specs/phase0/beacon-chain.md#justification-and-finalization +func processJustificationAndFinalization(s *stateAccessor) error { + currentEpoch := s.currentEpoch() + if currentEpoch <= 1 { + return nil + } + + previousEpoch := s.previousEpoch() + oldPreviousJustifiedCheckpoint := s.PreviousJustifiedCheckpoint + oldCurrentJustifiedCheckpoint := s.CurrentJustifiedCheckpoint + + // Process justification + s.PreviousJustifiedCheckpoint = s.CurrentJustifiedCheckpoint + + // Shift justification bits + if len(s.JustificationBits) > 0 { + s.JustificationBits[0] = (s.JustificationBits[0] << 1) & 0x0F + } + + totalActiveBalance := s.getTotalActiveBalance() + + // Previous epoch justification + previousTargetBalance := s.getUnslashedParticipatingBalance(TimelyTargetFlagIndex, previousEpoch) + if previousTargetBalance*3 >= totalActiveBalance*2 { + s.CurrentJustifiedCheckpoint = &phase0.Checkpoint{ + Epoch: previousEpoch, + Root: getBlockRoot(s, previousEpoch), + } + if len(s.JustificationBits) > 0 { + s.JustificationBits[0] |= 0x02 // bit 1 + } + } + + // Current epoch justification + currentTargetBalance := s.getUnslashedParticipatingBalance(TimelyTargetFlagIndex, currentEpoch) + if currentTargetBalance*3 >= totalActiveBalance*2 { + s.CurrentJustifiedCheckpoint = &phase0.Checkpoint{ + Epoch: currentEpoch, + Root: getBlockRoot(s, currentEpoch), + } + if len(s.JustificationBits) > 0 { + s.JustificationBits[0] |= 0x01 // bit 0 + } + } + + bits := byte(0) + if len(s.JustificationBits) > 0 { + bits = s.JustificationBits[0] + } + + // Process finalizations + // The 2/3/4th most recent epochs are justified, the 2nd using the 4th as source + if bits&0x0E == 0x0E && oldPreviousJustifiedCheckpoint.Epoch+3 == currentEpoch { + s.FinalizedCheckpoint = oldPreviousJustifiedCheckpoint + } + // The 2/3rd most recent epochs are justified, the 2nd using the 3rd as source + if bits&0x06 == 0x06 && oldPreviousJustifiedCheckpoint.Epoch+2 
== currentEpoch { + s.FinalizedCheckpoint = oldPreviousJustifiedCheckpoint + } + // The 1/2/3rd most recent epochs are justified, the 1st using the 3rd as source + if bits&0x07 == 0x07 && oldCurrentJustifiedCheckpoint.Epoch+2 == currentEpoch { + s.FinalizedCheckpoint = oldCurrentJustifiedCheckpoint + } + // The 1/2nd most recent epochs are justified, the 1st using the 2nd as source + if bits&0x03 == 0x03 && oldCurrentJustifiedCheckpoint.Epoch+1 == currentEpoch { + s.FinalizedCheckpoint = oldCurrentJustifiedCheckpoint + } + + return nil +} + +// getBlockRoot returns the block root at the start of the given epoch. +// https://github.com/ethereum/consensus-specs/blob/master/specs/phase0/beacon-chain.md#get_block_root +func getBlockRoot(s *stateAccessor, epoch phase0.Epoch) phase0.Root { + startSlot := uint64(epoch) * s.specs.SlotsPerEpoch + return s.BlockRoots[startSlot%s.specs.SlotsPerHistoricalRoot] +} diff --git a/indexer/beacon/statetransition/lookahead.go b/indexer/beacon/statetransition/lookahead.go new file mode 100644 index 000000000..0e3217764 --- /dev/null +++ b/indexer/beacon/statetransition/lookahead.go @@ -0,0 +1,120 @@ +package statetransition + +import ( + "github.com/ethpandaops/go-eth2-client/spec/phase0" +) + +// processProposerLookahead implements process_proposer_lookahead (Fulu+/EIP-7917). +// Slides the proposer lookahead window and computes new proposer indices for the +// lookahead epoch. 
+// New in Fulu: https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/beacon-chain.md#new-process_proposer_lookahead +func processProposerLookahead(s *stateAccessor) { + slotsPerEpoch := s.specs.SlotsPerEpoch + lookaheadLen := uint64(len(s.ProposerLookahead)) + if lookaheadLen == 0 || slotsPerEpoch == 0 { + return + } + + // Slide window: shift left by SLOTS_PER_EPOCH + if lookaheadLen > slotsPerEpoch { + copy(s.ProposerLookahead, s.ProposerLookahead[slotsPerEpoch:]) + } + + // Compute new proposer indices for the last SLOTS_PER_EPOCH entries + nextEpoch := s.currentEpoch() + phase0.Epoch(s.specs.MinSeedLookahead) + 1 + proposers := getBeaconProposerIndices(s, nextEpoch) + + lastStart := lookaheadLen - slotsPerEpoch + for i := uint64(0); i < slotsPerEpoch && i < uint64(len(proposers)); i++ { + if lastStart+i < lookaheadLen { + s.ProposerLookahead[lastStart+i] = proposers[i] + } + } +} + +// getBeaconProposerIndices computes the proposer index for each slot in the given epoch. +// Spec: get_beacon_proposer_index applied to each slot. +func getBeaconProposerIndices(s *stateAccessor, epoch phase0.Epoch) []phase0.ValidatorIndex { + slotsPerEpoch := s.specs.SlotsPerEpoch + startSlot := uint64(epoch) * slotsPerEpoch + indices := make([]phase0.ValidatorIndex, slotsPerEpoch) + + activeIndices := s.getActiveValidatorIndices(epoch) + if len(activeIndices) == 0 { + return indices + } + + for slotOffset := uint64(0); slotOffset < slotsPerEpoch; slotOffset++ { + slot := phase0.Slot(startSlot + slotOffset) + indices[slotOffset] = computeProposerIndex(s, activeIndices, epoch, slot) + } + + return indices +} + +// computeProposerIndex selects the proposer for a specific slot using the +// Electra+ compute_proposer_index with per-slot seed from Fulu's compute_proposer_indices. 
+// Modified in Electra: https://github.com/ethereum/consensus-specs/blob/master/specs/electra/beacon-chain.md#modified-compute_proposer_index +// Called via Fulu: https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/beacon-chain.md#new-compute_proposer_indices +func computeProposerIndex(s *stateAccessor, activeIndices []phase0.ValidatorIndex, epoch phase0.Epoch, slot phase0.Slot) phase0.ValidatorIndex { + if len(activeIndices) == 0 { + return 0 + } + + epochSeed := getSeed(s, epoch, phase0.DomainType(s.specs.DomainBeaconProposer)) + + // Compute per-slot seed: hash(epoch_seed + uint_to_bytes(slot)) + // Fulu compute_proposer_indices: seeds = [hash(seed + uint_to_bytes(slot)) for each slot] + var buf [40]byte + copy(buf[:32], epochSeed[:]) + buf[32] = byte(slot) + buf[33] = byte(slot >> 8) + buf[34] = byte(slot >> 16) + buf[35] = byte(slot >> 24) + buf[36] = byte(slot >> 32) + buf[37] = byte(slot >> 40) + buf[38] = byte(slot >> 48) + buf[39] = byte(slot >> 56) + seed := hash256(buf[:]) + + indexCount := uint64(len(activeIndices)) + maxEB := uint64(s.specs.MaxEffectiveBalanceElectra) + if maxEB == 0 { + maxEB = uint64(s.specs.MaxEffectiveBalance) + } + + // Electra: 16-bit random values (MAX_RANDOM_VALUE = 2^16 - 1) + const maxRandomValue = 65535 + + i := uint64(0) + for { + candidateIndex := activeIndices[computeShuffledIndex(i%indexCount, indexCount, seed, s.specs)] + + // Electra: random_bytes = hash(seed + uint_to_bytes(i // 16)) + // offset = (i % 16) * 2; random_value = LE uint16 from random_bytes[offset:offset+2] + var rbuf [40]byte + copy(rbuf[:32], seed[:]) + quotient := i / 16 + rbuf[32] = byte(quotient) + rbuf[33] = byte(quotient >> 8) + rbuf[34] = byte(quotient >> 16) + rbuf[35] = byte(quotient >> 24) + rbuf[36] = byte(quotient >> 32) + rbuf[37] = byte(quotient >> 40) + rbuf[38] = byte(quotient >> 48) + rbuf[39] = byte(quotient >> 56) + h := hash256(rbuf[:]) + offset := (i % 16) * 2 + randomValue := uint64(h[offset]) | 
uint64(h[offset+1])<<8 + + effectiveBalance := uint64(s.Validators[candidateIndex].EffectiveBalance) + if effectiveBalance*maxRandomValue >= maxEB*randomValue { + return candidateIndex + } + i++ + + if i > indexCount*100 { + return activeIndices[0] + } + } +} diff --git a/indexer/beacon/statetransition/operations.go b/indexer/beacon/statetransition/operations.go new file mode 100644 index 000000000..4760a5947 --- /dev/null +++ b/indexer/beacon/statetransition/operations.go @@ -0,0 +1,704 @@ +package statetransition + +import ( + "bytes" + "slices" + + "github.com/ethpandaops/go-eth2-client/spec" + "github.com/ethpandaops/go-eth2-client/spec/altair" + "github.com/ethpandaops/go-eth2-client/spec/capella" + "github.com/ethpandaops/go-eth2-client/spec/electra" + "github.com/ethpandaops/go-eth2-client/spec/gloas" + "github.com/ethpandaops/go-eth2-client/spec/phase0" +) + +// processOperations implements process_operations. +// Processes all block body operations: slashings, attestations, deposits, exits, etc. 
+// +// https://github.com/ethereum/consensus-specs/blob/master/specs/phase0/beacon-chain.md#operations +// Modified in Gloas: https://github.com/ethereum/consensus-specs/blob/master/specs/gloas/beacon-chain.md#modified-process_operations +func processOperations(s *stateAccessor, block *spec.VersionedSignedBeaconBlock) { + proposerSlashings, _ := block.ProposerSlashings() + for _, slashing := range proposerSlashings { + processProposerSlashing(s, slashing) + } + + attesterSlashings, _ := block.AttesterSlashings() + for _, slashing := range attesterSlashings { + processAttesterSlashing(s, slashing) + } + + processAttestations(s, block) + + voluntaryExits, _ := block.VoluntaryExits() + for _, exit := range voluntaryExits { + processVoluntaryExit(s, exit) + } + + blsChanges, _ := block.BLSToExecutionChanges() + for _, change := range blsChanges { + processBLSToExecutionChange(s, change) + } + + // Process execution requests (Electra+): deposits, withdrawals, consolidations. + // In Fulu/Gloas, these come from the execution layer via the block body, NOT from + // the legacy deposit mechanism. + // https://github.com/ethereum/consensus-specs/blob/master/specs/electra/beacon-chain.md#new-process_execution_requests + requests, err := block.ExecutionRequests() + if err == nil && requests != nil { + for _, deposit := range requests.Deposits { + s.PendingDeposits = append(s.PendingDeposits, &electra.PendingDeposit{ + Pubkey: deposit.Pubkey, + WithdrawalCredentials: deposit.WithdrawalCredentials, + Amount: deposit.Amount, + Signature: deposit.Signature, + Slot: s.Slot, + }) + } + for _, withdrawal := range requests.Withdrawals { + processWithdrawalRequest(s, withdrawal) + } + for _, consolidation := range requests.Consolidations { + processConsolidationRequest(s, consolidation) + } + } +} + +// processProposerSlashing processes a proposer slashing. 
+// https://github.com/ethereum/consensus-specs/blob/master/specs/phase0/beacon-chain.md#proposer-slashings +func processProposerSlashing(s *stateAccessor, slashing *phase0.ProposerSlashing) { + if slashing == nil || slashing.SignedHeader1 == nil { + return + } + proposerIndex := slashing.SignedHeader1.Message.ProposerIndex + if int(proposerIndex) >= len(s.Validators) { + return + } + slashValidator(s, proposerIndex) +} + +// processAttesterSlashing processes an attester slashing. +// https://github.com/ethereum/consensus-specs/blob/master/specs/phase0/beacon-chain.md#attester-slashings +func processAttesterSlashing(s *stateAccessor, slashing any) { + att1, att2 := getSlashingAttestations(slashing) + if att1 == nil || att2 == nil { + return + } + + att1Indices, _ := att1.AttestingIndices() + att2Indices, _ := att2.AttestingIndices() + if att1Indices == nil || att2Indices == nil { + return + } + + att2Set := make(map[uint64]bool, len(att2Indices)) + for _, idx := range att2Indices { + att2Set[idx] = true + } + + for _, idx := range att1Indices { + if att2Set[idx] && phase0.ValidatorIndex(idx) < phase0.ValidatorIndex(len(s.Validators)) { + slashValidator(s, phase0.ValidatorIndex(idx)) + } + } +} + +// processAttestations processes all attestations in the block. 
// Modified in Electra: https://github.com/ethereum/consensus-specs/blob/master/specs/electra/beacon-chain.md#modified-process_attestation
func processAttestations(s *stateAccessor, block *spec.VersionedSignedBeaconBlock) {
	// Only Fulu and Gloas block bodies are read here; other fork versions
	// fall through with an empty attestation list.
	var attestations []*electra.Attestation

	switch block.Version {
	case spec.DataVersionFulu:
		if block.Fulu != nil && block.Fulu.Message != nil && block.Fulu.Message.Body != nil {
			attestations = block.Fulu.Message.Body.Attestations
		}
	case spec.DataVersionGloas:
		if block.Gloas != nil && block.Gloas.Message != nil && block.Gloas.Message.Body != nil {
			attestations = block.Gloas.Message.Body.Attestations
		}
	}

	// A shared committee cache avoids recomputing the shuffle for
	// attestations that reference the same slot/committee.
	for _, att := range attestations {
		processAttestation(s, att, s.caches.committeeCache)
	}
}

// processAttestation processes a single Electra+ attestation, updating participation flags.
// Modified in Electra: https://github.com/ethereum/consensus-specs/blob/master/specs/electra/beacon-chain.md#modified-process_attestation
func processAttestation(s *stateAccessor, att *electra.Attestation, cc *committeeCache) {
	if att == nil || att.Data == nil {
		return
	}

	data := att.Data
	currentEpoch := s.currentEpoch()
	previousEpoch := s.previousEpoch()

	// Attestations outside the current/previous epoch window are ignored.
	if data.Target.Epoch != currentEpoch && data.Target.Epoch != previousEpoch {
		return
	}

	attestingIndices := s.getAttestingIndices(data.Slot, []byte(att.CommitteeBits), []byte(att.AggregationBits), cc)

	// Determine which participation flags to set based on attestation properties.
	isCurrentEpoch := data.Target.Epoch == currentEpoch
	// NOTE(review): assumes data.Slot <= s.Slot; otherwise this uint64
	// subtraction wraps — confirm callers only feed included attestations.
	inclusionDelay := uint64(s.Slot) - uint64(data.Slot)

	// Check correctness of source, target, head
	justifiedCheckpoint := s.PreviousJustifiedCheckpoint
	if isCurrentEpoch {
		justifiedCheckpoint = s.CurrentJustifiedCheckpoint
	}

	// Spec variable mapping (Deneb+):
	//   is_matching_source = data.source == justified_checkpoint
	//   is_matching_target = is_matching_source and target_root_matches
	//   is_matching_head   = is_matching_target and head_root_matches
	isMatchingSource := data.Source.Epoch == justifiedCheckpoint.Epoch && data.Source.Root == justifiedCheckpoint.Root
	isMatchingTarget := isMatchingSource && data.Target.Root == getBlockRoot(s, data.Target.Epoch)
	isMatchingHead := isMatchingTarget && data.BeaconBlockRoot == getBlockRootAtSlot(s, data.Slot)

	// Gloas: payload_matches check for head attestation.
	// https://github.com/ethereum/consensus-specs/blob/master/specs/gloas/beacon-chain.md#modified-process_attestation
	payloadMatches := true
	if s.version >= spec.DataVersionGloas {
		if isAttestationSameSlot(s, data) {
			payloadMatches = true
		} else {
			// data.Index encodes the expected payload-availability bit.
			var payloadIndex uint64
			if s.getAvailabilityBit(data.Slot) {
				payloadIndex = 1
			}
			payloadMatches = uint64(data.Index) == payloadIndex
		}
	}

	// Determine which flags to set.
	// Modified in Deneb (EIP-7045): TIMELY_TARGET has no delay constraint.
	// https://github.com/ethereum/consensus-specs/blob/master/specs/deneb/beacon-chain.md#modified-get_attestation_participation_flag_indices
	var participationFlags [3]bool
	if isMatchingSource && inclusionDelay <= intSqrt(s.specs.SlotsPerEpoch) {
		participationFlags[TimelySourceFlagIndex] = true
	}
	if isMatchingTarget {
		participationFlags[TimelyTargetFlagIndex] = true
	}
	if isMatchingHead && payloadMatches && inclusionDelay == s.specs.MinAttestationInclusionDelay {
		participationFlags[TimelyHeadFlagIndex] = true
	}

	// Select the right participation array and (Gloas) the corresponding
	// builder pending payment slot. The Gloas spec keeps a 2-epoch sliding
	// window of payments — current epoch is the second half, previous epoch
	// the first half.
	var participation []altair.ParticipationFlags
	var builderPayment *gloas.BuilderPendingPayment
	var builderPaymentIdx uint64
	slotsPerEpoch := s.specs.SlotsPerEpoch
	if isCurrentEpoch {
		participation = s.CurrentEpochParticipation
		builderPaymentIdx = slotsPerEpoch + uint64(data.Slot)%slotsPerEpoch
	} else {
		participation = s.PreviousEpochParticipation
		builderPaymentIdx = uint64(data.Slot) % slotsPerEpoch
	}
	if s.version >= spec.DataVersionGloas && builderPaymentIdx < uint64(len(s.BuilderPendingPayments)) {
		builderPayment = s.BuilderPendingPayments[builderPaymentIdx]
	}

	sameSlot := s.version >= spec.DataVersionGloas && isAttestationSameSlot(s, data)

	// Update participation flags, compute proposer reward, and (Gloas) accumulate
	// builder payment weight from validators contributing new flags on same-slot
	// attestations.
	proposerRewardNumerator := uint64(0)
	for _, index := range attestingIndices {
		if int(index) >= len(participation) {
			continue
		}

		willSetNewFlag := false
		for fi := 0; fi < 3; fi++ {
			if !participationFlags[fi] {
				continue
			}
			// Only newly-set flags earn proposer reward; re-attesting the
			// same flag is a no-op.
			if !hasFlag(participation[index], fi) {
				participation[index] |= altair.ParticipationFlags(1 << fi)
				proposerRewardNumerator += uint64(s.getBaseReward(index)) * ParticipationFlagWeights[fi]
				willSetNewFlag = true
			}
		}

		// Gloas: each validator contributes its effective balance to the
		// builder payment weight at most once per slot, when it first sets a
		// new flag on a same-slot attestation. Only counted when the slot
		// actually has a builder payment with non-zero amount.
		if willSetNewFlag && sameSlot && builderPayment != nil &&
			builderPayment.Withdrawal != nil && builderPayment.Withdrawal.Amount > 0 {
			builderPayment.Weight += s.Validators[index].EffectiveBalance
		}
	}

	// Proposer reward
	proposerRewardDenominator := uint64((WeightDenominator - ProposerWeight) * WeightDenominator / ProposerWeight)
	if proposerRewardDenominator > 0 {
		proposerReward := phase0.Gwei(proposerRewardNumerator / proposerRewardDenominator)
		proposerIndex := s.getProposerIndex()
		s.increaseBalance(proposerIndex, proposerReward)
	}
}

// processVoluntaryExit processes a voluntary exit.
// Signature verification is not performed here; only structural checks.
// https://github.com/ethereum/consensus-specs/blob/master/specs/phase0/beacon-chain.md#voluntary-exits
func processVoluntaryExit(s *stateAccessor, exit *phase0.SignedVoluntaryExit) {
	if exit == nil || exit.Message == nil {
		return
	}
	validatorIndex := exit.Message.ValidatorIndex
	if int(validatorIndex) >= len(s.Validators) {
		return
	}
	initiateValidatorExit(s, validatorIndex)
}

// processBLSToExecutionChange updates a validator's withdrawal credentials from BLS to execution.
// https://github.com/ethereum/consensus-specs/blob/master/specs/capella/beacon-chain.md#new-process_bls_to_execution_change
func processBLSToExecutionChange(s *stateAccessor, signed *capella.SignedBLSToExecutionChange) {
	if signed == nil || signed.Message == nil {
		return
	}

	change := signed.Message
	validatorIndex := change.ValidatorIndex
	if int(validatorIndex) >= len(s.Validators) {
		return
	}

	validator := s.Validators[validatorIndex]

	// Only apply if currently BLS credentials (0x00 prefix)
	if len(validator.WithdrawalCredentials) == 0 || validator.WithdrawalCredentials[0] != 0x00 {
		return
	}

	// Update to execution credentials (0x01 prefix): 0x01 byte, 11 zero
	// bytes, then the 20-byte execution address.
	newCredentials := make([]byte, 32)
	newCredentials[0] = 0x01
	copy(newCredentials[12:], change.ToExecutionAddress[:])
	validator.WithdrawalCredentials = newCredentials
}

// processWithdrawalRequest processes an EL withdrawal request.
// Amount == 0 encodes a full exit; any other amount is a partial withdrawal.
// New in Electra: https://github.com/ethereum/consensus-specs/blob/master/specs/electra/beacon-chain.md#new-process_withdrawal_request
func processWithdrawalRequest(s *stateAccessor, request *electra.WithdrawalRequest) {
	if request == nil {
		return
	}
	isFullExitRequest := request.Amount == 0

	// If partial withdrawal queue is full, only full exits are processed.
	if uint64(len(s.PendingPartialWithdrawals)) == s.specs.PendingPartialWithdrawalsLimit && !isFullExitRequest {
		return
	}

	// Find validator by pubkey; unknown pubkeys are silently dropped per spec.
	validatorIndex := findValidatorByPubkey(s, request.ValidatorPubkey)
	if validatorIndex == nil {
		return
	}
	validator := s.Validators[*validatorIndex]

	// Verify withdrawal credentials: must be 0x01 or 0x02 (execution prefix) and
	// the source address must match the credential's address bytes.
	if !hasExecutionWithdrawalCredential(validator) {
		return
	}
	if !bytes.Equal(validator.WithdrawalCredentials[12:], request.SourceAddress[:]) {
		return
	}
	// Verify the validator is active and exit has not been initiated.
	if !isActiveValidator(validator, s.currentEpoch()) {
		return
	}
	if validator.ExitEpoch != FarFutureEpoch {
		return
	}
	// Validator must have been active long enough (SHARD_COMMITTEE_PERIOD).
	if s.currentEpoch() < validator.ActivationEpoch+phase0.Epoch(s.specs.ShardCommitteePeriod) {
		return
	}

	pendingBalanceToWithdraw := getPendingBalanceToWithdraw(s, *validatorIndex)

	if isFullExitRequest {
		// Only exit if there are no pending partial withdrawals queued.
		if pendingBalanceToWithdraw == 0 {
			initiateValidatorExit(s, *validatorIndex)
		}
		return
	}

	// Partial withdrawal: only allowed for compounding (0x02) validators with
	// effective balance ≥ MIN_ACTIVATION_BALANCE and *actual* balance exceeding
	// MIN_ACTIVATION_BALANCE + already-pending withdrawals.
	if !hasCompoundingWithdrawalCredential(validator) {
		return
	}
	hasSufficientEffectiveBalance := validator.EffectiveBalance >= phase0.Gwei(s.specs.MinActivationBalance)
	hasExcessBalance := s.Balances[*validatorIndex] > phase0.Gwei(s.specs.MinActivationBalance)+pendingBalanceToWithdraw
	if !hasSufficientEffectiveBalance || !hasExcessBalance {
		return
	}

	// Withdraw at most the excess (balance - MIN - pending), capped by request.Amount.
	maxWithdrawable := s.Balances[*validatorIndex] - phase0.Gwei(s.specs.MinActivationBalance) - pendingBalanceToWithdraw
	toWithdraw := phase0.Gwei(request.Amount)
	if maxWithdrawable < toWithdraw {
		toWithdraw = maxWithdrawable
	}

	// The withdrawal consumes exit churn and becomes withdrawable after the
	// usual delay past its exit-queue epoch.
	exitQueueEpoch := computeExitEpochAndUpdateChurn(s, toWithdraw)
	withdrawableEpoch := exitQueueEpoch + phase0.Epoch(s.specs.MinValidatorWithdrawbilityDelay)

	s.PendingPartialWithdrawals = append(s.PendingPartialWithdrawals, &electra.PendingPartialWithdrawal{
		ValidatorIndex:    *validatorIndex,
		Amount:            toWithdraw,
		WithdrawableEpoch: withdrawableEpoch,
	})
}

// blsG2PointAtInfinity is the canonical compressed encoding of the G2 point at
// infinity (0xc0 followed by 95 zero bytes). Used as a placeholder signature
// for synthetic pending deposits created via queueExcessActiveBalance.
var blsG2PointAtInfinity = func() phase0.BLSSignature {
	var sig phase0.BLSSignature
	sig[0] = 0xc0
	return sig
}()

// switchToCompoundingValidator switches a validator's withdrawal credentials to
// compounding (0x02) and moves any balance above MIN_ACTIVATION_BALANCE into the
// pending_deposits queue.
//
// NOTE(review): the credentials byte is mutated in place on the existing
// slice — confirm the slice is not shared with a snapshot of the state.
//
// https://github.com/ethereum/consensus-specs/blob/master/specs/electra/beacon-chain.md#new-switch_to_compounding_validator
func switchToCompoundingValidator(s *stateAccessor, index phase0.ValidatorIndex) {
	validator := s.Validators[index]
	validator.WithdrawalCredentials[0] = 0x02
	queueExcessActiveBalance(s, index)
}

// queueExcessActiveBalance moves any balance above MIN_ACTIVATION_BALANCE from
// the validator's balance into a synthetic pending deposit. Used when a
// validator switches to compounding credentials.
//
// https://github.com/ethereum/consensus-specs/blob/master/specs/electra/beacon-chain.md#new-queue_excess_active_balance
func queueExcessActiveBalance(s *stateAccessor, index phase0.ValidatorIndex) {
	balance := s.Balances[index]
	minActivation := phase0.Gwei(s.specs.MinActivationBalance)
	if balance <= minActivation {
		return
	}
	excess := balance - minActivation
	s.Balances[index] = minActivation

	// The synthetic deposit carries a copy of the credentials so later
	// credential mutations don't retroactively change the queued deposit.
	validator := s.Validators[index]
	s.PendingDeposits = append(s.PendingDeposits, &electra.PendingDeposit{
		Pubkey:                validator.PublicKey,
		WithdrawalCredentials: append([]byte(nil), validator.WithdrawalCredentials...),
		Amount:                excess,
		Signature:             blsG2PointAtInfinity,
		Slot:                  0, // GENESIS_SLOT, distinguishes from real deposits
	})
}

// processConsolidationRequest processes an EL consolidation request.
+// New in Electra: https://github.com/ethereum/consensus-specs/blob/master/specs/electra/beacon-chain.md#new-process_consolidation_request +func processConsolidationRequest(s *stateAccessor, request *electra.ConsolidationRequest) { + if request == nil { + return + } + + sourceIndex := findValidatorByPubkey(s, request.SourcePubkey) + targetIndex := findValidatorByPubkey(s, request.TargetPubkey) + if sourceIndex == nil || targetIndex == nil { + return + } + + sourceValidator := s.Validators[*sourceIndex] + targetValidator := s.Validators[*targetIndex] + + // Validate source credentials + if len(sourceValidator.WithdrawalCredentials) == 0 || sourceValidator.WithdrawalCredentials[0] == 0x00 { + return + } + if !bytes.Equal(sourceValidator.WithdrawalCredentials[12:], request.SourceAddress[:]) { + return + } + + // Self-consolidation: switch source to compounding credentials. + // Per the spec, this also moves any excess balance (above 32 ETH) into the + // pending_deposits queue via switch_to_compounding_validator. 
+ // https://github.com/ethereum/consensus-specs/blob/master/specs/electra/beacon-chain.md#new-process_consolidation_request + if *sourceIndex == *targetIndex { + if sourceValidator.WithdrawalCredentials[0] == 0x01 { + switchToCompoundingValidator(s, *sourceIndex) + } + return + } + + // Check queue limit + if uint64(len(s.PendingConsolidations)) >= s.specs.PendingConsolidationsLimit { + return + } + + // Check target has compounding credentials + if targetValidator.WithdrawalCredentials[0] != 0x02 { + return + } + + // Check both active and not exiting + epoch := s.currentEpoch() + if !isActiveValidator(sourceValidator, epoch) || sourceValidator.ExitEpoch != FarFutureEpoch { + return + } + if !isActiveValidator(targetValidator, epoch) || targetValidator.ExitEpoch != FarFutureEpoch { + return + } + + // Check source age + if epoch < sourceValidator.ActivationEpoch+phase0.Epoch(s.specs.ShardCommitteePeriod) { + return + } + + // Check no pending partial withdrawals for source + for _, pw := range s.PendingPartialWithdrawals { + if pw.ValidatorIndex == *sourceIndex { + return + } + } + + s.PendingConsolidations = append(s.PendingConsolidations, &electra.PendingConsolidation{ + SourceIndex: *sourceIndex, + TargetIndex: *targetIndex, + }) + + // Initiate exit for the source + initiateValidatorExit(s, *sourceIndex) +} + +// processExecutionPayloadBid records the builder's bid in builder_pending_payments. 
+// New in Gloas: https://github.com/ethereum/consensus-specs/blob/master/specs/gloas/beacon-chain.md#new-process_execution_payload_bid +func processExecutionPayloadBid(s *stateAccessor, block *spec.VersionedSignedBeaconBlock) { + if s.version < spec.DataVersionGloas { + return + } + if block.Gloas == nil || block.Gloas.Message == nil || block.Gloas.Message.Body == nil { + return + } + + signedBid := block.Gloas.Message.Body.SignedExecutionPayloadBid + if signedBid == nil || signedBid.Message == nil { + return + } + + bid := signedBid.Message + amount := bid.Value + + // Record the pending payment if there is some payment + if amount > 0 { + slotsPerEpoch := s.specs.SlotsPerEpoch + paymentIdx := slotsPerEpoch + uint64(s.Slot)%slotsPerEpoch + if paymentIdx < uint64(len(s.BuilderPendingPayments)) { + s.BuilderPendingPayments[paymentIdx] = &gloas.BuilderPendingPayment{ + Weight: 0, + Withdrawal: &gloas.BuilderPendingWithdrawal{ + FeeRecipient: bid.FeeRecipient, + Amount: amount, + BuilderIndex: bid.BuilderIndex, + }, + } + } + } + + // Cache the signed execution payload bid (always, regardless of amount) + s.LatestExecutionPayloadBid = bid +} + +// processSyncAggregate processes the sync committee aggregate. 
+// https://github.com/ethereum/consensus-specs/blob/master/specs/altair/beacon-chain.md#sync-aggregate-processing +func processSyncAggregate(s *stateAccessor, block *spec.VersionedSignedBeaconBlock) { + syncAggregate, err := block.SyncAggregate() + if err != nil || syncAggregate == nil { + return + } + + committeeSize := s.specs.SyncCommitteeSize + if committeeSize == 0 || s.CurrentSyncCommittee == nil { + return + } + + totalActiveBalance := s.getTotalActiveBalance() + totalActiveIncrements := uint64(totalActiveBalance) / s.specs.EffectiveBalanceIncrement + totalBaseRewards := totalActiveIncrements * uint64(s.getBaseRewardPerIncrement()) + maxParticipantRewards := totalBaseRewards * SyncRewardWeight / WeightDenominator / s.specs.SlotsPerEpoch + participantReward := phase0.Gwei(maxParticipantRewards / committeeSize) + proposerReward := participantReward * ProposerWeight / (WeightDenominator - ProposerWeight) + + // Build pubkey → validator index map for the sync committee + syncCommitteePubkeys := s.CurrentSyncCommittee.Pubkeys + proposerIndex := s.getProposerIndex() + + for i := uint64(0); i < committeeSize && i < uint64(len(syncCommitteePubkeys)); i++ { + validatorIndex := findValidatorByPubkey(s, syncCommitteePubkeys[i]) + if validatorIndex == nil { + continue + } + + byteIdx := i / 8 + bitIdx := i % 8 + if byteIdx < uint64(len(syncAggregate.SyncCommitteeBits)) && + syncAggregate.SyncCommitteeBits[byteIdx]&(1< 0 { + penalty := validator.EffectiveBalance / phase0.Gwei(minPenaltyQuotient) + s.decreaseBalance(index, penalty) + } + + // Proposer + whistleblower reward + proposerIndex := s.getProposerIndex() + whistleblowerRewardQuotient := s.specs.WhistleblowerRewardQuotientElectra + if whistleblowerRewardQuotient == 0 { + whistleblowerRewardQuotient = s.specs.WhitelistRewardQuotient + } + if whistleblowerRewardQuotient > 0 { + whistleblowerReward := validator.EffectiveBalance / phase0.Gwei(whistleblowerRewardQuotient) + proposerReward := whistleblowerReward * 
ProposerWeight / WeightDenominator + s.increaseBalance(proposerIndex, proposerReward) + } +} + +// getProposerIndex returns the proposer for the current slot from the lookahead. +// Modified in Fulu: https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/beacon-chain.md#modified-get_beacon_proposer_index +func (s *stateAccessor) getProposerIndex() phase0.ValidatorIndex { + if len(s.ProposerLookahead) > 0 { + idx := uint64(s.Slot) % s.specs.SlotsPerEpoch + return s.ProposerLookahead[idx] + } + // Fallback: compute directly + activeIndices := s.getActiveValidatorIndices(s.currentEpoch()) + return computeProposerIndex(s, activeIndices, s.currentEpoch(), s.Slot) +} + +// getBlockRootAtSlot returns the block root at a specific slot. +// https://github.com/ethereum/consensus-specs/blob/master/specs/phase0/beacon-chain.md#get_block_root_at_slot +func getBlockRootAtSlot(s *stateAccessor, slot phase0.Slot) phase0.Root { + return s.BlockRoots[uint64(slot)%s.specs.SlotsPerHistoricalRoot] +} + +// isAttestationSameSlot checks if the attestation is for the block at the attestation slot. +// New in Gloas: https://github.com/ethereum/consensus-specs/blob/master/specs/gloas/beacon-chain.md#new-is_attestation_same_slot +func isAttestationSameSlot(s *stateAccessor, data *phase0.AttestationData) bool { + if data.Slot == 0 { + return true + } + blockRoot := data.BeaconBlockRoot + slotBlockRoot := getBlockRootAtSlot(s, data.Slot) + prevBlockRoot := getBlockRootAtSlot(s, data.Slot-1) + return blockRoot == slotBlockRoot && blockRoot != prevBlockRoot +} + +// findValidatorByPubkey looks up a validator index by BLS public key. +// Uses a cached map for O(1) lookups after first call. +func findValidatorByPubkey(s *stateAccessor, pubkey phase0.BLSPubKey) *phase0.ValidatorIndex { + if s.caches.pubkeyCache == nil { + epoch := s.currentEpoch() + + // Include active validators AND recently-exited validators that could + // still be serving on a sync committee. 
Sync committees are recomputed + // every EPOCHS_PER_SYNC_COMMITTEE_PERIOD epochs; a committee chosen at + // the start of the current period serves for its full duration, so we + // need validators that were active at any point in the last 2 periods. + syncPeriod := phase0.Epoch(s.specs.EpochsPerSyncCommitteePeriod) + cutoff := phase0.Epoch(0) + if epoch > 2*syncPeriod { + cutoff = epoch - 2*syncPeriod + } + + s.caches.pubkeyCache = make(map[phase0.BLSPubKey]phase0.ValidatorIndex, len(s.Validators)) + for i, v := range s.Validators { + // Active now, or exited recently enough to still be on a sync committee. + if isActiveValidator(v, epoch) || v.ExitEpoch >= cutoff { + s.caches.pubkeyCache[v.PublicKey] = phase0.ValidatorIndex(i) + } + } + } + if idx, ok := s.caches.pubkeyCache[pubkey]; ok { + return &idx + } + return nil +} + +func maxEpoch(a, b phase0.Epoch) phase0.Epoch { + if a > b { + return a + } + return b +} + +// getSlashingAttestations extracts both attestations from an attester slashing. +func getSlashingAttestations(slashing any) (att1, att2 interface { + AttestingIndices() ([]uint64, error) +}) { + type attestationPair interface { + Attestation1() (interface{ AttestingIndices() ([]uint64, error) }, error) + Attestation2() (interface{ AttestingIndices() ([]uint64, error) }, error) + } + + if s, ok := slashing.(attestationPair); ok { + a1, _ := s.Attestation1() + a2, _ := s.Attestation2() + return a1, a2 + } + return nil, nil +} + +// Unused import guards. 
+var ( + _ = (*electra.WithdrawalRequest)(nil) + _ = (*electra.ConsolidationRequest)(nil) + _ = slices.Contains[[]int] +) diff --git a/indexer/beacon/statetransition/pending.go b/indexer/beacon/statetransition/pending.go new file mode 100644 index 000000000..e3f3a37a2 --- /dev/null +++ b/indexer/beacon/statetransition/pending.go @@ -0,0 +1,191 @@ +package statetransition + +import ( + "github.com/ethpandaops/go-eth2-client/spec/electra" + "github.com/ethpandaops/go-eth2-client/spec/phase0" +) + +// processPendingDeposits implements the Electra+ version of process_pending_deposits. +// New in Electra: https://github.com/ethereum/consensus-specs/blob/master/specs/electra/beacon-chain.md#new-process_pending_deposits +func processPendingDeposits(s *stateAccessor) error { + nextEpoch := s.currentEpoch() + 1 + availableForProcessing := s.DepositBalanceToConsume + s.getActivationExitChurnLimit() + processedAmount := phase0.Gwei(0) + nextDepositIndex := uint64(0) + var depositsToPostpone []*electra.PendingDeposit + isChurnLimitReached := false + finalizedSlot := phase0.Slot(uint64(s.FinalizedCheckpoint.Epoch) * s.specs.SlotsPerEpoch) + + // Build pubkey index map for O(1) validator lookups. Without this, the + // nested loop over PendingDeposits × Validators is O(N×M), which on mainnet + // (51K deposits × 2.2M validators) would take hours. + pubkeyIndex := make(map[phase0.BLSPubKey]phase0.ValidatorIndex, len(s.Validators)) + for i, v := range s.Validators { + pubkeyIndex[v.PublicKey] = phase0.ValidatorIndex(i) + } + + for _, deposit := range s.PendingDeposits { + // Do not process deposit requests if Eth1 bridge deposits are not yet applied. + if deposit.Slot > 0 && s.Eth1DepositIndex < s.DepositRequestsStartIndex { + break + } + + // Check if deposit has been finalized. + if deposit.Slot > finalizedSlot { + break + } + + // Check the per-epoch processing limit. + if nextDepositIndex >= s.specs.MaxPendingDepositsPerEpoch { + break + } + + // Read validator state. 
+ isValidatorExited := false + isValidatorWithdrawn := false + if existingIdx, ok := pubkeyIndex[deposit.Pubkey]; ok { + v := s.Validators[existingIdx] + isValidatorExited = v.ExitEpoch < FarFutureEpoch + isValidatorWithdrawn = v.WithdrawableEpoch < nextEpoch + } + + switch { + case isValidatorWithdrawn: + // Deposited balance will never become active. Apply without consuming churn. + applyPendingDeposit(s, deposit, pubkeyIndex) + + case isValidatorExited: + // Validator is exiting; postpone until after withdrawable epoch. + depositsToPostpone = append(depositsToPostpone, deposit) + + default: + // Check if deposit fits in the churn; if not, stop processing this epoch. + if processedAmount+phase0.Gwei(deposit.Amount) > availableForProcessing { + isChurnLimitReached = true + break + } + processedAmount += phase0.Gwei(deposit.Amount) + applyPendingDeposit(s, deposit, pubkeyIndex) + } + + if isChurnLimitReached { + break + } + + // Regardless of how the deposit was handled, advance the queue cursor. + nextDepositIndex++ + } + + // state.pending_deposits = state.pending_deposits[next_deposit_index:] + deposits_to_postpone + remaining := make([]*electra.PendingDeposit, 0, len(s.PendingDeposits)-int(nextDepositIndex)+len(depositsToPostpone)) + remaining = append(remaining, s.PendingDeposits[nextDepositIndex:]...) + remaining = append(remaining, depositsToPostpone...) + s.PendingDeposits = remaining + + // Accumulate churn only if the churn limit has been hit. + if isChurnLimitReached { + s.DepositBalanceToConsume = availableForProcessing - processedAmount + } else { + s.DepositBalanceToConsume = 0 + } + + return nil +} + +// applyPendingDeposit implements apply_pending_deposit. If the validator does +// not exist, a new one is added to the registry. Otherwise the deposit amount +// is added to the existing validator's balance. +// +// Signature verification is skipped — we trust blocks fetched from a verified +// beacon node, so the deposit is always applied. 
+// +// pubkeyIndex is the local pubkey→index map maintained by processPendingDeposits; +// when a new validator is appended, the map is updated so subsequent deposits +// in the same loop find the new validator. +// +// https://github.com/ethereum/consensus-specs/blob/master/specs/electra/beacon-chain.md#new-apply_pending_deposit +func applyPendingDeposit(s *stateAccessor, deposit *electra.PendingDeposit, pubkeyIndex map[phase0.BLSPubKey]phase0.ValidatorIndex) { + if existingIdx, ok := pubkeyIndex[deposit.Pubkey]; ok { + s.increaseBalance(existingIdx, phase0.Gwei(deposit.Amount)) + return + } + // New validator: add to registry. + addValidatorToRegistry(s, deposit.Pubkey, deposit.WithdrawalCredentials, phase0.Gwei(deposit.Amount)) + pubkeyIndex[deposit.Pubkey] = phase0.ValidatorIndex(len(s.Validators) - 1) +} + +// addValidatorToRegistry implements the Electra modified add_validator_to_registry. +// Constructs a validator via get_validator_from_deposit (which sets effective_balance +// based on the deposit amount and the credential type) and appends it to the +// registry along with matching balance/participation/inactivity entries. +// +// https://github.com/ethereum/consensus-specs/blob/master/specs/electra/beacon-chain.md#modified-add_validator_to_registry +func addValidatorToRegistry(s *stateAccessor, pubkey phase0.BLSPubKey, withdrawalCredentials []byte, amount phase0.Gwei) { + v := getValidatorFromDeposit(s, pubkey, withdrawalCredentials, amount) + s.Validators = append(s.Validators, v) + s.Balances = append(s.Balances, amount) + s.PreviousEpochParticipation = append(s.PreviousEpochParticipation, 0) + s.CurrentEpochParticipation = append(s.CurrentEpochParticipation, 0) + s.InactivityScores = append(s.InactivityScores, 0) +} + +// getValidatorFromDeposit implements the Electra modified get_validator_from_deposit. +// Returns a validator with effective_balance computed as +// min(amount - amount % EFFECTIVE_BALANCE_INCREMENT, max_effective_balance). 
+// +// https://github.com/ethereum/consensus-specs/blob/master/specs/electra/beacon-chain.md#modified-get_validator_from_deposit +func getValidatorFromDeposit(s *stateAccessor, pubkey phase0.BLSPubKey, withdrawalCredentials []byte, amount phase0.Gwei) *phase0.Validator { + credentials := append([]byte(nil), withdrawalCredentials...) + v := &phase0.Validator{ + PublicKey: pubkey, + WithdrawalCredentials: credentials, + EffectiveBalance: 0, + Slashed: false, + ActivationEligibilityEpoch: FarFutureEpoch, + ActivationEpoch: FarFutureEpoch, + ExitEpoch: FarFutureEpoch, + WithdrawableEpoch: FarFutureEpoch, + } + maxEB := s.getMaxEffectiveBalance(v) + increment := phase0.Gwei(s.specs.EffectiveBalanceIncrement) + rounded := amount - amount%increment + if rounded < maxEB { + v.EffectiveBalance = rounded + } else { + v.EffectiveBalance = maxEB + } + return v +} + +// processPendingConsolidations implements the Electra+ version of process_pending_consolidations. +// New in Electra: https://github.com/ethereum/consensus-specs/blob/master/specs/electra/beacon-chain.md#new-process_pending_consolidations +func processPendingConsolidations(s *stateAccessor) { + nextEpoch := s.currentEpoch() + 1 + nextPendingConsolidation := 0 + + for _, consolidation := range s.PendingConsolidations { + sourceValidator := s.Validators[consolidation.SourceIndex] + // Slashed source: skip (count as processed) and continue. + if sourceValidator.Slashed { + nextPendingConsolidation++ + continue + } + // Source not yet withdrawable: stop processing (queue is ordered). + if sourceValidator.WithdrawableEpoch > nextEpoch { + break + } + + // Move only the *active* balance — bounded by the effective balance. + // Any balance above effective_balance becomes a normal withdrawal later. 
+ sourceEffectiveBalance := s.Balances[consolidation.SourceIndex] + if sourceValidator.EffectiveBalance < sourceEffectiveBalance { + sourceEffectiveBalance = sourceValidator.EffectiveBalance + } + + s.decreaseBalance(consolidation.SourceIndex, sourceEffectiveBalance) + s.increaseBalance(consolidation.TargetIndex, sourceEffectiveBalance) + nextPendingConsolidation++ + } + + s.PendingConsolidations = s.PendingConsolidations[nextPendingConsolidation:] +} diff --git a/indexer/beacon/statetransition/registry.go b/indexer/beacon/statetransition/registry.go new file mode 100644 index 000000000..28fe06f67 --- /dev/null +++ b/indexer/beacon/statetransition/registry.go @@ -0,0 +1,93 @@ +package statetransition + +import ( + "github.com/ethpandaops/go-eth2-client/spec/phase0" +) + +// processRegistryUpdates implements the Electra+ version of process_registry_updates. +// Single loop with if/elif/elif chain — Electra removed activation churn here and +// moved it to process_pending_deposits, so every eligible validator activates this +// epoch (no churn limit, no sorting). +// +// Modified in Electra: https://github.com/ethereum/consensus-specs/blob/master/specs/electra/beacon-chain.md#modified-process_registry_updates +func processRegistryUpdates(s *stateAccessor) error { + currentEpoch := s.currentEpoch() + activationEpoch := computeActivationExitEpoch(currentEpoch, s.specs) + + for i, v := range s.Validators { + switch { + case isEligibleForActivationQueue(v, s.specs): + v.ActivationEligibilityEpoch = currentEpoch + 1 + case isActiveValidator(v, currentEpoch) && v.EffectiveBalance <= phase0.Gwei(s.specs.EjectionBalance): + initiateValidatorExit(s, phase0.ValidatorIndex(i)) + case isEligibleForActivation(v, s.FinalizedCheckpoint.Epoch): + v.ActivationEpoch = activationEpoch + } + } + + return nil +} + +// initiateValidatorExit queues a validator for exit, computing the exit epoch +// via compute_exit_epoch_and_update_churn (which handles multi-epoch overflow). 
+// +// Modified in Electra: https://github.com/ethereum/consensus-specs/blob/master/specs/electra/beacon-chain.md#modified-initiate_validator_exit +func initiateValidatorExit(s *stateAccessor, index phase0.ValidatorIndex) { + v := s.Validators[index] + if v.ExitEpoch != FarFutureEpoch { + return // already exiting + } + + // Spec uses validator.effective_balance, NOT max effective balance. + exitQueueEpoch := computeExitEpochAndUpdateChurn(s, v.EffectiveBalance) + + v.ExitEpoch = exitQueueEpoch + v.WithdrawableEpoch = exitQueueEpoch + phase0.Epoch(s.specs.MinValidatorWithdrawbilityDelay) +} + +// computeExitEpochAndUpdateChurn returns the earliest epoch at which an exit of +// the given balance can be processed, while updating state.earliest_exit_epoch +// and state.exit_balance_to_consume in place. Handles multi-epoch overflow. +// +// New in Electra: https://github.com/ethereum/consensus-specs/blob/master/specs/electra/beacon-chain.md#new-compute_exit_epoch_and_update_churn +func computeExitEpochAndUpdateChurn(s *stateAccessor, exitBalance phase0.Gwei) phase0.Epoch { + earliestExitEpoch := computeActivationExitEpoch(s.currentEpoch(), s.specs) + if s.EarliestExitEpoch > earliestExitEpoch { + earliestExitEpoch = s.EarliestExitEpoch + } + perEpochChurn := s.getActivationExitChurnLimit() + + var exitBalanceToConsume phase0.Gwei + if s.EarliestExitEpoch < earliestExitEpoch { + // New epoch for exits — refill the budget. + exitBalanceToConsume = perEpochChurn + } else { + exitBalanceToConsume = s.ExitBalanceToConsume + } + + // If exit doesn't fit, push it forward by enough epochs to fit the balance. 
+ if exitBalance > exitBalanceToConsume { + balanceToProcess := exitBalance - exitBalanceToConsume + additionalEpochs := (balanceToProcess-1)/perEpochChurn + 1 + earliestExitEpoch += phase0.Epoch(additionalEpochs) + exitBalanceToConsume += phase0.Gwei(additionalEpochs) * perEpochChurn + } + + s.ExitBalanceToConsume = exitBalanceToConsume - exitBalance + s.EarliestExitEpoch = earliestExitEpoch + return s.EarliestExitEpoch +} + +// getPendingBalanceToWithdraw returns the sum of pending partial withdrawal +// amounts for the given validator. +// +// New in Electra: https://github.com/ethereum/consensus-specs/blob/master/specs/electra/beacon-chain.md#new-get_pending_balance_to_withdraw +func getPendingBalanceToWithdraw(s *stateAccessor, validatorIndex phase0.ValidatorIndex) phase0.Gwei { + total := phase0.Gwei(0) + for _, w := range s.PendingPartialWithdrawals { + if w.ValidatorIndex == validatorIndex { + total += w.Amount + } + } + return total +} diff --git a/indexer/beacon/statetransition/resets.go b/indexer/beacon/statetransition/resets.go new file mode 100644 index 000000000..e50bd024b --- /dev/null +++ b/indexer/beacon/statetransition/resets.go @@ -0,0 +1,61 @@ +package statetransition + +import ( + "github.com/ethpandaops/go-eth2-client/spec/altair" + "github.com/ethpandaops/go-eth2-client/spec/capella" +) + +// processEth1DataReset resets the ETH1 data votes at the start of a new voting period. +// https://github.com/ethereum/consensus-specs/blob/master/specs/phase0/beacon-chain.md#eth1-data-votes-updates +func processEth1DataReset(s *stateAccessor) { + nextEpoch := s.currentEpoch() + 1 + if uint64(nextEpoch)%s.specs.EpochsPerEth1VotingPeriod == 0 { + s.ETH1DataVotes = s.ETH1DataVotes[:0] + } +} + +// processSlashingsReset rotates the slashings vector. 
+// https://github.com/ethereum/consensus-specs/blob/master/specs/phase0/beacon-chain.md#slashings-balances-updates +func processSlashingsReset(s *stateAccessor) { + nextEpoch := s.currentEpoch() + 1 + idx := uint64(nextEpoch) % s.specs.EpochsPerSlashingVector + s.Slashings[idx] = 0 +} + +// processRandaoMixesReset copies the current epoch mix to the next epoch slot. +// https://github.com/ethereum/consensus-specs/blob/master/specs/phase0/beacon-chain.md#randao-mixes-updates +func processRandaoMixesReset(s *stateAccessor) { + currentEpoch := s.currentEpoch() + nextEpoch := currentEpoch + 1 + srcIdx := uint64(currentEpoch) % s.specs.EpochsPerHistoricalVector + dstIdx := uint64(nextEpoch) % s.specs.EpochsPerHistoricalVector + s.RANDAOMixes[dstIdx] = s.RANDAOMixes[srcIdx] +} + +// processParticipationFlagUpdates rotates epoch participation. +// New in Altair: https://github.com/ethereum/consensus-specs/blob/master/specs/altair/beacon-chain.md#participation-flags-updates +func processParticipationFlagUpdates(s *stateAccessor) { + s.PreviousEpochParticipation = s.CurrentEpochParticipation + s.CurrentEpochParticipation = make([]altair.ParticipationFlags, len(s.Validators)) +} + +// processHistoricalSummariesUpdate appends a new historical summary at period boundaries. 
+// Modified in Capella: https://github.com/ethereum/consensus-specs/blob/master/specs/capella/beacon-chain.md#modified-process_historical_summaries_update +func processHistoricalSummariesUpdate(s *stateAccessor) { + nextEpoch := s.currentEpoch() + 1 + epochsPerPeriod := s.specs.SlotsPerHistoricalRoot / s.specs.SlotsPerEpoch + if epochsPerPeriod == 0 { + return + } + if uint64(nextEpoch)%epochsPerPeriod != 0 { + return + } + + blockSummary := hashTreeRoot(s.BlockRoots) + stateSummary := hashTreeRoot(s.StateRoots) + + s.HistoricalSummaries = append(s.HistoricalSummaries, &capella.HistoricalSummary{ + BlockSummaryRoot: blockSummary, + StateSummaryRoot: stateSummary, + }) +} diff --git a/indexer/beacon/statetransition/rewards.go b/indexer/beacon/statetransition/rewards.go new file mode 100644 index 000000000..e83261c4e --- /dev/null +++ b/indexer/beacon/statetransition/rewards.go @@ -0,0 +1,149 @@ +package statetransition + +import ( + "github.com/ethpandaops/go-eth2-client/spec/phase0" +) + +// processInactivityUpdates implements process_inactivity_updates (Altair+). +// Skips the genesis epoch — score updates are based on the previous epoch's +// participation, which doesn't exist at epoch 0. 
+// https://github.com/ethereum/consensus-specs/blob/master/specs/altair/beacon-chain.md#inactivity-scores +func processInactivityUpdates(s *stateAccessor) error { + currentEpoch := s.currentEpoch() + if currentEpoch == 0 { + return nil + } + + previousEpoch := s.previousEpoch() + isInactivityLeak := isInInactivityLeak(s) + + // Build set of timely target participants for previous epoch + targetParticipants := make(map[phase0.ValidatorIndex]bool) + for _, idx := range s.getUnslashedParticipatingIndices(TimelyTargetFlagIndex, previousEpoch) { + targetParticipants[idx] = true + } + + // Iterate over eligible validator indices: active in previous epoch OR (slashed and not yet withdrawable) + for i, v := range s.Validators { + if !isActiveValidator(v, previousEpoch) && !(v.Slashed && previousEpoch+1 < v.WithdrawableEpoch) { + continue + } + + idx := phase0.ValidatorIndex(i) + if targetParticipants[idx] { + // Decrease inactivity score by min(1, score) + if s.InactivityScores[i] >= 1 { + s.InactivityScores[i] -= 1 + } + } else { + // Increase inactivity score by INACTIVITY_SCORE_BIAS + s.InactivityScores[i] += s.specs.InactivityScoreBias + } + + if !isInactivityLeak { + // Not in inactivity leak: decrease score by min(INACTIVITY_SCORE_RECOVERY_RATE, score) + recovery := s.specs.InactivityScoreRecoveryRate + if s.InactivityScores[i] >= recovery { + s.InactivityScores[i] -= recovery + } else { + s.InactivityScores[i] = 0 + } + } + } + + return nil +} + +// processRewardsAndPenalties implements the Altair+ version of process_rewards_and_penalties. +// Skips the genesis epoch — rewards are for work done in the previous epoch. 
+// Modified in Altair: https://github.com/ethereum/consensus-specs/blob/master/specs/altair/beacon-chain.md#modified-get_flag_index_deltas +func processRewardsAndPenalties(s *stateAccessor) error { + currentEpoch := s.currentEpoch() + if currentEpoch == 0 { + return nil + } + + previousEpoch := s.previousEpoch() + totalActiveBalance := s.getTotalActiveBalance() + isInactivityLeak := isInInactivityLeak(s) + + // Precompute participating increments for each flag (matching spec: get_flag_index_deltas) + type flagData struct { + participatingIncrements uint64 + participants map[phase0.ValidatorIndex]bool + } + + activeIncrements := uint64(totalActiveBalance) / s.specs.EffectiveBalanceIncrement + + flags := make([]flagData, ParticipationFlagCount) + for fi := 0; fi < ParticipationFlagCount; fi++ { + indices := s.getUnslashedParticipatingIndices(fi, previousEpoch) + balance := phase0.Gwei(0) + pMap := make(map[phase0.ValidatorIndex]bool, len(indices)) + for _, idx := range indices { + pMap[idx] = true + balance += s.Validators[idx].EffectiveBalance + } + if balance < phase0.Gwei(s.specs.EffectiveBalanceIncrement) { + balance = phase0.Gwei(s.specs.EffectiveBalanceIncrement) + } + flags[fi] = flagData{ + participatingIncrements: uint64(balance) / s.specs.EffectiveBalanceIncrement, + participants: pMap, + } + } + + for i, v := range s.Validators { + // is_eligible_validator: active in previous epoch OR (slashed and not yet withdrawable) + if !isActiveValidator(v, previousEpoch) && !(v.Slashed && previousEpoch+1 < v.WithdrawableEpoch) { + continue + } + + idx := phase0.ValidatorIndex(i) + baseReward := uint64(s.getBaseReward(idx)) + + for fi := 0; fi < ParticipationFlagCount; fi++ { + weight := ParticipationFlagWeights[fi] + + if flags[fi].participants[idx] && !v.Slashed { + if !isInactivityLeak { + // Reward (spec: rewards[index] += base_reward * weight * participating_increments / (active_increments * WEIGHT_DENOMINATOR)) + rewardNumerator := baseReward * weight * 
flags[fi].participatingIncrements + reward := rewardNumerator / (activeIncrements * WeightDenominator) + s.increaseBalance(idx, phase0.Gwei(reward)) + } + } else if fi != TimelyHeadFlagIndex { + // Penalty (spec: skip TIMELY_HEAD_FLAG_INDEX for penalties) + penalty := baseReward * weight / WeightDenominator + s.decreaseBalance(idx, phase0.Gwei(penalty)) + } + } + + // Inactivity penalty (spec: get_inactivity_penalty_deltas) + // penalty = effective_balance * inactivity_score / (INACTIVITY_SCORE_BIAS * INACTIVITY_PENALTY_QUOTIENT_BELLATRIX) + if !flags[TimelyTargetFlagIndex].participants[idx] || v.Slashed { + penaltyNumerator := uint64(v.EffectiveBalance) * s.InactivityScores[i] + penaltyDenominator := s.specs.InactivityScoreBias * s.getInactivityPenaltyQuotient() + if penaltyDenominator > 0 { + penalty := penaltyNumerator / penaltyDenominator + s.decreaseBalance(idx, phase0.Gwei(penalty)) + } + } + } + + return nil +} + +// isInInactivityLeak checks if the chain is in an inactivity leak. +func isInInactivityLeak(s *stateAccessor) bool { + return s.previousEpoch()-s.FinalizedCheckpoint.Epoch > phase0.Epoch(s.specs.MinEpochsToInactivityPenalty) +} + +// getInactivityPenaltyQuotient returns the inactivity penalty quotient for the current fork. +func (s *stateAccessor) getInactivityPenaltyQuotient() uint64 { + // Electra/Fulu/Gloas use Bellatrix quotient + if s.specs.InactivityPenaltyQuotientBellatrix > 0 { + return s.specs.InactivityPenaltyQuotientBellatrix + } + return s.specs.InactivityPenaltyQuotient +} diff --git a/indexer/beacon/statetransition/slashings.go b/indexer/beacon/statetransition/slashings.go new file mode 100644 index 000000000..9e4bcd603 --- /dev/null +++ b/indexer/beacon/statetransition/slashings.go @@ -0,0 +1,50 @@ +package statetransition + +import ( + "github.com/ethpandaops/go-eth2-client/spec/phase0" +) + +// processSlashings implements the Electra+ version of process_slashings. 
+// +// Modified in Electra: https://github.com/ethereum/consensus-specs/blob/master/specs/electra/beacon-chain.md#modified-process_slashings +func processSlashings(s *stateAccessor) error { + currentEpoch := s.currentEpoch() + totalBalance := s.getTotalActiveBalance() + + totalSlashings := phase0.Gwei(0) + for _, slashing := range s.Slashings { + totalSlashings += slashing + } + + adjustedTotalSlashingBalance := totalSlashings * phase0.Gwei(s.specs.ProportionalSlashingMultiplierBellatrix) + if adjustedTotalSlashingBalance > totalBalance { + adjustedTotalSlashingBalance = totalBalance + } + + // Spec computes penalty_per_effective_balance_increment ONCE outside the loop: + // penalty_per_effective_balance_increment = adjusted // (total // increment) + // then per validator: + // penalty = penalty_per_effective_balance_increment * (effective_balance // increment) + // Doing the divisions in a different order loses precision for small slashings. + increment := phase0.Gwei(s.specs.EffectiveBalanceIncrement) + if increment == 0 || totalBalance < increment { + return nil + } + penaltyPerIncrement := adjustedTotalSlashingBalance / (totalBalance / increment) + + halfSlashingsVector := phase0.Epoch(s.specs.EpochsPerSlashingVector / 2) + for i, v := range s.Validators { + if !v.Slashed { + continue + } + if currentEpoch+halfSlashingsVector != v.WithdrawableEpoch { + continue + } + + effectiveBalanceIncrements := v.EffectiveBalance / increment + penalty := penaltyPerIncrement * effectiveBalanceIncrements + s.decreaseBalance(phase0.ValidatorIndex(i), penalty) + } + + return nil +} diff --git a/indexer/beacon/statetransition/state.go b/indexer/beacon/statetransition/state.go new file mode 100644 index 000000000..63900cd4d --- /dev/null +++ b/indexer/beacon/statetransition/state.go @@ -0,0 +1,605 @@ +package statetransition + +import ( + "fmt" + "math" + + "github.com/ethpandaops/dora/clients/consensus" + "github.com/ethpandaops/go-eth2-client/spec" + 
"github.com/ethpandaops/go-eth2-client/spec/altair" + "github.com/ethpandaops/go-eth2-client/spec/capella" + "github.com/ethpandaops/go-eth2-client/spec/deneb" + "github.com/ethpandaops/go-eth2-client/spec/electra" + "github.com/ethpandaops/go-eth2-client/spec/gloas" + "github.com/ethpandaops/go-eth2-client/spec/phase0" + dynssz "github.com/pk910/dynamic-ssz" +) + +// stateAccessor provides a unified interface to access and mutate beacon state fields +// across Fulu and Gloas versions. All fields are pointers/slices into the underlying +// VersionedBeaconState, so mutations are applied in-place. +type stateAccessor struct { + version spec.DataVersion + specs *consensus.ChainSpec + dynSsz *dynssz.DynSsz + + // Common fields shared by Fulu and Gloas (pointers into the underlying state). + Slot phase0.Slot + Validators []*phase0.Validator + Balances []phase0.Gwei + RANDAOMixes []phase0.Root + Slashings []phase0.Gwei + PreviousEpochParticipation []altair.ParticipationFlags + CurrentEpochParticipation []altair.ParticipationFlags + JustificationBits []byte // bitfield.Bitvector4 is []byte + PreviousJustifiedCheckpoint *phase0.Checkpoint + CurrentJustifiedCheckpoint *phase0.Checkpoint + FinalizedCheckpoint *phase0.Checkpoint + InactivityScores []uint64 + CurrentSyncCommittee *altair.SyncCommittee + NextSyncCommittee *altair.SyncCommittee + ETH1DataVotes []*phase0.ETH1Data + BlockRoots []phase0.Root + StateRoots []phase0.Root + HistoricalSummaries []*capella.HistoricalSummary + Eth1DepositIndex uint64 + DepositRequestsStartIndex uint64 + DepositBalanceToConsume phase0.Gwei + ExitBalanceToConsume phase0.Gwei + EarliestExitEpoch phase0.Epoch + ConsolidationBalanceToConsume phase0.Gwei + EarliestConsolidationEpoch phase0.Epoch + PendingDeposits []*electra.PendingDeposit + PendingPartialWithdrawals []*electra.PendingPartialWithdrawal + PendingConsolidations []*electra.PendingConsolidation + ProposerLookahead []phase0.ValidatorIndex + + // Fields accessed via rawState that 
should be unified in the accessor + LatestBlockHeader *phase0.BeaconBlockHeader + ETH1Data *phase0.ETH1Data + NextWithdrawalIndex capella.WithdrawalIndex + NextWithdrawalValidatorIndex phase0.ValidatorIndex + + // Fulu-only fields (nil for Gloas) + LatestExecutionPayloadHeader *deneb.ExecutionPayloadHeader + + // Gloas-only fields (nil/zero for Fulu) + BuilderPendingPayments []*gloas.BuilderPendingPayment + BuilderPendingWithdrawals []*gloas.BuilderPendingWithdrawal + Builders []*gloas.Builder + NextWithdrawalBuilderIndex gloas.BuilderIndex + LatestExecutionPayloadBid *gloas.ExecutionPayloadBid + LatestBlockHash phase0.Hash32 + ExecutionPayloadAvailability []byte + PayloadExpectedWithdrawals []*capella.Withdrawal + PTCWindow [][]phase0.ValidatorIndex + + // Caches (lazily populated, not written back). + // Shared via StateTransition to persist across multiple ApplyBlock calls. + caches *stateTransitionCaches + + // Back-references for writing mutated slices/values back to the underlying state. + rawState *spec.VersionedBeaconState +} + +// stateTransitionCaches holds lazily-populated caches that persist across +// multiple ApplyBlock / epoch transition calls on the same state. +// +// Caches that depend on the active validator set (totalActiveBal, +// baseRewardPerIncr, activeIndices) are keyed by epoch and auto-invalidate +// when crossing an epoch boundary. Effective-balance-dependent caches must +// also be cleared explicitly via invalidateBalanceCaches after the epoch +// transition runs processEffectiveBalanceUpdates / processRegistryUpdates / +// processPendingDeposits, since those mutate effective balances within an +// epoch without changing the cache key. 
+type stateTransitionCaches struct { + pubkeyCache map[phase0.BLSPubKey]phase0.ValidatorIndex + activeIndicesEpoch phase0.Epoch + activeIndices []phase0.ValidatorIndex + totalActiveBalEpoch phase0.Epoch + totalActiveBalCache *phase0.Gwei + baseRewardPerIncrEpoch phase0.Epoch + baseRewardPerIncrCache *phase0.Gwei + committeeCache *committeeCache +} + +func newStateTransitionCaches() *stateTransitionCaches { + return &stateTransitionCaches{ + committeeCache: newCommitteeCache(), + } +} + +// invalidateBalanceCaches clears caches that depend on validator effective +// balances (must be called after processEffectiveBalanceUpdates). +func (c *stateTransitionCaches) invalidateBalanceCaches() { + c.totalActiveBalCache = nil + c.baseRewardPerIncrCache = nil + c.activeIndices = nil + c.committeeCache = newCommitteeCache() +} + +// newAccessor creates a stateAccessor from the given state, pulling specs, +// dynSsz, and caches from the StateTransition instance. +func (st *StateTransition) newAccessor(state *spec.VersionedBeaconState) (*stateAccessor, error) { + s := &stateAccessor{ + version: state.Version, + specs: st.specs, + dynSsz: st.dynSsz, + rawState: state, + caches: st.caches, + } + + switch state.Version { + case spec.DataVersionFulu: + if state.Fulu == nil { + return nil, fmt.Errorf("nil fulu state") + } + f := state.Fulu + s.Slot = f.Slot + s.Validators = f.Validators + s.Balances = f.Balances + s.RANDAOMixes = f.RANDAOMixes + s.Slashings = f.Slashings + s.PreviousEpochParticipation = f.PreviousEpochParticipation + s.CurrentEpochParticipation = f.CurrentEpochParticipation + s.JustificationBits = f.JustificationBits + s.PreviousJustifiedCheckpoint = f.PreviousJustifiedCheckpoint + s.CurrentJustifiedCheckpoint = f.CurrentJustifiedCheckpoint + s.FinalizedCheckpoint = f.FinalizedCheckpoint + s.InactivityScores = f.InactivityScores + s.CurrentSyncCommittee = f.CurrentSyncCommittee + s.NextSyncCommittee = f.NextSyncCommittee + s.ETH1DataVotes = f.ETH1DataVotes + 
s.BlockRoots = f.BlockRoots + s.StateRoots = f.StateRoots + s.HistoricalSummaries = f.HistoricalSummaries + s.Eth1DepositIndex = f.ETH1DepositIndex + s.DepositRequestsStartIndex = f.DepositRequestsStartIndex + s.DepositBalanceToConsume = f.DepositBalanceToConsume + s.ExitBalanceToConsume = f.ExitBalanceToConsume + s.EarliestExitEpoch = f.EarliestExitEpoch + s.ConsolidationBalanceToConsume = f.ConsolidationBalanceToConsume + s.EarliestConsolidationEpoch = f.EarliestConsolidationEpoch + s.PendingDeposits = f.PendingDeposits + s.PendingPartialWithdrawals = f.PendingPartialWithdrawals + s.PendingConsolidations = f.PendingConsolidations + s.ProposerLookahead = f.ProposerLookahead + s.LatestBlockHeader = f.LatestBlockHeader + s.ETH1Data = f.ETH1Data + s.NextWithdrawalIndex = f.NextWithdrawalIndex + s.NextWithdrawalValidatorIndex = f.NextWithdrawalValidatorIndex + s.LatestExecutionPayloadHeader = f.LatestExecutionPayloadHeader + case spec.DataVersionGloas: + if state.Gloas == nil { + return nil, fmt.Errorf("nil gloas state") + } + g := state.Gloas + s.Slot = g.Slot + s.Validators = g.Validators + s.Balances = g.Balances + s.RANDAOMixes = g.RANDAOMixes + s.Slashings = g.Slashings + s.PreviousEpochParticipation = g.PreviousEpochParticipation + s.CurrentEpochParticipation = g.CurrentEpochParticipation + s.JustificationBits = g.JustificationBits + s.PreviousJustifiedCheckpoint = g.PreviousJustifiedCheckpoint + s.CurrentJustifiedCheckpoint = g.CurrentJustifiedCheckpoint + s.FinalizedCheckpoint = g.FinalizedCheckpoint + s.InactivityScores = g.InactivityScores + s.CurrentSyncCommittee = g.CurrentSyncCommittee + s.NextSyncCommittee = g.NextSyncCommittee + s.ETH1DataVotes = g.ETH1DataVotes + s.BlockRoots = g.BlockRoots + s.StateRoots = g.StateRoots + s.HistoricalSummaries = g.HistoricalSummaries + s.Eth1DepositIndex = g.ETH1DepositIndex + s.DepositRequestsStartIndex = g.DepositRequestsStartIndex + s.DepositBalanceToConsume = g.DepositBalanceToConsume + s.ExitBalanceToConsume = 
g.ExitBalanceToConsume + s.EarliestExitEpoch = g.EarliestExitEpoch + s.ConsolidationBalanceToConsume = g.ConsolidationBalanceToConsume + s.EarliestConsolidationEpoch = g.EarliestConsolidationEpoch + s.PendingDeposits = g.PendingDeposits + s.PendingPartialWithdrawals = g.PendingPartialWithdrawals + s.PendingConsolidations = g.PendingConsolidations + s.ProposerLookahead = g.ProposerLookahead + s.BuilderPendingPayments = g.BuilderPendingPayments + s.BuilderPendingWithdrawals = g.BuilderPendingWithdrawals + s.LatestBlockHeader = g.LatestBlockHeader + s.ETH1Data = g.ETH1Data + s.NextWithdrawalIndex = g.NextWithdrawalIndex + s.NextWithdrawalValidatorIndex = g.NextWithdrawalValidatorIndex + s.Builders = g.Builders + s.NextWithdrawalBuilderIndex = g.NextWithdrawalBuilderIndex + s.LatestExecutionPayloadBid = g.LatestExecutionPayloadBid + s.LatestBlockHash = g.LatestBlockHash + s.ExecutionPayloadAvailability = g.ExecutionPayloadAvailability + s.PayloadExpectedWithdrawals = g.PayloadExpectedWithdrawals + s.PTCWindow = g.PTCWindow + default: + return nil, fmt.Errorf("unsupported state version: %v", state.Version) + } + + return s, nil +} + +// writeBack writes mutated slice headers and scalar fields back to the underlying +// VersionedBeaconState. This is needed because Go slice reassignment (e.g. +// s.Balances = newSlice) doesn't update the original struct field. +// Call this after all epoch processing is complete. 
+func (s *stateAccessor) writeBack() { + switch s.version { + case spec.DataVersionFulu: + f := s.rawState.Fulu + f.Slot = s.Slot + f.Validators = s.Validators + f.Balances = s.Balances + f.RANDAOMixes = s.RANDAOMixes + f.Slashings = s.Slashings + f.PreviousEpochParticipation = s.PreviousEpochParticipation + f.CurrentEpochParticipation = s.CurrentEpochParticipation + f.JustificationBits = s.JustificationBits + f.PreviousJustifiedCheckpoint = s.PreviousJustifiedCheckpoint + f.CurrentJustifiedCheckpoint = s.CurrentJustifiedCheckpoint + f.FinalizedCheckpoint = s.FinalizedCheckpoint + f.InactivityScores = s.InactivityScores + f.CurrentSyncCommittee = s.CurrentSyncCommittee + f.NextSyncCommittee = s.NextSyncCommittee + f.ETH1DataVotes = s.ETH1DataVotes + f.BlockRoots = s.BlockRoots + f.StateRoots = s.StateRoots + f.HistoricalSummaries = s.HistoricalSummaries + f.ETH1DepositIndex = s.Eth1DepositIndex + f.DepositRequestsStartIndex = s.DepositRequestsStartIndex + f.DepositBalanceToConsume = s.DepositBalanceToConsume + f.ExitBalanceToConsume = s.ExitBalanceToConsume + f.EarliestExitEpoch = s.EarliestExitEpoch + f.ConsolidationBalanceToConsume = s.ConsolidationBalanceToConsume + f.EarliestConsolidationEpoch = s.EarliestConsolidationEpoch + f.PendingDeposits = s.PendingDeposits + f.PendingPartialWithdrawals = s.PendingPartialWithdrawals + f.PendingConsolidations = s.PendingConsolidations + f.ProposerLookahead = s.ProposerLookahead + f.LatestBlockHeader = s.LatestBlockHeader + f.ETH1Data = s.ETH1Data + f.NextWithdrawalIndex = s.NextWithdrawalIndex + f.NextWithdrawalValidatorIndex = s.NextWithdrawalValidatorIndex + f.LatestExecutionPayloadHeader = s.LatestExecutionPayloadHeader + case spec.DataVersionGloas: + g := s.rawState.Gloas + g.Slot = s.Slot + g.Validators = s.Validators + g.Balances = s.Balances + g.RANDAOMixes = s.RANDAOMixes + g.Slashings = s.Slashings + g.PreviousEpochParticipation = s.PreviousEpochParticipation + g.CurrentEpochParticipation = 
s.CurrentEpochParticipation + g.JustificationBits = s.JustificationBits + g.PreviousJustifiedCheckpoint = s.PreviousJustifiedCheckpoint + g.CurrentJustifiedCheckpoint = s.CurrentJustifiedCheckpoint + g.FinalizedCheckpoint = s.FinalizedCheckpoint + g.InactivityScores = s.InactivityScores + g.CurrentSyncCommittee = s.CurrentSyncCommittee + g.NextSyncCommittee = s.NextSyncCommittee + g.ETH1DataVotes = s.ETH1DataVotes + g.BlockRoots = s.BlockRoots + g.StateRoots = s.StateRoots + g.HistoricalSummaries = s.HistoricalSummaries + g.ETH1DepositIndex = s.Eth1DepositIndex + g.DepositRequestsStartIndex = s.DepositRequestsStartIndex + g.DepositBalanceToConsume = s.DepositBalanceToConsume + g.ExitBalanceToConsume = s.ExitBalanceToConsume + g.EarliestExitEpoch = s.EarliestExitEpoch + g.ConsolidationBalanceToConsume = s.ConsolidationBalanceToConsume + g.EarliestConsolidationEpoch = s.EarliestConsolidationEpoch + g.PendingDeposits = s.PendingDeposits + g.PendingPartialWithdrawals = s.PendingPartialWithdrawals + g.PendingConsolidations = s.PendingConsolidations + g.ProposerLookahead = s.ProposerLookahead + g.BuilderPendingPayments = s.BuilderPendingPayments + g.BuilderPendingWithdrawals = s.BuilderPendingWithdrawals + g.LatestBlockHeader = s.LatestBlockHeader + g.ETH1Data = s.ETH1Data + g.NextWithdrawalIndex = s.NextWithdrawalIndex + g.NextWithdrawalValidatorIndex = s.NextWithdrawalValidatorIndex + g.Builders = s.Builders + g.NextWithdrawalBuilderIndex = s.NextWithdrawalBuilderIndex + g.LatestExecutionPayloadBid = s.LatestExecutionPayloadBid + g.LatestBlockHash = s.LatestBlockHash + g.ExecutionPayloadAvailability = s.ExecutionPayloadAvailability + g.PayloadExpectedWithdrawals = s.PayloadExpectedWithdrawals + g.PTCWindow = s.PTCWindow + } +} + +// computeStateHTR computes the hash tree root of the underlying state. +// Uses dynamic-ssz when available (roughly 2x faster than fastssz). +// Must call writeBack() first to ensure all accessor fields are synced. 
+func (s *stateAccessor) computeStateHTR() (phase0.Root, error) { + s.writeBack() + switch s.version { + case spec.DataVersionFulu: + return s.dynSsz.HashTreeRoot(s.rawState.Fulu) + case spec.DataVersionGloas: + return s.dynSsz.HashTreeRoot(s.rawState.Gloas) + default: + return phase0.Root{}, fmt.Errorf("unsupported version: %v", s.version) + } +} + +// computeLatestBlockHeaderHTR computes hash_tree_root(state.latest_block_header). +func (s *stateAccessor) computeLatestBlockHeaderHTR() (phase0.Root, error) { + if s.LatestBlockHeader == nil { + return phase0.Root{}, nil + } + return s.LatestBlockHeader.HashTreeRoot() +} + +// clearNextSlotAvailabilityBit clears the execution payload availability bit +// for the next slot (Gloas-specific process_slot step). +func (s *stateAccessor) clearNextSlotAvailabilityBit() { + if s.version < spec.DataVersionGloas || len(s.ExecutionPayloadAvailability) == 0 { + return + } + nextIdx := (uint64(s.Slot) + 1) % s.specs.SlotsPerHistoricalRoot + byteIdx := nextIdx / 8 + bitIdx := nextIdx % 8 + if byteIdx < uint64(len(s.ExecutionPayloadAvailability)) { + s.ExecutionPayloadAvailability[byteIdx] &^= 1 << bitIdx + } +} + +// setAvailabilityBit sets the execution payload availability bit for the current slot. +func (s *stateAccessor) setAvailabilityBit() { + if s.version < spec.DataVersionGloas || len(s.ExecutionPayloadAvailability) == 0 { + return + } + bitfieldLen := uint64(len(s.ExecutionPayloadAvailability)) * 8 + idx := uint64(s.Slot) % bitfieldLen + s.ExecutionPayloadAvailability[idx/8] |= 1 << (idx % 8) +} + +// getAvailabilityBit returns the execution payload availability bit for a given slot. 
+func (s *stateAccessor) getAvailabilityBit(slot phase0.Slot) bool { + if s.version < spec.DataVersionGloas || len(s.ExecutionPayloadAvailability) == 0 { + return false + } + idx := uint64(slot) % s.specs.SlotsPerHistoricalRoot + byteIdx := idx / 8 + bitIdx := idx % 8 + if byteIdx >= uint64(len(s.ExecutionPayloadAvailability)) { + return false + } + return s.ExecutionPayloadAvailability[byteIdx]&(1< 0 && v.WithdrawalCredentials[0] == 0x02 +} + +// hasExecutionWithdrawalCredential checks for 0x01 or 0x02 withdrawal credential prefix. +func hasExecutionWithdrawalCredential(v *phase0.Validator) bool { + if len(v.WithdrawalCredentials) == 0 { + return false + } + return v.WithdrawalCredentials[0] == 0x01 || v.WithdrawalCredentials[0] == 0x02 +} + +// isEligibleForActivationQueue checks if a validator is eligible to be added to activation queue. +// Modified in Electra: https://github.com/ethereum/consensus-specs/blob/master/specs/electra/beacon-chain.md#modified-is_eligible_for_activation_queue +func isEligibleForActivationQueue(v *phase0.Validator, specs *consensus.ChainSpec) bool { + return v.ActivationEligibilityEpoch == FarFutureEpoch && + v.EffectiveBalance >= phase0.Gwei(specs.MinActivationBalance) +} + +// isEligibleForActivation checks if a validator is eligible for activation. +// https://github.com/ethereum/consensus-specs/blob/master/specs/phase0/beacon-chain.md#is_eligible_for_activation +func isEligibleForActivation(v *phase0.Validator, finalizedEpoch phase0.Epoch) bool { + return v.ActivationEligibilityEpoch <= finalizedEpoch && + v.ActivationEpoch == FarFutureEpoch +} + +// intSqrt returns the integer square root of n. +func intSqrt(n uint64) uint64 { + if n == 0 { + return 0 + } + x := n + y := (x + 1) / 2 + for y < x { + x = y + y = (x + n/x) / 2 + } + return x +} + +// FarFutureEpoch is the sentinel value for unset epochs. +const FarFutureEpoch = phase0.Epoch(math.MaxUint64) + +// Altair constants. 
+const ( + TimelySourceFlagIndex = 0 + TimelyTargetFlagIndex = 1 + TimelyHeadFlagIndex = 2 + + TimelySourceWeight = 14 + TimelyTargetWeight = 26 + TimelyHeadWeight = 14 + SyncRewardWeight = 2 + ProposerWeight = 8 + WeightDenominator = 64 + + ParticipationFlagCount = 3 + BaseRewardsPerEpoch = 4 +) + +var ParticipationFlagWeights = [ParticipationFlagCount]uint64{ + TimelySourceWeight, + TimelyTargetWeight, + TimelyHeadWeight, +} + +// hasFlag checks if a participation flags byte has the given flag set. +func hasFlag(flags altair.ParticipationFlags, flagIndex int) bool { + return flags&altair.ParticipationFlags(1<= delta { + s.Balances[index] -= delta + } else { + s.Balances[index] = 0 + } +} + +// getBalanceChurnLimit returns the balance churn limit (Electra+). +// New in Electra: https://github.com/ethereum/consensus-specs/blob/master/specs/electra/beacon-chain.md#new-get_balance_churn_limit +func (s *stateAccessor) getBalanceChurnLimit() phase0.Gwei { + churn := uint64(s.getTotalActiveBalance()) / s.specs.ChurnLimitQuotient + if s.specs.MinPerEpochChurnLimitElectra > churn { + churn = s.specs.MinPerEpochChurnLimitElectra + } + return phase0.Gwei(churn - churn%s.specs.EffectiveBalanceIncrement) +} + +// getActivationExitChurnLimit returns the activation/exit churn limit. 
+// New in Electra: https://github.com/ethereum/consensus-specs/blob/master/specs/electra/beacon-chain.md#new-get_activation_exit_churn_limit +func (s *stateAccessor) getActivationExitChurnLimit() phase0.Gwei { + churn := s.getBalanceChurnLimit() + if phase0.Gwei(s.specs.MaxPerEpochActivationExitChurnLimit) < churn { + return phase0.Gwei(s.specs.MaxPerEpochActivationExitChurnLimit) + } + return churn +} diff --git a/indexer/beacon/statetransition/statetransition.go b/indexer/beacon/statetransition/statetransition.go new file mode 100644 index 000000000..07b526480 --- /dev/null +++ b/indexer/beacon/statetransition/statetransition.go @@ -0,0 +1,334 @@ +// Package statetransition implements consensus-spec state transition functions +// for Fulu+ beacon states. +// +// The primary entry point is PrepareEpochPreState, which takes a post-block state +// (typically the last block of a parent epoch) and advances it to the pre-state +// of a target epoch by applying payload processing (Gloas+) and epoch transitions. +// +// This produces the normally inaccessible pre-slot-1, post-epoch-transition state +// that the beacon API cannot serve directly. +// +// Only needed for Fulu+ states. Pre-Fulu states already provide the correct +// epoch boundary values from the post-state of the parent epoch's last block. +package statetransition + +import ( + "fmt" + "time" + + "github.com/ethpandaops/dora/clients/consensus" + "github.com/ethpandaops/go-eth2-client/spec" + "github.com/ethpandaops/go-eth2-client/spec/electra" + "github.com/ethpandaops/go-eth2-client/spec/gloas" + "github.com/ethpandaops/go-eth2-client/spec/phase0" + dynssz "github.com/pk910/dynamic-ssz" +) + +// StateTransition holds the chain spec, dynamic SSZ encoder, and reusable caches +// for applying multiple blocks and epoch transitions to the same state. +// Create one per state replay session and reuse across ApplyBlock calls. 
+type StateTransition struct { + specs *consensus.ChainSpec + dynSsz *dynssz.DynSsz + caches *stateTransitionCaches +} + +// NewStateTransition creates a new StateTransition with the given chain spec and dynssz encoder. +func NewStateTransition(specs *consensus.ChainSpec, ds *dynssz.DynSsz) *StateTransition { + return &StateTransition{ + specs: specs, + dynSsz: ds, + caches: newStateTransitionCaches(), + } +} + +// ApplyInfo collects optional timing information from block application. +// Pass a non-nil pointer to ApplyBlockWithInfo to receive this data. +type ApplyInfo struct { + // EpochTransitionDur is non-zero when the block's process_slots crossed an + // epoch boundary, triggering process_epoch. + EpochTransitionDur time.Duration +} + +// ApplyBlock applies a beacon block to the state in-place. +func (st *StateTransition) ApplyBlock(state *spec.VersionedBeaconState, block *spec.VersionedSignedBeaconBlock) error { + return st.applyBlock(state, block, phase0.Root{}, nil) +} + +// ApplyBlockWithStateRoot is like ApplyBlock but accepts the current state's +// hash tree root as a hint, skipping the expensive HTR computation in the first +// process_slot. The hint must match the HTR of the current state — typically +// sourced from the previously applied block's state_root field. Passing an +// incorrect hint will produce an inconsistent state and is undefined behavior. +func (st *StateTransition) ApplyBlockWithStateRoot(state *spec.VersionedBeaconState, block *spec.VersionedSignedBeaconBlock, parentStateRoot phase0.Root) error { + return st.applyBlock(state, block, parentStateRoot, nil) +} + +// ApplyBlockWithInfo is like ApplyBlockWithStateRoot but also populates info +// with timing details (e.g. epoch transition duration). 
+func (st *StateTransition) ApplyBlockWithInfo(state *spec.VersionedBeaconState, block *spec.VersionedSignedBeaconBlock, parentStateRoot phase0.Root, info *ApplyInfo) error { + return st.applyBlock(state, block, parentStateRoot, info) +} + +// ApplyExecutionPayload applies a Gloas execution payload to the state. +func (st *StateTransition) ApplyExecutionPayload(state *spec.VersionedBeaconState, payload *gloas.SignedExecutionPayloadEnvelope) error { + if payload == nil || payload.Message == nil { + return nil + } + return st.processExecutionPayload(state, payload.Message) +} + +// PrepareEpochPreState advances a post-block state to the pre-state of the target epoch. +func (st *StateTransition) PrepareEpochPreState(state *spec.VersionedBeaconState, epoch phase0.Epoch, payload *gloas.ExecutionPayloadEnvelope, info *TransitionInfo) error { + if state.Version < spec.DataVersionFulu { + return nil + } + + // Step 1: For Gloas+ pre-payload states, apply the execution payload transition. + if payload != nil && IsPrePayloadState(state) { + if err := st.processExecutionPayload(state, payload); err != nil { + return fmt.Errorf("process_execution_payload: %w", err) + } + } + + // Step 2: Advance to the first slot of the target epoch. + targetSlot := phase0.Slot(uint64(epoch) * st.specs.SlotsPerEpoch) + if err := st.processSlots(state, targetSlot, info); err != nil { + return fmt.Errorf("process_slots to epoch %d (slot %d): %w", epoch, targetSlot, err) + } + + return nil +} + +// TransitionInfo collects metadata from the state transition that callers may +// need for downstream processing. Pass a non-nil pointer to PrepareEpochPreState +// to receive this information; pass nil if not needed. +type TransitionInfo struct { + // DelayedBuilderPayments is the number of delayed builder payments appended + // to BuilderPendingWithdrawals by the last epoch transition's + // process_builder_pending_payments. 
This tells the state simulator how many + // entries at the tail of the queue are delayed (vs direct payments from block payloads). + DelayedBuilderPayments uint32 +} + +// processSlots advances the state from its current slot to targetSlot, applying +// epoch transitions at every epoch boundary crossed. +// +// Skips per-slot state/block root caching (process_slot) since we cannot compute +// hash_tree_root efficiently and the cached roots don't affect the epoch transition +// outputs we need. Jumps directly to each epoch boundary. +// +// https://github.com/ethereum/consensus-specs/blob/master/specs/phase0/beacon-chain.md#process_slots +func (st *StateTransition) processSlots(state *spec.VersionedBeaconState, targetSlot phase0.Slot, info *TransitionInfo) error { + currentSlot, err := state.Slot() + if err != nil { + return fmt.Errorf("failed to get state slot: %w", err) + } + + if currentSlot >= targetSlot { + return nil + } + + s, err := st.newAccessor(state) + if err != nil { + return fmt.Errorf("failed to create state accessor: %w", err) + } + + slotsPerEpoch := st.specs.SlotsPerEpoch + + for s.Slot < targetSlot { + processSlotBlockRootCaching(s) + + // Apply epoch transition at epoch boundary (last slot of epoch). + if (uint64(s.Slot)+1)%slotsPerEpoch == 0 { + if err := processEpochInternal(s, info); err != nil { + return fmt.Errorf("process_epoch at slot %d: %w", s.Slot, err) + } + } + + s.Slot++ + } + + s.writeBack() + + return nil +} + +// processSlotBlockRootCaching implements the essential parts of process_slot: +// computes the state root, fills latest_block_header.state_root if zero, +// then caches the block root. The state root must be computed first because +// the block root depends on the header's state_root field. 
+func processSlotBlockRootCaching(s *stateAccessor) { + stateRoot, err := s.computeStateHTR() + if err != nil { + return + } + + idx := uint64(s.Slot) % s.specs.SlotsPerHistoricalRoot + s.StateRoots[idx] = stateRoot + + // Fill latest_block_header.state_root if zero (set after each processBlockHeader). + if s.LatestBlockHeader != nil && s.LatestBlockHeader.StateRoot == (phase0.Root{}) { + s.LatestBlockHeader.StateRoot = stateRoot + } + + blockRoot, err := s.computeLatestBlockHeaderHTR() + if err != nil { + return + } + + s.BlockRoots[idx] = blockRoot + + // Gloas: clear the next slot's execution payload availability bit. + s.clearNextSlotAvailabilityBit() +} + +// IsPrePayloadState checks whether a Gloas state is pre-payload +// (the execution payload for the latest block has NOT been processed yet). +func IsPrePayloadState(state *spec.VersionedBeaconState) bool { + if state.Version < spec.DataVersionGloas || state.Gloas == nil { + return false + } + + slot, err := state.Slot() + if err != nil { + return false + } + + bitfieldLen := uint64(len(state.Gloas.ExecutionPayloadAvailability)) * 8 + if bitfieldLen == 0 { + return true + } + + idx := uint64(slot) % bitfieldLen + return state.Gloas.ExecutionPayloadAvailability[idx/8]&(1<<(idx%8)) == 0 +} + +// processExecutionPayload applies the Gloas execution payload state transition +// on a pre-payload state. Processes execution requests and records the builder +// payment, transitioning the state from pre-payload to post-payload. 
+// +// New in Gloas: https://github.com/ethereum/consensus-specs/blob/master/specs/gloas/beacon-chain.md#modified-process_execution_payload +func (st *StateTransition) processExecutionPayload(state *spec.VersionedBeaconState, envelope *gloas.ExecutionPayloadEnvelope) error { + if state.Version < spec.DataVersionGloas || state.Gloas == nil || envelope == nil { + return nil + } + + s, err := st.newAccessor(state) + if err != nil { + return fmt.Errorf("failed to create state accessor: %w", err) + } + + // Cache latest block header state root (spec: fill before payload processing). + // https://github.com/ethereum/consensus-specs/blob/master/specs/gloas/beacon-chain.md#modified-process_execution_payload + if s.LatestBlockHeader != nil && s.LatestBlockHeader.StateRoot == (phase0.Root{}) { + stateRoot, htrErr := s.computeStateHTR() + if htrErr != nil { + return fmt.Errorf("failed to compute state root for header fill: %w", htrErr) + } + s.LatestBlockHeader.StateRoot = stateRoot + } + + // Process execution requests (deposits, withdrawals, consolidations). + if envelope.ExecutionRequests != nil { + for _, deposit := range envelope.ExecutionRequests.Deposits { + s.PendingDeposits = append(s.PendingDeposits, &electra.PendingDeposit{ + Pubkey: deposit.Pubkey, + WithdrawalCredentials: deposit.WithdrawalCredentials, + Amount: deposit.Amount, + Signature: deposit.Signature, + Slot: s.Slot, + }) + } + + for _, withdrawal := range envelope.ExecutionRequests.Withdrawals { + processWithdrawalRequest(s, withdrawal) + } + + for _, consolidation := range envelope.ExecutionRequests.Consolidations { + processConsolidationRequest(s, consolidation) + } + } + + // Queue the builder payment (direct withdrawal for delivered payload). 
+ slotsPerEpoch := st.specs.SlotsPerEpoch + paymentIdx := slotsPerEpoch + uint64(s.Slot)%slotsPerEpoch + if paymentIdx < uint64(len(s.BuilderPendingPayments)) { + payment := s.BuilderPendingPayments[paymentIdx] + if payment != nil && payment.Withdrawal != nil && payment.Withdrawal.Amount > 0 { + s.BuilderPendingWithdrawals = append(s.BuilderPendingWithdrawals, payment.Withdrawal) + } + s.BuilderPendingPayments[paymentIdx] = &gloas.BuilderPendingPayment{} + } + + // Set execution payload availability bit. + s.setAvailabilityBit() + + // Cache the execution payload block hash. + if envelope.Payload != nil { + s.LatestBlockHash = envelope.Payload.BlockHash + } + + s.writeBack() + return nil +} + +// processEpochInternal runs the epoch transition on the accessor without writeBack. +// Used by processSlots at each epoch boundary. +// +// Fulu: https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/beacon-chain.md#modified-process_epoch +// Modified in Gloas: https://github.com/ethereum/consensus-specs/blob/master/specs/gloas/beacon-chain.md#modified-process_epoch +func processEpochInternal(s *stateAccessor, info *TransitionInfo) error { + if err := processJustificationAndFinalization(s); err != nil { + return fmt.Errorf("process_justification_and_finalization: %w", err) + } + + if err := processInactivityUpdates(s); err != nil { + return fmt.Errorf("process_inactivity_updates: %w", err) + } + + if err := processRewardsAndPenalties(s); err != nil { + return fmt.Errorf("process_rewards_and_penalties: %w", err) + } + + if err := processRegistryUpdates(s); err != nil { + return fmt.Errorf("process_registry_updates: %w", err) + } + + if err := processSlashings(s); err != nil { + return fmt.Errorf("process_slashings: %w", err) + } + + processEth1DataReset(s) + + if err := processPendingDeposits(s); err != nil { + return fmt.Errorf("process_pending_deposits: %w", err) + } + + processPendingConsolidations(s) + + // Gloas-only: process builder pending payments + if 
s.version >= spec.DataVersionGloas { + delayedCount := processBuilderPendingPayments(s) + if info != nil { + info.DelayedBuilderPayments = delayedCount + } + } + + processEffectiveBalanceUpdates(s) + // Effective balances may have changed; clear caches that depend on them. + s.caches.invalidateBalanceCaches() + processSlashingsReset(s) + processRandaoMixesReset(s) + processHistoricalSummariesUpdate(s) + processParticipationFlagUpdates(s) + processSyncCommitteeUpdates(s) + processProposerLookahead(s) + + // Gloas-only: process PTC window + if s.version >= spec.DataVersionGloas { + processPtcWindow(s) + } + + return nil +} diff --git a/indexer/beacon/statetransition/withdrawals.go b/indexer/beacon/statetransition/withdrawals.go new file mode 100644 index 000000000..b3b440a56 --- /dev/null +++ b/indexer/beacon/statetransition/withdrawals.go @@ -0,0 +1,304 @@ +package statetransition + +import ( + "github.com/ethpandaops/go-eth2-client/spec" + "github.com/ethpandaops/go-eth2-client/spec/capella" + "github.com/ethpandaops/go-eth2-client/spec/gloas" + "github.com/ethpandaops/go-eth2-client/spec/phase0" +) + +// expectedWithdrawals holds the result of get_expected_withdrawals. +type expectedWithdrawals struct { + withdrawals []*capella.Withdrawal + processedBuilderWithdrawalsCount uint64 + processedPartialWithdrawalsCount uint64 + processedBuildersSweepCount uint64 + processedValidatorsSweepCount uint64 +} + +// processWithdrawals implements the Gloas version of process_withdrawals. 
+// Modified in Gloas: https://github.com/ethereum/consensus-specs/blob/master/specs/gloas/beacon-chain.md#modified-process_withdrawals +func processWithdrawals(s *stateAccessor) { + if !isParentBlockFull(s) { + return + } + + expected := getExpectedWithdrawals(s) + + // apply_withdrawals + for _, w := range expected.withdrawals { + if isBuilderIndex(w.ValidatorIndex) { + // Builder withdrawal: decrease builder balance + builderIdx := convertValidatorIndexToBuilderIndex(w.ValidatorIndex) + if int(builderIdx) < len(s.Builders) { + builder := s.Builders[builderIdx] + amount := phase0.Gwei(w.Amount) + if amount > builder.Balance { + amount = builder.Balance + } + builder.Balance -= amount + } + } else { + if int(w.ValidatorIndex) < len(s.Balances) { + s.decreaseBalance(w.ValidatorIndex, phase0.Gwei(w.Amount)) + } + } + } + + // update_next_withdrawal_index + if len(expected.withdrawals) > 0 { + lastIdx := expected.withdrawals[len(expected.withdrawals)-1].Index + s.NextWithdrawalIndex = lastIdx + 1 + } + + // update_payload_expected_withdrawals + s.PayloadExpectedWithdrawals = expected.withdrawals + + // update_builder_pending_withdrawals: remove processed entries from the front + if expected.processedBuilderWithdrawalsCount > 0 { + n := expected.processedBuilderWithdrawalsCount + if n > uint64(len(s.BuilderPendingWithdrawals)) { + n = uint64(len(s.BuilderPendingWithdrawals)) + } + s.BuilderPendingWithdrawals = s.BuilderPendingWithdrawals[n:] + } + + // update_pending_partial_withdrawals: remove processed entries from the front + if expected.processedPartialWithdrawalsCount > 0 { + n := expected.processedPartialWithdrawalsCount + if n > uint64(len(s.PendingPartialWithdrawals)) { + n = uint64(len(s.PendingPartialWithdrawals)) + } + s.PendingPartialWithdrawals = s.PendingPartialWithdrawals[n:] + } + + // update_next_withdrawal_builder_index + if expected.processedBuildersSweepCount > 0 && len(s.Builders) > 0 { + nextIdx := uint64(s.NextWithdrawalBuilderIndex) + 
expected.processedBuildersSweepCount + s.NextWithdrawalBuilderIndex = gloas.BuilderIndex(nextIdx % uint64(len(s.Builders))) + } + + // update_next_withdrawal_validator_index (Capella spec, unchanged in Gloas) + if uint64(len(expected.withdrawals)) == s.specs.MaxWithdrawalsPerPayload { + lastW := expected.withdrawals[len(expected.withdrawals)-1] + s.NextWithdrawalValidatorIndex = phase0.ValidatorIndex((uint64(lastW.ValidatorIndex) + 1) % uint64(len(s.Validators))) + } else { + s.NextWithdrawalValidatorIndex = phase0.ValidatorIndex((uint64(s.NextWithdrawalValidatorIndex) + s.specs.MaxValidatorsPerWithdrawalsSweep) % uint64(len(s.Validators))) + } +} + +// BuilderIndexFlag separates builder indices from validator indices. +const BuilderIndexFlag = uint64(1 << 40) + +func isBuilderIndex(idx phase0.ValidatorIndex) bool { + return uint64(idx)&BuilderIndexFlag != 0 +} + +func convertBuilderIndexToValidatorIndex(builderIdx gloas.BuilderIndex) phase0.ValidatorIndex { + return phase0.ValidatorIndex(uint64(builderIdx) | BuilderIndexFlag) +} + +func convertValidatorIndexToBuilderIndex(validatorIdx phase0.ValidatorIndex) gloas.BuilderIndex { + return gloas.BuilderIndex(uint64(validatorIdx) &^ BuilderIndexFlag) +} + +// getBalanceAfterWithdrawals returns a validator's balance minus any amounts +// already scheduled in prior withdrawals within the same batch. +// https://github.com/ethereum/consensus-specs/blob/master/specs/capella/beacon-chain.md#new-get_balance_after_withdrawals +func getBalanceAfterWithdrawals(s *stateAccessor, vidx phase0.ValidatorIndex, withdrawals []*capella.Withdrawal) phase0.Gwei { + balance := s.Balances[vidx] + for _, w := range withdrawals { + if w.ValidatorIndex == vidx { + if balance >= w.Amount { + balance -= w.Amount + } else { + balance = 0 + } + } + } + return balance +} + +// isFullyWithdrawableValidator checks if a validator is fully withdrawable. 
+// https://github.com/ethereum/consensus-specs/blob/master/specs/electra/beacon-chain.md#modified-is_fully_withdrawable_validator +func isFullyWithdrawableValidator(v *phase0.Validator, balance phase0.Gwei, epoch phase0.Epoch) bool { + return hasExecutionWithdrawalCredential(v) && v.WithdrawableEpoch <= epoch && balance > 0 +} + +// isPartiallyWithdrawableValidator checks if a validator is partially withdrawable (sweep). +// https://github.com/ethereum/consensus-specs/blob/master/specs/electra/beacon-chain.md#modified-is_partially_withdrawable_validator +func isPartiallyWithdrawableValidator(v *phase0.Validator, balance phase0.Gwei, maxEB phase0.Gwei) bool { + return hasExecutionWithdrawalCredential(v) && v.EffectiveBalance == maxEB && balance > maxEB +} + +// isEligibleForPartialWithdrawals checks if a validator can process a pending partial withdrawal. +// https://github.com/ethereum/consensus-specs/blob/master/specs/electra/beacon-chain.md#new-is_eligible_for_partial_withdrawals +func isEligibleForPartialWithdrawals(v *phase0.Validator, balance phase0.Gwei, minActivationBalance phase0.Gwei) bool { + return v.ExitEpoch == FarFutureEpoch && + v.EffectiveBalance >= minActivationBalance && + balance > minActivationBalance +} + +// getExpectedWithdrawals computes the expected withdrawals for the current slot. +// Modified in Gloas: https://github.com/ethereum/consensus-specs/blob/master/specs/gloas/beacon-chain.md#modified-get_expected_withdrawals +func getExpectedWithdrawals(s *stateAccessor) *expectedWithdrawals { + result := &expectedWithdrawals{} + nextIdx := s.NextWithdrawalIndex + maxWithdrawals := s.specs.MaxWithdrawalsPerPayload + epoch := s.currentEpoch() + + // 1. 
Builder pending withdrawals (Gloas-specific) + if len(s.BuilderPendingWithdrawals) > 0 { + builderLimit := maxWithdrawals - 1 + for _, bpw := range s.BuilderPendingWithdrawals { + if uint64(len(result.withdrawals)) >= builderLimit { + break + } + result.withdrawals = append(result.withdrawals, &capella.Withdrawal{ + Index: nextIdx, + ValidatorIndex: convertBuilderIndexToValidatorIndex(bpw.BuilderIndex), + Address: bpw.FeeRecipient, + Amount: bpw.Amount, + }) + nextIdx++ + result.processedBuilderWithdrawalsCount++ + } + } + + // 2. Pending partial withdrawals + // Limit: min(prior_count + MAX_PENDING_PARTIALS_PER_WITHDRAWALS_SWEEP, MAX_WITHDRAWALS_PER_PAYLOAD - 1) + // https://github.com/ethereum/consensus-specs/blob/master/specs/electra/beacon-chain.md#new-get_pending_partial_withdrawals + { + partialLimit := uint64(len(result.withdrawals)) + s.specs.MaxPendingPartialsPerWithdrawalsSweep + if partialLimit > maxWithdrawals-1 { + partialLimit = maxWithdrawals - 1 + } + + minBalance := phase0.Gwei(s.specs.MinActivationBalance) + for _, pw := range s.PendingPartialWithdrawals { + if pw.WithdrawableEpoch > epoch || uint64(len(result.withdrawals)) >= partialLimit { + break + } + result.processedPartialWithdrawalsCount++ + + validator := s.Validators[pw.ValidatorIndex] + balance := getBalanceAfterWithdrawals(s, pw.ValidatorIndex, result.withdrawals) + if !isEligibleForPartialWithdrawals(validator, balance, minBalance) { + continue + } + + amount := balance - minBalance + if phase0.Gwei(pw.Amount) < amount { + amount = phase0.Gwei(pw.Amount) + } + + result.withdrawals = append(result.withdrawals, &capella.Withdrawal{ + Index: nextIdx, + ValidatorIndex: pw.ValidatorIndex, + Address: getWithdrawalAddress(validator), + Amount: amount, + }) + nextIdx++ + } + } + + // 3. 
Builder sweep withdrawals (Gloas-specific) + if len(s.Builders) > 0 { + builderSweepLimit := maxWithdrawals - 1 + buildersLimit := uint64(len(s.Builders)) + if s.specs.MaxBuildersPerWithdrawalsSweep > 0 && s.specs.MaxBuildersPerWithdrawalsSweep < buildersLimit { + buildersLimit = s.specs.MaxBuildersPerWithdrawalsSweep + } + + builderIdx := s.NextWithdrawalBuilderIndex + for i := uint64(0); i < buildersLimit; i++ { + if uint64(len(result.withdrawals)) >= builderSweepLimit { + break + } + + builder := s.Builders[builderIdx] + if builder.WithdrawableEpoch <= epoch && builder.Balance > 0 { + result.withdrawals = append(result.withdrawals, &capella.Withdrawal{ + Index: nextIdx, + ValidatorIndex: convertBuilderIndexToValidatorIndex(builderIdx), + Address: builder.ExecutionAddress, + Amount: builder.Balance, + }) + nextIdx++ + } + + builderIdx = gloas.BuilderIndex((uint64(builderIdx) + 1) % uint64(len(s.Builders))) + result.processedBuildersSweepCount++ + } + } + + // 4. Validator sweep withdrawals (uses full MAX limit) + // https://github.com/ethereum/consensus-specs/blob/master/specs/electra/beacon-chain.md#modified-get_validators_sweep_withdrawals + validatorCount := uint64(len(s.Validators)) + if validatorCount > 0 { + startIdx := uint64(s.NextWithdrawalValidatorIndex) + bound := s.specs.MaxValidatorsPerWithdrawalsSweep + if validatorCount < bound { + bound = validatorCount + } + + for i := uint64(0); i < bound && uint64(len(result.withdrawals)) < maxWithdrawals; i++ { + vidx := phase0.ValidatorIndex((startIdx + i) % validatorCount) + validator := s.Validators[vidx] + balance := getBalanceAfterWithdrawals(s, vidx, result.withdrawals) + + result.processedValidatorsSweepCount++ + + // Full withdrawal: exited and withdrawable + if isFullyWithdrawableValidator(validator, balance, epoch) { + result.withdrawals = append(result.withdrawals, &capella.Withdrawal{ + Index: nextIdx, + ValidatorIndex: vidx, + Address: getWithdrawalAddress(validator), + Amount: balance, + }) + 
nextIdx++ + continue + } + + // Partial (sweep) withdrawal: excess balance above max effective balance, + // only when effective_balance has reached the max. + maxEB := s.getMaxEffectiveBalance(validator) + if isPartiallyWithdrawableValidator(validator, balance, maxEB) { + result.withdrawals = append(result.withdrawals, &capella.Withdrawal{ + Index: nextIdx, + ValidatorIndex: vidx, + Address: getWithdrawalAddress(validator), + Amount: balance - maxEB, + }) + nextIdx++ + } + } + } + + return result +} + +// getWithdrawalAddress extracts the withdrawal address from validator credentials. +func getWithdrawalAddress(v *phase0.Validator) [20]byte { + var addr [20]byte + if len(v.WithdrawalCredentials) >= 32 { + copy(addr[:], v.WithdrawalCredentials[12:32]) + } + return addr +} + +// isParentBlockFull checks if the parent block had an execution payload (Gloas). +// Spec: return state.latest_execution_payload_bid.block_hash == state.latest_block_hash +// https://github.com/ethereum/consensus-specs/blob/master/specs/gloas/beacon-chain.md#new-is_parent_block_full +func isParentBlockFull(s *stateAccessor) bool { + if s.version < spec.DataVersionGloas { + return true // Pre-Gloas: always full + } + + if s.LatestExecutionPayloadBid == nil { + return false + } + + return s.LatestExecutionPayloadBid.BlockHash == s.LatestBlockHash +} diff --git a/indexer/beacon/synchronizer.go b/indexer/beacon/synchronizer.go index 436ce6fe3..9d49519a8 100644 --- a/indexer/beacon/synchronizer.go +++ b/indexer/beacon/synchronizer.go @@ -9,13 +9,14 @@ import ( "sync" "time" - "github.com/attestantio/go-eth2-client/spec" - "github.com/attestantio/go-eth2-client/spec/phase0" "github.com/ethpandaops/dora/blockdb" "github.com/ethpandaops/dora/clients/consensus" "github.com/ethpandaops/dora/db" "github.com/ethpandaops/dora/dbtypes" "github.com/ethpandaops/dora/utils" + "github.com/ethpandaops/go-eth2-client/spec" + "github.com/ethpandaops/go-eth2-client/spec/gloas" + 
"github.com/ethpandaops/go-eth2-client/spec/phase0" "github.com/jmoiron/sqlx" "github.com/sirupsen/logrus" ) @@ -264,11 +265,17 @@ func (s *synchronizer) loadBlockHeader(client *Client, slot phase0.Slot) (*phase } func (s *synchronizer) loadBlockBody(client *Client, root phase0.Root) (*spec.VersionedSignedBeaconBlock, error) { - ctx, cancel := context.WithTimeout(s.syncCtx, beaconHeaderRequestTimeout) + ctx, cancel := context.WithTimeout(s.syncCtx, beaconBodyRequestTimeout) defer cancel() return LoadBeaconBlock(ctx, client, root) } +func (s *synchronizer) loadBlockPayload(client *Client, root phase0.Root) (*gloas.SignedExecutionPayloadEnvelope, error) { + ctx, cancel := context.WithTimeout(s.syncCtx, executionPayloadRequestTimeout) + defer cancel() + return LoadExecutionPayload(ctx, client, root) +} + func (s *synchronizer) syncEpoch(syncEpoch phase0.Epoch, client *Client, lastTry bool) (bool, error) { if !utils.Config.Indexer.ResyncForceUpdate && db.IsEpochSynchronized(s.syncCtx, uint64(syncEpoch)) { return true, nil @@ -327,6 +334,17 @@ func (s *synchronizer) syncEpoch(syncEpoch phase0.Epoch, client *Client, lastTry block.SetBlock(blockBody) } + if slot > 0 && chainState.IsEip7732Enabled(chainState.EpochOfSlot(slot)) { + blockPayload, err := s.loadBlockPayload(client, phase0.Root(blockRoot)) + if err != nil && !lastTry { + return false, fmt.Errorf("error fetching slot %v execution payload: %v", slot, err) + } + + if blockPayload != nil { + block.SetExecutionPayload(blockPayload) + } + } + s.cachedBlocks[slot] = block } @@ -364,8 +382,10 @@ func (s *synchronizer) syncEpoch(syncEpoch phase0.Epoch, client *Client, lastTry dependentRoot = phase0.Root(depRoot) } - epochState := newEpochState(dependentRoot) + epochState := newEpochState(dependentRoot, syncEpoch) + t1 := time.Now() state, err := epochState.loadState(s.syncCtx, client, nil) + loadDuration := time.Since(t1) if (err != nil || epochState.loadingStatus != 2) && !lastTry { return false, fmt.Errorf("error 
fetching epoch %v state: %v", syncEpoch, err) } @@ -385,7 +405,7 @@ func (s *synchronizer) syncEpoch(syncEpoch phase0.Epoch, client *Client, lastTry if epochState != nil && epochState.loadingStatus == 2 { epochStats = newEpochStats(syncEpoch, dependentRoot) epochStats.dependentState = epochState - epochStats.processState(s.indexer, validatorSet) + epochStats.processState(s.indexer, validatorSet, loadDuration) epochStatsValues = epochStats.GetValues(false) } @@ -410,6 +430,36 @@ func (s *synchronizer) syncEpoch(syncEpoch phase0.Epoch, client *Client, lastTry sim.validatorSet = validatorSet } + // Determine payload status for canonical blocks (ePBS only) + // A payload is orphaned if the next canonical block doesn't build on it + allCanonicalBlocks := append(canonicalBlocks, nextEpochCanonicalBlocks...) + for i, block := range canonicalBlocks { + if !chainState.IsEip7732Enabled(chainState.EpochOfSlot(block.Slot)) { + continue + } + + blockIndex := block.GetBlockIndex(s.indexer.ctx) + if blockIndex == nil || blockIndex.ExecutionNumber == 0 { + continue // no execution payload + } + + // Find the next canonical block + var nextBlock *Block + if i+1 < len(allCanonicalBlocks) { + nextBlock = allCanonicalBlocks[i+1] + } + + if nextBlock != nil { + nextBlockIndex := nextBlock.GetBlockIndex(s.indexer.ctx) + if nextBlockIndex != nil { + // Check if next block builds on this block's payload + if !bytes.Equal(nextBlockIndex.ExecutionParentHash[:], blockIndex.ExecutionHash[:]) { + block.isPayloadOrphaned = true + } + } + } + } + // save blocks err = db.RunDBTransaction(func(tx *sqlx.Tx) error { err = s.indexer.dbWriter.persistEpochData(tx, syncEpoch, canonicalBlocks, epochStats, epochVotes, sim) diff --git a/indexer/beacon/validatoractivity.go b/indexer/beacon/validatoractivity.go index df56e13ff..c352fc517 100644 --- a/indexer/beacon/validatoractivity.go +++ b/indexer/beacon/validatoractivity.go @@ -7,7 +7,7 @@ import ( "sync" "time" - 
"github.com/attestantio/go-eth2-client/spec/phase0" + "github.com/ethpandaops/go-eth2-client/spec/phase0" ) // validatorActivityCache is the cache for the validator activity. diff --git a/indexer/beacon/validatorcache.go b/indexer/beacon/validatorcache.go index 54e3b86cf..d2df4908a 100644 --- a/indexer/beacon/validatorcache.go +++ b/indexer/beacon/validatorcache.go @@ -10,10 +10,10 @@ import ( "sync" "time" - v1 "github.com/attestantio/go-eth2-client/api/v1" - "github.com/attestantio/go-eth2-client/spec/phase0" "github.com/ethpandaops/dora/db" "github.com/ethpandaops/dora/dbtypes" + v1 "github.com/ethpandaops/go-eth2-client/api/v1" + "github.com/ethpandaops/go-eth2-client/spec/phase0" "github.com/jmoiron/sqlx" ) @@ -345,8 +345,9 @@ func (cache *validatorCache) getValidatorFlags(validatorIndex phase0.ValidatorIn return cache.valsetCache[validatorIndex].statusFlags } -// setFinalizedEpoch sets the last finalized epoch and updates the validator set -func (cache *validatorCache) setFinalizedEpoch(epoch phase0.Epoch, nextEpochDependentRoot phase0.Root) { +// setFinalizedEpoch sets the last finalized epoch and updates the validator set. +// dependentRoot is the dependent root of the finalized epoch (last block of the parent epoch). 
+func (cache *validatorCache) setFinalizedEpoch(epoch phase0.Epoch, dependentRoot phase0.Root) { cache.cacheMutex.Lock() defer cache.cacheMutex.Unlock() @@ -361,7 +362,7 @@ func (cache *validatorCache) setFinalizedEpoch(epoch phase0.Epoch, nextEpochDepe // Find the finalized validator state for _, diff := range cachedValidator.validatorDiffs { - if diff.dependentRoot == nextEpochDependentRoot { + if diff.dependentRoot == dependentRoot { cachedValidator.finalValidator = diff.validator cachedValidator.finalChecksum = calculateValidatorChecksum(diff.validator) cachedValidator.statusFlags = GetValidatorStatusFlags(diff.validator) @@ -398,6 +399,9 @@ func (cache *validatorCache) setFinalizedEpoch(epoch phase0.Epoch, nextEpochDepe cache.lastFinalizedActiveCount = activeCount + cache.indexer.logger.Infof("finalized validator set for epoch %v (dependent root: %v, updated: %v, total: %v)", + epoch, dependentRoot.String(), updatedCount, len(cache.valsetCache)) + if updatedCount > 0 { select { case cache.triggerDbUpdate <- true: diff --git a/indexer/beacon/writedb.go b/indexer/beacon/writedb.go index d684ab11a..1cc8fafda 100644 --- a/indexer/beacon/writedb.go +++ b/indexer/beacon/writedb.go @@ -4,15 +4,15 @@ import ( "fmt" "math" - "github.com/attestantio/go-eth2-client/spec" - "github.com/attestantio/go-eth2-client/spec/bellatrix" - "github.com/attestantio/go-eth2-client/spec/capella" - "github.com/attestantio/go-eth2-client/spec/electra" - "github.com/attestantio/go-eth2-client/spec/phase0" "github.com/ethpandaops/dora/clients/consensus" "github.com/ethpandaops/dora/db" "github.com/ethpandaops/dora/dbtypes" "github.com/ethpandaops/dora/utils" + "github.com/ethpandaops/go-eth2-client/spec" + "github.com/ethpandaops/go-eth2-client/spec/bellatrix" + "github.com/ethpandaops/go-eth2-client/spec/capella" + "github.com/ethpandaops/go-eth2-client/spec/electra" + "github.com/ethpandaops/go-eth2-client/spec/phase0" "github.com/jmoiron/sqlx" ) @@ -79,6 +79,11 @@ func (dbw *dbWriter) 
persistBlockData(tx *sqlx.Tx, block *Block, epochStats *Epo dbBlock.Status = dbtypes.Orphaned } + // Apply payload orphaned status from block flag (set during finalization/sync) + if block.isPayloadOrphaned { + dbBlock.PayloadStatus = dbtypes.PayloadStatusOrphaned + } + err := db.InsertSlot(dbw.indexer.ctx, tx, dbBlock) if err != nil { return nil, fmt.Errorf("error inserting slot: %v", err) @@ -251,6 +256,8 @@ func (dbw *dbWriter) buildDbBlock(block *Block, epochStats *EpochStats, override epochStatsValues = epochStats.GetValues(true) } + chainState := dbw.indexer.consensusPool.GetChainState() + graffiti, _ := blockBody.Graffiti() attestations, _ := blockBody.Attestations() deposits, _ := blockBody.Deposits() @@ -259,28 +266,76 @@ func (dbw *dbWriter) buildDbBlock(block *Block, epochStats *EpochStats, override proposerSlashings, _ := blockBody.ProposerSlashings() blsToExecChanges, _ := blockBody.BLSToExecutionChanges() syncAggregate, _ := blockBody.SyncAggregate() + executionBlockHash, _ := blockBody.ExecutionBlockHash() blobKzgCommitments, _ := blockBody.BlobKZGCommitments() - var executionExtraData []byte var executionBlockNumber uint64 - var executionBlockHash phase0.Hash32 + var executionBlockParentHash []byte + var executionExtraData []byte var executionTransactions []bellatrix.Transaction var executionWithdrawals []*capella.Withdrawal - - executionPayload, _ := blockBody.ExecutionPayload() - if executionPayload != nil { - executionExtraData, _ = executionPayload.ExtraData() - executionBlockHash, _ = executionPayload.BlockHash() - executionBlockNumber, _ = executionPayload.BlockNumber() - executionTransactions, _ = executionPayload.Transactions() - executionWithdrawals, _ = executionPayload.Withdrawals() + var depositRequests []*electra.DepositRequest + var payloadStatus dbtypes.PayloadStatus + + if chainState.IsEip7732Enabled(chainState.EpochOfSlot(block.Slot)) { + blockPayload := block.GetExecutionPayload(dbw.indexer.ctx) + if blockPayload != nil { + 
executionBlockNumber = blockPayload.Message.Payload.BlockNumber + executionBlockParentHash = blockPayload.Message.Payload.ParentHash[:] + executionExtraData = blockPayload.Message.Payload.ExtraData + executionTransactions = blockPayload.Message.Payload.Transactions + executionWithdrawals = blockPayload.Message.Payload.Withdrawals + depositRequests = blockPayload.Message.ExecutionRequests.Deposits + payloadStatus = dbtypes.PayloadStatusCanonical + } else { + payloadStatus = dbtypes.PayloadStatusMissing + } + } else { + payloadStatus = dbtypes.PayloadStatusCanonical + executionBlockNumber, _ = blockBody.ExecutionBlockNumber() + executionPayload, _ := blockBody.ExecutionPayload() + if executionPayload != nil { + executionExtraData, _ = executionPayload.ExtraData() + executionTransactions, _ = executionPayload.Transactions() + executionWithdrawals, _ = executionPayload.Withdrawals() + if parentHash, err := executionPayload.ParentHash(); err == nil { + executionBlockParentHash = parentHash[:] + } + } + executionRequests, _ := blockBody.ExecutionRequests() + if executionRequests != nil { + depositRequests = executionRequests.Deposits + } } - var depositRequests []*electra.DepositRequest + // Get builder index from block, default to -1 (self-built/MaxUint64) + var builderIndexInt64 int64 = -1 + if blockIndex := block.GetBlockIndex(dbw.indexer.ctx); blockIndex != nil { + if blockIndex.BuilderIndex == math.MaxUint64 { + builderIndexInt64 = -1 + } else { + builderIndexInt64 = int64(blockIndex.BuilderIndex) + } + } - executionRequests, _ := blockBody.ExecutionRequests() - if executionRequests != nil { - depositRequests = executionRequests.Deposits + // Extract execution payload bid from Gloas blocks and add to bid cache. + // This ensures bids are persisted even when syncing from blocks (not just SSE events). 
+ if blockBody.Version >= spec.DataVersionGloas && blockBody.Gloas != nil && + blockBody.Gloas.Message != nil && blockBody.Gloas.Message.Body != nil && + blockBody.Gloas.Message.Body.SignedExecutionPayloadBid != nil && + blockBody.Gloas.Message.Body.SignedExecutionPayloadBid.Message != nil { + bidMsg := blockBody.Gloas.Message.Body.SignedExecutionPayloadBid.Message + dbw.indexer.blockBidCache.AddBid(&dbtypes.BlockBid{ + ParentRoot: bidMsg.ParentBlockRoot[:], + ParentHash: bidMsg.ParentBlockHash[:], + BlockHash: bidMsg.BlockHash[:], + FeeRecipient: bidMsg.FeeRecipient[:], + GasLimit: uint64(bidMsg.GasLimit), + BuilderIndex: int64(bidMsg.BuilderIndex), + Slot: uint64(bidMsg.Slot), + Value: uint64(bidMsg.Value), + ElPayment: uint64(bidMsg.ExecutionPayment), + }) } dbBlock := dbtypes.Slot{ @@ -301,7 +356,9 @@ func (dbw *dbWriter) buildDbBlock(block *Block, epochStats *EpochStats, override BLSChangeCount: uint64(len(blsToExecChanges)), BlobCount: uint64(len(blobKzgCommitments)), RecvDelay: block.recvDelay, + PayloadStatus: payloadStatus, BlockUid: block.BlockUID, + BuilderIndex: builderIndexInt64, } blockSize, err := getBlockSize(block.dynSsz, blockBody) @@ -337,6 +394,7 @@ func (dbw *dbWriter) buildDbBlock(block *Block, epochStats *EpochStats, override dbBlock.EthTransactionCount = uint64(len(executionTransactions)) dbBlock.EthBlockNumber = &executionBlockNumber dbBlock.EthBlockHash = executionBlockHash[:] + dbBlock.EthBlockParentHash = executionBlockParentHash dbBlock.EthBlockExtra = executionExtraData dbBlock.EthBlockExtraText = utils.GraffitiToString(executionExtraData[:]) dbBlock.WithdrawCount = uint64(len(executionWithdrawals)) @@ -415,6 +473,15 @@ func (dbw *dbWriter) buildDbBlock(block *Block, epochStats *EpochStats, override dbBlock.EthBaseFee = utils.GetBaseFeeAsUint64(payload.BaseFeePerGas) dbBlock.EthFeeRecipient = payload.FeeRecipient[:] } + case spec.DataVersionGloas: + blockPayload := block.GetExecutionPayload(dbw.indexer.ctx) + if blockPayload != nil { + 
payload := blockPayload.Message.Payload + dbBlock.EthGasUsed = payload.GasUsed + dbBlock.EthGasLimit = payload.GasLimit + dbBlock.EthBaseFee = utils.GetBaseFeeAsUint64(payload.BaseFeePerGas) + dbBlock.EthFeeRecipient = payload.FeeRecipient[:] + } } } @@ -488,15 +555,27 @@ func (dbw *dbWriter) buildDbEpoch(epoch phase0.Epoch, blocks []*Block, epochStat proposerSlashings, _ := blockBody.ProposerSlashings() blsToExecChanges, _ := blockBody.BLSToExecutionChanges() syncAggregate, _ := blockBody.SyncAggregate() - executionTransactions, _ := blockBody.ExecutionTransactions() - executionWithdrawals, _ := blockBody.Withdrawals() blobKzgCommitments, _ := blockBody.BlobKZGCommitments() + var executionTransactions []bellatrix.Transaction + var executionWithdrawals []*capella.Withdrawal var depositRequests []*electra.DepositRequest - executionRequests, _ := blockBody.ExecutionRequests() - if executionRequests != nil { - depositRequests = executionRequests.Deposits + if chainState.IsEip7732Enabled(chainState.EpochOfSlot(block.Slot)) { + blockPayload := block.GetExecutionPayload(dbw.indexer.ctx) + if blockPayload != nil { + dbEpoch.PayloadCount++ + executionTransactions = blockPayload.Message.Payload.Transactions + executionWithdrawals = blockPayload.Message.Payload.Withdrawals + depositRequests = blockPayload.Message.ExecutionRequests.Deposits + } + } else { + executionTransactions, _ = blockBody.ExecutionTransactions() + executionWithdrawals, _ = blockBody.Withdrawals() + executionRequests, _ := blockBody.ExecutionRequests() + if executionRequests != nil { + depositRequests = executionRequests.Deposits + } } dbEpoch.AttestationCount += uint64(len(attestations)) @@ -570,6 +649,13 @@ func (dbw *dbWriter) buildDbEpoch(epoch phase0.Epoch, blocks []*Block, epochStat dbEpoch.EthGasUsed += payload.GasUsed dbEpoch.EthGasLimit += payload.GasLimit } + case spec.DataVersionGloas: + blockPayload := block.GetExecutionPayload(dbw.indexer.ctx) + if blockPayload != nil { + payload := 
blockPayload.Message.Payload + dbEpoch.EthGasUsed += payload.GasUsed + dbEpoch.EthGasLimit += payload.GasLimit + } } } } @@ -658,14 +744,26 @@ func (dbw *dbWriter) persistBlockDepositRequests(tx *sqlx.Tx, block *Block, orph } func (dbw *dbWriter) buildDbDepositRequests(block *Block, orphaned bool, overrideForkId *ForkKey) []*dbtypes.Deposit { - blockBody := block.GetBlock(dbw.indexer.ctx) - if blockBody == nil { - return nil + chainState := dbw.indexer.consensusPool.GetChainState() + + var requests *electra.ExecutionRequests + + if chainState.IsEip7732Enabled(chainState.EpochOfSlot(block.Slot)) { + payload := block.GetExecutionPayload(dbw.indexer.ctx) + if payload != nil { + requests = payload.Message.ExecutionRequests + } + } else { + blockBody := block.GetBlock(dbw.indexer.ctx) + if blockBody == nil { + return nil + } + + requests, _ = blockBody.ExecutionRequests() } - requests, err := blockBody.ExecutionRequests() - if err != nil { - return nil + if requests == nil { + return []*dbtypes.Deposit{} } deposits := requests.Deposits @@ -765,19 +863,31 @@ func (dbw *dbWriter) persistBlockWithdrawals(tx *sqlx.Tx, block *Block, orphaned // If tx is nil (read path), only existing accounts are looked up. // If sim is non-nil, it's used to determine pending partial withdrawal count for type classification. 
func (dbw *dbWriter) buildDbWithdrawals(block *Block, orphaned bool, overrideForkId *ForkKey, tx *sqlx.Tx, sim *stateSimulator) []*dbtypes.Withdrawal { - blockBody := block.GetBlock(dbw.indexer.ctx) - if blockBody == nil { - return nil - } + chainState := dbw.indexer.consensusPool.GetChainState() - executionPayload, err := blockBody.ExecutionPayload() - if err != nil || executionPayload == nil { - return nil - } + var executionWithdrawals []*capella.Withdrawal + if chainState.IsEip7732Enabled(chainState.EpochOfSlot(block.Slot)) { + blockPayload := block.GetExecutionPayload(dbw.indexer.ctx) + if blockPayload != nil { + executionWithdrawals = blockPayload.Message.Payload.Withdrawals + } + } else { + blockBody := block.GetBlock(dbw.indexer.ctx) + if blockBody == nil { + return nil + } - withdrawals, err := executionPayload.Withdrawals() - if err != nil || len(withdrawals) == 0 { - return nil + executionPayload, err := blockBody.ExecutionPayload() + if err != nil || executionPayload == nil { + return nil + } + + withdrawals, err := executionPayload.Withdrawals() + if err != nil || len(withdrawals) == 0 { + return nil + } + + executionWithdrawals = withdrawals } forkId := uint64(block.forkId) @@ -785,24 +895,15 @@ func (dbw *dbWriter) buildDbWithdrawals(block *Block, orphaned bool, overrideFor forkId = uint64(*overrideForkId) } - // Reconstruct sim from epoch stats if not provided (read path) - if sim == nil { - chainState := dbw.indexer.consensusPool.GetChainState() - epochStats := dbw.indexer.epochCache.getEpochStatsByEpochAndRoot(chainState.EpochOfSlot(block.Slot), block.Root) - if epochStats != nil { - sim = newStateSimulator(dbw.indexer, epochStats) - } - } - - // Compute pending partial withdrawal count from sim state - pendingPartialCount := dbw.countPendingPartialWithdrawals(block, sim) + // Simulate pending withdrawal state to get classifications + simResult := dbw.getWithdrawalSimResult(block, sim) blockEpoch := 
dbw.indexer.consensusPool.GetChainState().EpochOfSlot(block.Slot) - dbWithdrawals := make([]*dbtypes.Withdrawal, len(withdrawals)) - for idx, withdrawal := range withdrawals { - // Classify withdrawal type - withdrawalType := dbw.classifyWithdrawalType(idx, pendingPartialCount, withdrawal.ValidatorIndex, phase0.Gwei(withdrawal.Amount), blockEpoch) + dbWithdrawals := make([]*dbtypes.Withdrawal, len(executionWithdrawals)) + for idx, withdrawal := range executionWithdrawals { + // Classify withdrawal type and resolve reference slot + withdrawalType, refSlot := dbw.classifyWithdrawalType(idx, simResult, withdrawal.ValidatorIndex, phase0.Gwei(withdrawal.Amount), blockEpoch) dbWithdrawals[idx] = &dbtypes.Withdrawal{ BlockUid: block.BlockUID, @@ -812,75 +913,66 @@ func (dbw *dbWriter) buildDbWithdrawals(block *Block, orphaned bool, overrideFor ForkId: forkId, Validator: uint64(withdrawal.ValidatorIndex), Address: withdrawal.Address[:], + RefSlot: refSlot, Amount: uint64(withdrawal.Amount), } } // Resolve account IDs for withdrawal addresses - dbw.resolveWithdrawalAccounts(withdrawals, dbWithdrawals, tx) + dbw.resolveWithdrawalAccounts(executionWithdrawals, dbWithdrawals, tx) return dbWithdrawals } -// countPendingPartialWithdrawals computes how many pending partial withdrawals -// will be processed for the given block, following the consensus spec logic. -// This replays the sim state up to (but not including) the target block to get -// the pending withdrawals list, then counts how many would be processed. -func (dbw *dbWriter) countPendingPartialWithdrawals(block *Block, sim *stateSimulator) int { +// getWithdrawalSimResult gets the withdrawal classification from the state simulator. 
+func (dbw *dbWriter) getWithdrawalSimResult(block *Block, sim *stateSimulator) *withdrawalSimResult { if sim == nil { - return 0 - } - - chainSpec := dbw.indexer.consensusPool.GetChainState().GetSpecs() - if chainSpec.MaxPendingPartialsPerWithdrawalsSweep == 0 { - return 0 // Pre-Electra: no pending partial withdrawals - } - - // Replay state up to (but not including) target block - parentBlocks := sim.getParentBlocks(block) - state := sim.resetState(block) - if state == nil { - return 0 + // Reconstruct sim from epoch stats (read path) + chainState := dbw.indexer.consensusPool.GetChainState() + epochStats := dbw.indexer.epochCache.getEpochStatsByEpochAndRoot(chainState.EpochOfSlot(block.Slot), block.Root) + if epochStats != nil { + sim = newStateSimulator(dbw.indexer, epochStats) + } } - for _, parentBlock := range parentBlocks { - sim.applyBlock(parentBlock) + if sim == nil { + return &withdrawalSimResult{} } - // Now sim.prevState.pendingWithdrawals reflects state before target block - processed := 0 - for _, pw := range sim.prevState.pendingWithdrawals { - if pw.WithdrawableEpoch > sim.epochStats.epoch { - break - } - - validator := sim.getValidator(pw.ValidatorIndex) - if validator == nil { - break - } - - if validator.ExitEpoch != FarFutureEpoch || validator.EffectiveBalance < phase0.Gwei(chainSpec.MinActivationBalance) { - continue - } + return sim.replayWithdrawalState(block) +} - processed++ - if uint64(processed) >= chainSpec.MaxPendingPartialsPerWithdrawalsSweep { - break +// classifyWithdrawalType determines the withdrawal type and reference slot for a single withdrawal. +// Uses the sim result for builder payment and partial withdrawal counts/classifications. 
+// The spec order in the execution payload is: +// +// [0..BuilderPaymentCount-1] = builder payments (from sim) +// [BuilderPaymentCount..BuilderPaymentCount+PartialCount-1] = requested withdrawals (type 3) +// [remaining with builder flag] = builder full withdrawal (type 5) +// [remaining without builder flag, exited+withdrawable] = full withdrawal (type 1) +// [remaining without builder flag] = sweep withdrawal (type 2) +func (dbw *dbWriter) classifyWithdrawalType(idx int, simResult *withdrawalSimResult, validatorIndex phase0.ValidatorIndex, amount phase0.Gwei, blockEpoch phase0.Epoch) (uint8, *uint64) { + isBuilder := uint64(validatorIndex)&BuilderIndexFlag != 0 + + // First N withdrawals are builder payments — type and ref slot from sim + if idx < simResult.BuilderPaymentCount { + if idx < len(simResult.BuilderPayments) { + bp := simResult.BuilderPayments[idx] + return bp.Type, bp.RefSlot } + return dbtypes.WithdrawalTypeBuilderPayment, nil } - return processed -} + // Next M withdrawals are from pending partial withdrawals (EIP-7002 requested) + if idx < simResult.BuilderPaymentCount+simResult.PartialCount { + return dbtypes.WithdrawalTypeRequestedWithdrawal, nil + } -// classifyWithdrawalType determines the withdrawal type based on position, validator state, and amount. 
-func (dbw *dbWriter) classifyWithdrawalType(idx int, pendingPartialCount int, validatorIndex phase0.ValidatorIndex, amount phase0.Gwei, blockEpoch phase0.Epoch) uint8 { - // First N withdrawals are from pending partial withdrawals (EIP-7002 requested) - if idx < pendingPartialCount { - return dbtypes.WithdrawalTypeRequestedWithdrawal + // Remaining withdrawals with builder flag are builder sweep (full balance withdrawal) + if isBuilder { + return dbtypes.WithdrawalTypeBuilderFullWithdrawal, nil } // Check if this is a full withdrawal (validator exited and withdrawable, with significant amount) - // Only flag as full withdrawal when amount >= EJECTION_BALANCE - 0.5 ETH to avoid - // flagging small dust sweeps that happen after exit/consolidation. validator := dbw.indexer.GetValidatorByIndex(validatorIndex, nil) if validator != nil && validator.ExitEpoch != FarFutureEpoch && validator.WithdrawableEpoch <= blockEpoch { chainSpec := dbw.indexer.consensusPool.GetChainState().GetSpecs() @@ -889,12 +981,12 @@ func (dbw *dbWriter) classifyWithdrawalType(idx int, pendingPartialCount int, va fullWithdrawalThreshold = phase0.Gwei(chainSpec.EjectionBalance - 500000000) } if amount >= fullWithdrawalThreshold { - return dbtypes.WithdrawalTypeFullWithdrawal + return dbtypes.WithdrawalTypeFullWithdrawal, nil } } // Default: sweep withdrawal (excess balance or small dust after exit) - return dbtypes.WithdrawalTypeSweepWithdrawal + return dbtypes.WithdrawalTypeSweepWithdrawal, nil } // resolveWithdrawalAccounts looks up account IDs for withdrawal addresses. 
@@ -1055,14 +1147,29 @@ func (dbw *dbWriter) persistBlockConsolidationRequests(tx *sqlx.Tx, block *Block } func (dbw *dbWriter) buildDbConsolidationRequests(block *Block, orphaned bool, overrideForkId *ForkKey, sim *stateSimulator) []*dbtypes.ConsolidationRequest { - blockBody := block.GetBlock(dbw.indexer.ctx) - if blockBody == nil { - return nil + chainState := dbw.indexer.consensusPool.GetChainState() + + var requests *electra.ExecutionRequests + var blockNumber uint64 + + if chainState.IsEip7732Enabled(chainState.EpochOfSlot(block.Slot)) { + payload := block.GetExecutionPayload(dbw.indexer.ctx) + if payload != nil { + requests = payload.Message.ExecutionRequests + blockNumber = payload.Message.Payload.BlockNumber + } + } else { + blockBody := block.GetBlock(dbw.indexer.ctx) + if blockBody == nil { + return nil + } + + requests, _ = blockBody.ExecutionRequests() + blockNumber, _ = blockBody.ExecutionBlockNumber() } - requests, err := blockBody.ExecutionRequests() - if err != nil { - return nil + if requests == nil { + return []*dbtypes.ConsolidationRequest{} } if sim == nil { @@ -1084,8 +1191,6 @@ func (dbw *dbWriter) buildDbConsolidationRequests(block *Block, orphaned bool, o blockResults = sim.replayBlockResults(block) } - blockNumber, _ := blockBody.ExecutionBlockNumber() - dbConsolidations := make([]*dbtypes.ConsolidationRequest, len(consolidations)) for idx, consolidation := range consolidations { dbConsolidation := &dbtypes.ConsolidationRequest{ @@ -1136,14 +1241,29 @@ func (dbw *dbWriter) persistBlockWithdrawalRequests(tx *sqlx.Tx, block *Block, o } func (dbw *dbWriter) buildDbWithdrawalRequests(block *Block, orphaned bool, overrideForkId *ForkKey, sim *stateSimulator) []*dbtypes.WithdrawalRequest { - blockBody := block.GetBlock(dbw.indexer.ctx) - if blockBody == nil { - return nil + chainState := dbw.indexer.consensusPool.GetChainState() + + var requests *electra.ExecutionRequests + var blockNumber uint64 + + if 
chainState.IsEip7732Enabled(chainState.EpochOfSlot(block.Slot)) { + payload := block.GetExecutionPayload(dbw.indexer.ctx) + if payload != nil { + requests = payload.Message.ExecutionRequests + blockNumber = payload.Message.Payload.BlockNumber + } + } else { + blockBody := block.GetBlock(dbw.indexer.ctx) + if blockBody == nil { + return nil + } + + requests, _ = blockBody.ExecutionRequests() + blockNumber, _ = blockBody.ExecutionBlockNumber() } - requests, err := blockBody.ExecutionRequests() - if err != nil { - return nil + if requests == nil { + return []*dbtypes.WithdrawalRequest{} } if sim == nil { @@ -1165,8 +1285,6 @@ func (dbw *dbWriter) buildDbWithdrawalRequests(block *Block, orphaned bool, over blockResults = sim.replayBlockResults(block) } - blockNumber, _ := blockBody.ExecutionBlockNumber() - dbWithdrawalRequests := make([]*dbtypes.WithdrawalRequest, len(withdrawalRequests)) for idx, withdrawalRequest := range withdrawalRequests { dbWithdrawalRequest := &dbtypes.WithdrawalRequest{ diff --git a/indexer/execution/indexerctx.go b/indexer/execution/indexerctx.go index 7d7195e1b..b56fe0e3d 100644 --- a/indexer/execution/indexerctx.go +++ b/indexer/execution/indexerctx.go @@ -6,11 +6,11 @@ import ( "slices" "sort" - "github.com/attestantio/go-eth2-client/spec/phase0" "github.com/ethereum/go-ethereum/common" "github.com/ethpandaops/dora/clients/consensus" "github.com/ethpandaops/dora/clients/execution" "github.com/ethpandaops/dora/indexer/beacon" + "github.com/ethpandaops/go-eth2-client/spec/phase0" "github.com/sirupsen/logrus" ) diff --git a/indexer/execution/system_contracts/consolidation_indexer.go b/indexer/execution/system_contracts/consolidation_indexer.go index e25229b67..f442e2bea 100644 --- a/indexer/execution/system_contracts/consolidation_indexer.go +++ b/indexer/execution/system_contracts/consolidation_indexer.go @@ -4,9 +4,9 @@ import ( "fmt" "time" - "github.com/attestantio/go-eth2-client/spec/phase0" "github.com/ethereum/go-ethereum/common" 
"github.com/ethereum/go-ethereum/core/types" + "github.com/ethpandaops/go-eth2-client/spec/phase0" "github.com/jmoiron/sqlx" "github.com/sirupsen/logrus" diff --git a/indexer/execution/system_contracts/deposit_indexer.go b/indexer/execution/system_contracts/deposit_indexer.go index d16d94948..369bb8671 100644 --- a/indexer/execution/system_contracts/deposit_indexer.go +++ b/indexer/execution/system_contracts/deposit_indexer.go @@ -8,11 +8,11 @@ import ( "strings" "time" - "github.com/attestantio/go-eth2-client/spec/phase0" "github.com/ethereum/go-ethereum/accounts/abi" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto" + "github.com/ethpandaops/go-eth2-client/spec/phase0" "github.com/jmoiron/sqlx" blsu "github.com/protolambda/bls12-381-util" zrnt_common "github.com/protolambda/zrnt/eth2/beacon/common" diff --git a/indexer/execution/system_contracts/withdrawal_indexer.go b/indexer/execution/system_contracts/withdrawal_indexer.go index e2f22a351..48898e9b2 100644 --- a/indexer/execution/system_contracts/withdrawal_indexer.go +++ b/indexer/execution/system_contracts/withdrawal_indexer.go @@ -5,9 +5,9 @@ import ( "math/big" "time" - "github.com/attestantio/go-eth2-client/spec/phase0" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" + "github.com/ethpandaops/go-eth2-client/spec/phase0" "github.com/jmoiron/sqlx" "github.com/sirupsen/logrus" diff --git a/indexer/execution/txindexer/txindexer.go b/indexer/execution/txindexer/txindexer.go index e1b0a2952..b45afdecb 100644 --- a/indexer/execution/txindexer/txindexer.go +++ b/indexer/execution/txindexer/txindexer.go @@ -9,7 +9,7 @@ import ( "sync/atomic" "time" - "github.com/attestantio/go-eth2-client/spec/phase0" + "github.com/ethpandaops/go-eth2-client/spec/phase0" "github.com/jmoiron/sqlx" "github.com/sirupsen/logrus" diff --git a/indexer/mevrelay/mevindexer.go b/indexer/mevrelay/mevindexer.go index 
059d0e180..a16de329d 100644 --- a/indexer/mevrelay/mevindexer.go +++ b/indexer/mevrelay/mevindexer.go @@ -13,8 +13,8 @@ import ( "sync" "time" - "github.com/attestantio/go-eth2-client/spec/phase0" "github.com/ethereum/go-ethereum/common" + "github.com/ethpandaops/go-eth2-client/spec/phase0" "github.com/jmoiron/sqlx" "github.com/sirupsen/logrus" diff --git a/indexer/snooper/snooper_manager.go b/indexer/snooper/snooper_manager.go index 3004cf89d..f4aa2f638 100644 --- a/indexer/snooper/snooper_manager.go +++ b/indexer/snooper/snooper_manager.go @@ -7,12 +7,12 @@ import ( "sync" "time" - "github.com/attestantio/go-eth2-client/spec/phase0" "github.com/ethereum/go-ethereum/common" "github.com/ethpandaops/dora/clients/execution" "github.com/ethpandaops/dora/clients/execution/snooper" "github.com/ethpandaops/dora/indexer/beacon" "github.com/ethpandaops/dora/utils" + "github.com/ethpandaops/go-eth2-client/spec/phase0" "github.com/sirupsen/logrus" ) diff --git a/services/chainservice.go b/services/chainservice.go index 0c8c99e7b..37aca1a08 100644 --- a/services/chainservice.go +++ b/services/chainservice.go @@ -9,9 +9,9 @@ import ( "strings" "time" - v1 "github.com/attestantio/go-eth2-client/api/v1" - "github.com/attestantio/go-eth2-client/spec/phase0" "github.com/ethereum/go-ethereum/common" + v1 "github.com/ethpandaops/go-eth2-client/api/v1" + "github.com/ethpandaops/go-eth2-client/spec/phase0" "github.com/jmoiron/sqlx" "github.com/ethpandaops/dora/blockdb" @@ -265,6 +265,13 @@ func (cs *ChainService) StartService() error { return fmt.Errorf("failed initializing s3 blockdb: %v", err) } cs.logger.Infof("S3 blockdb initialized at %v", utils.Config.BlockDb.S3.Bucket) + case "tiered": + err := blockdb.InitWithTiered(utils.Config.BlockDb.Tiered, cs.logger) + if err != nil { + return fmt.Errorf("failed initializing tiered blockdb: %v", err) + } + cs.logger.Infof("Tiered blockdb initialized (Pebble cache: %v, S3: %v)", + utils.Config.BlockDb.Tiered.Pebble.Path, 
utils.Config.BlockDb.Tiered.S3.Bucket) default: cs.logger.Infof("Blockdb disabled") } diff --git a/services/chainservice_blocks.go b/services/chainservice_blocks.go index e063ef81c..b006f7575 100644 --- a/services/chainservice_blocks.go +++ b/services/chainservice_blocks.go @@ -7,11 +7,13 @@ import ( "math" "strings" - "github.com/attestantio/go-eth2-client/spec" - "github.com/attestantio/go-eth2-client/spec/deneb" - "github.com/attestantio/go-eth2-client/spec/phase0" + "github.com/ethpandaops/go-eth2-client/spec" + "github.com/ethpandaops/go-eth2-client/spec/deneb" + "github.com/ethpandaops/go-eth2-client/spec/gloas" + "github.com/ethpandaops/go-eth2-client/spec/phase0" "github.com/ethpandaops/dora/blockdb" + btypes "github.com/ethpandaops/dora/blockdb/types" "github.com/ethpandaops/dora/db" "github.com/ethpandaops/dora/dbtypes" "github.com/ethpandaops/dora/indexer/beacon" @@ -22,6 +24,7 @@ type CombinedBlockResponse struct { Root phase0.Root Header *phase0.SignedBeaconBlockHeader Block *spec.VersionedSignedBeaconBlock + Payload *gloas.SignedExecutionPayloadEnvelope Orphaned bool } @@ -103,6 +106,7 @@ func (bs *ChainService) GetSlotDetailsByBlockroot(ctx context.Context, blockroot Root: blockInfo.Root, Header: blockInfo.GetHeader(), Block: blockInfo.GetBlock(ctx), + Payload: blockInfo.GetExecutionPayload(ctx), Orphaned: !bs.beaconIndexer.IsCanonicalBlock(blockInfo, nil), } } @@ -115,6 +119,7 @@ func (bs *ChainService) GetSlotDetailsByBlockroot(ctx context.Context, blockroot Root: blockInfo.Root, Header: blockInfo.GetHeader(), Block: blockInfo.GetBlock(ctx), + Payload: blockInfo.GetExecutionPayload(ctx), Orphaned: true, } } @@ -127,18 +132,34 @@ func (bs *ChainService) GetSlotDetailsByBlockroot(ctx context.Context, blockroot } var block *spec.VersionedSignedBeaconBlock + var payload *gloas.SignedExecutionPayloadEnvelope bodyRetry := 0 for ; bodyRetry < 3; bodyRetry++ { client := clients[bodyRetry%len(clients)] - block, err = beacon.LoadBeaconBlock(ctx, client, 
blockroot) - if block != nil { - break - } else if err != nil { - log := logrus.WithError(err) - if client != nil { - log = log.WithField("client", client.GetClient().GetName()) + if block == nil { + block, err = beacon.LoadBeaconBlock(ctx, client, blockroot) + if err != nil { + log := logrus.WithError(err) + if client != nil { + log = log.WithField("client", client.GetClient().GetName()) + } + log.Warnf("Error loading block body for root 0x%x", blockroot) + } + } + + if block != nil && block.Version >= spec.DataVersionGloas { + payload, err = beacon.LoadExecutionPayload(ctx, client, blockroot) + if payload != nil { + break + } else if err != nil { + log := logrus.WithError(err) + if client != nil { + log = log.WithField("client", client.GetClient().GetName()) + } + log.Warnf("Error loading block payload for root 0x%x", blockroot) } - log.Warnf("Error loading block body for root 0x%x", blockroot) + } else if block != nil { + break } } if err == nil && block != nil { @@ -146,6 +167,7 @@ func (bs *ChainService) GetSlotDetailsByBlockroot(ctx context.Context, blockroot Root: blockroot, Header: header, Block: block, + Payload: payload, Orphaned: false, } } @@ -153,16 +175,24 @@ func (bs *ChainService) GetSlotDetailsByBlockroot(ctx context.Context, blockroot // try loading from block db if result == nil && header != nil && blockdb.GlobalBlockDb != nil { - blockData, err := blockdb.GlobalBlockDb.GetBlock(ctx, uint64(header.Message.Slot), blockroot[:], func(version uint64, block []byte) (interface{}, error) { - return beacon.UnmarshalVersionedSignedBeaconBlockSSZ(bs.beaconIndexer.GetDynSSZ(), version, block) - }) - if err == nil && blockData != nil { - result = &CombinedBlockResponse{ + blockData, err := blockdb.GlobalBlockDb.GetBlock(ctx, uint64(header.Message.Slot), blockroot[:], + btypes.BlockDataFlagBody|btypes.BlockDataFlagPayload, + func(version uint64, block []byte) (any, error) { + return beacon.UnmarshalVersionedSignedBeaconBlockSSZ(bs.beaconIndexer.GetDynSSZ(), version, block) + 
}, func(version uint64, payload []byte) (any, error) { + return beacon.UnmarshalVersionedSignedExecutionPayloadEnvelopeSSZ(bs.beaconIndexer.GetDynSSZ(), version, payload) + }) + if err == nil && blockData != nil && blockData.Body != nil { + resp := &CombinedBlockResponse{ Root: blockroot, Header: header, Block: blockData.Body.(*spec.VersionedSignedBeaconBlock), Orphaned: false, } + if blockData.Payload != nil { + resp.Payload = blockData.Payload.(*gloas.SignedExecutionPayloadEnvelope) + } + result = resp } } @@ -232,6 +262,7 @@ func (bs *ChainService) GetSlotDetailsBySlot(ctx context.Context, slot phase0.Sl Root: cachedBlock.Root, Header: blockHeader, Block: blockBody, + Payload: cachedBlock.GetExecutionPayload(ctx), Orphaned: isOrphaned, } } @@ -248,25 +279,40 @@ func (bs *ChainService) GetSlotDetailsBySlot(ctx context.Context, slot phase0.Sl var err error var block *spec.VersionedSignedBeaconBlock + var payload *gloas.SignedExecutionPayloadEnvelope bodyRetry := 0 for ; bodyRetry < 3; bodyRetry++ { client := clients[bodyRetry%len(clients)] block, err = beacon.LoadBeaconBlock(ctx, client, blockRoot) - if block != nil { - break - } else if err != nil { + if err != nil { log := logrus.WithError(err) if client != nil { log = log.WithField("client", client.GetClient().GetName()) } log.Warnf("Error loading block body for slot %v", slot) } + + if block != nil && block.Version >= spec.DataVersionGloas { + payload, err = beacon.LoadExecutionPayload(ctx, client, blockRoot) + if payload != nil { + break + } else if err != nil { + log := logrus.WithError(err) + if client != nil { + log = log.WithField("client", client.GetClient().GetName()) + } + log.Warnf("Error loading block payload for root 0x%x", blockRoot) + } + } else if block != nil { + break + } } if err == nil && block != nil { result = &CombinedBlockResponse{ Root: blockRoot, Header: header, Block: block, + Payload: payload, Orphaned: orphaned, } } @@ -274,22 +320,30 @@ func (bs *ChainService) 
GetSlotDetailsBySlot(ctx context.Context, slot phase0.Sl // try loading from block db if result == nil && header != nil && blockdb.GlobalBlockDb != nil { - blockData, err := blockdb.GlobalBlockDb.GetBlock(ctx, uint64(slot), blockRoot[:], func(version uint64, block []byte) (interface{}, error) { - return beacon.UnmarshalVersionedSignedBeaconBlockSSZ(bs.beaconIndexer.GetDynSSZ(), version, block) - }) - if err == nil && blockData != nil { + blockData, err := blockdb.GlobalBlockDb.GetBlock(ctx, uint64(slot), blockRoot[:], + btypes.BlockDataFlagHeader|btypes.BlockDataFlagBody|btypes.BlockDataFlagPayload, + func(version uint64, block []byte) (any, error) { + return beacon.UnmarshalVersionedSignedBeaconBlockSSZ(bs.beaconIndexer.GetDynSSZ(), version, block) + }, func(version uint64, payload []byte) (any, error) { + return beacon.UnmarshalVersionedSignedExecutionPayloadEnvelopeSSZ(bs.beaconIndexer.GetDynSSZ(), version, payload) + }) + if err == nil && blockData != nil && blockData.Body != nil { header := &phase0.SignedBeaconBlockHeader{} err = header.UnmarshalSSZ(blockData.HeaderData) if err != nil { return nil, err } - result = &CombinedBlockResponse{ + resp := &CombinedBlockResponse{ Root: blockRoot, Header: header, Block: blockData.Body.(*spec.VersionedSignedBeaconBlock), Orphaned: false, } + if blockData.Payload != nil { + resp.Payload = blockData.Payload.(*gloas.SignedExecutionPayloadEnvelope) + } + result = resp } } @@ -309,6 +363,49 @@ func (bs *ChainService) GetBlobSidecarsByBlockRoot(ctx context.Context, blockroo return client.GetClient().GetRPCClient().GetBlobSidecarsByBlockroot(ctx, blockroot) } +// getPayloadStatus computes the payload status for a given block. 
+func (bs *ChainService) getPayloadStatus(ctx context.Context, block *beacon.Block, canonicalHead *beacon.Block) dbtypes.PayloadStatus { + chainState := bs.consensusPool.GetChainState() + if !chainState.IsEip7732Enabled(chainState.EpochOfSlot(block.Slot)) { + return dbtypes.PayloadStatusCanonical + } + + if !block.HasExecutionPayload() { + return dbtypes.PayloadStatusMissing + } + + blockIndex := block.GetBlockIndex(ctx) + if blockIndex == nil { + return dbtypes.PayloadStatusCanonical + } + + // Get child blocks and check if any canonical child builds on this payload + childBlocks := bs.beaconIndexer.GetBlockByParentRoot(block.Root) + + if len(childBlocks) == 0 { + // no children, so it's canonical for now + return dbtypes.PayloadStatusCanonical + } + + for _, child := range childBlocks { + childIndex := child.GetBlockIndex(ctx) + if childIndex == nil { + continue + } + // Check if child is in the canonical chain (use original head since + // children are at higher slots than the updated lastCanonicalBlock) + if !bs.beaconIndexer.IsCanonicalBlockByHead(child, canonicalHead) { + continue + } + // Check if child builds on this block's execution payload + if bytes.Equal(childIndex.ExecutionParentHash[:], blockIndex.ExecutionHash[:]) { + return dbtypes.PayloadStatusCanonical + } + } + + return dbtypes.PayloadStatusOrphaned +} + // GetDbBlocksForSlots retrieves blocks for a range of slots from cache & database. // The firstSlot parameter specifies the starting slot. // The slotLimit parameter limits the number of slots to retrieve. 
@@ -334,7 +431,10 @@ func (bs *ChainService) GetDbBlocksForSlots(ctx context.Context, firstSlot uint6 proposerAssignmentsEpoch := phase0.Epoch(math.MaxInt64) getCanonicalProposer := func(slot phase0.Slot) phase0.ValidatorIndex { epoch := chainState.EpochOfSlot(slot) - if epoch != proposerAssignmentsEpoch { + if proposerAssignmentsEpoch != phase0.Epoch(math.MaxInt64) && epoch == proposerAssignmentsEpoch+1 && chainState.IsFuluEnabled(epoch) { + // extended proposer lookahead in fulu, use the same proposer assignments as the previous epoch + } else if epoch != proposerAssignmentsEpoch { + assignmentsEpoch := epoch if epochStats := bs.beaconIndexer.GetEpochStats(epoch, nil); epochStats != nil { if epochStatsValues := epochStats.GetValues(true); epochStatsValues != nil { proposerAssignments = map[phase0.Slot]phase0.ValidatorIndex{} @@ -343,8 +443,20 @@ func (bs *ChainService) GetDbBlocksForSlots(ctx context.Context, firstSlot uint6 proposerAssignments[slot] = proposer } } + } else if epoch > 0 && chainState.IsFuluEnabled(epoch-1) { + if epochStats := bs.beaconIndexer.GetEpochStats(epoch-1, nil); epochStats != nil { + if epochStatsValues := epochStats.GetValues(true); epochStatsValues != nil { + assignmentsEpoch = epoch - 1 + proposerAssignments = map[phase0.Slot]phase0.ValidatorIndex{} + for slotIdx, proposer := range epochStatsValues.ProposerDuties { + slot := chainState.EpochToSlot(assignmentsEpoch) + phase0.Slot(slotIdx) + proposerAssignments[slot] = proposer + } + } + } + } - proposerAssignmentsEpoch = epoch + proposerAssignmentsEpoch = assignmentsEpoch } proposer, ok := proposerAssignments[slot] @@ -364,6 +476,7 @@ func (bs *ChainService) GetDbBlocksForSlots(ctx context.Context, firstSlot uint6 blocks := bs.beaconIndexer.GetBlocksBySlot(slot) for _, block := range blocks { isCanonical := bs.beaconIndexer.IsCanonicalBlockByHead(block, lastCanonicalBlock) + payloadStatus := bs.getPayloadStatus(ctx, block, lastCanonicalBlock) if isCanonical { lastCanonicalBlock = 
block } @@ -372,6 +485,7 @@ func (bs *ChainService) GetDbBlocksForSlots(ctx context.Context, firstSlot uint6 } dbBlock := block.GetDbBlock(bs.beaconIndexer, isCanonical) if dbBlock != nil { + dbBlock.PayloadStatus = payloadStatus resBlocks = append(resBlocks, dbBlock) } } @@ -434,6 +548,7 @@ func (bs *ChainService) GetDbBlocksForSlots(ctx context.Context, firstSlot uint6 } isCanonical := bs.beaconIndexer.IsCanonicalBlockByHead(block, lastCanonicalBlock) + payloadStatus := bs.getPayloadStatus(ctx, block, lastCanonicalBlock) if isCanonical { lastCanonicalBlock = block } @@ -450,9 +565,10 @@ func (bs *ChainService) GetDbBlocksForSlots(ctx context.Context, firstSlot uint6 blockRoots = append(blockRoots, block.Root[:]) blockRootsIdx = append(blockRootsIdx, len(resBlocks)) resBlocks = append(resBlocks, &dbtypes.Slot{ - Slot: uint64(slot), - Proposer: uint64(blockHeader.Message.ProposerIndex), - Status: blockStatus, + Slot: uint64(slot), + Proposer: uint64(blockHeader.Message.ProposerIndex), + Status: blockStatus, + PayloadStatus: payloadStatus, }) } @@ -495,6 +611,7 @@ func (bs *ChainService) GetDbBlocksForSlots(ctx context.Context, firstSlot uint6 for idx, blockRoot := range blockRoots { if dbBlock, ok := blockMap[phase0.Root(blockRoot)]; ok { dbBlock.Status = resBlocks[blockRootsIdx[idx]].Status + dbBlock.PayloadStatus = resBlocks[blockRootsIdx[idx]].PayloadStatus resBlocks[blockRootsIdx[idx]] = dbBlock } } @@ -549,10 +666,11 @@ func (bs *ChainService) GetDbBlocksForSlots(ctx context.Context, firstSlot uint6 } type cachedDbBlock struct { - slot uint64 - proposer uint64 - orphaned bool - block *beacon.Block + slot uint64 + proposer uint64 + orphaned bool + payloadStatus dbtypes.PayloadStatus + block *beacon.Block } // GetDbBlocksByFilter retrieves a filtered range of blocks from cache & database. 
@@ -580,7 +698,10 @@ func (bs *ChainService) GetDbBlocksByFilter(ctx context.Context, filter *dbtypes proposerAssignmentsEpoch := phase0.Epoch(math.MaxInt64) getCanonicalProposer := func(slot phase0.Slot) phase0.ValidatorIndex { epoch := chainState.EpochOfSlot(slot) - if epoch != proposerAssignmentsEpoch { + if proposerAssignmentsEpoch != phase0.Epoch(math.MaxInt64) && epoch == proposerAssignmentsEpoch+1 && chainState.IsFuluEnabled(epoch) { + // extended proposer lookahead in fulu, use the same proposer assignments as the previous epoch + } else if epoch != proposerAssignmentsEpoch { + assignmentsEpoch := epoch if epochStats := bs.beaconIndexer.GetEpochStats(epoch, nil); epochStats != nil { if epochStatsValues := epochStats.GetValues(true); epochStatsValues != nil { proposerAssignments = map[phase0.Slot]phase0.ValidatorIndex{} @@ -589,8 +710,20 @@ func (bs *ChainService) GetDbBlocksByFilter(ctx context.Context, filter *dbtypes proposerAssignments[slot] = proposer } } + } else if epoch > 0 && chainState.IsFuluEnabled(epoch-1) { + if epochStats := bs.beaconIndexer.GetEpochStats(epoch-1, nil); epochStats != nil { + if epochStatsValues := epochStats.GetValues(true); epochStatsValues != nil { + assignmentsEpoch = epoch - 1 + proposerAssignments = map[phase0.Slot]phase0.ValidatorIndex{} + for slotIdx, proposer := range epochStatsValues.ProposerDuties { + slot := chainState.EpochToSlot(assignmentsEpoch) + phase0.Slot(slotIdx) + proposerAssignments[slot] = proposer + } + } + } + } - proposerAssignmentsEpoch = epoch + proposerAssignmentsEpoch = assignmentsEpoch } proposer, ok := proposerAssignments[slot] @@ -611,9 +744,14 @@ func (bs *ChainService) GetDbBlocksByFilter(ctx context.Context, filter *dbtypes filter.MaxSlot = &maxSlot } + if filter.WithPayloadMask == 0 { + filter.WithPayloadMask = dbtypes.PayloadStatusMaskAll + } + // get blocks from cache // iterate from current slot to finalized slot - lastCanonicalBlock := bs.beaconIndexer.GetCanonicalHead(nil) + 
canonicalHead := bs.beaconIndexer.GetCanonicalHead(nil) + lastCanonicalBlock := canonicalHead // apply epoch filter to slot range cacheStartSlot := startSlot @@ -687,6 +825,8 @@ func (bs *ChainService) GetDbBlocksByFilter(ctx context.Context, filter *dbtypes } isCanonical := bs.beaconIndexer.IsCanonicalBlockByHead(block, lastCanonicalBlock) + payloadStatus := bs.getPayloadStatus(ctx, block, lastCanonicalBlock) + if isCanonical { lastCanonicalBlock = block } @@ -707,6 +847,17 @@ func (bs *ChainService) GetDbBlocksByFilter(ctx context.Context, filter *dbtypes continue } + // filter by payload status + if filter.WithPayloadMask&dbtypes.PayloadStatusMaskMissing == 0 && payloadStatus == dbtypes.PayloadStatusMissing { + continue + } + if filter.WithPayloadMask&dbtypes.PayloadStatusMaskCanonical == 0 && payloadStatus == dbtypes.PayloadStatusCanonical { + continue + } + if filter.WithPayloadMask&dbtypes.PayloadStatusMaskOrphaned == 0 && payloadStatus == dbtypes.PayloadStatusOrphaned { + continue + } + // filter by graffiti if filter.Graffiti != "" { blockGraffiti := string(blockIndex.Graffiti[:]) @@ -820,6 +971,28 @@ func (bs *ChainService) GetDbBlocksByFilter(ctx context.Context, filter *dbtypes } } + // filter by builder index + if filter.BuilderIndex != nil { + builderIndex := blockIndex.BuilderIndex + // Convert uint64 to int64 for comparison (-1 means self-built/MaxUint64) + var builderIndexInt64 int64 + if builderIndex == math.MaxUint64 { + builderIndexInt64 = -1 + } else { + builderIndexInt64 = int64(builderIndex) + } + if builderIndexInt64 != *filter.BuilderIndex { + continue + } + } + + // filter by EL block parent hash + if len(filter.EthBlockParentHash) > 0 { + if !bytes.Equal(blockIndex.ExecutionParentHash[:], filter.EthBlockParentHash) { + continue + } + } + // filter by gas used if filter.MinGasUsed != nil || filter.MaxGasUsed != nil { gasUsed := blockIndex.GasUsed @@ -854,16 +1027,22 @@ func (bs *ChainService) GetDbBlocksByFilter(ctx context.Context, filter 
*dbtypes } cachedMatches = append(cachedMatches, cachedDbBlock{ - slot: uint64(block.Slot), - proposer: uint64(blockHeader.Message.ProposerIndex), - orphaned: !isCanonical, - block: block, + slot: uint64(block.Slot), + proposer: uint64(blockHeader.Message.ProposerIndex), + orphaned: !isCanonical, + payloadStatus: payloadStatus, + block: block, }) } // reconstruct missing blocks from epoch duties // For slot/root filtering, we still need to check if we need missing blocks for that specific slot - shouldCheckMissing := filter.WithMissing != 0 && filter.Graffiti == "" && filter.ExtraData == "" && filter.WithOrphaned != 2 && filter.MinSyncParticipation == nil && filter.MaxSyncParticipation == nil && filter.MinExecTime == nil && filter.MaxExecTime == nil && filter.MinTxCount == nil && filter.MaxTxCount == nil && filter.MinBlobCount == nil && filter.MaxBlobCount == nil && len(filter.ForkIds) == 0 && filter.MinGasUsed == nil && filter.MaxGasUsed == nil && filter.MinGasLimit == nil && filter.MaxGasLimit == nil && filter.MinBlockSize == nil && filter.MaxBlockSize == nil && filter.WithMevBlock == 0 + shouldCheckMissing := filter.WithMissing != 0 && filter.Graffiti == "" && filter.ExtraData == "" && filter.WithOrphaned != 2 && + filter.MinSyncParticipation == nil && filter.MaxSyncParticipation == nil && filter.MinExecTime == nil && filter.MaxExecTime == nil && + filter.MinTxCount == nil && filter.MaxTxCount == nil && filter.MinBlobCount == nil && filter.MaxBlobCount == nil && len(filter.ForkIds) == 0 && + filter.BuilderIndex == nil && filter.WithPayloadMask&dbtypes.PayloadStatusMaskMissing != 0 && len(filter.EthBlockParentHash) == 0 && filter.MinGasUsed == nil && + filter.MaxGasUsed == nil && filter.MinGasLimit == nil && filter.MaxGasLimit == nil && filter.MinBlockSize == nil && filter.MaxBlockSize == nil && + filter.WithMevBlock == 0 && filter.ProposerIndex == nil && filter.ProposerName == "" // If filtering by slot, only check missing for that specific slot if filter.Slot 
!= nil { @@ -975,6 +1154,7 @@ func (bs *ChainService) GetDbBlocksByFilter(ctx context.Context, filter *dbtypes if block.block != nil { if block.slot >= uint64(prunedSlot) { assignedBlock.Block = block.block.GetDbBlock(bs.beaconIndexer, !block.orphaned) + assignedBlock.Block.PayloadStatus = block.payloadStatus } else { blockRoots = append(blockRoots, block.block.Root[:]) blockRootsIdx = append(blockRootsIdx, resIdx) @@ -992,12 +1172,14 @@ func (bs *ChainService) GetDbBlocksByFilter(ctx context.Context, filter *dbtypes if blockMap != nil { for idx, blockRoot := range blockRoots { if dbBlock, ok := blockMap[phase0.Root(blockRoot)]; ok { + cachedMatch := cachedMatches[blockRootsCachedId[idx]] dbBlock.Status = dbtypes.Canonical - if cachedMatches[blockRootsCachedId[idx]].orphaned { + if cachedMatch.orphaned { dbBlock.Status = dbtypes.Orphaned } + dbBlock.PayloadStatus = cachedMatch.payloadStatus resBlocks[blockRootsIdx[idx]].Block = dbBlock } } diff --git a/services/chainservice_builder.go b/services/chainservice_builder.go new file mode 100644 index 000000000..d7c93f6af --- /dev/null +++ b/services/chainservice_builder.go @@ -0,0 +1,268 @@ +package services + +import ( + "bytes" + "context" + "slices" + "sort" + + "github.com/ethpandaops/go-eth2-client/spec/gloas" + "github.com/ethpandaops/go-eth2-client/spec/phase0" + + "github.com/ethpandaops/dora/db" + "github.com/ethpandaops/dora/dbtypes" + "github.com/ethpandaops/dora/indexer/beacon" +) + +// BuilderIndexFlag separates builder indices from validator indices +// A validator/builder index with this flag set is a builder index +const BuilderIndexFlag = beacon.BuilderIndexFlag + +type BuilderWithIndex struct { + Index gloas.BuilderIndex + Builder *gloas.Builder + Superseded bool +} + +// GetFilteredBuilderSet returns builders matching the filter criteria +func (bs *ChainService) GetFilteredBuilderSet(ctx context.Context, filter *dbtypes.BuilderFilter, withBalance bool) ([]BuilderWithIndex, uint64) { + var 
overrideForkId *beacon.ForkKey + + canonicalHead := bs.beaconIndexer.GetCanonicalHead(overrideForkId) + if canonicalHead == nil { + return nil, 0 + } + + var balances []phase0.Gwei + if withBalance { + balances = bs.beaconIndexer.GetRecentBuilderBalances(overrideForkId) + } + currentEpoch := bs.consensusPool.GetChainState().CurrentEpoch() + + cachedResults := make([]BuilderWithIndex, 0, 1000) + cachedIndexes := map[uint64]bool{} + + // Get matching entries from cached builders + bs.beaconIndexer.StreamActiveBuilderDataForRoot(canonicalHead.Root, false, ¤tEpoch, func(index gloas.BuilderIndex, flags uint16, activeData *beacon.BuilderData, builder *gloas.Builder) error { + if builder == nil { + return nil + } + if filter.MinIndex != nil && uint64(index) < *filter.MinIndex { + return nil + } + if filter.MaxIndex != nil && uint64(index) > *filter.MaxIndex { + return nil + } + if len(filter.PubKey) > 0 { + pubkeylen := min(len(filter.PubKey), 48) + if !bytes.Equal(builder.PublicKey[:pubkeylen], filter.PubKey) { + return nil + } + } + if len(filter.ExecutionAddress) > 0 { + if !bytes.Equal(builder.ExecutionAddress[:], filter.ExecutionAddress) { + return nil + } + } + + if len(filter.Status) > 0 { + builderStatus := getBuilderStatus(builder, currentEpoch, false) + if !slices.Contains(filter.Status, builderStatus) { + return nil + } + } + + cachedResults = append(cachedResults, BuilderWithIndex{ + Index: index, + Builder: builder, + }) + cachedIndexes[uint64(index)] = true + + return nil + }) + + // Get matching entries from DB + dbIndexes, err := db.GetBuilderIndexesByFilter(ctx, *filter, uint64(currentEpoch)) + if err != nil { + bs.logger.Warnf("error getting builder indexes by filter: %v", err) + return nil, 0 + } + + // Sort results + var sortFn func(builderA, builderB BuilderWithIndex) bool + switch filter.OrderBy { + case dbtypes.BuilderOrderIndexAsc: + sortFn = func(builderA, builderB BuilderWithIndex) bool { + return builderA.Index < builderB.Index + } + case 
dbtypes.BuilderOrderIndexDesc: + sortFn = func(builderA, builderB BuilderWithIndex) bool { + return builderA.Index > builderB.Index + } + case dbtypes.BuilderOrderPubKeyAsc: + sortFn = func(builderA, builderB BuilderWithIndex) bool { + return bytes.Compare(builderA.Builder.PublicKey[:], builderB.Builder.PublicKey[:]) < 0 + } + case dbtypes.BuilderOrderPubKeyDesc: + sortFn = func(builderA, builderB BuilderWithIndex) bool { + return bytes.Compare(builderA.Builder.PublicKey[:], builderB.Builder.PublicKey[:]) > 0 + } + case dbtypes.BuilderOrderBalanceAsc: + if balances == nil { + sortFn = func(builderA, builderB BuilderWithIndex) bool { + return builderA.Builder.Balance < builderB.Builder.Balance + } + } else { + sortFn = func(builderA, builderB BuilderWithIndex) bool { + return balances[builderA.Index] < balances[builderB.Index] + } + sort.Slice(dbIndexes, func(i, j int) bool { + if dbIndexes[i] >= uint64(len(balances)) || dbIndexes[j] >= uint64(len(balances)) { + return dbIndexes[i] < dbIndexes[j] + } + return balances[dbIndexes[i]] < balances[dbIndexes[j]] + }) + } + case dbtypes.BuilderOrderBalanceDesc: + if balances == nil { + sortFn = func(builderA, builderB BuilderWithIndex) bool { + return builderA.Builder.Balance > builderB.Builder.Balance + } + } else { + sortFn = func(builderA, builderB BuilderWithIndex) bool { + return balances[builderA.Index] > balances[builderB.Index] + } + sort.Slice(dbIndexes, func(i, j int) bool { + if dbIndexes[i] >= uint64(len(balances)) || dbIndexes[j] >= uint64(len(balances)) { + return dbIndexes[i] > dbIndexes[j] + } + return balances[dbIndexes[i]] > balances[dbIndexes[j]] + }) + } + case dbtypes.BuilderOrderDepositEpochAsc: + sortFn = func(builderA, builderB BuilderWithIndex) bool { + return builderA.Builder.DepositEpoch < builderB.Builder.DepositEpoch + } + case dbtypes.BuilderOrderDepositEpochDesc: + sortFn = func(builderA, builderB BuilderWithIndex) bool { + return builderA.Builder.DepositEpoch > builderB.Builder.DepositEpoch 
+ } + case dbtypes.BuilderOrderWithdrawableEpochAsc: + sortFn = func(builderA, builderB BuilderWithIndex) bool { + return builderA.Builder.WithdrawableEpoch < builderB.Builder.WithdrawableEpoch + } + case dbtypes.BuilderOrderWithdrawableEpochDesc: + sortFn = func(builderA, builderB BuilderWithIndex) bool { + return builderA.Builder.WithdrawableEpoch > builderB.Builder.WithdrawableEpoch + } + } + + sort.Slice(cachedResults, func(i, j int) bool { + return sortFn(cachedResults[i], cachedResults[j]) + }) + + // Stream builder set from db and merge cached results + resCap := filter.Limit + if resCap == 0 { + resCap = uint64(len(cachedResults) + len(dbIndexes)) + } + result := make([]BuilderWithIndex, 0, resCap) + cachedIndex := 0 + matchingCount := uint64(0) + resultCount := uint64(0) + dbEntryCount := uint64(0) + + db.StreamBuildersByIndexes(ctx, dbIndexes, func(dbBuilder *dbtypes.Builder) bool { + dbEntryCount++ + builderWithIndex := BuilderWithIndex{ + Index: gloas.BuilderIndex(dbBuilder.BuilderIndex), + Builder: beacon.UnwrapDbBuilder(dbBuilder), + Superseded: dbBuilder.Superseded, + } + + for cachedIndex < len(cachedResults) && (cachedResults[cachedIndex].Index == builderWithIndex.Index || sortFn(cachedResults[cachedIndex], builderWithIndex)) { + if matchingCount >= filter.Offset { + resultBuilder := cachedResults[cachedIndex] + if balances != nil && uint64(resultBuilder.Index) < uint64(len(balances)) { + resultBuilder.Builder.Balance = balances[resultBuilder.Index] + } + result = append(result, resultBuilder) + resultCount++ + } + matchingCount++ + cachedIndex++ + + if filter.Limit > 0 && resultCount >= filter.Limit { + return false // stop streaming + } + } + + if cachedIndexes[dbBuilder.BuilderIndex] { + return true // skip this index, cache entry is newer + } + + if matchingCount >= filter.Offset { + if !builderWithIndex.Superseded && balances != nil && dbBuilder.BuilderIndex < uint64(len(balances)) { + builderWithIndex.Builder.Balance = 
balances[dbBuilder.BuilderIndex] + } + result = append(result, builderWithIndex) + resultCount++ + } + matchingCount++ + + if filter.Limit > 0 && resultCount >= filter.Limit { + return false // stop streaming + } + + return true // get more from db + }) + + for cachedIndex < len(cachedResults) && (filter.Limit == 0 || resultCount < filter.Limit) { + if matchingCount >= filter.Offset { + resultBuilder := cachedResults[cachedIndex] + if balances != nil && uint64(resultBuilder.Index) < uint64(len(balances)) { + resultBuilder.Builder.Balance = balances[resultBuilder.Index] + } + result = append(result, resultBuilder) + resultCount++ + } + matchingCount++ + cachedIndex++ + } + + // Add remaining cached results + matchingCount += uint64(len(cachedResults) - cachedIndex) + + // Add remaining db results + remainingDbCount := uint64(0) + for i := dbEntryCount; i < uint64(len(dbIndexes)); i++ { + if cachedIndexes[dbIndexes[i]] { + continue + } + remainingDbCount++ + } + matchingCount += remainingDbCount + + return result, matchingCount +} + +// GetBuilderByIndex returns the builder by index +func (bs *ChainService) GetBuilderByIndex(index gloas.BuilderIndex) *gloas.Builder { + return bs.beaconIndexer.GetBuilderByIndex(index, nil) +} + +// GetBuilderBalances returns the current builder balances (epoch-start adjusted for in-epoch withdrawals). 
+func (bs *ChainService) GetBuilderBalances() []phase0.Gwei { + return bs.beaconIndexer.GetRecentBuilderBalances(nil) +} + +// getBuilderStatus determines the status of a builder +func getBuilderStatus(builder *gloas.Builder, currentEpoch phase0.Epoch, superseded bool) dbtypes.BuilderStatus { + if superseded { + return dbtypes.BuilderStatusSupersededFilter + } + if builder.WithdrawableEpoch <= currentEpoch { + return dbtypes.BuilderStatusExitedFilter + } + return dbtypes.BuilderStatusActiveFilter +} diff --git a/services/chainservice_consolidations.go b/services/chainservice_consolidations.go index f4114d27a..7f06f80be 100644 --- a/services/chainservice_consolidations.go +++ b/services/chainservice_consolidations.go @@ -5,12 +5,12 @@ import ( "context" "strings" - v1 "github.com/attestantio/go-eth2-client/api/v1" - "github.com/attestantio/go-eth2-client/spec/electra" - "github.com/attestantio/go-eth2-client/spec/phase0" "github.com/ethpandaops/dora/db" "github.com/ethpandaops/dora/dbtypes" "github.com/ethpandaops/dora/indexer/beacon" + v1 "github.com/ethpandaops/go-eth2-client/api/v1" + "github.com/ethpandaops/go-eth2-client/spec/electra" + "github.com/ethpandaops/go-eth2-client/spec/phase0" "github.com/prysmaticlabs/prysm/v5/container/slice" "github.com/sirupsen/logrus" "golang.org/x/exp/maps" diff --git a/services/chainservice_deposits.go b/services/chainservice_deposits.go index 5491a2530..e951189a6 100644 --- a/services/chainservice_deposits.go +++ b/services/chainservice_deposits.go @@ -8,11 +8,11 @@ import ( "strings" "sync" - "github.com/attestantio/go-eth2-client/spec/electra" - "github.com/attestantio/go-eth2-client/spec/phase0" "github.com/ethpandaops/dora/db" "github.com/ethpandaops/dora/dbtypes" "github.com/ethpandaops/dora/indexer/beacon" + "github.com/ethpandaops/go-eth2-client/spec/electra" + "github.com/ethpandaops/go-eth2-client/spec/phase0" "github.com/prysmaticlabs/prysm/v5/container/slice" "github.com/sirupsen/logrus" ) @@ -306,7 +306,8 @@ func 
(bs *ChainService) GetDepositOperationsByFilter(ctx context.Context, filter if len(txFilter.WithdrawalAddress) > 0 { wdcreds := depositWithTx.WithdrawalCredentials - if wdcreds[0] != 0x01 && wdcreds[0] != 0x02 { + // 0x01 = ETH1, 0x02 = compounding, 0x03 = builder deposit + if wdcreds[0] != 0x01 && wdcreds[0] != 0x02 && wdcreds[0] != 0x03 { continue } @@ -527,11 +528,11 @@ func (bs *ChainService) GetIndexedDepositQueue(ctx context.Context, headBlock *b indexedQueue.QueueEstimation = queueEpoch if lastNormalDeposit != nil && !bytes.Equal(lastNormalDeposit.PendingDeposit.Pubkey[:], lastIncludedDeposit.PublicKey[:]) { - // something is bad, return empty queue - logrus.Warnf("ChainService.GetIndexedDepositQueue: last included deposit not found in queue, %x != %x", lastNormalDeposit.PendingDeposit.Pubkey[:], lastIncludedDeposit.PublicKey[:]) - return &IndexedDepositQueue{ - Queue: []*IndexedDepositQueueEntry{}, - } + // Mismatch between queue and included deposits - this can happen if there are + // builder deposits (0x03) that skip the queue. Log warning but still return + // the queue to show useful information. The deposit indexes might not be perfectly + // matched but the queue itself is still valid. 
+ logrus.Debugf("ChainService.GetIndexedDepositQueue: last included deposit not found in queue (possibly due to builder deposits), %x != %x", lastNormalDeposit.PendingDeposit.Pubkey[:], lastIncludedDeposit.PublicKey[:]) } return indexedQueue @@ -599,7 +600,17 @@ func (bs *ChainService) getLastIncludedDeposit(ctx context.Context, headRoot pha } if len(deposits) > 0 { - lastDeposits = deposits + // Filter out builder deposits (0x03) as they skip the queue + filteredDeposits := make([]*dbtypes.Deposit, 0, len(deposits)) + for _, deposit := range deposits { + if len(deposit.WithdrawalCredentials) > 0 && deposit.WithdrawalCredentials[0] == 0x03 { + continue // Skip builder deposits + } + filteredDeposits = append(filteredDeposits, deposit) + } + if len(filteredDeposits) > 0 { + lastDeposits = filteredDeposits + } } } } diff --git a/services/chainservice_epochs.go b/services/chainservice_epochs.go index 1d99b3c60..be21c224a 100644 --- a/services/chainservice_epochs.go +++ b/services/chainservice_epochs.go @@ -3,9 +3,9 @@ package services import ( "context" - "github.com/attestantio/go-eth2-client/spec/phase0" "github.com/ethpandaops/dora/db" "github.com/ethpandaops/dora/dbtypes" + "github.com/ethpandaops/go-eth2-client/spec/phase0" ) func (bs *ChainService) GetDbEpochs(ctx context.Context, firstEpoch uint64, limit uint32) []*dbtypes.Epoch { diff --git a/services/chainservice_objects.go b/services/chainservice_objects.go index e29e7042e..c32724328 100644 --- a/services/chainservice_objects.go +++ b/services/chainservice_objects.go @@ -5,7 +5,7 @@ import ( "slices" "strings" - "github.com/attestantio/go-eth2-client/spec/phase0" + "github.com/ethpandaops/go-eth2-client/spec/phase0" "github.com/sirupsen/logrus" "github.com/ethpandaops/dora/db" diff --git a/services/chainservice_validators.go b/services/chainservice_validators.go index b88338025..012cad3f9 100644 --- a/services/chainservice_validators.go +++ b/services/chainservice_validators.go @@ -7,11 +7,11 @@ import ( 
"sort" "strings" - v1 "github.com/attestantio/go-eth2-client/api/v1" - "github.com/attestantio/go-eth2-client/spec/phase0" "github.com/ethpandaops/dora/db" "github.com/ethpandaops/dora/dbtypes" "github.com/ethpandaops/dora/indexer/beacon" + v1 "github.com/ethpandaops/go-eth2-client/api/v1" + "github.com/ethpandaops/go-eth2-client/spec/phase0" ) func (bs *ChainService) GetValidatorByIndex(index phase0.ValidatorIndex, withBalance bool) *v1.Validator { diff --git a/services/chainservice_withdrawals.go b/services/chainservice_withdrawals.go index 10d05aa9c..15a7645da 100644 --- a/services/chainservice_withdrawals.go +++ b/services/chainservice_withdrawals.go @@ -6,8 +6,8 @@ import ( "slices" "strings" - v1 "github.com/attestantio/go-eth2-client/api/v1" - "github.com/attestantio/go-eth2-client/spec/phase0" + v1 "github.com/ethpandaops/go-eth2-client/api/v1" + "github.com/ethpandaops/go-eth2-client/spec/phase0" "github.com/sirupsen/logrus" "github.com/ethpandaops/dora/db" @@ -476,7 +476,10 @@ func (bs *ChainService) GetWithdrawalsByFilter(ctx context.Context, filter *dbty withdrawals := block.GetDbWithdrawals(bs.beaconIndexer, isCanonical) for idx, withdrawal := range withdrawals { - if filter.Validator != nil && withdrawal.Validator != *filter.Validator { + if filter.MinIndex > 0 && withdrawal.Validator < filter.MinIndex { + continue + } + if filter.MaxIndex > 0 && withdrawal.Validator > filter.MaxIndex { continue } if filter.AccountID != nil { diff --git a/services/dasguardian.go b/services/dasguardian.go index 40feb624b..c6ae75170 100644 --- a/services/dasguardian.go +++ b/services/dasguardian.go @@ -7,11 +7,11 @@ import ( "reflect" "time" - "github.com/attestantio/go-eth2-client/spec" - "github.com/attestantio/go-eth2-client/spec/phase0" "github.com/ethereum/go-ethereum/common/hexutil" - dasguardian "github.com/probe-lab/eth-das-guardian" - "github.com/probe-lab/eth-das-guardian/api" + dasguardian "github.com/ethpandaops/eth-das-guardian" + 
"github.com/ethpandaops/eth-das-guardian/api" + "github.com/ethpandaops/go-eth2-client/spec" + "github.com/ethpandaops/go-eth2-client/spec/phase0" "github.com/sirupsen/logrus" ) @@ -131,10 +131,13 @@ func (d *dasGuardianAPI) Init(ctx context.Context) error { } func (d *dasGuardianAPI) GetStateVersion() string { - fuluForkEpoch := d.GetFuluForkEpoch() currentEpoch := GlobalBeaconService.GetChainState().CurrentEpoch() - if currentEpoch >= phase0.Epoch(fuluForkEpoch) { + if currentEpoch >= phase0.Epoch(d.GetGloasForkEpoch()) { + return "gloas" + } + + if currentEpoch >= phase0.Epoch(d.GetFuluForkEpoch()) { return "fulu" } @@ -174,6 +177,19 @@ func (d *dasGuardianAPI) GetFuluForkEpoch() uint64 { return *specs.FuluForkEpoch } +func (d *dasGuardianAPI) GetGloasForkEpoch() uint64 { + specs := GlobalBeaconService.GetChainState().GetSpecs() + if specs == nil { + return 0 + } + + if specs.GloasForkEpoch == nil { + return math.MaxInt64 + } + + return *specs.GloasForkEpoch +} + func (d *dasGuardianAPI) GetNodeIdentity(ctx context.Context) (*api.NodeIdentity, error) { // Get the first available consensus client consensusClients := GlobalBeaconService.GetConsensusClients() diff --git a/services/validatornames.go b/services/validatornames.go index c32403050..53e4fab29 100644 --- a/services/validatornames.go +++ b/services/validatornames.go @@ -13,7 +13,6 @@ import ( "sync" "time" - "github.com/attestantio/go-eth2-client/spec/phase0" "github.com/ethereum/go-ethereum/common" "github.com/ethpandaops/dora/clients/consensus" "github.com/ethpandaops/dora/config" @@ -21,6 +20,7 @@ import ( "github.com/ethpandaops/dora/dbtypes" "github.com/ethpandaops/dora/indexer/beacon" "github.com/ethpandaops/dora/utils" + "github.com/ethpandaops/go-eth2-client/spec/phase0" "github.com/jmoiron/sqlx" "github.com/sirupsen/logrus" "golang.org/x/exp/maps" diff --git a/statecache/statecache.go b/statecache/statecache.go new file mode 100644 index 000000000..b553f205f --- /dev/null +++ 
b/statecache/statecache.go @@ -0,0 +1,275 @@ +// Package statecache provides an optional file-system-backed cache for beacon states. +// States are stored as compressed SSZ files keyed by (dependentRoot, targetEpoch). +// The cache limits the number of stored states and re-initializes from the +// filesystem on restart (no in-memory index — just scans the directory). +package statecache + +import ( + "compress/gzip" + "encoding/hex" + "fmt" + "io" + "os" + "path/filepath" + "sort" + "strings" + "sync" + + "github.com/ethpandaops/dora/types" + "github.com/ethpandaops/go-eth2-client/spec" + "github.com/ethpandaops/go-eth2-client/spec/fulu" + "github.com/ethpandaops/go-eth2-client/spec/gloas" + "github.com/ethpandaops/go-eth2-client/spec/phase0" + + dynssz "github.com/pk910/dynamic-ssz" +) + +// StateCache manages cached beacon states on the local filesystem. +// It is safe for concurrent use. +type StateCache struct { + mu sync.Mutex + path string + maxStates uint + dynSsz *dynssz.DynSsz +} + +// New creates a new StateCache. Returns nil if the config disables caching. +// The directory is created if it doesn't exist. +func New(cfg *types.Config, dynSsz *dynssz.DynSsz) *StateCache { + scCfg := cfg.Indexer.StateCache + if !scCfg.Enabled || scCfg.Path == "" { + return nil + } + + maxStates := scCfg.MaxStates + if maxStates == 0 { + maxStates = 5 + } + + if err := os.MkdirAll(scCfg.Path, 0o750); err != nil { + return nil + } + + return &StateCache{ + path: scCfg.Path, + maxStates: maxStates, + dynSsz: dynSsz, + } +} + +// stateKey identifies a cached state by dependent root and target epoch. +type stateKey struct { + DependentRoot phase0.Root + TargetEpoch phase0.Epoch +} + +// filename returns the cache filename for a state key. +// Format: _.ssz.gz +func (k stateKey) filename() string { + return fmt.Sprintf("%d_%s.ssz.gz", k.TargetEpoch, hex.EncodeToString(k.DependentRoot[:])) +} + +// Check returns true if a cached state exists for the given key. 
+func (sc *StateCache) Check(dependentRoot phase0.Root, targetEpoch phase0.Epoch) bool { + if sc == nil { + return false + } + + key := stateKey{DependentRoot: dependentRoot, TargetEpoch: targetEpoch} + path := filepath.Join(sc.path, key.filename()) + _, err := os.Stat(path) + return err == nil +} + +// Load reads a cached state from disk. Returns nil if not found. +func (sc *StateCache) Load(dependentRoot phase0.Root, targetEpoch phase0.Epoch) *spec.VersionedBeaconState { + if sc == nil { + return nil + } + + key := stateKey{DependentRoot: dependentRoot, TargetEpoch: targetEpoch} + path := filepath.Join(sc.path, key.filename()) + + f, err := os.Open(path) + if err != nil { + return nil + } + defer f.Close() + + gz, err := gzip.NewReader(f) + if err != nil { + return nil + } + defer gz.Close() + + sszData, err := io.ReadAll(gz) + if err != nil { + return nil + } + + // Read version marker (first byte) + if len(sszData) < 1 { + return nil + } + version := spec.DataVersion(sszData[0]) + sszData = sszData[1:] + + state, err := unmarshalState(sc.dynSsz, version, sszData) + if err != nil { + return nil + } + + return state +} + +// Store writes a state to disk and enforces the max states limit. 
+func (sc *StateCache) Store(dependentRoot phase0.Root, targetEpoch phase0.Epoch, state *spec.VersionedBeaconState) error { + if sc == nil { + return nil + } + + sc.mu.Lock() + defer sc.mu.Unlock() + + sszData, err := marshalState(sc.dynSsz, state) + if err != nil { + return fmt.Errorf("failed to marshal state: %w", err) + } + + // Prepend version marker + versioned := make([]byte, 1+len(sszData)) + versioned[0] = byte(state.Version) + copy(versioned[1:], sszData) + + key := stateKey{DependentRoot: dependentRoot, TargetEpoch: targetEpoch} + path := filepath.Join(sc.path, key.filename()) + + if err := os.MkdirAll(sc.path, 0o750); err != nil { + return fmt.Errorf("failed to ensure cache directory: %w", err) + } + + f, err := os.CreateTemp(sc.path, "state-*.tmp") + if err != nil { + return fmt.Errorf("failed to create temp file: %w", err) + } + tmpPath := f.Name() + + gz := gzip.NewWriter(f) + if _, err := gz.Write(versioned); err != nil { + gz.Close() + f.Close() + os.Remove(tmpPath) + return fmt.Errorf("failed to write compressed data: %w", err) + } + gz.Close() + f.Close() + + if err := os.Rename(tmpPath, path); err != nil { + os.Remove(tmpPath) + return fmt.Errorf("failed to rename temp file: %w", err) + } + + // Enforce max states limit + sc.evict() + + return nil +} + +// evict removes the oldest cached states to stay within the max limit. +// Must be called with sc.mu held. 
+func (sc *StateCache) evict() { + entries, err := os.ReadDir(sc.path) + if err != nil { + return + } + + type cachedEntry struct { + name string + modTime int64 + } + + var cached []cachedEntry + for _, entry := range entries { + if entry.IsDir() || !strings.HasSuffix(entry.Name(), ".ssz.gz") { + continue + } + info, err := entry.Info() + if err != nil { + continue + } + cached = append(cached, cachedEntry{name: entry.Name(), modTime: info.ModTime().UnixNano()}) + } + + if uint(len(cached)) <= sc.maxStates { + return + } + + // Sort by modification time ascending (oldest first) + sort.Slice(cached, func(i, j int) bool { + return cached[i].modTime < cached[j].modTime + }) + + // Remove oldest until within limit + toRemove := uint(len(cached)) - sc.maxStates + for i := uint(0); i < toRemove; i++ { + os.Remove(filepath.Join(sc.path, cached[i].name)) + } +} + +// marshalState serializes a VersionedBeaconState to SSZ bytes. +func marshalState(dynSsz *dynssz.DynSsz, state *spec.VersionedBeaconState) ([]byte, error) { + switch state.Version { + case spec.DataVersionFulu: + if state.Fulu == nil { + return nil, fmt.Errorf("nil fulu state") + } + if dynSsz != nil { + return dynSsz.MarshalSSZ(state.Fulu) + } + return state.Fulu.MarshalSSZ() + case spec.DataVersionGloas: + if state.Gloas == nil { + return nil, fmt.Errorf("nil gloas state") + } + if dynSsz != nil { + return dynSsz.MarshalSSZ(state.Gloas) + } + return state.Gloas.MarshalSSZ() + default: + return nil, fmt.Errorf("unsupported state version: %v", state.Version) + } +} + +// unmarshalState deserializes SSZ bytes into a VersionedBeaconState. 
+func unmarshalState(dynSsz *dynssz.DynSsz, version spec.DataVersion, data []byte) (*spec.VersionedBeaconState, error) { + state := &spec.VersionedBeaconState{Version: version} + + switch version { + case spec.DataVersionFulu: + state.Fulu = new(fulu.BeaconState) + var err error + if dynSsz != nil { + err = dynSsz.UnmarshalSSZ(state.Fulu, data) + } else { + err = state.Fulu.UnmarshalSSZ(data) + } + if err != nil { + return nil, fmt.Errorf("failed to unmarshal fulu state: %w", err) + } + case spec.DataVersionGloas: + state.Gloas = new(gloas.BeaconState) + var err error + if dynSsz != nil { + err = dynSsz.UnmarshalSSZ(state.Gloas, data) + } else { + err = state.Gloas.UnmarshalSSZ(data) + } + if err != nil { + return nil, fmt.Errorf("failed to unmarshal gloas state: %w", err) + } + default: + return nil, fmt.Errorf("unsupported state version: %v", version) + } + + return state, nil +} diff --git a/static/css/layout.css b/static/css/layout.css index e0df61b79..665ee04d7 100644 --- a/static/css/layout.css +++ b/static/css/layout.css @@ -329,6 +329,26 @@ span.validator-label { padding: 1px .25rem; } +.badge.split-warning { + background: linear-gradient( + 90deg, + rgba(255,255,255,0) 0%, + rgba(255,255,255,0) 50%, + rgba(255,193,7,1) 50%, + rgba(255,193,7,1) 100% + ); +} + +.badge.split-info { + background: linear-gradient( + 90deg, + rgba(255,255,255,0) 0%, + rgba(255,255,255,0) 50%, + rgba(13,202,240,1) 50%, + rgba(13,202,240,1) 100% + ); +} + .text-monospace { font-family: var(--bs-font-monospace, SFMono-Regular, Menlo, Monaco, Consolas, "Liberation Mono", "Courier New", monospace) !important; } diff --git a/templates/_shared/txDetailsModal.html b/templates/_shared/txDetailsModal.html new file mode 100644 index 000000000..a283f9928 --- /dev/null +++ b/templates/_shared/txDetailsModal.html @@ -0,0 +1,167 @@ +{{ define "txDetailsModal" }} + +
+
+
+
+ Tx Hash: +
+
+
+ +
+ +
+
+
+
+
+
+ Block: +
+
+
+ +
+ +
+
+
+
+
+
+ Block Time: +
+
+
+ +
+ +
+
+
+
+
+
+ TX Origin: +
+
+
+ +
+ +
+
+
+
+
+
+ TX Target: +
+
+
+ +
+ +
+
+
+
+
+
+ +{{ end }} +{{ define "txDetailsModal-js" }} + +{{ end }} +{{ define "txDetailsModal-css" }} + +{{ end }} diff --git a/templates/address/system_deposits.html b/templates/address/system_deposits.html index 09d31379c..225cd3a8e 100644 --- a/templates/address/system_deposits.html +++ b/templates/address/system_deposits.html @@ -39,12 +39,28 @@ Requested + {{ else if eq .Type 4 }} + + Builder Withdrawal + + {{ else if eq .Type 5 }} + + Builder Payment + + {{ else if eq .Type 6 }} + + Builder Delayed + {{ else }} Unknown {{ end }} - {{ formatValidatorWithIndex .ValidatorIndex .ValidatorName }} + {{ if .IsBuilder }} + {{ formatBuilderWithIndex .ValidatorIndex .ValidatorName }} + {{ else }} + {{ formatValidatorWithIndex .ValidatorIndex .ValidatorName }} + {{ end }} {{ formatEthFromGweiP .Amount 6 }} diff --git a/templates/blocks/blocks.html b/templates/blocks/blocks.html index e1e413b5d..0c3001e1f 100644 --- a/templates/blocks/blocks.html +++ b/templates/blocks/blocks.html @@ -48,6 +48,7 @@

Blocks

+ @@ -88,6 +89,7 @@

Blocks

{{ if .DisplayTxCount }}Txs / Blobs{{ end }} {{ if .DisplayGasUsage }}Gas Usage{{ end }} {{ if .DisplaySyncAgg }}Sync Agg %{{ end }} + {{ if .DisplayBuilder }}Builder{{ end }} {{ if .DisplayMevBlock }}MEV Block{{ end }} {{ if .DisplayGasLimit }}Gas Limit{{ end }} {{ if .DisplayBlockSize }}Block Size{{ end }} @@ -162,6 +164,11 @@

Blocks

{{ end }}{{ end }} {{ if $g.DisplaySyncAgg }}{{ if not (eq $slot.Status 0) }}{{ formatPercentageAlert $slot.SyncParticipation 2 95 80 }}{{ end }}{{ end }} + {{ if $g.DisplayBuilder }}{{ if not (eq $slot.Status 0) }} + {{ if $slot.HasBuilder }} + {{ formatBuilder $slot.BuilderIndex $slot.BuilderName }} + {{ end }} + {{ end }}{{ end }} {{ if $g.DisplayMevBlock }}{{ if not (eq $slot.Status 0) }} {{ if $slot.IsMevBlock }} Yes @@ -190,6 +197,7 @@

Blocks

{{ if $g.DisplayTxCount }}{{ $colCount = add $colCount 1 }}{{ end }} {{ if $g.DisplayGasUsage }}{{ $colCount = add $colCount 1 }}{{ end }} {{ if $g.DisplaySyncAgg }}{{ $colCount = add $colCount 1 }}{{ end }} + {{ if $g.DisplayBuilder }}{{ $colCount = add $colCount 1 }}{{ end }} {{ if $g.DisplayMevBlock }}{{ $colCount = add $colCount 1 }}{{ end }} {{ if $g.DisplayGasLimit }}{{ $colCount = add $colCount 1 }}{{ end }} {{ if $g.DisplayBlockSize }}{{ $colCount = add $colCount 1 }}{{ end }} diff --git a/templates/builder/builder.html b/templates/builder/builder.html new file mode 100644 index 000000000..dd6e44365 --- /dev/null +++ b/templates/builder/builder.html @@ -0,0 +1,289 @@ +{{ define "page" }} +
+
+

Builder {{ formatBuilderWithIndex .Index .Name }}

+ +
+ +
+
+ +
+
+ Status: +
+
+ +
+
+
+
Deposited
+
+ + +
+
+
+
+
+
+
+ +
+
+
+
Pending
+
+ + +
+
+
+
+
+
+
+
+
+
+
+
+
Active
+
+ + + +
+
+
+
+
+
+
+
+
+ {{ if .ShowWithdrawable }} + + {{ end }} +
+
+
+
+
Exited
+
+ +
+
+
+
+
+
+
+
+
+
+
+ +
+
Index:
+
+ {{ formatBuilderWithIndex .Index .Name }} + +
+
+
+
Public Key:
+
+ 0x{{ printf "%x" .PublicKey }} + +
+
+
+
Execution Address:
+
+ {{ ethAddressLink .ExecutionAddress }} +
+
+
+
Status:
+
+ {{ if eq .State "Pending" }} + Pending + {{ else if eq .State "Active" }} + Active + {{ else if eq .State "Exited" }} + Exited + {{ else if eq .State "Superseded" }} + Superseded + {{ else }} + {{ .State }} + {{ end }} +
+
+
+
Balance:
+
+ {{ formatEthFromGwei .Balance }} + +
+
+
+
Version:
+
+ {{ .Version }} +
+
+ {{ if .ShowDeposit }} +
+
Deposit Epoch:
+
+ {{ formatAddCommas .DepositEpoch }} + ({{ formatRecentTimeShort .DepositTs }}) +
+
+ {{ end }} + {{ if .ShowWithdrawable }} +
+
Withdrawable Epoch:
+
+ {{ formatAddCommas .WithdrawableEpoch }} + ({{ formatRecentTimeShort .WithdrawableTs }}) +
+
+ {{ end }} + {{ if .ExitReason }} +
+
Exit Reason:
+
+ {{ if .ExitReasonVoluntaryExit }} + Builder submitted a voluntary exit request in slot {{ .ExitReasonSlot }} + {{ else if .ExitReasonWithdrawal }} + Builder submitted a full withdrawal request in slot {{ .ExitReasonSlot }} + {{ if .ExitReasonTxDetails }} +
+ + Transaction: {{ ethTransactionLink .ExitReasonTxHash 0 }} + + + {{ end }} + {{ else }} + {{ .ExitReason }} + {{ end }} +
+
+ {{ end }} + +
+
+ + {{ template "txDetailsModal" . }} + + + +
+
+ {{ if eq .TabView "blocks" }} + {{ template "recentBlocks" . }} + {{ end }} +
+
+ {{ if eq .TabView "bids" }} + {{ template "recentBids" . }} + {{ end }} +
+
+ {{ if eq .TabView "deposits" }} + {{ template "recentDeposits" . }} + {{ end }} +
+
+ {{ if eq .TabView "withdrawals" }} + {{ template "builderWithdrawals" . }} + {{ end }} +
+
+ +
+{{ end }} +{{ define "lazyPage" }} + {{ if eq .TabView "blocks" }} + {{ template "recentBlocks" . }} + {{ else if eq .TabView "bids" }} + {{ template "recentBids" . }} + {{ else if eq .TabView "deposits" }} + {{ template "recentDeposits" . }} + {{ else if eq .TabView "withdrawals" }} + {{ template "builderWithdrawals" . }} + {{ else }} + Unknown tab + {{ end }} +{{ end }} +{{ define "js" }} + +{{ template "txDetailsModal-js" . }} +{{ end }} +{{ define "css" }} + +{{ template "txDetailsModal-css" . }} +{{ end }} diff --git a/templates/builder/notfound.html b/templates/builder/notfound.html new file mode 100644 index 000000000..b92472273 --- /dev/null +++ b/templates/builder/notfound.html @@ -0,0 +1,27 @@ +{{ define "js" }} +{{ end }} + +{{ define "css" }} +{{ end }} + +{{ define "page" }} +
+
+
+

Builder not found

+ +
+
+
+
+
Sorry but we could not find the builder you are looking for. The builder may not exist or has not been indexed yet.
+
+
+
+{{ end }} diff --git a/templates/builder/recentBids.html b/templates/builder/recentBids.html new file mode 100644 index 000000000..d3cbddb7b --- /dev/null +++ b/templates/builder/recentBids.html @@ -0,0 +1,57 @@ +{{ define "recentBids" }} +
+
+
+ + + + + + + + + + + + + + {{ if gt (len .RecentBids) 0 }} + {{ range $i, $bid := .RecentBids }} + + + + + + + + + + {{ end }} + {{ else }} + + + + + + {{ end }} + +
SlotTimeBlock HashGas LimitValueEL PaymentStatus
{{ formatAddCommas $bid.Slot }}{{ formatRecentTimeShort $bid.Ts }} + + 0x{{ printf "%x" $bid.BlockHash }} + + + {{ formatAddCommas $bid.GasLimit }}{{ formatEthFromGwei $bid.Value }}{{ formatEthFromGwei $bid.ElPayment }} + {{ if $bid.IsWinning }} + Won + {{ else }} + - + {{ end }} +
+
+ {{ template "timeline_svg" }} +
+
+
+
+
+{{ end }} diff --git a/templates/builder/recentBlocks.html b/templates/builder/recentBlocks.html new file mode 100644 index 000000000..128dde3f7 --- /dev/null +++ b/templates/builder/recentBlocks.html @@ -0,0 +1,65 @@ +{{ define "recentBlocks" }} +
+
+
+ + + + + + + + + + + + + {{ if gt (len .RecentBlocks) 0 }} + {{ range $i, $block := .RecentBlocks }} + + + + + + + + + {{ end }} + {{ if .HasMoreBlocks }} + + + + {{ end }} + {{ else }} + + + + + + {{ end }} + +
EpochSlotBlock HashStatusTimeValue
{{ formatAddCommas $block.Epoch }}{{ formatAddCommas $block.Slot }} + + 0x{{ printf "%x" $block.BlockHash }} + + + {{ if eq $block.Status 0 }} + Missing + {{ else if eq $block.Status 1 }} + Canonical + {{ else if eq $block.Status 2 }} + Orphaned + {{ else }} + Unknown + {{ end }} + {{ formatRecentTimeShort $block.Ts }}{{ formatEthFromGwei $block.Value }}
+ View more blocks +
+
+ {{ template "timeline_svg" }} +
+
+
+
+
+{{ end }} diff --git a/templates/builder/recentDeposits.html b/templates/builder/recentDeposits.html new file mode 100644 index 000000000..2dbfd4d6a --- /dev/null +++ b/templates/builder/recentDeposits.html @@ -0,0 +1,85 @@ +{{ define "recentDeposits" }} +
+
+
+ + + + + + + + + + + + + + {{ if gt (len .RecentDeposits) 0 }} + {{ range $i, $deposit := .RecentDeposits }} + + + {{ if $deposit.Orphaned }} + + {{ else }} + + {{ end }} + + + + + + + {{ end }} + {{ else }} + + + + + + {{ end }} + +
TypeSlotTimeAmountDepositorTransactionStatus
+ {{ if eq $deposit.Type "exit" }} + Voluntary Exit + {{ else if eq $deposit.Type "deposit" }} + Deposit + {{ else }} + {{ $deposit.Type }} + {{ end }} + {{ formatAddCommas $deposit.SlotNumber }}{{ formatAddCommas $deposit.SlotNumber }}{{ formatRecentTimeShort $deposit.Time }}{{ formatFullEthFromGwei $deposit.Amount }} + {{ if $deposit.DepositorAddress }} + {{ ethAddressLink $deposit.DepositorAddress }} + + {{ else }} + ? + {{ end }} + + {{- if $deposit.HasTransaction }} +
+ {{ ethTransactionLink $deposit.TransactionHash 0 }} +
+ +
+
+ +
+
+ {{- else }} + ? + {{- end }} +
+ {{ if $deposit.Orphaned }} + Orphaned + {{ else }} + Included + {{ end }} +
+
+ {{ template "timeline_svg" }} +
+
+
+
+
+{{ end }} diff --git a/templates/builder/withdrawals.html b/templates/builder/withdrawals.html new file mode 100644 index 000000000..ea280d856 --- /dev/null +++ b/templates/builder/withdrawals.html @@ -0,0 +1,99 @@ +{{ define "builderWithdrawals" }} +
+
+
+ + + + + + + + + + + + + {{ if gt .WithdrawalCount 0 }} + {{ range $i, $withdrawal := .Withdrawals }} + + {{ if $withdrawal.Orphaned }} + + {{ else }} + + {{ end }} + + + + + + + {{ end }} + + + + {{ else }} + + + + {{ end }} + +
SlotTimeTypeRefAmountStatus
{{ formatAddCommas $withdrawal.SlotNumber }}{{ formatAddCommas $withdrawal.SlotNumber }}{{ formatRecentTimeShort $withdrawal.Time }} + {{ if eq $withdrawal.Type 1 }} + + Full + + {{ else if eq $withdrawal.Type 2 }} + + Sweep + + {{ else if eq $withdrawal.Type 3 }} + + Requested + + {{ else if eq $withdrawal.Type 4 }} + + Builder Withdrawal + + {{ else if eq $withdrawal.Type 5 }} + + Builder Payment + + {{ else if eq $withdrawal.Type 6 }} + + Builder Delayed + + {{ else }} + Unknown + {{ end }} + + {{ if gt $withdrawal.RefSlot 0 }} + {{ if $withdrawal.RefSlotRoot }} + {{ formatAddCommas $withdrawal.RefSlot }} + {{ else }} + {{ formatAddCommas $withdrawal.RefSlot }} + {{ end }} + {{ else }} + - + {{ end }} + + {{ formatEthFromGweiP $withdrawal.Amount 6 }} + + {{ if $withdrawal.Orphaned }} + Orphaned + {{ else }} + Canonical + {{ end }} +
+ {{ if gt .AdditionalWithdrawalCount 0 }} + View {{ formatAddCommas .AdditionalWithdrawalCount }} more withdrawals + {{ end }} +
+
+ {{ template "timeline_svg" }} +
+
+
+
+
+{{ end }} diff --git a/templates/builders/builders.html b/templates/builders/builders.html new file mode 100644 index 000000000..25869e1a2 --- /dev/null +++ b/templates/builders/builders.html @@ -0,0 +1,303 @@ +{{ define "page" }} +
+
+

Builders Overview

+ +
+ +
+
+ + {{ if not .IsDefaultSorting }}{{ end }} +
+
+ Builder Filters +
+
+
+
+
+
+
+ PubKey +
+
+ +
+
+
+
+ Index +
+
+ +
+
+
+
+ Execution Address +
+
+ +
+
+
+
+
+
+
+
+ Status +
+
+ +
+
+ +
+
+ +
+
+
+ +
+
+
+ +
+
+
+
+
+
+ +
+
+
+ + + + + + + + + + + + + {{ if gt .BuilderCount 0 }} + + {{ range $i, $builder := .Builders }} + + + + + + + + + + {{ end }} + + {{ else }} + + + + + + + + {{ end }} +
+ Index +
+ + +
+
+ Public Key +
+ + +
+
Execution Address + Balance +
+ + +
+
State + Deposit +
+ + +
+
+ Withdrawable +
+ + +
+
{{ formatAddCommas $builder.Index }}0x{{ printf "%x" $builder.PublicKey }}{{ ethAddressLink .ExecutionAddress }}{{ formatEthFromGwei $builder.Balance }}{{ $builder.State }} + {{- if $builder.ShowDeposit -}} + {{ formatRecentTimeShort $builder.DepositTs }} + (Epoch {{ formatAddCommas $builder.DepositEpoch }}) + {{- else -}} + - + {{- end -}} + + {{- if $builder.ShowWithdrawable -}} + {{ formatRecentTimeShort $builder.WithdrawableTs }} + (Epoch {{ formatAddCommas $builder.WithdrawableEpoch }}) + {{- else -}} + - + {{- end -}} +
+
+ {{ template "professor_svg" }} +
+
+
+ {{ if gt .TotalPages 1 }} +
+
+
+
Showing builder {{ .FirstBuilder }} to {{ .LastBuilder }}
+
+
+
+
+
+ + {{ range $key, $value := .UrlParams }} + {{ if ne $key "p" }} + + {{ end }} + {{ end }} + {{ if not .IsDefaultSorting }} + + {{ end }} +
+ + +
+
+
+
+ +
+
+
+ {{ end }} +
+ +
+
+{{ end }} +{{ define "js" }} + + +{{ end }} +{{ define "css" }} + + +{{ end }} diff --git a/templates/deposits/deposits.html b/templates/deposits/deposits.html index 6945e6b2f..c95b3ba8b 100644 --- a/templates/deposits/deposits.html +++ b/templates/deposits/deposits.html @@ -162,7 +162,11 @@
This table displays the deposits received by the Beac
{{ if $deposit.ValidatorExists }} - {{ formatValidatorWithIndex $deposit.ValidatorIndex $deposit.ValidatorName }} + {{ if $deposit.IsBuilder }} + {{ formatBuilderWithIndex $deposit.ValidatorIndex $deposit.ValidatorName }} + {{ else }} + {{ formatValidatorWithIndex $deposit.ValidatorIndex $deposit.ValidatorName }} + {{ end }} {{ else }} 0x{{ printf "%x" $deposit.PublicKey }} {{ end }} @@ -187,18 +191,18 @@
This table displays the deposits received by the Beac {{ end }} {{ if $deposit.IsQueued }} - Queued {{ end }} {{ if $deposit.InvalidSignature }} - @@ -295,7 +299,11 @@
This table displays the deposits made for validators
{{ if $deposit.ValidatorExists }} - {{ formatValidatorWithIndex $deposit.ValidatorIndex $deposit.ValidatorName }} + {{ if $deposit.IsBuilder }} + {{ formatBuilderWithIndex $deposit.ValidatorIndex $deposit.ValidatorName }} + {{ else }} + {{ formatValidatorWithIndex $deposit.ValidatorIndex $deposit.ValidatorName }} + {{ end }} {{ else }} 0x{{ printf "%x" $deposit.PublicKey }} {{ end }} @@ -410,7 +418,11 @@
This table displays deposits waiting to be activated
{{ if $deposit.ValidatorExists }} - {{ formatValidatorWithIndex $deposit.ValidatorIndex $deposit.ValidatorName }} + {{ if $deposit.IsBuilder }} + {{ formatBuilderWithIndex $deposit.ValidatorIndex $deposit.ValidatorName }} + {{ else }} + {{ formatValidatorWithIndex $deposit.ValidatorIndex $deposit.ValidatorName }} + {{ end }} {{ else }} 0x{{ printf "%x" $deposit.PublicKey }} {{ end }} diff --git a/templates/el_withdrawals/el_withdrawals.html b/templates/el_withdrawals/el_withdrawals.html index e9e60bb61..cc777631e 100644 --- a/templates/el_withdrawals/el_withdrawals.html +++ b/templates/el_withdrawals/el_withdrawals.html @@ -51,26 +51,36 @@

- Validator Index + Entity Type +
+
+ +
+
+
+
+ {{ if eq .FilterEntity "builder" }}Builder Index{{ else }}Validator Index{{ end }}
- -
-
- - +
+
-
- +
- Validator Name + {{ if eq .FilterEntity "builder" }}Builder Name{{ else }}Validator Name{{ end }}
- +
@@ -187,7 +197,11 @@

{{- if $request.ValidatorValid }} - {{ formatValidatorWithIndex $request.ValidatorIndex $request.ValidatorName }} + {{- if $request.IsBuilder }} + {{ formatBuilderWithIndex $request.ValidatorIndex $request.ValidatorName }} + {{- else }} + {{ formatValidatorWithIndex $request.ValidatorIndex $request.ValidatorName }} + {{- end }} {{- else }}
0x{{ printf "%x" $request.PublicKey }} @@ -454,6 +468,25 @@

} }); + // Entity type toggle + function updateEntityFields(entity) { + var isAll = (entity === 'all'); + $('.entity-field').each(function() { + $(this).prop('readonly', isAll).toggleClass('entity-field-disabled', isAll); + if (isAll) { $(this).val(''); } + }); + if (!isAll) { + $('.entity-label').each(function() { $(this).text($(this).data(entity)); }); + $('.entity-placeholder').each(function() { $(this).attr('placeholder', $(this).data(entity)); }); + } + } + $('.entity-select').on('change', function() { updateEntityFields($(this).val()); }); + $('.entity-field').on('mousedown', function() { + if ($(this).prop('readonly')) { + $('.entity-select').val('validator').trigger('change'); + } + }); + }); {{ end }} @@ -466,6 +499,12 @@

padding-right: 10px; } +.entity-field-disabled { + background-color: var(--bs-secondary-bg); + opacity: 0.65; + cursor: pointer; +} + .tx-details-label { min-width: 90px; } diff --git a/templates/epoch/epoch.html b/templates/epoch/epoch.html index f047ee436..8f6a8b271 100644 --- a/templates/epoch/epoch.html +++ b/templates/epoch/epoch.html @@ -177,15 +177,15 @@

{{ else if $slot.Scheduled }} Scheduled {{ else if eq $slot.Status 1 }} - Proposed + Proposed {{ else if eq $slot.Status 2 }} - Orphaned + Orphaned {{ else if not $epoch.Synchronized }} ? {{ else if eq $slot.Status 0 }} Missed {{ else }} - Unknown + Unknown {{ end }} {{ formatRecentTimeShort $slot.Ts }} diff --git a/templates/exits/exits.html b/templates/exits/exits.html index f7a9dfab5..90578d2a9 100644 --- a/templates/exits/exits.html +++ b/templates/exits/exits.html @@ -151,7 +151,11 @@

This table displays the most recent voluntary exit re {{ formatRecentTimeShort $exit.Time }} - {{ formatValidatorWithIndex $exit.ValidatorIndex $exit.ValidatorName }} + {{ if $exit.IsBuilder }} + {{ formatBuilderWithIndex $exit.ValidatorIndex $exit.ValidatorName }} + {{ else }} + {{ formatValidatorWithIndex $exit.ValidatorIndex $exit.ValidatorName }} + {{ end }}
@@ -339,7 +343,11 @@
This table displays the most recent execution layer t {{- if $exitReq.ValidatorValid }} - {{ formatValidatorWithIndex $exitReq.ValidatorIndex $exitReq.ValidatorName }} + {{ if $exitReq.IsBuilder }} + {{ formatBuilderWithIndex $exitReq.ValidatorIndex $exitReq.ValidatorName }} + {{ else }} + {{ formatValidatorWithIndex $exitReq.ValidatorIndex $exitReq.ValidatorName }} + {{ end }} {{- else }}
0x{{ printf "%x" $exitReq.PublicKey }} diff --git a/templates/included_deposits/included_deposits.html b/templates/included_deposits/included_deposits.html index 85f8d5b59..9b25aaf09 100644 --- a/templates/included_deposits/included_deposits.html +++ b/templates/included_deposits/included_deposits.html @@ -194,7 +194,11 @@

{{ if $deposit.ValidatorExists }} - {{ formatValidatorWithIndex $deposit.ValidatorIndex $deposit.ValidatorName }} + {{ if $deposit.IsBuilder }} + {{ formatBuilderWithIndex $deposit.ValidatorIndex $deposit.ValidatorName }} + {{ else }} + {{ formatValidatorWithIndex $deposit.ValidatorIndex $deposit.ValidatorName }} + {{ end }} {{ else }} 0x{{ printf "%x" $deposit.PublicKey }} {{ end }} diff --git a/templates/index/recentBlocks.html b/templates/index/recentBlocks.html index 47f381bc1..172084371 100644 --- a/templates/index/recentBlocks.html +++ b/templates/index/recentBlocks.html @@ -41,9 +41,9 @@
Genesis Missed - Proposed - Missed (Orphaned) - Unknown + Proposed + Missed (Orphaned) + Unknown @@ -74,11 +74,11 @@
Missed {{ else if eq .Status 1 }} - Proposed + Proposed {{ else if eq .Status 2 }} - Missed (Orphaned) + Missed (Orphaned) {{ else }} - Unknown + Unknown {{ end }} {{ formatRecentTimeShort $block.Ts }} diff --git a/templates/index/recentSlots.html b/templates/index/recentSlots.html index 8b74d6755..12b8066ff 100644 --- a/templates/index/recentSlots.html +++ b/templates/index/recentSlots.html @@ -42,9 +42,9 @@
Genesis Missed - Proposed - Missed (Orphaned) - Unknown + Proposed + Missed (Orphaned) + Unknown @@ -97,11 +97,11 @@
Missed {{ else if eq .Status 1 }} - Proposed + Proposed {{ else if eq .Status 2 }} - Orphaned + Missed (Orphaned) {{ else }} - Unknown + Unknown {{ end }} {{ formatRecentTimeShort $slot.Ts }} diff --git a/templates/initiated_deposits/initiated_deposits.html b/templates/initiated_deposits/initiated_deposits.html index 75950ac09..8a9b577b9 100644 --- a/templates/initiated_deposits/initiated_deposits.html +++ b/templates/initiated_deposits/initiated_deposits.html @@ -166,7 +166,11 @@

{{ if $deposit.ValidatorExists }} - {{ formatValidatorWithIndex $deposit.ValidatorIndex $deposit.ValidatorName }} + {{ if $deposit.IsBuilder }} + {{ formatBuilderWithIndex $deposit.ValidatorIndex $deposit.ValidatorName }} + {{ else }} + {{ formatValidatorWithIndex $deposit.ValidatorIndex $deposit.ValidatorName }} + {{ end }} {{ else }} 0x{{ printf "%x" $deposit.PublicKey }} {{ end }} diff --git a/templates/queued_deposits/queued_deposits.html b/templates/queued_deposits/queued_deposits.html index 424bf6370..da75a769c 100644 --- a/templates/queued_deposits/queued_deposits.html +++ b/templates/queued_deposits/queued_deposits.html @@ -135,7 +135,11 @@

{{ if $deposit.ValidatorExists }} - {{ formatValidatorWithIndex $deposit.ValidatorIndex $deposit.ValidatorName }} + {{ if $deposit.IsBuilder }} + {{ formatBuilderWithIndex $deposit.ValidatorIndex $deposit.ValidatorName }} + {{ else }} + {{ formatValidatorWithIndex $deposit.ValidatorIndex $deposit.ValidatorName }} + {{ end }} {{ else }} 0x{{ printf "%x" $deposit.PublicKey }} {{ end }} diff --git a/templates/slot/attestations.html b/templates/slot/attestations.html index 17ad6eaae..09068b912 100644 --- a/templates/slot/attestations.html +++ b/templates/slot/attestations.html @@ -12,9 +12,25 @@
Slot:
-
+
+
+ {{ html "" }} + Payload Status: + {{ html "" }} +
+
+ {{ html "" }} + FULL + {{ html "" }} + {{ html "" }} + EMPTY + {{ html "" }} + {{ html "" }} + UNKNOWN + {{ html "" }} +
@@ -184,6 +200,7 @@ self.signature = base64ToBytes(data.signature); self.validators = data.validators; self.included_validators = data.included_validators; + self.payload_status = data.payload_status !== undefined ? data.payload_status : null; self.showDetails = ko.observable(false); diff --git a/templates/slot/bids.html b/templates/slot/bids.html new file mode 100644 index 000000000..37f6184f8 --- /dev/null +++ b/templates/slot/bids.html @@ -0,0 +1,42 @@ +{{ define "block_bids" }} +
+ + + + + + + + + + + + + + {{ range $i, $bid := .Block.Bids }} + + + + + + + + + + {{ end }} + +
BuilderBlock HashFee RecipientGas LimitValueEL PaymentTotal
+ {{ if $bid.IsSelfBuilt }} + Self-built + {{ else }} + {{ formatValidatorWithIndex $bid.BuilderIndex $bid.BuilderName }} + {{ end }} + {{ if $bid.IsWinning }}Winner{{ end }} + +
+ 0x{{ printf "%x" $bid.BlockHash }} + +
+
{{ ethAddressLink $bid.FeeRecipient }}{{ formatAddCommas $bid.GasLimit }}{{ formatEthFromGwei $bid.Value }}{{ formatEthFromGwei $bid.ElPayment }}{{ formatEthFromGwei $bid.TotalValue }}
+
+{{ end }} diff --git a/templates/slot/consolidation_requests.html b/templates/slot/consolidation_requests.html index 61a1e6140..4f84782d7 100644 --- a/templates/slot/consolidation_requests.html +++ b/templates/slot/consolidation_requests.html @@ -28,7 +28,11 @@ {{- if $consolidationreq.SourceFound }} - {{ formatValidatorWithIndex $consolidationreq.SourceIndex $consolidationreq.SourceName }} + {{- if $consolidationreq.SourceIsBuilder }} + {{ formatBuilderWithIndex $consolidationreq.SourceIndex $consolidationreq.SourceName }} + {{- else }} + {{ formatValidatorWithIndex $consolidationreq.SourceIndex $consolidationreq.SourceName }} + {{- end }} {{- else }} ? {{- end }} @@ -41,7 +45,11 @@ {{- if $consolidationreq.TargetFound }} - {{ formatValidatorWithIndex $consolidationreq.TargetIndex $consolidationreq.TargetName }} + {{- if $consolidationreq.TargetIsBuilder }} + {{ formatBuilderWithIndex $consolidationreq.TargetIndex $consolidationreq.TargetName }} + {{- else }} + {{ formatValidatorWithIndex $consolidationreq.TargetIndex $consolidationreq.TargetName }} + {{- end }} {{- else }} ? {{- end }} diff --git a/templates/slot/deposit_requests.html b/templates/slot/deposit_requests.html index bbbc6a8bd..dda82c410 100644 --- a/templates/slot/deposit_requests.html +++ b/templates/slot/deposit_requests.html @@ -16,13 +16,21 @@ {{ $deposit.Index }} + {{- if $deposit.IsBuilder }} + + {{- else }} + {{- end }} 0x{{ printf "%x" $deposit.PublicKey }} {{- if $deposit.Exists }} - {{ formatValidatorWithIndex $deposit.ValidatorIndex $deposit.ValidatorName }} + {{- if $deposit.IsBuilder }} + {{ formatBuilderWithIndex $deposit.ValidatorIndex $deposit.ValidatorName }} + {{- else }} + {{ formatValidatorWithIndex $deposit.ValidatorIndex $deposit.ValidatorName }} + {{- end }} {{- else }} ? {{- end }} diff --git a/templates/slot/overview.html b/templates/slot/overview.html index 40c031a5a..eb5505275 100644 --- a/templates/slot/overview.html +++ b/templates/slot/overview.html @@ -204,15 +204,27 @@
{{ end }} - {{ if .Block.ExecutionData }} + + {{ if .Block.PayloadHeader }} {{ $block := .Block }} - {{ with .Block.ExecutionData }} + {{ with .Block.PayloadHeader }}
-
Execution Payload:
+
Payload Header:
+
-
Block Number:
-
{{ ethBlockLink .BlockNumber }}
+
Payload Status:
+
+ {{ if eq .PayloadStatus 0 }} + Missing + {{ else if eq .PayloadStatus 1 }} + Revealed + {{ else if eq .PayloadStatus 2 }} + Orphaned + {{ else }} + Unknown + {{ end }} +
@@ -226,11 +238,85 @@
Parent Hash:
- 0x{{ printf "%x" .ParentHash }} - + {{ ethBlockHashLink .ParentBlockHash }} + +
+
+ +
+
Builder:
+
+ {{ formatBuilderWithIndex .BuilderIndex .BuilderName }}
+
+
Block Value:
+
+ {{ formatEthFromGwei .Value }} +
+
+ +
+
Gas Limit:
+
+ {{ .GasLimit }} +
+
+ +
+
Blob KZG Commitments:
+
+ {{ len .BlobKZGCommitments }} blob{{ if ne (len .BlobKZGCommitments) 1 }}s{{ end }} + {{ if .BlobKZGCommitments }} + +
+ {{ range $i, $c := .BlobKZGCommitments }} +
+ {{ $i }}: + 0x{{ printf "%x" $c }} + +
+ {{ end }} +
+ {{ end }} +
+
+
+
+ {{ end }} + {{ end }} + {{ if .Block.ExecutionData }} + {{ $block := .Block }} + {{ with .Block.ExecutionData }} +
+
Execution Payload:
+
+
+
Block Number:
+
{{ ethBlockLink .BlockNumber }}
+
+ + {{ if not $block.PayloadHeader }} +
+
Block Hash:
+
+ {{ ethBlockHashLink .BlockHash }} + +
+
+ +
+
Parent Hash:
+
+ 0x{{ printf "%x" .ParentHash }} + +
+
+ {{ end }} + {{ if .StateRoot }}
State Root:
@@ -281,10 +367,12 @@
-
-
Gas Limit:
-
{{ formatAddCommas .GasLimit }}
-
+ {{ if not $block.PayloadHeader }} +
+
Gas Limit:
+
{{ formatAddCommas .GasLimit }}
+
+ {{ end }}
Base fee per gas:
diff --git a/templates/slot/ptc_votes.html b/templates/slot/ptc_votes.html new file mode 100644 index 000000000..094ca327e --- /dev/null +++ b/templates/slot/ptc_votes.html @@ -0,0 +1,108 @@ +{{ define "block_ptc_votes" }} +
+ {{ if .Block.PtcVotes }} +
+ + PTC (Payload Timeliness Committee) votes included in this block are for + slot {{ .Block.PtcVotes.VotedSlot }} (the previous slot). + {{ if .Block.PtcVotes.VotedBlockRoot }} +
Voted block root: 0x{{ printf "%x" .Block.PtcVotes.VotedBlockRoot }} + {{ end }} +
+ +
+
+
+
+
{{ formatParticipation .Block.PtcVotes.Participation }}
+ Participation +
+
+
+
+
+
+
{{ len .Block.PtcVotes.Aggregates }}
+ Aggregates +
+
+
+
+
+
+
{{ .Block.PtcVotes.TotalPtcSize }}
+ Committee Size +
+
+
+
+ +
Vote Aggregates
+ {{ range $i, $agg := .Block.PtcVotes.Aggregates }} +
+
+
+ + {{ if and $agg.PayloadPresent $agg.BlobDataAvailable }} + Payload + Blob Available + {{ else if $agg.PayloadPresent }} + Payload Only + {{ else if $agg.BlobDataAvailable }} + Blob Only + {{ else }} + Unavailable + {{ end }} + + {{ $agg.VoteCount }} vote{{ if ne $agg.VoteCount 1 }}s{{ end }} + ({{ formatFloat $agg.VotePercent 1 }}%) +
+
+ + PayloadPresent: {{ if $agg.PayloadPresent }}Yes{{ else }}No{{ end }} + + + BlobDataAvailable: {{ if $agg.BlobDataAvailable }}Yes{{ else }}No{{ end }} + +
+
+ {{ if gt (len $agg.Validators) 0 }} +
+ +
+ {{ range $j, $v := $agg.Validators }} + {{ formatValidatorWithIndex $v.Index $v.Name }} + {{ end }} +
+
+ {{ end }} +
+ {{ end }} + + {{ if gt .Block.PtcVotes.NonVoterCount 0 }} +
+
+ Absent + {{ .Block.PtcVotes.NonVoterCount }} validator{{ if ne .Block.PtcVotes.NonVoterCount 1 }}s{{ end }} + ({{ formatFloat .Block.PtcVotes.NonVoterPercent 1 }}%) +
+ {{ if gt (len .Block.PtcVotes.NonVoters) 0 }} +
+ +
+ {{ range $j, $v := .Block.PtcVotes.NonVoters }} + {{ formatValidatorWithIndex $v.Index $v.Name }} + {{ end }} +
+
+ {{ end }} +
+ {{ end }} + {{ else }} +

No PTC vote data available.

+ {{ end }} +
+{{ end }} diff --git a/templates/slot/slot.html b/templates/slot/slot.html index 74fe5c764..fc9bf9b7e 100644 --- a/templates/slot/slot.html +++ b/templates/slot/slot.html @@ -43,9 +43,19 @@

Transactions {{ .Block.TransactionsCount }} {{ end }} + {{ if gt .Block.BidsCount 0 }} + + {{ end }} + {{ if gt .Block.PtcVotesCount 0 }} + + {{ end }} {{ if gt .Block.DepositsCount 0 }}

{{ end }} + {{ if gt .Block.BidsCount 0 }} +
+
+
+
+

Showing {{ .Block.BidsCount }} Execution Payload Bids

+
+
+ {{ template "block_bids" . }} +
+
+ {{ end }} + {{ if gt .Block.PtcVotesCount 0 }} +
+
+
+
+

Showing {{ .Block.PtcVotesCount }} PTC Votes (for slot {{ .Block.PtcVotes.VotedSlot }})

+
+
+ {{ template "block_ptc_votes" . }} +
+
+ {{ end }} {{ if .Block }}
diff --git a/templates/slot/voluntary_exits.html b/templates/slot/voluntary_exits.html index 0ffbc0333..187999d64 100644 --- a/templates/slot/voluntary_exits.html +++ b/templates/slot/voluntary_exits.html @@ -13,7 +13,13 @@ {{ range $i, $exit := .Block.VoluntaryExits }} {{ $i }} - {{ formatValidatorWithIndex $exit.ValidatorIndex $exit.ValidatorName }} + + {{ if $exit.IsBuilder }} + {{ formatBuilderWithIndex $exit.ValidatorIndex $exit.ValidatorName }} + {{ else }} + {{ formatValidatorWithIndex $exit.ValidatorIndex $exit.ValidatorName }} + {{ end }} + {{ $exit.Epoch }} 0x{{ printf "%x" $exit.Signature }} diff --git a/templates/slot/withdrawal_requests.html b/templates/slot/withdrawal_requests.html index 1e842771a..c73d111ef 100644 --- a/templates/slot/withdrawal_requests.html +++ b/templates/slot/withdrawal_requests.html @@ -20,7 +20,11 @@ {{- if $withdrawalreq.Exists }} - {{ formatValidatorWithIndex $withdrawalreq.ValidatorIndex $withdrawalreq.ValidatorName }} + {{- if $withdrawalreq.IsBuilder }} + {{ formatBuilderWithIndex $withdrawalreq.ValidatorIndex $withdrawalreq.ValidatorName }} + {{- else }} + {{ formatValidatorWithIndex $withdrawalreq.ValidatorIndex $withdrawalreq.ValidatorName }} + {{- end }} {{- else }} ? 
{{- end }} diff --git a/templates/slot/withdrawals.html b/templates/slot/withdrawals.html index c2bf3e611..fa1d45f20 100644 --- a/templates/slot/withdrawals.html +++ b/templates/slot/withdrawals.html @@ -5,6 +5,8 @@ Index Validator Index + Type + Ref Recipient Address Amount @@ -13,7 +15,41 @@ {{ range $i, $withdrawal := .Block.Withdrawals }} {{ $withdrawal.Index }} - {{ formatValidatorWithIndex $withdrawal.ValidatorIndex $withdrawal.ValidatorName }} + + {{ if $withdrawal.IsBuilder }} + {{ formatBuilderWithIndex $withdrawal.ValidatorIndex $withdrawal.ValidatorName }} + {{ else }} + {{ formatValidatorWithIndex $withdrawal.ValidatorIndex $withdrawal.ValidatorName }} + {{ end }} + + + {{ if eq $withdrawal.Type 1 }} + Full + {{ else if eq $withdrawal.Type 2 }} + Sweep + {{ else if eq $withdrawal.Type 3 }} + Requested + {{ else if eq $withdrawal.Type 4 }} + Builder Withdrawal + {{ else if eq $withdrawal.Type 5 }} + Builder Payment + {{ else if eq $withdrawal.Type 6 }} + Builder Delayed + {{ else }} + - + {{ end }} + + + {{ if gt $withdrawal.RefSlot 0 }} + {{ if $withdrawal.RefSlotRoot }} + {{ formatAddCommas $withdrawal.RefSlot }} + {{ else }} + {{ formatAddCommas $withdrawal.RefSlot }} + {{ end }} + {{ else }} + - + {{ end }} + {{ ethAddressLink $withdrawal.Address }} {{ formatEthFromGweiP $withdrawal.Amount 6 }} diff --git a/templates/slots/slots.html b/templates/slots/slots.html index a90bafe82..bb609d390 100644 --- a/templates/slots/slots.html +++ b/templates/slots/slots.html @@ -48,6 +48,7 @@

Slots

+ @@ -88,6 +89,7 @@

Slots

{{ if .DisplayTxCount }}Txs / Blobs{{ end }} {{ if .DisplayGasUsage }}Gas Usage{{ end }} {{ if .DisplaySyncAgg }}Sync Agg %{{ end }} + {{ if .DisplayBuilder }}Builder{{ end }} {{ if .DisplayMevBlock }}MEV Block{{ end }} {{ if .DisplayGasLimit }}Gas Limit{{ end }} {{ if .DisplayBlockSize }}Block Size{{ end }} @@ -132,9 +134,9 @@

Slots

{{ if eq $slot.Slot 0 }} Genesis {{ else if eq $slot.Status 1 }} - Proposed + Proposed {{ else if eq $slot.Status 2 }} - Orphaned + Missed (Orphaned) {{ else if $slot.Scheduled }} Scheduled {{ else if not $slot.Synchronized }} @@ -142,7 +144,7 @@

Slots

{{ else if eq $slot.Status 0 }} Missed {{ else }} - Unknown + Unknown {{ end }} {{ end }} @@ -162,6 +164,11 @@

Slots

{{ end }}{{ end }} {{ if $g.DisplaySyncAgg }}{{ if not (eq $slot.Status 0) }}{{ formatPercentageAlert $slot.SyncParticipation 2 95 80 }}{{ end }}{{ end }} + {{ if $g.DisplayBuilder }}{{ if not (eq $slot.Status 0) }} + {{ if $slot.HasBuilder }} + {{ formatBuilder $slot.BuilderIndex $slot.BuilderName }} + {{ end }} + {{ end }}{{ end }} {{ if $g.DisplayMevBlock }}{{ if not (eq $slot.Status 0) }} {{ if $slot.IsMevBlock }} Yes @@ -190,6 +197,7 @@

Slots

{{ if $g.DisplayTxCount }}{{ $colCount = add $colCount 1 }}{{ end }} {{ if $g.DisplayGasUsage }}{{ $colCount = add $colCount 1 }}{{ end }} {{ if $g.DisplaySyncAgg }}{{ $colCount = add $colCount 1 }}{{ end }} + {{ if $g.DisplayBuilder }}{{ $colCount = add $colCount 1 }}{{ end }} {{ if $g.DisplayMevBlock }}{{ $colCount = add $colCount 1 }}{{ end }} {{ if $g.DisplayGasLimit }}{{ $colCount = add $colCount 1 }}{{ end }} {{ if $g.DisplayBlockSize }}{{ $colCount = add $colCount 1 }}{{ end }} diff --git a/templates/slots_filtered/slots_filtered.html b/templates/slots_filtered/slots_filtered.html index cd4af7a48..640e8431e 100644 --- a/templates/slots_filtered/slots_filtered.html +++ b/templates/slots_filtered/slots_filtered.html @@ -91,31 +91,41 @@

Filtered Slots

+
+
+ Builder +
+
+ +
+
- Missing Blocks + Block Status
-
- +
- Orphaned Blocks + Payload Status
-
- +
@@ -234,6 +244,7 @@

Filtered Slots

+ @@ -281,6 +292,7 @@

Filtered Slots

{{ if .DisplayTxCount }}Txs / Blobs{{ end }} {{ if .DisplayGasUsage }}Gas Usage{{ end }} {{ if .DisplaySyncAgg }}Sync Agg %{{ end }} + {{ if .DisplayBuilder }}Builder{{ end }} {{ if .DisplayMevBlock }}MEV Block{{ end }} {{ if .DisplayGasLimit }}Gas Limit{{ end }} {{ if .DisplayBlockSize }}Block Size{{ end }} @@ -310,9 +322,9 @@

Filtered Slots

{{- if eq $slot.Slot 0 }} Genesis {{- else if eq $slot.Status 1 }} - Proposed + Proposed {{- else if eq $slot.Status 2 }} - Orphaned + Missed (Orphaned) {{- else if $slot.Scheduled }} Scheduled {{- else if not $slot.Synchronized }} @@ -320,7 +332,7 @@

Filtered Slots

{{- else if eq $slot.Status 0 }} Missed {{- else }} - Unknown + Unknown {{- end }} {{- end }} @@ -355,6 +367,13 @@

Filtered Slots

{{- if $g.DisplaySyncAgg }} {{ if not (eq $slot.Status 0) }}{{ formatPercentageAlert $slot.SyncParticipation 2 95 80 }}{{ end }} {{- end }} + {{- if $g.DisplayBuilder }} + {{ if not (eq $slot.Status 0) }} + {{ if $slot.HasBuilder }} + {{ formatBuilder $slot.BuilderIndex $slot.BuilderName }} + {{ end }} + {{ end }} + {{- end }} {{- if $g.DisplayMevBlock }} {{ if not (eq $slot.Status 0) }} {{ if $slot.IsMevBlock }} @@ -463,17 +482,25 @@

Filtered Slots

-{{ template "txDetails-js" . }} +{{ template "txDetailsModal-js" . }} {{ end }} {{ define "css" }} -{{ template "txDetails-css" . }} +{{ template "txDetailsModal-css" . }} {{ end }} \ No newline at end of file diff --git a/templates/validator/withdrawals.html b/templates/validator/withdrawals.html index a8398bdd2..6aa279045 100644 --- a/templates/validator/withdrawals.html +++ b/templates/validator/withdrawals.html @@ -8,6 +8,7 @@ Slot Time Type + Ref Address Amount Status @@ -36,10 +37,33 @@ Requested + {{ else if eq $withdrawal.Type 4 }} + + Builder Withdrawal + + {{ else if eq $withdrawal.Type 5 }} + + Builder Payment + + {{ else if eq $withdrawal.Type 6 }} + + Builder Delayed + {{ else }} Unknown {{ end }} + + {{ if gt $withdrawal.RefSlot 0 }} + {{ if $withdrawal.RefSlotRoot }} + {{ formatAddCommas $withdrawal.RefSlot }} + {{ else }} + {{ formatAddCommas $withdrawal.RefSlot }} + {{ end }} + {{ else }} + - + {{ end }} + {{ if $withdrawal.Address }} @@ -62,7 +86,7 @@ {{ end }} - + {{ if gt .AdditionalWithdrawalCount 0 }} View {{ formatAddCommas .AdditionalWithdrawalCount }} more withdrawals {{ end }} @@ -70,7 +94,7 @@ {{ else }} - +
{{ template "timeline_svg" }}
diff --git a/templates/validator_slots/slots.html b/templates/validator_slots/slots.html index 214945920..ec6932e5b 100644 --- a/templates/validator_slots/slots.html +++ b/templates/validator_slots/slots.html @@ -71,16 +71,16 @@

Validator {{ format {{ if eq $slot.Slot 0 }} Genesis + {{ else if eq $slot.Status 1 }} + Proposed + {{ else if eq $slot.Status 2 }} + Missed (Orphaned) {{ else if $slot.Scheduled }} Scheduled {{ else if eq $slot.Status 0 }} Missed - {{ else if eq $slot.Status 1 }} - Proposed - {{ else if eq $slot.Status 2 }} - Orphaned {{ else }} - Unknown + Unknown {{ end }} {{ formatRecentTimeShort $slot.Ts }} diff --git a/templates/voluntary_exits/voluntary_exits.html b/templates/voluntary_exits/voluntary_exits.html index dd6364dc7..e159d187e 100644 --- a/templates/voluntary_exits/voluntary_exits.html +++ b/templates/voluntary_exits/voluntary_exits.html @@ -43,26 +43,36 @@

- Validator Index + Entity Type +
+
+ +
+
+
+
+ {{ if eq .FilterEntity "builder" }}Builder Index{{ else }}Validator Index{{ end }}
- -
-
- - +
+
-
- +
- Validator Name + {{ if eq .FilterEntity "builder" }}Builder Name{{ else }}Validator Name{{ end }}
- +
@@ -139,7 +149,13 @@

{{ formatAddCommas $voluntaryExit.SlotNumber }} {{ end }} {{ formatRecentTimeShort $voluntaryExit.Time }} - {{ formatValidatorWithIndex $voluntaryExit.ValidatorIndex $voluntaryExit.ValidatorName }} + + {{ if $voluntaryExit.IsBuilder }} + {{ formatBuilderWithIndex $voluntaryExit.ValidatorIndex $voluntaryExit.ValidatorName }} + {{ else }} + {{ formatValidatorWithIndex $voluntaryExit.ValidatorIndex $voluntaryExit.ValidatorName }} + {{ end }} +
@@ -271,6 +287,24 @@

$('#pageJumpForm').submit(); } }); + + function updateEntityFields(entity) { + var isAll = (entity === 'all'); + $('.entity-field').each(function() { + $(this).prop('readonly', isAll).toggleClass('entity-field-disabled', isAll); + if (isAll) { $(this).val(''); } + }); + if (!isAll) { + $('.entity-label').each(function() { $(this).text($(this).data(entity)); }); + $('.entity-placeholder').each(function() { $(this).attr('placeholder', $(this).data(entity)); }); + } + } + $('.entity-select').on('change', function() { updateEntityFields($(this).val()); }); + $('.entity-field').on('mousedown', function() { + if ($(this).prop('readonly')) { + $('.entity-select').val('validator').trigger('change'); + } + }); }); {{ end }} @@ -283,5 +317,11 @@

padding-right: 10px; } +.entity-field-disabled { + background-color: var(--bs-secondary-bg); + opacity: 0.65; + cursor: pointer; +} + {{ end }} \ No newline at end of file diff --git a/templates/withdrawals/withdrawals.html b/templates/withdrawals/withdrawals.html index 6b2634414..4f13bb0d1 100644 --- a/templates/withdrawals/withdrawals.html +++ b/templates/withdrawals/withdrawals.html @@ -170,7 +170,11 @@

This table displays the most recent withdrawal reques {{- if $withdrawal.ValidatorValid }} - {{ formatValidatorWithIndex $withdrawal.ValidatorIndex $withdrawal.ValidatorName }} + {{- if $withdrawal.IsBuilder }} + {{ formatBuilderWithIndex $withdrawal.ValidatorIndex $withdrawal.ValidatorName }} + {{- else }} + {{ formatValidatorWithIndex $withdrawal.ValidatorIndex $withdrawal.ValidatorName }} + {{- end }} {{- else }}
0x{{ printf "%x" $withdrawal.PublicKey }} @@ -339,6 +343,7 @@
This table displays the most recent beacon chain with Block Time Type + Ref Validator Address Amount @@ -381,13 +386,40 @@
This table displays the most recent beacon chain with Requested + {{ else if eq $withdrawal.Type 4 }} + + Builder Withdrawal + + {{ else if eq $withdrawal.Type 5 }} + + Builder Payment + + {{ else if eq $withdrawal.Type 6 }} + + Builder Delayed + {{ else }} Unknown {{ end }} + + {{ if gt $withdrawal.RefSlot 0 }} + {{ if $withdrawal.RefSlotRoot }} + {{ formatAddCommas $withdrawal.RefSlot }} + {{ else }} + {{ formatAddCommas $withdrawal.RefSlot }} + {{ end }} + {{ else }} + - + {{ end }} + {{ if $withdrawal.HasValidator }} - {{ formatValidatorWithIndex $withdrawal.ValidatorIndex $withdrawal.ValidatorName }} + {{- if $withdrawal.IsBuilder }} + {{ formatBuilderWithIndex $withdrawal.ValidatorIndex $withdrawal.ValidatorName }} + {{- else }} + {{ formatValidatorWithIndex $withdrawal.ValidatorIndex $withdrawal.ValidatorName }} + {{- end }} {{ else }} - {{ end }} diff --git a/templates/withdrawals_list/withdrawals_list.html b/templates/withdrawals_list/withdrawals_list.html index cc36aa105..8d5fc9ca5 100644 --- a/templates/withdrawals_list/withdrawals_list.html +++ b/templates/withdrawals_list/withdrawals_list.html @@ -27,10 +27,36 @@

- Validator + Entity Type
- + +
+
+
+
+ {{ if eq .FilterEntity "builder" }}Builder Index{{ else }}Validator Index{{ end }} +
+
+
+ +
+
-
+
+ +
+
+
+
+
+ {{ if eq .FilterEntity "builder" }}Builder Name{{ else }}Validator Name{{ end }} +
+
+
@@ -71,6 +97,9 @@

+ + +

@@ -129,6 +158,7 @@

Block Time Type + Ref Validator Address Amount @@ -171,13 +201,40 @@

Requested + {{ else if eq $withdrawal.Type 4 }} + + Builder Withdrawal + + {{ else if eq $withdrawal.Type 5 }} + + Builder Payment + + {{ else if eq $withdrawal.Type 6 }} + + Builder Delayed + {{ else }} Unknown {{ end }} + + {{ if gt $withdrawal.RefSlot 0 }} + {{ if $withdrawal.RefSlotRoot }} + {{ formatAddCommas $withdrawal.RefSlot }} + {{ else }} + {{ formatAddCommas $withdrawal.RefSlot }} + {{ end }} + {{ else }} + - + {{ end }} + {{ if $withdrawal.HasValidator }} - {{ formatValidatorWithIndex $withdrawal.ValidatorIndex $withdrawal.ValidatorName }} + {{- if $withdrawal.IsBuilder }} + {{ formatBuilderWithIndex $withdrawal.ValidatorIndex $withdrawal.ValidatorName }} + {{- else }} + {{ formatValidatorWithIndex $withdrawal.ValidatorIndex $withdrawal.ValidatorName }} + {{- end }} {{ else }} - {{ end }} @@ -310,6 +367,26 @@

} }); }); + + // Entity type toggle + function updateEntityFields(entity) { + var isAll = (entity === 'all'); + $('.entity-field').each(function() { + $(this).prop('readonly', isAll).toggleClass('entity-field-disabled', isAll); + if (isAll) { $(this).val(''); } + }); + if (!isAll) { + $('.entity-label').each(function() { $(this).text($(this).data(entity)); }); + $('.entity-placeholder').each(function() { $(this).attr('placeholder', $(this).data(entity)); }); + } + } + $('.entity-select').on('change', function() { updateEntityFields($(this).val()); }); + // Click on readonly/disabled-looking field: switch to Validator + $('.entity-field').on('mousedown', function() { + if ($(this).prop('readonly')) { + $('.entity-select').val('validator').trigger('change'); + } + }); }); {{ end }} @@ -323,6 +400,12 @@

padding-right: 10px; } +.entity-field-disabled { + background-color: var(--bs-secondary-bg); + opacity: 0.65; + cursor: pointer; +} + .filter-multiselect-container { width: 100%; } diff --git a/types/config.go b/types/config.go index de721d0e3..48c2d3cc3 100644 --- a/types/config.go +++ b/types/config.go @@ -129,6 +129,12 @@ type Config struct { PubkeyCachePath string `yaml:"pubkeyCachePath" envconfig:"INDEXER_PUBKEY_CACHE_PATH"` BadChainRoots []string `yaml:"badChainRoots" envconfig:"INDEXER_BAD_CHAIN_ROOTS"` + + StateCache struct { + Enabled bool `yaml:"enabled" envconfig:"INDEXER_STATE_CACHE_ENABLED"` + Path string `yaml:"path" envconfig:"INDEXER_STATE_CACHE_PATH"` + MaxStates uint `yaml:"maxStates" envconfig:"INDEXER_STATE_CACHE_MAX_STATES"` + } `yaml:"stateCache"` } `yaml:"indexer"` TxSignature struct { @@ -162,9 +168,10 @@ type Config struct { Database DatabaseConfig `yaml:"database"` BlockDb struct { - Engine string `yaml:"engine" envconfig:"BLOCKDB_ENGINE"` + Engine string `yaml:"engine" envconfig:"BLOCKDB_ENGINE"` // "pebble", "s3", or "tiered" Pebble PebbleBlockDBConfig `yaml:"pebble"` S3 S3BlockDBConfig `yaml:"s3"` + Tiered TieredBlockDBConfig `yaml:"tiered"` // For tiered storage (Pebble cache + S3 backend) } `yaml:"blockDb"` KillSwitch struct { @@ -257,19 +264,45 @@ type PgsqlWriterDatabaseConfig struct { MaxIdleConns int `yaml:"maxIdleConns" envconfig:"DATABASE_PGSQL_WRITER_MAX_IDLE_CONNS"` } +// BlockDbRetentionConfig configures per-object-type retention behavior. +type BlockDbRetentionConfig struct { + Enabled bool `yaml:"enabled"` + RetentionTime time.Duration `yaml:"retentionTime"` // For age-based cleanup + MaxSize int64 `yaml:"maxSize"` // Size limit in MB (0 = unlimited) + CleanupMode string `yaml:"cleanupMode"` // "age" or "lru" +} + +// PebbleBlockDBConfig configures the Pebble (local) storage engine. 
type PebbleBlockDBConfig struct { - Path string `yaml:"path" envconfig:"BLOCKDB_ROCKSDB_PATH"` - CacheSize int `yaml:"cacheSize" envconfig:"BLOCKDB_ROCKSDB_CACHE_SIZE"` + Path string `yaml:"path" envconfig:"BLOCKDB_PEBBLE_PATH"` + CacheSize int `yaml:"cacheSize" envconfig:"BLOCKDB_PEBBLE_CACHE_SIZE"` // Pebble internal cache in MB + + // Per-object-type retention configuration (used in tiered mode) + HeaderRetention BlockDbRetentionConfig `yaml:"headerRetention"` + BodyRetention BlockDbRetentionConfig `yaml:"bodyRetention"` + PayloadRetention BlockDbRetentionConfig `yaml:"payloadRetention"` + BalRetention BlockDbRetentionConfig `yaml:"balRetention"` + + // Cleanup configuration + CleanupInterval time.Duration `yaml:"cleanupInterval" envconfig:"BLOCKDB_PEBBLE_CLEANUP_INTERVAL"` } +// S3BlockDBConfig configures the S3 (remote) storage engine. type S3BlockDBConfig struct { - Endpoint string `yaml:"endpoint" envconfig:"BLOCKDB_S3_ENDPOINT"` - Secure YamlBool `yaml:"secure" envconfig:"BLOCKDB_S3_SECURE"` - Bucket string `yaml:"bucket" envconfig:"BLOCKDB_S3_BUCKET"` - Region string `yaml:"region" envconfig:"BLOCKDB_S3_REGION"` - AccessKey string `yaml:"accessKey" envconfig:"BLOCKDB_S3_ACCESS_KEY"` - SecretKey string `yaml:"secretKey" envconfig:"BLOCKDB_S3_SECRET_KEY"` - Path string `yaml:"path" envconfig:"BLOCKDB_S3_PATH"` + Endpoint string `yaml:"endpoint" envconfig:"BLOCKDB_S3_ENDPOINT"` + Secure YamlBool `yaml:"secure" envconfig:"BLOCKDB_S3_SECURE"` + Bucket string `yaml:"bucket" envconfig:"BLOCKDB_S3_BUCKET"` + Region string `yaml:"region" envconfig:"BLOCKDB_S3_REGION"` + AccessKey string `yaml:"accessKey" envconfig:"BLOCKDB_S3_ACCESS_KEY"` + SecretKey string `yaml:"secretKey" envconfig:"BLOCKDB_S3_SECRET_KEY"` + Path string `yaml:"path" envconfig:"BLOCKDB_S3_PATH"` + EnableRangeRequests bool `yaml:"enableRangeRequests" envconfig:"BLOCKDB_S3_ENABLE_RANGE_REQUESTS"` // Use HTTP Range requests for selective loading +} + +// TieredBlockDBConfig configures tiered storage 
(Pebble cache + S3 backend). +type TieredBlockDBConfig struct { + Pebble PebbleBlockDBConfig `yaml:"pebble"` + S3 S3BlockDBConfig `yaml:"s3"` } // YamlBool is a bool type that can be unmarshalled from both diff --git a/types/models/address.go b/types/models/address.go index 55e34f61b..37130ca84 100644 --- a/types/models/address.go +++ b/types/models/address.go @@ -187,6 +187,7 @@ type AddressPageDataWithdrawal struct { Amount uint64 `json:"amount"` // Gwei ValidatorIndex uint64 `json:"validator_index"` ValidatorName string `json:"validator_name"` + IsBuilder bool `json:"is_builder"` } // AddressPageDataBlockFee represents a block fee reward on the address page. diff --git a/types/models/blocks.go b/types/models/blocks.go index 2937141d6..d828cc5f6 100644 --- a/types/models/blocks.go +++ b/types/models/blocks.go @@ -31,6 +31,7 @@ type BlocksPageData struct { DisplayBlockSize bool `json:"dp_blocksize"` DisplayRecvDelay bool `json:"dp_recvdelay"` DisplayExecTime bool `json:"dp_exectime"` + DisplayBuilder bool `json:"dp_builder"` DisplayColCount uint64 `json:"display_col_count"` IsDefaultPage bool `json:"default_page"` @@ -88,6 +89,9 @@ type BlocksPageDataSlot struct { ForkGraph []*BlocksPageDataForkGraph `json:"fork_graph"` IsMevBlock bool `json:"is_mev_block"` MevBlockRelays string `json:"mev_block_relays"` + HasBuilder bool `json:"has_builder"` + BuilderIndex uint64 `json:"builder_index"` + BuilderName string `json:"builder_name"` } type BlocksPageDataForkGraph struct { diff --git a/types/models/builders.go b/types/models/builders.go new file mode 100644 index 000000000..89054e895 --- /dev/null +++ b/types/models/builders.go @@ -0,0 +1,169 @@ +package models + +import ( + "time" +) + +// BuildersPageData is a struct to hold info for the builders page +type BuildersPageData struct { + FilterPubKey string `json:"filter_pubkey"` + FilterIndex string `json:"filter_index"` + FilterExecutionAddr string `json:"filter_execution_addr"` + FilterStatus string 
`json:"filter_status"` + FilterStatusOpts []BuildersPageDataStatusOption `json:"filter_status_opts"` + + Builders []*BuildersPageDataBuilder `json:"builders"` + BuilderCount uint64 `json:"builder_count"` + FirstBuilder uint64 `json:"first_builder"` + LastBuilder uint64 `json:"last_builder"` + Sorting string `json:"sorting"` + IsDefaultSorting bool `json:"default_sorting"` + IsDefaultPage bool `json:"default_page"` + TotalPages uint64 `json:"total_pages"` + PageSize uint64 `json:"page_size"` + CurrentPageIndex uint64 `json:"page_index"` + PrevPageIndex uint64 `json:"prev_page_index"` + NextPageIndex uint64 `json:"next_page_index"` + LastPageIndex uint64 `json:"last_page_index"` + FilteredPageLink string `json:"filtered_page_link"` + + UrlParams map[string]string `json:"url_params"` +} + +type BuildersPageDataStatusOption struct { + Status string `json:"status"` + Count uint64 `json:"count"` +} + +type BuildersPageDataBuilder struct { + Index uint64 `json:"index"` + PublicKey []byte `json:"pubkey"` + ExecutionAddress []byte `json:"execution_address"` + Balance uint64 `json:"balance"` + State string `json:"state"` + ShowDeposit bool `json:"show_deposit"` + DepositTs time.Time `json:"deposit_ts"` + DepositEpoch uint64 `json:"deposit_epoch"` + ShowWithdrawable bool `json:"show_withdrawable"` + WithdrawableTs time.Time `json:"withdrawable_ts"` + WithdrawableEpoch uint64 `json:"withdrawable_epoch"` +} + +// BuilderPageData holds data for the builder details page +type BuilderPageData struct { + CurrentEpoch uint64 `json:"current_epoch"` + Index uint64 `json:"index"` + Name string `json:"name"` + PublicKey []byte `json:"pubkey"` + Balance uint64 `json:"balance"` + ExecutionAddress []byte `json:"execution_address"` + Version uint8 `json:"version"` + State string `json:"state"` // "Pending", "Active", "Exited", "Superseded" + + // Deposit lifecycle + ShowDeposit bool `json:"show_deposit"` + DepositEpoch uint64 `json:"deposit_epoch"` + DepositTs time.Time `json:"deposit_ts"` 
+ + // Withdrawable lifecycle + ShowWithdrawable bool `json:"show_withdrawable"` + WithdrawableEpoch uint64 `json:"withdrawable_epoch"` + WithdrawableTs time.Time `json:"withdrawable_ts"` + + IsSuperseded bool `json:"is_superseded"` + + // Exit reason (shown in header when builder has exited) + ExitReason string `json:"exit_reason"` + ExitReasonSlot uint64 `json:"exit_reason_slot"` + ExitReasonVoluntaryExit bool `json:"exit_reason_voluntary_exit"` + ExitReasonWithdrawal bool `json:"exit_reason_withdrawal"` + ExitReasonTxHash []byte `json:"exit_reason_tx_hash"` + ExitReasonTxDetails *BuilderPageDataExitTxDetails `json:"exit_reason_tx_details"` + + // Tab control + TabView string `json:"tab_view"` + GloasIsActive bool `json:"gloas_is_active"` + + // Tab data (loaded conditionally) + RecentBlocks []*BuilderPageDataBlock `json:"recent_blocks"` + RecentBids []*BuilderPageDataBid `json:"recent_bids"` + RecentDeposits []*BuilderPageDataDeposit `json:"recent_deposits"` + Withdrawals []*BuilderPageDataWithdrawal `json:"withdrawals"` + WithdrawalCount uint64 `json:"withdrawal_count"` + AdditionalWithdrawalCount uint64 `json:"additional_withdrawal_count"` + HasMoreBlocks bool `json:"has_more_blocks"` +} + +// BuilderPageDataBlock represents a block/payload built by this builder +type BuilderPageDataBlock struct { + Epoch uint64 `json:"epoch"` + Slot uint64 `json:"slot"` + Ts time.Time `json:"ts"` + BlockRoot []byte `json:"block_root"` + BlockHash []byte `json:"block_hash"` + Status uint16 `json:"status"` // PayloadStatus + FeeRecipient []byte `json:"fee_recipient"` + GasLimit uint64 `json:"gas_limit"` + Value uint64 `json:"value"` + ElPayment uint64 `json:"el_payment"` +} + +// BuilderPageDataBid represents a bid submitted by this builder +type BuilderPageDataBid struct { + Slot uint64 `json:"slot"` + Ts time.Time `json:"ts"` + ParentRoot []byte `json:"parent_root"` + ParentHash []byte `json:"parent_hash"` + BlockHash []byte `json:"block_hash"` + FeeRecipient []byte 
`json:"fee_recipient"` + GasLimit uint64 `json:"gas_limit"` + Value uint64 `json:"value"` + ElPayment uint64 `json:"el_payment"` + IsWinning bool `json:"is_winning"` +} + +// BuilderPageDataDeposit represents a builder deposit transaction +type BuilderPageDataDeposit struct { + Type string `json:"type"` // "deposit" + SlotNumber uint64 `json:"slot"` + SlotRoot []byte `json:"slot_root"` + Time time.Time `json:"time"` + Orphaned bool `json:"orphaned"` + Amount uint64 `json:"amount"` + DepositorAddress []byte `json:"depositor_address" ssz-size:"20"` + HasTransaction bool `json:"has_transaction"` + TransactionHash []byte `json:"tx_hash" ssz-size:"32"` + TransactionDetails *BuilderPageDataDepositTxDetails `json:"tx_details" ssz-type:"optional"` +} + +// BuilderPageDataDepositTxDetails contains transaction details for deposit transactions +type BuilderPageDataDepositTxDetails struct { + BlockNumber uint64 `json:"block"` + BlockHash string `json:"block_hash"` + BlockTime uint64 `json:"block_time"` + TxOrigin string `json:"tx_origin"` + TxTarget string `json:"tx_target"` + TxHash string `json:"tx_hash"` +} + +// BuilderPageDataWithdrawal represents a withdrawal on the builder detail page +type BuilderPageDataWithdrawal struct { + SlotNumber uint64 `json:"slot"` + BlockRoot []byte `json:"block_root" ssz-size:"32"` + Time time.Time `json:"time"` + Orphaned bool `json:"orphaned"` + Type uint8 `json:"type"` + Amount uint64 `json:"amount"` + RefSlot uint64 `json:"ref_slot"` + RefSlotRoot []byte `json:"ref_slot_root" ssz-size:"32"` +} + +// BuilderPageDataExitTxDetails contains transaction details for EL-triggered exits +type BuilderPageDataExitTxDetails struct { + BlockNumber uint64 `json:"block"` + BlockHash string `json:"block_hash"` + BlockTime uint64 `json:"block_time"` + TxOrigin string `json:"tx_origin"` + TxTarget string `json:"tx_target"` + TxHash string `json:"tx_hash"` +} diff --git a/types/models/deposits.go b/types/models/deposits.go index 197b3e855..2dbfed184 
100644 --- a/types/models/deposits.go +++ b/types/models/deposits.go @@ -43,6 +43,7 @@ type DepositsPageDataInitiatedDeposit struct { ValidatorExists bool `json:"validator_exists"` ValidatorIndex uint64 `json:"validator_index"` ValidatorName string `json:"validator_name"` + IsBuilder bool `json:"is_builder"` } type DepositsPageDataIncludedDeposit struct { @@ -69,6 +70,7 @@ type DepositsPageDataIncludedDeposit struct { ValidatorExists bool `json:"validator_exists"` ValidatorIndex uint64 `json:"validator_index"` ValidatorName string `json:"validator_name"` + IsBuilder bool `json:"is_builder"` } type DepositsPageDataIncludedDepositTxDetails struct { @@ -98,6 +100,7 @@ type DepositsPageDataQueuedDeposit struct { ValidatorExists bool `json:"validator_exists"` ValidatorIndex uint64 `json:"validator_index"` ValidatorName string `json:"validator_name"` + IsBuilder bool `json:"is_builder"` } type DepositsPageDataQueuedDepositTxDetails struct { diff --git a/types/models/el_withdrawals.go b/types/models/el_withdrawals.go index ffedc2047..95b6473a0 100644 --- a/types/models/el_withdrawals.go +++ b/types/models/el_withdrawals.go @@ -6,6 +6,7 @@ import ( // ElWithdrawalsPageData is a struct to hold info for the el_withdrawals page type ElWithdrawalsPageData struct { + FilterEntity string `json:"filter_entity"` // "all", "validator", or "builder" FilterMinSlot uint64 `json:"filter_mins"` FilterMaxSlot uint64 `json:"filter_maxs"` FilterAddress string `json:"filter_address"` @@ -51,6 +52,7 @@ type ElWithdrawalsPageDataWithdrawal struct { ValidatorValid bool `json:"vvalid"` ValidatorIndex uint64 `json:"vindex"` ValidatorName string `json:"vname"` + IsBuilder bool `json:"is_builder"` PublicKey []byte `json:"pubkey"` LinkedTransaction bool `json:"linked_tx"` TransactionHash []byte `json:"tx_hash"` diff --git a/types/models/epoch.go b/types/models/epoch.go index 27dd73a28..39a549c2c 100644 --- a/types/models/epoch.go +++ b/types/models/epoch.go @@ -45,6 +45,7 @@ type EpochPageDataSlot 
struct { Ts time.Time `json:"ts"` Scheduled bool `json:"scheduled"` Status uint8 `json:"status"` + PayloadStatus uint8 `json:"payload_status"` Proposer uint64 `json:"proposer"` ProposerName string `json:"proposer_name"` AttestationCount uint64 `json:"attestation_count"` diff --git a/types/models/exits.go b/types/models/exits.go index 2d082c2ae..34ee0b1ca 100644 --- a/types/models/exits.go +++ b/types/models/exits.go @@ -32,6 +32,7 @@ type ExitsPageDataRecentExit struct { Orphaned bool `json:"orphaned"` ValidatorIndex uint64 `json:"vindex"` ValidatorName string `json:"vname"` + IsBuilder bool `json:"is_builder"` PublicKey []byte `json:"pubkey" ssz-size:"48"` WithdrawalCreds []byte `json:"wdcreds" ssz-size:"32"` ValidatorStatus string `json:"vstatus"` @@ -68,6 +69,7 @@ type ExitsPageDataRecentExitRequest struct { ValidatorValid bool `json:"vvalid"` ValidatorIndex uint64 `json:"vindex"` ValidatorName string `json:"vname"` + IsBuilder bool `json:"is_builder"` PublicKey []byte `json:"pubkey" ssz-size:"48"` LinkedTransaction bool `json:"linked_tx"` TransactionHash []byte `json:"tx_hash" ssz-size:"32"` diff --git a/types/models/included_deposits.go b/types/models/included_deposits.go index ecdb4248e..b36b76815 100644 --- a/types/models/included_deposits.go +++ b/types/models/included_deposits.go @@ -62,6 +62,7 @@ type IncludedDepositsPageDataDeposit struct { ValidatorExists bool `json:"validator_exists"` ValidatorIndex uint64 `json:"validator_index"` ValidatorName string `json:"validator_name"` + IsBuilder bool `json:"is_builder"` } type IncludedDepositsPageDataDepositTxDetails struct { diff --git a/types/models/indexPage.go b/types/models/indexPage.go index c04b80d21..7e61bd1cf 100644 --- a/types/models/indexPage.go +++ b/types/models/indexPage.go @@ -68,29 +68,31 @@ type IndexPageDataEpochs struct { } type IndexPageDataBlocks struct { - Epoch uint64 `json:"epoch"` - Slot uint64 `json:"slot"` - WithEthBlock bool `json:"has_block"` - EthBlock uint64 `json:"eth_block"` - 
EthBlockLink string `json:"eth_link"` - Ts time.Time `json:"ts"` - Proposer uint64 `json:"proposer"` - ProposerName string `json:"proposer_name"` - Status uint64 `json:"status"` - BlockRoot []byte `json:"block_root" ssz-size:"32"` + Epoch uint64 `json:"epoch"` + Slot uint64 `json:"slot"` + WithEthBlock bool `json:"has_block"` + EthBlock uint64 `json:"eth_block"` + EthBlockLink string `json:"eth_link"` + Ts time.Time `json:"ts"` + Proposer uint64 `json:"proposer"` + ProposerName string `json:"proposer_name"` + Status uint64 `json:"status"` + PayloadStatus uint8 `json:"payload_status"` + BlockRoot []byte `json:"block_root" ssz-size:"32"` } type IndexPageDataSlots struct { - Epoch uint64 `json:"epoch"` - Slot uint64 `json:"slot"` - EthBlock uint64 `json:"eth_block"` - Ts time.Time `json:"ts"` - Proposer uint64 `json:"proposer"` - ProposerName string `json:"proposer_name"` - Status uint64 `json:"status"` - BlockRoot []byte `json:"block_root" ssz-size:"32"` - ParentRoot []byte `json:"parent_root" ssz-size:"32"` - ForkGraph []*IndexPageDataForkGraph `json:"fork_graph"` + Epoch uint64 `json:"epoch"` + Slot uint64 `json:"slot"` + EthBlock uint64 `json:"eth_block"` + Ts time.Time `json:"ts"` + Proposer uint64 `json:"proposer"` + ProposerName string `json:"proposer_name"` + Status uint64 `json:"status"` + PayloadStatus uint8 `json:"payload_status"` + BlockRoot []byte `json:"block_root" ssz-size:"32"` + ParentRoot []byte `json:"parent_root" ssz-size:"32"` + ForkGraph []*IndexPageDataForkGraph `json:"fork_graph"` } type IndexPageDataForkGraph struct { diff --git a/types/models/initiated_deposits.go b/types/models/initiated_deposits.go index c2c412618..da467cf7a 100644 --- a/types/models/initiated_deposits.go +++ b/types/models/initiated_deposits.go @@ -53,4 +53,5 @@ type InitiatedDepositsPageDataDeposit struct { ValidatorExists bool `json:"validator_exists"` ValidatorIndex uint64 `json:"validator_index"` ValidatorName string `json:"validator_name"` + IsBuilder bool 
`json:"is_builder"` } diff --git a/types/models/queued_deposits.go b/types/models/queued_deposits.go index f27319df6..c77c79fb4 100644 --- a/types/models/queued_deposits.go +++ b/types/models/queued_deposits.go @@ -54,6 +54,7 @@ type QueuedDepositsPageDataDeposit struct { ValidatorExists bool `json:"validator_exists"` ValidatorIndex uint64 `json:"validator_index"` ValidatorName string `json:"validator_name"` + IsBuilder bool `json:"is_builder"` } type QueuedDepositsPageDataDepositTxDetails struct { diff --git a/types/models/search.go b/types/models/search.go index 468214870..f93fa0529 100644 --- a/types/models/search.go +++ b/types/models/search.go @@ -1,6 +1,6 @@ package models -import "github.com/attestantio/go-eth2-client/spec/phase0" +import "github.com/ethpandaops/go-eth2-client/spec/phase0" // SearchBlockResult is a struct to hold the search block result with a given graffiti type SearchBlockResult struct { diff --git a/types/models/slot.go b/types/models/slot.go index b85509bd8..da2fd7328 100644 --- a/types/models/slot.go +++ b/types/models/slot.go @@ -76,12 +76,16 @@ type SlotPageBlockData struct { DepositRequestsCount uint64 `json:"deposit_receipts_count"` WithdrawalRequestsCount uint64 `json:"withdrawal_requests_count"` ConsolidationRequestsCount uint64 `json:"consolidation_requests_count"` + BidsCount uint64 `json:"bids_count"` + PtcVotesCount uint64 `json:"ptc_votes_count"` SlotsPerEpoch uint64 `json:"slots_per_epoch"` TargetCommitteeSize uint64 `json:"target_committee_size"` MaxCommitteesPerSlot uint64 `json:"max_committees_per_slot"` - ExecutionData *SlotPageExecutionData `json:"execution_data"` + PayloadHeader *SlotPagePayloadHeader `json:"payload_header"` + ExecutionData *SlotPageExecutionData `json:"execution_data"` + Attestations []*SlotPageAttestation `json:"attestations"` // Attestations included in this block Deposits []*SlotPageDeposit `json:"deposits"` // Deposits included in this block VoluntaryExits []*SlotPageVoluntaryExit 
`json:"voluntary_exits"` // Voluntary Exits included in this block @@ -94,6 +98,8 @@ type SlotPageBlockData struct { DepositRequests []*SlotPageDepositRequest `json:"deposit_receipts"` // DepositRequests included in this block WithdrawalRequests []*SlotPageWithdrawalRequest `json:"withdrawal_requests"` // WithdrawalRequests included in this block ConsolidationRequests []*SlotPageConsolidationRequest `json:"consolidation_requests"` // ConsolidationRequests included in this block + Bids []*SlotPageBid `json:"bids"` // Execution payload bids for this block (ePBS) + PtcVotes *SlotPagePtcVotes `json:"ptc_votes"` // PTC votes included in this block (for previous slot) } type SlotPageExecutionData struct { @@ -126,6 +132,20 @@ type SlotPageValidatorName struct { Value string `json:"v"` } +type SlotPagePayloadHeader struct { + PayloadStatus uint16 `json:"payload_status"` + ParentBlockHash []byte `json:"parent_block_hash"` + ParentBlockRoot []byte `json:"parent_block_root"` + BlockHash []byte `json:"block_hash"` + GasLimit uint64 `json:"gas_limit"` + BuilderIndex uint64 `json:"builder_index"` + BuilderName string `json:"builder_name"` + Slot uint64 `json:"slot"` + Value uint64 `json:"value"` + BlobKZGCommitments [][]byte `json:"blob_kzg_commitments"` + Signature []byte `json:"signature"` +} + type SlotPageAttestation struct { Slot uint64 `json:"slot"` CommitteeIndex []uint64 `json:"committeeindex"` @@ -135,6 +155,8 @@ type SlotPageAttestation struct { Validators []uint64 `json:"validators"` IncludedValidators []uint64 `json:"included_validators"` + PayloadStatus *uint64 `json:"payload_status,omitempty"` + Signature []byte `json:"signature" ssz-size:"96"` BeaconBlockRoot []byte `json:"beaconblockroot" ssz-size:"32"` @@ -155,6 +177,7 @@ type SlotPageDeposit struct { type SlotPageVoluntaryExit struct { ValidatorIndex uint64 `json:"validatorindex"` ValidatorName string `json:"validatorname"` + IsBuilder bool `json:"is_builder"` Epoch uint64 `json:"epoch"` Signature []byte 
`json:"signature" ssz-size:"96"` } @@ -210,8 +233,12 @@ type SlotPageWithdrawal struct { Index uint64 `json:"index"` ValidatorIndex uint64 `json:"validatorindex"` ValidatorName string `json:"validatorname"` + IsBuilder bool `json:"is_builder"` Address []byte `json:"address" ssz-size:"20"` Amount uint64 `json:"amount"` + Type uint8 `json:"type"` + RefSlot uint64 `json:"ref_slot"` + RefSlotRoot []byte `json:"ref_slot_root" ssz-size:"32"` } type SlotPageBlob struct { @@ -258,6 +285,7 @@ type SlotPageTransaction struct { type SlotPageDepositRequest struct { PublicKey []byte `db:"pubkey" ssz-size:"48"` Exists bool `db:"exists"` + IsBuilder bool `db:"is_builder"` ValidatorIndex uint64 `db:"valindex"` ValidatorName string `db:"valname"` WithdrawalCreds []byte `db:"withdrawal_creds" ssz-size:"32"` @@ -272,18 +300,61 @@ type SlotPageWithdrawalRequest struct { Exists bool `db:"exists"` ValidatorIndex uint64 `db:"valindex"` ValidatorName string `db:"valname"` + IsBuilder bool `db:"is_builder"` Amount uint64 `db:"amount"` } type SlotPageConsolidationRequest struct { - Address []byte `db:"address" ssz-size:"20"` - SourcePubkey []byte `db:"source_pubkey" ssz-size:"48"` - SourceFound bool `db:"source_bool"` - SourceIndex uint64 `db:"source_index"` - SourceName string `db:"source_name"` - TargetPubkey []byte `db:"target_pubkey" ssz-size:"48"` - TargetFound bool `db:"target_bool"` - TargetIndex uint64 `db:"target_index"` - TargetName string `db:"target_name"` - Epoch uint64 `db:"epoch"` + Address []byte `db:"address" ssz-size:"20"` + SourcePubkey []byte `db:"source_pubkey" ssz-size:"48"` + SourceFound bool `db:"source_bool"` + SourceIndex uint64 `db:"source_index"` + SourceName string `db:"source_name"` + SourceIsBuilder bool `db:"source_is_builder"` + TargetPubkey []byte `db:"target_pubkey" ssz-size:"48"` + TargetFound bool `db:"target_bool"` + TargetIndex uint64 `db:"target_index"` + TargetName string `db:"target_name"` + TargetIsBuilder bool `db:"target_is_builder"` + Epoch 
uint64 `db:"epoch"` +} + +type SlotPageBid struct { + ParentRoot []byte `json:"parent_root"` + ParentHash []byte `json:"parent_hash"` + BlockHash []byte `json:"block_hash"` + FeeRecipient []byte `json:"fee_recipient"` + GasLimit uint64 `json:"gas_limit"` + BuilderIndex uint64 `json:"builder_index"` + BuilderName string `json:"builder_name"` + IsSelfBuilt bool `json:"is_self_built"` + Slot uint64 `json:"slot"` + Value uint64 `json:"value"` + ElPayment uint64 `json:"el_payment"` + TotalValue uint64 `json:"total_value"` + IsWinning bool `json:"is_winning"` +} + +// SlotPagePtcVotes holds PTC (Payload Timeliness Committee) vote information for a slot. +// These are payload attestations included in this block for the PREVIOUS slot. +type SlotPagePtcVotes struct { + VotedSlot uint64 `json:"voted_slot"` // The slot the votes are for (previous slot) + VotedBlockRoot []byte `json:"voted_block_root"` // The block root being voted on + TotalPtcSize uint64 `json:"total_ptc_size"` // Total PTC committee size + Aggregates []*SlotPagePtcAggregate `json:"aggregates"` // Up to 4 aggregates for different vote flag combinations + NonVoters []types.NamedValidator `json:"non_voters"` // Validators that did not vote + NonVoterCount uint64 `json:"non_voter_count"` // Number of non-voters + NonVoterPercent float64 `json:"non_voter_percent"` // Percentage of non-voters + Participation float64 `json:"participation"` // Overall participation rate +} + +// SlotPagePtcAggregate represents a single PTC vote aggregate for a specific vote flag combination. 
+type SlotPagePtcAggregate struct { + PayloadPresent bool `json:"payload_present"` // Whether the payload was present + BlobDataAvailable bool `json:"blob_data_available"` // Whether blob data was available + AggregationBits []byte `json:"aggregation_bits"` // Bitfield of participating validators + Validators []types.NamedValidator `json:"validators"` // Validators that voted + Signature []byte `json:"signature"` // Aggregate signature + VoteCount uint64 `json:"vote_count"` // Number of votes in this aggregate + VotePercent float64 `json:"vote_percent"` // Percentage of committee } diff --git a/types/models/slots.go b/types/models/slots.go index 56bbbd469..bd7cb04dd 100644 --- a/types/models/slots.go +++ b/types/models/slots.go @@ -31,6 +31,7 @@ type SlotsPageData struct { DisplayBlockSize bool `json:"dp_blocksize"` DisplayRecvDelay bool `json:"dp_recvdelay"` DisplayExecTime bool `json:"dp_exectime"` + DisplayBuilder bool `json:"dp_builder"` DisplayColCount uint64 `json:"display_col_count"` IsDefaultPage bool `json:"default_page"` @@ -60,6 +61,7 @@ type SlotsPageDataSlot struct { Finalized bool `json:"scheduled"` Scheduled bool `json:"finalized"` Status uint8 `json:"status"` + PayloadStatus uint8 `json:"payload_status"` Synchronized bool `json:"synchronized"` Proposer uint64 `json:"proposer"` ProposerName string `json:"proposer_name"` @@ -88,6 +90,9 @@ type SlotsPageDataSlot struct { ForkGraph []*SlotsPageDataForkGraph `json:"fork_graph"` IsMevBlock bool `json:"is_mev_block"` MevBlockRelays string `json:"mev_block_relays"` + HasBuilder bool `json:"has_builder"` + BuilderIndex uint64 `json:"builder_index"` + BuilderName string `json:"builder_name"` } type SlotsPageDataForkGraph struct { diff --git a/types/models/slots_filtered.go b/types/models/slots_filtered.go index 9dde77429..12263a745 100644 --- a/types/models/slots_filtered.go +++ b/types/models/slots_filtered.go @@ -6,26 +6,31 @@ import ( // SlotsPageData is a struct to hold info for the slots page type 
SlotsFilteredPageData struct { - FilterGraffiti string `json:"filter_graffiti"` - FilterInvertGraffiti bool `json:"filter_invert_graffiti"` - FilterExtraData string `json:"filter_extra_data"` - FilterInvertExtraData bool `json:"filter_invert_extra_data"` - FilterProposer string `json:"filter_proposer"` - FilterProposerName string `json:"filter_pname"` - FilterInvertProposer bool `json:"filter_invert_proposer"` - FilterWithOrphaned uint8 `json:"filter_orphaned"` - FilterWithMissing uint8 `json:"filter_missing"` - FilterMinSyncAgg string `json:"filter_min_sync"` - FilterMaxSyncAgg string `json:"filter_max_sync"` - FilterMinExecTime string `json:"filter_min_exec"` - FilterMaxExecTime string `json:"filter_max_exec"` - FilterMinTxCount string `json:"filter_min_tx"` - FilterMaxTxCount string `json:"filter_max_tx"` - FilterMinBlobCount string `json:"filter_min_blob"` - FilterMaxBlobCount string `json:"filter_max_blob"` - FilterForkIds string `json:"filter_fork_ids"` - FilterMinEpoch string `json:"filter_min_epoch"` - FilterMaxEpoch string `json:"filter_max_epoch"` + FilterGraffiti string `json:"filter_graffiti"` + FilterInvertGraffiti bool `json:"filter_invert_graffiti"` + FilterExtraData string `json:"filter_extra_data"` + FilterInvertExtraData bool `json:"filter_invert_extra_data"` + FilterProposer string `json:"filter_proposer"` + FilterProposerName string `json:"filter_pname"` + FilterInvertProposer bool `json:"filter_invert_proposer"` + FilterStatusMissing bool `json:"filter_status_missing"` + FilterStatusCanonical bool `json:"filter_status_canonical"` + FilterStatusOrphaned bool `json:"filter_status_orphaned"` + FilterPayloadMissing bool `json:"filter_payload_missing"` + FilterPayloadCanonical bool `json:"filter_payload_canonical"` + FilterPayloadOrphaned bool `json:"filter_payload_orphaned"` + FilterMinSyncAgg string `json:"filter_min_sync"` + FilterMaxSyncAgg string `json:"filter_max_sync"` + FilterMinExecTime string `json:"filter_min_exec"` + FilterMaxExecTime 
string `json:"filter_max_exec"` + FilterMinTxCount string `json:"filter_min_tx"` + FilterMaxTxCount string `json:"filter_max_tx"` + FilterMinBlobCount string `json:"filter_min_blob"` + FilterMaxBlobCount string `json:"filter_max_blob"` + FilterForkIds string `json:"filter_fork_ids"` + FilterMinEpoch string `json:"filter_min_epoch"` + FilterMaxEpoch string `json:"filter_max_epoch"` + FilterBuilder string `json:"filter_builder"` DisplayEpoch bool `json:"dp_epoch"` DisplaySlot bool `json:"dp_slot"` @@ -45,6 +50,7 @@ type SlotsFilteredPageData struct { DisplayBlockSize bool `json:"dp_blocksize"` DisplayRecvDelay bool `json:"dp_recvdelay"` DisplayExecTime bool `json:"dp_exectime"` + DisplayBuilder bool `json:"dp_builder"` DisplayColCount uint64 `json:"display_col_count"` HasSnooperClients bool `json:"has_snooper_clients"` @@ -80,6 +86,7 @@ type SlotsFilteredPageDataSlot struct { Finalized bool `json:"scheduled"` Scheduled bool `json:"finalized"` Status uint8 `json:"status"` + PayloadStatus uint8 `json:"payload_status"` Synchronized bool `json:"synchronized"` Proposer uint64 `json:"proposer"` ProposerName string `json:"proposer_name"` @@ -107,4 +114,7 @@ type SlotsFilteredPageDataSlot struct { ExecutionTimes []ExecutionTimeDetail `json:"execution_times"` IsMevBlock bool `json:"is_mev_block"` MevBlockRelays string `json:"mev_block_relays"` + HasBuilder bool `json:"has_builder"` + BuilderIndex uint64 `json:"builder_index"` + BuilderName string `json:"builder_name"` } diff --git a/types/models/validator.go b/types/models/validator.go index a4e0c0e2a..13f018a0d 100644 --- a/types/models/validator.go +++ b/types/models/validator.go @@ -197,4 +197,6 @@ type ValidatorPageDataBeaconWithdrawal struct { Type uint8 `json:"type"` Address []byte `json:"address" ssz-size:"20"` Amount uint64 `json:"amount"` + RefSlot uint64 `json:"ref_slot"` + RefSlotRoot []byte `json:"ref_slot_root" ssz-size:"32"` } diff --git a/types/models/validator_slots.go b/types/models/validator_slots.go index 
c6cb6183b..e1b805b13 100644 --- a/types/models/validator_slots.go +++ b/types/models/validator_slots.go @@ -34,6 +34,7 @@ type ValidatorSlotsPageDataSlot struct { Finalized bool `json:"scheduled"` Scheduled bool `json:"finalized"` Status uint8 `json:"status"` + PayloadStatus uint8 `json:"payload_status"` Proposer uint64 `json:"proposer"` ProposerName string `json:"proposer_name"` AttestationCount uint64 `json:"attestation_count"` diff --git a/types/models/voluntary_exits.go b/types/models/voluntary_exits.go index 2d900a182..685dbf825 100644 --- a/types/models/voluntary_exits.go +++ b/types/models/voluntary_exits.go @@ -6,6 +6,7 @@ import ( // VoluntaryExitsPageData is a struct to hold info for the voluntary_exits page type VoluntaryExitsPageData struct { + FilterEntity string `json:"filter_entity"` // "all", "validator", or "builder" FilterMinSlot uint64 `json:"filter_mins"` FilterMaxSlot uint64 `json:"filter_maxs"` FilterMinIndex uint64 `json:"filter_mini"` @@ -41,6 +42,7 @@ type VoluntaryExitsPageDataExit struct { Orphaned bool `json:"orphaned"` ValidatorIndex uint64 `json:"vindex"` ValidatorName string `json:"vname"` + IsBuilder bool `json:"is_builder"` PublicKey []byte `json:"pubkey" ssz-size:"48"` WithdrawalCreds []byte `json:"wdcreds" ssz-size:"32"` ValidatorStatus string `json:"vstatus"` diff --git a/types/models/withdrawals.go b/types/models/withdrawals.go index 4c15cad35..8be173455 100644 --- a/types/models/withdrawals.go +++ b/types/models/withdrawals.go @@ -39,6 +39,7 @@ type WithdrawalsPageDataRecentWithdrawal struct { ValidatorValid bool `json:"vvalid"` ValidatorIndex uint64 `json:"vindex"` ValidatorName string `json:"vname"` + IsBuilder bool `json:"is_builder"` PublicKey []byte `json:"pubkey" ssz-size:"48"` LinkedTransaction bool `json:"linked_tx"` TransactionHash []byte `json:"tx_hash" ssz-size:"32"` @@ -66,8 +67,11 @@ type WithdrawalsPageDataBeaconWithdrawal struct { Orphaned bool `json:"orphaned"` Type uint8 `json:"type"` HasValidator bool 
`json:"has_validator"` + IsBuilder bool `json:"is_builder"` ValidatorIndex uint64 `json:"vindex"` ValidatorName string `json:"vname"` Address []byte `json:"address" ssz-size:"20"` Amount uint64 `json:"amount"` // Gwei + RefSlot uint64 `json:"ref_slot"` + RefSlotRoot []byte `json:"ref_slot_root" ssz-size:"32"` } diff --git a/types/models/withdrawals_list.go b/types/models/withdrawals_list.go index 5374942fa..c774b9246 100644 --- a/types/models/withdrawals_list.go +++ b/types/models/withdrawals_list.go @@ -6,12 +6,15 @@ import ( // WithdrawalsListPageData is a struct to hold info for the withdrawals list page. type WithdrawalsListPageData struct { - FilterValidator string `json:"filter_validator"` - FilterAddress string `json:"filter_address"` - FilterWithType string `json:"filter_type"` - FilterMinAmount string `json:"filter_min_amount"` - FilterMaxAmount string `json:"filter_max_amount"` - FilterWithOrphaned uint8 `json:"filter_orphaned"` + FilterEntity string `json:"filter_entity"` // "all", "validator", or "builder" + FilterMinIndex uint64 `json:"filter_mini"` + FilterMaxIndex uint64 `json:"filter_maxi"` + FilterValidatorName string `json:"filter_vname"` + FilterAddress string `json:"filter_address"` + FilterWithType string `json:"filter_type"` + FilterMinAmount string `json:"filter_min_amount"` + FilterMaxAmount string `json:"filter_max_amount"` + FilterWithOrphaned uint8 `json:"filter_orphaned"` Withdrawals []*WithdrawalsListPageDataWithdrawal `json:"withdrawals"` WithdrawalCount uint64 `json:"withdrawal_count"` @@ -43,8 +46,11 @@ type WithdrawalsListPageDataWithdrawal struct { Orphaned bool `json:"orphaned"` Type uint8 `json:"type"` HasValidator bool `json:"has_validator"` + IsBuilder bool `json:"is_builder"` ValidatorIndex uint64 `json:"vindex"` ValidatorName string `json:"vname"` Address []byte `json:"address" ssz-size:"20"` Amount uint64 `json:"amount"` // Gwei + RefSlot uint64 `json:"ref_slot"` + RefSlotRoot []byte `json:"ref_slot_root" ssz-size:"32"` } 
diff --git a/ui-package/src/components/SubmitDepositsForm/DepositGenerator.ts b/ui-package/src/components/SubmitDepositsForm/DepositGenerator.ts index 9e292326f..f440f8f08 100644 --- a/ui-package/src/components/SubmitDepositsForm/DepositGenerator.ts +++ b/ui-package/src/components/SubmitDepositsForm/DepositGenerator.ts @@ -30,7 +30,7 @@ const SigningData = new ContainerType({ domain: new ByteVectorType(32), }); -export type CredentialType = '00' | '01' | '02'; +export type CredentialType = '00' | '01' | '02' | '03'; export interface WithdrawalCredentialConfig { type: CredentialType; @@ -66,10 +66,10 @@ export function validateMnemonicWords(mnemonic: string): boolean { /** * Build withdrawal credentials from type and ETH address - * @param credType - '01' for execution, '02' for compounding + * @param credType - '01' for execution, '02' for compounding, '03' for builder * @param address - 20-byte ETH address (0x prefixed) */ -export function buildWithdrawalCredentialsFromAddress(credType: '01' | '02', address: string): string { +export function buildWithdrawalCredentialsFromAddress(credType: '01' | '02' | '03', address: string): string { const cleanAddress = address.startsWith('0x') ? 
address.slice(2) : address; if (cleanAddress.length !== 40) { throw new Error("Invalid address length"); @@ -113,9 +113,9 @@ export async function buildWithdrawalCredentials( return buildBLSWithdrawalCredentials(withdrawalPubkey); } else { if (!config.address) { - throw new Error("Address required for 0x01/0x02 credentials"); + throw new Error("Address required for 0x01/0x02/0x03 credentials"); } - return buildWithdrawalCredentialsFromAddress(config.type, config.address); + return buildWithdrawalCredentialsFromAddress(config.type as '01' | '02' | '03', config.address); } } diff --git a/ui-package/src/components/SubmitDepositsForm/DepositGeneratorModal.tsx b/ui-package/src/components/SubmitDepositsForm/DepositGeneratorModal.tsx index c71180f85..ad2b073e8 100644 --- a/ui-package/src/components/SubmitDepositsForm/DepositGeneratorModal.tsx +++ b/ui-package/src/components/SubmitDepositsForm/DepositGeneratorModal.tsx @@ -30,8 +30,8 @@ interface IValidatorOverrideState { useCustomAmount: boolean; // Credential override fields credentialInputMode: CredentialInputMode; - credentialType: CredentialType; // '00', '01', '02' - withdrawalAddress: string; // For 0x01/0x02 + credentialType: CredentialType; // '00', '01', '02', '03' + withdrawalAddress: string; // For 0x01/0x02/0x03 rawCredentials: string; // For raw mode useCustomCredentials: boolean; } @@ -403,6 +403,7 @@ const DepositGeneratorModal: React.FC = (props) => +

{credentialType !== '00' && ( @@ -534,8 +535,9 @@ const DepositGeneratorModal: React.FC = (props) => + - {/* Address input (only for 0x01/0x02) */} + {/* Address input (only for 0x01/0x02/0x03) */} {override.credentialType !== '00' && ( %v", index)) } +func FormatBuilder(index uint64, name string) template.HTML { + return formatBuilder(index, name, "fa-hard-hat mr-2", false) +} + +func FormatBuilderWithIndex(index uint64, name string) template.HTML { + return formatBuilder(index, name, "fa-hard-hat mr-2", true) +} + +func formatBuilder(index uint64, name string, icon string, withIndex bool) template.HTML { + if index == math.MaxUint64 { + return template.HTML(fmt.Sprintf(" Self-built", icon)) + } else if name != "" { + var nameLabel string + if withIndex { + nameLabel = fmt.Sprintf("%v (%v)", html.EscapeString(name), index) + } else { + nameLabel = html.EscapeString(name) + } + return template.HTML(fmt.Sprintf(" %v", index, icon, index, nameLabel)) + } + return template.HTML(fmt.Sprintf(" %v", icon, index, index)) +} + func FormatRecentTimeShort(ts time.Time) template.HTML { duration := time.Until(ts) var timeStr string @@ -871,6 +894,8 @@ func formatWithdrawalHash(hash []byte) template.HTML { colorClass = "text-success" } else if hash[0] == 0x02 { colorClass = "text-info" + } else if hash[0] == 0x03 { + colorClass = "text-primary" } else { colorClass = "text-warning" } @@ -883,8 +908,8 @@ func FormatWithdawalCredentials(hash []byte) template.HTML { return "INVALID CREDENTIALS" } - // For 0x01 or 0x02 credentials, link to the address - if hash[0] == 0x01 || hash[0] == 0x02 { + // For 0x01, 0x02 or 0x03 credentials, link to the address + if hash[0] == 0x01 || hash[0] == 0x02 || hash[0] == 0x03 { addr := fmt.Sprintf("0x%x", hash[12:]) // Use local link when execution indexer is enabled diff --git a/utils/templateFucs.go b/utils/templateFucs.go index 1ec5609dc..4258230d3 100644 --- a/utils/templateFucs.go +++ b/utils/templateFucs.go @@ -154,6 +154,8 @@ func 
GetTemplateFuncs() template.FuncMap { "formatValidatorWithIndex": FormatValidatorWithIndex, "formatValidatorNameWithIndex": FormatValidatorNameWithIndex, "formatSlashedValidator": FormatSlashedValidator, + "formatBuilder": FormatBuilder, + "formatBuilderWithIndex": FormatBuilderWithIndex, "formatWithdawalCredentials": FormatWithdawalCredentials, "formatRecentTimeShort": FormatRecentTimeShort, "formatGraffiti": FormatGraffiti,