core, core/state: instrument BAL slow-block metrics (#34861)

A first attempt to make the entire slow_blocks log output BAL- and
parallel-execution-aware.

- Sequential-path output is unchanged; the `bal` JSON field is
`omitempty`.
- All new types are pure-data snapshots (value receivers, no shared
mutation), so they're safe to copy across goroutine boundaries in the
parallel pipeline.

---------

Co-authored-by: Jared Wasinger <j-wasinger@hotmail.com>
This commit is contained in:
CPerezz 2026-05-12 16:43:25 +02:00 committed by GitHub
parent 230c6a5e11
commit ac36e48264
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
9 changed files with 192 additions and 43 deletions

View file

@ -656,6 +656,20 @@ func (bc *BlockChain) processBlockWithAccessList(parentRoot common.Hash, block *
writeTime := time.Since(writeStart) writeTime := time.Since(writeStart)
var stats ExecuteStats var stats ExecuteStats
wc := stateTransition.WrittenCounts()
d := stateTransition.Deletions()
codeLoaded, codeLoadBytes := prefetchReader.(state.CodeLoadTracker).CodeLoads()
stats.AccountLoaded = al.UniqueAccountCount()
stats.AccountUpdated = wc.Accounts - d.Accounts
stats.AccountDeleted = d.Accounts
stats.StorageLoaded = al.UniqueStorageSlotCount()
stats.StorageUpdated = wc.StorageSlots - d.Storage
stats.StorageDeleted = d.Storage
stats.CodeLoaded = codeLoaded
stats.CodeLoadBytes = codeLoadBytes
stats.CodeUpdated = wc.Codes
stats.CodeUpdateBytes = wc.CodeBytes
stats.ExecWall = res.ExecTime stats.ExecWall = res.ExecTime
stats.PostProcess = res.PostProcessTime stats.PostProcess = res.PostProcessTime
@ -666,12 +680,9 @@ func (bc *BlockChain) processBlockWithAccessList(parentRoot common.Hash, block *
stats.DatabaseCommit = m.TrieDBCommits stats.DatabaseCommit = m.TrieDBCommits
stats.Prefetch = m.StatePrefetch stats.Prefetch = m.StatePrefetch
} }
stats.Prefetch = prefetchReader.(state.PrefetcherMetricer).Metrics().Elapsed stats.Prefetch = prefetchReader.(state.PrefetcherMetricer).Metrics().Elapsed
if r, ok := prefetchReader.(state.ReaderStater); ok { stats.StateReadCacheStats = prefetchReader.(state.ReaderStater).GetStats()
stats.StateReadCacheStats = r.GetStats()
}
elapsed := time.Since(startTime) + 1 // prevent zero division elapsed := time.Since(startTime) + 1 // prevent zero division
stats.TotalTime = elapsed stats.TotalTime = elapsed
@ -2436,11 +2447,11 @@ func (bc *BlockChain) ProcessBlock(ctx context.Context, parentRoot common.Hash,
stats = &ExecuteStats{} stats = &ExecuteStats{}
) )
// Update the metrics touched during block processing and validation // Update the metrics touched during block processing and validation
stats.AccountReads = statedb.AccountReads // Account reads are complete(in processing) stats.AccountReads = statedb.AccountReads // Account reads are complete (in processing)
stats.StorageReads = statedb.StorageReads // Storage reads are complete(in processing) stats.StorageReads = statedb.StorageReads // Storage reads are complete (in processing)
stats.AccountUpdates = statedb.AccountUpdates // Account updates are complete(in validation) stats.AccountUpdates = statedb.AccountUpdates // Account updates are complete (in validation)
stats.StorageUpdates = statedb.StorageUpdates // Storage updates are complete(in validation) stats.StorageUpdates = statedb.StorageUpdates // Storage updates are complete (in validation)
stats.AccountHashes = statedb.AccountHashes // Account hashes are complete(in validation) stats.AccountHashes = statedb.AccountHashes // Account hashes are complete (in validation)
stats.CodeReads = statedb.CodeReads stats.CodeReads = statedb.CodeReads
stats.AccountLoaded = statedb.AccountLoaded stats.AccountLoaded = statedb.AccountLoaded
@ -2455,9 +2466,9 @@ func (bc *BlockChain) ProcessBlock(ctx context.Context, parentRoot common.Hash,
stats.CodeUpdated = statedb.CodeUpdated stats.CodeUpdated = statedb.CodeUpdated
stats.CodeUpdateBytes = statedb.CodeUpdateBytes stats.CodeUpdateBytes = statedb.CodeUpdateBytes
stats.Execution = ptime - (statedb.AccountReads + statedb.StorageReads + statedb.CodeReads) // The time spent on EVM processing stats.Execution = ptime - (statedb.AccountReads + statedb.StorageReads + statedb.CodeReads) // EVM processing time
stats.Validation = vtime - (statedb.AccountHashes + statedb.AccountUpdates + statedb.StorageUpdates) // The time spent on block validation stats.Validation = vtime - (statedb.AccountHashes + statedb.AccountUpdates + statedb.StorageUpdates) // Block validation time
stats.CrossValidation = xvtime // The time spent on stateless cross validation stats.CrossValidation = xvtime
// Write the block to the chain and get the status. // Write the block to the chain and get the status.
var status WriteStatus var status WriteStatus

View file

@ -38,17 +38,16 @@ type ExecuteStats struct {
StorageCommits time.Duration // Time spent on the storage trie commit StorageCommits time.Duration // Time spent on the storage trie commit
CodeReads time.Duration // Time spent on the contract code read CodeReads time.Duration // Time spent on the contract code read
// TODO: code bytes loaded AccountLoaded int
AccountLoaded int // Number of accounts loaded AccountUpdated int
AccountUpdated int // Number of accounts updated AccountDeleted int
AccountDeleted int // Number of accounts deleted StorageLoaded int
StorageLoaded int // Number of storage slots loaded StorageUpdated int
StorageUpdated int // Number of storage slots updated StorageDeleted int
StorageDeleted int // Number of storage slots deleted CodeLoaded int
CodeLoaded int // Number of contract code loaded CodeLoadBytes int
CodeLoadBytes int // Number of bytes read from contract code CodeUpdated int
CodeUpdated int // Number of contract code written (CREATE/CREATE2 + EIP-7702) CodeUpdateBytes int
CodeUpdateBytes int // Total bytes of code written
Execution time.Duration // Time spent on the EVM execution Execution time.Duration // Time spent on the EVM execution
Validation time.Duration // Time spent on the block validation Validation time.Duration // Time spent on the block validation
@ -223,7 +222,7 @@ func buildSlowBlockLog(s *ExecuteStats, block *types.Block) slowBlockLog {
}, },
Timing: slowBlockTime{ Timing: slowBlockTime{
ExecutionMs: durationToMs(s.Execution), ExecutionMs: durationToMs(s.Execution),
StateReadMs: durationToMs(s.AccountReads + s.StorageReads + s.CodeReads), StateReadMs: durationToMs(s.AccountReads + s.StorageReads + s.CodeReads + s.Prefetch),
StateHashMs: durationToMs(s.AccountHashes + s.AccountUpdates + s.StorageUpdates), StateHashMs: durationToMs(s.AccountHashes + s.AccountUpdates + s.StorageUpdates),
CommitMs: durationToMs(max(s.AccountCommits, s.StorageCommits) + s.DatabaseCommit + s.BlockWrite), CommitMs: durationToMs(max(s.AccountCommits, s.StorageCommits) + s.DatabaseCommit + s.BlockWrite),
TotalMs: durationToMs(s.TotalTime), TotalMs: durationToMs(s.TotalTime),
@ -304,8 +303,8 @@ func (s *ExecuteStats) logSlow(block *types.Block, slowBlockThreshold time.Durat
} }
func (s *ExecuteStats) reportBALMetrics() { func (s *ExecuteStats) reportBALMetrics() {
accountCommitTimer.Update(s.AccountCommits) // Account commits are complete, we can mark them accountCommitTimer.Update(s.AccountCommits)
storageCommitTimer.Update(s.StorageCommits) // Storage commits are complete, we can mark them storageCommitTimer.Update(s.StorageCommits)
if m := s.balTransitionStats; m != nil { if m := s.balTransitionStats; m != nil {
stateTriePrefetchTimer.Update(m.StatePrefetch) stateTriePrefetchTimer.Update(m.StatePrefetch)

View file

@ -14,16 +14,13 @@ import (
"golang.org/x/sync/errgroup" "golang.org/x/sync/errgroup"
) )
// ProcessResultWithMetrics wraps ProcessResult with some metrics that are // ProcessResultWithMetrics wraps ProcessResult with timing breakdown for BAL block processing.
// emitted when executing blocks containing access lists.
type ProcessResultWithMetrics struct { type ProcessResultWithMetrics struct {
ProcessResult *ProcessResult ProcessResult *ProcessResult
PreProcessTime time.Duration PreProcessTime time.Duration
StateTransitionMetrics *state.BALStateTransitionMetrics StateTransitionMetrics *state.BALStateTransitionMetrics
// the time it took to execute all txs in the block ExecTime time.Duration
ExecTime time.Duration PostProcessTime time.Duration
PostProcessTime time.Duration
// TODO: have the prefetch metric in here as well?
} }
// ParallelStateProcessor is used to execute and verify blocks containing // ParallelStateProcessor is used to execute and verify blocks containing

View file

@ -3,6 +3,7 @@ package state
import ( import (
"maps" "maps"
"sync" "sync"
"sync/atomic"
"time" "time"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
@ -20,6 +21,7 @@ import (
// this object is only used for a single block. // this object is only used for a single block.
type BALStateTransition struct { type BALStateTransition struct {
accessList bal.AccessListReader accessList bal.AccessListReader
written bal.WrittenCounts
db Database db Database
reader Reader reader Reader
stateTrie Trie stateTrie Trie
@ -40,6 +42,11 @@ type BALStateTransition struct {
tries sync.Map //map[common.Address]Trie tries sync.Map //map[common.Address]Trie
deletions map[common.Address]struct{} deletions map[common.Address]struct{}
// Deletion counters; not derivable from the BAL alone (selfdestruct vs
// balance/nonce reset is indistinguishable without prestate).
accountDeleted int
storageDeleted atomic.Int64
stateUpdate *stateUpdate stateUpdate *stateUpdate
metrics BALStateTransitionMetrics metrics BALStateTransitionMetrics
@ -52,6 +59,19 @@ func (s *BALStateTransition) Metrics() *BALStateTransitionMetrics {
return &s.metrics return &s.metrics
} }
// DeletionCounts holds per-block deletion counters from the parallel root-pass.
// These are tracked separately because deletions are not derivable from the
// BAL alone: a selfdestruct and a balance/nonce reset are indistinguishable
// without the prestate.
type DeletionCounts struct {
	Accounts int // accounts deleted while computing the block's state root
	Storage  int // storage slots deleted while computing the block's state root
}

// Deletions returns a snapshot of the per-block deletion counters. The storage
// counter is read atomically; call this after the parallel root-pass has
// finished so the account counter is stable as well.
func (s *BALStateTransition) Deletions() DeletionCounts {
	return DeletionCounts{
		Accounts: s.accountDeleted,
		Storage:  int(s.storageDeleted.Load()),
	}
}
type BALStateTransitionMetrics struct { type BALStateTransitionMetrics struct {
// trie hashing metrics // trie hashing metrics
AccountUpdate time.Duration AccountUpdate time.Duration
@ -75,6 +95,7 @@ func NewBALStateTransition(block *types.Block, prefetchReader Reader, db Databas
return &BALStateTransition{ return &BALStateTransition{
accessList: bal.NewAccessListReader(*block.AccessList()), accessList: bal.NewAccessListReader(*block.AccessList()),
written: block.AccessList().WrittenCounts(),
db: db, db: db,
reader: prefetchReader, reader: prefetchReader,
stateTrie: stateTrie, stateTrie: stateTrie,
@ -90,6 +111,11 @@ func NewBALStateTransition(block *types.Block, prefetchReader Reader, db Databas
}, nil }, nil
} }
// WrittenCounts reports the BAL-derived aggregate write counts for this
// block. The value is computed once in NewBALStateTransition and cached, so
// this accessor is cheap and safe to call repeatedly.
func (s *BALStateTransition) WrittenCounts() bal.WrittenCounts {
	counts := s.written
	return counts
}
func (s *BALStateTransition) Error() error { func (s *BALStateTransition) Error() error {
return s.err return s.err
} }
@ -334,15 +360,11 @@ func (s *BALStateTransition) CommitWithUpdate(block uint64, deleteEmptyObjects b
return common.Hash{}, nil, err return common.Hash{}, nil, err
} }
/* storageDeleted := s.storageDeleted.Load()
TODO: derive these from the BAL accountUpdatedMeter.Mark(int64(s.written.Accounts - s.accountDeleted))
^ I think even then, there is a semantic difference with how these metrics were calculated previously storageUpdatedMeter.Mark(int64(s.written.StorageSlots) - storageDeleted)
I don't know if it makes sense to recompute those, or just derive new ones from the BAL accountDeletedMeter.Mark(int64(s.accountDeleted))
accountUpdatedMeter.Mark(int64(s.accountUpdated)) storageDeletedMeter.Mark(storageDeleted)
storageUpdatedMeter.Mark(s.storageUpdated.Load())
accountDeletedMeter.Mark(int64(s.accountDeleted))
storageDeletedMeter.Mark(s.storageDeleted.Load())
*/
accountTrieUpdatedMeter.Mark(int64(accountTrieNodesUpdated)) accountTrieUpdatedMeter.Mark(int64(accountTrieNodesUpdated))
accountTrieDeletedMeter.Mark(int64(accountTrieNodesDeleted)) accountTrieDeletedMeter.Mark(int64(accountTrieNodesDeleted))
storageTriesUpdatedMeter.Mark(int64(storageTrieNodesUpdated)) storageTriesUpdatedMeter.Mark(int64(storageTrieNodesUpdated))
@ -477,6 +499,7 @@ func (s *BALStateTransition) IntermediateRoot(_ bool) common.Hash {
return common.Hash{} return common.Hash{}
} }
s.deletions[mutatedAddr] = struct{}{} s.deletions[mutatedAddr] = struct{}{}
s.accountDeleted++
} else { } else {
acct, code := s.updateAccount(mutatedAddr) acct, code := s.updateAccount(mutatedAddr)

View file

@ -18,6 +18,9 @@ package state
import ( import (
"errors" "errors"
"sync"
"sync/atomic"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/overlay" "github.com/ethereum/go-ethereum/core/overlay"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
@ -28,8 +31,6 @@ import (
"github.com/ethereum/go-ethereum/trie/transitiontrie" "github.com/ethereum/go-ethereum/trie/transitiontrie"
"github.com/ethereum/go-ethereum/triedb" "github.com/ethereum/go-ethereum/triedb"
"github.com/ethereum/go-ethereum/triedb/database" "github.com/ethereum/go-ethereum/triedb/database"
"sync"
"sync/atomic"
) )
// ContractCodeReader defines the interface for accessing contract code. // ContractCodeReader defines the interface for accessing contract code.
@ -530,6 +531,8 @@ type reader struct {
ContractCodeReader ContractCodeReader
StateReader StateReader
PrefetcherMetricer PrefetcherMetricer
codeLoaded sync.Map // common.Address → int (first-seen len(code))
} }
// newReader constructs a reader with the supplied code reader and state reader. // newReader constructs a reader with the supplied code reader and state reader.
@ -548,6 +551,33 @@ func newReaderWithPrefetch(codeReader ContractCodeReader, stateReader StateReade
} }
} }
// Code fetches the contract code for addr/codeHash from the wrapped code
// reader and, when code is present, records the first-seen byte length per
// address for the deduplicated code-load statistics.
func (r *reader) Code(addr common.Address, codeHash common.Hash) []byte {
	code := r.ContractCodeReader.Code(addr, codeHash)
	if n := len(code); n > 0 {
		// LoadOrStore keeps the first observed length for this address.
		r.codeLoaded.LoadOrStore(addr, n)
	}
	return code
}
// CodeSize resolves the code size via the wrapped code reader and, on a
// successful non-zero result, records the first-seen size per address for the
// deduplicated code-load statistics.
func (r *reader) CodeSize(addr common.Address, codeHash common.Hash) (int, error) {
	size, err := r.ContractCodeReader.CodeSize(addr, codeHash)
	if err != nil || size == 0 {
		return size, err
	}
	// LoadOrStore keeps the first observed size for this address.
	r.codeLoaded.LoadOrStore(addr, size)
	return size, nil
}
// CodeLoads returns the count of unique contracts whose code was fetched and
// the sum of their first-seen byte lengths. Only call this once Reader use
// has quiesced: sync.Map iteration is not a consistent snapshot under
// concurrent writes.
func (r *reader) CodeLoads() (count, bytes int) {
	var contracts, total int
	r.codeLoaded.Range(func(_, size any) bool {
		contracts++
		total += size.(int)
		return true
	})
	return contracts, total
}
// GetCodeStats returns the statistics of code access. // GetCodeStats returns the statistics of code access.
func (r *reader) GetCodeStats() ContractCodeReaderStats { func (r *reader) GetCodeStats() ContractCodeReaderStats {
if stater, ok := r.ContractCodeReader.(ContractCodeReaderStater); ok { if stater, ok := r.ContractCodeReader.(ContractCodeReaderStater); ok {

View file

@ -382,3 +382,16 @@ func (r *readerTracker) TouchStorage(addr common.Address, slot common.Hash) {
} }
list[slot] = struct{}{} list[slot] = struct{}{}
} }
// CodeLoads forwards the deduplicated code-load statistics from the wrapped
// reader. If the wrapped reader does not implement CodeLoadTracker, zero
// counts are returned instead of panicking on the type assertion — the same
// graceful degradation GetStateStats uses for missing stats support.
func (r *readerTracker) CodeLoads() (count, bytes int) {
	if tracker, ok := r.Reader.(CodeLoadTracker); ok {
		return tracker.CodeLoads()
	}
	return 0, 0
}
// GetStateStats forwards stats from the wrapped reader; without this, BAL
// blocks would emit zero cache hit/miss counts. Readers that do not implement
// StateReaderStater yield a zero-valued stats struct.
func (r *prefetchStateReader) GetStateStats() StateReaderStats {
	stater, ok := r.StateReader.(StateReaderStater)
	if !ok {
		return StateReaderStats{}
	}
	return stater.GetStateStats()
}

View file

@ -263,3 +263,31 @@ func TestTrackerSurvivesStateDBCache(t *testing.T) {
t.Fatal("slot must be tracked on cache hit (storage)") t.Fatal("slot must be tracked on cache hit (storage)")
} }
} }
// TestPrefetchStateReaderForwardsStats locks down that prefetchStateReader
// exposes the underlying stateReaderWithStats counters via GetStateStats.
func TestPrefetchStateReaderForwardsStats(t *testing.T) {
	inner := newRefStateReader()
	addr := testrand.Address()

	statsReader := newStateReaderWithStats(newStateReaderWithCache(inner))
	prefetch := newPrefetchStateReaderInternal(statsReader, nil, 1)

	// Read the same account twice: the first access misses the cache, the
	// second hits it, so both counters should be non-zero afterwards.
	if _, err := prefetch.Account(addr); err != nil {
		t.Fatalf("Account: %v", err)
	}
	if _, err := prefetch.Account(addr); err != nil {
		t.Fatalf("Account (second): %v", err)
	}

	want := statsReader.GetStateStats()
	if want.AccountCacheHit == 0 || want.AccountCacheMiss == 0 {
		t.Fatalf("inner stats not populated: %+v", want)
	}
	if got := prefetch.GetStateStats(); got != want {
		t.Fatalf("forward mismatch: got %+v, want %+v", got, want)
	}
}

View file

@ -16,6 +16,11 @@
package state package state
// CodeLoadTracker exposes a Reader's deduplicated code-load count and bytes.
// Implementations count each contract address at most once and report its
// first-seen code length, so the totals are stable no matter how many times a
// contract's code is fetched within the block.
type CodeLoadTracker interface {
	// CodeLoads returns the number of unique contracts whose code was
	// loaded and the sum of their first-seen byte lengths.
	CodeLoads() (count, bytes int)
}
// ContractCodeReaderStats aggregates statistics for the contract code reader. // ContractCodeReaderStats aggregates statistics for the contract code reader.
type ContractCodeReaderStats struct { type ContractCodeReaderStats struct {
CacheHit int64 // Number of cache hits CacheHit int64 // Number of cache hits

View file

@ -44,6 +44,49 @@ import (
// BlockAccessList is the encoding format of AccessListBuilder. // BlockAccessList is the encoding format of AccessListBuilder.
type BlockAccessList []AccountAccess type BlockAccessList []AccountAccess
// UniqueAccountCount returns the number of distinct account addresses in the
// block access list. The list holds one entry per account, so this is simply
// its length.
func (e BlockAccessList) UniqueAccountCount() int {
	count := len(e)
	return count
}
// UniqueStorageSlotCount returns the total number of distinct (address, slot)
// pairs accessed across all accounts. Reads and writes are disjoint per
// account by spec validation, so the two sets can be summed directly.
func (e BlockAccessList) UniqueStorageSlotCount() int {
	total := 0
	for i := range e {
		acct := &e[i] // index + pointer to avoid copying the entry
		total += len(acct.StorageReads)
		total += len(acct.StorageChanges)
	}
	return total
}
// WrittenCounts groups per-block aggregate write counts derived from the BAL.
type WrittenCounts struct {
	Accounts     int // accounts with at least one storage/balance/nonce/code change
	StorageSlots int // total storage slots written across all accounts
	Codes        int // accounts whose code was written during the block
	CodeBytes    int // total bytes of final code written (last code change per account)
}
// WrittenCounts walks the BAL once and returns the aggregate write counts.
// An account is counted as written if it has any storage, balance, nonce, or
// code change; only the final code change per account contributes to the
// byte total, since that is the code left in state after the block.
func (e BlockAccessList) WrittenCounts() WrittenCounts {
	var counts WrittenCounts
	for i := range e {
		acc := &e[i] // pointer to avoid copying the entry per iteration

		written := len(acc.StorageChanges) > 0 ||
			len(acc.BalanceChanges) > 0 ||
			len(acc.NonceChanges) > 0 ||
			len(acc.CodeChanges) > 0
		if written {
			counts.Accounts++
		}

		counts.StorageSlots += len(acc.StorageChanges)

		if last := len(acc.CodeChanges) - 1; last >= 0 {
			counts.Codes++
			counts.CodeBytes += len(acc.CodeChanges[last].Code)
		}
	}
	return counts
}
func (e BlockAccessList) EncodeRLP(_w io.Writer) error { func (e BlockAccessList) EncodeRLP(_w io.Writer) error {
w := rlp.NewEncoderBuffer(_w) w := rlp.NewEncoderBuffer(_w)
l := w.List() l := w.List()