diff --git a/cmd/geth/chaincmd.go b/cmd/geth/chaincmd.go index c5145bbfb7..e535d7d892 100644 --- a/cmd/geth/chaincmd.go +++ b/cmd/geth/chaincmd.go @@ -96,6 +96,7 @@ if one is set. Otherwise it prints the genesis from the datadir.`, utils.CacheNoPrefetchFlag, utils.CachePreimagesFlag, utils.NoCompactionFlag, + utils.LogSlowBlockFlag, utils.MetricsEnabledFlag, utils.MetricsEnabledExpensiveFlag, utils.MetricsHTTPFlag, diff --git a/cmd/geth/main.go b/cmd/geth/main.go index 851ae1ce0b..b294ee593e 100644 --- a/cmd/geth/main.go +++ b/cmd/geth/main.go @@ -157,6 +157,7 @@ var ( utils.BeaconGenesisTimeFlag, utils.BeaconCheckpointFlag, utils.BeaconCheckpointFileFlag, + utils.LogSlowBlockFlag, }, utils.NetworkFlags, utils.DatabaseFlags) rpcFlags = []cli.Flag{ diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index 996cb276ee..0d53716f6c 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -672,6 +672,12 @@ var ( Usage: "Disables db compaction after import", Category: flags.LoggingCategory, } + LogSlowBlockFlag = &cli.DurationFlag{ + Name: "debug.logslowblock", + Usage: "Block execution time threshold beyond which detailed statistics will be logged (0 means disable)", + Value: ethconfig.Defaults.SlowBlockThreshold, + Category: flags.LoggingCategory, + } // MISC settings SyncTargetFlag = &cli.StringFlag{ @@ -1720,6 +1726,9 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) { if ctx.IsSet(LogNoHistoryFlag.Name) { cfg.LogNoHistory = true } + if ctx.IsSet(LogSlowBlockFlag.Name) { + cfg.SlowBlockThreshold = ctx.Duration(LogSlowBlockFlag.Name) + } if ctx.IsSet(LogExportCheckpointsFlag.Name) { cfg.LogExportCheckpoints = ctx.String(LogExportCheckpointsFlag.Name) } @@ -2299,6 +2308,7 @@ func MakeChain(ctx *cli.Context, stack *node.Node, readonly bool) (*core.BlockCh Preimages: ctx.Bool(CachePreimagesFlag.Name), StateScheme: scheme, StateHistory: ctx.Uint64(StateHistoryFlag.Name), + // Disable transaction indexing/unindexing. 
TxLookupLimit: -1, @@ -2310,6 +2320,9 @@ func MakeChain(ctx *cli.Context, stack *node.Node, readonly bool) (*core.BlockCh // Enable state size tracking if enabled StateSizeTracking: ctx.Bool(StateSizeTrackingFlag.Name), + + // Configure the slow block statistic logger + SlowBlockThreshold: ctx.Duration(LogSlowBlockFlag.Name), } if options.ArchiveMode && !options.Preimages { options.Preimages = true diff --git a/core/blockchain.go b/core/blockchain.go index b7acd12aca..7fe39e2b65 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -198,6 +198,10 @@ type BlockChainConfig struct { // StateSizeTracking indicates whether the state size tracking is enabled. StateSizeTracking bool + + // SlowBlockThreshold is the block execution time threshold beyond which + // detailed statistics will be logged. + SlowBlockThreshold time.Duration } // DefaultConfig returns the default config. @@ -337,7 +341,8 @@ type BlockChain struct { logger *tracing.Hooks stateSizer *state.SizeTracker // State size tracking - lastForkReadyAlert time.Time // Last time there was a fork readiness print out + lastForkReadyAlert time.Time // Last time there was a fork readiness print out + slowBlockThreshold time.Duration // Block execution time threshold beyond which detailed statistics will be logged } // NewBlockChain returns a fully initialised block chain using information @@ -372,19 +377,20 @@ func NewBlockChain(db ethdb.Database, genesis *Genesis, engine consensus.Engine, log.Info("") bc := &BlockChain{ - chainConfig: chainConfig, - cfg: cfg, - db: db, - triedb: triedb, - triegc: prque.New[int64, common.Hash](nil), - chainmu: syncx.NewClosableMutex(), - bodyCache: lru.NewCache[common.Hash, *types.Body](bodyCacheLimit), - bodyRLPCache: lru.NewCache[common.Hash, rlp.RawValue](bodyCacheLimit), - receiptsCache: lru.NewCache[common.Hash, []*types.Receipt](receiptsCacheLimit), - blockCache: lru.NewCache[common.Hash, *types.Block](blockCacheLimit), - txLookupCache: lru.NewCache[common.Hash, 
txLookup](txLookupCacheLimit), - engine: engine, - logger: cfg.VmConfig.Tracer, + chainConfig: chainConfig, + cfg: cfg, + db: db, + triedb: triedb, + triegc: prque.New[int64, common.Hash](nil), + chainmu: syncx.NewClosableMutex(), + bodyCache: lru.NewCache[common.Hash, *types.Body](bodyCacheLimit), + bodyRLPCache: lru.NewCache[common.Hash, rlp.RawValue](bodyCacheLimit), + receiptsCache: lru.NewCache[common.Hash, []*types.Receipt](receiptsCacheLimit), + blockCache: lru.NewCache[common.Hash, *types.Block](blockCacheLimit), + txLookupCache: lru.NewCache[common.Hash, txLookup](txLookupCacheLimit), + engine: engine, + logger: cfg.VmConfig.Tracer, + slowBlockThreshold: cfg.SlowBlockThreshold, } bc.hc, err = NewHeaderChain(db, chainConfig, engine, bc.insertStopped) if err != nil { @@ -1847,7 +1853,7 @@ func (bc *BlockChain) insertChain(chain types.Blocks, setHead bool, makeWitness // still need re-execution to generate snapshots that are missing case err != nil && !errors.Is(err, ErrKnownBlock): stats.ignored += len(it.chain) - bc.reportBlock(block, nil, err) + bc.reportBadBlock(block, nil, err) return nil, it.index, err } // Track the singleton witness from this chain insertion (if any) @@ -1915,6 +1921,14 @@ func (bc *BlockChain) insertChain(chain types.Blocks, setHead bool, makeWitness if err != nil { return nil, it.index, err } + res.stats.reportMetrics() + + // Log slow block only if a single block is inserted (usually after the + // initial sync) to not overwhelm the users. 
+ if len(chain) == 1 { + res.stats.logSlow(block, bc.slowBlockThreshold) + } + // Report the import stats before returning the various results stats.processed++ stats.usedGas += res.usedGas @@ -1975,15 +1989,20 @@ type blockProcessingResult struct { procTime time.Duration status WriteStatus witness *stateless.Witness + stats *ExecuteStats } func (bpr *blockProcessingResult) Witness() *stateless.Witness { return bpr.witness } +func (bpr *blockProcessingResult) Stats() *ExecuteStats { + return bpr.stats +} + // ProcessBlock executes and validates the given block. If there was no error // it writes the block and associated state to database. -func (bc *BlockChain) ProcessBlock(parentRoot common.Hash, block *types.Block, setHead bool, makeWitness bool) (_ *blockProcessingResult, blockEndErr error) { +func (bc *BlockChain) ProcessBlock(parentRoot common.Hash, block *types.Block, setHead bool, makeWitness bool) (result *blockProcessingResult, blockEndErr error) { var ( err error startTime = time.Now() @@ -2017,16 +2036,22 @@ func (bc *BlockChain) ProcessBlock(parentRoot common.Hash, block *types.Block, s } // Upload the statistics of reader at the end defer func() { - stats := prefetch.GetStats() - accountCacheHitPrefetchMeter.Mark(stats.AccountHit) - accountCacheMissPrefetchMeter.Mark(stats.AccountMiss) - storageCacheHitPrefetchMeter.Mark(stats.StorageHit) - storageCacheMissPrefetchMeter.Mark(stats.StorageMiss) - stats = process.GetStats() - accountCacheHitMeter.Mark(stats.AccountHit) - accountCacheMissMeter.Mark(stats.AccountMiss) - storageCacheHitMeter.Mark(stats.StorageHit) - storageCacheMissMeter.Mark(stats.StorageMiss) + pStat := prefetch.GetStats() + accountCacheHitPrefetchMeter.Mark(pStat.AccountCacheHit) + accountCacheMissPrefetchMeter.Mark(pStat.AccountCacheMiss) + storageCacheHitPrefetchMeter.Mark(pStat.StorageCacheHit) + storageCacheMissPrefetchMeter.Mark(pStat.StorageCacheMiss) + + rStat := process.GetStats() + 
accountCacheHitMeter.Mark(rStat.AccountCacheHit) + accountCacheMissMeter.Mark(rStat.AccountCacheMiss) + storageCacheHitMeter.Mark(rStat.StorageCacheHit) + storageCacheMissMeter.Mark(rStat.StorageCacheMiss) + + if result != nil { + result.stats.StatePrefetchCacheStats = pStat + result.stats.StateReadCacheStats = rStat + } }() go func(start time.Time, throwaway *state.StateDB, block *types.Block) { @@ -2083,14 +2108,14 @@ func (bc *BlockChain) ProcessBlock(parentRoot common.Hash, block *types.Block, s pstart := time.Now() res, err := bc.processor.Process(block, statedb, bc.cfg.VmConfig) if err != nil { - bc.reportBlock(block, res, err) + bc.reportBadBlock(block, res, err) return nil, err } ptime := time.Since(pstart) vstart := time.Now() if err := bc.validator.ValidateState(block, statedb, res, false); err != nil { - bc.reportBlock(block, res, err) + bc.reportBadBlock(block, res, err) return nil, err } vtime := time.Since(vstart) @@ -2124,26 +2149,28 @@ func (bc *BlockChain) ProcessBlock(parentRoot common.Hash, block *types.Block, s } } - xvtime := time.Since(xvstart) - proctime := time.Since(startTime) // processing + validation + cross validation - + var ( + xvtime = time.Since(xvstart) + proctime = time.Since(startTime) // processing + validation + cross validation + stats = &ExecuteStats{} + ) // Update the metrics touched during block processing and validation - accountReadTimer.Update(statedb.AccountReads) // Account reads are complete(in processing) - storageReadTimer.Update(statedb.StorageReads) // Storage reads are complete(in processing) - if statedb.AccountLoaded != 0 { - accountReadSingleTimer.Update(statedb.AccountReads / time.Duration(statedb.AccountLoaded)) - } - if statedb.StorageLoaded != 0 { - storageReadSingleTimer.Update(statedb.StorageReads / time.Duration(statedb.StorageLoaded)) - } - accountUpdateTimer.Update(statedb.AccountUpdates) // Account updates are complete(in validation) - storageUpdateTimer.Update(statedb.StorageUpdates) // Storage 
updates are complete(in validation) - accountHashTimer.Update(statedb.AccountHashes) // Account hashes are complete(in validation) - triehash := statedb.AccountHashes // The time spent on tries hashing - trieUpdate := statedb.AccountUpdates + statedb.StorageUpdates // The time spent on tries update - blockExecutionTimer.Update(ptime - (statedb.AccountReads + statedb.StorageReads)) // The time spent on EVM processing - blockValidationTimer.Update(vtime - (triehash + trieUpdate)) // The time spent on block validation - blockCrossValidationTimer.Update(xvtime) // The time spent on stateless cross validation + stats.AccountReads = statedb.AccountReads // Account reads are complete(in processing) + stats.StorageReads = statedb.StorageReads // Storage reads are complete(in processing) + stats.AccountUpdates = statedb.AccountUpdates // Account updates are complete(in validation) + stats.StorageUpdates = statedb.StorageUpdates // Storage updates are complete(in validation) + stats.AccountHashes = statedb.AccountHashes // Account hashes are complete(in validation) + + stats.AccountLoaded = statedb.AccountLoaded + stats.AccountUpdated = statedb.AccountUpdated + stats.AccountDeleted = statedb.AccountDeleted + stats.StorageLoaded = statedb.StorageLoaded + stats.StorageUpdated = int(statedb.StorageUpdated.Load()) + stats.StorageDeleted = int(statedb.StorageDeleted.Load()) + + stats.Execution = ptime - (statedb.AccountReads + statedb.StorageReads) // The time spent on EVM processing + stats.Validation = vtime - (statedb.AccountHashes + statedb.AccountUpdates + statedb.StorageUpdates) // The time spent on block validation + stats.CrossValidation = xvtime // The time spent on stateless cross validation // Write the block to the chain and get the status. 
var ( @@ -2165,24 +2192,22 @@ func (bc *BlockChain) ProcessBlock(parentRoot common.Hash, block *types.Block, s } // Update the metrics touched during block commit - accountCommitTimer.Update(statedb.AccountCommits) // Account commits are complete, we can mark them - storageCommitTimer.Update(statedb.StorageCommits) // Storage commits are complete, we can mark them - snapshotCommitTimer.Update(statedb.SnapshotCommits) // Snapshot commits are complete, we can mark them - triedbCommitTimer.Update(statedb.TrieDBCommits) // Trie database commits are complete, we can mark them + stats.AccountCommits = statedb.AccountCommits // Account commits are complete, we can mark them + stats.StorageCommits = statedb.StorageCommits // Storage commits are complete, we can mark them + stats.SnapshotCommit = statedb.SnapshotCommits // Snapshot commits are complete, we can mark them + stats.TrieDBCommit = statedb.TrieDBCommits // Trie database commits are complete, we can mark them + stats.BlockWrite = time.Since(wstart) - max(statedb.AccountCommits, statedb.StorageCommits) /* concurrent */ - statedb.SnapshotCommits - statedb.TrieDBCommits - blockWriteTimer.Update(time.Since(wstart) - max(statedb.AccountCommits, statedb.StorageCommits) /* concurrent */ - statedb.SnapshotCommits - statedb.TrieDBCommits) elapsed := time.Since(startTime) + 1 // prevent zero division - blockInsertTimer.Update(elapsed) - - // TODO(rjl493456442) generalize the ResettingTimer - mgasps := float64(res.GasUsed) * 1000 / float64(elapsed) - chainMgaspsMeter.Update(time.Duration(mgasps)) + stats.TotalTime = elapsed + stats.MgasPerSecond = float64(res.GasUsed) * 1000 / float64(elapsed) return &blockProcessingResult{ usedGas: res.GasUsed, procTime: proctime, status: status, witness: witness, + stats: stats, }, nil } @@ -2667,8 +2692,8 @@ func (bc *BlockChain) skipBlock(err error, it *insertIterator) bool { return false } -// reportBlock logs a bad block error. 
-func (bc *BlockChain) reportBlock(block *types.Block, res *ProcessResult, err error) { +// reportBadBlock logs a bad block error. +func (bc *BlockChain) reportBadBlock(block *types.Block, res *ProcessResult, err error) { var receipts types.Receipts if res != nil { receipts = res.Receipts diff --git a/core/blockchain_stats.go b/core/blockchain_stats.go new file mode 100644 index 0000000000..0cebebc20a --- /dev/null +++ b/core/blockchain_stats.go @@ -0,0 +1,138 @@ +// Copyright 2025 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. + +package core + +import ( + "fmt" + "strings" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/state" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/log" +) + +// ExecuteStats includes all the statistics of a block execution in details. 
+type ExecuteStats struct { + // State read times + AccountReads time.Duration // Time spent on the account reads + StorageReads time.Duration // Time spent on the storage reads + AccountHashes time.Duration // Time spent on the account trie hash + AccountUpdates time.Duration // Time spent on the account trie update + AccountCommits time.Duration // Time spent on the account trie commit + StorageUpdates time.Duration // Time spent on the storage trie update + StorageCommits time.Duration // Time spent on the storage trie commit + + AccountLoaded int // Number of accounts loaded + AccountUpdated int // Number of accounts updated + AccountDeleted int // Number of accounts deleted + StorageLoaded int // Number of storage slots loaded + StorageUpdated int // Number of storage slots updated + StorageDeleted int // Number of storage slots deleted + + Execution time.Duration // Time spent on the EVM execution + Validation time.Duration // Time spent on the block validation + CrossValidation time.Duration // Optional, time spent on the block cross validation + SnapshotCommit time.Duration // Time spent on snapshot commit + TrieDBCommit time.Duration // Time spent on database commit + BlockWrite time.Duration // Time spent on block write + TotalTime time.Duration // The total time spent on block execution + MgasPerSecond float64 // The million gas processed per second + + // Cache hit rates + StateReadCacheStats state.ReaderStats + StatePrefetchCacheStats state.ReaderStats +} + +// reportMetrics uploads execution statistics to the metrics system. 
+func (s *ExecuteStats) reportMetrics() { + accountReadTimer.Update(s.AccountReads) // Account reads are complete(in processing) + storageReadTimer.Update(s.StorageReads) // Storage reads are complete(in processing) + if s.AccountLoaded != 0 { + accountReadSingleTimer.Update(s.AccountReads / time.Duration(s.AccountLoaded)) + } + if s.StorageLoaded != 0 { + storageReadSingleTimer.Update(s.StorageReads / time.Duration(s.StorageLoaded)) + } + + accountUpdateTimer.Update(s.AccountUpdates) // Account updates are complete(in validation) + storageUpdateTimer.Update(s.StorageUpdates) // Storage updates are complete(in validation) + accountHashTimer.Update(s.AccountHashes) // Account hashes are complete(in validation) + + accountCommitTimer.Update(s.AccountCommits) // Account commits are complete, we can mark them + storageCommitTimer.Update(s.StorageCommits) // Storage commits are complete, we can mark them + + blockExecutionTimer.Update(s.Execution) // The time spent on EVM processing + blockValidationTimer.Update(s.Validation) // The time spent on block validation + blockCrossValidationTimer.Update(s.CrossValidation) // The time spent on stateless cross validation + snapshotCommitTimer.Update(s.SnapshotCommit) // Snapshot commits are complete, we can mark them + triedbCommitTimer.Update(s.TrieDBCommit) // Trie database commits are complete, we can mark them + blockWriteTimer.Update(s.BlockWrite) // The time spent on block write + blockInsertTimer.Update(s.TotalTime) // The total time spent on block execution + chainMgaspsMeter.Update(time.Duration(s.MgasPerSecond)) // TODO(rjl493456442) generalize the ResettingTimer + + // Cache hit rates + accountCacheHitPrefetchMeter.Mark(s.StatePrefetchCacheStats.AccountCacheHit) + accountCacheMissPrefetchMeter.Mark(s.StatePrefetchCacheStats.AccountCacheMiss) + storageCacheHitPrefetchMeter.Mark(s.StatePrefetchCacheStats.StorageCacheHit) + storageCacheMissPrefetchMeter.Mark(s.StatePrefetchCacheStats.StorageCacheMiss) + + 
accountCacheHitMeter.Mark(s.StateReadCacheStats.AccountCacheHit) + accountCacheMissMeter.Mark(s.StateReadCacheStats.AccountCacheMiss) + storageCacheHitMeter.Mark(s.StateReadCacheStats.StorageCacheHit) + storageCacheMissMeter.Mark(s.StateReadCacheStats.StorageCacheMiss) +} + +// logSlow prints the detailed execution statistics if the block is regarded as slow. +func (s *ExecuteStats) logSlow(block *types.Block, slowBlockThreshold time.Duration) { + if slowBlockThreshold == 0 { + return + } + if s.TotalTime < slowBlockThreshold { + return + } + msg := fmt.Sprintf(` +########## SLOW BLOCK ######### +Block: %v (%#x) txs: %d, mgasps: %.2f, elapsed: %v + +EVM execution: %v +Validation: %v +Account read: %v(%d) +Storage read: %v(%d) +Account hash: %v +Storage hash: %v +DB commit: %v +Block write: %v + +%s +############################## +`, block.Number(), block.Hash(), len(block.Transactions()), s.MgasPerSecond, common.PrettyDuration(s.TotalTime), + common.PrettyDuration(s.Execution), common.PrettyDuration(s.Validation+s.CrossValidation), + common.PrettyDuration(s.AccountReads), s.AccountLoaded, + common.PrettyDuration(s.StorageReads), s.StorageLoaded, + common.PrettyDuration(s.AccountHashes+s.AccountCommits+s.AccountUpdates), + common.PrettyDuration(s.StorageCommits+s.StorageUpdates), + common.PrettyDuration(s.TrieDBCommit+s.SnapshotCommit), common.PrettyDuration(s.BlockWrite), + s.StateReadCacheStats) + for _, line := range strings.Split(msg, "\n") { + if line == "" { + continue + } + log.Info(line) + } +} diff --git a/core/blockchain_test.go b/core/blockchain_test.go index b749798f9c..3e3053d9bf 100644 --- a/core/blockchain_test.go +++ b/core/blockchain_test.go @@ -162,12 +162,12 @@ func testBlockChainImport(chain types.Blocks, blockchain *BlockChain) error { } res, err := blockchain.processor.Process(block, statedb, vm.Config{}) if err != nil { - blockchain.reportBlock(block, res, err) + blockchain.reportBadBlock(block, res, err) return err } err = 
blockchain.validator.ValidateState(block, statedb, res, false) if err != nil { - blockchain.reportBlock(block, res, err) + blockchain.reportBadBlock(block, res, err) return err } diff --git a/core/state/reader.go b/core/state/reader.go index 93083c8ae2..21e76c5b66 100644 --- a/core/state/reader.go +++ b/core/state/reader.go @@ -18,6 +18,7 @@ package state import ( "errors" + "fmt" "sync" "sync/atomic" @@ -88,10 +89,29 @@ type Reader interface { // ReaderStats wraps the statistics of reader. type ReaderStats struct { - AccountHit int64 - AccountMiss int64 - StorageHit int64 - StorageMiss int64 + // Cache stats + AccountCacheHit int64 + AccountCacheMiss int64 + StorageCacheHit int64 + StorageCacheMiss int64 +} + +// String implements fmt.Stringer, returning string format statistics. +func (s ReaderStats) String() string { + var ( + accountCacheHitRate float64 + storageCacheHitRate float64 + ) + if s.AccountCacheHit > 0 { + accountCacheHitRate = float64(s.AccountCacheHit) / float64(s.AccountCacheHit+s.AccountCacheMiss) * 100 + } + if s.StorageCacheHit > 0 { + storageCacheHitRate = float64(s.StorageCacheHit) / float64(s.StorageCacheHit+s.StorageCacheMiss) * 100 + } + msg := fmt.Sprintf("Reader statistics\n") + msg += fmt.Sprintf("account: hit: %d, miss: %d, rate: %.2f\n", s.AccountCacheHit, s.AccountCacheMiss, accountCacheHitRate) + msg += fmt.Sprintf("storage: hit: %d, miss: %d, rate: %.2f\n", s.StorageCacheHit, s.StorageCacheMiss, storageCacheHitRate) + return msg } // ReaderWithStats wraps the additional method to retrieve the reader statistics from. 
@@ -544,10 +564,11 @@ func (r *readerWithCache) Storage(addr common.Address, slot common.Hash) (common type readerWithCacheStats struct { *readerWithCache - accountHit atomic.Int64 - accountMiss atomic.Int64 - storageHit atomic.Int64 - storageMiss atomic.Int64 + + accountCacheHit atomic.Int64 + accountCacheMiss atomic.Int64 + storageCacheHit atomic.Int64 + storageCacheMiss atomic.Int64 } // newReaderWithCacheStats constructs the reader with additional statistics tracked. @@ -567,9 +588,9 @@ func (r *readerWithCacheStats) Account(addr common.Address) (*types.StateAccount return nil, err } if incache { - r.accountHit.Add(1) + r.accountCacheHit.Add(1) } else { - r.accountMiss.Add(1) + r.accountCacheMiss.Add(1) } return account, nil } @@ -585,9 +606,9 @@ func (r *readerWithCacheStats) Storage(addr common.Address, slot common.Hash) (c return common.Hash{}, err } if incache { - r.storageHit.Add(1) + r.storageCacheHit.Add(1) } else { - r.storageMiss.Add(1) + r.storageCacheMiss.Add(1) } return value, nil } @@ -595,9 +616,9 @@ func (r *readerWithCacheStats) Storage(addr common.Address, slot common.Hash) (c // GetStats implements ReaderWithStats, returning the statistics of state reader. 
func (r *readerWithCacheStats) GetStats() ReaderStats { return ReaderStats{ - AccountHit: r.accountHit.Load(), - AccountMiss: r.accountMiss.Load(), - StorageHit: r.storageHit.Load(), - StorageMiss: r.storageMiss.Load(), + AccountCacheHit: r.accountCacheHit.Load(), + AccountCacheMiss: r.accountCacheMiss.Load(), + StorageCacheHit: r.storageCacheHit.Load(), + StorageCacheMiss: r.storageCacheMiss.Load(), } } diff --git a/core/state/statedb.go b/core/state/statedb.go index 364bc40850..73d4af7dcf 100644 --- a/core/state/statedb.go +++ b/core/state/statedb.go @@ -141,10 +141,11 @@ type StateDB struct { witnessStats *stateless.WitnessStats // Measurements gathered during execution for debugging purposes - AccountReads time.Duration - AccountHashes time.Duration - AccountUpdates time.Duration - AccountCommits time.Duration + AccountReads time.Duration + AccountHashes time.Duration + AccountUpdates time.Duration + AccountCommits time.Duration + StorageReads time.Duration StorageUpdates time.Duration StorageCommits time.Duration diff --git a/eth/api_debug.go b/eth/api_debug.go index 892e103213..db1b842e90 100644 --- a/eth/api_debug.go +++ b/eth/api_debug.go @@ -499,17 +499,14 @@ func (api *DebugAPI) ExecutionWitness(bn rpc.BlockNumber) (*stateless.ExtWitness if err != nil { return &stateless.ExtWitness{}, fmt.Errorf("block number %v not found", bn) } - parent := bc.GetHeader(block.ParentHash(), block.NumberU64()-1) if parent == nil { return &stateless.ExtWitness{}, fmt.Errorf("block number %v found, but parent missing", bn) } - result, err := bc.ProcessBlock(parent.Root, block, false, true) if err != nil { return nil, err } - return result.Witness().ToExtWitness(), nil } @@ -519,16 +516,13 @@ func (api *DebugAPI) ExecutionWitnessByHash(hash common.Hash) (*stateless.ExtWit if block == nil { return &stateless.ExtWitness{}, fmt.Errorf("block hash %x not found", hash) } - parent := bc.GetHeader(block.ParentHash(), block.NumberU64()-1) if parent == nil { return 
&stateless.ExtWitness{}, fmt.Errorf("block number %x found, but parent missing", hash) } - result, err := bc.ProcessBlock(parent.Root, block, false, true) if err != nil { return nil, err } - return result.Witness().ToExtWitness(), nil } diff --git a/eth/backend.go b/eth/backend.go index 8509561822..95ae9d4a41 100644 --- a/eth/backend.go +++ b/eth/backend.go @@ -244,6 +244,7 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) { // - DATADIR/triedb/verkle.journal TrieJournalDirectory: stack.ResolvePath("triedb"), StateSizeTracking: config.EnableStateSizeTracking, + SlowBlockThreshold: config.SlowBlockThreshold, } ) if config.VMTrace != "" { diff --git a/eth/ethconfig/config.go b/eth/ethconfig/config.go index c4a0956b3b..d6ed2c2576 100644 --- a/eth/ethconfig/config.go +++ b/eth/ethconfig/config.go @@ -72,6 +72,7 @@ var Defaults = Config{ RPCTxFeeCap: 1, // 1 ether TxSyncDefaultTimeout: 20 * time.Second, TxSyncMaxTimeout: 1 * time.Minute, + SlowBlockThreshold: time.Second * 2, } //go:generate go run github.com/fjl/gencodec -type Config -formats toml -out gen_config.go @@ -118,6 +119,10 @@ type Config struct { // presence of these blocks for every new peer connection. RequiredBlocks map[uint64]common.Hash `toml:"-"` + // SlowBlockThreshold is the block execution time threshold beyond which + // detailed statistics will be logged (0 means disable). 
+ SlowBlockThreshold time.Duration `toml:",omitempty"` + // Database options SkipBcVersionCheck bool `toml:"-"` DatabaseHandles int `toml:"-"` diff --git a/eth/ethconfig/gen_config.go b/eth/ethconfig/gen_config.go index 6f18dc34c5..97c5db3ecd 100644 --- a/eth/ethconfig/gen_config.go +++ b/eth/ethconfig/gen_config.go @@ -33,6 +33,7 @@ func (c Config) MarshalTOML() (interface{}, error) { StateHistory uint64 `toml:",omitempty"` StateScheme string `toml:",omitempty"` RequiredBlocks map[uint64]common.Hash `toml:"-"` + SlowBlockThreshold time.Duration `toml:",omitempty"` SkipBcVersionCheck bool `toml:"-"` DatabaseHandles int `toml:"-"` DatabaseCache int @@ -82,6 +83,7 @@ func (c Config) MarshalTOML() (interface{}, error) { enc.StateHistory = c.StateHistory enc.StateScheme = c.StateScheme enc.RequiredBlocks = c.RequiredBlocks + enc.SlowBlockThreshold = c.SlowBlockThreshold enc.SkipBcVersionCheck = c.SkipBcVersionCheck enc.DatabaseHandles = c.DatabaseHandles enc.DatabaseCache = c.DatabaseCache @@ -135,6 +137,7 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error { StateHistory *uint64 `toml:",omitempty"` StateScheme *string `toml:",omitempty"` RequiredBlocks map[uint64]common.Hash `toml:"-"` + SlowBlockThreshold *time.Duration `toml:",omitempty"` SkipBcVersionCheck *bool `toml:"-"` DatabaseHandles *int `toml:"-"` DatabaseCache *int @@ -219,6 +222,9 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error { if dec.RequiredBlocks != nil { c.RequiredBlocks = dec.RequiredBlocks } + if dec.SlowBlockThreshold != nil { + c.SlowBlockThreshold = *dec.SlowBlockThreshold + } if dec.SkipBcVersionCheck != nil { c.SkipBcVersionCheck = *dec.SkipBcVersionCheck }