core, miner, trie: relocate witness stats (#34106)
Some checks are pending
/ Linux Build (push) Waiting to run
/ Linux Build (arm) (push) Waiting to run
/ Keeper Build (push) Waiting to run
/ Windows Build (push) Waiting to run
/ Docker Image (push) Waiting to run

This PR relocates the witness statistics into the witness itself, making
it more self-contained.
This commit is contained in:
rjl493456442 2026-03-28 00:06:46 +08:00 committed by GitHub
parent acdd139717
commit c3467dd8b5
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
6 changed files with 63 additions and 50 deletions

View file

@@ -2170,24 +2170,18 @@ func (bc *BlockChain) ProcessBlock(ctx context.Context, parentRoot common.Hash,
// If we are past Byzantium, enable prefetching to pull in trie node paths
// while processing transactions. Before Byzantium the prefetcher is mostly
// useless due to the intermediate root hashing after each transaction.
var (
witness *stateless.Witness
witnessStats *stateless.WitnessStats
)
var witness *stateless.Witness
if bc.chainConfig.IsByzantium(block.Number()) {
// Generate witnesses either if we're self-testing, or if it's the
// only block being inserted. A bit crude, but witnesses are huge,
// so we refuse to make an entire chain of them.
if config.StatelessSelfValidation || config.MakeWitness {
witness, err = stateless.NewWitness(block.Header(), bc)
witness, err = stateless.NewWitness(block.Header(), bc, config.EnableWitnessStats)
if err != nil {
return nil, err
}
if config.EnableWitnessStats {
witnessStats = stateless.NewWitnessStats()
}
}
statedb.StartPrefetcher("chain", witness, witnessStats)
statedb.StartPrefetcher("chain", witness)
defer statedb.StopPrefetcher()
}
@@ -2306,8 +2300,8 @@ func (bc *BlockChain) ProcessBlock(ctx context.Context, parentRoot common.Hash,
stats.BlockWrite = time.Since(wstart) - max(statedb.AccountCommits, statedb.StorageCommits) /* concurrent */ - statedb.DatabaseCommits
}
// Report the collected witness statistics
if witnessStats != nil {
witnessStats.ReportMetrics(block.NumberU64())
if witness != nil {
witness.ReportMetrics(block.NumberU64())
}
elapsed := time.Since(startTime) + 1 // prevent zero division
stats.TotalTime = elapsed

View file

@@ -135,8 +135,7 @@ type StateDB struct {
journal *journal
// State witness if cross validation is needed
witness *stateless.Witness
witnessStats *stateless.WitnessStats
witness *stateless.Witness
// Measurements gathered during execution for debugging purposes
AccountReads time.Duration
@@ -201,13 +200,12 @@ func NewWithReader(root common.Hash, db Database, reader Reader) (*StateDB, erro
// StartPrefetcher initializes a new trie prefetcher to pull in nodes from the
// state trie concurrently while the state is mutated so that when we reach the
// commit phase, most of the needed data is already hot.
func (s *StateDB) StartPrefetcher(namespace string, witness *stateless.Witness, witnessStats *stateless.WitnessStats) {
func (s *StateDB) StartPrefetcher(namespace string, witness *stateless.Witness) {
// Terminate any previously running prefetcher
s.StopPrefetcher()
// Enable witness collection if requested
s.witness = witness
s.witnessStats = witnessStats
// With the switch to the Proof-of-Stake consensus algorithm, block production
// rewards are now handled at the consensus layer. Consequently, a block may
@@ -913,7 +911,7 @@ func (s *StateDB) IntermediateRoot(deleteEmptyObjects bool) common.Hash {
// If witness building is enabled and the state object has a trie,
// gather the witnesses for its specific storage trie
if s.witness != nil && obj.trie != nil {
s.witness.AddState(obj.trie.Witness())
s.witness.AddState(obj.trie.Witness(), obj.addrHash())
}
return nil
})
@@ -930,17 +928,9 @@ func (s *StateDB) IntermediateRoot(deleteEmptyObjects bool) common.Hash {
continue
}
if trie := obj.getPrefetchedTrie(); trie != nil {
witness := trie.Witness()
s.witness.AddState(witness)
if s.witnessStats != nil {
s.witnessStats.Add(witness, obj.addrHash())
}
s.witness.AddState(trie.Witness(), obj.addrHash())
} else if obj.trie != nil {
witness := obj.trie.Witness()
s.witness.AddState(witness)
if s.witnessStats != nil {
s.witnessStats.Add(witness, obj.addrHash())
}
s.witness.AddState(obj.trie.Witness(), obj.addrHash())
}
}
// Pull in only-read and non-destructed trie witnesses
@@ -954,17 +944,9 @@ func (s *StateDB) IntermediateRoot(deleteEmptyObjects bool) common.Hash {
continue
}
if trie := obj.getPrefetchedTrie(); trie != nil {
witness := trie.Witness()
s.witness.AddState(witness)
if s.witnessStats != nil {
s.witnessStats.Add(witness, obj.addrHash())
}
s.witness.AddState(trie.Witness(), obj.addrHash())
} else if obj.trie != nil {
witness := obj.trie.Witness()
s.witness.AddState(witness)
if s.witnessStats != nil {
s.witnessStats.Add(witness, obj.addrHash())
}
s.witness.AddState(obj.trie.Witness(), obj.addrHash())
}
}
}
@@ -1037,11 +1019,7 @@ func (s *StateDB) IntermediateRoot(deleteEmptyObjects bool) common.Hash {
// If witness building is enabled, gather the account trie witness
if s.witness != nil {
witness := s.trie.Witness()
s.witness.AddState(witness)
if s.witnessStats != nil {
s.witnessStats.Add(witness, common.Hash{})
}
s.witness.AddState(s.trie.Witness(), common.Hash{})
}
return hash
}

View file

@@ -54,6 +54,13 @@ func NewWitnessStats() *WitnessStats {
}
}
// copy returns a deep copy of the witness statistics so the clone can be
// mutated independently of the receiver.
//
// NOTE(review): both trie stat trackers are assumed to be non-nil here; a nil
// *LevelStats would panic inside Copy — confirm callers only clone
// initialized stats.
func (s *WitnessStats) copy() *WitnessStats {
	account := s.accountTrie.Copy()
	storage := s.storageTrie.Copy()
	return &WitnessStats{
		accountTrie: account,
		storageTrie: storage,
	}
}
func (s *WitnessStats) init() {
if s.accountTrie == nil {
s.accountTrie = trie.NewLevelStats()

View file

@@ -42,12 +42,13 @@ type Witness struct {
Codes map[string]struct{} // Set of bytecodes ran or accessed
State map[string]struct{} // Set of MPT state trie nodes (account and storage together)
chain HeaderReader // Chain reader to convert block hash ops to header proofs
lock sync.Mutex // Lock to allow concurrent state insertions
chain HeaderReader // Chain reader to convert block hash ops to header proofs
stats *WitnessStats // Optional statistics collector
lock sync.Mutex // Lock to allow concurrent state insertions
}
// NewWitness creates an empty witness ready for population.
func NewWitness(context *types.Header, chain HeaderReader) (*Witness, error) {
func NewWitness(context *types.Header, chain HeaderReader, enableStats bool) (*Witness, error) {
// When building witnesses, retrieve the parent header, which will *always*
// be included to act as a trustless pre-root hash container
var headers []*types.Header
@@ -59,13 +60,17 @@ func NewWitness(context *types.Header, chain HeaderReader) (*Witness, error) {
headers = append(headers, parent)
}
// Create the witness with a reconstructed gutted out block
return &Witness{
w := &Witness{
context: context,
Headers: headers,
Codes: make(map[string]struct{}),
State: make(map[string]struct{}),
chain: chain,
}, nil
}
if enableStats {
w.stats = NewWitnessStats()
}
return w, nil
}
// AddBlockHash adds a "blockhash" to the witness with the designated offset from
@@ -87,8 +92,11 @@ func (w *Witness) AddCode(code []byte) {
w.Codes[string(code)] = struct{}{}
}
// AddState inserts a batch of MPT trie nodes into the witness.
func (w *Witness) AddState(nodes map[string][]byte) {
// AddState inserts a batch of MPT trie nodes into the witness. The owner
// identifies which trie the nodes belong to: the zero hash for the account
// trie, or the hashed address for a storage trie. This is used for optional
// statistics collection.
func (w *Witness) AddState(nodes map[string][]byte, owner common.Hash) {
if len(nodes) == 0 {
return
}
@@ -98,6 +106,17 @@ func (w *Witness) AddState(nodes map[string][]byte) {
for _, value := range nodes {
w.State[string(value)] = struct{}{}
}
if w.stats != nil {
w.stats.Add(nodes, owner)
}
}
// ReportMetrics reports the collected statistics to the global metrics
// registry. Statistics collection is optional: when it was not enabled at
// witness construction time (w.stats is nil), the call is a no-op.
func (w *Witness) ReportMetrics(blockNumber uint64) {
	if w.stats != nil {
		w.stats.ReportMetrics(blockNumber)
	}
}
func (w *Witness) AddKey() {
@@ -113,6 +132,9 @@ func (w *Witness) Copy() *Witness {
State: maps.Clone(w.State),
chain: w.chain,
}
if w.stats != nil {
cpy.stats = w.stats.copy()
}
if w.context != nil {
cpy.context = types.CopyHeader(w.context)
}

View file

@@ -330,12 +330,12 @@ func (miner *Miner) makeEnv(parent *types.Header, header *types.Header, coinbase
}
var bundle *stateless.Witness
if witness {
bundle, err = stateless.NewWitness(header, miner.chain)
bundle, err = stateless.NewWitness(header, miner.chain, false)
if err != nil {
return nil, err
}
}
state.StartPrefetcher("miner", bundle, nil)
state.StartPrefetcher("miner", bundle)
// Note the passed coinbase may be different with header.Coinbase.
return &environment{
signer: types.MakeSigner(miner.chainConfig, header.Number, header.Time),

View file

@@ -36,6 +36,18 @@ func NewLevelStats() *LevelStats {
return &LevelStats{}
}
// Copy returns a deep copy of the statistics. Each per-level counter is
// snapshotted with an atomic load and written into a fresh LevelStats, so the
// clone shares no state with the receiver.
func (s *LevelStats) Copy() *LevelStats {
	out := NewLevelStats()
	for i := range s.level {
		src, dst := &s.level[i], &out.level[i]
		dst.short.Store(src.short.Load())
		dst.full.Store(src.full.Load())
		dst.value.Store(src.value.Load())
		dst.size.Store(src.size.Load())
	}
	return out
}
// MaxDepth iterates each level and finds the deepest level with at least one
// trie node.
func (s *LevelStats) MaxDepth() int {