go-ethereum/trie/bintrie/stem_node.go
CPerezz 61bfacc52f
Some checks are pending
/ Linux Build (push) Waiting to run
/ Linux Build (arm) (push) Waiting to run
/ Keeper Build (push) Waiting to run
/ Windows Build (push) Waiting to run
/ Docker Image (push) Waiting to run
trie/bintrie: skip clean nodes in CollectNodes to reduce commit write amplification (#34754)
## Problem

`BinaryTrie.Commit` unconditionally walked every resolved in-memory node
and flushed it into the `NodeSet`, producing one Pebble write per
resolved internal + stem node on every block — even when the node's
on-disk blob was bitwise identical to the previous commit. On a warm
400M-state workload this meant tens of thousands of redundant 65-byte
writes per block, compounding Pebble compaction pressure on every
commit.

The existing `mustRecompute` flag tracks *hash* staleness, not
*disk-blob* staleness: after `Hash()` completes, `mustRecompute` is
cleared even though the fresh blob has not been persisted. It is
therefore insufficient for a skip-flush optimization.

## Fix

Mirror the MPT committer pattern (`trie/committer.go:51-56`) by adding a
`dirty` flag on `InternalNode` and `StemNode` with the semantics *the
on-disk blob is stale*. The flag is:

- set to `true` wherever the node is created or structurally modified
(the same call sites that already set `mustRecompute = true`);
- set to `false` only after the node has been passed to the `flushfn`
inside `CollectNodes`;
- left `false` on nodes produced by `DeserializeNodeWithHash`, matching
the *loaded from disk, already persisted* semantics.

`CollectNodes` short-circuits on `!dirty` subtrees. The propagation
invariant (an ancestor of any dirty node is itself dirty) is already
maintained by the existing `InsertValuesAtStem` / `Insert` paths, which
now mirror every `mustRecompute = true` setter with a `dirty = true`
setter.

## Benchmark

New `BenchmarkCollectNodes_SparseWrite` measures commit cost when only
one leaf changes between blocks — the common case for state updates.
10,000-stem trie, one-leaf modification + Commit per iteration, Apple M4
Pro:

| | before | after | delta |
|---|---|---|---|
| time / op | 12,653,000 ns | 7,336 ns | **~1,725×** |
| bytes / op | 107,224,740 B | 37,774 B | **~2,839×** |
| allocs / op | 80,953 | 134 | **~604×** |

End-to-end impact on a real workload depends on the
resolved-footprint-to-dirty-path ratio; the new
`TestBinaryTrieCommitIncremental` provides a structural regression guard
(asserts that a Commit following a single-leaf modification flushes a
root-to-leaf path, not the whole tree).

---

Found all of this while bloating my #34706 DB to run some benchmarks,
and noticed we were spending a lot of time on hashing.
Hope this helps performance a bit. I will rebase the flat-state PR on top
of this once merged.
2026-04-18 11:42:58 +02:00

252 lines
6.6 KiB
Go

// Copyright 2025 go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package bintrie
import (
"bytes"
"errors"
"fmt"
"slices"
"github.com/ethereum/go-ethereum/common"
)
// StemNode represents a group of `NodeWith` values sharing the same stem.
//
// The two bookkeeping flags track different kinds of staleness:
// mustRecompute means the cached hash is out of date, while dirty means
// the serialized on-disk blob is out of date and must be flushed by the
// next CollectNodes pass. Nodes loaded from disk start with both flags
// false (already hashed, already persisted).
type StemNode struct {
	Stem          []byte      // Stem path to get to StemNodeWidth values
	Values        [][]byte    // All values, indexed by the last byte of the key.
	depth         int         // Depth of the node
	mustRecompute bool        // true if the hash needs to be recomputed
	dirty         bool        // true if the node's on-disk blob is stale (needs flush)
	hash          common.Hash // cached hash when mustRecompute == false
}
// Get retrieves the value for the given key.
//
// When the key's stem does not match this node's stem, (nil, nil) is
// returned rather than an error.
func (bt *StemNode) Get(key []byte, _ NodeResolverFn) ([]byte, error) {
	if bytes.Equal(bt.Stem, key[:StemSize]) {
		return bt.Values[key[StemSize]], nil
	}
	return nil, nil
}
// Insert inserts a new key-value pair into the node.
//
// If the key shares this node's stem, the value slot is overwritten in
// place. Otherwise the stem node is split: a new InternalNode is created
// at the current depth, this node is re-parented one level deeper, and
// the insertion continues on the side selected by the key's next bit.
//
// The value length is validated up front so an invalid value can never
// be stored regardless of which branch performs the write (previously
// only the same-stem path checked it), and so the tree is never left
// partially mutated when the error is reported.
func (bt *StemNode) Insert(key []byte, value []byte, _ NodeResolverFn, depth int) (BinaryNode, error) {
	if len(value) != HashSize {
		return bt, errors.New("invalid insertion: value length")
	}
	if !bytes.Equal(bt.Stem, key[:StemSize]) {
		// Key diverges from this stem: split by pushing bt one level
		// down under a fresh internal node.
		bitStem := bt.Stem[bt.depth/8] >> (7 - (bt.depth % 8)) & 1
		n := &InternalNode{depth: bt.depth, mustRecompute: true, dirty: true}
		bt.depth++
		// bt is re-parented under n and sits at a new path — rewrite its blob.
		bt.mustRecompute = true
		bt.dirty = true
		var child, other *BinaryNode
		if bitStem == 0 {
			n.left = bt
			child = &n.left
			other = &n.right
		} else {
			n.right = bt
			child = &n.right
			other = &n.left
		}
		bitKey := key[n.depth/8] >> (7 - (n.depth % 8)) & 1
		if bitKey == bitStem {
			// The key still follows bt's side: recurse one level deeper.
			var err error
			*child, err = (*child).Insert(key, value, nil, depth+1)
			if err != nil {
				return n, fmt.Errorf("insert error: %w", err)
			}
			*other = Empty{}
		} else {
			// The key branches off here: materialize its stem on the
			// other side as a fresh, dirty leaf group.
			var values [StemNodeWidth][]byte
			values[key[StemSize]] = value
			*other = &StemNode{
				Stem:          slices.Clone(key[:StemSize]),
				Values:        values[:],
				depth:         depth + 1,
				mustRecompute: true,
				dirty:         true,
			}
		}
		return n, nil
	}
	// Same stem: overwrite the slot and mark hash + blob stale.
	bt.Values[key[StemSize]] = value
	bt.mustRecompute = true
	bt.dirty = true
	return bt, nil
}
// Copy creates a deep copy of the node.
//
// The stem and every value slice are cloned so that mutations on the
// copy never alias the original's storage; scalar bookkeeping fields
// (depth, hash, flags) carry over as-is.
func (bt *StemNode) Copy() BinaryNode {
	dup := &StemNode{
		Stem:          slices.Clone(bt.Stem),
		depth:         bt.depth,
		hash:          bt.hash,
		mustRecompute: bt.mustRecompute,
		dirty:         bt.dirty,
	}
	var values [StemNodeWidth][]byte
	for i := range bt.Values {
		values[i] = slices.Clone(bt.Values[i])
	}
	dup.Values = values[:]
	return dup
}
// GetHeight returns the height of the node. A stem node never has
// BinaryNode children, so its height is the constant 1.
func (bt *StemNode) GetHeight() int {
	return 1
}
// Hash returns the hash of the node.
//
// The cached hash is returned unless mustRecompute is set. Otherwise the
// node is rehashed as a small binary Merkle tree: each non-nil value is
// hashed into its leaf slot, the StemNodeWidth leaves are folded
// pairwise over 8 levels (a pair of zero hashes collapses to the zero
// hash instead of being hashed), and the resulting root is hashed
// together with the stem and a zero marker byte.
//
// Note that only mustRecompute is cleared here; the dirty flag stays set
// until CollectNodes flushes the node to disk.
func (bt *StemNode) Hash() common.Hash {
	if !bt.mustRecompute {
		return bt.hash
	}
	var data [StemNodeWidth]common.Hash
	h := newSha256()
	defer returnSha256(h)
	// Leaf level: hash each present value into its slot. Absent values
	// keep the zero hash, which the fold below treats as "empty".
	for i, v := range bt.Values {
		if v != nil {
			h.Reset()
			h.Write(v)
			// Sum appends into data[i][:0], writing the digest in place.
			h.Sum(data[i][:0])
		}
	}
	h.Reset()
	// Fold the leaves pairwise, level by level, reusing the front of the
	// data array to hold each level's results.
	for level := 1; level <= 8; level++ {
		for i := range StemNodeWidth / (1 << level) {
			h.Reset()
			// Two empty children collapse to the empty hash rather than
			// hashing a pair of zero digests.
			if data[i*2] == (common.Hash{}) && data[i*2+1] == (common.Hash{}) {
				data[i] = common.Hash{}
				continue
			}
			h.Write(data[i*2][:])
			h.Write(data[i*2+1][:])
			data[i] = common.Hash(h.Sum(nil))
		}
	}
	h.Reset()
	// The final digest commits to the stem itself, a zero marker byte,
	// and the root of the value tree.
	h.Write(bt.Stem)
	h.Write([]byte{0})
	h.Write(data[0][:])
	bt.hash = common.BytesToHash(h.Sum(nil))
	bt.mustRecompute = false
	return bt.hash
}
// CollectNodes flushes the stem via the collector when dirty; clean stems
// are skipped.
func (bt *StemNode) CollectNodes(path []byte, flush NodeFlushFn) error {
	if bt.dirty {
		flush(path, bt)
		// The blob is persisted now; skip this node until it is modified
		// again.
		bt.dirty = false
	}
	return nil
}
// GetValuesAtStem retrieves the group of values located at the given stem key.
//
// It returns (nil, nil) when the requested stem is not the one stored
// in this node.
func (bt *StemNode) GetValuesAtStem(stem []byte, _ NodeResolverFn) ([][]byte, error) {
	if bytes.Equal(bt.Stem, stem) {
		return bt.Values[:], nil
	}
	return nil, nil
}
// InsertValuesAtStem inserts a full value group at the given stem in the internal node.
// Already-existing values will be overwritten.
func (bt *StemNode) InsertValuesAtStem(key []byte, values [][]byte, _ NodeResolverFn, depth int) (BinaryNode, error) {
	if !bytes.Equal(bt.Stem, key[:StemSize]) {
		// The stems differ: split this node by pushing it one level down
		// under a new internal node, then route the incoming group to the
		// side selected by the key's next bit.
		bitStem := bt.Stem[bt.depth/8] >> (7 - (bt.depth % 8)) & 1
		n := &InternalNode{depth: bt.depth, mustRecompute: true, dirty: true}
		bt.depth++
		// bt is re-parented under n and sits at a new path — rewrite its blob.
		bt.mustRecompute = true
		bt.dirty = true
		var child, other *BinaryNode
		if bitStem == 0 {
			n.left = bt
			child = &n.left
			other = &n.right
		} else {
			n.right = bt
			child = &n.right
			other = &n.left
		}
		bitKey := key[n.depth/8] >> (7 - (n.depth % 8)) & 1
		if bitKey == bitStem {
			// Both stems continue on the same side: recurse until they
			// diverge at a deeper bit.
			var err error
			*child, err = (*child).InsertValuesAtStem(key, values, nil, depth+1)
			if err != nil {
				return n, fmt.Errorf("insert error: %w", err)
			}
			*other = Empty{}
		} else {
			// The stems diverge here: the incoming group becomes a new
			// stem node on the opposite side.
			// NOTE(review): unlike Insert/Copy, the values slice is
			// stored as-is (not cloned) — the caller must not mutate it
			// afterwards; confirm this aliasing is intended.
			*other = &StemNode{
				Stem:          slices.Clone(key[:StemSize]),
				Values:        values,
				depth:         n.depth + 1,
				mustRecompute: true,
				dirty:         true,
			}
		}
		return n, nil
	}
	// same stem, just merge the two value lists: only non-nil incoming
	// entries overwrite, and each write marks the node stale for both
	// rehash and reflush.
	for i, v := range values {
		if v != nil {
			bt.Values[i] = v
			bt.mustRecompute = true
			bt.dirty = true
		}
	}
	return bt, nil
}
// toDot emits Graphviz statements for this node: a labeled stem vertex,
// an edge from its parent, and one labeled vertex + edge per populated
// value slot.
func (bt *StemNode) toDot(parent, path string) string {
	me := "stem" + path
	ret := fmt.Sprintf("%s [label=\"stem=%x c=%x\"]\n", me, bt.Stem, bt.Hash())
	ret += fmt.Sprintf(" %s -> %s\n", parent, me)
	for i, v := range bt.Values {
		if v == nil {
			continue
		}
		ret += fmt.Sprintf("%s%x [label=\"%x\"]\n", me, i, v)
		ret += fmt.Sprintf("%s -> %s%x\n", me, me, i)
	}
	return ret
}
// Key returns the full key for the given index.
//
// The result is the stem zero-padded to HashSize bytes with the byte at
// position StemSize set to the slot index i.
func (bt *StemNode) Key(i int) []byte {
	key := make([]byte, HashSize)
	copy(key, bt.Stem)
	key[StemSize] = byte(i)
	return key
}