go-ethereum/trie/bintrie/binary_node.go
CPerezz 61bfacc52f
trie/bintrie: skip clean nodes in CollectNodes to reduce commit write amplification (#34754)
## Problem

`BinaryTrie.Commit` unconditionally walked every resolved in-memory node
and flushed it into the `NodeSet`, producing one Pebble write per
resolved internal and stem node on every block — even when the node's
on-disk blob was bitwise identical to the one written by the previous
commit. On a warm 400M-state workload this meant tens of thousands of
redundant 65-byte writes per block, compounding Pebble compaction
pressure on every commit.

The existing `mustRecompute` flag tracks *hash* staleness, not
*disk-blob* staleness: after `Hash()` completes, `mustRecompute` is
cleared even though the fresh blob has not been persisted. It is
therefore insufficient for a skip-flush optimization.
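
To see why, trace the two flags across a single-leaf update (an
illustrative timeline against the `BinaryNode` interface in the file
below; `n`, `key`, `value`, `resolver`, `path`, and `flushfn` are
placeholders):

```go
n, _ = n.Insert(key, value, resolver, 0) // structural change: mustRecompute = true, dirty = true
_ = n.Hash()                             // hash refreshed:    mustRecompute = false, dirty still true
_ = n.CollectNodes(path, flushfn)        // blob flushed:      dirty = false
```

Between the second and third step, `mustRecompute` already reads as
clean while the on-disk blob is stale; that is exactly the window a
skip-flush check cannot trust.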

## Fix

Mirror the MPT committer pattern (`trie/committer.go:51-56`) by adding a
`dirty` flag on `InternalNode` and `StemNode` with the semantics *the
on-disk blob is stale*. The flag is:

- set to `true` wherever the node is created or structurally modified
(the same call sites that already set `mustRecompute = true`);
- set to `false` only after the node has been passed to the `flushfn`
inside `CollectNodes`;
- left `false` on nodes produced by `DeserializeNodeWithHash`, matching
the *loaded from disk, already persisted* semantics.

`CollectNodes` short-circuits on `!dirty` subtrees. The propagation
invariant (an ancestor of any dirty node is itself dirty) is already
maintained by the existing `InsertValuesAtStem` / `Insert` paths, which
now mirror every `mustRecompute = true` setter with a `dirty = true`
setter.
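
A minimal sketch of the short-circuit, against the `BinaryNode`
interface in the file below (the `dirty` field follows this PR's
description; the per-level path encoding is illustrative, and
`HashedNode`/`Empty` children and error paths are elided):

```go
func (n *InternalNode) CollectNodes(path []byte, flush NodeFlushFn) error {
	if !n.dirty {
		return nil // clean subtree: on-disk blobs already match, skip entirely
	}
	for i, child := range []BinaryNode{n.left, n.right} {
		childPath := append(append([]byte(nil), path...), byte(i))
		if err := child.CollectNodes(childPath, flush); err != nil {
			return err
		}
	}
	flush(path, n)
	n.dirty = false // the fresh blob has been handed to the committer
	return nil
}
```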

## Benchmark

New `BenchmarkCollectNodes_SparseWrite` measures commit cost when only
one leaf changes between blocks — the common case for state updates.
10,000-stem trie, one-leaf modification + Commit per iteration, Apple M4
Pro:

| metric | before | after | delta |
|---|---|---|---|
| time / op | 12,653,000 ns | 7,336 ns | **~1,725×** |
| bytes / op | 107,224,740 B | 37,774 B | **~2,839×** |
| allocs / op | 80,953 | 134 | **~604×** |
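
The benchmark's shape, roughly (the function name is from this PR;
`buildTrie`, `key`, `value`, and the `Update` call are stand-ins for
the real harness):

```go
func BenchmarkCollectNodes_SparseWrite(b *testing.B) {
	tr := buildTrie(10_000) // stand-in helper: a trie with 10,000 stems
	b.ResetTimer()
	b.ReportAllocs()
	for i := 0; i < b.N; i++ {
		tr.Update(key, value) // touch a single leaf
		tr.Commit()           // with the dirty flag, only the touched path flushes
	}
}
```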

End-to-end impact on a real workload depends on the
resolved-footprint-to-dirty-path ratio; the new
`TestBinaryTrieCommitIncremental` provides a structural regression guard
(asserts that a Commit following a single-leaf modification flushes a
root-to-leaf path, not the whole tree).
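
The guard can be phrased directly against `CollectNodes` (a sketch;
`depth` is a hypothetical bound, and the real test may count
differently):

```go
var flushed int
countFn := func(path []byte, n BinaryNode) { flushed++ }
if err := root.CollectNodes(nil, countFn); err != nil {
	t.Fatal(err)
}
if flushed > depth+1 { // at most one internal node per level, plus the stem
	t.Fatalf("flushed %d nodes; want a single root-to-leaf path", flushed)
}
```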

---

Found all of this while bloating my #34706 DB to run some benchmarks,
and saw we were spending A LOT OF TIME on hashing. Hope this helps perf
a bit. I'll rebase the flat-state PR on top of this once merged.
2026-04-18 11:42:58 +02:00


// Copyright 2025 go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package bintrie

import (
	"errors"

	"github.com/ethereum/go-ethereum/common"
)

type (
	// NodeFlushFn is the callback CollectNodes uses to hand a node and its
	// path to the committer.
	NodeFlushFn func([]byte, BinaryNode)

	// NodeResolverFn resolves the node with the given hash at the given
	// path into its serialized form.
	NodeResolverFn func([]byte, common.Hash) ([]byte, error)
)

// zero is the zero value for a 32-byte array.
var zero [32]byte

const (
	StemNodeWidth = 256 // Number of children per leaf node
	StemSize      = 31  // Number of bytes to travel before reaching a group of leaves
	NodeTypeBytes = 1   // Size of node type prefix in serialization
	HashSize      = 32  // Size of a hash in bytes
	BitmapSize    = 32  // Size of the bitmap in a stem node
)

const (
	nodeTypeStem     = iota + 1 // Stem node, contains a stem and a bitmap of values
	nodeTypeInternal            // Internal node, contains two child hashes
)
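
// Serialized node layouts (see SerializeNode):
//
//	internal: [type:1][left hash:32][right hash:32]              = 65 bytes
//	stem:     [type:1][stem:31][bitmap:32][32 bytes per set bit] = 64 + 32*popcount(bitmap) bytes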

// BinaryNode is an interface for a binary trie node.
type BinaryNode interface {
	Get([]byte, NodeResolverFn) ([]byte, error)
	Insert([]byte, []byte, NodeResolverFn, int) (BinaryNode, error)
	Copy() BinaryNode
	Hash() common.Hash
	GetValuesAtStem([]byte, NodeResolverFn) ([][]byte, error)
	InsertValuesAtStem([]byte, [][]byte, NodeResolverFn, int) (BinaryNode, error)
	CollectNodes([]byte, NodeFlushFn) error
	toDot(parent, path string) string
	GetHeight() int
}

// SerializeNode serializes a binary trie node into a byte slice.
func SerializeNode(node BinaryNode) []byte {
	switch n := node.(type) {
	case *InternalNode:
		// InternalNode: 1 byte type + 32 bytes left hash + 32 bytes right hash
		var serialized [NodeTypeBytes + HashSize + HashSize]byte
		serialized[0] = nodeTypeInternal
		copy(serialized[1:33], n.left.Hash().Bytes())
		copy(serialized[33:65], n.right.Hash().Bytes())
		return serialized[:]
	case *StemNode:
		// StemNode: 1 byte type + 31 bytes stem + 32 bytes bitmap + 256*32 bytes values
		var serialized [NodeTypeBytes + StemSize + BitmapSize + StemNodeWidth*HashSize]byte
		serialized[0] = nodeTypeStem
		copy(serialized[NodeTypeBytes:NodeTypeBytes+StemSize], n.Stem)
		bitmap := serialized[NodeTypeBytes+StemSize : NodeTypeBytes+StemSize+BitmapSize]
		offset := NodeTypeBytes + StemSize + BitmapSize
		for i, v := range n.Values {
			if v != nil {
				bitmap[i/8] |= 1 << (7 - (i % 8))
				copy(serialized[offset:offset+HashSize], v)
				offset += HashSize
			}
		}
		// Only return the actual data, not the entire array
		return serialized[:offset]
	default:
		panic("invalid node type")
	}
}

var invalidSerializedLength = errors.New("invalid serialized node length")

// DeserializeNode deserializes a binary trie node from a byte slice. The
// hash will be recomputed from the deserialized data, and the node is
// marked dirty, i.e. not yet persisted.
func DeserializeNode(serialized []byte, depth int) (BinaryNode, error) {
	return deserializeNode(serialized, depth, common.Hash{}, true, true)
}

// DeserializeNodeWithHash deserializes a binary trie node from a byte slice,
// using the provided hash instead of recomputing it. The node is marked
// clean: it was loaded from disk and is already persisted.
func DeserializeNodeWithHash(serialized []byte, depth int, hn common.Hash) (BinaryNode, error) {
	return deserializeNode(serialized, depth, hn, false, false)
}

func deserializeNode(serialized []byte, depth int, hn common.Hash, mustRecompute, dirty bool) (BinaryNode, error) {
	if len(serialized) == 0 {
		return Empty{}, nil
	}
	switch serialized[0] {
	case nodeTypeInternal:
		if len(serialized) != 65 {
			return nil, invalidSerializedLength
		}
		return &InternalNode{
			depth:         depth,
			left:          HashedNode(common.BytesToHash(serialized[1:33])),
			right:         HashedNode(common.BytesToHash(serialized[33:65])),
			hash:          hn,
			mustRecompute: mustRecompute,
			dirty:         dirty,
		}, nil
	case nodeTypeStem:
		if len(serialized) < 64 {
			return nil, invalidSerializedLength
		}
		var values [StemNodeWidth][]byte
		bitmap := serialized[NodeTypeBytes+StemSize : NodeTypeBytes+StemSize+BitmapSize]
		offset := NodeTypeBytes + StemSize + BitmapSize
		for i := range StemNodeWidth {
			if bitmap[i/8]>>(7-(i%8))&1 == 1 {
				if len(serialized) < offset+HashSize {
					return nil, invalidSerializedLength
				}
				values[i] = serialized[offset : offset+HashSize]
				offset += HashSize
			}
		}
		return &StemNode{
			Stem:          serialized[NodeTypeBytes : NodeTypeBytes+StemSize],
			Values:        values[:],
			depth:         depth,
			hash:          hn,
			mustRecompute: mustRecompute,
			dirty:         dirty,
		}, nil
	default:
		return nil, errors.New("invalid node type")
	}
}

// ToDot converts the binary trie to a DOT language representation. Useful for debugging.
func ToDot(root BinaryNode) string {
	return root.toDot("", "")
}