This commit is contained in:
Guillaume Ballet 2026-02-25 22:57:19 +01:00 committed by GitHub
commit 20c8d07b70
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
23 changed files with 779 additions and 99 deletions

View file

@ -455,7 +455,7 @@ func BinKeys(ctx *cli.Context) error {
db := triedb.NewDatabase(rawdb.NewMemoryDatabase(), triedb.VerkleDefaults)
defer db.Close()
bt, err := genBinTrieFromAlloc(alloc, db)
bt, err := genBinTrieFromAlloc(alloc, db, db.BinTrieGroupDepth())
if err != nil {
return fmt.Errorf("error generating bt: %w", err)
}
@ -499,7 +499,7 @@ func BinTrieRoot(ctx *cli.Context) error {
db := triedb.NewDatabase(rawdb.NewMemoryDatabase(), triedb.VerkleDefaults)
defer db.Close()
bt, err := genBinTrieFromAlloc(alloc, db)
bt, err := genBinTrieFromAlloc(alloc, db, db.BinTrieGroupDepth())
if err != nil {
return fmt.Errorf("error generating bt: %w", err)
}
@ -509,8 +509,8 @@ func BinTrieRoot(ctx *cli.Context) error {
}
// TODO(@CPerezz): Should this go to `bintrie` module?
func genBinTrieFromAlloc(alloc core.GenesisAlloc, db database.NodeDatabase) (*bintrie.BinaryTrie, error) {
bt, err := bintrie.NewBinaryTrie(types.EmptyBinaryHash, db)
func genBinTrieFromAlloc(alloc core.GenesisAlloc, db database.NodeDatabase, groupDepth int) (*bintrie.BinaryTrie, error) {
bt, err := bintrie.NewBinaryTrie(types.EmptyBinaryHash, db, groupDepth)
if err != nil {
return nil, err
}

View file

@ -96,6 +96,7 @@ var (
utils.StateHistoryFlag,
utils.TrienodeHistoryFlag,
utils.TrienodeHistoryFullValueCheckpointFlag,
utils.BinTrieGroupDepthFlag,
utils.LightKDFFlag,
utils.EthRequiredBlocksFlag,
utils.LegacyWhitelistFlag, // deprecated

View file

@ -299,6 +299,12 @@ var (
Value: ethconfig.Defaults.EnableStateSizeTracking,
Category: flags.StateCategory,
}
BinTrieGroupDepthFlag = &cli.IntFlag{
Name: "bintrie.groupdepth",
Usage: "Number of levels per serialized group in binary trie (1-8, default 8). Lower values create smaller groups with more nodes.",
Value: 8,
Category: flags.StateCategory,
}
StateHistoryFlag = &cli.Uint64Flag{
Name: "history.state",
Usage: "Number of recent blocks to retain state history for, only relevant in state.scheme=path (default = 90,000 blocks, 0 = entire chain)",
@ -1815,7 +1821,7 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) {
cfg.TrienodeHistory = ctx.Int64(TrienodeHistoryFlag.Name)
}
if ctx.IsSet(TrienodeHistoryFullValueCheckpointFlag.Name) {
cfg.NodeFullValueCheckpoint = uint32(ctx.Uint(TrienodeHistoryFullValueCheckpointFlag.Name))
cfg.NodeFullValueCheckpoint = uint32(ctx.Uint(TrienodeHistoryFullValueCheckpointFlag.Name))
}
if ctx.IsSet(StateSchemeFlag.Name) {
cfg.StateScheme = ctx.String(StateSchemeFlag.Name)
@ -2433,6 +2439,7 @@ func MakeChain(ctx *cli.Context, stack *node.Node, readonly bool) (*core.BlockCh
StateHistory: ctx.Uint64(StateHistoryFlag.Name),
TrienodeHistory: ctx.Int64(TrienodeHistoryFlag.Name),
NodeFullValueCheckpoint: uint32(ctx.Uint(TrienodeHistoryFullValueCheckpointFlag.Name)),
BinTrieGroupDepth: ctx.Int(BinTrieGroupDepthFlag.Name),
// Disable transaction indexing/unindexing.
TxLookupLimit: -1,

View file

@ -172,9 +172,10 @@ type BlockChainConfig struct {
TrieNoAsyncFlush bool // Whether the asynchronous buffer flushing is disallowed
TrieJournalDirectory string // Directory path to the journal used for persisting trie data across node restarts
Preimages bool // Whether to store preimage of trie key to the disk
StateScheme string // Scheme used to store ethereum states and merkle tree nodes on top
ArchiveMode bool // Whether to enable the archive mode
Preimages bool // Whether to store preimage of trie key to the disk
StateScheme string // Scheme used to store ethereum states and merkle tree nodes on top
ArchiveMode bool // Whether to enable the archive mode
BinTrieGroupDepth int // Number of levels per serialized group in binary trie (1-8)
// Number of blocks from the chain head for which state histories are retained.
// If set to 0, all state histories across the entire chain will be retained;
@ -259,8 +260,9 @@ func (cfg BlockChainConfig) WithNoAsyncFlush(on bool) *BlockChainConfig {
// triedbConfig derives the configures for trie database.
func (cfg *BlockChainConfig) triedbConfig(isVerkle bool) *triedb.Config {
config := &triedb.Config{
Preimages: cfg.Preimages,
IsVerkle: isVerkle,
Preimages: cfg.Preimages,
IsVerkle: isVerkle,
BinTrieGroupDepth: cfg.BinTrieGroupDepth,
}
if cfg.StateScheme == rawdb.HashScheme {
config.HashDB = &hashdb.Config{

View file

@ -246,7 +246,7 @@ func (db *CachingDB) OpenTrie(root common.Hash) (Trie, error) {
if ts.Transitioned() {
// Use BinaryTrie instead of VerkleTrie when IsVerkle is set
// (IsVerkle actually means Binary Trie mode in this codebase)
return bintrie.NewBinaryTrie(root, db.triedb)
return bintrie.NewBinaryTrie(root, db.triedb, db.triedb.BinTrieGroupDepth())
}
}
tr, err := trie.NewStateTrie(trie.StateTrieID(root), db.triedb)

View file

@ -322,7 +322,7 @@ func newTrieReader(root common.Hash, db *triedb.Database) (*trieReader, error) {
tr, err = trie.NewStateTrie(trie.StateTrieID(root), db)
} else {
// When IsVerkle() is true, create a BinaryTrie wrapped in TransitionTrie
binTrie, binErr := bintrie.NewBinaryTrie(root, db)
binTrie, binErr := bintrie.NewBinaryTrie(root, db, db.BinTrieGroupDepth())
if binErr != nil {
return nil, binErr
}

View file

@ -232,6 +232,7 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) {
StateHistory: config.StateHistory,
TrienodeHistory: config.TrienodeHistory,
NodeFullValueCheckpoint: config.NodeFullValueCheckpoint,
BinTrieGroupDepth: config.BinTrieGroupDepth,
StateScheme: scheme,
ChainHistoryMode: config.HistoryMode,
TxLookupLimit: int64(min(config.TransactionHistory, math.MaxInt64)),

View file

@ -59,6 +59,7 @@ var Defaults = Config{
StateHistory: pathdb.Defaults.StateHistory,
TrienodeHistory: pathdb.Defaults.TrienodeHistory,
NodeFullValueCheckpoint: pathdb.Defaults.FullValueCheckpoint,
BinTrieGroupDepth: 8, // byte-aligned groups by default
DatabaseCache: 512,
TrieCleanCache: 154,
TrieDirtyCache: 256,
@ -125,6 +126,11 @@ type Config struct {
// consistent with persistent state.
StateScheme string `toml:",omitempty"`
// BinTrieGroupDepth is the number of levels per serialized group in binary trie.
// Valid values are 1-8, with 8 being the default (byte-aligned groups).
// Lower values create smaller groups with more nodes.
BinTrieGroupDepth int `toml:",omitempty"`
// RequiredBlocks is a set of block number -> hash mappings which must be in the
// canonical chain of all remote peers. Setting the option makes geth verify the
// presence of these blocks for every new peer connection.

View file

@ -34,6 +34,7 @@ func (c Config) MarshalTOML() (interface{}, error) {
TrienodeHistory int64 `toml:",omitempty"`
NodeFullValueCheckpoint uint32 `toml:",omitempty"`
StateScheme string `toml:",omitempty"`
BinTrieGroupDepth int `toml:",omitempty"`
RequiredBlocks map[uint64]common.Hash `toml:"-"`
SlowBlockThreshold time.Duration `toml:",omitempty"`
SkipBcVersionCheck bool `toml:"-"`
@ -87,6 +88,7 @@ func (c Config) MarshalTOML() (interface{}, error) {
enc.TrienodeHistory = c.TrienodeHistory
enc.NodeFullValueCheckpoint = c.NodeFullValueCheckpoint
enc.StateScheme = c.StateScheme
enc.BinTrieGroupDepth = c.BinTrieGroupDepth
enc.RequiredBlocks = c.RequiredBlocks
enc.SlowBlockThreshold = c.SlowBlockThreshold
enc.SkipBcVersionCheck = c.SkipBcVersionCheck
@ -144,6 +146,7 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error {
TrienodeHistory *int64 `toml:",omitempty"`
NodeFullValueCheckpoint *uint32 `toml:",omitempty"`
StateScheme *string `toml:",omitempty"`
BinTrieGroupDepth *int `toml:",omitempty"`
RequiredBlocks map[uint64]common.Hash `toml:"-"`
SlowBlockThreshold *time.Duration `toml:",omitempty"`
SkipBcVersionCheck *bool `toml:"-"`
@ -234,6 +237,9 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error {
if dec.StateScheme != nil {
c.StateScheme = *dec.StateScheme
}
if dec.BinTrieGroupDepth != nil {
c.BinTrieGroupDepth = *dec.BinTrieGroupDepth
}
if dec.RequiredBlocks != nil {
c.RequiredBlocks = dec.RequiredBlocks
}

View file

@ -31,13 +31,30 @@ type (
var zero [32]byte
const (
StemNodeWidth = 256 // Number of child per leaf node
StemSize = 31 // Number of bytes to travel before reaching a group of leaves
NodeTypeBytes = 1 // Size of node type prefix in serialization
HashSize = 32 // Size of a hash in bytes
BitmapSize = 32 // Size of the bitmap in a stem node
StemNodeWidth = 256 // Number of child per leaf node
StemSize = 31 // Number of bytes to travel before reaching a group of leaves
NodeTypeBytes = 1 // Size of node type prefix in serialization
HashSize = 32 // Size of a hash in bytes
StemBitmapSize = 32 // Size of the bitmap in a stem node (256 values = 32 bytes)
// MaxGroupDepth is the maximum allowed group depth for InternalNode serialization.
// Valid group depths are 1-8, where depth N means 2^N bottom-layer positions.
// Serialization format for InternalNode groups:
// [1 byte type] [1 byte group depth (1-8)] [variable bitmap] [N × 32 byte hashes]
// The bitmap has 2^groupDepth bits (BitmapSizeForDepth bytes), indicating which
// bottom-layer children are present. Only present children's hashes are stored.
MaxGroupDepth = 8
)
// BitmapSizeForDepth returns the bitmap size in bytes for a given group depth.
// A group of depth N has 2^N bottom-layer positions and therefore needs 2^N
// bits of bitmap. Depths 1-3 need less than a full byte and round up to one
// byte; depths 4-8 need exactly 2^(depth-3) bytes.
func BitmapSizeForDepth(groupDepth int) int {
	if groupDepth > 3 {
		return 1 << (groupDepth - 3)
	}
	return 1
}
const (
nodeTypeStem = iota + 1 // Stem node, contains a stem and a bitmap of values
nodeTypeInternal
@ -51,29 +68,88 @@ type BinaryNode interface {
Hash() common.Hash
GetValuesAtStem([]byte, NodeResolverFn) ([][]byte, error)
InsertValuesAtStem([]byte, [][]byte, NodeResolverFn, int) (BinaryNode, error)
CollectNodes([]byte, NodeFlushFn) error
CollectNodes([]byte, NodeFlushFn, int) error // groupDepth parameter for serialization
toDot(parent, path string) string
GetHeight() int
}
// serializeSubtree recursively collects child hashes from a subtree of InternalNodes.
// It traverses up to `remainingDepth` levels, storing hashes of bottom-layer children.
// position tracks the current index (0 to 2^groupDepth - 1) for bitmap placement.
// hashes collects the hashes of present children, bitmap tracks which positions are present.
// Bitmap bit i is stored MSB-first: byte i/8, bit (7 - i%8). Hashes are appended
// in in-order (left-before-right) traversal order, which deserializeSubtree mirrors.
func serializeSubtree(node BinaryNode, remainingDepth int, position int, bitmap []byte, hashes *[]common.Hash) {
if remainingDepth == 0 {
// Bottom layer: store hash if not empty
switch node.(type) {
case Empty:
// Leave bitmap bit unset, don't add hash
return
default:
// StemNode, HashedNode, or InternalNode at boundary: store hash
bitmap[position/8] |= 1 << (7 - (position % 8))
*hashes = append(*hashes, node.Hash())
}
return
}
switch n := node.(type) {
case *InternalNode:
// Recurse into left (bit 0) and right (bit 1) children
leftPos := position * 2
rightPos := position*2 + 1
serializeSubtree(n.left, remainingDepth-1, leftPos, bitmap, hashes)
serializeSubtree(n.right, remainingDepth-1, rightPos, bitmap, hashes)
case Empty:
// Empty subtree: all positions in this subtree are empty (bits already 0)
return
default:
// StemNode or HashedNode before reaching bottom: store hash at current position
// This creates a variable-depth group where this branch terminates early.
// We need to mark this single position and all its would-be descendants as "this hash".
// For simplicity, we store the hash at the first leaf position of this subtree.
// NOTE(review): deserializeSubtree rebuilds such an entry as a HashedNode at
// the bottom layer beneath synthesized InternalNodes rather than as an
// early-terminating child at this depth, so parent hashes recomputed from the
// reconstructed tree may differ from the original for early-terminating
// branches — confirm callers only rely on the stored bottom-layer hashes, or
// that stems/hashes never terminate mid-group in practice.
firstLeafPos := position << remainingDepth
bitmap[firstLeafPos/8] |= 1 << (7 - (firstLeafPos % 8))
*hashes = append(*hashes, node.Hash())
}
}
// SerializeNode serializes a binary trie node into a byte slice.
func SerializeNode(node BinaryNode) []byte {
// groupDepth specifies how many levels to include in an InternalNode group (1-8).
func SerializeNode(node BinaryNode, groupDepth int) []byte {
if groupDepth < 1 || groupDepth > MaxGroupDepth {
panic("groupDepth must be between 1 and 8")
}
switch n := (node).(type) {
case *InternalNode:
// InternalNode: 1 byte type + 32 bytes left hash + 32 bytes right hash
var serialized [NodeTypeBytes + HashSize + HashSize]byte
// InternalNode group: 1 byte type + 1 byte group depth + variable bitmap + N×32 byte hashes
bitmapSize := BitmapSizeForDepth(groupDepth)
bitmap := make([]byte, bitmapSize)
var hashes []common.Hash
serializeSubtree(n, groupDepth, 0, bitmap, &hashes)
// Build serialized output
serializedLen := NodeTypeBytes + 1 + bitmapSize + len(hashes)*HashSize
serialized := make([]byte, serializedLen)
serialized[0] = nodeTypeInternal
copy(serialized[1:33], n.left.Hash().Bytes())
copy(serialized[33:65], n.right.Hash().Bytes())
return serialized[:]
serialized[1] = byte(groupDepth)
copy(serialized[2:2+bitmapSize], bitmap)
offset := NodeTypeBytes + 1 + bitmapSize
for _, h := range hashes {
copy(serialized[offset:offset+HashSize], h.Bytes())
offset += HashSize
}
return serialized
case *StemNode:
// StemNode: 1 byte type + 31 bytes stem + 32 bytes bitmap + 256*32 bytes values
var serialized [NodeTypeBytes + StemSize + BitmapSize + StemNodeWidth*HashSize]byte
var serialized [NodeTypeBytes + StemSize + StemBitmapSize + StemNodeWidth*HashSize]byte
serialized[0] = nodeTypeStem
copy(serialized[NodeTypeBytes:NodeTypeBytes+StemSize], n.Stem)
bitmap := serialized[NodeTypeBytes+StemSize : NodeTypeBytes+StemSize+BitmapSize]
offset := NodeTypeBytes + StemSize + BitmapSize
bitmap := serialized[NodeTypeBytes+StemSize : NodeTypeBytes+StemSize+StemBitmapSize]
offset := NodeTypeBytes + StemSize + StemBitmapSize
for i, v := range n.Values {
if v != nil {
bitmap[i/8] |= 1 << (7 - (i % 8))
@ -90,6 +166,51 @@ func SerializeNode(node BinaryNode) []byte {
var invalidSerializedLength = errors.New("invalid serialized node length")
// deserializeSubtree reconstructs an InternalNode subtree from grouped serialization.
// remainingDepth is how many more levels to build, position is current index in the bitmap,
// nodeDepth is the actual trie depth for the node being created.
// hashIdx tracks the current position in the hash data (incremented as hashes are consumed).
// Hashes are consumed in the order their bitmap bits are encountered during this
// in-order (left-before-right) traversal, mirroring serializeSubtree's emission order.
func deserializeSubtree(remainingDepth int, position int, nodeDepth int, bitmap []byte, hashData []byte, hashIdx *int) (BinaryNode, error) {
if remainingDepth == 0 {
// Bottom layer: check bitmap and return HashedNode or Empty
if bitmap[position/8]>>(7-(position%8))&1 == 1 {
// Guard against truncated input before consuming the next 32 bytes.
if len(hashData) < (*hashIdx+1)*HashSize {
return nil, invalidSerializedLength
}
hash := common.BytesToHash(hashData[*hashIdx*HashSize : (*hashIdx+1)*HashSize])
*hashIdx++
return HashedNode(hash), nil
}
return Empty{}, nil
}
// Recurse into both halves; emptiness is detected afterwards by inspecting
// the returned children rather than by pre-scanning the bitmap.
leftPos := position * 2
rightPos := position*2 + 1
left, err := deserializeSubtree(remainingDepth-1, leftPos, nodeDepth+1, bitmap, hashData, hashIdx)
if err != nil {
return nil, err
}
right, err := deserializeSubtree(remainingDepth-1, rightPos, nodeDepth+1, bitmap, hashData, hashIdx)
if err != nil {
return nil, err
}
// If both children are empty, collapse this subtree back to Empty so the
// reconstructed shape omits all-empty internal nodes.
_, leftEmpty := left.(Empty)
_, rightEmpty := right.(Empty)
if leftEmpty && rightEmpty {
return Empty{}, nil
}
return &InternalNode{
depth: nodeDepth,
left: left,
right: right,
}, nil
}
// DeserializeNode deserializes a binary trie node from a byte slice.
func DeserializeNode(serialized []byte, depth int) (BinaryNode, error) {
if len(serialized) == 0 {
@ -98,21 +219,31 @@ func DeserializeNode(serialized []byte, depth int) (BinaryNode, error) {
switch serialized[0] {
case nodeTypeInternal:
if len(serialized) != 65 {
// Grouped format: 1 byte type + 1 byte group depth + variable bitmap + N×32 byte hashes
if len(serialized) < NodeTypeBytes+1 {
return nil, invalidSerializedLength
}
return &InternalNode{
depth: depth,
left: HashedNode(common.BytesToHash(serialized[1:33])),
right: HashedNode(common.BytesToHash(serialized[33:65])),
}, nil
groupDepth := int(serialized[1])
if groupDepth < 1 || groupDepth > MaxGroupDepth {
return nil, errors.New("invalid group depth")
}
bitmapSize := BitmapSizeForDepth(groupDepth)
if len(serialized) < NodeTypeBytes+1+bitmapSize {
return nil, invalidSerializedLength
}
bitmap := serialized[2 : 2+bitmapSize]
hashData := serialized[2+bitmapSize:]
// Count present children from bitmap
hashIdx := 0
return deserializeSubtree(groupDepth, 0, depth, bitmap, hashData, &hashIdx)
case nodeTypeStem:
if len(serialized) < 64 {
return nil, invalidSerializedLength
}
var values [StemNodeWidth][]byte
bitmap := serialized[NodeTypeBytes+StemSize : NodeTypeBytes+StemSize+BitmapSize]
offset := NodeTypeBytes + StemSize + BitmapSize
bitmap := serialized[NodeTypeBytes+StemSize : NodeTypeBytes+StemSize+StemBitmapSize]
offset := NodeTypeBytes + StemSize + StemBitmapSize
for i := range StemNodeWidth {
if bitmap[i/8]>>(7-(i%8))&1 == 1 {

View file

@ -24,56 +24,98 @@ import (
)
// TestSerializeDeserializeInternalNode tests serialization and deserialization of InternalNode
// with the grouped subtree format. A single InternalNode with HashedNode children serializes
// as a depth-8 group where the children appear at their first leaf positions.
func TestSerializeDeserializeInternalNode(t *testing.T) {
// Create an internal node with two hashed children
leftHash := common.HexToHash("0x1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef")
rightHash := common.HexToHash("0xfedcba0987654321fedcba0987654321fedcba0987654321fedcba0987654321")
node := &InternalNode{
depth: 5,
depth: 0, // Use depth 0 (byte-aligned) for this test
left: HashedNode(leftHash),
right: HashedNode(rightHash),
}
// Serialize the node
serialized := SerializeNode(node)
// Serialize the node with default group depth of 8
serialized := SerializeNode(node, MaxGroupDepth)
// Check the serialized format
// Check the serialized format: type byte + group depth byte + 32 byte bitmap + N*32 byte hashes
if serialized[0] != nodeTypeInternal {
t.Errorf("Expected type byte to be %d, got %d", nodeTypeInternal, serialized[0])
}
if len(serialized) != 65 {
t.Errorf("Expected serialized length to be 65, got %d", len(serialized))
if serialized[1] != MaxGroupDepth {
t.Errorf("Expected group depth to be %d, got %d", MaxGroupDepth, serialized[1])
}
// Expected length: 1 (type) + 1 (group depth) + 32 (bitmap) + 2*32 (two hashes) = 98 bytes
bitmapSize := BitmapSizeForDepth(MaxGroupDepth)
expectedLen := NodeTypeBytes + 1 + bitmapSize + 2*HashSize
if len(serialized) != expectedLen {
t.Errorf("Expected serialized length to be %d, got %d", expectedLen, len(serialized))
}
// The left child (HashedNode) terminates at remainingDepth=7, so it's placed at position 0<<7 = 0
// The right child (HashedNode) terminates at remainingDepth=7, so it's placed at position 1<<7 = 128
bitmap := serialized[2 : 2+bitmapSize]
if bitmap[0]&0x80 == 0 { // bit 0 (MSB of byte 0)
t.Error("Expected bit 0 to be set in bitmap (left child)")
}
if bitmap[16]&0x80 == 0 { // bit 128 (MSB of byte 16)
t.Error("Expected bit 128 to be set in bitmap (right child)")
}
// Deserialize the node
deserialized, err := DeserializeNode(serialized, 5)
deserialized, err := DeserializeNode(serialized, 0)
if err != nil {
t.Fatalf("Failed to deserialize node: %v", err)
}
// Check that it's an internal node
// With grouped format, deserialization creates a tree of InternalNodes down to the hashes.
// The root should be an InternalNode, and we should be able to navigate down 8 levels
// to find the HashedNode children.
internalNode, ok := deserialized.(*InternalNode)
if !ok {
t.Fatalf("Expected InternalNode, got %T", deserialized)
}
// Check the depth
if internalNode.depth != 5 {
t.Errorf("Expected depth 5, got %d", internalNode.depth)
if internalNode.depth != 0 {
t.Errorf("Expected depth 0, got %d", internalNode.depth)
}
// Check the left and right hashes
if internalNode.left.Hash() != leftHash {
t.Errorf("Left hash mismatch: expected %x, got %x", leftHash, internalNode.left.Hash())
// Navigate to position 0 (8 left turns) to find the left hash
node0 := navigateToLeaf(internalNode, 0, 8)
if node0.Hash() != leftHash {
t.Errorf("Left hash mismatch: expected %x, got %x", leftHash, node0.Hash())
}
if internalNode.right.Hash() != rightHash {
t.Errorf("Right hash mismatch: expected %x, got %x", rightHash, internalNode.right.Hash())
// Navigate to position 128 (right, then 7 lefts) to find the right hash
node128 := navigateToLeaf(internalNode, 128, 8)
if node128.Hash() != rightHash {
t.Errorf("Right hash mismatch: expected %x, got %x", rightHash, node128.Hash())
}
}
// navigateToLeaf walks `depth` levels down from node, choosing the left or
// right child at each level according to the bits of position, most
// significant bit first. If a non-InternalNode is reached before all levels
// are consumed, that node is returned as-is.
func navigateToLeaf(node BinaryNode, position, depth int) BinaryNode {
	for level := depth - 1; level >= 0; level-- {
		internal, ok := node.(*InternalNode)
		if !ok {
			break
		}
		if position&(1<<level) == 0 {
			node = internal.left
		} else {
			node = internal.right
		}
	}
	return node
}
// TestSerializeDeserializeStemNode tests serialization and deserialization of StemNode
func TestSerializeDeserializeStemNode(t *testing.T) {
// Create a stem node with some values
@ -94,8 +136,8 @@ func TestSerializeDeserializeStemNode(t *testing.T) {
depth: 10,
}
// Serialize the node
serialized := SerializeNode(node)
// Serialize the node (groupDepth doesn't affect StemNode serialization)
serialized := SerializeNode(node, MaxGroupDepth)
// Check the serialized format
if serialized[0] != nodeTypeStem {
@ -173,8 +215,8 @@ func TestDeserializeInvalidType(t *testing.T) {
// TestDeserializeInvalidLength tests deserialization with invalid data length
func TestDeserializeInvalidLength(t *testing.T) {
// InternalNode with type byte 1 but wrong length
invalidData := []byte{nodeTypeInternal, 0, 0} // Too short for internal node
// InternalNode with valid type byte and group depth but too short for bitmap
invalidData := []byte{nodeTypeInternal, 8, 0, 0} // Too short for bitmap (needs 32 bytes)
_, err := DeserializeNode(invalidData, 0)
if err == nil {

View file

@ -59,7 +59,7 @@ func (e Empty) InsertValuesAtStem(key []byte, values [][]byte, _ NodeResolverFn,
}, nil
}
func (e Empty) CollectNodes(_ []byte, _ NodeFlushFn) error {
func (e Empty) CollectNodes(_ []byte, _ NodeFlushFn, _ int) error {
return nil
}

View file

@ -186,7 +186,7 @@ func TestEmptyCollectNodes(t *testing.T) {
collected = append(collected, n)
}
err := node.CollectNodes([]byte{0, 1, 0}, flushFn)
err := node.CollectNodes([]byte{0, 1, 0}, flushFn, MaxGroupDepth)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}

View file

@ -0,0 +1,412 @@
package bintrie
import (
"fmt"
"testing"
"github.com/ethereum/go-ethereum/common"
)
// TestGroupedSerializationDebug helps understand the grouped serialization format
// by logging the header bytes, the set bitmap bits, and the reconstructed tree
// shape for a single InternalNode with two HashedNode children serialized at
// the maximum group depth. It asserts nothing beyond error-free round-tripping.
func TestGroupedSerializationDebug(t *testing.T) {
leftHash := common.HexToHash("0x1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef")
rightHash := common.HexToHash("0xfedcba0987654321fedcba0987654321fedcba0987654321fedcba0987654321")
node := &InternalNode{
depth: 0,
left: HashedNode(leftHash),
right: HashedNode(rightHash),
}
serialized := SerializeNode(node, MaxGroupDepth)
t.Logf("Serialized length: %d", len(serialized))
t.Logf("Type: %d, GroupDepth: %d", serialized[0], serialized[1])
bitmapSize := BitmapSizeForDepth(MaxGroupDepth)
bitmap := serialized[2 : 2+bitmapSize]
t.Logf("Bitmap: %x", bitmap)
// Count and show set bits
// (expected: bits 0 and 128, the first leaf positions of the two children)
for i := 0; i < 256; i++ {
if bitmap[i/8]>>(7-(i%8))&1 == 1 {
t.Logf("Bit %d is set", i)
}
}
// Deserialize
deserialized, err := DeserializeNode(serialized, 0)
if err != nil {
t.Fatalf("Error: %v", err)
}
t.Logf("Deserialized type: %T", deserialized)
// Walk the tree and print structure
printTree(t, deserialized, 0, "root")
}
// printTree logs the shape of the trie rooted at node, one line per node,
// indented one space per recursion level and labelled with its /L-/R path.
func printTree(t *testing.T, node BinaryNode, depth int, path string) {
	var indent string
	for range depth {
		indent += " "
	}
	switch nd := node.(type) {
	case HashedNode:
		t.Logf("%s%s: HashedNode(%x)", indent, path, common.Hash(nd))
	case Empty:
		t.Logf("%s%s: Empty", indent, path)
	case *InternalNode:
		t.Logf("%s%s: InternalNode (depth=%d)", indent, path, nd.depth)
		printTree(t, nd.left, depth+1, path+"/L")
		printTree(t, nd.right, depth+1, path+"/R")
	default:
		t.Logf("%s%s: %T", indent, path, node)
	}
}
// TestFullDepth8Tree tests a full 8-level tree (all 256 bottom positions filled):
// the serialized group should carry 256 set bitmap bits and deserialize back to
// a tree with 256 non-empty nodes at depth 8. Note that buildFullTree gives all
// leaves the same hash, so only the structure — not per-leaf hashes — is checked.
func TestFullDepth8Tree(t *testing.T) {
// Build a full 8-level tree
root := buildFullTree(0, 8)
serialized := SerializeNode(root, MaxGroupDepth)
t.Logf("Full tree serialized length: %d", len(serialized))
t.Logf("Expected: 1 + 1 + 32 + 256*32 = %d", 1+1+32+256*32)
// Count set bits in bitmap
bitmapSize := BitmapSizeForDepth(MaxGroupDepth)
bitmap := serialized[2 : 2+bitmapSize]
count := 0
for i := 0; i < 256; i++ {
if bitmap[i/8]>>(7-(i%8))&1 == 1 {
count++
}
}
// NOTE(review): count is only logged, never asserted; consider also
// requiring count == 256 here.
t.Logf("Set bits in bitmap: %d", count)
// Deserialize and verify structure
deserialized, err := DeserializeNode(serialized, 0)
if err != nil {
t.Fatalf("Error: %v", err)
}
// Verify it's an InternalNode with depth 0
in, ok := deserialized.(*InternalNode)
if !ok {
t.Fatalf("Expected InternalNode, got %T", deserialized)
}
if in.depth != 0 {
t.Errorf("Expected depth 0, got %d", in.depth)
}
// Count leaves at depth 8
leafCount := countLeavesAtDepth(deserialized, 8, 0)
t.Logf("Leaves at depth 8: %d", leafCount)
if leafCount != 256 {
t.Errorf("Expected 256 leaves, got %d", leafCount)
}
}
// buildFullTree builds a complete binary tree of InternalNodes maxDepth levels
// deep, with a HashedNode at every leaf.
func buildFullTree(depth, maxDepth int) BinaryNode {
if depth == maxDepth {
// NOTE(review): the leaf hash is derived from depth, and depth == maxDepth
// for every leaf reached here, so all leaves carry the *same* hash — not a
// "unique hash for this position" as originally commented. That is fine
// for structure-only tests; use buildDeepTreeUnique when distinct
// per-position leaf hashes are required.
var h common.Hash
h[0] = byte(depth)
h[1] = byte(depth >> 8)
return HashedNode(h)
}
return &InternalNode{
depth: depth,
left: buildFullTree(depth+1, maxDepth),
right: buildFullTree(depth+1, maxDepth),
}
}
// countLeavesAtDepth returns the number of non-Empty nodes found exactly
// targetDepth levels below the starting node. Branches that end in a
// non-InternalNode before reaching targetDepth contribute zero.
func countLeavesAtDepth(node BinaryNode, targetDepth, currentDepth int) int {
	if currentDepth == targetDepth {
		switch node.(type) {
		case Empty:
			return 0
		default:
			return 1
		}
	}
	internal, ok := node.(*InternalNode)
	if !ok {
		// Branch terminated before reaching the target depth.
		return 0
	}
	left := countLeavesAtDepth(internal.left, targetDepth, currentDepth+1)
	right := countLeavesAtDepth(internal.right, targetDepth, currentDepth+1)
	return left + right
}
// TestRoundTripPreservesHashes tests that round-trip preserves the original hashes:
// a full depth-8 group with 256 distinct leaf hashes is serialized and
// deserialized, and each of the 256 bottom-layer positions must carry its
// original hash in the reconstructed tree.
func TestRoundTripPreservesHashes(t *testing.T) {
// Build a tree with known hashes at specific positions
hashes := make([]common.Hash, 256)
for i := range hashes {
hashes[i] = common.BytesToHash([]byte(fmt.Sprintf("hash-%d", i)))
}
root := buildTreeWithHashes(0, 8, 0, hashes)
serialized := SerializeNode(root, MaxGroupDepth)
deserialized, err := DeserializeNode(serialized, 0)
if err != nil {
t.Fatalf("Error: %v", err)
}
// Verify each hash at depth 8
for i := 0; i < 256; i++ {
node := navigateToLeaf(deserialized, i, 8)
if node == nil {
t.Errorf("Position %d: node is nil", i)
continue
}
if node.Hash() != hashes[i] {
t.Errorf("Position %d: hash mismatch, expected %x, got %x", i, hashes[i], node.Hash())
}
}
}
// buildTreeWithHashes builds a complete binary tree of InternalNodes maxDepth
// levels deep, placing HashedNode(hashes[position]) at each leaf, where
// position is the leaf's left-to-right index at the bottom layer.
func buildTreeWithHashes(depth, maxDepth, position int, hashes []common.Hash) BinaryNode {
	if depth < maxDepth {
		return &InternalNode{
			depth: depth,
			left:  buildTreeWithHashes(depth+1, maxDepth, 2*position, hashes),
			right: buildTreeWithHashes(depth+1, maxDepth, 2*position+1, hashes),
		}
	}
	return HashedNode(hashes[position])
}
// TestCollectNodesGrouping verifies that CollectNodes only flushes at group boundaries
// and that the serialized/deserialized tree matches the original. A 16-level tree
// is collected with groupDepth 8, so exactly one group is expected at depth 0 and
// 256 groups at depth 8; the blobs are then re-resolved by hash and the rebuilt
// tree is compared level-by-level against the original.
func TestCollectNodesGrouping(t *testing.T) {
// Build a tree that spans multiple groups (16 levels = 2 groups)
// This creates a tree where:
// - Group 1: depths 0-7 (root group)
// - Group 2: depths 8-15 (leaf groups, up to 256 of them)
// Use unique hashes at leaves so we get unique serialized blobs
root := buildDeepTreeUnique(0, 16, 0)
// Compute the root hash before collection
originalRootHash := root.Hash()
// Collect and serialize all nodes, storing by hash
serializedNodes := make(map[common.Hash][]byte)
var collectedNodes []struct {
path []byte
node BinaryNode
}
err := root.CollectNodes(nil, func(path []byte, node BinaryNode) {
// Copy the path: the callback's slice may be reused by the traversal.
pathCopy := make([]byte, len(path))
copy(pathCopy, path)
collectedNodes = append(collectedNodes, struct {
path []byte
node BinaryNode
}{pathCopy, node})
// Serialize and store by hash
serialized := SerializeNode(node, MaxGroupDepth)
serializedNodes[node.Hash()] = serialized
}, MaxGroupDepth)
if err != nil {
t.Fatalf("CollectNodes failed: %v", err)
}
// Count nodes by depth
depthCounts := make(map[int]int)
for _, cn := range collectedNodes {
switch n := cn.node.(type) {
case *InternalNode:
depthCounts[n.depth]++
case *StemNode:
t.Logf("Collected StemNode at path len %d", len(cn.path))
}
}
// With a 16-level tree:
// - 1 node at depth 0 (the root group)
// - 256 nodes at depth 8 (the second-level groups)
// Total: 257 InternalNode groups
if depthCounts[0] != 1 {
t.Errorf("Expected 1 node at depth 0, got %d", depthCounts[0])
}
if depthCounts[8] != 256 {
t.Errorf("Expected 256 nodes at depth 8, got %d", depthCounts[8])
}
t.Logf("Total collected nodes: %d", len(collectedNodes))
t.Logf("Total serialized blobs: %d", len(serializedNodes))
t.Logf("Depth counts: %v", depthCounts)
// Now deserialize starting from the root hash
// Create a resolver that looks up serialized data by hash
resolver := func(path []byte, hash common.Hash) ([]byte, error) {
if data, ok := serializedNodes[hash]; ok {
return data, nil
}
return nil, fmt.Errorf("node not found: %x", hash)
}
// Deserialize the root
rootData, ok := serializedNodes[originalRootHash]
if !ok {
t.Fatalf("Root hash not found in serialized nodes: %x", originalRootHash)
}
deserializedRoot, err := DeserializeNode(rootData, 0)
if err != nil {
t.Fatalf("Failed to deserialize root: %v", err)
}
// Verify the deserialized root hash matches
if deserializedRoot.Hash() != originalRootHash {
t.Errorf("Deserialized root hash mismatch: expected %x, got %x", originalRootHash, deserializedRoot.Hash())
}
// Traverse both trees and compare structure at all 16 levels
// We need to resolve HashedNodes in the deserialized tree to compare deeper
// NOTE(review): the success log below also runs after t.Errorf (which does
// not stop the test); guard it on err == nil if the log should be accurate.
err = compareTreesWithResolver(t, root, deserializedRoot, resolver, 0, 16, "root")
if err != nil {
t.Errorf("Tree comparison failed: %v", err)
}
t.Log("Tree comparison passed - deserialized tree matches original")
}
// compareTreesWithResolver compares two trees, resolving HashedNodes as needed.
// original and deserialized are walked in lockstep down to maxDepth; HashedNodes
// on the deserialized side are expanded via resolver before comparison (the
// original side is assumed fully materialized and is never resolved). Returns
// the first mismatch found as an error, or nil if the trees agree.
func compareTreesWithResolver(t *testing.T, original, deserialized BinaryNode, resolver NodeResolverFn, depth, maxDepth int, path string) error {
if depth >= maxDepth {
// At leaf level, just compare hashes
if original.Hash() != deserialized.Hash() {
return fmt.Errorf("hash mismatch at %s: original=%x, deserialized=%x", path, original.Hash(), deserialized.Hash())
}
return nil
}
// Get the actual nodes (resolve HashedNodes if needed)
origNode := original
deserNode := deserialized
// Resolve deserialized HashedNode if needed
if h, ok := deserNode.(HashedNode); ok {
data, err := resolver(nil, common.Hash(h))
if err != nil {
return fmt.Errorf("failed to resolve deserialized node at %s: %v", path, err)
}
deserNode, err = DeserializeNode(data, depth)
if err != nil {
return fmt.Errorf("failed to deserialize node at %s: %v", path, err)
}
}
// Both should be InternalNodes at this point
origInternal, origOk := origNode.(*InternalNode)
deserInternal, deserOk := deserNode.(*InternalNode)
if !origOk || !deserOk {
// Check if both are the same type
if fmt.Sprintf("%T", origNode) != fmt.Sprintf("%T", deserNode) {
return fmt.Errorf("type mismatch at %s: original=%T, deserialized=%T", path, origNode, deserNode)
}
// Both are non-InternalNode, compare hashes
if origNode.Hash() != deserNode.Hash() {
return fmt.Errorf("hash mismatch at %s: original=%x, deserialized=%x", path, origNode.Hash(), deserNode.Hash())
}
return nil
}
// Compare depths
if origInternal.depth != deserInternal.depth {
return fmt.Errorf("depth mismatch at %s: original=%d, deserialized=%d", path, origInternal.depth, deserInternal.depth)
}
// Recursively compare children
if err := compareTreesWithResolver(t, origInternal.left, deserInternal.left, resolver, depth+1, maxDepth, path+"/L"); err != nil {
return err
}
if err := compareTreesWithResolver(t, origInternal.right, deserInternal.right, resolver, depth+1, maxDepth, path+"/R"); err != nil {
return err
}
return nil
}
// buildDeepTreeUnique builds a tree where each leaf has a unique hash based on
// its position: the leaf's index within the bottom level is encoded into the
// first four bytes of its hash (little-endian).
func buildDeepTreeUnique(depth, maxDepth, position int) BinaryNode {
	if depth < maxDepth {
		// Internal level: left child doubles the position, right child
		// doubles it and adds one, mirroring binary path encoding.
		return &InternalNode{
			depth: depth,
			left:  buildDeepTreeUnique(depth+1, maxDepth, 2*position),
			right: buildDeepTreeUnique(depth+1, maxDepth, 2*position+1),
		}
	}
	// Leaf level: derive a distinct hash from the position.
	var leaf common.Hash
	for i := 0; i < 4; i++ {
		leaf[i] = byte(position >> (8 * i))
	}
	return HashedNode(leaf)
}
// TestVariableGroupDepth exercises serialization round-trips for every
// supported group depth (1..MaxGroupDepth), checking header layout, hash
// stability across a round-trip, and that CollectNodes only flushes internal
// nodes at group boundaries.
func TestVariableGroupDepth(t *testing.T) {
	for gd := 1; gd <= MaxGroupDepth; gd++ {
		t.Run(fmt.Sprintf("groupDepth=%d", gd), func(t *testing.T) {
			// Build two complete groups' worth of levels.
			levels := gd * 2
			tree := buildDeepTreeUnique(0, levels, 0)
			wantHash := tree.Hash()

			// Serialize with this group depth and inspect the header bytes.
			blob := SerializeNode(tree, gd)
			if blob[0] != nodeTypeInternal {
				t.Errorf("Expected type byte %d, got %d", nodeTypeInternal, blob[0])
			}
			if int(blob[1]) != gd {
				t.Errorf("Expected group depth %d, got %d", gd, blob[1])
			}

			// The payload must at least hold the type byte, depth byte and bitmap.
			bitmapLen := BitmapSizeForDepth(gd)
			if minLen := 1 + 1 + bitmapLen; len(blob) < minLen {
				t.Errorf("Serialized data too short: got %d, expected at least %d", len(blob), minLen)
			}

			// Round-trip and confirm the root hash is unchanged.
			decoded, err := DeserializeNode(blob, 0)
			if err != nil {
				t.Fatalf("DeserializeNode failed: %v", err)
			}
			if decoded.Hash() != wantHash {
				t.Errorf("Hash mismatch after round-trip: expected %x, got %x", wantHash, decoded.Hash())
			}

			// Gather the depths of every flushed internal node.
			var boundaryDepths []int
			err = tree.CollectNodes(nil, func(path []byte, node BinaryNode) {
				if internal, ok := node.(*InternalNode); ok {
					boundaryDepths = append(boundaryDepths, internal.depth)
				}
			}, gd)
			if err != nil {
				t.Fatalf("CollectNodes failed: %v", err)
			}
			// Every flushed node must sit exactly on a group boundary.
			for _, d := range boundaryDepths {
				if d%gd != 0 {
					t.Errorf("Collected node at depth %d, but groupDepth is %d (not a boundary)", d, gd)
				}
			}

			t.Logf("groupDepth=%d: serialized=%d bytes, collected=%d nodes at depths %v",
				gd, len(blob), len(boundaryDepths), boundaryDepths)
		})
	}
}

View file

@ -80,7 +80,7 @@ func (h HashedNode) toDot(parent string, path string) string {
return ret
}
func (h HashedNode) CollectNodes([]byte, NodeFlushFn) error {
func (h HashedNode) CollectNodes([]byte, NodeFlushFn, int) error {
// HashedNodes are already persisted in the database and don't need to be collected.
return nil
}

View file

@ -135,7 +135,7 @@ func TestHashedNodeInsertValuesAtStem(t *testing.T) {
}
// Serialize the node
serialized := SerializeNode(originalNode)
serialized := SerializeNode(originalNode, MaxGroupDepth)
// Create a mock resolver that returns the serialized node
validResolver := func(path []byte, hash common.Hash) ([]byte, error) {

View file

@ -184,31 +184,85 @@ func (bt *InternalNode) InsertValuesAtStem(stem []byte, values [][]byte, resolve
return bt, err
}
// CollectNodes collects all child nodes at a given path, and flushes it
// into the provided node collector.
func (bt *InternalNode) CollectNodes(path []byte, flushfn NodeFlushFn) error {
if bt.left != nil {
var p [256]byte
copy(p[:], path)
childpath := p[:len(path)]
childpath = append(childpath, 0)
if err := bt.left.CollectNodes(childpath, flushfn); err != nil {
// CollectNodes collects all child nodes at group boundaries (every groupDepth levels),
// and flushes them into the provided node collector. Each flush serializes a groupDepth-level
// subtree group. Nodes within a group are not flushed individually.
//
// path is the bit-path from the root to this node (one byte per bit), flushfn
// receives each group root together with its path, and groupDepth must lie in
// [1, MaxGroupDepth]; an out-of-range value yields an error.
func (bt *InternalNode) CollectNodes(path []byte, flushfn NodeFlushFn, groupDepth int) error {
	if groupDepth < 1 || groupDepth > MaxGroupDepth {
		return errors.New("groupDepth must be between 1 and 8")
	}
	// Only flush at group boundaries (depth % groupDepth == 0)
	if bt.depth%groupDepth == 0 {
		// We're at a group boundary - first collect any nodes in deeper groups,
		// then flush this group
		// (children are flushed before the parent so that a reader following
		// the parent's references finds them already persisted)
		if err := bt.collectChildGroups(path, flushfn, groupDepth, groupDepth-1); err != nil {
			return err
		}
		flushfn(path, bt)
		return nil
	}
	// Not at a group boundary - this shouldn't happen if we're called correctly from root
	// but handle it by continuing to traverse
	// (the remaining-levels argument is the distance to the next boundary minus one)
	return bt.collectChildGroups(path, flushfn, groupDepth, groupDepth-(bt.depth%groupDepth)-1)
}
// collectChildGroups traverses within a group to find and collect nodes in the next group.
// remainingLevels is how many more levels below the current node until we reach the group boundary.
// When remainingLevels=0, the current node's children are at the next group boundary.
func (bt *InternalNode) collectChildGroups(path []byte, flushfn NodeFlushFn, groupDepth int, remainingLevels int) error {
if remainingLevels == 0 {
// Current node is at depth (groupBoundary - 1), its children are at the next group boundary
if bt.left != nil {
if err := bt.left.CollectNodes(appendBit(path, 0), flushfn, groupDepth); err != nil {
return err
}
}
if bt.right != nil {
if err := bt.right.CollectNodes(appendBit(path, 1), flushfn, groupDepth); err != nil {
return err
}
}
return nil
}
// Continue traversing within the group
if bt.left != nil {
switch n := bt.left.(type) {
case *InternalNode:
if err := n.collectChildGroups(appendBit(path, 0), flushfn, groupDepth, remainingLevels-1); err != nil {
return err
}
default:
// StemNode, HashedNode, or Empty - they handle their own collection
if err := bt.left.CollectNodes(appendBit(path, 0), flushfn, groupDepth); err != nil {
return err
}
}
}
if bt.right != nil {
var p [256]byte
copy(p[:], path)
childpath := p[:len(path)]
childpath = append(childpath, 1)
if err := bt.right.CollectNodes(childpath, flushfn); err != nil {
return err
switch n := bt.right.(type) {
case *InternalNode:
if err := n.collectChildGroups(appendBit(path, 1), flushfn, groupDepth, remainingLevels-1); err != nil {
return err
}
default:
// StemNode, HashedNode, or Empty - they handle their own collection
if err := bt.right.CollectNodes(appendBit(path, 1), flushfn, groupDepth); err != nil {
return err
}
}
}
flushfn(path, bt)
return nil
}
// appendBit returns a new slice holding path followed by bit. A fresh backing
// array is always allocated so the result never aliases the caller's slice,
// which lets sibling recursions extend their paths independently.
//
// The previous implementation staged the copy through a fixed [256]byte array,
// which silently truncated paths of 256+ bytes and panicked (slice bounds out
// of range) for anything longer; sizing the allocation exactly handles any
// path length and avoids the 256-byte scratch copy per call.
func appendBit(path []byte, bit byte) []byte {
	out := make([]byte, len(path)+1)
	copy(out, path)
	out[len(path)] = bit
	return out
}
// GetHeight returns the height of the node.
func (bt *InternalNode) GetHeight() int {
var (

View file

@ -95,7 +95,7 @@ func TestInternalNodeGetWithResolver(t *testing.T) {
Values: values[:],
depth: 1,
}
return SerializeNode(stemNode), nil
return SerializeNode(stemNode, MaxGroupDepth), nil
}
return nil, errors.New("node not found")
}
@ -379,7 +379,7 @@ func TestInternalNodeCollectNodes(t *testing.T) {
collectedNodes = append(collectedNodes, n)
}
err := node.CollectNodes([]byte{1}, flushFn)
err := node.CollectNodes([]byte{1}, flushFn, MaxGroupDepth)
if err != nil {
t.Fatalf("Failed to collect nodes: %v", err)
}

View file

@ -184,7 +184,7 @@ func (it *binaryNodeIterator) Path() []byte {
// NodeBlob returns the serialized bytes of the current node.
func (it *binaryNodeIterator) NodeBlob() []byte {
return SerializeNode(it.current)
return SerializeNode(it.current, it.trie.groupDepth)
}
// Leaf returns true iff the current node is a leaf node.

View file

@ -134,8 +134,8 @@ func (bt *StemNode) Hash() common.Hash {
}
// CollectNodes collects all child nodes at a given path, and flushes it
// into the provided node collector.
func (bt *StemNode) CollectNodes(path []byte, flush NodeFlushFn) error {
// into the provided node collector. groupDepth is ignored for StemNodes.
func (bt *StemNode) CollectNodes(path []byte, flush NodeFlushFn, groupDepth int) error {
flush(path, bt)
return nil
}

View file

@ -347,7 +347,7 @@ func TestStemNodeCollectNodes(t *testing.T) {
collectedNodes = append(collectedNodes, n)
}
err := node.CollectNodes([]byte{0, 1, 0}, flushFn)
err := node.CollectNodes([]byte{0, 1, 0}, flushFn, MaxGroupDepth)
if err != nil {
t.Fatalf("Failed to collect nodes: %v", err)
}

View file

@ -115,9 +115,10 @@ func NewBinaryNode() BinaryNode {
// BinaryTrie is the implementation of https://eips.ethereum.org/EIPS/eip-7864.
type BinaryTrie struct {
root BinaryNode
reader *trie.Reader
tracer *trie.PrevalueTracer
root BinaryNode
reader *trie.Reader
tracer *trie.PrevalueTracer
groupDepth int // Number of levels per serialized group (1-8, default 8)
}
// ToDot converts the binary trie to a DOT language representation. Useful for debugging.
@ -127,15 +128,20 @@ func (t *BinaryTrie) ToDot() string {
}
// NewBinaryTrie creates a new binary trie.
func NewBinaryTrie(root common.Hash, db database.NodeDatabase) (*BinaryTrie, error) {
// groupDepth specifies the number of levels per serialized group (1-8).
func NewBinaryTrie(root common.Hash, db database.NodeDatabase, groupDepth int) (*BinaryTrie, error) {
if groupDepth < 1 || groupDepth > MaxGroupDepth {
groupDepth = MaxGroupDepth // Default to 8
}
reader, err := trie.NewReader(root, common.Hash{}, db)
if err != nil {
return nil, err
}
t := &BinaryTrie{
root: NewBinaryNode(),
reader: reader,
tracer: trie.NewPrevalueTracer(),
root: NewBinaryNode(),
reader: reader,
tracer: trie.NewPrevalueTracer(),
groupDepth: groupDepth,
}
// Parse the root node if it's not empty
if root != types.EmptyBinaryHash && root != types.EmptyRootHash {
@ -325,9 +331,9 @@ func (t *BinaryTrie) Commit(_ bool) (common.Hash, *trienode.NodeSet) {
// The root can be any type of BinaryNode (InternalNode, StemNode, etc.)
err := t.root.CollectNodes(nil, func(path []byte, node BinaryNode) {
serialized := SerializeNode(node)
serialized := SerializeNode(node, t.groupDepth)
nodeset.AddNode(path, trienode.NewNodeWithPrev(node.Hash(), serialized, t.tracer.Get(path)))
})
}, t.groupDepth)
if err != nil {
panic(fmt.Errorf("CollectNodes failed: %v", err))
}
@ -355,9 +361,10 @@ func (t *BinaryTrie) Prove(key []byte, proofDb ethdb.KeyValueWriter) error {
// Copy creates a deep copy of the trie.
func (t *BinaryTrie) Copy() *BinaryTrie {
return &BinaryTrie{
root: t.root.Copy(),
reader: t.reader,
tracer: t.tracer.Copy(),
root: t.root.Copy(),
reader: t.reader,
tracer: t.tracer.Copy(),
groupDepth: t.groupDepth,
}
}

View file

@ -31,10 +31,11 @@ import (
// Config defines all necessary options for database.
type Config struct {
Preimages bool // Flag whether the preimage of node key is recorded
IsVerkle bool // Flag whether the db is holding a verkle tree
HashDB *hashdb.Config // Configs for hash-based scheme
PathDB *pathdb.Config // Configs for experimental path-based scheme
Preimages bool // Flag whether the preimage of node key is recorded
IsVerkle bool // Flag whether the db is holding a verkle tree
BinTrieGroupDepth int // Number of levels per serialized group in binary trie (1-8, default 8)
HashDB *hashdb.Config // Configs for hash-based scheme
PathDB *pathdb.Config // Configs for experimental path-based scheme
}
// HashDefaults represents a config for using hash-based scheme with
@ -48,9 +49,10 @@ var HashDefaults = &Config{
// VerkleDefaults represents a config for holding verkle trie data
// using path-based scheme with default settings.
var VerkleDefaults = &Config{
Preimages: false,
IsVerkle: true,
PathDB: pathdb.Defaults,
Preimages: false,
IsVerkle: true,
BinTrieGroupDepth: 8, // Default to byte-aligned groups
PathDB: pathdb.Defaults,
}
// backend defines the methods needed to access/update trie nodes in different
@ -380,6 +382,15 @@ func (db *Database) IsVerkle() bool {
return db.config.IsVerkle
}
// BinTrieGroupDepth returns the group depth for binary trie serialization (1-8).
// Returns 8 as default if not configured.
func (db *Database) BinTrieGroupDepth() int {
	depth := db.config.BinTrieGroupDepth
	if depth >= 1 && depth <= 8 {
		return depth
	}
	// Unset or out-of-range values fall back to the byte-aligned default.
	return 8
}
// Disk returns the underlying disk database.
func (db *Database) Disk() ethdb.Database {
return db.disk