cmd, core, eth, triedb/pathdb: track node origins in the path database (#32418)

This PR is the first step in the trienode history series.

It introduces the `nodeWithOrigin` struct in the path database, which tracks
the original values of dirty nodes to support trienode history construction.

Note: the original value is always empty in this PR, so it won't break the
existing journal encoding and decoding. Journal compatibility will be
handled in a follow-up PR.
This commit is contained in:
rjl493456442 2025-09-05 10:37:05 +08:00 committed by GitHub
parent f5fcfb2fbe
commit 902ec5baae
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
15 changed files with 648 additions and 210 deletions

View file

@ -168,10 +168,13 @@ type BlockChainConfig struct {
TrieNoAsyncFlush bool // Whether the asynchronous buffer flushing is disallowed TrieNoAsyncFlush bool // Whether the asynchronous buffer flushing is disallowed
TrieJournalDirectory string // Directory path to the journal used for persisting trie data across node restarts TrieJournalDirectory string // Directory path to the journal used for persisting trie data across node restarts
Preimages bool // Whether to store preimage of trie key to the disk Preimages bool // Whether to store preimage of trie key to the disk
StateHistory uint64 // Number of blocks from head whose state histories are reserved. StateScheme string // Scheme used to store ethereum states and merkle tree nodes on top
StateScheme string // Scheme used to store ethereum states and merkle tree nodes on top ArchiveMode bool // Whether to enable the archive mode
ArchiveMode bool // Whether to enable the archive mode
// Number of blocks from the chain head for which state histories are retained.
// If set to 0, all state histories across the entire chain will be retained;
StateHistory uint64
// State snapshot related options // State snapshot related options
SnapshotLimit int // Memory allowance (MB) to use for caching snapshot entries in memory SnapshotLimit int // Memory allowance (MB) to use for caching snapshot entries in memory

View file

@ -259,11 +259,24 @@ func (set *MergedNodeSet) Merge(other *NodeSet) error {
return nil return nil
} }
// Flatten returns a two-dimensional map for internal nodes. // Nodes returns a two-dimensional map for internal nodes.
func (set *MergedNodeSet) Flatten() map[common.Hash]map[string]*Node { func (set *MergedNodeSet) Nodes() map[common.Hash]map[string]*Node {
nodes := make(map[common.Hash]map[string]*Node, len(set.Sets)) nodes := make(map[common.Hash]map[string]*Node, len(set.Sets))
for owner, set := range set.Sets { for owner, set := range set.Sets {
nodes[owner] = set.Nodes nodes[owner] = set.Nodes
} }
return nodes return nodes
} }
// NodeAndOrigins returns a two-dimensional map for internal nodes along with
// their original values, both keyed by the trie owner and the node path.
func (set *MergedNodeSet) NodeAndOrigins() (map[common.Hash]map[string]*Node, map[common.Hash]map[string][]byte) {
	nodes := make(map[common.Hash]map[string]*Node, len(set.Sets))
	origins := make(map[common.Hash]map[string][]byte, len(set.Sets))
	for owner, subset := range set.Sets {
		nodes[owner] = subset.Nodes
		origins[owner] = subset.Origins
	}
	return nodes, origins
}

118
triedb/pathdb/config.go Normal file
View file

@ -0,0 +1,118 @@
// Copyright 2025 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package pathdb
import (
"fmt"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/params"
)
const (
	// defaultTrieCleanSize is the default memory allowance of the clean trie cache.
	defaultTrieCleanSize = 16 * 1024 * 1024
	// defaultStateCleanSize is the default memory allowance of the clean state cache.
	defaultStateCleanSize = 16 * 1024 * 1024
	// maxBufferSize is the maximum memory allowance of the node buffer.
	// Too large a buffer will cause the system to pause for a long
	// time when a flush happens. Also, the largest batch that pebble can
	// support is 4GB; the node will panic if the batch size exceeds this limit.
	maxBufferSize = 256 * 1024 * 1024
	// defaultBufferSize is the default memory allowance of the node buffer
	// that aggregates the writes from above until it's flushed into the
	// disk. It's meant to be used once the initial sync is finished.
	// Do not increase the buffer size arbitrarily, otherwise the system
	// pause time will increase when the database writes happen.
	defaultBufferSize = 64 * 1024 * 1024
)
var (
	// maxDiffLayers is the maximum diff layers allowed in the layer tree.
	// Declared as a variable (rather than a constant) so tests can lower
	// it to exercise disk-layer behaviour with fewer blocks.
	maxDiffLayers = 128
)
// Defaults contains the default database settings for Ethereum mainnet.
var Defaults = &Config{
	StateHistory:        params.FullImmutabilityThreshold,
	EnableStateIndexing: false,
	TrieCleanSize:       defaultTrieCleanSize,
	StateCleanSize:      defaultStateCleanSize,
	WriteBufferSize:     defaultBufferSize,
}
// ReadOnly is the config for opening the database in read-only mode.
var ReadOnly = &Config{
	ReadOnly:       true,
	TrieCleanSize:  defaultTrieCleanSize,
	StateCleanSize: defaultStateCleanSize,
}
// Config contains the settings for the database.
type Config struct {
	StateHistory        uint64 // Number of recent blocks to maintain state history for, 0: full chain
	EnableStateIndexing bool   // Whether to enable state history indexing for external state access
	TrieCleanSize       int    // Maximum memory allowance (in bytes) for caching clean trie data
	StateCleanSize      int    // Maximum memory allowance (in bytes) for caching clean state data
	WriteBufferSize     int    // Maximum memory allowance (in bytes) for write buffer
	ReadOnly            bool   // Flag whether the database is opened in read only mode
	JournalDirectory    string // Absolute path of journal directory (empty means the journal data is persisted in key-value store)

	// Testing configurations
	SnapshotNoBuild   bool // Flag whether the state generation is disabled
	NoAsyncFlush      bool // Flag whether the background buffer flushing is disabled
	NoAsyncGeneration bool // Flag whether the background generation is disabled
}
// sanitize checks the provided user configurations and changes anything
// that's unreasonable or unworkable, returning a sanitized copy and
// leaving the receiver untouched.
func (c *Config) sanitize() *Config {
	sanitized := *c
	if sanitized.WriteBufferSize > maxBufferSize {
		// Clamp the buffer to the pebble batch limit to avoid a panic on flush.
		log.Warn("Sanitizing invalid node buffer size", "provided", common.StorageSize(sanitized.WriteBufferSize), "updated", common.StorageSize(maxBufferSize))
		sanitized.WriteBufferSize = maxBufferSize
	}
	return &sanitized
}
// fields returns a list of attributes of config for printing.
func (c *Config) fields() []interface{} {
	var attrs []interface{}
	if c.ReadOnly {
		attrs = append(attrs, "readonly", true)
	}
	attrs = append(attrs,
		"triecache", common.StorageSize(c.TrieCleanSize),
		"statecache", common.StorageSize(c.StateCleanSize),
		"buffer", common.StorageSize(c.WriteBufferSize),
	)
	// A zero retention limit means the full history is kept.
	switch c.StateHistory {
	case 0:
		attrs = append(attrs, "state-history", "entire chain")
	default:
		attrs = append(attrs, "state-history", fmt.Sprintf("last %d blocks", c.StateHistory))
	}
	if c.EnableStateIndexing {
		attrs = append(attrs, "index-history", true)
	}
	if c.JournalDirectory != "" {
		attrs = append(attrs, "journal-dir", c.JournalDirectory)
	}
	return attrs
}

View file

@ -31,37 +31,10 @@ import (
"github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/trie/trienode" "github.com/ethereum/go-ethereum/trie/trienode"
"github.com/ethereum/go-verkle" "github.com/ethereum/go-verkle"
) )
const (
// defaultTrieCleanSize is the default memory allowance of clean trie cache.
defaultTrieCleanSize = 16 * 1024 * 1024
// defaultStateCleanSize is the default memory allowance of clean state cache.
defaultStateCleanSize = 16 * 1024 * 1024
// maxBufferSize is the maximum memory allowance of node buffer.
// Too large buffer will cause the system to pause for a long
// time when write happens. Also, the largest batch that pebble can
// support is 4GB, node will panic if batch size exceeds this limit.
maxBufferSize = 256 * 1024 * 1024
// defaultBufferSize is the default memory allowance of node buffer
// that aggregates the writes from above until it's flushed into the
// disk. It's meant to be used once the initial sync is finished.
// Do not increase the buffer size arbitrarily, otherwise the system
// pause time will increase when the database writes happen.
defaultBufferSize = 64 * 1024 * 1024
)
var (
// maxDiffLayers is the maximum diff layers allowed in the layer tree.
maxDiffLayers = 128
)
// layer is the interface implemented by all state layers which includes some // layer is the interface implemented by all state layers which includes some
// public methods and some additional methods for internal usage. // public methods and some additional methods for internal usage.
type layer interface { type layer interface {
@ -105,7 +78,7 @@ type layer interface {
// the provided dirty trie nodes along with the state change set. // the provided dirty trie nodes along with the state change set.
// //
// Note, the maps are retained by the method to avoid copying everything. // Note, the maps are retained by the method to avoid copying everything.
update(root common.Hash, id uint64, block uint64, nodes *nodeSet, states *StateSetWithOrigin) *diffLayer update(root common.Hash, id uint64, block uint64, nodes *nodeSetWithOrigin, states *StateSetWithOrigin) *diffLayer
// journal commits an entire diff hierarchy to disk into a single journal entry. // journal commits an entire diff hierarchy to disk into a single journal entry.
// This is meant to be used during shutdown to persist the layer without // This is meant to be used during shutdown to persist the layer without
@ -113,68 +86,6 @@ type layer interface {
journal(w io.Writer) error journal(w io.Writer) error
} }
// Config contains the settings for database.
type Config struct {
StateHistory uint64 // Number of recent blocks to maintain state history for
EnableStateIndexing bool // Whether to enable state history indexing for external state access
TrieCleanSize int // Maximum memory allowance (in bytes) for caching clean trie nodes
StateCleanSize int // Maximum memory allowance (in bytes) for caching clean state data
WriteBufferSize int // Maximum memory allowance (in bytes) for write buffer
ReadOnly bool // Flag whether the database is opened in read only mode
JournalDirectory string // Absolute path of journal directory (null means the journal data is persisted in key-value store)
// Testing configurations
SnapshotNoBuild bool // Flag Whether the state generation is allowed
NoAsyncFlush bool // Flag whether the background buffer flushing is allowed
NoAsyncGeneration bool // Flag whether the background generation is allowed
}
// sanitize checks the provided user configurations and changes anything that's
// unreasonable or unworkable.
func (c *Config) sanitize() *Config {
conf := *c
if conf.WriteBufferSize > maxBufferSize {
log.Warn("Sanitizing invalid node buffer size", "provided", common.StorageSize(conf.WriteBufferSize), "updated", common.StorageSize(maxBufferSize))
conf.WriteBufferSize = maxBufferSize
}
return &conf
}
// fields returns a list of attributes of config for printing.
func (c *Config) fields() []interface{} {
var list []interface{}
if c.ReadOnly {
list = append(list, "readonly", true)
}
if c.SnapshotNoBuild {
list = append(list, "snapshot", false)
}
list = append(list, "triecache", common.StorageSize(c.TrieCleanSize))
list = append(list, "statecache", common.StorageSize(c.StateCleanSize))
list = append(list, "buffer", common.StorageSize(c.WriteBufferSize))
if c.StateHistory == 0 {
list = append(list, "history", "entire chain")
} else {
list = append(list, "history", fmt.Sprintf("last %d blocks", c.StateHistory))
}
if c.JournalDirectory != "" {
list = append(list, "journal-dir", c.JournalDirectory)
}
return list
}
// Defaults contains default settings for Ethereum mainnet.
var Defaults = &Config{
StateHistory: params.FullImmutabilityThreshold,
TrieCleanSize: defaultTrieCleanSize,
StateCleanSize: defaultStateCleanSize,
WriteBufferSize: defaultBufferSize,
}
// ReadOnly is the config in order to open database in read only mode.
var ReadOnly = &Config{ReadOnly: true}
// nodeHasher is the function to compute the hash of supplied node blob. // nodeHasher is the function to compute the hash of supplied node blob.
type nodeHasher func([]byte) (common.Hash, error) type nodeHasher func([]byte) (common.Hash, error)
@ -422,7 +333,8 @@ func (db *Database) Update(root common.Hash, parentRoot common.Hash, block uint6
if err := db.modifyAllowed(); err != nil { if err := db.modifyAllowed(); err != nil {
return err return err
} }
if err := db.tree.add(root, parentRoot, block, nodes, states); err != nil { // TODO(rjl493456442) tracking the origins in the following PRs.
if err := db.tree.add(root, parentRoot, block, NewNodeSetWithOrigin(nodes.Nodes(), nil), states); err != nil {
return err return err
} }
// Keep 128 diff layers in the memory, persistent layer is 129th. // Keep 128 diff layers in the memory, persistent layer is 129th.

View file

@ -36,9 +36,10 @@ import (
"github.com/ethereum/go-ethereum/trie" "github.com/ethereum/go-ethereum/trie"
"github.com/ethereum/go-ethereum/trie/trienode" "github.com/ethereum/go-ethereum/trie/trienode"
"github.com/holiman/uint256" "github.com/holiman/uint256"
"golang.org/x/exp/maps"
) )
func updateTrie(db *Database, stateRoot common.Hash, addrHash common.Hash, root common.Hash, dirties map[common.Hash][]byte) (common.Hash, *trienode.NodeSet) { func updateTrie(db *Database, stateRoot common.Hash, addrHash common.Hash, root common.Hash, entries map[common.Hash][]byte) (common.Hash, *trienode.NodeSet) {
var id *trie.ID var id *trie.ID
if addrHash == (common.Hash{}) { if addrHash == (common.Hash{}) {
id = trie.StateTrieID(stateRoot) id = trie.StateTrieID(stateRoot)
@ -49,13 +50,17 @@ func updateTrie(db *Database, stateRoot common.Hash, addrHash common.Hash, root
if err != nil { if err != nil {
panic(fmt.Errorf("failed to load trie, err: %w", err)) panic(fmt.Errorf("failed to load trie, err: %w", err))
} }
for key, val := range dirties { var deletes []common.Hash
for key, val := range entries {
if len(val) == 0 { if len(val) == 0 {
tr.Delete(key.Bytes()) deletes = append(deletes, key)
} else { } else {
tr.Update(key.Bytes(), val) tr.Update(key.Bytes(), val)
} }
} }
for _, key := range deletes {
tr.Delete(key.Bytes())
}
return tr.Commit(false) return tr.Commit(false)
} }
@ -72,16 +77,18 @@ const (
createAccountOp int = iota createAccountOp int = iota
modifyAccountOp modifyAccountOp
deleteAccountOp deleteAccountOp
resurrectAccountOp
opLen opLen
) )
// genctx carries the generation context used within a single state transition.
type genctx struct { type genctx struct {
stateRoot common.Hash stateRoot common.Hash
accounts map[common.Hash][]byte // Keyed by the hash of account address accounts map[common.Hash][]byte // Keyed by the hash of account address
storages map[common.Hash]map[common.Hash][]byte // Keyed by the hash of account address and the hash of storage key storages map[common.Hash]map[common.Hash][]byte // Keyed by the hash of account address and the hash of storage key
accountOrigin map[common.Address][]byte // Keyed by the account address accountOrigin map[common.Address][]byte // Keyed by the account address
storageOrigin map[common.Address]map[common.Hash][]byte // Keyed by the account address and the hash of storage key storageOrigin map[common.Address]map[common.Hash][]byte // Keyed by the account address and the hash of storage key
nodes *trienode.MergedNodeSet nodes *trienode.MergedNodeSet // Trie nodes produced from the state transition
} }
func newCtx(stateRoot common.Hash) *genctx { func newCtx(stateRoot common.Hash) *genctx {
@ -123,20 +130,31 @@ type tester struct {
// state snapshots // state snapshots
snapAccounts map[common.Hash]map[common.Hash][]byte // Keyed by the hash of account address snapAccounts map[common.Hash]map[common.Hash][]byte // Keyed by the hash of account address
snapStorages map[common.Hash]map[common.Hash]map[common.Hash][]byte // Keyed by the hash of account address and the hash of storage key snapStorages map[common.Hash]map[common.Hash]map[common.Hash][]byte // Keyed by the hash of account address and the hash of storage key
// trienode snapshots
snapNodes map[common.Hash]*trienode.MergedNodeSet
} }
func newTester(t *testing.T, historyLimit uint64, isVerkle bool, layers int, enableIndex bool, journalDir string) *tester { type testerConfig struct {
stateHistory uint64
isVerkle bool
layers int
enableIndex bool
journalDir string
}
func newTester(t *testing.T, config *testerConfig) *tester {
var ( var (
disk, _ = rawdb.Open(rawdb.NewMemoryDatabase(), rawdb.OpenOptions{Ancient: t.TempDir()}) disk, _ = rawdb.Open(rawdb.NewMemoryDatabase(), rawdb.OpenOptions{Ancient: t.TempDir()})
db = New(disk, &Config{ db = New(disk, &Config{
StateHistory: historyLimit, StateHistory: config.stateHistory,
EnableStateIndexing: enableIndex, EnableStateIndexing: config.enableIndex,
TrieCleanSize: 256 * 1024, TrieCleanSize: 256 * 1024,
StateCleanSize: 256 * 1024, StateCleanSize: 256 * 1024,
WriteBufferSize: 256 * 1024, WriteBufferSize: 256 * 1024,
NoAsyncFlush: true, NoAsyncFlush: true,
JournalDirectory: journalDir, JournalDirectory: config.journalDir,
}, isVerkle) }, config.isVerkle)
obj = &tester{ obj = &tester{
db: db, db: db,
@ -145,9 +163,10 @@ func newTester(t *testing.T, historyLimit uint64, isVerkle bool, layers int, ena
storages: make(map[common.Hash]map[common.Hash][]byte), storages: make(map[common.Hash]map[common.Hash][]byte),
snapAccounts: make(map[common.Hash]map[common.Hash][]byte), snapAccounts: make(map[common.Hash]map[common.Hash][]byte),
snapStorages: make(map[common.Hash]map[common.Hash]map[common.Hash][]byte), snapStorages: make(map[common.Hash]map[common.Hash]map[common.Hash][]byte),
snapNodes: make(map[common.Hash]*trienode.MergedNodeSet),
} }
) )
for i := 0; i < layers; i++ { for i := 0; i < config.layers; i++ {
var parent = types.EmptyRootHash var parent = types.EmptyRootHash
if len(obj.roots) != 0 { if len(obj.roots) != 0 {
parent = obj.roots[len(obj.roots)-1] parent = obj.roots[len(obj.roots)-1]
@ -270,10 +289,53 @@ func (t *tester) clearStorage(ctx *genctx, addr common.Address, root common.Hash
return root return root
} }
// resurrectStorage rebuilds a non-empty storage trie for a previously
// deleted account: it inserts three brand-new slots and rewrites up to
// three of the account's old slots with fresh values, recording the
// original value of every mutated slot so the state history can be
// verified later. It returns the new storage root.
func (t *tester) resurrectStorage(ctx *genctx, addr common.Address, old map[common.Hash][]byte) common.Hash {
	var (
		addrHash = crypto.Keccak256Hash(addr.Bytes())
		storage  = make(map[common.Hash][]byte)
		origin   = make(map[common.Hash][]byte)
	)
	// Insert three fresh slots; their original value is nil (non-existent).
	for i := 0; i < 3; i++ {
		v, _ := rlp.EncodeToBytes(common.TrimLeftZeroes(testrand.Bytes(32)))
		key := testrand.Bytes(32)
		hash := crypto.Keccak256Hash(key)
		t.preimages[hash] = key
		storage[hash] = v
		origin[hash] = nil
	}
	// Rewrite up to three of the previously existing slots, remembering
	// their pre-deletion values as the origins.
	var cnt int
	for khash := range old {
		cnt += 1
		v, _ := rlp.EncodeToBytes(common.TrimLeftZeroes(testrand.Bytes(32)))
		storage[khash] = v
		origin[khash] = old[khash]
		if cnt >= 3 {
			break
		}
	}
	root, set := updateTrie(t.db, ctx.stateRoot, addrHash, types.EmptyRootHash, storage)

	// Guard against a nil destination map: maps.Copy panics when copying a
	// non-empty source into a nil map, and this account may not have been
	// touched earlier in the current transition.
	if ctx.storages[addrHash] == nil {
		ctx.storages[addrHash] = make(map[common.Hash][]byte)
	}
	maps.Copy(ctx.storages[addrHash], storage)

	if ctx.storageOrigin[addr] == nil {
		ctx.storageOrigin[addr] = make(map[common.Hash][]byte)
	}
	// Only record an origin for slots not already tracked within this
	// transition; the first recorded origin per slot must win.
	for k, v := range origin {
		if _, exists := ctx.storageOrigin[addr][k]; !exists {
			ctx.storageOrigin[addr][k] = v
		}
	}
	// Surface merge failures loudly instead of dropping them silently,
	// consistent with the panic-on-error style of updateTrie.
	if err := ctx.nodes.Merge(set); err != nil {
		panic(fmt.Errorf("failed to merge node set, err: %w", err))
	}
	return root
}
func (t *tester) generate(parent common.Hash, rawStorageKey bool) (common.Hash, *trienode.MergedNodeSet, *StateSetWithOrigin) { func (t *tester) generate(parent common.Hash, rawStorageKey bool) (common.Hash, *trienode.MergedNodeSet, *StateSetWithOrigin) {
var ( var (
ctx = newCtx(parent) ctx = newCtx(parent)
dirties = make(map[common.Hash]struct{}) dirties = make(map[common.Hash]struct{})
deleted = make(map[common.Address]struct{})
resurrect = make(map[common.Address]struct{})
) )
for i := 0; i < 20; i++ { for i := 0; i < 20; i++ {
// Start with account creation always // Start with account creation always
@ -336,6 +398,7 @@ func (t *tester) generate(parent common.Hash, rawStorageKey bool) (common.Hash,
continue continue
} }
dirties[addrHash] = struct{}{} dirties[addrHash] = struct{}{}
deleted[addr] = struct{}{}
acct, _ := types.FullAccount(account) acct, _ := types.FullAccount(account)
if acct.Root != types.EmptyRootHash { if acct.Root != types.EmptyRootHash {
@ -343,6 +406,25 @@ func (t *tester) generate(parent common.Hash, rawStorageKey bool) (common.Hash,
} }
ctx.accounts[addrHash] = nil ctx.accounts[addrHash] = nil
ctx.accountOrigin[addr] = account ctx.accountOrigin[addr] = account
case resurrectAccountOp:
if len(deleted) == 0 {
continue
}
addresses := maps.Keys(deleted)
addr := addresses[rand.Intn(len(addresses))]
if _, exist := resurrect[addr]; exist {
continue
}
resurrect[addr] = struct{}{}
addrHash := crypto.Keccak256Hash(addr.Bytes())
root := t.resurrectStorage(ctx, addr, t.storages[addrHash])
ctx.accounts[addrHash] = types.SlimAccountRLP(generateAccount(root))
if _, exist := ctx.accountOrigin[addr]; !exist {
ctx.accountOrigin[addr] = nil
}
t.preimages[addrHash] = addr.Bytes()
} }
} }
root, set := updateTrie(t.db, parent, common.Hash{}, parent, ctx.accounts) root, set := updateTrie(t.db, parent, common.Hash{}, parent, ctx.accounts)
@ -351,6 +433,7 @@ func (t *tester) generate(parent common.Hash, rawStorageKey bool) (common.Hash,
// Save state snapshot before commit // Save state snapshot before commit
t.snapAccounts[parent] = copyAccounts(t.accounts) t.snapAccounts[parent] = copyAccounts(t.accounts)
t.snapStorages[parent] = copyStorages(t.storages) t.snapStorages[parent] = copyStorages(t.storages)
t.snapNodes[parent] = ctx.nodes
// Commit all changes to live state set // Commit all changes to live state set
for addrHash, account := range ctx.accounts { for addrHash, account := range ctx.accounts {
@ -470,8 +553,7 @@ func TestDatabaseRollback(t *testing.T) {
maxDiffLayers = 128 maxDiffLayers = 128
}() }()
// Verify state histories tester := newTester(t, &testerConfig{layers: 32})
tester := newTester(t, 0, false, 32, false, "")
defer tester.release() defer tester.release()
if err := tester.verifyHistory(); err != nil { if err := tester.verifyHistory(); err != nil {
@ -505,7 +587,7 @@ func TestDatabaseRecoverable(t *testing.T) {
}() }()
var ( var (
tester = newTester(t, 0, false, 12, false, "") tester = newTester(t, &testerConfig{layers: 12})
index = tester.bottomIndex() index = tester.bottomIndex()
) )
defer tester.release() defer tester.release()
@ -526,7 +608,7 @@ func TestDatabaseRecoverable(t *testing.T) {
// Layers below current disk layer are recoverable // Layers below current disk layer are recoverable
{tester.roots[index-1], true}, {tester.roots[index-1], true},
// Disklayer itself is not recoverable, since it's // Disk layer itself is not recoverable, since it's
// available for accessing. // available for accessing.
{tester.roots[index], false}, {tester.roots[index], false},
@ -542,6 +624,59 @@ func TestDatabaseRecoverable(t *testing.T) {
} }
} }
// TestExecuteRollback unwinds the database layer by layer and checks that
// the node set produced by replaying each state history matches the dirty
// node origins tracked at generation time.
func TestExecuteRollback(t *testing.T) {
	// Shrink the diff layer allowance so the disk layer is exercised quickly.
	maxDiffLayers = 4
	defer func() {
		maxDiffLayers = 128
	}()

	tester := newTester(t, &testerConfig{layers: 32})
	defer tester.release()

	// Revert database from top to bottom
	for i := tester.bottomIndex(); i >= 0; i-- {
		bottom := tester.db.tree.bottom()
		h, err := readStateHistory(tester.db.stateFreezer, bottom.stateID())
		if err != nil {
			t.Fatalf("Failed to read history, err: %v", err)
		}
		nodes, err := apply(tester.db, h.meta.parent, h.meta.root, h.meta.version == stateHistoryV1, h.accounts, h.storages)
		if err != nil {
			t.Fatalf("Failed to apply history, err: %v", err)
		}
		// Verify the produced node set, ensuring they are aligned with the
		// tracked dirty nodes.
		want := tester.snapNodes[h.meta.parent]
		if len(nodes) != len(want.Sets) {
			t.Fatalf("Unexpected node sets, want: %d, got: %d", len(want.Sets), len(nodes))
		}
		for owner, got := range nodes {
			expected, ok := want.Sets[owner]
			if !ok {
				t.Fatalf("Excessive nodeset, %x", owner)
			}
			if len(got) != len(expected.Origins) {
				t.Fatalf("Unexpected origins, want: %d, got: %d", len(got), len(expected.Origins))
			}
			for path, node := range got {
				orig, ok := expected.Origins[path]
				if !ok {
					t.Fatalf("Excessive node, %v", []byte(path))
				}
				if !bytes.Equal(node.Blob, orig) {
					t.Fatalf("Unexpected node value, want: %v, got: %v", node.Blob, orig)
				}
			}
		}
		if err := tester.db.Recover(h.meta.parent); err != nil {
			t.Fatalf("Failed to recover db, err: %v", err)
		}
	}
}
func TestDisable(t *testing.T) { func TestDisable(t *testing.T) {
// Redefine the diff layer depth allowance for faster testing. // Redefine the diff layer depth allowance for faster testing.
maxDiffLayers = 4 maxDiffLayers = 4
@ -549,7 +684,7 @@ func TestDisable(t *testing.T) {
maxDiffLayers = 128 maxDiffLayers = 128
}() }()
tester := newTester(t, 0, false, 32, false, "") tester := newTester(t, &testerConfig{layers: 32})
defer tester.release() defer tester.release()
stored := crypto.Keccak256Hash(rawdb.ReadAccountTrieNode(tester.db.diskdb, nil)) stored := crypto.Keccak256Hash(rawdb.ReadAccountTrieNode(tester.db.diskdb, nil))
@ -563,10 +698,6 @@ func TestDisable(t *testing.T) {
t.Fatalf("Failed to activate database: %v", err) t.Fatalf("Failed to activate database: %v", err)
} }
// Ensure journal is deleted from disk
if blob := rawdb.ReadTrieJournal(tester.db.diskdb); len(blob) != 0 {
t.Fatal("Failed to clean journal")
}
// Ensure all trie histories are removed // Ensure all trie histories are removed
n, err := tester.db.stateFreezer.Ancients() n, err := tester.db.stateFreezer.Ancients()
if err != nil { if err != nil {
@ -591,7 +722,7 @@ func TestCommit(t *testing.T) {
maxDiffLayers = 128 maxDiffLayers = 128
}() }()
tester := newTester(t, 0, false, 12, false, "") tester := newTester(t, &testerConfig{layers: 12})
defer tester.release() defer tester.release()
if err := tester.db.Commit(tester.lastHash(), false); err != nil { if err := tester.db.Commit(tester.lastHash(), false); err != nil {
@ -626,7 +757,7 @@ func testJournal(t *testing.T, journalDir string) {
maxDiffLayers = 128 maxDiffLayers = 128
}() }()
tester := newTester(t, 0, false, 12, false, journalDir) tester := newTester(t, &testerConfig{layers: 12, journalDir: journalDir})
defer tester.release() defer tester.release()
if err := tester.db.Journal(tester.lastHash()); err != nil { if err := tester.db.Journal(tester.lastHash()); err != nil {
@ -673,7 +804,7 @@ func testCorruptedJournal(t *testing.T, journalDir string, modifyFn func(databas
maxDiffLayers = 128 maxDiffLayers = 128
}() }()
tester := newTester(t, 0, false, 12, false, journalDir) tester := newTester(t, &testerConfig{layers: 12, journalDir: journalDir})
defer tester.release() defer tester.release()
if err := tester.db.Journal(tester.lastHash()); err != nil { if err := tester.db.Journal(tester.lastHash()); err != nil {
@ -718,7 +849,7 @@ func TestTailTruncateHistory(t *testing.T) {
maxDiffLayers = 128 maxDiffLayers = 128
}() }()
tester := newTester(t, 10, false, 12, false, "") tester := newTester(t, &testerConfig{layers: 12, stateHistory: 10})
defer tester.release() defer tester.release()
tester.db.Close() tester.db.Close()

View file

@ -34,7 +34,7 @@ type diffLayer struct {
root common.Hash // Root hash to which this layer diff belongs to root common.Hash // Root hash to which this layer diff belongs to
id uint64 // Corresponding state id id uint64 // Corresponding state id
block uint64 // Associated block number block uint64 // Associated block number
nodes *nodeSet // Cached trie nodes indexed by owner and path nodes *nodeSetWithOrigin // Cached trie nodes indexed by owner and path
states *StateSetWithOrigin // Associated state changes along with origin value states *StateSetWithOrigin // Associated state changes along with origin value
parent layer // Parent layer modified by this one, never nil, **can be changed** parent layer // Parent layer modified by this one, never nil, **can be changed**
@ -42,7 +42,7 @@ type diffLayer struct {
} }
// newDiffLayer creates a new diff layer on top of an existing layer. // newDiffLayer creates a new diff layer on top of an existing layer.
func newDiffLayer(parent layer, root common.Hash, id uint64, block uint64, nodes *nodeSet, states *StateSetWithOrigin) *diffLayer { func newDiffLayer(parent layer, root common.Hash, id uint64, block uint64, nodes *nodeSetWithOrigin, states *StateSetWithOrigin) *diffLayer {
dl := &diffLayer{ dl := &diffLayer{
root: root, root: root,
id: id, id: id,
@ -151,7 +151,7 @@ func (dl *diffLayer) storage(accountHash, storageHash common.Hash, depth int) ([
// update implements the layer interface, creating a new layer on top of the // update implements the layer interface, creating a new layer on top of the
// existing layer tree with the specified data items. // existing layer tree with the specified data items.
func (dl *diffLayer) update(root common.Hash, id uint64, block uint64, nodes *nodeSet, states *StateSetWithOrigin) *diffLayer { func (dl *diffLayer) update(root common.Hash, id uint64, block uint64, nodes *nodeSetWithOrigin, states *StateSetWithOrigin) *diffLayer {
return newDiffLayer(dl, root, id, block, nodes, states) return newDiffLayer(dl, root, id, block, nodes, states)
} }

View file

@ -76,7 +76,7 @@ func benchmarkSearch(b *testing.B, depth int, total int) {
nblob = common.CopyBytes(blob) nblob = common.CopyBytes(blob)
} }
} }
return newDiffLayer(parent, common.Hash{}, 0, 0, newNodeSet(nodes), NewStateSetWithOrigin(nil, nil, nil, nil, false)) return newDiffLayer(parent, common.Hash{}, 0, 0, NewNodeSetWithOrigin(nodes, nil), NewStateSetWithOrigin(nil, nil, nil, nil, false))
} }
var layer layer var layer layer
layer = emptyLayer() layer = emptyLayer()
@ -118,7 +118,7 @@ func BenchmarkPersist(b *testing.B) {
) )
nodes[common.Hash{}][string(path)] = node nodes[common.Hash{}][string(path)] = node
} }
return newDiffLayer(parent, common.Hash{}, 0, 0, newNodeSet(nodes), NewStateSetWithOrigin(nil, nil, nil, nil, false)) return newDiffLayer(parent, common.Hash{}, 0, 0, NewNodeSetWithOrigin(nodes, nil), NewStateSetWithOrigin(nil, nil, nil, nil, false))
} }
for i := 0; i < b.N; i++ { for i := 0; i < b.N; i++ {
b.StopTimer() b.StopTimer()
@ -156,7 +156,7 @@ func BenchmarkJournal(b *testing.B) {
) )
nodes[common.Hash{}][string(path)] = node nodes[common.Hash{}][string(path)] = node
} }
return newDiffLayer(parent, common.Hash{}, 0, 0, newNodeSet(nodes), NewStateSetWithOrigin(nil, nil, nil, nil, false)) return newDiffLayer(parent, common.Hash{}, 0, 0, NewNodeSetWithOrigin(nodes, nil), NewStateSetWithOrigin(nil, nil, nil, nil, false))
} }
var layer layer var layer layer
layer = emptyLayer() layer = emptyLayer()

View file

@ -319,7 +319,7 @@ func (dl *diskLayer) storage(accountHash, storageHash common.Hash, depth int) ([
// update implements the layer interface, returning a new diff layer on top // update implements the layer interface, returning a new diff layer on top
// with the given state set. // with the given state set.
func (dl *diskLayer) update(root common.Hash, id uint64, block uint64, nodes *nodeSet, states *StateSetWithOrigin) *diffLayer { func (dl *diskLayer) update(root common.Hash, id uint64, block uint64, nodes *nodeSetWithOrigin, states *StateSetWithOrigin) *diffLayer {
return newDiffLayer(dl, root, id, block, nodes, states) return newDiffLayer(dl, root, id, block, nodes, states)
} }
@ -413,7 +413,7 @@ func (dl *diskLayer) commit(bottom *diffLayer, force bool) (*diskLayer, error) {
// Merge the trie nodes and flat states of the bottom-most diff layer into the // Merge the trie nodes and flat states of the bottom-most diff layer into the
// buffer as the combined layer. // buffer as the combined layer.
combined := dl.buffer.commit(bottom.nodes, bottom.states.stateSet) combined := dl.buffer.commit(bottom.nodes.nodeSet, bottom.states.stateSet)
// Terminate the background state snapshot generation before mutating the // Terminate the background state snapshot generation before mutating the
// persistent state. // persistent state.

View file

@ -59,13 +59,19 @@ func apply(db database.NodeDatabase, prevRoot common.Hash, postRoot common.Hash,
rawStorageKey: rawStorageKey, rawStorageKey: rawStorageKey,
nodes: trienode.NewMergedNodeSet(), nodes: trienode.NewMergedNodeSet(),
} }
var deletes []common.Address
for addr, account := range accounts { for addr, account := range accounts {
var err error
if len(account) == 0 { if len(account) == 0 {
err = deleteAccount(ctx, db, addr) deletes = append(deletes, addr)
} else { } else {
err = updateAccount(ctx, db, addr) err := updateAccount(ctx, db, addr)
if err != nil {
return nil, fmt.Errorf("failed to revert state, err: %w", err)
}
} }
}
for _, addr := range deletes {
err := deleteAccount(ctx, db, addr)
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to revert state, err: %w", err) return nil, fmt.Errorf("failed to revert state, err: %w", err)
} }
@ -77,7 +83,7 @@ func apply(db database.NodeDatabase, prevRoot common.Hash, postRoot common.Hash,
if err := ctx.nodes.Merge(result); err != nil { if err := ctx.nodes.Merge(result); err != nil {
return nil, err return nil, err
} }
return ctx.nodes.Flatten(), nil return ctx.nodes.Nodes(), nil
} }
// updateAccount the account was present in prev-state, and may or may not // updateAccount the account was present in prev-state, and may or may not
@ -108,17 +114,23 @@ func updateAccount(ctx *context, db database.NodeDatabase, addr common.Address)
if err != nil { if err != nil {
return err return err
} }
var deletes []common.Hash
for key, val := range ctx.storages[addr] { for key, val := range ctx.storages[addr] {
tkey := key tkey := key
if ctx.rawStorageKey { if ctx.rawStorageKey {
tkey = crypto.Keccak256Hash(key.Bytes()) tkey = crypto.Keccak256Hash(key.Bytes())
} }
var err error
if len(val) == 0 { if len(val) == 0 {
err = st.Delete(tkey.Bytes()) deletes = append(deletes, tkey)
} else { } else {
err = st.Update(tkey.Bytes(), val) err := st.Update(tkey.Bytes(), val)
if err != nil {
return err
}
} }
}
for _, tkey := range deletes {
err := st.Delete(tkey.Bytes())
if err != nil { if err != nil {
return err return err
} }

View file

@ -144,8 +144,7 @@ func testHistoryReader(t *testing.T, historyLimit uint64) {
maxDiffLayers = 128 maxDiffLayers = 128
}() }()
// log.SetDefault(log.NewLogger(log.NewTerminalHandlerWithLevel(os.Stderr, log.LevelDebug, true))) env := newTester(t, &testerConfig{stateHistory: historyLimit, layers: 64, enableIndex: true})
env := newTester(t, historyLimit, false, 64, true, "")
defer env.release() defer env.release()
waitIndexing(env.db) waitIndexing(env.db)
@ -184,7 +183,8 @@ func TestHistoricalStateReader(t *testing.T) {
}() }()
//log.SetDefault(log.NewLogger(log.NewTerminalHandlerWithLevel(os.Stderr, log.LevelDebug, true))) //log.SetDefault(log.NewLogger(log.NewTerminalHandlerWithLevel(os.Stderr, log.LevelDebug, true)))
env := newTester(t, 0, false, 64, true, "") config := &testerConfig{stateHistory: 0, layers: 64, enableIndex: true}
env := newTester(t, config)
defer env.release() defer env.release()
waitIndexing(env.db) waitIndexing(env.db)

View file

@ -229,7 +229,7 @@ func (db *Database) loadDiffLayer(parent layer, r *rlp.Stream) (layer, error) {
return nil, fmt.Errorf("load block number: %v", err) return nil, fmt.Errorf("load block number: %v", err)
} }
// Read in-memory trie nodes from journal // Read in-memory trie nodes from journal
var nodes nodeSet var nodes nodeSetWithOrigin
if err := nodes.decode(r); err != nil { if err := nodes.decode(r); err != nil {
return nil, err return nil, err
} }

View file

@ -22,7 +22,6 @@ import (
"sync" "sync"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/trie/trienode"
) )
// layerTree is a group of state layers identified by the state root. // layerTree is a group of state layers identified by the state root.
@ -142,7 +141,7 @@ func (tree *layerTree) len() int {
} }
// add inserts a new layer into the tree if it can be linked to an existing old parent. // add inserts a new layer into the tree if it can be linked to an existing old parent.
func (tree *layerTree) add(root common.Hash, parentRoot common.Hash, block uint64, nodes *trienode.MergedNodeSet, states *StateSetWithOrigin) error { func (tree *layerTree) add(root common.Hash, parentRoot common.Hash, block uint64, nodes *nodeSetWithOrigin, states *StateSetWithOrigin) error {
// Reject noop updates to avoid self-loops. This is a special case that can // Reject noop updates to avoid self-loops. This is a special case that can
// happen for clique networks and proof-of-stake networks where empty blocks // happen for clique networks and proof-of-stake networks where empty blocks
// don't modify the state (0 block subsidy). // don't modify the state (0 block subsidy).
@ -156,7 +155,7 @@ func (tree *layerTree) add(root common.Hash, parentRoot common.Hash, block uint6
if parent == nil { if parent == nil {
return fmt.Errorf("triedb parent [%#x] layer missing", parentRoot) return fmt.Errorf("triedb parent [%#x] layer missing", parentRoot)
} }
l := parent.update(root, parent.stateID()+1, block, newNodeSet(nodes.Flatten()), states) l := parent.update(root, parent.stateID()+1, block, nodes, states)
tree.lock.Lock() tree.lock.Lock()
defer tree.lock.Unlock() defer tree.lock.Unlock()

View file

@ -22,7 +22,6 @@ import (
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/trie/trienode"
) )
func newTestLayerTree() *layerTree { func newTestLayerTree() *layerTree {
@ -45,9 +44,9 @@ func TestLayerCap(t *testing.T) {
// C1->C2->C3->C4 (HEAD) // C1->C2->C3->C4 (HEAD)
init: func() *layerTree { init: func() *layerTree {
tr := newTestLayerTree() tr := newTestLayerTree()
tr.add(common.Hash{0x2}, common.Hash{0x1}, 1, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(nil, nil, nil, nil, false)) tr.add(common.Hash{0x2}, common.Hash{0x1}, 1, NewNodeSetWithOrigin(nil, nil), NewStateSetWithOrigin(nil, nil, nil, nil, false))
tr.add(common.Hash{0x3}, common.Hash{0x2}, 2, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(nil, nil, nil, nil, false)) tr.add(common.Hash{0x3}, common.Hash{0x2}, 2, NewNodeSetWithOrigin(nil, nil), NewStateSetWithOrigin(nil, nil, nil, nil, false))
tr.add(common.Hash{0x4}, common.Hash{0x3}, 3, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(nil, nil, nil, nil, false)) tr.add(common.Hash{0x4}, common.Hash{0x3}, 3, NewNodeSetWithOrigin(nil, nil), NewStateSetWithOrigin(nil, nil, nil, nil, false))
return tr return tr
}, },
// Chain: // Chain:
@ -66,9 +65,9 @@ func TestLayerCap(t *testing.T) {
// C1->C2->C3->C4 (HEAD) // C1->C2->C3->C4 (HEAD)
init: func() *layerTree { init: func() *layerTree {
tr := newTestLayerTree() tr := newTestLayerTree()
tr.add(common.Hash{0x2}, common.Hash{0x1}, 1, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(nil, nil, nil, nil, false)) tr.add(common.Hash{0x2}, common.Hash{0x1}, 1, NewNodeSetWithOrigin(nil, nil), NewStateSetWithOrigin(nil, nil, nil, nil, false))
tr.add(common.Hash{0x3}, common.Hash{0x2}, 2, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(nil, nil, nil, nil, false)) tr.add(common.Hash{0x3}, common.Hash{0x2}, 2, NewNodeSetWithOrigin(nil, nil), NewStateSetWithOrigin(nil, nil, nil, nil, false))
tr.add(common.Hash{0x4}, common.Hash{0x3}, 3, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(nil, nil, nil, nil, false)) tr.add(common.Hash{0x4}, common.Hash{0x3}, 3, NewNodeSetWithOrigin(nil, nil), NewStateSetWithOrigin(nil, nil, nil, nil, false))
return tr return tr
}, },
// Chain: // Chain:
@ -86,9 +85,9 @@ func TestLayerCap(t *testing.T) {
// C1->C2->C3->C4 (HEAD) // C1->C2->C3->C4 (HEAD)
init: func() *layerTree { init: func() *layerTree {
tr := newTestLayerTree() tr := newTestLayerTree()
tr.add(common.Hash{0x2}, common.Hash{0x1}, 1, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(nil, nil, nil, nil, false)) tr.add(common.Hash{0x2}, common.Hash{0x1}, 1, NewNodeSetWithOrigin(nil, nil), NewStateSetWithOrigin(nil, nil, nil, nil, false))
tr.add(common.Hash{0x3}, common.Hash{0x2}, 2, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(nil, nil, nil, nil, false)) tr.add(common.Hash{0x3}, common.Hash{0x2}, 2, NewNodeSetWithOrigin(nil, nil), NewStateSetWithOrigin(nil, nil, nil, nil, false))
tr.add(common.Hash{0x4}, common.Hash{0x3}, 3, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(nil, nil, nil, nil, false)) tr.add(common.Hash{0x4}, common.Hash{0x3}, 3, NewNodeSetWithOrigin(nil, nil), NewStateSetWithOrigin(nil, nil, nil, nil, false))
return tr return tr
}, },
// Chain: // Chain:
@ -106,12 +105,12 @@ func TestLayerCap(t *testing.T) {
// ->C2'->C3'->C4' // ->C2'->C3'->C4'
init: func() *layerTree { init: func() *layerTree {
tr := newTestLayerTree() tr := newTestLayerTree()
tr.add(common.Hash{0x2a}, common.Hash{0x1}, 1, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(nil, nil, nil, nil, false)) tr.add(common.Hash{0x2a}, common.Hash{0x1}, 1, NewNodeSetWithOrigin(nil, nil), NewStateSetWithOrigin(nil, nil, nil, nil, false))
tr.add(common.Hash{0x3a}, common.Hash{0x2a}, 2, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(nil, nil, nil, nil, false)) tr.add(common.Hash{0x3a}, common.Hash{0x2a}, 2, NewNodeSetWithOrigin(nil, nil), NewStateSetWithOrigin(nil, nil, nil, nil, false))
tr.add(common.Hash{0x4a}, common.Hash{0x3a}, 3, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(nil, nil, nil, nil, false)) tr.add(common.Hash{0x4a}, common.Hash{0x3a}, 3, NewNodeSetWithOrigin(nil, nil), NewStateSetWithOrigin(nil, nil, nil, nil, false))
tr.add(common.Hash{0x2b}, common.Hash{0x1}, 1, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(nil, nil, nil, nil, false)) tr.add(common.Hash{0x2b}, common.Hash{0x1}, 1, NewNodeSetWithOrigin(nil, nil), NewStateSetWithOrigin(nil, nil, nil, nil, false))
tr.add(common.Hash{0x3b}, common.Hash{0x2b}, 2, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(nil, nil, nil, nil, false)) tr.add(common.Hash{0x3b}, common.Hash{0x2b}, 2, NewNodeSetWithOrigin(nil, nil), NewStateSetWithOrigin(nil, nil, nil, nil, false))
tr.add(common.Hash{0x4b}, common.Hash{0x3b}, 3, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(nil, nil, nil, nil, false)) tr.add(common.Hash{0x4b}, common.Hash{0x3b}, 3, NewNodeSetWithOrigin(nil, nil), NewStateSetWithOrigin(nil, nil, nil, nil, false))
return tr return tr
}, },
// Chain: // Chain:
@ -131,12 +130,12 @@ func TestLayerCap(t *testing.T) {
// ->C2'->C3'->C4' // ->C2'->C3'->C4'
init: func() *layerTree { init: func() *layerTree {
tr := newTestLayerTree() tr := newTestLayerTree()
tr.add(common.Hash{0x2a}, common.Hash{0x1}, 1, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(nil, nil, nil, nil, false)) tr.add(common.Hash{0x2a}, common.Hash{0x1}, 1, NewNodeSetWithOrigin(nil, nil), NewStateSetWithOrigin(nil, nil, nil, nil, false))
tr.add(common.Hash{0x3a}, common.Hash{0x2a}, 2, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(nil, nil, nil, nil, false)) tr.add(common.Hash{0x3a}, common.Hash{0x2a}, 2, NewNodeSetWithOrigin(nil, nil), NewStateSetWithOrigin(nil, nil, nil, nil, false))
tr.add(common.Hash{0x4a}, common.Hash{0x3a}, 3, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(nil, nil, nil, nil, false)) tr.add(common.Hash{0x4a}, common.Hash{0x3a}, 3, NewNodeSetWithOrigin(nil, nil), NewStateSetWithOrigin(nil, nil, nil, nil, false))
tr.add(common.Hash{0x2b}, common.Hash{0x1}, 1, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(nil, nil, nil, nil, false)) tr.add(common.Hash{0x2b}, common.Hash{0x1}, 1, NewNodeSetWithOrigin(nil, nil), NewStateSetWithOrigin(nil, nil, nil, nil, false))
tr.add(common.Hash{0x3b}, common.Hash{0x2b}, 2, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(nil, nil, nil, nil, false)) tr.add(common.Hash{0x3b}, common.Hash{0x2b}, 2, NewNodeSetWithOrigin(nil, nil), NewStateSetWithOrigin(nil, nil, nil, nil, false))
tr.add(common.Hash{0x4b}, common.Hash{0x3b}, 3, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(nil, nil, nil, nil, false)) tr.add(common.Hash{0x4b}, common.Hash{0x3b}, 3, NewNodeSetWithOrigin(nil, nil), NewStateSetWithOrigin(nil, nil, nil, nil, false))
return tr return tr
}, },
// Chain: // Chain:
@ -155,11 +154,11 @@ func TestLayerCap(t *testing.T) {
// ->C3'->C4' // ->C3'->C4'
init: func() *layerTree { init: func() *layerTree {
tr := newTestLayerTree() tr := newTestLayerTree()
tr.add(common.Hash{0x2}, common.Hash{0x1}, 1, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(nil, nil, nil, nil, false)) tr.add(common.Hash{0x2}, common.Hash{0x1}, 1, NewNodeSetWithOrigin(nil, nil), NewStateSetWithOrigin(nil, nil, nil, nil, false))
tr.add(common.Hash{0x3a}, common.Hash{0x2}, 2, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(nil, nil, nil, nil, false)) tr.add(common.Hash{0x3a}, common.Hash{0x2}, 2, NewNodeSetWithOrigin(nil, nil), NewStateSetWithOrigin(nil, nil, nil, nil, false))
tr.add(common.Hash{0x4a}, common.Hash{0x3a}, 3, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(nil, nil, nil, nil, false)) tr.add(common.Hash{0x4a}, common.Hash{0x3a}, 3, NewNodeSetWithOrigin(nil, nil), NewStateSetWithOrigin(nil, nil, nil, nil, false))
tr.add(common.Hash{0x3b}, common.Hash{0x2}, 2, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(nil, nil, nil, nil, false)) tr.add(common.Hash{0x3b}, common.Hash{0x2}, 2, NewNodeSetWithOrigin(nil, nil), NewStateSetWithOrigin(nil, nil, nil, nil, false))
tr.add(common.Hash{0x4b}, common.Hash{0x3b}, 3, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(nil, nil, nil, nil, false)) tr.add(common.Hash{0x4b}, common.Hash{0x3b}, 3, NewNodeSetWithOrigin(nil, nil), NewStateSetWithOrigin(nil, nil, nil, nil, false))
return tr return tr
}, },
// Chain: // Chain:
@ -213,8 +212,8 @@ func TestBaseLayer(t *testing.T) {
// C1->C2->C3 (HEAD) // C1->C2->C3 (HEAD)
{ {
func() { func() {
tr.add(common.Hash{0x2}, common.Hash{0x1}, 1, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(nil, nil, nil, nil, false)) tr.add(common.Hash{0x2}, common.Hash{0x1}, 1, NewNodeSetWithOrigin(nil, nil), NewStateSetWithOrigin(nil, nil, nil, nil, false))
tr.add(common.Hash{0x3}, common.Hash{0x2}, 2, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(nil, nil, nil, nil, false)) tr.add(common.Hash{0x3}, common.Hash{0x2}, 2, NewNodeSetWithOrigin(nil, nil), NewStateSetWithOrigin(nil, nil, nil, nil, false))
}, },
common.Hash{0x1}, common.Hash{0x1},
}, },
@ -230,9 +229,9 @@ func TestBaseLayer(t *testing.T) {
// C4->C5->C6 (HEAD) // C4->C5->C6 (HEAD)
{ {
func() { func() {
tr.add(common.Hash{0x4}, common.Hash{0x3}, 3, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(nil, nil, nil, nil, false)) tr.add(common.Hash{0x4}, common.Hash{0x3}, 3, NewNodeSetWithOrigin(nil, nil), NewStateSetWithOrigin(nil, nil, nil, nil, false))
tr.add(common.Hash{0x5}, common.Hash{0x4}, 4, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(nil, nil, nil, nil, false)) tr.add(common.Hash{0x5}, common.Hash{0x4}, 4, NewNodeSetWithOrigin(nil, nil), NewStateSetWithOrigin(nil, nil, nil, nil, false))
tr.add(common.Hash{0x6}, common.Hash{0x5}, 5, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(nil, nil, nil, nil, false)) tr.add(common.Hash{0x6}, common.Hash{0x5}, 5, NewNodeSetWithOrigin(nil, nil), NewStateSetWithOrigin(nil, nil, nil, nil, false))
tr.cap(common.Hash{0x6}, 2) tr.cap(common.Hash{0x6}, 2)
}, },
common.Hash{0x4}, common.Hash{0x4},
@ -258,7 +257,7 @@ func TestDescendant(t *testing.T) {
// C1->C2 (HEAD) // C1->C2 (HEAD)
init: func() *layerTree { init: func() *layerTree {
tr := newTestLayerTree() tr := newTestLayerTree()
tr.add(common.Hash{0x2}, common.Hash{0x1}, 1, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(nil, nil, nil, nil, false)) tr.add(common.Hash{0x2}, common.Hash{0x1}, 1, NewNodeSetWithOrigin(nil, nil), NewStateSetWithOrigin(nil, nil, nil, nil, false))
return tr return tr
}, },
snapshotA: map[common.Hash]map[common.Hash]struct{}{ snapshotA: map[common.Hash]map[common.Hash]struct{}{
@ -269,7 +268,7 @@ func TestDescendant(t *testing.T) {
// Chain: // Chain:
// C1->C2->C3 (HEAD) // C1->C2->C3 (HEAD)
op: func(tr *layerTree) { op: func(tr *layerTree) {
tr.add(common.Hash{0x3}, common.Hash{0x2}, 2, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(nil, nil, nil, nil, false)) tr.add(common.Hash{0x3}, common.Hash{0x2}, 2, NewNodeSetWithOrigin(nil, nil), NewStateSetWithOrigin(nil, nil, nil, nil, false))
}, },
snapshotB: map[common.Hash]map[common.Hash]struct{}{ snapshotB: map[common.Hash]map[common.Hash]struct{}{
common.Hash{0x1}: { common.Hash{0x1}: {
@ -286,9 +285,9 @@ func TestDescendant(t *testing.T) {
// C1->C2->C3->C4 (HEAD) // C1->C2->C3->C4 (HEAD)
init: func() *layerTree { init: func() *layerTree {
tr := newTestLayerTree() tr := newTestLayerTree()
tr.add(common.Hash{0x2}, common.Hash{0x1}, 1, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(nil, nil, nil, nil, false)) tr.add(common.Hash{0x2}, common.Hash{0x1}, 1, NewNodeSetWithOrigin(nil, nil), NewStateSetWithOrigin(nil, nil, nil, nil, false))
tr.add(common.Hash{0x3}, common.Hash{0x2}, 2, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(nil, nil, nil, nil, false)) tr.add(common.Hash{0x3}, common.Hash{0x2}, 2, NewNodeSetWithOrigin(nil, nil), NewStateSetWithOrigin(nil, nil, nil, nil, false))
tr.add(common.Hash{0x4}, common.Hash{0x3}, 3, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(nil, nil, nil, nil, false)) tr.add(common.Hash{0x4}, common.Hash{0x3}, 3, NewNodeSetWithOrigin(nil, nil), NewStateSetWithOrigin(nil, nil, nil, nil, false))
return tr return tr
}, },
snapshotA: map[common.Hash]map[common.Hash]struct{}{ snapshotA: map[common.Hash]map[common.Hash]struct{}{
@ -325,9 +324,9 @@ func TestDescendant(t *testing.T) {
// C1->C2->C3->C4 (HEAD) // C1->C2->C3->C4 (HEAD)
init: func() *layerTree { init: func() *layerTree {
tr := newTestLayerTree() tr := newTestLayerTree()
tr.add(common.Hash{0x2}, common.Hash{0x1}, 1, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(nil, nil, nil, nil, false)) tr.add(common.Hash{0x2}, common.Hash{0x1}, 1, NewNodeSetWithOrigin(nil, nil), NewStateSetWithOrigin(nil, nil, nil, nil, false))
tr.add(common.Hash{0x3}, common.Hash{0x2}, 2, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(nil, nil, nil, nil, false)) tr.add(common.Hash{0x3}, common.Hash{0x2}, 2, NewNodeSetWithOrigin(nil, nil), NewStateSetWithOrigin(nil, nil, nil, nil, false))
tr.add(common.Hash{0x4}, common.Hash{0x3}, 3, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(nil, nil, nil, nil, false)) tr.add(common.Hash{0x4}, common.Hash{0x3}, 3, NewNodeSetWithOrigin(nil, nil), NewStateSetWithOrigin(nil, nil, nil, nil, false))
return tr return tr
}, },
snapshotA: map[common.Hash]map[common.Hash]struct{}{ snapshotA: map[common.Hash]map[common.Hash]struct{}{
@ -360,9 +359,9 @@ func TestDescendant(t *testing.T) {
// C1->C2->C3->C4 (HEAD) // C1->C2->C3->C4 (HEAD)
init: func() *layerTree { init: func() *layerTree {
tr := newTestLayerTree() tr := newTestLayerTree()
tr.add(common.Hash{0x2}, common.Hash{0x1}, 1, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(nil, nil, nil, nil, false)) tr.add(common.Hash{0x2}, common.Hash{0x1}, 1, NewNodeSetWithOrigin(nil, nil), NewStateSetWithOrigin(nil, nil, nil, nil, false))
tr.add(common.Hash{0x3}, common.Hash{0x2}, 2, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(nil, nil, nil, nil, false)) tr.add(common.Hash{0x3}, common.Hash{0x2}, 2, NewNodeSetWithOrigin(nil, nil), NewStateSetWithOrigin(nil, nil, nil, nil, false))
tr.add(common.Hash{0x4}, common.Hash{0x3}, 3, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(nil, nil, nil, nil, false)) tr.add(common.Hash{0x4}, common.Hash{0x3}, 3, NewNodeSetWithOrigin(nil, nil), NewStateSetWithOrigin(nil, nil, nil, nil, false))
return tr return tr
}, },
snapshotA: map[common.Hash]map[common.Hash]struct{}{ snapshotA: map[common.Hash]map[common.Hash]struct{}{
@ -392,12 +391,12 @@ func TestDescendant(t *testing.T) {
// ->C2'->C3'->C4' // ->C2'->C3'->C4'
init: func() *layerTree { init: func() *layerTree {
tr := newTestLayerTree() tr := newTestLayerTree()
tr.add(common.Hash{0x2a}, common.Hash{0x1}, 1, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(nil, nil, nil, nil, false)) tr.add(common.Hash{0x2a}, common.Hash{0x1}, 1, NewNodeSetWithOrigin(nil, nil), NewStateSetWithOrigin(nil, nil, nil, nil, false))
tr.add(common.Hash{0x3a}, common.Hash{0x2a}, 2, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(nil, nil, nil, nil, false)) tr.add(common.Hash{0x3a}, common.Hash{0x2a}, 2, NewNodeSetWithOrigin(nil, nil), NewStateSetWithOrigin(nil, nil, nil, nil, false))
tr.add(common.Hash{0x4a}, common.Hash{0x3a}, 3, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(nil, nil, nil, nil, false)) tr.add(common.Hash{0x4a}, common.Hash{0x3a}, 3, NewNodeSetWithOrigin(nil, nil), NewStateSetWithOrigin(nil, nil, nil, nil, false))
tr.add(common.Hash{0x2b}, common.Hash{0x1}, 1, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(nil, nil, nil, nil, false)) tr.add(common.Hash{0x2b}, common.Hash{0x1}, 1, NewNodeSetWithOrigin(nil, nil), NewStateSetWithOrigin(nil, nil, nil, nil, false))
tr.add(common.Hash{0x3b}, common.Hash{0x2b}, 2, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(nil, nil, nil, nil, false)) tr.add(common.Hash{0x3b}, common.Hash{0x2b}, 2, NewNodeSetWithOrigin(nil, nil), NewStateSetWithOrigin(nil, nil, nil, nil, false))
tr.add(common.Hash{0x4b}, common.Hash{0x3b}, 3, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(nil, nil, nil, nil, false)) tr.add(common.Hash{0x4b}, common.Hash{0x3b}, 3, NewNodeSetWithOrigin(nil, nil), NewStateSetWithOrigin(nil, nil, nil, nil, false))
return tr return tr
}, },
snapshotA: map[common.Hash]map[common.Hash]struct{}{ snapshotA: map[common.Hash]map[common.Hash]struct{}{
@ -445,12 +444,12 @@ func TestDescendant(t *testing.T) {
// ->C2'->C3'->C4' // ->C2'->C3'->C4'
init: func() *layerTree { init: func() *layerTree {
tr := newTestLayerTree() tr := newTestLayerTree()
tr.add(common.Hash{0x2a}, common.Hash{0x1}, 1, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(nil, nil, nil, nil, false)) tr.add(common.Hash{0x2a}, common.Hash{0x1}, 1, NewNodeSetWithOrigin(nil, nil), NewStateSetWithOrigin(nil, nil, nil, nil, false))
tr.add(common.Hash{0x3a}, common.Hash{0x2a}, 2, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(nil, nil, nil, nil, false)) tr.add(common.Hash{0x3a}, common.Hash{0x2a}, 2, NewNodeSetWithOrigin(nil, nil), NewStateSetWithOrigin(nil, nil, nil, nil, false))
tr.add(common.Hash{0x4a}, common.Hash{0x3a}, 3, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(nil, nil, nil, nil, false)) tr.add(common.Hash{0x4a}, common.Hash{0x3a}, 3, NewNodeSetWithOrigin(nil, nil), NewStateSetWithOrigin(nil, nil, nil, nil, false))
tr.add(common.Hash{0x2b}, common.Hash{0x1}, 1, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(nil, nil, nil, nil, false)) tr.add(common.Hash{0x2b}, common.Hash{0x1}, 1, NewNodeSetWithOrigin(nil, nil), NewStateSetWithOrigin(nil, nil, nil, nil, false))
tr.add(common.Hash{0x3b}, common.Hash{0x2b}, 2, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(nil, nil, nil, nil, false)) tr.add(common.Hash{0x3b}, common.Hash{0x2b}, 2, NewNodeSetWithOrigin(nil, nil), NewStateSetWithOrigin(nil, nil, nil, nil, false))
tr.add(common.Hash{0x4b}, common.Hash{0x3b}, 3, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(nil, nil, nil, nil, false)) tr.add(common.Hash{0x4b}, common.Hash{0x3b}, 3, NewNodeSetWithOrigin(nil, nil), NewStateSetWithOrigin(nil, nil, nil, nil, false))
return tr return tr
}, },
snapshotA: map[common.Hash]map[common.Hash]struct{}{ snapshotA: map[common.Hash]map[common.Hash]struct{}{
@ -494,11 +493,11 @@ func TestDescendant(t *testing.T) {
// ->C3'->C4' // ->C3'->C4'
init: func() *layerTree { init: func() *layerTree {
tr := newTestLayerTree() tr := newTestLayerTree()
tr.add(common.Hash{0x2}, common.Hash{0x1}, 1, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(nil, nil, nil, nil, false)) tr.add(common.Hash{0x2}, common.Hash{0x1}, 1, NewNodeSetWithOrigin(nil, nil), NewStateSetWithOrigin(nil, nil, nil, nil, false))
tr.add(common.Hash{0x3a}, common.Hash{0x2}, 2, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(nil, nil, nil, nil, false)) tr.add(common.Hash{0x3a}, common.Hash{0x2}, 2, NewNodeSetWithOrigin(nil, nil), NewStateSetWithOrigin(nil, nil, nil, nil, false))
tr.add(common.Hash{0x4a}, common.Hash{0x3a}, 3, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(nil, nil, nil, nil, false)) tr.add(common.Hash{0x4a}, common.Hash{0x3a}, 3, NewNodeSetWithOrigin(nil, nil), NewStateSetWithOrigin(nil, nil, nil, nil, false))
tr.add(common.Hash{0x3b}, common.Hash{0x2}, 2, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(nil, nil, nil, nil, false)) tr.add(common.Hash{0x3b}, common.Hash{0x2}, 2, NewNodeSetWithOrigin(nil, nil), NewStateSetWithOrigin(nil, nil, nil, nil, false))
tr.add(common.Hash{0x4b}, common.Hash{0x3b}, 3, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(nil, nil, nil, nil, false)) tr.add(common.Hash{0x4b}, common.Hash{0x3b}, 3, NewNodeSetWithOrigin(nil, nil), NewStateSetWithOrigin(nil, nil, nil, nil, false))
return tr return tr
}, },
snapshotA: map[common.Hash]map[common.Hash]struct{}{ snapshotA: map[common.Hash]map[common.Hash]struct{}{
@ -580,11 +579,11 @@ func TestAccountLookup(t *testing.T) {
// Chain: // Chain:
// C1->C2->C3->C4 (HEAD) // C1->C2->C3->C4 (HEAD)
tr := newTestLayerTree() // base = 0x1 tr := newTestLayerTree() // base = 0x1
tr.add(common.Hash{0x2}, common.Hash{0x1}, 1, trienode.NewMergedNodeSet(), tr.add(common.Hash{0x2}, common.Hash{0x1}, 1, NewNodeSetWithOrigin(nil, nil),
NewStateSetWithOrigin(randomAccountSet("0xa"), nil, nil, nil, false)) NewStateSetWithOrigin(randomAccountSet("0xa"), nil, nil, nil, false))
tr.add(common.Hash{0x3}, common.Hash{0x2}, 2, trienode.NewMergedNodeSet(), tr.add(common.Hash{0x3}, common.Hash{0x2}, 2, NewNodeSetWithOrigin(nil, nil),
NewStateSetWithOrigin(randomAccountSet("0xb"), nil, nil, nil, false)) NewStateSetWithOrigin(randomAccountSet("0xb"), nil, nil, nil, false))
tr.add(common.Hash{0x4}, common.Hash{0x3}, 3, trienode.NewMergedNodeSet(), tr.add(common.Hash{0x4}, common.Hash{0x3}, 3, NewNodeSetWithOrigin(nil, nil),
NewStateSetWithOrigin(randomAccountSet("0xa", "0xc"), nil, nil, nil, false)) NewStateSetWithOrigin(randomAccountSet("0xa", "0xc"), nil, nil, nil, false))
var cases = []struct { var cases = []struct {
@ -734,11 +733,11 @@ func TestStorageLookup(t *testing.T) {
// Chain: // Chain:
// C1->C2->C3->C4 (HEAD) // C1->C2->C3->C4 (HEAD)
tr := newTestLayerTree() // base = 0x1 tr := newTestLayerTree() // base = 0x1
tr.add(common.Hash{0x2}, common.Hash{0x1}, 1, trienode.NewMergedNodeSet(), tr.add(common.Hash{0x2}, common.Hash{0x1}, 1, NewNodeSetWithOrigin(nil, nil),
NewStateSetWithOrigin(randomAccountSet("0xa"), randomStorageSet([]string{"0xa"}, [][]string{{"0x1"}}, nil), nil, nil, false)) NewStateSetWithOrigin(randomAccountSet("0xa"), randomStorageSet([]string{"0xa"}, [][]string{{"0x1"}}, nil), nil, nil, false))
tr.add(common.Hash{0x3}, common.Hash{0x2}, 2, trienode.NewMergedNodeSet(), tr.add(common.Hash{0x3}, common.Hash{0x2}, 2, NewNodeSetWithOrigin(nil, nil),
NewStateSetWithOrigin(randomAccountSet("0xa"), randomStorageSet([]string{"0xa"}, [][]string{{"0x2"}}, nil), nil, nil, false)) NewStateSetWithOrigin(randomAccountSet("0xa"), randomStorageSet([]string{"0xa"}, [][]string{{"0x2"}}, nil), nil, nil, false))
tr.add(common.Hash{0x4}, common.Hash{0x3}, 3, trienode.NewMergedNodeSet(), tr.add(common.Hash{0x4}, common.Hash{0x3}, 3, NewNodeSetWithOrigin(nil, nil),
NewStateSetWithOrigin(randomAccountSet("0xa"), randomStorageSet([]string{"0xa"}, [][]string{{"0x1", "0x3"}}, nil), nil, nil, false)) NewStateSetWithOrigin(randomAccountSet("0xa"), randomStorageSet([]string{"0xa"}, [][]string{{"0x1", "0x3"}}, nil), nil, nil, false))
var cases = []struct { var cases = []struct {

View file

@ -18,6 +18,7 @@ package pathdb
import ( import (
"bytes" "bytes"
"errors"
"fmt" "fmt"
"io" "io"
"maps" "maps"
@ -301,3 +302,125 @@ func (s *nodeSet) dbsize() int {
} }
return m + int(s.size) return m + int(s.size)
} }
// nodeSetWithOrigin wraps the node set with additional original values of the
// mutated trie nodes.
type nodeSetWithOrigin struct {
	*nodeSet

	// nodeOrigin represents the trie nodes before the state transition. It's keyed
	// by the trie owner hash and the node path. The nil value means the trie node
	// was not present before the transition.
	nodeOrigin map[common.Hash]map[string][]byte

	// size is the combined memory footprint of the wrapped node set and the
	// tracked origin values, maintained by computeSize. Note that it shadows
	// the embedded nodeSet.size field.
	size uint64
}
// NewNodeSetWithOrigin constructs a node set bundled with the original values
// of the mutated trie nodes. A nil origins map is tolerated, so callers that
// don't track origins may simply pass nil.
func NewNodeSetWithOrigin(nodes map[common.Hash]map[string]*trienode.Node, origins map[common.Hash]map[string][]byte) *nodeSetWithOrigin {
	// Normalize the nil map from lazy callers before wiring it in.
	tracked := origins
	if tracked == nil {
		tracked = make(map[common.Hash]map[string][]byte)
	}
	s := &nodeSetWithOrigin{
		nodeSet:    newNodeSet(nodes),
		nodeOrigin: tracked,
	}
	s.computeSize()
	return s
}
// computeSize refreshes the cached database size, accounting for both the
// wrapped node set and the tracked origin values.
func (s *nodeSetWithOrigin) computeSize() {
	var origins uint64
	for owner, paths := range s.nodeOrigin {
		// Entries of the trie with the zero owner hash carry no owner prefix.
		var prefix int
		if owner != (common.Hash{}) {
			prefix = common.HashLength
		}
		for path, blob := range paths {
			origins += uint64(prefix + len(path) + len(blob))
		}
	}
	s.size = s.nodeSet.size + origins
}
// encode serializes the wrapped node set followed by the node origins into the
// provided writer. When no origins are tracked, only the node set is written,
// keeping the output byte-compatible with the legacy journal layout.
func (s *nodeSetWithOrigin) encode(w io.Writer) error {
	err := s.nodeSet.encode(w)
	if err != nil {
		return err
	}
	if len(s.nodeOrigin) == 0 {
		// Legacy layout: the origin section is omitted entirely.
		return nil
	}
	entries := make([]journalNodes, 0, len(s.nodeOrigin))
	for owner, subset := range s.nodeOrigin {
		jn := journalNodes{
			Owner: owner,
			Nodes: make([]journalNode, 0, len(subset)),
		}
		for path, blob := range subset {
			jn.Nodes = append(jn.Nodes, journalNode{
				Path: []byte(path),
				Blob: blob,
			})
		}
		entries = append(entries, jn)
	}
	return rlp.Encode(w, entries)
}
// hasOrigin peeks at the next element in the RLP stream and reports whether it
// is the origin data set. It's a workaround for backward compatibility:
// journals written before origin tracking omit this section, so hitting EOF
// here simply means no origins are present.
func (s *nodeSetWithOrigin) hasOrigin(r *rlp.Stream) (bool, error) {
	kind, _, err := r.Kind()
	switch {
	case errors.Is(err, io.EOF):
		// End of journal: the origin section was never written.
		return false, nil
	case err != nil:
		return false, err
	default:
		// A list denotes the original values of trie nodes; any other kind
		// (e.g. a boolean) belongs to the following state data set.
		return kind == rlp.List, nil
	}
}
// decode deserializes the content from the rlp stream into the node set.
// The stream is expected to hold the node set first, optionally followed by
// the origin section; the latter is detected via hasOrigin so that journals
// written before origin tracking still decode cleanly.
func (s *nodeSetWithOrigin) decode(r *rlp.Stream) error {
	// Lazily initialize the embedded set so a zero-value receiver is usable.
	if s.nodeSet == nil {
		s.nodeSet = &nodeSet{}
	}
	if err := s.nodeSet.decode(r); err != nil {
		return err
	}
	// Decode node origins
	s.nodeOrigin = make(map[common.Hash]map[string][]byte)
	if hasOrigin, err := s.hasOrigin(r); err != nil {
		return err
	} else if hasOrigin {
		var encoded []journalNodes
		if err := r.Decode(&encoded); err != nil {
			return fmt.Errorf("load nodes: %v", err)
		}
		for _, entry := range encoded {
			subset := make(map[string][]byte, len(entry.Nodes))
			for _, n := range entry.Nodes {
				if len(n.Blob) > 0 {
					subset[string(n.Path)] = n.Blob
				} else {
					// An empty blob denotes the node was absent before the
					// transition; store an explicit nil to preserve that.
					subset[string(n.Path)] = nil
				}
			}
			s.nodeOrigin[entry.Owner] = subset
		}
	}
	// Refresh the cached size to reflect the freshly loaded content.
	s.computeSize()
	return nil
}

128
triedb/pathdb/nodes_test.go Normal file
View file

@ -0,0 +1,128 @@
// Copyright 2025 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package pathdb
import (
"bytes"
"reflect"
"testing"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/trie/trienode"
)
func TestNodeSetEncode(t *testing.T) {
nodes := make(map[common.Hash]map[string]*trienode.Node)
nodes[common.Hash{}] = map[string]*trienode.Node{
"": trienode.New(crypto.Keccak256Hash([]byte{0x0}), []byte{0x0}),
"1": trienode.New(crypto.Keccak256Hash([]byte{0x1}), []byte{0x1}),
"2": trienode.New(crypto.Keccak256Hash([]byte{0x2}), []byte{0x2}),
}
nodes[common.Hash{0x1}] = map[string]*trienode.Node{
"": trienode.New(crypto.Keccak256Hash([]byte{0x0}), []byte{0x0}),
"1": trienode.New(crypto.Keccak256Hash([]byte{0x1}), []byte{0x1}),
"2": trienode.New(crypto.Keccak256Hash([]byte{0x2}), []byte{0x2}),
}
s := newNodeSet(nodes)
buf := bytes.NewBuffer(nil)
if err := s.encode(buf); err != nil {
t.Fatalf("Failed to encode states, %v", err)
}
var dec nodeSet
if err := dec.decode(rlp.NewStream(buf, 0)); err != nil {
t.Fatalf("Failed to decode states, %v", err)
}
if !reflect.DeepEqual(s.accountNodes, dec.accountNodes) {
t.Fatal("Unexpected account data")
}
if !reflect.DeepEqual(s.storageNodes, dec.storageNodes) {
t.Fatal("Unexpected storage data")
}
}
func TestNodeSetWithOriginEncode(t *testing.T) {
nodes := make(map[common.Hash]map[string]*trienode.Node)
nodes[common.Hash{}] = map[string]*trienode.Node{
"": trienode.New(crypto.Keccak256Hash([]byte{0x0}), []byte{0x0}),
"1": trienode.New(crypto.Keccak256Hash([]byte{0x1}), []byte{0x1}),
"2": trienode.New(crypto.Keccak256Hash([]byte{0x2}), []byte{0x2}),
}
nodes[common.Hash{0x1}] = map[string]*trienode.Node{
"": trienode.New(crypto.Keccak256Hash([]byte{0x0}), []byte{0x0}),
"1": trienode.New(crypto.Keccak256Hash([]byte{0x1}), []byte{0x1}),
"2": trienode.New(crypto.Keccak256Hash([]byte{0x2}), []byte{0x2}),
}
origins := make(map[common.Hash]map[string][]byte)
origins[common.Hash{}] = map[string][]byte{
"": nil,
"1": {0x1},
"2": {0x2},
}
origins[common.Hash{0x1}] = map[string][]byte{
"": nil,
"1": {0x1},
"2": {0x2},
}
// Encode with origin set
s := NewNodeSetWithOrigin(nodes, origins)
buf := bytes.NewBuffer(nil)
if err := s.encode(buf); err != nil {
t.Fatalf("Failed to encode states, %v", err)
}
var dec nodeSetWithOrigin
if err := dec.decode(rlp.NewStream(buf, 0)); err != nil {
t.Fatalf("Failed to decode states, %v", err)
}
if !reflect.DeepEqual(s.accountNodes, dec.accountNodes) {
t.Fatal("Unexpected account data")
}
if !reflect.DeepEqual(s.storageNodes, dec.storageNodes) {
t.Fatal("Unexpected storage data")
}
if !reflect.DeepEqual(s.nodeOrigin, dec.nodeOrigin) {
t.Fatal("Unexpected node origin data")
}
// Encode without origin set
s = NewNodeSetWithOrigin(nodes, nil)
buf = bytes.NewBuffer(nil)
if err := s.encode(buf); err != nil {
t.Fatalf("Failed to encode states, %v", err)
}
var dec2 nodeSetWithOrigin
if err := dec2.decode(rlp.NewStream(buf, 0)); err != nil {
t.Fatalf("Failed to decode states, %v", err)
}
if !reflect.DeepEqual(s.accountNodes, dec2.accountNodes) {
t.Fatal("Unexpected account data")
}
if !reflect.DeepEqual(s.storageNodes, dec2.storageNodes) {
t.Fatal("Unexpected storage data")
}
if len(dec2.nodeOrigin) != 0 {
t.Fatal("unexpected node origin data")
}
if dec2.size != s.size {
t.Fatalf("Unexpected data size, got: %d, want: %d", dec2.size, s.size)
}
}