go-ethereum/triedb/pathdb/history_index_test.go
rjl493456442 d5efd34010
triedb/pathdb: introduce extension to history index structure (#33399)
It's a PR based on #33303 and introduces an approach for trienode
history indexing.

---

In the current archive node design, resolving a historical trie node at
a specific block
involves the following steps:

- Look up the corresponding trie node index and locate the first entry
whose state ID
   is greater than the target state ID.
- Resolve the trie node from the associated trienode history object.

A naive approach would be to store mutation records for every trie node,
similar to
how flat state mutations are recorded. However, the total number of trie
nodes is
extremely large (approximately 2.4 billion), and the vast majority of
them are rarely
modified. Creating an index entry for each individual trie node would be
very wasteful
in both storage and indexing overhead. To address this, we aggregate
multiple trie
nodes into chunks and index mutations at the chunk level instead. 

---

For a storage trie, the trie is vertically partitioned into multiple sub
tries, each spanning
three consecutive levels. The top three levels (1 + 16 + 256 nodes) form
the first chunk,
and every subsequent three-level segment forms another chunk.

```
Original trie structure

Level 0               [ ROOT ]                               1 node
Level 1        [0] [1] [2] ... [f]                          16 nodes
Level 2     [00] [01] ... [0f] [10] ... [ff]               256 nodes
Level 3   [000] [001] ... [00f] [010] ... [fff]           4096 nodes
Level 4   [0000] ... [000f] [0010] ... [001f] ... [ffff] 65536 nodes

Vertical split into chunks (3 levels per chunk)

Level0             [ ROOT ]                     1 chunk
Level3        [000]   ...     [fff]          4096 chunks
Level6   [000000]    ...    [ffffff]    16777216 chunks
```

Within each chunk, there are 273 nodes in total, regardless of the
chunk's depth in the trie.

```
Level 0           [ 0 ]                         1 node
Level 1        [ 1 ] … [ 16 ]                  16 nodes
Level 2     [ 17 ] … … [ 272 ]                256 nodes
```

Each chunk is uniquely identified by the path prefix of the root node of
its corresponding
sub-trie. Within a chunk, nodes are identified by a numeric index
ranging from 0 to 272.

For example, suppose that at block 100, the nodes with paths `[]`,
`[0]`, `[f]`, `[00]`, and `[ff]`
are modified. The mutation record for chunk 0 is then appended with the
following entry:

`[100 → [0, 1, 16, 17, 272]]`, `272` is the numeric ID of path `[ff]`.

Furthermore, due to the structural properties of the Merkle Patricia
Trie, if a child node
is modified, all of its ancestors along the same path must also be
updated. As a result,
in the above example, recording mutations for nodes `00` and `ff` alone
is sufficient,
as this implicitly indicates that their ancestor nodes `[]`, `[0]` and
`[f]` were also
modified at block 100.

--- 

Query processing is slightly more complicated. Since trie nodes are
indexed at the chunk
level, each individual trie node lookup requires an additional filtering
step to ensure that
a given mutation record actually corresponds to the target trie node.

As mentioned earlier, mutation records store only the numeric
identifiers of leaf nodes,
while ancestor nodes are omitted for storage efficiency. Consequently,
when querying
an ancestor node, additional checks are required to determine whether
the mutation
record implicitly represents a modification to that ancestor.

Moreover, since trie nodes are indexed at the chunk level, some trie
nodes may be
updated frequently, causing their mutation records to dominate the
index. Queries
targeting rarely modified trie nodes would then scan a large amount of
irrelevant
index data, significantly degrading performance.

To address this issue, a bitmap is introduced for each index block and
stored in the
chunk's metadata. Before loading a specific index block, the bitmap is
checked to
determine whether the block contains mutation records relevant to the
target trie node.
If the bitmap indicates that the block does not contain such records,
the block is skipped entirely.
2026-01-08 09:57:35 +01:00

397 lines
11 KiB
Go

// Copyright 2025 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package pathdb
import (
"math"
"math/rand"
"slices"
"sort"
"testing"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/crypto"
)
// TestIndexReaderBasic exercises index lookups over a small fixed element
// set for several bitmap extension sizes.
func TestIndexReaderBasic(t *testing.T) {
	for _, size := range []int{0, 2, 34} {
		testIndexReaderBasic(t, size)
	}
}
// testIndexReaderBasic writes a handful of sorted elements through an index
// writer, then checks that readGreaterThan returns the first element strictly
// greater than the queried value, or math.MaxUint64 when none exists.
func testIndexReaderBasic(t *testing.T, bitmapSize int) {
	var (
		db       = rawdb.NewMemoryDatabase()
		elements = []uint64{1, 5, 10, 11, 20}
	)
	writer, _ := newIndexWriter(db, newAccountIdent(common.Hash{0xa}), 0, bitmapSize)
	for _, elem := range elements {
		writer.append(elem, randomExt(bitmapSize, 5))
	}
	batch := db.NewBatch()
	writer.finish(batch)
	batch.Write()

	reader, err := newIndexReader(db, newAccountIdent(common.Hash{0xa}), bitmapSize)
	if err != nil {
		t.Fatalf("Failed to construct the index reader, %v", err)
	}
	for _, c := range []struct {
		value  uint64
		result uint64
	}{
		{0, 1},
		{1, 5},
		{10, 11},
		{19, 20},
		{20, math.MaxUint64},
		{21, math.MaxUint64},
	} {
		got, err := reader.readGreaterThan(c.value)
		if err != nil {
			t.Fatalf("Unexpected error, got %v", err)
		}
		if got != c.result {
			t.Fatalf("Unexpected result, got %v, wanted %v", got, c.result)
		}
	}
}
// TestIndexReaderLarge exercises index lookups over a large random element
// set for several bitmap extension sizes.
func TestIndexReaderLarge(t *testing.T) {
	for _, size := range []int{0, 2, 34} {
		testIndexReaderLarge(t, size)
	}
}
// testIndexReaderLarge indexes 40960 random elements and cross-checks
// readGreaterThan against a binary search over the in-memory sorted list.
func testIndexReaderLarge(t *testing.T, bitmapSize int) {
	elements := make([]uint64, 0, 10*4096)
	for len(elements) < 10*4096 {
		elements = append(elements, rand.Uint64())
	}
	slices.Sort(elements)

	db := rawdb.NewMemoryDatabase()
	writer, _ := newIndexWriter(db, newAccountIdent(common.Hash{0xa}), 0, bitmapSize)
	for _, elem := range elements {
		writer.append(elem, randomExt(bitmapSize, 5))
	}
	batch := db.NewBatch()
	writer.finish(batch)
	batch.Write()

	reader, err := newIndexReader(db, newAccountIdent(common.Hash{0xa}), bitmapSize)
	if err != nil {
		t.Fatalf("Failed to construct the index reader, %v", err)
	}
	for n := 0; n < 100; n++ {
		value := rand.Uint64()
		// Reference result: position of the first element strictly greater
		// than the probed value.
		pos := sort.Search(len(elements), func(j int) bool {
			return elements[j] > value
		})
		got, err := reader.readGreaterThan(value)
		if err != nil {
			t.Fatalf("Unexpected error, got %v", err)
		}
		switch {
		case pos == len(elements):
			if got != math.MaxUint64 {
				t.Fatalf("Unexpected result, got %d, wanted math.MaxUint64", got)
			}
		case got != elements[pos]:
			t.Fatalf("Unexpected result, got %d, wanted %d", got, elements[pos])
		}
	}
}
// TestEmptyIndexReader checks that a reader over an empty database reports
// math.MaxUint64 (i.e. no element greater than the target exists).
func TestEmptyIndexReader(t *testing.T) {
	reader, err := newIndexReader(rawdb.NewMemoryDatabase(), newAccountIdent(common.Hash{0xa}), 0)
	if err != nil {
		t.Fatalf("Failed to construct the index reader, %v", err)
	}
	res, err := reader.readGreaterThan(100)
	if err != nil {
		t.Fatalf("Failed to query, %v", err)
	}
	if res != math.MaxUint64 {
		t.Fatalf("Unexpected result, got %d, wanted math.MaxUint64", res)
	}
}
// TestIndexWriterBasic exercises ordered appends and writer re-opening for
// several bitmap extension sizes.
func TestIndexWriterBasic(t *testing.T) {
	for _, size := range []int{0, 2, 34} {
		testIndexWriterBasic(t, size)
	}
}
// testIndexWriterBasic checks that appends must be strictly increasing, and
// that a writer re-opened at the persisted position accepts further appends.
//
// Fixes in this revision: the second finish used to run against a fresh batch
// that was never written (silently discarding the data), and append/Write
// errors on the happy path were ignored.
func testIndexWriterBasic(t *testing.T, bitmapSize int) {
	db := rawdb.NewMemoryDatabase()
	iw, _ := newIndexWriter(db, newAccountIdent(common.Hash{0xa}), 0, bitmapSize)
	if err := iw.append(2, randomExt(bitmapSize, 5)); err != nil {
		t.Fatalf("Failed to append item, %v", err)
	}
	// An id smaller than the last appended one must be rejected.
	if err := iw.append(1, randomExt(bitmapSize, 5)); err == nil {
		t.Fatal("out-of-order insertion is not expected")
	}
	var maxElem uint64
	for i := 0; i < 10; i++ {
		if err := iw.append(uint64(i+3), randomExt(bitmapSize, 5)); err != nil {
			t.Fatalf("Failed to append item, %v", err)
		}
		maxElem = uint64(i + 3)
	}
	batch := db.NewBatch()
	iw.finish(batch)
	if err := batch.Write(); err != nil {
		t.Fatalf("Failed to write batch, %v", err)
	}
	// Re-open the writer at the persisted position and keep appending.
	iw, err := newIndexWriter(db, newAccountIdent(common.Hash{0xa}), maxElem, bitmapSize)
	if err != nil {
		t.Fatalf("Failed to construct the block writer, %v", err)
	}
	for i := 0; i < 10; i++ {
		if err := iw.append(uint64(i+100), randomExt(bitmapSize, 5)); err != nil {
			t.Fatalf("Failed to append item, %v", err)
		}
	}
	// Persist the second round too; previously the batch was discarded.
	batch = db.NewBatch()
	iw.finish(batch)
	if err := batch.Write(); err != nil {
		t.Fatalf("Failed to write batch, %v", err)
	}
}
// TestIndexWriterWithLimit exercises truncated re-opening of the writer for
// several bitmap extension sizes.
func TestIndexWriterWithLimit(t *testing.T) {
	for _, size := range []int{0, 2, 34} {
		testIndexWriterWithLimit(t, size)
	}
}
// testIndexWriterWithLimit persists 200 elements, then re-opens the writer
// truncated at every possible position and verifies that appends resume
// cleanly from there.
func testIndexWriterWithLimit(t *testing.T, bitmapSize int) {
	db := rawdb.NewMemoryDatabase()
	writer, _ := newIndexWriter(db, newAccountIdent(common.Hash{0xa}), 0, bitmapSize)
	// 200 iterations (with around 50 bytes extension) is enough to cross
	// the block boundary (4096 bytes)
	for id := uint64(1); id <= 200; id++ {
		writer.append(id, randomExt(bitmapSize, 50))
	}
	batch := db.NewBatch()
	writer.finish(batch)
	batch.Write()

	for i := 0; i < 200; i++ {
		limit := uint64(i + 1)
		iw, err := newIndexWriter(db, newAccountIdent(common.Hash{0xa}), limit, bitmapSize)
		if err != nil {
			t.Fatalf("Failed to construct the index writer, %v", err)
		}
		if iw.lastID != limit {
			t.Fatalf("Test %d, unexpected max value, got %d, want %d", i, iw.lastID, limit)
		}
		// Re-fill the elements
		var maxElem uint64
		for elem := limit + 1; elem < 500; elem++ {
			if err := iw.append(elem, randomExt(bitmapSize, 5)); err != nil {
				t.Fatalf("Failed to append value %d: %v", elem, err)
			}
			maxElem = elem
		}
		if iw.lastID != maxElem {
			t.Fatalf("Test %d, unexpected max value, got %d, want %d", i, iw.lastID, maxElem)
		}
	}
}
// TestIndexDeleterBasic exercises element removal for several bitmap
// extension sizes.
func TestIndexDeleterBasic(t *testing.T) {
	for _, size := range []int{0, 2, 34} {
		testIndexDeleterBasic(t, size)
	}
}
// testIndexDeleterBasic indexes 200 elements, then pops them back in reverse
// order, verifying the deleter's lastID after each removal and that popping
// an unknown id is rejected.
//
// Fix in this revision: the lastID failure message used to print the stale
// writer field (iw.lastID) instead of the deleter's id.lastID, producing a
// misleading diagnostic on failure.
func testIndexDeleterBasic(t *testing.T, bitmapSize int) {
	db := rawdb.NewMemoryDatabase()
	iw, _ := newIndexWriter(db, newAccountIdent(common.Hash{0xa}), 0, bitmapSize)
	// 200 iterations (with around 50 bytes extension) is enough to cross
	// the block boundary (4096 bytes)
	var maxElem uint64
	for i := 0; i < 200; i++ {
		iw.append(uint64(i+1), randomExt(bitmapSize, 50))
		maxElem = uint64(i + 1)
	}
	batch := db.NewBatch()
	iw.finish(batch)
	batch.Write()

	// Delete unknown id, the request should be rejected
	id, _ := newIndexDeleter(db, newAccountIdent(common.Hash{0xa}), maxElem, bitmapSize)
	if err := id.pop(500); err == nil {
		t.Fatal("Expect error to occur for unknown id")
	}
	for i := 200; i >= 1; i-- {
		if err := id.pop(uint64(i)); err != nil {
			t.Fatalf("Unexpected error for element popping, %v", err)
		}
		if id.lastID != uint64(i-1) {
			t.Fatalf("Unexpected lastID, want: %d, got: %d", uint64(i-1), id.lastID)
		}
	}
}
// TestIndexDeleterWithLimit exercises truncated re-opening of the deleter
// for several bitmap extension sizes.
func TestIndexDeleterWithLimit(t *testing.T) {
	for _, size := range []int{0, 2, 34} {
		testIndexDeleterWithLimit(t, size)
	}
}
// testIndexDeleterWithLimit persists 200 elements, then re-opens the deleter
// truncated at every possible position and drains the remaining elements.
//
// Fix in this revision: the lastID failure message used to print the writer
// field (iw.lastID) instead of the deleter's id.lastID, producing a
// misleading diagnostic on failure.
func testIndexDeleterWithLimit(t *testing.T, bitmapSize int) {
	db := rawdb.NewMemoryDatabase()
	iw, _ := newIndexWriter(db, newAccountIdent(common.Hash{0xa}), 0, bitmapSize)
	// 200 iterations (with around 50 bytes extension) is enough to cross
	// the block boundary (4096 bytes)
	for i := 0; i < 200; i++ {
		iw.append(uint64(i+1), randomExt(bitmapSize, 50))
	}
	batch := db.NewBatch()
	iw.finish(batch)
	batch.Write()

	for i := 0; i < 200; i++ {
		limit := uint64(i + 1)
		id, err := newIndexDeleter(db, newAccountIdent(common.Hash{0xa}), limit, bitmapSize)
		if err != nil {
			t.Fatalf("Failed to construct the index writer, %v", err)
		}
		if id.lastID != limit {
			t.Fatalf("Test %d, unexpected max value, got %d, want %d", i, id.lastID, limit)
		}
		// Keep removing elements
		for elem := id.lastID; elem > 0; elem-- {
			if err := id.pop(elem); err != nil {
				t.Fatalf("Failed to pop value %d: %v", elem, err)
			}
		}
	}
}
// TestBatchIndexerWrite indexes ten state histories and verifies that every
// account and storage-slot index walks through exactly the expected history
// IDs, terminating with math.MaxUint64.
//
// Fix in this revision: the account and storage verification loops were
// duplicated verbatim; they are now shared via a single closure (a structural
// interface keeps it agnostic of the concrete reader type).
func TestBatchIndexerWrite(t *testing.T) {
	var (
		db        = rawdb.NewMemoryDatabase()
		batch     = newBatchIndexer(db, false, typeStateHistory)
		histories = makeStateHistories(10)
	)
	for i, h := range histories {
		if err := batch.process(h, uint64(i+1)); err != nil {
			t.Fatalf("Failed to process history, %v", err)
		}
	}
	if err := batch.finish(true); err != nil {
		t.Fatalf("Failed to finish batch indexer, %v", err)
	}
	metadata := loadIndexMetadata(db, typeStateHistory)
	if metadata == nil || metadata.Last != uint64(10) {
		t.Fatal("Unexpected index position")
	}
	// Reconstruct the expected per-account and per-slot mutation lists.
	var (
		accounts = make(map[common.Hash][]uint64)
		storages = make(map[common.Hash]map[common.Hash][]uint64)
	)
	for i, h := range histories {
		for _, addr := range h.accountList {
			addrHash := crypto.Keccak256Hash(addr.Bytes())
			accounts[addrHash] = append(accounts[addrHash], uint64(i+1))
			if _, ok := storages[addrHash]; !ok {
				storages[addrHash] = make(map[common.Hash][]uint64)
			}
			for _, slot := range h.storageList[addr] {
				storages[addrHash][slot] = append(storages[addrHash][slot], uint64(i+1))
			}
		}
	}
	// verify checks that the reader steps through exactly the expected index
	// list and reports math.MaxUint64 past the final element.
	verify := func(ir interface {
		readGreaterThan(uint64) (uint64, error)
	}, indexes []uint64) {
		t.Helper()
		for i := 0; i < len(indexes)-1; i++ {
			n, err := ir.readGreaterThan(indexes[i])
			if err != nil {
				t.Fatalf("Failed to read index, %v", err)
			}
			if n != indexes[i+1] {
				t.Fatalf("Unexpected result, want %d, got %d", indexes[i+1], n)
			}
		}
		n, err := ir.readGreaterThan(indexes[len(indexes)-1])
		if err != nil {
			t.Fatalf("Failed to read index, %v", err)
		}
		if n != math.MaxUint64 {
			t.Fatalf("Unexpected result, want math.MaxUint64, got %d", n)
		}
	}
	for addrHash, indexes := range accounts {
		ir, _ := newIndexReader(db, newAccountIdent(addrHash), 0)
		verify(ir, indexes)
	}
	for addrHash, slots := range storages {
		for slotHash, indexes := range slots {
			ir, _ := newIndexReader(db, newStorageIdent(addrHash, slotHash), 0)
			verify(ir, indexes)
		}
	}
}
// TestBatchIndexerDelete indexes ten state histories, unindexes them all in
// reverse order, and verifies that both the index metadata and every index
// entry under the history-index prefix are gone afterwards.
//
// Fixes in this revision: the iterator's error was never checked (a broken
// iteration silently passed), Release is now deferred, and the leftover-data
// failure reports the offending key.
func TestBatchIndexerDelete(t *testing.T) {
	var (
		db        = rawdb.NewMemoryDatabase()
		bw        = newBatchIndexer(db, false, typeStateHistory)
		histories = makeStateHistories(10)
	)
	// Index histories
	for i, h := range histories {
		if err := bw.process(h, uint64(i+1)); err != nil {
			t.Fatalf("Failed to process history, %v", err)
		}
	}
	if err := bw.finish(true); err != nil {
		t.Fatalf("Failed to finish batch indexer, %v", err)
	}
	// Unindex histories
	bd := newBatchIndexer(db, true, typeStateHistory)
	for i := len(histories) - 1; i >= 0; i-- {
		if err := bd.process(histories[i], uint64(i+1)); err != nil {
			t.Fatalf("Failed to process history, %v", err)
		}
	}
	if err := bd.finish(true); err != nil {
		t.Fatalf("Failed to finish batch indexer, %v", err)
	}
	metadata := loadIndexMetadata(db, typeStateHistory)
	if metadata != nil {
		t.Fatal("Unexpected index position")
	}
	// Ensure no index entries are left behind after unindexing.
	it := db.NewIterator(rawdb.StateHistoryIndexPrefix, nil)
	defer it.Release()
	for it.Next() {
		t.Fatalf("Leftover history index data, key %x", it.Key())
	}
	if err := it.Error(); err != nil {
		t.Fatalf("Failed to iterate database, %v", err)
	}
}