triedb/pathdb: rename history to state history (#32498)

This is an internal refactoring PR, renaming `history` to `stateHistory`.

It's a prerequisite PR for merging trienode history, avoiding the name
conflict.
This commit is contained in:
rjl493456442 2025-08-26 14:52:39 +08:00 committed by GitHub
parent 16bd164f3b
commit 8c58f4920d
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
11 changed files with 105 additions and 103 deletions

View file

@ -219,12 +219,14 @@ type Database struct {
isVerkle bool // Flag if database is used for verkle tree
hasher nodeHasher // Trie node hasher
config *Config // Configuration for database
diskdb ethdb.Database // Persistent storage for matured trie nodes
tree *layerTree // The group for all known layers
freezer ethdb.ResettableAncientStore // Freezer for storing trie histories, nil possible in tests
lock sync.RWMutex // Lock to prevent mutations from happening at the same time
indexer *historyIndexer // History indexer
config *Config // Configuration for database
diskdb ethdb.Database // Persistent storage for matured trie nodes
tree *layerTree // The group for all known layers
stateFreezer ethdb.ResettableAncientStore // Freezer for storing state histories, nil possible in tests
stateIndexer *historyIndexer // History indexer historical state data, nil possible
lock sync.RWMutex // Lock to prevent mutations from happening at the same time
}
// New attempts to load an already existing layer from a persistent key-value
@ -275,8 +277,8 @@ func New(diskdb ethdb.Database, config *Config, isVerkle bool) *Database {
log.Crit("Failed to setup the generator", "err", err)
}
// TODO (rjl493456442) disable the background indexing in read-only mode
if db.freezer != nil && db.config.EnableStateIndexing {
db.indexer = newHistoryIndexer(db.diskdb, db.freezer, db.tree.bottom().stateID())
if db.stateFreezer != nil && db.config.EnableStateIndexing {
db.stateIndexer = newHistoryIndexer(db.diskdb, db.stateFreezer, db.tree.bottom().stateID())
log.Info("Enabled state history indexing")
}
fields := config.fields()
@ -304,14 +306,14 @@ func (db *Database) repairHistory() error {
if err != nil {
log.Crit("Failed to open state history freezer", "err", err)
}
db.freezer = freezer
db.stateFreezer = freezer
// Reset the entire state histories if the trie database is not initialized
// yet. This action is necessary because these state histories are not
// expected to exist without an initialized trie database.
id := db.tree.bottom().stateID()
if id == 0 {
frozen, err := db.freezer.Ancients()
frozen, err := db.stateFreezer.Ancients()
if err != nil {
log.Crit("Failed to retrieve head of state history", "err", err)
}
@ -321,7 +323,7 @@ func (db *Database) repairHistory() error {
// Purge all state history indexing data first
rawdb.DeleteStateHistoryIndexMetadata(db.diskdb)
rawdb.DeleteStateHistoryIndex(db.diskdb)
err := db.freezer.Reset()
err := db.stateFreezer.Reset()
if err != nil {
log.Crit("Failed to reset state histories", "err", err)
}
@ -331,7 +333,7 @@ func (db *Database) repairHistory() error {
}
// Truncate the extra state histories above in freezer in case it's not
// aligned with the disk layer. It might happen after a unclean shutdown.
pruned, err := truncateFromHead(db.diskdb, db.freezer, id)
pruned, err := truncateFromHead(db.diskdb, db.stateFreezer, id)
if err != nil {
log.Crit("Failed to truncate extra state histories", "err", err)
}
@ -507,13 +509,13 @@ func (db *Database) Enable(root common.Hash) error {
// all root->id mappings should be removed as well. Since
// mappings can be huge and might take a while to clear
// them, just leave them in disk and wait for overwriting.
if db.freezer != nil {
if db.stateFreezer != nil {
// TODO(rjl493456442) would be better to group them into a batch.
//
// Purge all state history indexing data first
rawdb.DeleteStateHistoryIndexMetadata(db.diskdb)
rawdb.DeleteStateHistoryIndex(db.diskdb)
if err := db.freezer.Reset(); err != nil {
if err := db.stateFreezer.Reset(); err != nil {
return err
}
}
@ -529,9 +531,9 @@ func (db *Database) Enable(root common.Hash) error {
// To ensure the history indexer always matches the current state, we must:
// 1. Close any existing indexer
// 2. Re-initialize the indexer so it starts indexing from the new state root.
if db.indexer != nil && db.freezer != nil && db.config.EnableStateIndexing {
db.indexer.close()
db.indexer = newHistoryIndexer(db.diskdb, db.freezer, db.tree.bottom().stateID())
if db.stateIndexer != nil && db.stateFreezer != nil && db.config.EnableStateIndexing {
db.stateIndexer.close()
db.stateIndexer = newHistoryIndexer(db.diskdb, db.stateFreezer, db.tree.bottom().stateID())
log.Info("Re-enabled state history indexing")
}
log.Info("Rebuilt trie database", "root", root)
@ -551,7 +553,7 @@ func (db *Database) Recover(root common.Hash) error {
if err := db.modifyAllowed(); err != nil {
return err
}
if db.freezer == nil {
if db.stateFreezer == nil {
return errors.New("state rollback is non-supported")
}
// Short circuit if the target state is not recoverable
@ -564,7 +566,7 @@ func (db *Database) Recover(root common.Hash) error {
dl = db.tree.bottom()
)
for dl.rootHash() != root {
h, err := readHistory(db.freezer, dl.stateID())
h, err := readStateHistory(db.stateFreezer, dl.stateID())
if err != nil {
return err
}
@ -585,7 +587,7 @@ func (db *Database) Recover(root common.Hash) error {
if err := db.diskdb.SyncKeyValue(); err != nil {
return err
}
_, err := truncateFromHead(db.diskdb, db.freezer, dl.stateID())
_, err := truncateFromHead(db.diskdb, db.stateFreezer, dl.stateID())
if err != nil {
return err
}
@ -613,12 +615,12 @@ func (db *Database) Recoverable(root common.Hash) bool {
// dev mode. As a consequence, the Pathdb loses the ability for deep reorg
// in certain cases.
// TODO(rjl493456442): Implement the in-memory ancient store.
if db.freezer == nil {
if db.stateFreezer == nil {
return false
}
// Ensure the requested state is a canonical state and all state
// histories in range [id+1, disklayer.ID] are present and complete.
return checkHistories(db.freezer, *id+1, dl.stateID()-*id, func(m *meta) error {
return checkStateHistories(db.stateFreezer, *id+1, dl.stateID()-*id, func(m *meta) error {
if m.parent != root {
return errors.New("unexpected state history")
}
@ -646,14 +648,14 @@ func (db *Database) Close() error {
dl.resetCache() // release the memory held by clean cache
// Terminate the background state history indexer
if db.indexer != nil {
db.indexer.close()
if db.stateIndexer != nil {
db.stateIndexer.close()
}
// Close the attached state history freezer.
if db.freezer == nil {
if db.stateFreezer == nil {
return nil
}
return db.freezer.Close()
return db.stateFreezer.Close()
}
// Size returns the current storage size of the memory cache in front of the
@ -704,7 +706,7 @@ func (db *Database) journalPath() string {
// End: State ID of the last history for the query. 0 implies the last available
// object is selected as the ending point. Note end is included in the query.
func (db *Database) AccountHistory(address common.Address, start, end uint64) (*HistoryStats, error) {
return accountHistory(db.freezer, address, start, end)
return accountHistory(db.stateFreezer, address, start, end)
}
// StorageHistory inspects the storage history within the specified range.
@ -717,22 +719,22 @@ func (db *Database) AccountHistory(address common.Address, start, end uint64) (*
//
// Note, slot refers to the hash of the raw slot key.
func (db *Database) StorageHistory(address common.Address, slot common.Hash, start uint64, end uint64) (*HistoryStats, error) {
return storageHistory(db.freezer, address, slot, start, end)
return storageHistory(db.stateFreezer, address, slot, start, end)
}
// HistoryRange returns the block numbers associated with earliest and latest
// state history in the local store.
func (db *Database) HistoryRange() (uint64, uint64, error) {
return historyRange(db.freezer)
return historyRange(db.stateFreezer)
}
// IndexProgress returns the indexing progress made so far. It provides the
// number of states that remain unindexed.
func (db *Database) IndexProgress() (uint64, error) {
if db.indexer == nil {
if db.stateIndexer == nil {
return 0, nil
}
return db.indexer.progress()
return db.stateIndexer.progress()
}
// AccountIterator creates a new account iterator for the specified root hash and

View file

@ -426,7 +426,7 @@ func (t *tester) verifyHistory() error {
for i, root := range t.roots {
// The state history related to the state above disk layer should not exist.
if i > bottom {
_, err := readHistory(t.db.freezer, uint64(i+1))
_, err := readStateHistory(t.db.stateFreezer, uint64(i+1))
if err == nil {
return errors.New("unexpected state history")
}
@ -434,7 +434,7 @@ func (t *tester) verifyHistory() error {
}
// The state history related to the state below or equal to the disk layer
// should exist.
obj, err := readHistory(t.db.freezer, uint64(i+1))
obj, err := readStateHistory(t.db.stateFreezer, uint64(i+1))
if err != nil {
return err
}
@ -568,7 +568,7 @@ func TestDisable(t *testing.T) {
t.Fatal("Failed to clean journal")
}
// Ensure all trie histories are removed
n, err := tester.db.freezer.Ancients()
n, err := tester.db.stateFreezer.Ancients()
if err != nil {
t.Fatal("Failed to clean state history")
}
@ -724,7 +724,7 @@ func TestTailTruncateHistory(t *testing.T) {
tester.db.Close()
tester.db = New(tester.db.diskdb, &Config{StateHistory: 10}, false)
head, err := tester.db.freezer.Ancients()
head, err := tester.db.stateFreezer.Ancients()
if err != nil {
t.Fatalf("Failed to obtain freezer head")
}

View file

@ -337,16 +337,16 @@ func (dl *diskLayer) commit(bottom *diffLayer, force bool) (*diskLayer, error) {
overflow bool
oldest uint64
)
if dl.db.freezer != nil {
if dl.db.stateFreezer != nil {
// Bail out with an error if writing the state history fails.
// This can happen, for example, if the device is full.
err := writeHistory(dl.db.freezer, bottom)
err := writeStateHistory(dl.db.stateFreezer, bottom)
if err != nil {
return nil, err
}
// Determine if the persisted history object has exceeded the configured
// limitation, set the overflow as true if so.
tail, err := dl.db.freezer.Tail()
tail, err := dl.db.stateFreezer.Tail()
if err != nil {
return nil, err
}
@ -356,8 +356,8 @@ func (dl *diskLayer) commit(bottom *diffLayer, force bool) (*diskLayer, error) {
oldest = bottom.stateID() - limit + 1 // track the id of history **after truncation**
}
// Notify the state history indexer for newly created history
if dl.db.indexer != nil {
if err := dl.db.indexer.extend(bottom.stateID()); err != nil {
if dl.db.stateIndexer != nil {
if err := dl.db.stateIndexer.extend(bottom.stateID()); err != nil {
return nil, err
}
}
@ -418,7 +418,7 @@ func (dl *diskLayer) commit(bottom *diffLayer, force bool) (*diskLayer, error) {
// Freeze the live buffer and schedule background flushing
dl.frozen = combined
dl.frozen.flush(bottom.root, dl.db.diskdb, dl.db.freezer, progress, dl.nodes, dl.states, bottom.stateID(), func() {
dl.frozen.flush(bottom.root, dl.db.diskdb, dl.db.stateFreezer, progress, dl.nodes, dl.states, bottom.stateID(), func() {
// Resume the background generation if it's not completed yet.
// The generator is assumed to be available if the progress is
// not nil.
@ -448,7 +448,7 @@ func (dl *diskLayer) commit(bottom *diffLayer, force bool) (*diskLayer, error) {
// To remove outdated history objects from the end, we set the 'tail' parameter
// to 'oldest-1' due to the offset between the freezer index and the history ID.
if overflow {
pruned, err := truncateFromTail(ndl.db.diskdb, ndl.db.freezer, oldest-1)
pruned, err := truncateFromTail(ndl.db.diskdb, ndl.db.stateFreezer, oldest-1)
if err != nil {
return nil, err
}
@ -458,7 +458,7 @@ func (dl *diskLayer) commit(bottom *diffLayer, force bool) (*diskLayer, error) {
}
// revert applies the given state history and return a reverted disk layer.
func (dl *diskLayer) revert(h *history) (*diskLayer, error) {
func (dl *diskLayer) revert(h *stateHistory) (*diskLayer, error) {
start := time.Now()
if h.meta.root != dl.rootHash() {
return nil, errUnexpectedHistory
@ -484,8 +484,8 @@ func (dl *diskLayer) revert(h *history) (*diskLayer, error) {
dl.stale = true
// Unindex the corresponding state history
if dl.db.indexer != nil {
if err := dl.db.indexer.shorten(dl.id); err != nil {
if dl.db.stateIndexer != nil {
if err := dl.db.stateIndexer.shorten(dl.id); err != nil {
return nil, err
}
}

View file

@ -180,7 +180,7 @@ func TestBatchIndexerWrite(t *testing.T) {
var (
db = rawdb.NewMemoryDatabase()
batch = newBatchIndexer(db, false)
histories = makeHistories(10)
histories = makeStateHistories(10)
)
for i, h := range histories {
if err := batch.process(h, uint64(i+1)); err != nil {
@ -257,7 +257,7 @@ func TestBatchIndexerDelete(t *testing.T) {
var (
db = rawdb.NewMemoryDatabase()
bw = newBatchIndexer(db, false)
histories = makeHistories(10)
histories = makeStateHistories(10)
)
// Index histories
for i, h := range histories {

View file

@ -93,7 +93,7 @@ func newBatchIndexer(db ethdb.KeyValueStore, delete bool) *batchIndexer {
// process iterates through the accounts and their associated storage slots in the
// state history, tracking the mapping between state and history IDs.
func (b *batchIndexer) process(h *history, historyID uint64) error {
func (b *batchIndexer) process(h *stateHistory, historyID uint64) error {
for _, address := range h.accountList {
addrHash := crypto.Keccak256Hash(address.Bytes())
b.counter += 1
@ -241,7 +241,7 @@ func indexSingle(historyID uint64, db ethdb.KeyValueStore, freezer ethdb.Ancient
}
return fmt.Errorf("history indexing is out of order, last: %s, requested: %d", last, historyID)
}
h, err := readHistory(freezer, historyID)
h, err := readStateHistory(freezer, historyID)
if err != nil {
return err
}
@ -271,7 +271,7 @@ func unindexSingle(historyID uint64, db ethdb.KeyValueStore, freezer ethdb.Ancie
}
return fmt.Errorf("history unindexing is out of order, last: %s, requested: %d", last, historyID)
}
h, err := readHistory(freezer, historyID)
h, err := readStateHistory(freezer, historyID)
if err != nil {
return err
}
@ -524,7 +524,7 @@ func (i *indexIniter) index(done chan struct{}, interrupt *atomic.Int32, lastID
if count > historyReadBatch {
count = historyReadBatch
}
histories, err := readHistories(i.freezer, current, count)
histories, err := readStateHistories(i.freezer, current, count)
if err != nil {
// The history read might fall if the history is truncated from
// head due to revert operation.

View file

@ -32,7 +32,7 @@ func TestHistoryIndexerShortenDeadlock(t *testing.T) {
freezer, _ := rawdb.NewStateFreezer(t.TempDir(), false, false)
defer freezer.Close()
histories := makeHistories(100)
histories := makeStateHistories(100)
for i, h := range histories {
accountData, storageData, accountIndex, storageIndex := h.encode()
rawdb.WriteStateHistory(freezer, uint64(i+1), h.meta.encode(), accountIndex, storageIndex, accountData, storageData)

View file

@ -61,7 +61,7 @@ func sanitizeRange(start, end uint64, freezer ethdb.AncientReader) (uint64, uint
return first, last, nil
}
func inspectHistory(freezer ethdb.AncientReader, start, end uint64, onHistory func(*history, *HistoryStats)) (*HistoryStats, error) {
func inspectHistory(freezer ethdb.AncientReader, start, end uint64, onHistory func(*stateHistory, *HistoryStats)) (*HistoryStats, error) {
var (
stats = &HistoryStats{}
init = time.Now()
@ -74,7 +74,7 @@ func inspectHistory(freezer ethdb.AncientReader, start, end uint64, onHistory fu
for id := start; id <= end; id += 1 {
// The entire history object is decoded, although it's unnecessary for
// account inspection. TODO(rjl493456442) optimization is worthwhile.
h, err := readHistory(freezer, id)
h, err := readStateHistory(freezer, id)
if err != nil {
return nil, err
}
@ -98,7 +98,7 @@ func inspectHistory(freezer ethdb.AncientReader, start, end uint64, onHistory fu
// accountHistory inspects the account history within the range.
func accountHistory(freezer ethdb.AncientReader, address common.Address, start, end uint64) (*HistoryStats, error) {
return inspectHistory(freezer, start, end, func(h *history, stats *HistoryStats) {
return inspectHistory(freezer, start, end, func(h *stateHistory, stats *HistoryStats) {
blob, exists := h.accounts[address]
if !exists {
return
@ -111,7 +111,7 @@ func accountHistory(freezer ethdb.AncientReader, address common.Address, start,
// storageHistory inspects the storage history within the range.
func storageHistory(freezer ethdb.AncientReader, address common.Address, slot common.Hash, start uint64, end uint64) (*HistoryStats, error) {
slotHash := crypto.Keccak256Hash(slot.Bytes())
return inspectHistory(freezer, start, end, func(h *history, stats *HistoryStats) {
return inspectHistory(freezer, start, end, func(h *stateHistory, stats *HistoryStats) {
slots, exists := h.storages[address]
if !exists {
return
@ -145,11 +145,11 @@ func historyRange(freezer ethdb.AncientReader) (uint64, uint64, error) {
}
last := head - 1
fh, err := readHistory(freezer, first)
fh, err := readStateHistory(freezer, first)
if err != nil {
return 0, 0, err
}
lh, err := readHistory(freezer, last)
lh, err := readStateHistory(freezer, last)
if err != nil {
return 0, 0, err
}

View file

@ -133,7 +133,7 @@ func testHistoryReader(t *testing.T, historyLimit uint64) {
var (
roots = env.roots
dRoot = env.db.tree.bottom().rootHash()
hr = newHistoryReader(env.db.diskdb, env.db.freezer)
hr = newHistoryReader(env.db.diskdb, env.db.stateFreezer)
)
for _, root := range roots {
if root == dRoot {

View file

@ -234,12 +234,13 @@ func (m *meta) decode(blob []byte) error {
}
}
// history represents a set of state changes belong to a block along with
// stateHistory represents a set of state changes belong to a block along with
// the metadata including the state roots involved in the state transition.
//
// State history objects in disk are linked with each other by a unique id
// (8-bytes integer), the oldest state history object can be pruned on demand
// in order to control the storage size.
type history struct {
type stateHistory struct {
meta *meta // Meta data of history
accounts map[common.Address][]byte // Account data keyed by its address hash
accountList []common.Address // Sorted account hash list
@ -247,8 +248,8 @@ type history struct {
storageList map[common.Address][]common.Hash // Sorted slot hash list
}
// newHistory constructs the state history object with provided state change set.
func newHistory(root common.Hash, parent common.Hash, block uint64, accounts map[common.Address][]byte, storages map[common.Address]map[common.Hash][]byte, rawStorageKey bool) *history {
// newStateHistory constructs the state history object with provided states.
func newStateHistory(root common.Hash, parent common.Hash, block uint64, accounts map[common.Address][]byte, storages map[common.Address]map[common.Hash][]byte, rawStorageKey bool) *stateHistory {
var (
accountList = slices.SortedFunc(maps.Keys(accounts), common.Address.Cmp)
storageList = make(map[common.Address][]common.Hash)
@ -260,7 +261,7 @@ func newHistory(root common.Hash, parent common.Hash, block uint64, accounts map
if !rawStorageKey {
version = stateHistoryV0
}
return &history{
return &stateHistory{
meta: &meta{
version: version,
parent: parent,
@ -276,7 +277,7 @@ func newHistory(root common.Hash, parent common.Hash, block uint64, accounts map
// stateSet returns the state set, keyed by the hash of the account address
// and the hash of the storage slot key.
func (h *history) stateSet() (map[common.Hash][]byte, map[common.Hash]map[common.Hash][]byte) {
func (h *stateHistory) stateSet() (map[common.Hash][]byte, map[common.Hash]map[common.Hash][]byte) {
var (
accounts = make(map[common.Hash][]byte)
storages = make(map[common.Hash]map[common.Hash][]byte)
@ -304,7 +305,7 @@ func (h *history) stateSet() (map[common.Hash][]byte, map[common.Hash]map[common
// encode serializes the state history and returns four byte streams represent
// concatenated account/storage data, account/storage indexes respectively.
func (h *history) encode() ([]byte, []byte, []byte, []byte) {
func (h *stateHistory) encode() ([]byte, []byte, []byte, []byte) {
var (
slotNumber uint32 // the number of processed slots
accountData []byte // the buffer for concatenated account data
@ -459,7 +460,7 @@ func (r *decoder) readStorage(accIndex accountIndex) ([]common.Hash, map[common.
}
// decode deserializes the account and storage data from the provided byte stream.
func (h *history) decode(accountData, storageData, accountIndexes, storageIndexes []byte) error {
func (h *stateHistory) decode(accountData, storageData, accountIndexes, storageIndexes []byte) error {
var (
count = len(accountIndexes) / accountIndexSize
accounts = make(map[common.Address][]byte, count)
@ -503,8 +504,8 @@ func (h *history) decode(accountData, storageData, accountIndexes, storageIndexe
return nil
}
// readHistory reads and decodes the state history object by the given id.
func readHistory(reader ethdb.AncientReader, id uint64) (*history, error) {
// readStateHistory reads a single state history records with the specified id.
func readStateHistory(reader ethdb.AncientReader, id uint64) (*stateHistory, error) {
mData, accountIndexes, storageIndexes, accountData, storageData, err := rawdb.ReadStateHistory(reader, id)
if err != nil {
return nil, err
@ -513,17 +514,16 @@ func readHistory(reader ethdb.AncientReader, id uint64) (*history, error) {
if err := m.decode(mData); err != nil {
return nil, err
}
h := history{meta: &m}
h := stateHistory{meta: &m}
if err := h.decode(accountData, storageData, accountIndexes, storageIndexes); err != nil {
return nil, err
}
return &h, nil
}
// readHistories reads and decodes a list of state histories with the specific
// history range.
func readHistories(freezer ethdb.AncientReader, start uint64, count uint64) ([]*history, error) {
var histories []*history
// readStateHistories reads a list of state history records within the specified range.
func readStateHistories(freezer ethdb.AncientReader, start uint64, count uint64) ([]*stateHistory, error) {
var histories []*stateHistory
metaList, aIndexList, sIndexList, aDataList, sDataList, err := rawdb.ReadStateHistoryList(freezer, start, count)
if err != nil {
return nil, err
@ -533,7 +533,7 @@ func readHistories(freezer ethdb.AncientReader, start uint64, count uint64) ([]*
if err := m.decode(metaList[i]); err != nil {
return nil, err
}
h := history{meta: &m}
h := stateHistory{meta: &m}
if err := h.decode(aDataList[i], sDataList[i], aIndexList[i], sIndexList[i]); err != nil {
return nil, err
}
@ -542,15 +542,15 @@ func readHistories(freezer ethdb.AncientReader, start uint64, count uint64) ([]*
return histories, nil
}
// writeHistory persists the state history with the provided state set.
func writeHistory(writer ethdb.AncientWriter, dl *diffLayer) error {
// writeStateHistory persists the state history associated with the given diff layer.
func writeStateHistory(writer ethdb.AncientWriter, dl *diffLayer) error {
// Short circuit if state set is not available.
if dl.states == nil {
return errors.New("state change set is not available")
}
var (
start = time.Now()
history = newHistory(dl.rootHash(), dl.parentLayer().rootHash(), dl.block, dl.states.accountOrigin, dl.states.storageOrigin, dl.states.rawStorageKey)
history = newStateHistory(dl.rootHash(), dl.parentLayer().rootHash(), dl.block, dl.states.accountOrigin, dl.states.storageOrigin, dl.states.rawStorageKey)
)
accountData, storageData, accountIndex, storageIndex := history.encode()
dataSize := common.StorageSize(len(accountData) + len(storageData))
@ -568,9 +568,9 @@ func writeHistory(writer ethdb.AncientWriter, dl *diffLayer) error {
return nil
}
// checkHistories retrieves a batch of meta objects with the specified range
// checkStateHistories retrieves a batch of meta objects with the specified range
// and performs the callback on each item.
func checkHistories(reader ethdb.AncientReader, start, count uint64, check func(*meta) error) error {
func checkStateHistories(reader ethdb.AncientReader, start, count uint64, check func(*meta) error) error {
for count > 0 {
number := count
if number > 10000 {

View file

@ -49,36 +49,36 @@ func randomStateSet(n int) (map[common.Address][]byte, map[common.Address]map[co
return accounts, storages
}
func makeHistory(rawStorageKey bool) *history {
func makeStateHistory(rawStorageKey bool) *stateHistory {
accounts, storages := randomStateSet(3)
return newHistory(testrand.Hash(), types.EmptyRootHash, 0, accounts, storages, rawStorageKey)
return newStateHistory(testrand.Hash(), types.EmptyRootHash, 0, accounts, storages, rawStorageKey)
}
func makeHistories(n int) []*history {
func makeStateHistories(n int) []*stateHistory {
var (
parent = types.EmptyRootHash
result []*history
result []*stateHistory
)
for i := 0; i < n; i++ {
root := testrand.Hash()
accounts, storages := randomStateSet(3)
h := newHistory(root, parent, uint64(i), accounts, storages, false)
h := newStateHistory(root, parent, uint64(i), accounts, storages, false)
parent = root
result = append(result, h)
}
return result
}
func TestEncodeDecodeHistory(t *testing.T) {
testEncodeDecodeHistory(t, false)
testEncodeDecodeHistory(t, true)
func TestEncodeDecodeStateHistory(t *testing.T) {
testEncodeDecodeStateHistory(t, false)
testEncodeDecodeStateHistory(t, true)
}
func testEncodeDecodeHistory(t *testing.T, rawStorageKey bool) {
func testEncodeDecodeStateHistory(t *testing.T, rawStorageKey bool) {
var (
m meta
dec history
obj = makeHistory(rawStorageKey)
dec stateHistory
obj = makeStateHistory(rawStorageKey)
)
// check if meta data can be correctly encode/decode
blob := obj.meta.encode()
@ -108,7 +108,7 @@ func testEncodeDecodeHistory(t *testing.T, rawStorageKey bool) {
}
}
func checkHistory(t *testing.T, db ethdb.KeyValueReader, freezer ethdb.AncientReader, id uint64, root common.Hash, exist bool) {
func checkStateHistory(t *testing.T, db ethdb.KeyValueReader, freezer ethdb.AncientReader, id uint64, root common.Hash, exist bool) {
blob := rawdb.ReadStateHistoryMeta(freezer, id)
if exist && len(blob) == 0 {
t.Fatalf("Failed to load trie history, %d", id)
@ -126,14 +126,14 @@ func checkHistory(t *testing.T, db ethdb.KeyValueReader, freezer ethdb.AncientRe
func checkHistoriesInRange(t *testing.T, db ethdb.KeyValueReader, freezer ethdb.AncientReader, from, to uint64, roots []common.Hash, exist bool) {
for i, j := from, 0; i <= to; i, j = i+1, j+1 {
checkHistory(t, db, freezer, i, roots[j], exist)
checkStateHistory(t, db, freezer, i, roots[j], exist)
}
}
func TestTruncateHeadHistory(t *testing.T) {
func TestTruncateHeadStateHistory(t *testing.T) {
var (
roots []common.Hash
hs = makeHistories(10)
hs = makeStateHistories(10)
db = rawdb.NewMemoryDatabase()
freezer, _ = rawdb.NewStateFreezer(t.TempDir(), false, false)
)
@ -158,10 +158,10 @@ func TestTruncateHeadHistory(t *testing.T) {
}
}
func TestTruncateTailHistory(t *testing.T) {
func TestTruncateTailStateHistory(t *testing.T) {
var (
roots []common.Hash
hs = makeHistories(10)
hs = makeStateHistories(10)
db = rawdb.NewMemoryDatabase()
freezer, _ = rawdb.NewStateFreezer(t.TempDir(), false, false)
)
@ -183,7 +183,7 @@ func TestTruncateTailHistory(t *testing.T) {
}
}
func TestTruncateTailHistories(t *testing.T) {
func TestTruncateTailStateHistories(t *testing.T) {
var cases = []struct {
limit uint64
expPruned int
@ -204,7 +204,7 @@ func TestTruncateTailHistories(t *testing.T) {
for i, c := range cases {
var (
roots []common.Hash
hs = makeHistories(10)
hs = makeStateHistories(10)
db = rawdb.NewMemoryDatabase()
freezer, _ = rawdb.NewStateFreezer(t.TempDir()+fmt.Sprintf("%d", i), false, false)
)
@ -232,7 +232,7 @@ func TestTruncateTailHistories(t *testing.T) {
func TestTruncateOutOfRange(t *testing.T) {
var (
hs = makeHistories(10)
hs = makeStateHistories(10)
db = rawdb.NewMemoryDatabase()
freezer, _ = rawdb.NewStateFreezer(t.TempDir(), false, false)
)

View file

@ -207,10 +207,10 @@ type HistoricalStateReader struct {
// HistoricReader constructs a reader for accessing the requested historic state.
func (db *Database) HistoricReader(root common.Hash) (*HistoricalStateReader, error) {
// Bail out if the state history hasn't been fully indexed
if db.indexer == nil || db.freezer == nil {
if db.stateIndexer == nil || db.stateFreezer == nil {
return nil, fmt.Errorf("historical state %x is not available", root)
}
if !db.indexer.inited() {
if !db.stateIndexer.inited() {
return nil, errors.New("state histories haven't been fully indexed yet")
}
// States at the current disk layer or above are directly accessible via
@ -230,7 +230,7 @@ func (db *Database) HistoricReader(root common.Hash) (*HistoricalStateReader, er
return &HistoricalStateReader{
id: *id,
db: db,
reader: newHistoryReader(db.diskdb, db.freezer),
reader: newHistoryReader(db.diskdb, db.stateFreezer),
}, nil
}