fix: use the bottom layer's block instead

Signed-off-by: jsvisa <delweng@gmail.com>
This commit is contained in:
jsvisa 2025-09-12 01:54:36 +00:00
parent ddc42b8376
commit aff6811e09
5 changed files with 45 additions and 17 deletions

View file

@@ -697,7 +697,7 @@ func (db *Database) SnapshotCompleted() bool {
func (db *Database) FrezzerTailBlock() (uint64, error) { func (db *Database) FrezzerTailBlock() (uint64, error) {
freezer := db.stateFreezer freezer := db.stateFreezer
if freezer == nil { if freezer == nil {
return 0, errors.New("freezer is not available") return db.tree.bottom().blockNumber(), nil
} }
tailID, err := freezer.Tail() tailID, err := freezer.Tail()
@@ -705,9 +705,9 @@ func (db *Database) FrezzerTailBlock() (uint64, error) {
return 0, err return 0, err
} }
// No state has been persistent, return the genesis block number. // No state has been persistent, get the block number from in-memory state.
if tailID == 0 { if tailID == 0 {
return 0, nil return db.tree.bottom().blockNumber(), nil
} }
blob := rawdb.ReadStateHistoryMeta(freezer, tailID+1) blob := rawdb.ReadStateHistoryMeta(freezer, tailID+1)

View file

@@ -31,9 +31,10 @@ import (
// diskLayer is a low level persistent layer built on top of a key-value store. // diskLayer is a low level persistent layer built on top of a key-value store.
type diskLayer struct { type diskLayer struct {
root common.Hash // Immutable, root hash to which this layer was made for root common.Hash // Immutable, root hash to which this layer was made for
id uint64 // Immutable, corresponding state id id uint64 // Immutable, corresponding state id
db *Database // Path-based trie database block uint64 // Immutable, associated block number
db *Database // Path-based trie database
// These two caches must be maintained separately, because the key // These two caches must be maintained separately, because the key
// for the root node of the storage trie (accountHash) is identical // for the root node of the storage trie (accountHash) is identical
@@ -54,7 +55,7 @@ type diskLayer struct {
} }
// newDiskLayer creates a new disk layer based on the passing arguments. // newDiskLayer creates a new disk layer based on the passing arguments.
func newDiskLayer(root common.Hash, id uint64, db *Database, nodes *fastcache.Cache, states *fastcache.Cache, buffer *buffer, frozen *buffer) *diskLayer { func newDiskLayer(root common.Hash, id uint64, block uint64, db *Database, nodes *fastcache.Cache, states *fastcache.Cache, buffer *buffer, frozen *buffer) *diskLayer {
// Initialize the clean caches if the memory allowance is not zero // Initialize the clean caches if the memory allowance is not zero
// or reuse the provided caches if they are not nil (inherited from // or reuse the provided caches if they are not nil (inherited from
// the original disk layer). // the original disk layer).
@@ -67,6 +68,7 @@ func newDiskLayer(root common.Hash, id uint64, db *Database, nodes *fastcache.Ca
return &diskLayer{ return &diskLayer{
root: root, root: root,
id: id, id: id,
block: block,
db: db, db: db,
nodes: nodes, nodes: nodes,
states: states, states: states,
@@ -85,6 +87,11 @@ func (dl *diskLayer) stateID() uint64 {
return dl.id return dl.id
} }
// blockNumber returns the associated block number of disk layer.
func (dl *diskLayer) blockNumber() uint64 {
return dl.block
}
// parentLayer implements the layer interface, returning nil as there's no layer // parentLayer implements the layer interface, returning nil as there's no layer
// below the disk. // below the disk.
func (dl *diskLayer) parentLayer() layer { func (dl *diskLayer) parentLayer() layer {
@@ -471,7 +478,7 @@ func (dl *diskLayer) commit(bottom *diffLayer, force bool) (*diskLayer, error) {
combined = newBuffer(dl.db.config.WriteBufferSize, nil, nil, 0) combined = newBuffer(dl.db.config.WriteBufferSize, nil, nil, 0)
} }
// Link the generator if snapshot is not yet completed // Link the generator if snapshot is not yet completed
ndl := newDiskLayer(bottom.root, bottom.stateID(), dl.db, dl.nodes, dl.states, combined, dl.frozen) ndl := newDiskLayer(bottom.root, bottom.stateID(), bottom.block, dl.db, dl.nodes, dl.states, combined, dl.frozen)
if dl.generator != nil { if dl.generator != nil {
ndl.setGenerator(dl.generator) ndl.setGenerator(dl.generator)
} }
@@ -520,7 +527,7 @@ func (dl *diskLayer) revert(h *stateHistory) (*diskLayer, error) {
if err != nil { if err != nil {
return nil, err return nil, err
} }
ndl := newDiskLayer(h.meta.parent, dl.id-1, dl.db, dl.nodes, dl.states, dl.buffer, dl.frozen) ndl := newDiskLayer(h.meta.parent, dl.id-1, h.meta.block-1, dl.db, dl.nodes, dl.states, dl.buffer, dl.frozen)
// Link the generator if it exists // Link the generator if it exists
if dl.generator != nil { if dl.generator != nil {
@@ -559,7 +566,7 @@ func (dl *diskLayer) revert(h *stateHistory) (*diskLayer, error) {
} }
// Link the generator and resume generation if the snapshot is not yet // Link the generator and resume generation if the snapshot is not yet
// fully completed. // fully completed.
ndl := newDiskLayer(h.meta.parent, dl.id-1, dl.db, dl.nodes, dl.states, dl.buffer, dl.frozen) ndl := newDiskLayer(h.meta.parent, dl.id-1, h.meta.block-1, dl.db, dl.nodes, dl.states, dl.buffer, dl.frozen)
if dl.generator != nil && !dl.generator.completed() { if dl.generator != nil && !dl.generator.completed() {
ndl.generator = dl.generator ndl.generator = dl.generator
ndl.generator.run(h.meta.parent) ndl.generator.run(h.meta.parent)

View file

@@ -186,7 +186,7 @@ func generateSnapshot(triedb *Database, root common.Hash, noBuild bool) *diskLay
stats = &generatorStats{start: time.Now()} stats = &generatorStats{start: time.Now()}
genMarker = []byte{} // Initialized but empty! genMarker = []byte{} // Initialized but empty!
) )
dl := newDiskLayer(root, 0, triedb, nil, nil, newBuffer(triedb.config.WriteBufferSize, nil, nil, 0), nil) dl := newDiskLayer(root, 0, 0, triedb, nil, nil, newBuffer(triedb.config.WriteBufferSize, nil, nil, 0), nil)
dl.setGenerator(newGenerator(triedb.diskdb, noBuild, genMarker, stats)) dl.setGenerator(newGenerator(triedb.diskdb, noBuild, genMarker, stats))
if !noBuild { if !noBuild {

View file

@@ -177,7 +177,19 @@ func (db *Database) loadLayers() layer {
log.Info("Failed to load journal, discard it", "err", err) log.Info("Failed to load journal, discard it", "err", err)
} }
// Return single layer with persistent state. // Return single layer with persistent state.
return newDiskLayer(root, rawdb.ReadPersistentStateID(db.diskdb), db, nil, nil, newBuffer(db.config.WriteBufferSize, nil, nil, 0), nil) stateID := rawdb.ReadPersistentStateID(db.diskdb)
var block uint64
if stateID > 0 {
// Try to get block number from state history metadata
blob := rawdb.ReadStateHistoryMeta(db.diskdb, stateID)
if blob != nil {
var m meta
if err := m.decode(blob); err == nil {
block = m.block
}
}
}
return newDiskLayer(root, stateID, block, db, nil, nil, newBuffer(db.config.WriteBufferSize, nil, nil, 0), nil)
} }
// loadDiskLayer reads the binary blob from the layer journal, reconstructing // loadDiskLayer reads the binary blob from the layer journal, reconstructing
@@ -195,6 +207,11 @@ func (db *Database) loadDiskLayer(r *rlp.Stream) (layer, error) {
if err := r.Decode(&id); err != nil { if err := r.Decode(&id); err != nil {
return nil, fmt.Errorf("load state id: %v", err) return nil, fmt.Errorf("load state id: %v", err)
} }
// Resolve the block number of disk layer
var block uint64
if err := r.Decode(&block); err != nil {
return nil, fmt.Errorf("load block number: %v", err)
}
stored := rawdb.ReadPersistentStateID(db.diskdb) stored := rawdb.ReadPersistentStateID(db.diskdb)
if stored > id { if stored > id {
return nil, fmt.Errorf("invalid state id: stored %d resolved %d", stored, id) return nil, fmt.Errorf("invalid state id: stored %d resolved %d", stored, id)
@@ -209,7 +226,7 @@ func (db *Database) loadDiskLayer(r *rlp.Stream) (layer, error) {
if err := states.decode(r); err != nil { if err := states.decode(r); err != nil {
return nil, err return nil, err
} }
return newDiskLayer(root, id, db, nil, nil, newBuffer(db.config.WriteBufferSize, &nodes, &states, id-stored), nil), nil return newDiskLayer(root, id, block, db, nil, nil, newBuffer(db.config.WriteBufferSize, &nodes, &states, id-stored), nil), nil
} }
// loadDiffLayer reads the next sections of a layer journal, reconstructing a new // loadDiffLayer reads the next sections of a layer journal, reconstructing a new
@@ -259,15 +276,19 @@ func (dl *diskLayer) journal(w io.Writer) error {
if err := rlp.Encode(w, dl.id); err != nil { if err := rlp.Encode(w, dl.id); err != nil {
return err return err
} }
// Step three, write the accumulated trie nodes into the journal // Step three, write the corresponding block number into the journal
if err := rlp.Encode(w, dl.block); err != nil {
return err
}
// Step four, write the accumulated trie nodes into the journal
if err := dl.buffer.nodes.encode(w); err != nil { if err := dl.buffer.nodes.encode(w); err != nil {
return err return err
} }
// Step four, write the accumulated flat states into the journal // Step five, write the accumulated flat states into the journal
if err := dl.buffer.states.encode(w); err != nil { if err := dl.buffer.states.encode(w); err != nil {
return err return err
} }
log.Debug("Journaled pathdb disk layer", "root", dl.root, "id", dl.id) log.Debug("Journaled pathdb disk layer", "root", dl.root, "id", dl.id, "block", dl.block)
return nil return nil
} }

View file

@@ -26,7 +26,7 @@ import (
func newTestLayerTree() *layerTree { func newTestLayerTree() *layerTree {
db := New(rawdb.NewMemoryDatabase(), nil, false) db := New(rawdb.NewMemoryDatabase(), nil, false)
l := newDiskLayer(common.Hash{0x1}, 0, db, nil, nil, newBuffer(0, nil, nil, 0), nil) l := newDiskLayer(common.Hash{0x1}, 0, 0, db, nil, nil, newBuffer(0, nil, nil, 0), nil)
t := newLayerTree(l) t := newLayerTree(l)
return t return t
} }