triedb/pathdb: add AdoptSyncedState for snap/2 completion path (#34874)

This PR adds `AdoptSyncedState()` alongside `Enable()`. It performs the same
pathdb bookkeeping (now factored into a shared `resetForReactivation()`
helper), but skips the snapshot regeneration. The wiring/calling code lands
in #34626.

---------

Co-authored-by: Gary Rong <garyrong0905@gmail.com>
Jonny Rhea authored on 2026-05-06 08:08:15 -05:00, committed by GitHub
parent b92c86deb7, commit 06c30cc7e1
3 changed files with 150 additions and 19 deletions
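Because the wiring/calling code only lands in #34626, the sketch below shows, hypothetically, how a caller might pick between the two entry points once a sync completes. `activateAfterSync` and the `snap2` flag are invented names for illustration; only `Enable` and `AdoptSyncedState` come from this PR.

// Hypothetical wiring sketch; the real calling code lands in #34626.
func activateAfterSync(db *triedb.Database, root common.Hash, snap2 bool) error {
	if snap2 {
		// snap/2 already delivered a consistent flat state: adopt it
		// as-is, without scheduling background snapshot regeneration.
		return db.AdoptSyncedState(root)
	}
	// snap/1: activate the database and rebuild the snapshot from the
	// trie in the background.
	return db.Enable(root)
}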


@@ -327,6 +327,16 @@ func (db *Database) Enable(root common.Hash) error {
	return pdb.Enable(root)
}

// AdoptSyncedState activates the database after a snap/2 sync and adopts the
// flat state populated during sync as-is, skipping regeneration.
func (db *Database) AdoptSyncedState(root common.Hash) error {
	pdb, ok := db.backend.(*pathdb.Database)
	if !ok {
		return errors.New("not supported")
	}
	return pdb.AdoptSyncedState(root)
}

// Journal commits an entire diff hierarchy to disk into a single journal entry.
// This is meant to be used during shutdown to persist the snapshot without
// flattening everything down (bad for reorgs). It's only supported by path-based
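This wrapper is the only triedb-level surface: like `Enable`, it fails fast with "not supported" when the backend isn't path-based. A caller-side sketch follows; the surrounding handling is invented for illustration, only the error itself comes from this hunk.

// Illustrative only: the snap/2 completion path needs the path scheme,
// so a hash-backed database surfaces the "not supported" error here.
if err := db.AdoptSyncedState(root); err != nil {
	log.Warn("Cannot adopt synced state", "err", err)
	return err
}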


@@ -365,16 +365,9 @@ func (db *Database) Disable() error {
	return nil
}

// Enable activates database and resets the state tree with the provided persistent
// state root once the state sync is finished.
func (db *Database) Enable(root common.Hash) error {
	db.lock.Lock()
	defer db.lock.Unlock()
	// Short circuit if the database is in read only mode.
	if db.readOnly {
		return errDatabaseReadOnly
	}

// resetForReactivation performs the pathdb-side bookkeeping shared by both
// Enable and AdoptSyncedState.
func (db *Database) resetForReactivation(root common.Hash) error {
	// Ensure the provided state root matches the stored one.
	stored, err := db.hasher(rawdb.ReadAccountTrieNode(db.diskdb, nil))
	if err != nil {
@@ -383,27 +376,40 @@ func (db *Database) Enable(root common.Hash) error {
	if stored != root {
		return fmt.Errorf("state root mismatch: stored %x, synced %x", stored, root)
	}
	// Drop the stale state journal in persistent database and
	// reset the persistent state id back to zero.
	// Drop the stale state journal marker and reset the persistent state id
	// back to zero.
	batch := db.diskdb.NewBatch()
	rawdb.DeleteSnapshotRoot(batch)
	rawdb.WritePersistentStateID(batch, 0)
	if err := batch.Write(); err != nil {
		return err
	}
	// Clean up all state histories in freezer. Theoretically
	// all root->id mappings should be removed as well. Since
	// mappings can be huge and might take a while to clear
	// them, just leave them in disk and wait for overwriting.
	// Clean up all state histories in the freezer. Theoretically all root->id
	// mappings should be removed as well; since those can be huge, leave them
	// on disk and let them be overwritten.
	purgeHistory(db.stateFreezer, db.diskdb, typeStateHistory)
	purgeHistory(db.trienodeFreezer, db.diskdb, typeTrienodeHistory)
	// Re-enable the database as the final step.
	// Re-enable the database as the final bookkeeping step.
	db.waitSync = false
	rawdb.WriteSnapSyncStatusFlag(db.diskdb, rawdb.StateSyncFinished)
	return nil
}

	// Re-construct a new disk layer backed by persistent state
	// and schedule the state snapshot generation if it's permitted.
// Enable activates the database after a snap/1 sync and schedules background
// regeneration of the snapshot from the trie.
func (db *Database) Enable(root common.Hash) error {
	db.lock.Lock()
	defer db.lock.Unlock()
	if db.readOnly {
		return errDatabaseReadOnly
	}
	if err := db.resetForReactivation(root); err != nil {
		return err
	}
	// Re-construct a new disk layer backed by persistent state and schedule
	// the state snapshot generation if it's permitted.
	db.tree.init(generateSnapshot(db, root, db.isUBT || db.config.SnapshotNoBuild))
	// After snap sync, the state of the database may have changed completely.
@@ -416,6 +422,43 @@ func (db *Database) Enable(root common.Hash) error {
	return nil
}

// AdoptSyncedState reactivates the database after a snap/2 sync. The syncer
// already wrote a consistent flat state, so we take it as-is instead of
// rebuilding it from the trie. The new disk layer has no generator attached,
// and a "done" marker is written so future boots know the snapshot is
// already complete.
func (db *Database) AdoptSyncedState(root common.Hash) error {
	db.lock.Lock()
	defer db.lock.Unlock()
	if db.readOnly {
		return errDatabaseReadOnly
	}
	if err := db.resetForReactivation(root); err != nil {
		return err
	}
	// Tell the snapshot subsystem the flat state is good by writing the new root
	// and a "done" marker (nil journal) so the next boot doesn't try to rebuild it.
	batch := db.diskdb.NewBatch()
	rawdb.WriteSnapshotRoot(batch, root)
	journalProgress(batch, nil, nil)
	if err := batch.Write(); err != nil {
		return err
	}
	// New disk layer, no generator attached. Nothing to rebuild, and reads
	// can serve the flat state right away without waiting on a generator to
	// scan past every key.
	dl := newDiskLayer(root, 0, db, nil, nil, newBuffer(db.config.WriteBufferSize, nil, nil, 0), nil)
	db.tree.init(dl)
	db.setHistoryIndexer()
	log.Info("Adopted synced state", "root", root)
	return nil
}

// Recover rollbacks the database to a specified historical point.
// The state is supported as the rollback destination only if it's
// canonical state and the corresponding trie histories are existent.
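For illustration, here is how the "done" marker written by AdoptSyncedState could be read back at boot; it mirrors the checks made in the test below. `snapshotComplete` is a hypothetical helper, and only the `journalGenerator` fields the test itself exercises (`Done`, `Marker`) are assumed.

// snapshotComplete is a hypothetical helper, not part of this PR: it
// reads back the marker written by AdoptSyncedState and reports whether
// the flat state can be served without resuming generation.
func snapshotComplete(db *Database) (bool, error) {
	blob := rawdb.ReadSnapshotGenerator(db.diskdb)
	if len(blob) == 0 {
		return false, nil // never journalled, generation must run
	}
	var entry journalGenerator
	if err := rlp.DecodeBytes(blob, &entry); err != nil {
		return false, err
	}
	// After AdoptSyncedState, Done is true and Marker decodes empty.
	return entry.Done && len(entry.Marker) == 0, nil
}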


@@ -748,6 +748,84 @@ func TestDisable(t *testing.T) {
	}
}

// TestAdoptSyncedState verifies that AdoptSyncedState rejects a wrong root,
// writes the on-disk markers that say the snapshot is already complete,
// leaves a single fresh disk layer with no generator attached, and clears
// out stale state histories.
func TestAdoptSyncedState(t *testing.T) {
	maxDiffLayers = 4
	defer func() {
		maxDiffLayers = 128
	}()
	tester := newTester(t, &testerConfig{layers: 12})
	defer tester.release()

	// Push everything down to disk so the trie root is the persistent root.
	if err := tester.db.Commit(tester.lastHash(), false); err != nil {
		t.Fatalf("Failed to commit, err: %v", err)
	}
	stored := crypto.Keccak256Hash(rawdb.ReadAccountTrieNode(tester.db.diskdb, nil))

	// Mimic the snap-syncing state.
	if err := tester.db.Disable(); err != nil {
		t.Fatalf("Failed to disable database: %v", err)
	}
	// Mismatched root must be rejected.
	if err := tester.db.AdoptSyncedState(types.EmptyRootHash); err == nil {
		t.Fatal("Mismatched root should be rejected")
	}
	if err := tester.db.AdoptSyncedState(stored); err != nil {
		t.Fatalf("AdoptSyncedState failed: %v", err)
	}
	// On-disk markers reflect a completed snapshot.
	if got := rawdb.ReadSnapshotRoot(tester.db.diskdb); got != stored {
		t.Fatalf("SnapshotRoot mismatch: got %x want %x", got, stored)
	}
	if blob := rawdb.ReadSnapshotGenerator(tester.db.diskdb); len(blob) == 0 {
		t.Fatal("Generator journal not written")
	} else {
		var entry journalGenerator
		if err := rlp.DecodeBytes(blob, &entry); err != nil {
			t.Fatalf("Failed to decode generator journal: %v", err)
		}
		if !entry.Done {
			t.Fatal("Generator journal should be marked Done")
		}
		// RLP turns a nil slice into an empty one on decode, so check length.
		if len(entry.Marker) != 0 {
			t.Fatalf("Generator marker should be empty, got %x", entry.Marker)
		}
	}
	if rawdb.ReadSnapSyncStatusFlag(tester.db.diskdb) != rawdb.StateSyncFinished {
		t.Fatal("Sync-status flag should be StateSyncFinished")
	}
	if tester.db.waitSync {
		t.Fatal("waitSync should be false after adopt")
	}
	// State histories are purged.
	if n, err := tester.db.stateFreezer.Ancients(); err != nil || n != 0 {
		t.Fatalf("State histories not purged: count=%d err=%v", n, err)
	}
	// Layer tree has a single disk layer with no generator attached.
	if got := tester.db.tree.len(); got != 1 {
		t.Fatalf("Expected single layer, got %d", got)
	}
	dl := tester.db.tree.bottom()
	if dl.rootHash() != stored {
		t.Fatalf("Disk layer root mismatch: got %x want %x", dl.rootHash(), stored)
	}
	if dl.generator != nil {
		t.Fatal("Disk layer should have no generator after adopt")
	}
	if dl.genMarker() != nil {
		t.Fatal("genMarker should be nil after adopt")
	}
}

func TestCommit(t *testing.T) {
	// Redefine the diff layer depth allowance for faster testing.
	maxDiffLayers = 4