core, ethdb, triedb: add batch close (#33708)

Pebble maintains a batch pool to recycle batch objects. Unfortunately, a
batch object must be
explicitly returned via the `batch.Close` function. This PR extends the
batch interface by adding
the Close function and also invokes `batch.Close` in some critical code
paths.

Memory allocation must be measured before merging this change. What's
more, it is an open
question whether we should apply `batch.Close` as aggressively as possible
in every invocation.
This commit is contained in:
rjl493456442 2026-03-04 18:17:47 +08:00 committed by GitHub
parent 814edc5308
commit dd202d4283
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
9 changed files with 30 additions and 0 deletions

View file

@ -1283,6 +1283,8 @@ func (bc *BlockChain) ExportN(w io.Writer, first uint64, last uint64) error {
func (bc *BlockChain) writeHeadBlock(block *types.Block) {
// Add the block to the canonical chain number scheme and mark as the head
batch := bc.db.NewBatch()
defer batch.Close()
rawdb.WriteHeadHeaderHash(batch, block.Hash())
rawdb.WriteHeadFastBlockHash(batch, block.Hash())
rawdb.WriteCanonicalHash(batch, block.Hash(), block.NumberU64())
@ -1657,6 +1659,8 @@ func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types.
batch = bc.db.NewBatch()
start = time.Now()
)
defer batch.Close()
rawdb.WriteBlock(batch, block)
rawdb.WriteReceipts(batch, block.Hash(), block.NumberU64(), receipts)
rawdb.WritePreimages(batch, statedb.Preimages())
@ -2666,6 +2670,8 @@ func (bc *BlockChain) reorg(oldHead *types.Header, newHead *types.Header) error
// Delete useless indexes right now which includes the non-canonical
// transaction indexes, canonical chain indexes which above the head.
batch := bc.db.NewBatch()
defer batch.Close()
for _, tx := range types.HashDifference(deletedTxs, rebirthTxs) {
rawdb.DeleteTxLookupEntry(batch, tx)
}

View file

@ -253,6 +253,11 @@ func (b *tableBatch) Reset() {
b.batch.Reset()
}
// Close closes the batch and releases all associated resources by
// forwarding the call to the underlying batch of the backing database
// (which, for a pebble backend, returns the batch to pebble's pool).
func (b *tableBatch) Close() {
b.batch.Close()
}
// tableReplayer is a wrapper around a batch replayer which truncates
// the added prefix.
type tableReplayer struct {

View file

@ -1342,6 +1342,7 @@ func (s *StateDB) commitAndFlush(block uint64, deleteEmptyObjects bool, noStorag
if err := batch.Write(); err != nil {
return nil, err
}
batch.Close()
}
if !ret.empty() {
// If snapshotting is enabled, update the snapshot tree with this new version

View file

@ -37,6 +37,9 @@ type Batch interface {
// Replay replays the batch contents.
Replay(w KeyValueWriter) error
// Close closes the batch and releases all associated resources.
Close()
}
// Batcher wraps the NewBatch method of a backing data store.

View file

@ -518,6 +518,9 @@ func (b *batch) Replay(w ethdb.KeyValueWriter) error {
return b.b.Replay(&replayer{writer: w})
}
// Close closes the batch and releases all associated resources. For this
// backend it is intentionally a no-op — the underlying batch holds no
// resources that need an explicit release — and exists only to satisfy
// the ethdb.Batch interface.
func (b *batch) Close() {}
// replayer is a small wrapper to implement the correct replay methods.
type replayer struct {
writer ethdb.KeyValueWriter

View file

@ -338,6 +338,9 @@ func (b *batch) Replay(w ethdb.KeyValueWriter) error {
return nil
}
// Close closes the batch and releases all associated resources. The
// in-memory batch owns nothing beyond ordinary garbage-collected memory,
// so this is a deliberate no-op kept to satisfy the ethdb.Batch interface.
func (b *batch) Close() {}
// iterator can walk over the (potentially partial) keyspace of a memory key
// value store. Internally it is a deep copy of the entire iterated state,
// sorted by keys.

View file

@ -731,6 +731,12 @@ func (b *batch) Replay(w ethdb.KeyValueWriter) error {
}
}
// Close closes the batch and releases all associated resources. After it is
// closed, any subsequent operations on this batch are undefined.
//
// Unlike the other backends, closing is meaningful here: pebble recycles
// batch objects through an internal pool, and the underlying Close call
// returns this batch to that pool for reuse.
func (b *batch) Close() {
b.b.Close()
}
// pebbleIterator is a wrapper of underlying iterator in storage engine.
// The purpose of this structure is to implement the missing APIs.
//

View file

// The spongeBatch methods below are test stubs: they implement the batch
// interface with fixed return values and no side effects, so the sponge
// only records the key/value traffic it cares about elsewhere.
func (b *spongeBatch) ValueSize() int { return 100 }
func (b *spongeBatch) Write() error { return nil }
func (b *spongeBatch) Reset() {}
func (b *spongeBatch) Replay(w ethdb.KeyValueWriter) error { return nil }
func (b *spongeBatch) Close() {}
// TestCommitSequence tests that the trie.Commit operation writes the elements
// of the trie in the expected order.

View file

@ -180,6 +180,8 @@ func (b *buffer) flush(root common.Hash, db ethdb.KeyValueStore, freezers []ethd
b.flushErr = err
return
}
batch.Close()
commitBytesMeter.Mark(int64(size))
commitNodesMeter.Mark(int64(nodes))
commitAccountsMeter.Mark(int64(accounts))