core/rawdb: integrate eradb backend for RPC (#31604)

This implements a backing store for chain history based on era1 files.
The new store is integrated with the freezer. Queries for blocks and receipts
below the current freezer tail are handled by the era store.

---------

Co-authored-by: Gary Rong <garyrong0905@gmail.com>
Co-authored-by: Felix Lange <fjl@twurst.com>
Co-authored-by: lightclient <lightclient@protonmail.com>
This commit is contained in:
Sina M 2025-06-03 10:47:38 +02:00 committed by GitHub
parent c87b856c1a
commit a7d9b52eaf
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
34 changed files with 774 additions and 271 deletions

View file

@ -45,6 +45,7 @@ import (
"github.com/ethereum/go-ethereum/internal/era/eradl" "github.com/ethereum/go-ethereum/internal/era/eradl"
"github.com/ethereum/go-ethereum/internal/flags" "github.com/ethereum/go-ethereum/internal/flags"
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/node"
"github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/params"
"github.com/urfave/cli/v2" "github.com/urfave/cli/v2"
) )
@ -277,10 +278,7 @@ func initGenesis(ctx *cli.Context) error {
overrides.OverrideVerkle = &v overrides.OverrideVerkle = &v
} }
chaindb, err := stack.OpenDatabaseWithFreezer("chaindata", 0, 0, ctx.String(utils.AncientFlag.Name), "", false) chaindb := utils.MakeChainDatabase(ctx, stack, false)
if err != nil {
utils.Fatalf("Failed to open database: %v", err)
}
defer chaindb.Close() defer chaindb.Close()
triedb := utils.MakeTrieDatabase(ctx, chaindb, ctx.Bool(utils.CachePreimagesFlag.Name), false, genesis.IsVerkle()) triedb := utils.MakeTrieDatabase(ctx, chaindb, ctx.Bool(utils.CachePreimagesFlag.Name), false, genesis.IsVerkle())
@ -317,7 +315,7 @@ func dumpGenesis(ctx *cli.Context) error {
// dump whatever already exists in the datadir // dump whatever already exists in the datadir
stack, _ := makeConfigNode(ctx) stack, _ := makeConfigNode(ctx)
db, err := stack.OpenDatabase("chaindata", 0, 0, "", true) db, err := stack.OpenDatabaseWithOptions("chaindata", node.DatabaseOptions{ReadOnly: true})
if err != nil { if err != nil {
return err return err
} }

View file

@ -111,6 +111,11 @@ var (
Usage: "Root directory for ancient data (default = inside chaindata)", Usage: "Root directory for ancient data (default = inside chaindata)",
Category: flags.EthCategory, Category: flags.EthCategory,
} }
EraFlag = &flags.DirectoryFlag{
Name: "datadir.era",
Usage: "Root directory for era1 history (default = inside ancient/chain)",
Category: flags.EthCategory,
}
MinFreeDiskSpaceFlag = &flags.DirectoryFlag{ MinFreeDiskSpaceFlag = &flags.DirectoryFlag{
Name: "datadir.minfreedisk", Name: "datadir.minfreedisk",
Usage: "Minimum free disk space in MB, once reached triggers auto shut down (default = --cache.gc converted to MB, 0 = disabled)", Usage: "Minimum free disk space in MB, once reached triggers auto shut down (default = --cache.gc converted to MB, 0 = disabled)",
@ -977,6 +982,7 @@ var (
DatabaseFlags = []cli.Flag{ DatabaseFlags = []cli.Flag{
DataDirFlag, DataDirFlag,
AncientFlag, AncientFlag,
EraFlag,
RemoteDBFlag, RemoteDBFlag,
DBEngineFlag, DBEngineFlag,
StateSchemeFlag, StateSchemeFlag,
@ -1613,6 +1619,9 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) {
if ctx.IsSet(AncientFlag.Name) { if ctx.IsSet(AncientFlag.Name) {
cfg.DatabaseFreezer = ctx.String(AncientFlag.Name) cfg.DatabaseFreezer = ctx.String(AncientFlag.Name)
} }
if ctx.IsSet(EraFlag.Name) {
cfg.DatabaseEra = ctx.String(EraFlag.Name)
}
if gcmode := ctx.String(GCModeFlag.Name); gcmode != "full" && gcmode != "archive" { if gcmode := ctx.String(GCModeFlag.Name); gcmode != "full" && gcmode != "archive" {
Fatalf("--%s must be either 'full' or 'archive'", GCModeFlag.Name) Fatalf("--%s must be either 'full' or 'archive'", GCModeFlag.Name)
@ -2082,7 +2091,15 @@ func MakeChainDatabase(ctx *cli.Context, stack *node.Node, readonly bool) ethdb.
} }
chainDb = remotedb.New(client) chainDb = remotedb.New(client)
default: default:
chainDb, err = stack.OpenDatabaseWithFreezer("chaindata", cache, handles, ctx.String(AncientFlag.Name), "eth/db/chaindata/", readonly) options := node.DatabaseOptions{
ReadOnly: readonly,
Cache: cache,
Handles: handles,
AncientsDirectory: ctx.String(AncientFlag.Name),
MetricsNamespace: "eth/db/chaindata/",
EraDirectory: ctx.String(EraFlag.Name),
}
chainDb, err = stack.OpenDatabaseWithOptions("chaindata", options)
} }
if err != nil { if err != nil {
Fatalf("Could not open database: %v", err) Fatalf("Could not open database: %v", err)

View file

@ -158,7 +158,7 @@ func TestHistoryImportAndExport(t *testing.T) {
} }
// Now import Era. // Now import Era.
db2, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), "", "", false) db2, err := rawdb.Open(rawdb.NewMemoryDatabase(), rawdb.OpenOptions{})
if err != nil { if err != nil {
panic(err) panic(err)
} }

View file

@ -47,30 +47,37 @@ func NewBasicLRU[K comparable, V any](capacity int) BasicLRU[K, V] {
// Add adds a value to the cache. Returns true if an item was evicted to store the new item. // Add adds a value to the cache. Returns true if an item was evicted to store the new item.
func (c *BasicLRU[K, V]) Add(key K, value V) (evicted bool) { func (c *BasicLRU[K, V]) Add(key K, value V) (evicted bool) {
_, _, evicted = c.Add3(key, value)
return evicted
}
// Add3 adds a value to the cache. If an item was evicted to store the new one, it returns the evicted item.
func (c *BasicLRU[K, V]) Add3(key K, value V) (ek K, ev V, evicted bool) {
item, ok := c.items[key] item, ok := c.items[key]
if ok { if ok {
// Already exists in cache.
item.value = value item.value = value
c.items[key] = item c.items[key] = item
c.list.moveToFront(item.elem) c.list.moveToFront(item.elem)
return false return ek, ev, false
} }
var elem *listElem[K] var elem *listElem[K]
if c.Len() >= c.cap { if c.Len() >= c.cap {
elem = c.list.removeLast() elem = c.list.removeLast()
delete(c.items, elem.v)
evicted = true evicted = true
ek = elem.v
ev = c.items[ek].value
delete(c.items, ek)
} else { } else {
elem = new(listElem[K]) elem = new(listElem[K])
} }
// Store the new item. // Store the new item.
// Note that, if another item was evicted, we re-use its list element here. // Note that if another item was evicted, we re-use its list element here.
elem.v = key elem.v = key
c.items[key] = cacheItem[K, V]{elem, value} c.items[key] = cacheItem[K, V]{elem, value}
c.list.pushElem(elem) c.list.pushElem(elem)
return evicted return ek, ev, evicted
} }
// Contains reports whether the given key exists in the cache. // Contains reports whether the given key exists in the cache.

View file

@ -1769,7 +1769,7 @@ func testRepairWithScheme(t *testing.T, tt *rewindTest, snapshots bool, scheme s
if err != nil { if err != nil {
t.Fatalf("Failed to create persistent key-value database: %v", err) t.Fatalf("Failed to create persistent key-value database: %v", err)
} }
db, err := rawdb.NewDatabaseWithFreezer(pdb, ancient, "", false) db, err := rawdb.Open(pdb, rawdb.OpenOptions{Ancient: ancient})
if err != nil { if err != nil {
t.Fatalf("Failed to create persistent freezer database: %v", err) t.Fatalf("Failed to create persistent freezer database: %v", err)
} }
@ -1854,7 +1854,7 @@ func testRepairWithScheme(t *testing.T, tt *rewindTest, snapshots bool, scheme s
if err != nil { if err != nil {
t.Fatalf("Failed to reopen persistent key-value database: %v", err) t.Fatalf("Failed to reopen persistent key-value database: %v", err)
} }
db, err = rawdb.NewDatabaseWithFreezer(pdb, ancient, "", false) db, err = rawdb.Open(pdb, rawdb.OpenOptions{Ancient: ancient})
if err != nil { if err != nil {
t.Fatalf("Failed to reopen persistent freezer database: %v", err) t.Fatalf("Failed to reopen persistent freezer database: %v", err)
} }
@ -1919,7 +1919,7 @@ func testIssue23496(t *testing.T, scheme string) {
if err != nil { if err != nil {
t.Fatalf("Failed to create persistent key-value database: %v", err) t.Fatalf("Failed to create persistent key-value database: %v", err)
} }
db, err := rawdb.NewDatabaseWithFreezer(pdb, ancient, "", false) db, err := rawdb.Open(pdb, rawdb.OpenOptions{Ancient: ancient})
if err != nil { if err != nil {
t.Fatalf("Failed to create persistent freezer database: %v", err) t.Fatalf("Failed to create persistent freezer database: %v", err)
} }
@ -1979,7 +1979,7 @@ func testIssue23496(t *testing.T, scheme string) {
if err != nil { if err != nil {
t.Fatalf("Failed to reopen persistent key-value database: %v", err) t.Fatalf("Failed to reopen persistent key-value database: %v", err)
} }
db, err = rawdb.NewDatabaseWithFreezer(pdb, ancient, "", false) db, err = rawdb.Open(pdb, rawdb.OpenOptions{Ancient: ancient})
if err != nil { if err != nil {
t.Fatalf("Failed to reopen persistent freezer database: %v", err) t.Fatalf("Failed to reopen persistent freezer database: %v", err)
} }

View file

@ -1973,7 +1973,7 @@ func testSetHeadWithScheme(t *testing.T, tt *rewindTest, snapshots bool, scheme
if err != nil { if err != nil {
t.Fatalf("Failed to create persistent key-value database: %v", err) t.Fatalf("Failed to create persistent key-value database: %v", err)
} }
db, err := rawdb.NewDatabaseWithFreezer(pdb, ancient, "", false) db, err := rawdb.Open(pdb, rawdb.OpenOptions{Ancient: ancient})
if err != nil { if err != nil {
t.Fatalf("Failed to create persistent freezer database: %v", err) t.Fatalf("Failed to create persistent freezer database: %v", err)
} }

View file

@ -70,7 +70,7 @@ func (basic *snapshotTestBasic) prepare(t *testing.T) (*BlockChain, []*types.Blo
if err != nil { if err != nil {
t.Fatalf("Failed to create persistent key-value database: %v", err) t.Fatalf("Failed to create persistent key-value database: %v", err)
} }
db, err := rawdb.NewDatabaseWithFreezer(pdb, ancient, "", false) db, err := rawdb.Open(pdb, rawdb.OpenOptions{Ancient: ancient})
if err != nil { if err != nil {
t.Fatalf("Failed to create persistent freezer database: %v", err) t.Fatalf("Failed to create persistent freezer database: %v", err)
} }
@ -265,7 +265,7 @@ func (snaptest *crashSnapshotTest) test(t *testing.T) {
if err != nil { if err != nil {
t.Fatalf("Failed to create persistent key-value database: %v", err) t.Fatalf("Failed to create persistent key-value database: %v", err)
} }
newdb, err := rawdb.NewDatabaseWithFreezer(pdb, snaptest.ancient, "", false) newdb, err := rawdb.Open(pdb, rawdb.OpenOptions{Ancient: snaptest.ancient})
if err != nil { if err != nil {
t.Fatalf("Failed to create persistent freezer database: %v", err) t.Fatalf("Failed to create persistent freezer database: %v", err)
} }

View file

@ -738,7 +738,7 @@ func testFastVsFullChains(t *testing.T, scheme string) {
t.Fatalf("failed to insert receipt %d: %v", n, err) t.Fatalf("failed to insert receipt %d: %v", n, err)
} }
// Freezer style fast import the chain. // Freezer style fast import the chain.
ancientDb, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), "", "", false) ancientDb, err := rawdb.Open(rawdb.NewMemoryDatabase(), rawdb.OpenOptions{})
if err != nil { if err != nil {
t.Fatalf("failed to create temp freezer db: %v", err) t.Fatalf("failed to create temp freezer db: %v", err)
} }
@ -824,7 +824,7 @@ func testLightVsFastVsFullChainHeads(t *testing.T, scheme string) {
// makeDb creates a db instance for testing. // makeDb creates a db instance for testing.
makeDb := func() ethdb.Database { makeDb := func() ethdb.Database {
db, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), "", "", false) db, err := rawdb.Open(rawdb.NewMemoryDatabase(), rawdb.OpenOptions{})
if err != nil { if err != nil {
t.Fatalf("failed to create temp freezer db: %v", err) t.Fatalf("failed to create temp freezer db: %v", err)
} }
@ -1623,7 +1623,7 @@ func testLargeReorgTrieGC(t *testing.T, scheme string) {
competitor, _ := GenerateChain(genesis.Config, shared[len(shared)-1], engine, genDb, 2*state.TriesInMemory+1, func(i int, b *BlockGen) { b.SetCoinbase(common.Address{3}) }) competitor, _ := GenerateChain(genesis.Config, shared[len(shared)-1], engine, genDb, 2*state.TriesInMemory+1, func(i int, b *BlockGen) { b.SetCoinbase(common.Address{3}) })
// Import the shared chain and the original canonical one // Import the shared chain and the original canonical one
db, _ := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), "", "", false) db, _ := rawdb.Open(rawdb.NewMemoryDatabase(), rawdb.OpenOptions{})
defer db.Close() defer db.Close()
chain, err := NewBlockChain(db, DefaultCacheConfigWithScheme(scheme), genesis, nil, engine, vm.Config{}, nil) chain, err := NewBlockChain(db, DefaultCacheConfigWithScheme(scheme), genesis, nil, engine, vm.Config{}, nil)
@ -1689,7 +1689,7 @@ func testBlockchainRecovery(t *testing.T, scheme string) {
_, blocks, receipts := GenerateChainWithGenesis(gspec, ethash.NewFaker(), int(height), nil) _, blocks, receipts := GenerateChainWithGenesis(gspec, ethash.NewFaker(), int(height), nil)
// Import the chain as an ancient-first node and ensure all pointers are updated // Import the chain as an ancient-first node and ensure all pointers are updated
ancientDb, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), t.TempDir(), "", false) ancientDb, err := rawdb.Open(rawdb.NewMemoryDatabase(), rawdb.OpenOptions{Ancient: t.TempDir()})
if err != nil { if err != nil {
t.Fatalf("failed to create temp freezer db: %v", err) t.Fatalf("failed to create temp freezer db: %v", err)
} }
@ -1747,7 +1747,7 @@ func testLowDiffLongChain(t *testing.T, scheme string) {
}) })
// Import the canonical chain // Import the canonical chain
diskdb, _ := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), "", "", false) diskdb, _ := rawdb.Open(rawdb.NewMemoryDatabase(), rawdb.OpenOptions{})
defer diskdb.Close() defer diskdb.Close()
chain, err := NewBlockChain(diskdb, DefaultCacheConfigWithScheme(scheme), genesis, nil, engine, vm.Config{}, nil) chain, err := NewBlockChain(diskdb, DefaultCacheConfigWithScheme(scheme), genesis, nil, engine, vm.Config{}, nil)
@ -1959,7 +1959,7 @@ func testInsertKnownChainData(t *testing.T, typ string, scheme string) {
b.OffsetTime(-9) // A higher difficulty b.OffsetTime(-9) // A higher difficulty
}) })
// Import the shared chain and the original canonical one // Import the shared chain and the original canonical one
chaindb, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), "", "", false) chaindb, err := rawdb.Open(rawdb.NewMemoryDatabase(), rawdb.OpenOptions{})
if err != nil { if err != nil {
t.Fatalf("failed to create temp freezer db: %v", err) t.Fatalf("failed to create temp freezer db: %v", err)
} }
@ -2122,7 +2122,7 @@ func testInsertKnownChainDataWithMerging(t *testing.T, typ string, mergeHeight i
} }
}) })
// Import the shared chain and the original canonical one // Import the shared chain and the original canonical one
chaindb, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), "", "", false) chaindb, err := rawdb.Open(rawdb.NewMemoryDatabase(), rawdb.OpenOptions{})
if err != nil { if err != nil {
t.Fatalf("failed to create temp freezer db: %v", err) t.Fatalf("failed to create temp freezer db: %v", err)
} }
@ -2496,7 +2496,7 @@ func testSideImportPrunedBlocks(t *testing.T, scheme string) {
if err != nil { if err != nil {
t.Fatalf("Failed to create persistent key-value database: %v", err) t.Fatalf("Failed to create persistent key-value database: %v", err)
} }
db, err := rawdb.NewDatabaseWithFreezer(pdb, ancient, "", false) db, err := rawdb.Open(pdb, rawdb.OpenOptions{Ancient: ancient})
if err != nil { if err != nil {
t.Fatalf("Failed to create persistent freezer database: %v", err) t.Fatalf("Failed to create persistent freezer database: %v", err)
} }
@ -3403,7 +3403,7 @@ func testSetCanonical(t *testing.T, scheme string) {
} }
gen.AddTx(tx) gen.AddTx(tx)
}) })
diskdb, _ := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), "", "", false) diskdb, _ := rawdb.Open(rawdb.NewMemoryDatabase(), rawdb.OpenOptions{})
defer diskdb.Close() defer diskdb.Close()
chain, err := NewBlockChain(diskdb, DefaultCacheConfigWithScheme(scheme), gspec, nil, engine, vm.Config{}, nil) chain, err := NewBlockChain(diskdb, DefaultCacheConfigWithScheme(scheme), gspec, nil, engine, vm.Config{}, nil)
@ -4199,7 +4199,7 @@ func testChainReorgSnapSync(t *testing.T, ancientLimit uint64) {
gen.SetCoinbase(common.Address{0: byte(0xb), 19: byte(i)}) gen.SetCoinbase(common.Address{0: byte(0xb), 19: byte(i)})
}) })
db, _ := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), "", "", false) db, _ := rawdb.Open(rawdb.NewMemoryDatabase(), rawdb.OpenOptions{})
defer db.Close() defer db.Close()
chain, _ := NewBlockChain(db, DefaultCacheConfigWithScheme(rawdb.PathScheme), gspec, nil, beacon.New(ethash.NewFaker()), vm.Config{}, nil) chain, _ := NewBlockChain(db, DefaultCacheConfigWithScheme(rawdb.PathScheme), gspec, nil, beacon.New(ethash.NewFaker()), vm.Config{}, nil)
@ -4315,7 +4315,7 @@ func testInsertChainWithCutoff(t *testing.T, cutoff uint64, ancientLimit uint64,
config := DefaultCacheConfigWithScheme(rawdb.PathScheme) config := DefaultCacheConfigWithScheme(rawdb.PathScheme)
config.ChainHistoryMode = history.KeepPostMerge config.ChainHistoryMode = history.KeepPostMerge
db, _ := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), "", "", false) db, _ := rawdb.Open(rawdb.NewMemoryDatabase(), rawdb.OpenOptions{})
defer db.Close() defer db.Close()
chain, _ := NewBlockChain(db, DefaultCacheConfigWithScheme(rawdb.PathScheme), genesis, nil, beacon.New(ethash.NewFaker()), vm.Config{}, nil) chain, _ := NewBlockChain(db, DefaultCacheConfigWithScheme(rawdb.PathScheme), genesis, nil, beacon.New(ethash.NewFaker()), vm.Config{}, nil)
defer chain.Stop() defer chain.Stop()

View file

@ -416,7 +416,7 @@ func checkReceiptsRLP(have, want types.Receipts) error {
func TestAncientStorage(t *testing.T) { func TestAncientStorage(t *testing.T) {
// Freezer style fast import the chain. // Freezer style fast import the chain.
frdir := t.TempDir() frdir := t.TempDir()
db, err := NewDatabaseWithFreezer(NewMemoryDatabase(), frdir, "", false) db, err := Open(NewMemoryDatabase(), OpenOptions{Ancient: frdir})
if err != nil { if err != nil {
t.Fatalf("failed to create database with ancient backend") t.Fatalf("failed to create database with ancient backend")
} }
@ -469,7 +469,7 @@ func TestAncientStorage(t *testing.T) {
} }
func TestWriteAncientHeaderChain(t *testing.T) { func TestWriteAncientHeaderChain(t *testing.T) {
db, err := NewDatabaseWithFreezer(NewMemoryDatabase(), t.TempDir(), "", false) db, err := Open(NewMemoryDatabase(), OpenOptions{Ancient: t.TempDir()})
if err != nil { if err != nil {
t.Fatalf("failed to create database with ancient backend") t.Fatalf("failed to create database with ancient backend")
} }
@ -586,7 +586,7 @@ func TestHashesInRange(t *testing.T) {
func BenchmarkWriteAncientBlocks(b *testing.B) { func BenchmarkWriteAncientBlocks(b *testing.B) {
// Open freezer database. // Open freezer database.
frdir := b.TempDir() frdir := b.TempDir()
db, err := NewDatabaseWithFreezer(NewMemoryDatabase(), frdir, "", false) db, err := Open(NewMemoryDatabase(), OpenOptions{Ancient: frdir})
if err != nil { if err != nil {
b.Fatalf("failed to create database with ancient backend") b.Fatalf("failed to create database with ancient backend")
} }
@ -890,7 +890,7 @@ func TestHeadersRLPStorage(t *testing.T) {
// Have N headers in the freezer // Have N headers in the freezer
frdir := t.TempDir() frdir := t.TempDir()
db, err := NewDatabaseWithFreezer(NewMemoryDatabase(), frdir, "", false) db, err := Open(NewMemoryDatabase(), OpenOptions{Ancient: frdir})
if err != nil { if err != nil {
t.Fatalf("failed to create database with ancient backend") t.Fatalf("failed to create database with ancient backend")
} }

View file

@ -77,13 +77,6 @@ func basicRead(t *testing.T, newFn func(kinds []string) ethdb.AncientStore) {
} }
for _, c := range cases { for _, c := range cases {
for i := c.start; i < c.limit; i++ { for i := c.start; i < c.limit; i++ {
exist, err := db.HasAncient("a", uint64(i))
if err != nil {
t.Fatalf("Failed to check presence, %v", err)
}
if exist {
t.Fatalf("Item %d is already truncated", uint64(i))
}
_, err = db.Ancient("a", uint64(i)) _, err = db.Ancient("a", uint64(i))
if err == nil { if err == nil {
t.Fatal("Error is expected for non-existent item") t.Fatal("Error is expected for non-existent item")
@ -93,13 +86,6 @@ func basicRead(t *testing.T, newFn func(kinds []string) ethdb.AncientStore) {
// Test the items in range should be reachable // Test the items in range should be reachable
for i := 10; i < 90; i++ { for i := 10; i < 90; i++ {
exist, err := db.HasAncient("a", uint64(i))
if err != nil {
t.Fatalf("Failed to check presence, %v", err)
}
if !exist {
t.Fatalf("Item %d is missing", uint64(i))
}
blob, err := db.Ancient("a", uint64(i)) blob, err := db.Ancient("a", uint64(i))
if err != nil { if err != nil {
t.Fatalf("Failed to retrieve item, %v", err) t.Fatalf("Failed to retrieve item, %v", err)
@ -110,13 +96,6 @@ func basicRead(t *testing.T, newFn func(kinds []string) ethdb.AncientStore) {
} }
// Test the items in unknown table shouldn't be reachable // Test the items in unknown table shouldn't be reachable
exist, err := db.HasAncient("b", uint64(0))
if err != nil {
t.Fatalf("Failed to check presence, %v", err)
}
if exist {
t.Fatal("Item in unknown table shouldn't be found")
}
_, err = db.Ancient("b", uint64(0)) _, err = db.Ancient("b", uint64(0))
if err == nil { if err == nil {
t.Fatal("Error is expected for unknown table") t.Fatal("Error is expected for unknown table")

View file

@ -23,6 +23,7 @@ import (
"time" "time"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/rawdb/eradb"
"github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/params"
@ -43,7 +44,10 @@ const (
// feature. The background thread will keep moving ancient chain segments from // feature. The background thread will keep moving ancient chain segments from
// key-value database to flat files for saving space on live database. // key-value database to flat files for saving space on live database.
type chainFreezer struct { type chainFreezer struct {
ethdb.AncientStore // Ancient store for storing cold chain segment ancients ethdb.AncientStore // Ancient store for storing cold chain segment
// Optional Era database used as a backup for the pruned chain.
eradb *eradb.Store
quit chan struct{} quit chan struct{}
wg sync.WaitGroup wg sync.WaitGroup
@ -56,23 +60,27 @@ type chainFreezer struct {
// state freezer (e.g. dev mode). // state freezer (e.g. dev mode).
// - if non-empty directory is given, initializes the regular file-based // - if non-empty directory is given, initializes the regular file-based
// state freezer. // state freezer.
func newChainFreezer(datadir string, namespace string, readonly bool) (*chainFreezer, error) { func newChainFreezer(datadir string, eraDir string, namespace string, readonly bool) (*chainFreezer, error) {
var (
err error
freezer ethdb.AncientStore
)
if datadir == "" { if datadir == "" {
freezer = NewMemoryFreezer(readonly, chainFreezerTableConfigs) return &chainFreezer{
} else { ancients: NewMemoryFreezer(readonly, chainFreezerTableConfigs),
freezer, err = NewFreezer(datadir, namespace, readonly, freezerTableSize, chainFreezerTableConfigs) quit: make(chan struct{}),
trigger: make(chan chan struct{}),
}, nil
} }
freezer, err := NewFreezer(datadir, namespace, readonly, freezerTableSize, chainFreezerTableConfigs)
if err != nil {
return nil, err
}
edb, err := eradb.New(resolveChainEraDir(datadir, eraDir))
if err != nil { if err != nil {
return nil, err return nil, err
} }
return &chainFreezer{ return &chainFreezer{
AncientStore: freezer, ancients: freezer,
quit: make(chan struct{}), eradb: edb,
trigger: make(chan chan struct{}), quit: make(chan struct{}),
trigger: make(chan chan struct{}),
}, nil }, nil
} }
@ -84,7 +92,11 @@ func (f *chainFreezer) Close() error {
close(f.quit) close(f.quit)
} }
f.wg.Wait() f.wg.Wait()
return f.AncientStore.Close()
if f.eradb != nil {
f.eradb.Close()
}
return f.ancients.Close()
} }
// readHeadNumber returns the number of chain head block. 0 is returned if the // readHeadNumber returns the number of chain head block. 0 is returned if the
@ -334,3 +346,75 @@ func (f *chainFreezer) freezeRange(nfdb *nofreezedb, number, limit uint64) (hash
}) })
return hashes, err return hashes, err
} }
// Ancient retrieves an ancient binary blob from the append-only immutable files.
func (f *chainFreezer) Ancient(kind string, number uint64) ([]byte, error) {
// Lookup the entry in the underlying ancient store, assuming that
// headers and hashes are always available.
if kind == ChainFreezerHeaderTable || kind == ChainFreezerHashTable {
return f.ancients.Ancient(kind, number)
}
tail, err := f.ancients.Tail()
if err != nil {
return nil, err
}
// Lookup the entry in the underlying ancient store if it's not pruned
if number >= tail {
return f.ancients.Ancient(kind, number)
}
// Lookup the entry in the optional era backend
if f.eradb == nil {
return nil, errOutOfBounds
}
switch kind {
case ChainFreezerBodiesTable:
return f.eradb.GetRawBody(number)
case ChainFreezerReceiptTable:
return f.eradb.GetRawReceipts(number)
}
return nil, errUnknownTable
}
// ReadAncients executes an operation while preventing mutations to the freezer,
// i.e. if fn performs multiple reads, they will be consistent with each other.
func (f *chainFreezer) ReadAncients(fn func(ethdb.AncientReaderOp) error) (err error) {
if store, ok := f.ancients.(*Freezer); ok {
store.writeLock.Lock()
defer store.writeLock.Unlock()
}
return fn(f)
}
// Methods below are just pass-through to the underlying ancient store.
func (f *chainFreezer) Ancients() (uint64, error) {
return f.ancients.Ancients()
}
func (f *chainFreezer) Tail() (uint64, error) {
return f.ancients.Tail()
}
func (f *chainFreezer) AncientSize(kind string) (uint64, error) {
return f.ancients.AncientSize(kind)
}
func (f *chainFreezer) AncientRange(kind string, start, count, maxBytes uint64) ([][]byte, error) {
return f.ancients.AncientRange(kind, start, count, maxBytes)
}
func (f *chainFreezer) ModifyAncients(fn func(ethdb.AncientWriteOp) error) (int64, error) {
return f.ancients.ModifyAncients(fn)
}
func (f *chainFreezer) TruncateHead(items uint64) (uint64, error) {
return f.ancients.TruncateHead(items)
}
func (f *chainFreezer) TruncateTail(items uint64) (uint64, error) {
return f.ancients.TruncateTail(items)
}
func (f *chainFreezer) SyncAncient() error {
return f.ancients.SyncAncient()
}

View file

@ -86,11 +86,6 @@ type nofreezedb struct {
ethdb.KeyValueStore ethdb.KeyValueStore
} }
// HasAncient returns an error as we don't have a backing chain freezer.
func (db *nofreezedb) HasAncient(kind string, number uint64) (bool, error) {
return false, errNotSupported
}
// Ancient returns an error as we don't have a backing chain freezer. // Ancient returns an error as we don't have a backing chain freezer.
func (db *nofreezedb) Ancient(kind string, number uint64) ([]byte, error) { func (db *nofreezedb) Ancient(kind string, number uint64) ([]byte, error) {
return nil, errNotSupported return nil, errNotSupported
@ -186,19 +181,49 @@ func resolveChainFreezerDir(ancient string) string {
return freezer return freezer
} }
// NewDatabaseWithFreezer creates a high level database on top of a given key- // resolveChainEraDir is a helper function which resolves the absolute path of era database.
// value data store with a freezer moving immutable chain segments into cold func resolveChainEraDir(chainFreezerDir string, era string) string {
// storage. The passed ancient indicates the path of root ancient directory switch {
// where the chain freezer can be opened. case era == "":
return filepath.Join(chainFreezerDir, "era")
case !filepath.IsAbs(era):
return filepath.Join(chainFreezerDir, era)
default:
return era
}
}
// NewDatabaseWithFreezer creates a high level database on top of a given key-value store.
// The passed ancient indicates the path of root ancient directory where the chain freezer
// can be opened.
//
// Deprecated: use Open.
func NewDatabaseWithFreezer(db ethdb.KeyValueStore, ancient string, namespace string, readonly bool) (ethdb.Database, error) { func NewDatabaseWithFreezer(db ethdb.KeyValueStore, ancient string, namespace string, readonly bool) (ethdb.Database, error) {
return Open(db, OpenOptions{
Ancient: ancient,
MetricsNamespace: namespace,
ReadOnly: readonly,
})
}
// OpenOptions specifies options for opening the database.
type OpenOptions struct {
Ancient string // ancients directory
Era string // era files directory
MetricsNamespace string // prefix added to freezer metric names
ReadOnly bool
}
// Open creates a high-level database wrapper for the given key-value store.
func Open(db ethdb.KeyValueStore, opts OpenOptions) (ethdb.Database, error) {
// Create the idle freezer instance. If the given ancient directory is empty, // Create the idle freezer instance. If the given ancient directory is empty,
// in-memory chain freezer is used (e.g. dev mode); otherwise the regular // in-memory chain freezer is used (e.g. dev mode); otherwise the regular
// file-based freezer is created. // file-based freezer is created.
chainFreezerDir := ancient chainFreezerDir := opts.Ancient
if chainFreezerDir != "" { if chainFreezerDir != "" {
chainFreezerDir = resolveChainFreezerDir(chainFreezerDir) chainFreezerDir = resolveChainFreezerDir(chainFreezerDir)
} }
frdb, err := newChainFreezer(chainFreezerDir, namespace, readonly) frdb, err := newChainFreezer(chainFreezerDir, opts.Era, opts.MetricsNamespace, opts.ReadOnly)
if err != nil { if err != nil {
printChainMetadata(db) printChainMetadata(db)
return nil, err return nil, err
@ -282,7 +307,7 @@ func NewDatabaseWithFreezer(db ethdb.KeyValueStore, ancient string, namespace st
} }
} }
// Freezer is consistent with the key-value database, permit combining the two // Freezer is consistent with the key-value database, permit combining the two
if !readonly { if !opts.ReadOnly {
frdb.wg.Add(1) frdb.wg.Add(1)
go func() { go func() {
frdb.freeze(db) frdb.freeze(db)
@ -290,7 +315,7 @@ func NewDatabaseWithFreezer(db ethdb.KeyValueStore, ancient string, namespace st
}() }()
} }
return &freezerdb{ return &freezerdb{
ancientRoot: ancient, ancientRoot: opts.Ancient,
KeyValueStore: db, KeyValueStore: db,
chainFreezer: frdb, chainFreezer: frdb,
}, nil }, nil

345
core/rawdb/eradb/eradb.go Normal file
View file

@ -0,0 +1,345 @@
// Copyright 2025 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
// Package eradb implements a history backend using era1 files.
package eradb
import (
"bytes"
"errors"
"fmt"
"io/fs"
"path/filepath"
"sync"
"github.com/ethereum/go-ethereum/common/lru"
"github.com/ethereum/go-ethereum/internal/era"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/rlp"
)
const openFileLimit = 64
var errClosed = errors.New("era store is closed")
// Store manages read access to a directory of era1 files.
// The getter methods are thread-safe.
type Store struct {
	datadir string // directory containing the era1 files

	// The mutex protects all remaining fields.
	mu      sync.Mutex
	cond    *sync.Cond                            // signaled whenever lru or opening shrinks; Close waits on it
	lru     lru.BasicLRU[uint64, *fileCacheEntry] // successfully opened files, keyed by epoch number
	opening map[uint64]*fileCacheEntry            // files currently being opened, keyed by epoch number
	closing bool                                  // set by Close to prevent new cache additions
}
// fileCacheEntry tracks one era1 file held by the Store, together with a
// reference count that keeps the file open while readers are using it.
type fileCacheEntry struct {
	refcount int           // reference count. This is protected by Store.mu!
	opened   chan struct{} // signals opening of file has completed
	file     *era.Era      // the file (nil until opened successfully)
	err      error         // error from opening the file
}
// fileCacheStatus describes the outcome of a cache lookup in getCacheEntry.
type fileCacheStatus byte

const (
	storeClosing  fileCacheStatus = iota // the store is shutting down; no entry is returned
	fileIsNew                            // the file is not cached; the caller must open it
	fileIsOpening                        // another goroutine is opening the file; wait on entry.opened
	fileIsCached                         // the file is open and ready for use
)
// New opens the store directory. Individual era1 files within it are opened
// lazily, on first access.
func New(datadir string) (*Store, error) {
	s := &Store{
		datadir: datadir,
		lru:     lru.NewBasicLRU[uint64, *fileCacheEntry](openFileLimit),
		opening: make(map[uint64]*fileCacheEntry),
	}
	s.cond = sync.NewCond(&s.mu)
	log.Info("Opened Era store", "datadir", datadir)
	return s, nil
}
// Close closes all open era1 files in the cache and blocks until all
// in-flight reads have drained. Getters invoked after Close report errClosed.
func (db *Store) Close() {
	db.mu.Lock()
	defer db.mu.Unlock()

	// Prevent new cache additions.
	db.closing = true

	// Deref all active files. Since inactive files have a refcount of one, they will be
	// closed right here and now after decrementing. Files which are currently being used
	// have a refcount > 1 and will hit zero when their access finishes.
	for _, epoch := range db.lru.Keys() {
		entry, _ := db.lru.Peek(epoch)
		if entry.derefAndClose(epoch) {
			db.lru.Remove(epoch)
		}
	}
	// Wait for all store access to finish. doneWithFile, fileOpened and
	// fileFailedToOpen signal db.cond as they shrink db.lru / db.opening.
	for db.lru.Len() > 0 || len(db.opening) > 0 {
		db.cond.Wait()
	}
}
// GetRawBody returns the RLP-encoded body for a given block number.
// A nil result with a nil error means no era1 file covers the block.
func (db *Store) GetRawBody(number uint64) ([]byte, error) {
	epoch := number / uint64(era.MaxEra1Size)
	e := db.getEraByEpoch(epoch)
	switch {
	case errors.Is(e.err, fs.ErrNotExist):
		// Missing epoch file: the block is simply not in the store.
		return nil, nil
	case e.err != nil:
		return nil, e.err
	}
	defer db.doneWithFile(epoch, e)
	return e.file.GetRawBodyByNumber(number)
}
// GetRawReceipts returns the raw receipts for a given block number, converted
// into the 'storage' encoding used by the ancients database. A nil result
// with a nil error means no era1 file covers the block.
func (db *Store) GetRawReceipts(number uint64) ([]byte, error) {
	epoch := number / uint64(era.MaxEra1Size)
	e := db.getEraByEpoch(epoch)
	switch {
	case errors.Is(e.err, fs.ErrNotExist):
		// Missing epoch file: the block is simply not in the store.
		return nil, nil
	case e.err != nil:
		return nil, e.err
	}
	defer db.doneWithFile(epoch, e)
	raw, err := e.file.GetRawReceiptsByNumber(number)
	if err != nil {
		return nil, err
	}
	return convertReceipts(raw)
}
// convertReceipts transforms an encoded block receipts list from the format
// used by era1 into the 'storage' format used by the go-ethereum ancients
// database. Each input receipt is either a legacy RLP list or a typed receipt
// wrapped in an RLP string (type byte + payload); in both cases the output
// receipt is a list with the bloom filter field removed.
func convertReceipts(input []byte) ([]byte, error) {
	var (
		out bytes.Buffer
		enc = rlp.NewEncoderBuffer(&out)
	)
	blockListIter, err := rlp.NewListIterator(input)
	if err != nil {
		return nil, fmt.Errorf("invalid block receipts list: %v", err)
	}
	outerList := enc.List()
	for i := 0; blockListIter.Next(); i++ {
		kind, content, _, err := rlp.Split(blockListIter.Value())
		if err != nil {
			return nil, fmt.Errorf("receipt %d invalid: %v", i, err)
		}
		var receiptData []byte
		switch kind {
		case rlp.Byte:
			return nil, fmt.Errorf("receipt %d is single byte", i)
		case rlp.String:
			// Typed receipt - first content byte is the tx type, skip it.
			// Guard against an empty string, which would make content[1:] panic.
			if len(content) == 0 {
				return nil, fmt.Errorf("receipt %d is empty", i)
			}
			receiptData = content[1:]
		case rlp.List:
			// Legacy receipt
			receiptData = blockListIter.Value()
		}
		// Convert data list.
		// Input is [status, gas-used, bloom, logs]
		// Output is [status, gas-used, logs], i.e. we need to skip the bloom.
		dataIter, err := rlp.NewListIterator(receiptData)
		if err != nil {
			return nil, fmt.Errorf("receipt %d has invalid data: %v", i, err)
		}
		innerList := enc.List()
		for field := 0; dataIter.Next(); field++ {
			if field == 2 {
				continue // skip bloom
			}
			enc.Write(dataIter.Value())
		}
		enc.ListEnd(innerList)
		if dataIter.Err() != nil {
			return nil, fmt.Errorf("receipt %d iterator error: %v", i, dataIter.Err())
		}
	}
	enc.ListEnd(outerList)
	if blockListIter.Err() != nil {
		return nil, fmt.Errorf("block receipt list iterator error: %v", blockListIter.Err())
	}
	// Flush can report write errors from the underlying buffer; don't ignore it.
	if err := enc.Flush(); err != nil {
		return nil, err
	}
	return out.Bytes(), nil
}
// getEraByEpoch opens an era file or gets it from the cache.
// The caller can freely access the returned entry's .file and .err.
// db.doneWithFile must be called when the caller is done reading the file
// (callers only do so when .err is nil).
func (db *Store) getEraByEpoch(epoch uint64) *fileCacheEntry {
	stat, entry := db.getCacheEntry(epoch)
	switch stat {
	case storeClosing:
		return &fileCacheEntry{err: errClosed}
	case fileIsNew:
		// Open the file and put it into the cache. This goroutine won the race
		// to open the file; fileOpened/fileFailedToOpen record the outcome on
		// the shared entry before 'opened' is closed, so waiters see it.
		e, err := db.openEraFile(epoch)
		if err != nil {
			db.fileFailedToOpen(epoch, entry, err)
		} else {
			db.fileOpened(epoch, entry, e)
		}
		close(entry.opened)
	case fileIsOpening:
		// Wait for open to finish.
		<-entry.opened
	case fileIsCached:
		// Nothing to do.
	default:
		panic(fmt.Sprintf("invalid file state %d", stat))
	}
	return entry
}
// getCacheEntry gets an open era file from the cache, or registers a new
// entry in the 'opening' table when the epoch has not been seen before.
// Unless the store is closing, the returned entry's refcount is incremented
// on behalf of the caller.
func (db *Store) getCacheEntry(epoch uint64) (stat fileCacheStatus, entry *fileCacheEntry) {
	db.mu.Lock()
	defer db.mu.Unlock()

	if db.closing {
		return storeClosing, nil
	}
	if entry = db.opening[epoch]; entry != nil {
		stat = fileIsOpening
	} else if entry, _ = db.lru.Get(epoch); entry != nil {
		stat = fileIsCached
	} else {
		// It's a new file, create an entry in the opening table. Note the entry is
		// created with an initial refcount of one. We increment the count once more
		// before returning, but the count will return to one when the file has been
		// accessed. When the store is closed or the file gets evicted from the cache,
		// refcount will be decreased by one, thus allowing it to hit zero.
		entry = &fileCacheEntry{refcount: 1, opened: make(chan struct{})}
		db.opening[epoch] = entry
		stat = fileIsNew
	}
	entry.refcount++
	return stat, entry
}
// fileOpened is called after an era file has been successfully opened.
// It moves the entry from the 'opening' table into the LRU cache, unless
// the store was closed in the meantime.
func (db *Store) fileOpened(epoch uint64, entry *fileCacheEntry, file *era.Era) {
	db.mu.Lock()
	defer db.mu.Unlock()

	delete(db.opening, epoch)
	db.cond.Signal() // db.opening was modified

	// The database may have been closed while opening the file. When that happens, we
	// need to close the file here, since it isn't tracked by the LRU yet.
	if db.closing {
		entry.err = errClosed
		file.Close()
		return
	}
	// Add it to the LRU. This may evict an existing item, which we have to close.
	entry.file = file
	evictedEpoch, evictedEntry, _ := db.lru.Add3(epoch, entry)
	if evictedEntry != nil {
		evictedEntry.derefAndClose(evictedEpoch)
	}
}
// fileFailedToOpen is called when an era file could not be opened. It records
// the error on the entry and removes the entry from the pending-open table.
func (db *Store) fileFailedToOpen(epoch uint64, entry *fileCacheEntry, err error) {
	db.mu.Lock()
	defer db.mu.Unlock()

	entry.err = err
	delete(db.opening, epoch)
	db.cond.Signal() // db.opening was modified
}
// openEraFile locates and opens the era1 file for the given epoch in the
// store directory. It returns fs.ErrNotExist when no file matches the epoch,
// and an error when several files match or the file fails validation.
func (db *Store) openEraFile(epoch uint64) (*era.Era, error) {
	// File name scheme is <network>-<epoch>-<root>.
	glob := fmt.Sprintf("*-%05d-*.era1", epoch)
	matches, err := filepath.Glob(filepath.Join(db.datadir, glob))
	if err != nil {
		return nil, err
	}
	if len(matches) > 1 {
		return nil, fmt.Errorf("multiple era1 files found for epoch %d", epoch)
	}
	if len(matches) == 0 {
		return nil, fs.ErrNotExist
	}
	filename := matches[0]
	e, err := era.Open(filename)
	if err != nil {
		return nil, err
	}
	// Sanity-check start block.
	if e.Start()%uint64(era.MaxEra1Size) != 0 {
		// Close the file before bailing out, so the handle isn't leaked.
		e.Close()
		return nil, fmt.Errorf("pre-merge era1 file has invalid boundary. %d %% %d != 0", e.Start(), era.MaxEra1Size)
	}
	log.Debug("Opened era1 file", "epoch", epoch)
	return e, nil
}
// doneWithFile signals that the caller has finished using a file.
// This decrements the refcount and ensures the file is closed by the last user.
func (db *Store) doneWithFile(epoch uint64, entry *fileCacheEntry) {
	db.mu.Lock()
	defer db.mu.Unlock()

	// Entries that failed to open hold no file and were never added to the
	// LRU, so there is nothing to release.
	if entry.err != nil {
		return
	}
	if entry.derefAndClose(epoch) {
		// Delete closed entry from LRU if it is still present.
		// (It may already have been evicted by a concurrent fileOpened.)
		if e, _ := db.lru.Peek(epoch); e == entry {
			db.lru.Remove(epoch)
			db.cond.Signal() // db.lru was modified
		}
	}
}
// derefAndClose decrements the reference counter and closes the file
// when it hits zero. It reports whether the file was closed.
// Callers must hold Store.mu (refcount is protected by it).
func (entry *fileCacheEntry) derefAndClose(epoch uint64) (closed bool) {
	entry.refcount--
	if entry.refcount > 0 {
		return false
	}
	closeErr := entry.file.Close()
	if closeErr == nil {
		log.Debug("Closed era1 file", "epoch", epoch)
	} else {
		log.Warn("Error closing era1 file", "epoch", epoch, "err", closeErr)
	}
	return true
}

View file

@ -0,0 +1,103 @@
// Copyright 2025 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package eradb
import (
"sync"
"testing"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/rlp"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestEraDatabase reads a known block body and its receipts from the era1
// fixtures in the testdata directory and checks their decoded contents.
func TestEraDatabase(t *testing.T) {
	db, err := New("testdata")
	require.NoError(t, err)
	defer db.Close()

	const blockNum = 175881

	// Fetch and decode the block body.
	raw, err := db.GetRawBody(blockNum)
	require.NoError(t, err)
	var body *types.Body
	require.NoError(t, rlp.DecodeBytes(raw, &body))
	require.NotNil(t, body, "block body not found")
	assert.Equal(t, 3, len(body.Transactions))

	// Fetch and decode the receipts.
	raw, err = db.GetRawReceipts(blockNum)
	require.NoError(t, err)
	var receipts []*types.ReceiptForStorage
	require.NoError(t, rlp.DecodeBytes(raw, &receipts))
	require.NotNil(t, receipts, "receipts not found")
	assert.Equal(t, 3, len(receipts), "receipts length mismatch")
}
// TestEraDatabaseConcurrentOpen hammers the same epoch from many goroutines
// at once, so most of them exercise the wait-for-open (fileIsOpening) path.
func TestEraDatabaseConcurrentOpen(t *testing.T) {
	db, err := New("testdata")
	require.NoError(t, err)
	defer db.Close()

	const workers = 25
	var wg sync.WaitGroup
	for range workers {
		wg.Add(1)
		go func() {
			defer wg.Done()
			body, err := db.GetRawBody(1024)
			switch {
			case err != nil:
				t.Error("err:", err)
			case len(body) == 0:
				t.Error("empty body")
			}
		}()
	}
	wg.Wait()
}
// TestEraDatabaseConcurrentOpenClose races concurrent reads against a
// concurrent Close; readers must either succeed or observe errClosed.
func TestEraDatabaseConcurrentOpenClose(t *testing.T) {
	db, err := New("testdata")
	require.NoError(t, err)
	defer db.Close()

	const readers = 10
	var wg sync.WaitGroup
	for range readers {
		wg.Add(1)
		go func() {
			defer wg.Done()
			body, err := db.GetRawBody(1024)
			switch {
			case err == errClosed:
				// Store was shut down under us; that's an expected outcome.
			case err != nil:
				t.Error("err:", err)
			case len(body) == 0:
				t.Error("empty body")
			}
		}()
	}
	wg.Add(1)
	go func() {
		defer wg.Done()
		db.Close()
	}()
	wg.Wait()
}

Binary file not shown.

Binary file not shown.

View file

@ -172,10 +172,7 @@ func (f *Freezer) Close() error {
errs = append(errs, err) errs = append(errs, err)
} }
}) })
if errs != nil { return errors.Join(errs...)
return fmt.Errorf("%v", errs)
}
return nil
} }
// AncientDatadir returns the path of the ancient store. // AncientDatadir returns the path of the ancient store.
@ -183,15 +180,6 @@ func (f *Freezer) AncientDatadir() (string, error) {
return f.datadir, nil return f.datadir, nil
} }
// HasAncient returns an indicator whether the specified ancient data exists
// in the freezer.
func (f *Freezer) HasAncient(kind string, number uint64) (bool, error) {
if table := f.tables[kind]; table != nil {
return table.has(number), nil
}
return false, nil
}
// Ancient retrieves an ancient binary blob from the append-only immutable files. // Ancient retrieves an ancient binary blob from the append-only immutable files.
func (f *Freezer) Ancient(kind string, number uint64) ([]byte, error) { func (f *Freezer) Ancient(kind string, number uint64) ([]byte, error) {
if table := f.tables[kind]; table != nil { if table := f.tables[kind]; table != nil {

View file

@ -45,14 +45,6 @@ func newMemoryTable(name string, config freezerTableConfig) *memoryTable {
return &memoryTable{name: name, config: config} return &memoryTable{name: name, config: config}
} }
// has returns an indicator whether the specified data exists.
func (t *memoryTable) has(number uint64) bool {
t.lock.RLock()
defer t.lock.RUnlock()
return number >= t.offset && number < t.items
}
// retrieve retrieves multiple items in sequence, starting from the index 'start'. // retrieve retrieves multiple items in sequence, starting from the index 'start'.
// It will return: // It will return:
// - at most 'count' items, // - at most 'count' items,
@ -232,17 +224,6 @@ func NewMemoryFreezer(readonly bool, tableName map[string]freezerTableConfig) *M
} }
} }
// HasAncient returns an indicator whether the specified data exists.
func (f *MemoryFreezer) HasAncient(kind string, number uint64) (bool, error) {
f.lock.RLock()
defer f.lock.RUnlock()
if table := f.tables[kind]; table != nil {
return table.has(number), nil
}
return false, nil
}
// Ancient retrieves an ancient binary blob from the in-memory freezer. // Ancient retrieves an ancient binary blob from the in-memory freezer.
func (f *MemoryFreezer) Ancient(kind string, number uint64) ([]byte, error) { func (f *MemoryFreezer) Ancient(kind string, number uint64) ([]byte, error) {
f.lock.RLock() f.lock.RLock()

View file

@ -105,15 +105,6 @@ func (f *resettableFreezer) Close() error {
return f.freezer.Close() return f.freezer.Close()
} }
// HasAncient returns an indicator whether the specified ancient data exists
// in the freezer
func (f *resettableFreezer) HasAncient(kind string, number uint64) (bool, error) {
f.lock.RLock()
defer f.lock.RUnlock()
return f.freezer.HasAncient(kind, number)
}
// Ancient retrieves an ancient binary blob from the append-only immutable files. // Ancient retrieves an ancient binary blob from the append-only immutable files.
func (f *resettableFreezer) Ancient(kind string, number uint64) ([]byte, error) { func (f *resettableFreezer) Ancient(kind string, number uint64) ([]byte, error) {
f.lock.RLock() f.lock.RLock()

View file

@ -1106,12 +1106,6 @@ func (t *freezerTable) retrieveItems(start, count, maxBytes uint64) ([]byte, []i
return output, sizes, nil return output, sizes, nil
} }
// has returns an indicator whether the specified number data is still accessible
// in the freezer table.
func (t *freezerTable) has(number uint64) bool {
return t.items.Load() > number && t.itemHidden.Load() <= number
}
// size returns the total data size in the freezer table. // size returns the total data size in the freezer table.
func (t *freezerTable) size() (uint64, error) { func (t *freezerTable) size() (uint64, error) {
t.lock.RLock() t.lock.RLock()

View file

@ -357,9 +357,6 @@ func checkAncientCount(t *testing.T, f *Freezer, kind string, n uint64) {
// Check at index n-1. // Check at index n-1.
if n > 0 { if n > 0 {
index := n - 1 index := n - 1
if ok, _ := f.HasAncient(kind, index); !ok {
t.Errorf("HasAncient(%q, %d) returned false unexpectedly", kind, index)
}
if _, err := f.Ancient(kind, index); err != nil { if _, err := f.Ancient(kind, index); err != nil {
t.Errorf("Ancient(%q, %d) returned unexpected error %q", kind, index, err) t.Errorf("Ancient(%q, %d) returned unexpected error %q", kind, index, err)
} }
@ -367,9 +364,6 @@ func checkAncientCount(t *testing.T, f *Freezer, kind string, n uint64) {
// Check at index n. // Check at index n.
index := n index := n
if ok, _ := f.HasAncient(kind, index); ok {
t.Errorf("HasAncient(%q, %d) returned true unexpectedly", kind, index)
}
if _, err := f.Ancient(kind, index); err == nil { if _, err := f.Ancient(kind, index); err == nil {
t.Errorf("Ancient(%q, %d) didn't return expected error", kind, index) t.Errorf("Ancient(%q, %d) didn't return expected error", kind, index)
} else if err != errOutOfBounds { } else if err != errOutOfBounds {

View file

@ -50,12 +50,6 @@ func (t *table) Get(key []byte) ([]byte, error) {
return t.db.Get(append([]byte(t.prefix), key...)) return t.db.Get(append([]byte(t.prefix), key...))
} }
// HasAncient is a noop passthrough that just forwards the request to the underlying
// database.
func (t *table) HasAncient(kind string, number uint64) (bool, error) {
return t.db.HasAncient(kind, number)
}
// Ancient is a noop passthrough that just forwards the request to the underlying // Ancient is a noop passthrough that just forwards the request to the underlying
// database. // database.
func (t *table) Ancient(kind string, number uint64) ([]byte, error) { func (t *table) Ancient(kind string, number uint64) ([]byte, error) {

View file

@ -116,7 +116,7 @@ func TestTxIndexer(t *testing.T) {
}, },
} }
for _, c := range cases { for _, c := range cases {
db, _ := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), "", "", false) db, _ := rawdb.Open(rawdb.NewMemoryDatabase(), rawdb.OpenOptions{})
rawdb.WriteAncientBlocks(db, append([]*types.Block{gspec.ToBlock()}, blocks...), types.EncodeBlockReceiptLists(append([]types.Receipts{{}}, receipts...))) rawdb.WriteAncientBlocks(db, append([]*types.Block{gspec.ToBlock()}, blocks...), types.EncodeBlockReceiptLists(append([]types.Receipts{{}}, receipts...)))
// Index the initial blocks from ancient store // Index the initial blocks from ancient store
@ -235,7 +235,7 @@ func TestTxIndexerRepair(t *testing.T) {
}, },
} }
for _, c := range cases { for _, c := range cases {
db, _ := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), "", "", false) db, _ := rawdb.Open(rawdb.NewMemoryDatabase(), rawdb.OpenOptions{})
encReceipts := types.EncodeBlockReceiptLists(append([]types.Receipts{{}}, receipts...)) encReceipts := types.EncodeBlockReceiptLists(append([]types.Receipts{{}}, receipts...))
rawdb.WriteAncientBlocks(db, append([]*types.Block{gspec.ToBlock()}, blocks...), encReceipts) rawdb.WriteAncientBlocks(db, append([]*types.Block{gspec.ToBlock()}, blocks...), encReceipts)
@ -426,7 +426,7 @@ func TestTxIndexerReport(t *testing.T) {
}, },
} }
for _, c := range cases { for _, c := range cases {
db, _ := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), "", "", false) db, _ := rawdb.Open(rawdb.NewMemoryDatabase(), rawdb.OpenOptions{})
encReceipts := types.EncodeBlockReceiptLists(append([]types.Receipts{{}}, receipts...)) encReceipts := types.EncodeBlockReceiptLists(append([]types.Receipts{{}}, receipts...))
rawdb.WriteAncientBlocks(db, append([]*types.Block{gspec.ToBlock()}, blocks...), encReceipts) rawdb.WriteAncientBlocks(db, append([]*types.Block{gspec.ToBlock()}, blocks...), encReceipts)

View file

@ -128,7 +128,14 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) {
} }
log.Info("Allocated trie memory caches", "clean", common.StorageSize(config.TrieCleanCache)*1024*1024, "dirty", common.StorageSize(config.TrieDirtyCache)*1024*1024) log.Info("Allocated trie memory caches", "clean", common.StorageSize(config.TrieCleanCache)*1024*1024, "dirty", common.StorageSize(config.TrieDirtyCache)*1024*1024)
chainDb, err := stack.OpenDatabaseWithFreezer("chaindata", config.DatabaseCache, config.DatabaseHandles, config.DatabaseFreezer, "eth/db/chaindata/", false) dbOptions := node.DatabaseOptions{
Cache: config.DatabaseCache,
Handles: config.DatabaseHandles,
AncientsDirectory: config.DatabaseFreezer,
EraDirectory: config.DatabaseEra,
MetricsNamespace: "eth/db/chaindata/",
}
chainDb, err := stack.OpenDatabaseWithOptions("chaindata", dbOptions)
if err != nil { if err != nil {
return nil, err return nil, err
} }

View file

@ -56,7 +56,7 @@ func newTester(t *testing.T) *downloadTester {
// newTesterWithNotification creates a new downloader test mocker. // newTesterWithNotification creates a new downloader test mocker.
func newTesterWithNotification(t *testing.T, success func()) *downloadTester { func newTesterWithNotification(t *testing.T, success func()) *downloadTester {
db, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), "", "", false) db, err := rawdb.Open(rawdb.NewMemoryDatabase(), rawdb.OpenOptions{})
if err != nil { if err != nil {
panic(err) panic(err)
} }

View file

@ -120,6 +120,7 @@ type Config struct {
DatabaseHandles int `toml:"-"` DatabaseHandles int `toml:"-"`
DatabaseCache int DatabaseCache int
DatabaseFreezer string DatabaseFreezer string
DatabaseEra string
TrieCleanCache int TrieCleanCache int
TrieDirtyCache int TrieDirtyCache int

View file

@ -37,6 +37,7 @@ func (c Config) MarshalTOML() (interface{}, error) {
DatabaseHandles int `toml:"-"` DatabaseHandles int `toml:"-"`
DatabaseCache int DatabaseCache int
DatabaseFreezer string DatabaseFreezer string
DatabaseEra string
TrieCleanCache int TrieCleanCache int
TrieDirtyCache int TrieDirtyCache int
TrieTimeout time.Duration TrieTimeout time.Duration
@ -77,6 +78,7 @@ func (c Config) MarshalTOML() (interface{}, error) {
enc.DatabaseHandles = c.DatabaseHandles enc.DatabaseHandles = c.DatabaseHandles
enc.DatabaseCache = c.DatabaseCache enc.DatabaseCache = c.DatabaseCache
enc.DatabaseFreezer = c.DatabaseFreezer enc.DatabaseFreezer = c.DatabaseFreezer
enc.DatabaseEra = c.DatabaseEra
enc.TrieCleanCache = c.TrieCleanCache enc.TrieCleanCache = c.TrieCleanCache
enc.TrieDirtyCache = c.TrieDirtyCache enc.TrieDirtyCache = c.TrieDirtyCache
enc.TrieTimeout = c.TrieTimeout enc.TrieTimeout = c.TrieTimeout
@ -121,6 +123,7 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error {
DatabaseHandles *int `toml:"-"` DatabaseHandles *int `toml:"-"`
DatabaseCache *int DatabaseCache *int
DatabaseFreezer *string DatabaseFreezer *string
DatabaseEra *string
TrieCleanCache *int TrieCleanCache *int
TrieDirtyCache *int TrieDirtyCache *int
TrieTimeout *time.Duration TrieTimeout *time.Duration
@ -204,6 +207,9 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error {
if dec.DatabaseFreezer != nil { if dec.DatabaseFreezer != nil {
c.DatabaseFreezer = *dec.DatabaseFreezer c.DatabaseFreezer = *dec.DatabaseFreezer
} }
if dec.DatabaseEra != nil {
c.DatabaseEra = *dec.DatabaseEra
}
if dec.TrieCleanCache != nil { if dec.TrieCleanCache != nil {
c.TrieCleanCache = *dec.TrieCleanCache c.TrieCleanCache = *dec.TrieCleanCache
} }

View file

@ -92,10 +92,6 @@ type KeyValueStore interface {
// AncientReaderOp contains the methods required to read from immutable ancient data. // AncientReaderOp contains the methods required to read from immutable ancient data.
type AncientReaderOp interface { type AncientReaderOp interface {
// HasAncient returns an indicator whether the specified data exists in the
// ancient store.
HasAncient(kind string, number uint64) (bool, error)
// Ancient retrieves an ancient binary blob from the append-only immutable files. // Ancient retrieves an ancient binary blob from the append-only immutable files.
Ancient(kind string, number uint64) ([]byte, error) Ancient(kind string, number uint64) ([]byte, error)

View file

@ -48,13 +48,6 @@ func (db *Database) Get(key []byte) ([]byte, error) {
return resp, nil return resp, nil
} }
func (db *Database) HasAncient(kind string, number uint64) (bool, error) {
if _, err := db.Ancient(kind, number); err != nil {
return false, err
}
return true, nil
}
func (db *Database) Ancient(kind string, number uint64) ([]byte, error) { func (db *Database) Ancient(kind string, number uint64) ([]byte, error) {
var resp hexutil.Bytes var resp hexutil.Bytes
err := db.remote.Call(&resp, "debug_dbAncient", kind, number) err := db.remote.Call(&resp, "debug_dbAncient", kind, number)

View file

@ -18,7 +18,6 @@ package era
import ( import (
"encoding/binary" "encoding/binary"
"errors"
"fmt" "fmt"
"io" "io"
"math/big" "math/big"
@ -126,32 +125,10 @@ func (e *Era) Close() error {
return e.f.Close() return e.f.Close()
} }
// GetHeaderByNumber returns the header for the given block number.
func (e *Era) GetHeaderByNumber(num uint64) (*types.Header, error) {
if e.m.start > num || e.m.start+e.m.count <= num {
return nil, errors.New("out-of-bounds")
}
off, err := e.readOffset(num)
if err != nil {
return nil, err
}
// Read and decompress header.
r, _, err := newSnappyReader(e.s, TypeCompressedHeader, off)
if err != nil {
return nil, err
}
var header types.Header
if err := rlp.Decode(r, &header); err != nil {
return nil, err
}
return &header, nil
}
// GetBlockByNumber returns the block for the given block number. // GetBlockByNumber returns the block for the given block number.
func (e *Era) GetBlockByNumber(num uint64) (*types.Block, error) { func (e *Era) GetBlockByNumber(num uint64) (*types.Block, error) {
if e.m.start > num || e.m.start+e.m.count <= num { if e.m.start > num || e.m.start+e.m.count <= num {
return nil, errors.New("out-of-bounds") return nil, fmt.Errorf("out-of-bounds: %d not in [%d, %d)", num, e.m.start, e.m.start+e.m.count)
} }
off, err := e.readOffset(num) off, err := e.readOffset(num)
if err != nil { if err != nil {
@ -177,10 +154,30 @@ func (e *Era) GetBlockByNumber(num uint64) (*types.Block, error) {
return types.NewBlockWithHeader(&header).WithBody(body), nil return types.NewBlockWithHeader(&header).WithBody(body), nil
} }
// GetReceiptsByNumber returns the receipts for the given block number. // GetRawBodyByNumber returns the RLP-encoded body for the given block number.
func (e *Era) GetReceiptsByNumber(num uint64) (types.Receipts, error) { func (e *Era) GetRawBodyByNumber(num uint64) ([]byte, error) {
if e.m.start > num || e.m.start+e.m.count <= num { if e.m.start > num || e.m.start+e.m.count <= num {
return nil, errors.New("out-of-bounds") return nil, fmt.Errorf("out-of-bounds: %d not in [%d, %d)", num, e.m.start, e.m.start+e.m.count)
}
off, err := e.readOffset(num)
if err != nil {
return nil, err
}
off, err = e.s.SkipN(off, 1)
if err != nil {
return nil, err
}
r, _, err := newSnappyReader(e.s, TypeCompressedBody, off)
if err != nil {
return nil, err
}
return io.ReadAll(r)
}
// GetRawReceiptsByNumber returns the RLP-encoded receipts for the given block number.
func (e *Era) GetRawReceiptsByNumber(num uint64) ([]byte, error) {
if e.m.start > num || e.m.start+e.m.count <= num {
return nil, fmt.Errorf("out-of-bounds: %d not in [%d, %d)", num, e.m.start, e.m.start+e.m.count)
} }
off, err := e.readOffset(num) off, err := e.readOffset(num)
if err != nil { if err != nil {
@ -193,16 +190,11 @@ func (e *Era) GetReceiptsByNumber(num uint64) (types.Receipts, error) {
return nil, err return nil, err
} }
// Read and decompress receipts.
r, _, err := newSnappyReader(e.s, TypeCompressedReceipts, off) r, _, err := newSnappyReader(e.s, TypeCompressedReceipts, off)
if err != nil { if err != nil {
return nil, err return nil, err
} }
var receipts types.Receipts return io.ReadAll(r)
if err := rlp.Decode(r, &receipts); err != nil {
return nil, err
}
return receipts, nil
} }
// Accumulator reads the accumulator entry in the Era1 file. // Accumulator reads the accumulator entry in the Era1 file.

View file

@ -101,17 +101,6 @@ func TestEra1Builder(t *testing.T) {
if !bytes.Equal(rawHeader, chain.headers[i]) { if !bytes.Equal(rawHeader, chain.headers[i]) {
t.Fatalf("mismatched header: want %s, got %s", chain.headers[i], rawHeader) t.Fatalf("mismatched header: want %s, got %s", chain.headers[i], rawHeader)
} }
header, err := e.GetHeaderByNumber(i)
if err != nil {
t.Fatalf("error reading header: %v", err)
}
encHeader, err := rlp.EncodeToBytes(header)
if err != nil {
t.Fatalf("error encoding header: %v", err)
}
if !bytes.Equal(encHeader, chain.headers[i]) {
t.Fatalf("mismatched header: want %s, got %s", chain.headers[i], encHeader)
}
// Check bodies. // Check bodies.
body, err := io.ReadAll(it.Body) body, err := io.ReadAll(it.Body)
@ -130,7 +119,7 @@ func TestEra1Builder(t *testing.T) {
if !bytes.Equal(rawReceipts, chain.receipts[i]) { if !bytes.Equal(rawReceipts, chain.receipts[i]) {
t.Fatalf("mismatched receipts: want %s, got %s", chain.receipts[i], rawReceipts) t.Fatalf("mismatched receipts: want %s, got %s", chain.receipts[i], rawReceipts)
} }
receipts, err := e.GetReceiptsByNumber(i) receipts, err := getReceiptsByNumber(e, i)
if err != nil { if err != nil {
t.Fatalf("error reading receipts: %v", err) t.Fatalf("error reading receipts: %v", err)
} }
@ -179,3 +168,15 @@ func mustEncode(obj any) []byte {
} }
return b return b
} }
func getReceiptsByNumber(e *Era, number uint64) (types.Receipts, error) {
r, err := e.GetRawReceiptsByNumber(number)
if err != nil {
return nil, err
}
var receipts types.Receipts
if err := rlp.DecodeBytes(r, &receipts); err != nil {
return nil, err
}
return receipts, nil
}

View file

@ -26,16 +26,25 @@ import (
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
) )
// openOptions contains the options to apply when opening a database. // DatabaseOptions contains the options to apply when opening a database.
// OBS: If AncientsDirectory is empty, it indicates that no freezer is to be used. type DatabaseOptions struct {
type openOptions struct { // Directory for storing chain history ("freezer").
Type string // "leveldb" | "pebble" AncientsDirectory string
Directory string // the datadir
AncientsDirectory string // the ancients-dir // The optional Era folder, which can be either a subfolder under
Namespace string // the namespace for database relevant metrics // ancient/chain or a directory specified via an absolute path.
Cache int // the capacity(in megabytes) of the data caching EraDirectory string
Handles int // number of files to be open simultaneously
ReadOnly bool MetricsNamespace string // the namespace for database relevant metrics
Cache int // the capacity(in megabytes) of the data caching
Handles int // number of files to be open simultaneously
ReadOnly bool // if true, no writes can be performed
}
type internalOpenOptions struct {
directory string
dbEngine string // "leveldb" | "pebble"
DatabaseOptions
} }
// openDatabase opens both a disk-based key-value database such as leveldb or pebble, but also // openDatabase opens both a disk-based key-value database such as leveldb or pebble, but also
@ -43,15 +52,18 @@ type openOptions struct {
// set on the provided OpenOptions. // set on the provided OpenOptions.
// The passed o.AncientDir indicates the path of root ancient directory where // The passed o.AncientDir indicates the path of root ancient directory where
// the chain freezer can be opened. // the chain freezer can be opened.
func openDatabase(o openOptions) (ethdb.Database, error) { func openDatabase(o internalOpenOptions) (ethdb.Database, error) {
kvdb, err := openKeyValueDatabase(o) kvdb, err := openKeyValueDatabase(o)
if err != nil { if err != nil {
return nil, err return nil, err
} }
if len(o.AncientsDirectory) == 0 { opts := rawdb.OpenOptions{
return kvdb, nil Ancient: o.AncientsDirectory,
Era: o.EraDirectory,
MetricsNamespace: o.MetricsNamespace,
ReadOnly: o.ReadOnly,
} }
frdb, err := rawdb.NewDatabaseWithFreezer(kvdb, o.AncientsDirectory, o.Namespace, o.ReadOnly) frdb, err := rawdb.Open(kvdb, opts)
if err != nil { if err != nil {
kvdb.Close() kvdb.Close()
return nil, err return nil, err
@ -61,37 +73,37 @@ func openDatabase(o openOptions) (ethdb.Database, error) {
// openKeyValueDatabase opens a disk-based key-value database, e.g. leveldb or pebble. // openKeyValueDatabase opens a disk-based key-value database, e.g. leveldb or pebble.
// //
// type == null type != null // type == null type != null
// +---------------------------------------- // +----------------------------------------
// db is non-existent | pebble default | specified type // db is non-existent | pebble default | specified type
// db is existent | from db | specified type (if compatible) // db is existent | from db | specified type (if compatible)
func openKeyValueDatabase(o openOptions) (ethdb.Database, error) { func openKeyValueDatabase(o internalOpenOptions) (ethdb.KeyValueStore, error) {
// Reject any unsupported database type // Reject any unsupported database type
if len(o.Type) != 0 && o.Type != rawdb.DBLeveldb && o.Type != rawdb.DBPebble { if len(o.dbEngine) != 0 && o.dbEngine != rawdb.DBLeveldb && o.dbEngine != rawdb.DBPebble {
return nil, fmt.Errorf("unknown db.engine %v", o.Type) return nil, fmt.Errorf("unknown db.engine %v", o.dbEngine)
} }
// Retrieve any pre-existing database's type and use that or the requested one // Retrieve any pre-existing database's type and use that or the requested one
// as long as there's no conflict between the two types // as long as there's no conflict between the two types
existingDb := rawdb.PreexistingDatabase(o.Directory) existingDb := rawdb.PreexistingDatabase(o.directory)
if len(existingDb) != 0 && len(o.Type) != 0 && o.Type != existingDb { if len(existingDb) != 0 && len(o.dbEngine) != 0 && o.dbEngine != existingDb {
return nil, fmt.Errorf("db.engine choice was %v but found pre-existing %v database in specified data directory", o.Type, existingDb) return nil, fmt.Errorf("db.engine choice was %v but found pre-existing %v database in specified data directory", o.dbEngine, existingDb)
} }
if o.Type == rawdb.DBPebble || existingDb == rawdb.DBPebble { if o.dbEngine == rawdb.DBPebble || existingDb == rawdb.DBPebble {
log.Info("Using pebble as the backing database") log.Info("Using pebble as the backing database")
return newPebbleDBDatabase(o.Directory, o.Cache, o.Handles, o.Namespace, o.ReadOnly) return newPebbleDBDatabase(o.directory, o.Cache, o.Handles, o.MetricsNamespace, o.ReadOnly)
} }
if o.Type == rawdb.DBLeveldb || existingDb == rawdb.DBLeveldb { if o.dbEngine == rawdb.DBLeveldb || existingDb == rawdb.DBLeveldb {
log.Info("Using leveldb as the backing database") log.Info("Using leveldb as the backing database")
return newLevelDBDatabase(o.Directory, o.Cache, o.Handles, o.Namespace, o.ReadOnly) return newLevelDBDatabase(o.directory, o.Cache, o.Handles, o.MetricsNamespace, o.ReadOnly)
} }
// No pre-existing database, no user-requested one either. Default to Pebble. // No pre-existing database, no user-requested one either. Default to Pebble.
log.Info("Defaulting to pebble as the backing database") log.Info("Defaulting to pebble as the backing database")
return newPebbleDBDatabase(o.Directory, o.Cache, o.Handles, o.Namespace, o.ReadOnly) return newPebbleDBDatabase(o.directory, o.Cache, o.Handles, o.MetricsNamespace, o.ReadOnly)
} }
// newLevelDBDatabase creates a persistent key-value database without a freezer // newLevelDBDatabase creates a persistent key-value database without a freezer
// moving immutable chain segments into cold storage. // moving immutable chain segments into cold storage.
func newLevelDBDatabase(file string, cache int, handles int, namespace string, readonly bool) (ethdb.Database, error) { func newLevelDBDatabase(file string, cache int, handles int, namespace string, readonly bool) (ethdb.KeyValueStore, error) {
db, err := leveldb.New(file, cache, handles, namespace, readonly) db, err := leveldb.New(file, cache, handles, namespace, readonly)
if err != nil { if err != nil {
return nil, err return nil, err
@ -102,7 +114,7 @@ func newLevelDBDatabase(file string, cache int, handles int, namespace string, r
// newPebbleDBDatabase creates a persistent key-value database without a freezer // newPebbleDBDatabase creates a persistent key-value database without a freezer
// moving immutable chain segments into cold storage. // moving immutable chain segments into cold storage.
func newPebbleDBDatabase(file string, cache int, handles int, namespace string, readonly bool) (ethdb.Database, error) { func newPebbleDBDatabase(file string, cache int, handles int, namespace string, readonly bool) (ethdb.KeyValueStore, error) {
db, err := pebble.New(file, cache, handles, namespace, readonly) db, err := pebble.New(file, cache, handles, namespace, readonly)
if err != nil { if err != nil {
return nil, err return nil, err

View file

@ -697,27 +697,27 @@ func (n *Node) EventMux() *event.TypeMux {
} }
// OpenDatabase opens an existing database with the given name (or creates one if no // OpenDatabase opens an existing database with the given name (or creates one if no
// previous can be found) from within the node's instance directory. If the node is // previous can be found) from within the node's instance directory. If the node has no
// ephemeral, a memory database is returned. // data directory, an in-memory database is returned.
func (n *Node) OpenDatabase(name string, cache, handles int, namespace string, readonly bool) (ethdb.Database, error) { func (n *Node) OpenDatabaseWithOptions(name string, opt DatabaseOptions) (ethdb.Database, error) {
n.lock.Lock() n.lock.Lock()
defer n.lock.Unlock() defer n.lock.Unlock()
if n.state == closedState { if n.state == closedState {
return nil, ErrNodeStopped return nil, ErrNodeStopped
} }
var db ethdb.Database var db ethdb.Database
var err error var err error
if n.config.DataDir == "" { if n.config.DataDir == "" {
db = rawdb.NewMemoryDatabase() db, _ = rawdb.Open(memorydb.New(), rawdb.OpenOptions{
MetricsNamespace: opt.MetricsNamespace,
ReadOnly: opt.ReadOnly,
})
} else { } else {
db, err = openDatabase(openOptions{ opt.AncientsDirectory = n.ResolveAncient(name, opt.AncientsDirectory)
Type: n.config.DBEngine, db, err = openDatabase(internalOpenOptions{
Directory: n.ResolvePath(name), directory: n.ResolvePath(name),
Namespace: namespace, dbEngine: n.config.DBEngine,
Cache: cache, DatabaseOptions: opt,
Handles: handles,
ReadOnly: readonly,
}) })
} }
if err == nil { if err == nil {
@ -726,36 +726,31 @@ func (n *Node) OpenDatabase(name string, cache, handles int, namespace string, r
return db, err return db, err
} }
// OpenDatabase opens an existing database with the given name (or creates one if no
// previous can be found) from within the node's instance directory.
// If the node has no data directory, an in-memory database is returned.
// Deprecated: use OpenDatabaseWithOptions instead.
func (n *Node) OpenDatabase(name string, cache, handles int, namespace string, readonly bool) (ethdb.Database, error) {
return n.OpenDatabaseWithOptions(name, DatabaseOptions{
MetricsNamespace: namespace,
Cache: cache,
Handles: handles,
ReadOnly: readonly,
})
}
// OpenDatabaseWithFreezer opens an existing database with the given name (or // OpenDatabaseWithFreezer opens an existing database with the given name (or
// creates one if no previous can be found) from within the node's data directory, // creates one if no previous can be found) from within the node's data directory.
// also attaching a chain freezer to it that moves ancient chain data from the // If the node has no data directory, an in-memory database is returned.
// database to immutable append-only files. If the node is an ephemeral one, a // Deprecated: use OpenDatabaseWithOptions instead.
// memory database is returned.
func (n *Node) OpenDatabaseWithFreezer(name string, cache, handles int, ancient string, namespace string, readonly bool) (ethdb.Database, error) { func (n *Node) OpenDatabaseWithFreezer(name string, cache, handles int, ancient string, namespace string, readonly bool) (ethdb.Database, error) {
n.lock.Lock() return n.OpenDatabaseWithOptions(name, DatabaseOptions{
defer n.lock.Unlock() AncientsDirectory: n.ResolveAncient(name, ancient),
if n.state == closedState { MetricsNamespace: namespace,
return nil, ErrNodeStopped Cache: cache,
} Handles: handles,
var db ethdb.Database ReadOnly: readonly,
var err error })
if n.config.DataDir == "" {
db, err = rawdb.NewDatabaseWithFreezer(memorydb.New(), "", namespace, readonly)
} else {
db, err = openDatabase(openOptions{
Type: n.config.DBEngine,
Directory: n.ResolvePath(name),
AncientsDirectory: n.ResolveAncient(name, ancient),
Namespace: namespace,
Cache: cache,
Handles: handles,
ReadOnly: readonly,
})
}
if err == nil {
db = n.wrapDatabase(db)
}
return db, err
} }
// ResolvePath returns the absolute path of a resource in the instance directory. // ResolvePath returns the absolute path of a resource in the instance directory.

View file

@ -123,7 +123,7 @@ type tester struct {
func newTester(t *testing.T, historyLimit uint64, isVerkle bool, layers int) *tester { func newTester(t *testing.T, historyLimit uint64, isVerkle bool, layers int) *tester {
var ( var (
disk, _ = rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), t.TempDir(), "", false) disk, _ = rawdb.Open(rawdb.NewMemoryDatabase(), rawdb.OpenOptions{Ancient: t.TempDir()})
db = New(disk, &Config{ db = New(disk, &Config{
StateHistory: historyLimit, StateHistory: historyLimit,
TrieCleanSize: 256 * 1024, TrieCleanSize: 256 * 1024,