mirror of
https://github.com/ethereum/go-ethereum.git
synced 2026-02-26 07:37:20 +00:00
core/rawdb: integrate eradb backend for RPC (#31604)
This implements a backing store for chain history based on era1 files. The new store is integrated with the freezer. Queries for blocks and receipts below the current freezer tail are handled by the era store. --------- Co-authored-by: Gary Rong <garyrong0905@gmail.com> Co-authored-by: Felix Lange <fjl@twurst.com> Co-authored-by: lightclient <lightclient@protonmail.com>
This commit is contained in:
parent
c87b856c1a
commit
a7d9b52eaf
34 changed files with 774 additions and 271 deletions
|
|
@ -45,6 +45,7 @@ import (
|
|||
"github.com/ethereum/go-ethereum/internal/era/eradl"
|
||||
"github.com/ethereum/go-ethereum/internal/flags"
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
"github.com/ethereum/go-ethereum/node"
|
||||
"github.com/ethereum/go-ethereum/params"
|
||||
"github.com/urfave/cli/v2"
|
||||
)
|
||||
|
|
@ -277,10 +278,7 @@ func initGenesis(ctx *cli.Context) error {
|
|||
overrides.OverrideVerkle = &v
|
||||
}
|
||||
|
||||
chaindb, err := stack.OpenDatabaseWithFreezer("chaindata", 0, 0, ctx.String(utils.AncientFlag.Name), "", false)
|
||||
if err != nil {
|
||||
utils.Fatalf("Failed to open database: %v", err)
|
||||
}
|
||||
chaindb := utils.MakeChainDatabase(ctx, stack, false)
|
||||
defer chaindb.Close()
|
||||
|
||||
triedb := utils.MakeTrieDatabase(ctx, chaindb, ctx.Bool(utils.CachePreimagesFlag.Name), false, genesis.IsVerkle())
|
||||
|
|
@ -317,7 +315,7 @@ func dumpGenesis(ctx *cli.Context) error {
|
|||
// dump whatever already exists in the datadir
|
||||
stack, _ := makeConfigNode(ctx)
|
||||
|
||||
db, err := stack.OpenDatabase("chaindata", 0, 0, "", true)
|
||||
db, err := stack.OpenDatabaseWithOptions("chaindata", node.DatabaseOptions{ReadOnly: true})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
|
|
|||
|
|
@ -111,6 +111,11 @@ var (
|
|||
Usage: "Root directory for ancient data (default = inside chaindata)",
|
||||
Category: flags.EthCategory,
|
||||
}
|
||||
EraFlag = &flags.DirectoryFlag{
|
||||
Name: "datadir.era",
|
||||
Usage: "Root directory for era1 history (default = inside ancient/chain)",
|
||||
Category: flags.EthCategory,
|
||||
}
|
||||
MinFreeDiskSpaceFlag = &flags.DirectoryFlag{
|
||||
Name: "datadir.minfreedisk",
|
||||
Usage: "Minimum free disk space in MB, once reached triggers auto shut down (default = --cache.gc converted to MB, 0 = disabled)",
|
||||
|
|
@ -977,6 +982,7 @@ var (
|
|||
DatabaseFlags = []cli.Flag{
|
||||
DataDirFlag,
|
||||
AncientFlag,
|
||||
EraFlag,
|
||||
RemoteDBFlag,
|
||||
DBEngineFlag,
|
||||
StateSchemeFlag,
|
||||
|
|
@ -1613,6 +1619,9 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) {
|
|||
if ctx.IsSet(AncientFlag.Name) {
|
||||
cfg.DatabaseFreezer = ctx.String(AncientFlag.Name)
|
||||
}
|
||||
if ctx.IsSet(EraFlag.Name) {
|
||||
cfg.DatabaseEra = ctx.String(EraFlag.Name)
|
||||
}
|
||||
|
||||
if gcmode := ctx.String(GCModeFlag.Name); gcmode != "full" && gcmode != "archive" {
|
||||
Fatalf("--%s must be either 'full' or 'archive'", GCModeFlag.Name)
|
||||
|
|
@ -2082,7 +2091,15 @@ func MakeChainDatabase(ctx *cli.Context, stack *node.Node, readonly bool) ethdb.
|
|||
}
|
||||
chainDb = remotedb.New(client)
|
||||
default:
|
||||
chainDb, err = stack.OpenDatabaseWithFreezer("chaindata", cache, handles, ctx.String(AncientFlag.Name), "eth/db/chaindata/", readonly)
|
||||
options := node.DatabaseOptions{
|
||||
ReadOnly: readonly,
|
||||
Cache: cache,
|
||||
Handles: handles,
|
||||
AncientsDirectory: ctx.String(AncientFlag.Name),
|
||||
MetricsNamespace: "eth/db/chaindata/",
|
||||
EraDirectory: ctx.String(EraFlag.Name),
|
||||
}
|
||||
chainDb, err = stack.OpenDatabaseWithOptions("chaindata", options)
|
||||
}
|
||||
if err != nil {
|
||||
Fatalf("Could not open database: %v", err)
|
||||
|
|
|
|||
|
|
@ -158,7 +158,7 @@ func TestHistoryImportAndExport(t *testing.T) {
|
|||
}
|
||||
|
||||
// Now import Era.
|
||||
db2, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), "", "", false)
|
||||
db2, err := rawdb.Open(rawdb.NewMemoryDatabase(), rawdb.OpenOptions{})
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
|
|
|||
|
|
@ -47,30 +47,37 @@ func NewBasicLRU[K comparable, V any](capacity int) BasicLRU[K, V] {
|
|||
|
||||
// Add adds a value to the cache. Returns true if an item was evicted to store the new item.
|
||||
func (c *BasicLRU[K, V]) Add(key K, value V) (evicted bool) {
|
||||
_, _, evicted = c.Add3(key, value)
|
||||
return evicted
|
||||
}
|
||||
|
||||
// Add3 adds a value to the cache. If an item was evicted to store the new one, it returns the evicted item.
|
||||
func (c *BasicLRU[K, V]) Add3(key K, value V) (ek K, ev V, evicted bool) {
|
||||
item, ok := c.items[key]
|
||||
if ok {
|
||||
// Already exists in cache.
|
||||
item.value = value
|
||||
c.items[key] = item
|
||||
c.list.moveToFront(item.elem)
|
||||
return false
|
||||
return ek, ev, false
|
||||
}
|
||||
|
||||
var elem *listElem[K]
|
||||
if c.Len() >= c.cap {
|
||||
elem = c.list.removeLast()
|
||||
delete(c.items, elem.v)
|
||||
evicted = true
|
||||
ek = elem.v
|
||||
ev = c.items[ek].value
|
||||
delete(c.items, ek)
|
||||
} else {
|
||||
elem = new(listElem[K])
|
||||
}
|
||||
|
||||
// Store the new item.
|
||||
// Note that, if another item was evicted, we re-use its list element here.
|
||||
// Note that if another item was evicted, we re-use its list element here.
|
||||
elem.v = key
|
||||
c.items[key] = cacheItem[K, V]{elem, value}
|
||||
c.list.pushElem(elem)
|
||||
return evicted
|
||||
return ek, ev, evicted
|
||||
}
|
||||
|
||||
// Contains reports whether the given key exists in the cache.
|
||||
|
|
|
|||
|
|
@ -1769,7 +1769,7 @@ func testRepairWithScheme(t *testing.T, tt *rewindTest, snapshots bool, scheme s
|
|||
if err != nil {
|
||||
t.Fatalf("Failed to create persistent key-value database: %v", err)
|
||||
}
|
||||
db, err := rawdb.NewDatabaseWithFreezer(pdb, ancient, "", false)
|
||||
db, err := rawdb.Open(pdb, rawdb.OpenOptions{Ancient: ancient})
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create persistent freezer database: %v", err)
|
||||
}
|
||||
|
|
@ -1854,7 +1854,7 @@ func testRepairWithScheme(t *testing.T, tt *rewindTest, snapshots bool, scheme s
|
|||
if err != nil {
|
||||
t.Fatalf("Failed to reopen persistent key-value database: %v", err)
|
||||
}
|
||||
db, err = rawdb.NewDatabaseWithFreezer(pdb, ancient, "", false)
|
||||
db, err = rawdb.Open(pdb, rawdb.OpenOptions{Ancient: ancient})
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to reopen persistent freezer database: %v", err)
|
||||
}
|
||||
|
|
@ -1919,7 +1919,7 @@ func testIssue23496(t *testing.T, scheme string) {
|
|||
if err != nil {
|
||||
t.Fatalf("Failed to create persistent key-value database: %v", err)
|
||||
}
|
||||
db, err := rawdb.NewDatabaseWithFreezer(pdb, ancient, "", false)
|
||||
db, err := rawdb.Open(pdb, rawdb.OpenOptions{Ancient: ancient})
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create persistent freezer database: %v", err)
|
||||
}
|
||||
|
|
@ -1979,7 +1979,7 @@ func testIssue23496(t *testing.T, scheme string) {
|
|||
if err != nil {
|
||||
t.Fatalf("Failed to reopen persistent key-value database: %v", err)
|
||||
}
|
||||
db, err = rawdb.NewDatabaseWithFreezer(pdb, ancient, "", false)
|
||||
db, err = rawdb.Open(pdb, rawdb.OpenOptions{Ancient: ancient})
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to reopen persistent freezer database: %v", err)
|
||||
}
|
||||
|
|
|
|||
|
|
@ -1973,7 +1973,7 @@ func testSetHeadWithScheme(t *testing.T, tt *rewindTest, snapshots bool, scheme
|
|||
if err != nil {
|
||||
t.Fatalf("Failed to create persistent key-value database: %v", err)
|
||||
}
|
||||
db, err := rawdb.NewDatabaseWithFreezer(pdb, ancient, "", false)
|
||||
db, err := rawdb.Open(pdb, rawdb.OpenOptions{Ancient: ancient})
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create persistent freezer database: %v", err)
|
||||
}
|
||||
|
|
|
|||
|
|
@ -70,7 +70,7 @@ func (basic *snapshotTestBasic) prepare(t *testing.T) (*BlockChain, []*types.Blo
|
|||
if err != nil {
|
||||
t.Fatalf("Failed to create persistent key-value database: %v", err)
|
||||
}
|
||||
db, err := rawdb.NewDatabaseWithFreezer(pdb, ancient, "", false)
|
||||
db, err := rawdb.Open(pdb, rawdb.OpenOptions{Ancient: ancient})
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create persistent freezer database: %v", err)
|
||||
}
|
||||
|
|
@ -265,7 +265,7 @@ func (snaptest *crashSnapshotTest) test(t *testing.T) {
|
|||
if err != nil {
|
||||
t.Fatalf("Failed to create persistent key-value database: %v", err)
|
||||
}
|
||||
newdb, err := rawdb.NewDatabaseWithFreezer(pdb, snaptest.ancient, "", false)
|
||||
newdb, err := rawdb.Open(pdb, rawdb.OpenOptions{Ancient: snaptest.ancient})
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create persistent freezer database: %v", err)
|
||||
}
|
||||
|
|
|
|||
|
|
@ -738,7 +738,7 @@ func testFastVsFullChains(t *testing.T, scheme string) {
|
|||
t.Fatalf("failed to insert receipt %d: %v", n, err)
|
||||
}
|
||||
// Freezer style fast import the chain.
|
||||
ancientDb, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), "", "", false)
|
||||
ancientDb, err := rawdb.Open(rawdb.NewMemoryDatabase(), rawdb.OpenOptions{})
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create temp freezer db: %v", err)
|
||||
}
|
||||
|
|
@ -824,7 +824,7 @@ func testLightVsFastVsFullChainHeads(t *testing.T, scheme string) {
|
|||
|
||||
// makeDb creates a db instance for testing.
|
||||
makeDb := func() ethdb.Database {
|
||||
db, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), "", "", false)
|
||||
db, err := rawdb.Open(rawdb.NewMemoryDatabase(), rawdb.OpenOptions{})
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create temp freezer db: %v", err)
|
||||
}
|
||||
|
|
@ -1623,7 +1623,7 @@ func testLargeReorgTrieGC(t *testing.T, scheme string) {
|
|||
competitor, _ := GenerateChain(genesis.Config, shared[len(shared)-1], engine, genDb, 2*state.TriesInMemory+1, func(i int, b *BlockGen) { b.SetCoinbase(common.Address{3}) })
|
||||
|
||||
// Import the shared chain and the original canonical one
|
||||
db, _ := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), "", "", false)
|
||||
db, _ := rawdb.Open(rawdb.NewMemoryDatabase(), rawdb.OpenOptions{})
|
||||
defer db.Close()
|
||||
|
||||
chain, err := NewBlockChain(db, DefaultCacheConfigWithScheme(scheme), genesis, nil, engine, vm.Config{}, nil)
|
||||
|
|
@ -1689,7 +1689,7 @@ func testBlockchainRecovery(t *testing.T, scheme string) {
|
|||
_, blocks, receipts := GenerateChainWithGenesis(gspec, ethash.NewFaker(), int(height), nil)
|
||||
|
||||
// Import the chain as a ancient-first node and ensure all pointers are updated
|
||||
ancientDb, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), t.TempDir(), "", false)
|
||||
ancientDb, err := rawdb.Open(rawdb.NewMemoryDatabase(), rawdb.OpenOptions{Ancient: t.TempDir()})
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create temp freezer db: %v", err)
|
||||
}
|
||||
|
|
@ -1747,7 +1747,7 @@ func testLowDiffLongChain(t *testing.T, scheme string) {
|
|||
})
|
||||
|
||||
// Import the canonical chain
|
||||
diskdb, _ := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), "", "", false)
|
||||
diskdb, _ := rawdb.Open(rawdb.NewMemoryDatabase(), rawdb.OpenOptions{})
|
||||
defer diskdb.Close()
|
||||
|
||||
chain, err := NewBlockChain(diskdb, DefaultCacheConfigWithScheme(scheme), genesis, nil, engine, vm.Config{}, nil)
|
||||
|
|
@ -1959,7 +1959,7 @@ func testInsertKnownChainData(t *testing.T, typ string, scheme string) {
|
|||
b.OffsetTime(-9) // A higher difficulty
|
||||
})
|
||||
// Import the shared chain and the original canonical one
|
||||
chaindb, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), "", "", false)
|
||||
chaindb, err := rawdb.Open(rawdb.NewMemoryDatabase(), rawdb.OpenOptions{})
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create temp freezer db: %v", err)
|
||||
}
|
||||
|
|
@ -2122,7 +2122,7 @@ func testInsertKnownChainDataWithMerging(t *testing.T, typ string, mergeHeight i
|
|||
}
|
||||
})
|
||||
// Import the shared chain and the original canonical one
|
||||
chaindb, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), "", "", false)
|
||||
chaindb, err := rawdb.Open(rawdb.NewMemoryDatabase(), rawdb.OpenOptions{})
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create temp freezer db: %v", err)
|
||||
}
|
||||
|
|
@ -2496,7 +2496,7 @@ func testSideImportPrunedBlocks(t *testing.T, scheme string) {
|
|||
if err != nil {
|
||||
t.Fatalf("Failed to create persistent key-value database: %v", err)
|
||||
}
|
||||
db, err := rawdb.NewDatabaseWithFreezer(pdb, ancient, "", false)
|
||||
db, err := rawdb.Open(pdb, rawdb.OpenOptions{Ancient: ancient})
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create persistent freezer database: %v", err)
|
||||
}
|
||||
|
|
@ -3403,7 +3403,7 @@ func testSetCanonical(t *testing.T, scheme string) {
|
|||
}
|
||||
gen.AddTx(tx)
|
||||
})
|
||||
diskdb, _ := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), "", "", false)
|
||||
diskdb, _ := rawdb.Open(rawdb.NewMemoryDatabase(), rawdb.OpenOptions{})
|
||||
defer diskdb.Close()
|
||||
|
||||
chain, err := NewBlockChain(diskdb, DefaultCacheConfigWithScheme(scheme), gspec, nil, engine, vm.Config{}, nil)
|
||||
|
|
@ -4199,7 +4199,7 @@ func testChainReorgSnapSync(t *testing.T, ancientLimit uint64) {
|
|||
gen.SetCoinbase(common.Address{0: byte(0xb), 19: byte(i)})
|
||||
})
|
||||
|
||||
db, _ := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), "", "", false)
|
||||
db, _ := rawdb.Open(rawdb.NewMemoryDatabase(), rawdb.OpenOptions{})
|
||||
defer db.Close()
|
||||
|
||||
chain, _ := NewBlockChain(db, DefaultCacheConfigWithScheme(rawdb.PathScheme), gspec, nil, beacon.New(ethash.NewFaker()), vm.Config{}, nil)
|
||||
|
|
@ -4315,7 +4315,7 @@ func testInsertChainWithCutoff(t *testing.T, cutoff uint64, ancientLimit uint64,
|
|||
config := DefaultCacheConfigWithScheme(rawdb.PathScheme)
|
||||
config.ChainHistoryMode = history.KeepPostMerge
|
||||
|
||||
db, _ := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), "", "", false)
|
||||
db, _ := rawdb.Open(rawdb.NewMemoryDatabase(), rawdb.OpenOptions{})
|
||||
defer db.Close()
|
||||
chain, _ := NewBlockChain(db, DefaultCacheConfigWithScheme(rawdb.PathScheme), genesis, nil, beacon.New(ethash.NewFaker()), vm.Config{}, nil)
|
||||
defer chain.Stop()
|
||||
|
|
|
|||
|
|
@ -416,7 +416,7 @@ func checkReceiptsRLP(have, want types.Receipts) error {
|
|||
func TestAncientStorage(t *testing.T) {
|
||||
// Freezer style fast import the chain.
|
||||
frdir := t.TempDir()
|
||||
db, err := NewDatabaseWithFreezer(NewMemoryDatabase(), frdir, "", false)
|
||||
db, err := Open(NewMemoryDatabase(), OpenOptions{Ancient: frdir})
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create database with ancient backend")
|
||||
}
|
||||
|
|
@ -469,7 +469,7 @@ func TestAncientStorage(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestWriteAncientHeaderChain(t *testing.T) {
|
||||
db, err := NewDatabaseWithFreezer(NewMemoryDatabase(), t.TempDir(), "", false)
|
||||
db, err := Open(NewMemoryDatabase(), OpenOptions{Ancient: t.TempDir()})
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create database with ancient backend")
|
||||
}
|
||||
|
|
@ -586,7 +586,7 @@ func TestHashesInRange(t *testing.T) {
|
|||
func BenchmarkWriteAncientBlocks(b *testing.B) {
|
||||
// Open freezer database.
|
||||
frdir := b.TempDir()
|
||||
db, err := NewDatabaseWithFreezer(NewMemoryDatabase(), frdir, "", false)
|
||||
db, err := Open(NewMemoryDatabase(), OpenOptions{Ancient: frdir})
|
||||
if err != nil {
|
||||
b.Fatalf("failed to create database with ancient backend")
|
||||
}
|
||||
|
|
@ -890,7 +890,7 @@ func TestHeadersRLPStorage(t *testing.T) {
|
|||
// Have N headers in the freezer
|
||||
frdir := t.TempDir()
|
||||
|
||||
db, err := NewDatabaseWithFreezer(NewMemoryDatabase(), frdir, "", false)
|
||||
db, err := Open(NewMemoryDatabase(), OpenOptions{Ancient: frdir})
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create database with ancient backend")
|
||||
}
|
||||
|
|
|
|||
|
|
@ -77,13 +77,6 @@ func basicRead(t *testing.T, newFn func(kinds []string) ethdb.AncientStore) {
|
|||
}
|
||||
for _, c := range cases {
|
||||
for i := c.start; i < c.limit; i++ {
|
||||
exist, err := db.HasAncient("a", uint64(i))
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to check presence, %v", err)
|
||||
}
|
||||
if exist {
|
||||
t.Fatalf("Item %d is already truncated", uint64(i))
|
||||
}
|
||||
_, err = db.Ancient("a", uint64(i))
|
||||
if err == nil {
|
||||
t.Fatal("Error is expected for non-existent item")
|
||||
|
|
@ -93,13 +86,6 @@ func basicRead(t *testing.T, newFn func(kinds []string) ethdb.AncientStore) {
|
|||
|
||||
// Test the items in range should be reachable
|
||||
for i := 10; i < 90; i++ {
|
||||
exist, err := db.HasAncient("a", uint64(i))
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to check presence, %v", err)
|
||||
}
|
||||
if !exist {
|
||||
t.Fatalf("Item %d is missing", uint64(i))
|
||||
}
|
||||
blob, err := db.Ancient("a", uint64(i))
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to retrieve item, %v", err)
|
||||
|
|
@ -110,13 +96,6 @@ func basicRead(t *testing.T, newFn func(kinds []string) ethdb.AncientStore) {
|
|||
}
|
||||
|
||||
// Test the items in unknown table shouldn't be reachable
|
||||
exist, err := db.HasAncient("b", uint64(0))
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to check presence, %v", err)
|
||||
}
|
||||
if exist {
|
||||
t.Fatal("Item in unknown table shouldn't be found")
|
||||
}
|
||||
_, err = db.Ancient("b", uint64(0))
|
||||
if err == nil {
|
||||
t.Fatal("Error is expected for unknown table")
|
||||
|
|
|
|||
|
|
@ -23,6 +23,7 @@ import (
|
|||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/core/rawdb/eradb"
|
||||
"github.com/ethereum/go-ethereum/ethdb"
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
"github.com/ethereum/go-ethereum/params"
|
||||
|
|
@ -43,7 +44,10 @@ const (
|
|||
// feature. The background thread will keep moving ancient chain segments from
|
||||
// key-value database to flat files for saving space on live database.
|
||||
type chainFreezer struct {
|
||||
ethdb.AncientStore // Ancient store for storing cold chain segment
|
||||
ancients ethdb.AncientStore // Ancient store for storing cold chain segment
|
||||
|
||||
// Optional Era database used as a backup for the pruned chain.
|
||||
eradb *eradb.Store
|
||||
|
||||
quit chan struct{}
|
||||
wg sync.WaitGroup
|
||||
|
|
@ -56,21 +60,25 @@ type chainFreezer struct {
|
|||
// state freezer (e.g. dev mode).
|
||||
// - if non-empty directory is given, initializes the regular file-based
|
||||
// state freezer.
|
||||
func newChainFreezer(datadir string, namespace string, readonly bool) (*chainFreezer, error) {
|
||||
var (
|
||||
err error
|
||||
freezer ethdb.AncientStore
|
||||
)
|
||||
func newChainFreezer(datadir string, eraDir string, namespace string, readonly bool) (*chainFreezer, error) {
|
||||
if datadir == "" {
|
||||
freezer = NewMemoryFreezer(readonly, chainFreezerTableConfigs)
|
||||
} else {
|
||||
freezer, err = NewFreezer(datadir, namespace, readonly, freezerTableSize, chainFreezerTableConfigs)
|
||||
return &chainFreezer{
|
||||
ancients: NewMemoryFreezer(readonly, chainFreezerTableConfigs),
|
||||
quit: make(chan struct{}),
|
||||
trigger: make(chan chan struct{}),
|
||||
}, nil
|
||||
}
|
||||
freezer, err := NewFreezer(datadir, namespace, readonly, freezerTableSize, chainFreezerTableConfigs)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
edb, err := eradb.New(resolveChainEraDir(datadir, eraDir))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &chainFreezer{
|
||||
AncientStore: freezer,
|
||||
ancients: freezer,
|
||||
eradb: edb,
|
||||
quit: make(chan struct{}),
|
||||
trigger: make(chan chan struct{}),
|
||||
}, nil
|
||||
|
|
@ -84,7 +92,11 @@ func (f *chainFreezer) Close() error {
|
|||
close(f.quit)
|
||||
}
|
||||
f.wg.Wait()
|
||||
return f.AncientStore.Close()
|
||||
|
||||
if f.eradb != nil {
|
||||
f.eradb.Close()
|
||||
}
|
||||
return f.ancients.Close()
|
||||
}
|
||||
|
||||
// readHeadNumber returns the number of chain head block. 0 is returned if the
|
||||
|
|
@ -334,3 +346,75 @@ func (f *chainFreezer) freezeRange(nfdb *nofreezedb, number, limit uint64) (hash
|
|||
})
|
||||
return hashes, err
|
||||
}
|
||||
|
||||
// Ancient retrieves an ancient binary blob from the append-only immutable files.
|
||||
func (f *chainFreezer) Ancient(kind string, number uint64) ([]byte, error) {
|
||||
// Lookup the entry in the underlying ancient store, assuming that
|
||||
// headers and hashes are always available.
|
||||
if kind == ChainFreezerHeaderTable || kind == ChainFreezerHashTable {
|
||||
return f.ancients.Ancient(kind, number)
|
||||
}
|
||||
tail, err := f.ancients.Tail()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// Lookup the entry in the underlying ancient store if it's not pruned
|
||||
if number >= tail {
|
||||
return f.ancients.Ancient(kind, number)
|
||||
}
|
||||
// Lookup the entry in the optional era backend
|
||||
if f.eradb == nil {
|
||||
return nil, errOutOfBounds
|
||||
}
|
||||
switch kind {
|
||||
case ChainFreezerBodiesTable:
|
||||
return f.eradb.GetRawBody(number)
|
||||
case ChainFreezerReceiptTable:
|
||||
return f.eradb.GetRawReceipts(number)
|
||||
}
|
||||
return nil, errUnknownTable
|
||||
}
|
||||
|
||||
// ReadAncients executes an operation while preventing mutations to the freezer,
|
||||
// i.e. if fn performs multiple reads, they will be consistent with each other.
|
||||
func (f *chainFreezer) ReadAncients(fn func(ethdb.AncientReaderOp) error) (err error) {
|
||||
if store, ok := f.ancients.(*Freezer); ok {
|
||||
store.writeLock.Lock()
|
||||
defer store.writeLock.Unlock()
|
||||
}
|
||||
return fn(f)
|
||||
}
|
||||
|
||||
// Methods below are just pass-through to the underlying ancient store.
|
||||
|
||||
func (f *chainFreezer) Ancients() (uint64, error) {
|
||||
return f.ancients.Ancients()
|
||||
}
|
||||
|
||||
func (f *chainFreezer) Tail() (uint64, error) {
|
||||
return f.ancients.Tail()
|
||||
}
|
||||
|
||||
func (f *chainFreezer) AncientSize(kind string) (uint64, error) {
|
||||
return f.ancients.AncientSize(kind)
|
||||
}
|
||||
|
||||
func (f *chainFreezer) AncientRange(kind string, start, count, maxBytes uint64) ([][]byte, error) {
|
||||
return f.ancients.AncientRange(kind, start, count, maxBytes)
|
||||
}
|
||||
|
||||
func (f *chainFreezer) ModifyAncients(fn func(ethdb.AncientWriteOp) error) (int64, error) {
|
||||
return f.ancients.ModifyAncients(fn)
|
||||
}
|
||||
|
||||
func (f *chainFreezer) TruncateHead(items uint64) (uint64, error) {
|
||||
return f.ancients.TruncateHead(items)
|
||||
}
|
||||
|
||||
func (f *chainFreezer) TruncateTail(items uint64) (uint64, error) {
|
||||
return f.ancients.TruncateTail(items)
|
||||
}
|
||||
|
||||
func (f *chainFreezer) SyncAncient() error {
|
||||
return f.ancients.SyncAncient()
|
||||
}
|
||||
|
|
|
|||
|
|
@ -86,11 +86,6 @@ type nofreezedb struct {
|
|||
ethdb.KeyValueStore
|
||||
}
|
||||
|
||||
// HasAncient returns an error as we don't have a backing chain freezer.
|
||||
func (db *nofreezedb) HasAncient(kind string, number uint64) (bool, error) {
|
||||
return false, errNotSupported
|
||||
}
|
||||
|
||||
// Ancient returns an error as we don't have a backing chain freezer.
|
||||
func (db *nofreezedb) Ancient(kind string, number uint64) ([]byte, error) {
|
||||
return nil, errNotSupported
|
||||
|
|
@ -186,19 +181,49 @@ func resolveChainFreezerDir(ancient string) string {
|
|||
return freezer
|
||||
}
|
||||
|
||||
// NewDatabaseWithFreezer creates a high level database on top of a given key-
|
||||
// value data store with a freezer moving immutable chain segments into cold
|
||||
// storage. The passed ancient indicates the path of root ancient directory
|
||||
// where the chain freezer can be opened.
|
||||
// resolveChainEraDir is a helper function which resolves the absolute path of era database.
|
||||
func resolveChainEraDir(chainFreezerDir string, era string) string {
|
||||
switch {
|
||||
case era == "":
|
||||
return filepath.Join(chainFreezerDir, "era")
|
||||
case !filepath.IsAbs(era):
|
||||
return filepath.Join(chainFreezerDir, era)
|
||||
default:
|
||||
return era
|
||||
}
|
||||
}
|
||||
|
||||
// NewDatabaseWithFreezer creates a high level database on top of a given key-value store.
|
||||
// The passed ancient indicates the path of root ancient directory where the chain freezer
|
||||
// can be opened.
|
||||
//
|
||||
// Deprecated: use Open.
|
||||
func NewDatabaseWithFreezer(db ethdb.KeyValueStore, ancient string, namespace string, readonly bool) (ethdb.Database, error) {
|
||||
return Open(db, OpenOptions{
|
||||
Ancient: ancient,
|
||||
MetricsNamespace: namespace,
|
||||
ReadOnly: readonly,
|
||||
})
|
||||
}
|
||||
|
||||
// OpenOptions specifies options for opening the database.
|
||||
type OpenOptions struct {
|
||||
Ancient string // ancients directory
|
||||
Era string // era files directory
|
||||
MetricsNamespace string // prefix added to freezer metric names
|
||||
ReadOnly bool
|
||||
}
|
||||
|
||||
// Open creates a high-level database wrapper for the given key-value store.
|
||||
func Open(db ethdb.KeyValueStore, opts OpenOptions) (ethdb.Database, error) {
|
||||
// Create the idle freezer instance. If the given ancient directory is empty,
|
||||
// in-memory chain freezer is used (e.g. dev mode); otherwise the regular
|
||||
// file-based freezer is created.
|
||||
chainFreezerDir := ancient
|
||||
chainFreezerDir := opts.Ancient
|
||||
if chainFreezerDir != "" {
|
||||
chainFreezerDir = resolveChainFreezerDir(chainFreezerDir)
|
||||
}
|
||||
frdb, err := newChainFreezer(chainFreezerDir, namespace, readonly)
|
||||
frdb, err := newChainFreezer(chainFreezerDir, opts.Era, opts.MetricsNamespace, opts.ReadOnly)
|
||||
if err != nil {
|
||||
printChainMetadata(db)
|
||||
return nil, err
|
||||
|
|
@ -282,7 +307,7 @@ func NewDatabaseWithFreezer(db ethdb.KeyValueStore, ancient string, namespace st
|
|||
}
|
||||
}
|
||||
// Freezer is consistent with the key-value database, permit combining the two
|
||||
if !readonly {
|
||||
if !opts.ReadOnly {
|
||||
frdb.wg.Add(1)
|
||||
go func() {
|
||||
frdb.freeze(db)
|
||||
|
|
@ -290,7 +315,7 @@ func NewDatabaseWithFreezer(db ethdb.KeyValueStore, ancient string, namespace st
|
|||
}()
|
||||
}
|
||||
return &freezerdb{
|
||||
ancientRoot: ancient,
|
||||
ancientRoot: opts.Ancient,
|
||||
KeyValueStore: db,
|
||||
chainFreezer: frdb,
|
||||
}, nil
|
||||
|
|
|
|||
345
core/rawdb/eradb/eradb.go
Normal file
345
core/rawdb/eradb/eradb.go
Normal file
|
|
@ -0,0 +1,345 @@
|
|||
// Copyright 2025 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
// Package eradb implements a history backend using era1 files.
|
||||
package eradb
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/fs"
|
||||
"path/filepath"
|
||||
"sync"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common/lru"
|
||||
"github.com/ethereum/go-ethereum/internal/era"
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
"github.com/ethereum/go-ethereum/rlp"
|
||||
)
|
||||
|
||||
const openFileLimit = 64
|
||||
|
||||
var errClosed = errors.New("era store is closed")
|
||||
|
||||
// Store manages read access to a directory of era1 files.
|
||||
// The getter methods are thread-safe.
|
||||
type Store struct {
|
||||
datadir string
|
||||
|
||||
// The mutex protects all remaining fields.
|
||||
mu sync.Mutex
|
||||
cond *sync.Cond
|
||||
lru lru.BasicLRU[uint64, *fileCacheEntry]
|
||||
opening map[uint64]*fileCacheEntry
|
||||
closing bool
|
||||
}
|
||||
|
||||
type fileCacheEntry struct {
|
||||
refcount int // reference count. This is protected by Store.mu!
|
||||
opened chan struct{} // signals opening of file has completed
|
||||
file *era.Era // the file
|
||||
err error // error from opening the file
|
||||
}
|
||||
|
||||
type fileCacheStatus byte
|
||||
|
||||
const (
|
||||
storeClosing fileCacheStatus = iota
|
||||
fileIsNew
|
||||
fileIsOpening
|
||||
fileIsCached
|
||||
)
|
||||
|
||||
// New opens the store directory.
|
||||
func New(datadir string) (*Store, error) {
|
||||
db := &Store{
|
||||
datadir: datadir,
|
||||
lru: lru.NewBasicLRU[uint64, *fileCacheEntry](openFileLimit),
|
||||
opening: make(map[uint64]*fileCacheEntry),
|
||||
}
|
||||
db.cond = sync.NewCond(&db.mu)
|
||||
log.Info("Opened Era store", "datadir", datadir)
|
||||
return db, nil
|
||||
}
|
||||
|
||||
// Close closes all open era1 files in the cache.
|
||||
func (db *Store) Close() {
|
||||
db.mu.Lock()
|
||||
defer db.mu.Unlock()
|
||||
|
||||
// Prevent new cache additions.
|
||||
db.closing = true
|
||||
|
||||
// Deref all active files. Since inactive files have a refcount of one, they will be
|
||||
// closed right here and now after decrementing. Files which are currently being used
|
||||
// have a refcount > 1 and will hit zero when their access finishes.
|
||||
for _, epoch := range db.lru.Keys() {
|
||||
entry, _ := db.lru.Peek(epoch)
|
||||
if entry.derefAndClose(epoch) {
|
||||
db.lru.Remove(epoch)
|
||||
}
|
||||
}
|
||||
|
||||
// Wait for all store access to finish.
|
||||
for db.lru.Len() > 0 || len(db.opening) > 0 {
|
||||
db.cond.Wait()
|
||||
}
|
||||
}
|
||||
|
||||
// GetRawBody returns the raw body for a given block number.
|
||||
func (db *Store) GetRawBody(number uint64) ([]byte, error) {
|
||||
epoch := number / uint64(era.MaxEra1Size)
|
||||
entry := db.getEraByEpoch(epoch)
|
||||
if entry.err != nil {
|
||||
if errors.Is(entry.err, fs.ErrNotExist) {
|
||||
return nil, nil
|
||||
}
|
||||
return nil, entry.err
|
||||
}
|
||||
defer db.doneWithFile(epoch, entry)
|
||||
|
||||
return entry.file.GetRawBodyByNumber(number)
|
||||
}
|
||||
|
||||
// GetRawReceipts returns the raw receipts for a given block number.
|
||||
func (db *Store) GetRawReceipts(number uint64) ([]byte, error) {
|
||||
epoch := number / uint64(era.MaxEra1Size)
|
||||
entry := db.getEraByEpoch(epoch)
|
||||
if entry.err != nil {
|
||||
if errors.Is(entry.err, fs.ErrNotExist) {
|
||||
return nil, nil
|
||||
}
|
||||
return nil, entry.err
|
||||
}
|
||||
defer db.doneWithFile(epoch, entry)
|
||||
|
||||
data, err := entry.file.GetRawReceiptsByNumber(number)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return convertReceipts(data)
|
||||
}
|
||||
|
||||
// convertReceipts transforms an encoded block receipts list from the format
|
||||
// used by era1 into the 'storage' format used by the go-ethereum ancients database.
|
||||
func convertReceipts(input []byte) ([]byte, error) {
|
||||
var (
|
||||
out bytes.Buffer
|
||||
enc = rlp.NewEncoderBuffer(&out)
|
||||
)
|
||||
blockListIter, err := rlp.NewListIterator(input)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("invalid block receipts list: %v", err)
|
||||
}
|
||||
outerList := enc.List()
|
||||
for i := 0; blockListIter.Next(); i++ {
|
||||
kind, content, _, err := rlp.Split(blockListIter.Value())
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("receipt %d invalid: %v", i, err)
|
||||
}
|
||||
var receiptData []byte
|
||||
switch kind {
|
||||
case rlp.Byte:
|
||||
return nil, fmt.Errorf("receipt %d is single byte", i)
|
||||
case rlp.String:
|
||||
// Typed receipt - skip type.
|
||||
receiptData = content[1:]
|
||||
case rlp.List:
|
||||
// Legacy receipt
|
||||
receiptData = blockListIter.Value()
|
||||
}
|
||||
// Convert data list.
|
||||
// Input is [status, gas-used, bloom, logs]
|
||||
// Output is [status, gas-used, logs], i.e. we need to skip the bloom.
|
||||
dataIter, err := rlp.NewListIterator(receiptData)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("receipt %d has invalid data: %v", i, err)
|
||||
}
|
||||
innerList := enc.List()
|
||||
for field := 0; dataIter.Next(); field++ {
|
||||
if field == 2 {
|
||||
continue // skip bloom
|
||||
}
|
||||
enc.Write(dataIter.Value())
|
||||
}
|
||||
enc.ListEnd(innerList)
|
||||
if dataIter.Err() != nil {
|
||||
return nil, fmt.Errorf("receipt %d iterator error: %v", i, dataIter.Err())
|
||||
}
|
||||
}
|
||||
enc.ListEnd(outerList)
|
||||
if blockListIter.Err() != nil {
|
||||
return nil, fmt.Errorf("block receipt list iterator error: %v", blockListIter.Err())
|
||||
}
|
||||
enc.Flush()
|
||||
return out.Bytes(), nil
|
||||
}
|
||||
|
||||
// getEraByEpoch opens an era file or gets it from the cache.
// The caller can freely access the returned entry's .file and .err
// db.doneWithFile must be called when it is done reading the file.
func (db *Store) getEraByEpoch(epoch uint64) *fileCacheEntry {
	stat, entry := db.getCacheEntry(epoch)

	switch stat {
	case storeClosing:
		// Store is shutting down; hand back a synthetic entry carrying the error.
		return &fileCacheEntry{err: errClosed}

	case fileIsNew:
		// Open the file and put it into the cache. This goroutine won the race
		// to open the file; concurrent callers block on entry.opened below.
		e, err := db.openEraFile(epoch)
		if err != nil {
			db.fileFailedToOpen(epoch, entry, err)
		} else {
			db.fileOpened(epoch, entry, e)
		}
		// Wake all waiters now that entry.file / entry.err are populated.
		close(entry.opened)

	case fileIsOpening:
		// Wait for open to finish.
		<-entry.opened

	case fileIsCached:
		// Nothing to do.

	default:
		panic(fmt.Sprintf("invalid file state %d", stat))
	}
	return entry
}
|
||||
|
||||
// getCacheEntry gets an open era file from the cache.
// The returned status tells the caller whether the file is already cached,
// currently being opened by another goroutine, or must be opened by the
// caller. The entry's refcount is incremented to account for the caller.
func (db *Store) getCacheEntry(epoch uint64) (stat fileCacheStatus, entry *fileCacheEntry) {
	db.mu.Lock()
	defer db.mu.Unlock()

	// Refuse new lookups while Close is draining the store.
	if db.closing {
		return storeClosing, nil
	}
	if entry = db.opening[epoch]; entry != nil {
		// Another goroutine is already opening this file.
		stat = fileIsOpening
	} else if entry, _ = db.lru.Get(epoch); entry != nil {
		stat = fileIsCached
	} else {
		// It's a new file, create an entry in the opening table. Note the entry is
		// created with an initial refcount of one. We increment the count once more
		// before returning, but the count will return to one when the file has been
		// accessed. When the store is closed or the file gets evicted from the cache,
		// refcount will be decreased by one, thus allowing it to hit zero.
		entry = &fileCacheEntry{refcount: 1, opened: make(chan struct{})}
		db.opening[epoch] = entry
		stat = fileIsNew
	}
	// Account for the caller's use of the entry.
	entry.refcount++
	return stat, entry
}
|
||||
|
||||
// fileOpened is called after an era file has been successfully opened.
// It moves the entry from the 'opening' table into the LRU cache, or closes
// the file again if the store was shut down while the open was in flight.
func (db *Store) fileOpened(epoch uint64, entry *fileCacheEntry, file *era.Era) {
	db.mu.Lock()
	defer db.mu.Unlock()

	delete(db.opening, epoch)
	db.cond.Signal() // db.opening was modified

	// The database may have been closed while opening the file. When that happens, we
	// need to close the file here, since it isn't tracked by the LRU yet.
	if db.closing {
		entry.err = errClosed
		file.Close()
		return
	}

	// Add it to the LRU. This may evict an existing item, which we have to close.
	entry.file = file
	evictedEpoch, evictedEntry, _ := db.lru.Add3(epoch, entry)
	if evictedEntry != nil {
		// Drops the cache's reference; the evicted file closes for real once
		// its last active reader calls doneWithFile.
		evictedEntry.derefAndClose(evictedEpoch)
	}
}
|
||||
|
||||
// fileFailedToOpen is called when an era file could not be opened.
|
||||
func (db *Store) fileFailedToOpen(epoch uint64, entry *fileCacheEntry, err error) {
|
||||
db.mu.Lock()
|
||||
defer db.mu.Unlock()
|
||||
|
||||
delete(db.opening, epoch)
|
||||
db.cond.Signal() // db.opening was modified
|
||||
entry.err = err
|
||||
}
|
||||
|
||||
func (db *Store) openEraFile(epoch uint64) (*era.Era, error) {
|
||||
// File name scheme is <network>-<epoch>-<root>.
|
||||
glob := fmt.Sprintf("*-%05d-*.era1", epoch)
|
||||
matches, err := filepath.Glob(filepath.Join(db.datadir, glob))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(matches) > 1 {
|
||||
return nil, fmt.Errorf("multiple era1 files found for epoch %d", epoch)
|
||||
}
|
||||
if len(matches) == 0 {
|
||||
return nil, fs.ErrNotExist
|
||||
}
|
||||
filename := matches[0]
|
||||
|
||||
e, err := era.Open(filename)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// Sanity-check start block.
|
||||
if e.Start()%uint64(era.MaxEra1Size) != 0 {
|
||||
return nil, fmt.Errorf("pre-merge era1 file has invalid boundary. %d %% %d != 0", e.Start(), era.MaxEra1Size)
|
||||
}
|
||||
log.Debug("Opened era1 file", "epoch", epoch)
|
||||
return e, nil
|
||||
}
|
||||
|
||||
// doneWithFile signals that the caller has finished using a file.
// This decrements the refcount and ensures the file is closed by the last user.
func (db *Store) doneWithFile(epoch uint64, entry *fileCacheEntry) {
	db.mu.Lock()
	defer db.mu.Unlock()

	// Entries that failed to open hold no file, so there is nothing to close.
	if entry.err != nil {
		return
	}
	if entry.derefAndClose(epoch) {
		// Delete closed entry from LRU if it is still present.
		if e, _ := db.lru.Peek(epoch); e == entry {
			db.lru.Remove(epoch)
			db.cond.Signal() // db.lru was modified
		}
	}
}
|
||||
|
||||
// derefAndClose decrements the reference counter and closes the file
|
||||
// when it hits zero.
|
||||
func (entry *fileCacheEntry) derefAndClose(epoch uint64) (closed bool) {
|
||||
entry.refcount--
|
||||
if entry.refcount > 0 {
|
||||
return false
|
||||
}
|
||||
|
||||
closeErr := entry.file.Close()
|
||||
if closeErr == nil {
|
||||
log.Debug("Closed era1 file", "epoch", epoch)
|
||||
} else {
|
||||
log.Warn("Error closing era1 file", "epoch", epoch, "err", closeErr)
|
||||
}
|
||||
return true
|
||||
}
|
||||
103
core/rawdb/eradb/eradb_test.go
Normal file
103
core/rawdb/eradb/eradb_test.go
Normal file
|
|
@ -0,0 +1,103 @@
|
|||
// Copyright 2025 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package eradb
|
||||
|
||||
import (
|
||||
"sync"
|
||||
"testing"
|
||||
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/ethereum/go-ethereum/rlp"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestEraDatabase(t *testing.T) {
|
||||
db, err := New("testdata")
|
||||
require.NoError(t, err)
|
||||
defer db.Close()
|
||||
|
||||
r, err := db.GetRawBody(175881)
|
||||
require.NoError(t, err)
|
||||
var body *types.Body
|
||||
err = rlp.DecodeBytes(r, &body)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, body, "block body not found")
|
||||
assert.Equal(t, 3, len(body.Transactions))
|
||||
|
||||
r, err = db.GetRawReceipts(175881)
|
||||
require.NoError(t, err)
|
||||
var receipts []*types.ReceiptForStorage
|
||||
err = rlp.DecodeBytes(r, &receipts)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, receipts, "receipts not found")
|
||||
assert.Equal(t, 3, len(receipts), "receipts length mismatch")
|
||||
}
|
||||
|
||||
func TestEraDatabaseConcurrentOpen(t *testing.T) {
|
||||
db, err := New("testdata")
|
||||
require.NoError(t, err)
|
||||
defer db.Close()
|
||||
|
||||
const N = 25
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(N)
|
||||
for range N {
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
r, err := db.GetRawBody(1024)
|
||||
if err != nil {
|
||||
t.Error("err:", err)
|
||||
}
|
||||
if len(r) == 0 {
|
||||
t.Error("empty body")
|
||||
}
|
||||
}()
|
||||
}
|
||||
wg.Wait()
|
||||
}
|
||||
|
||||
func TestEraDatabaseConcurrentOpenClose(t *testing.T) {
|
||||
db, err := New("testdata")
|
||||
require.NoError(t, err)
|
||||
defer db.Close()
|
||||
|
||||
const N = 10
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(N)
|
||||
for range N {
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
r, err := db.GetRawBody(1024)
|
||||
if err == errClosed {
|
||||
return
|
||||
}
|
||||
if err != nil {
|
||||
t.Error("err:", err)
|
||||
}
|
||||
if len(r) == 0 {
|
||||
t.Error("empty body")
|
||||
}
|
||||
}()
|
||||
}
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
db.Close()
|
||||
}()
|
||||
wg.Wait()
|
||||
}
|
||||
BIN
core/rawdb/eradb/testdata/sepolia-00000-643a00f7.era1
vendored
Normal file
BIN
core/rawdb/eradb/testdata/sepolia-00000-643a00f7.era1
vendored
Normal file
Binary file not shown.
BIN
core/rawdb/eradb/testdata/sepolia-00021-b8814b14.era1
vendored
Normal file
BIN
core/rawdb/eradb/testdata/sepolia-00021-b8814b14.era1
vendored
Normal file
Binary file not shown.
|
|
@ -172,10 +172,7 @@ func (f *Freezer) Close() error {
|
|||
errs = append(errs, err)
|
||||
}
|
||||
})
|
||||
if errs != nil {
|
||||
return fmt.Errorf("%v", errs)
|
||||
}
|
||||
return nil
|
||||
return errors.Join(errs...)
|
||||
}
|
||||
|
||||
// AncientDatadir returns the path of the ancient store.
|
||||
|
|
@ -183,15 +180,6 @@ func (f *Freezer) AncientDatadir() (string, error) {
|
|||
return f.datadir, nil
|
||||
}
|
||||
|
||||
// HasAncient returns an indicator whether the specified ancient data exists
|
||||
// in the freezer.
|
||||
func (f *Freezer) HasAncient(kind string, number uint64) (bool, error) {
|
||||
if table := f.tables[kind]; table != nil {
|
||||
return table.has(number), nil
|
||||
}
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// Ancient retrieves an ancient binary blob from the append-only immutable files.
|
||||
func (f *Freezer) Ancient(kind string, number uint64) ([]byte, error) {
|
||||
if table := f.tables[kind]; table != nil {
|
||||
|
|
|
|||
|
|
@ -45,14 +45,6 @@ func newMemoryTable(name string, config freezerTableConfig) *memoryTable {
|
|||
return &memoryTable{name: name, config: config}
|
||||
}
|
||||
|
||||
// has returns an indicator whether the specified data exists.
|
||||
func (t *memoryTable) has(number uint64) bool {
|
||||
t.lock.RLock()
|
||||
defer t.lock.RUnlock()
|
||||
|
||||
return number >= t.offset && number < t.items
|
||||
}
|
||||
|
||||
// retrieve retrieves multiple items in sequence, starting from the index 'start'.
|
||||
// It will return:
|
||||
// - at most 'count' items,
|
||||
|
|
@ -232,17 +224,6 @@ func NewMemoryFreezer(readonly bool, tableName map[string]freezerTableConfig) *M
|
|||
}
|
||||
}
|
||||
|
||||
// HasAncient returns an indicator whether the specified data exists.
|
||||
func (f *MemoryFreezer) HasAncient(kind string, number uint64) (bool, error) {
|
||||
f.lock.RLock()
|
||||
defer f.lock.RUnlock()
|
||||
|
||||
if table := f.tables[kind]; table != nil {
|
||||
return table.has(number), nil
|
||||
}
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// Ancient retrieves an ancient binary blob from the in-memory freezer.
|
||||
func (f *MemoryFreezer) Ancient(kind string, number uint64) ([]byte, error) {
|
||||
f.lock.RLock()
|
||||
|
|
|
|||
|
|
@ -105,15 +105,6 @@ func (f *resettableFreezer) Close() error {
|
|||
return f.freezer.Close()
|
||||
}
|
||||
|
||||
// HasAncient returns an indicator whether the specified ancient data exists
|
||||
// in the freezer
|
||||
func (f *resettableFreezer) HasAncient(kind string, number uint64) (bool, error) {
|
||||
f.lock.RLock()
|
||||
defer f.lock.RUnlock()
|
||||
|
||||
return f.freezer.HasAncient(kind, number)
|
||||
}
|
||||
|
||||
// Ancient retrieves an ancient binary blob from the append-only immutable files.
|
||||
func (f *resettableFreezer) Ancient(kind string, number uint64) ([]byte, error) {
|
||||
f.lock.RLock()
|
||||
|
|
|
|||
|
|
@ -1106,12 +1106,6 @@ func (t *freezerTable) retrieveItems(start, count, maxBytes uint64) ([]byte, []i
|
|||
return output, sizes, nil
|
||||
}
|
||||
|
||||
// has returns an indicator whether the specified number data is still accessible
|
||||
// in the freezer table.
|
||||
func (t *freezerTable) has(number uint64) bool {
|
||||
return t.items.Load() > number && t.itemHidden.Load() <= number
|
||||
}
|
||||
|
||||
// size returns the total data size in the freezer table.
|
||||
func (t *freezerTable) size() (uint64, error) {
|
||||
t.lock.RLock()
|
||||
|
|
|
|||
|
|
@ -357,9 +357,6 @@ func checkAncientCount(t *testing.T, f *Freezer, kind string, n uint64) {
|
|||
// Check at index n-1.
|
||||
if n > 0 {
|
||||
index := n - 1
|
||||
if ok, _ := f.HasAncient(kind, index); !ok {
|
||||
t.Errorf("HasAncient(%q, %d) returned false unexpectedly", kind, index)
|
||||
}
|
||||
if _, err := f.Ancient(kind, index); err != nil {
|
||||
t.Errorf("Ancient(%q, %d) returned unexpected error %q", kind, index, err)
|
||||
}
|
||||
|
|
@ -367,9 +364,6 @@ func checkAncientCount(t *testing.T, f *Freezer, kind string, n uint64) {
|
|||
|
||||
// Check at index n.
|
||||
index := n
|
||||
if ok, _ := f.HasAncient(kind, index); ok {
|
||||
t.Errorf("HasAncient(%q, %d) returned true unexpectedly", kind, index)
|
||||
}
|
||||
if _, err := f.Ancient(kind, index); err == nil {
|
||||
t.Errorf("Ancient(%q, %d) didn't return expected error", kind, index)
|
||||
} else if err != errOutOfBounds {
|
||||
|
|
|
|||
|
|
@ -50,12 +50,6 @@ func (t *table) Get(key []byte) ([]byte, error) {
|
|||
return t.db.Get(append([]byte(t.prefix), key...))
|
||||
}
|
||||
|
||||
// HasAncient is a noop passthrough that just forwards the request to the underlying
|
||||
// database.
|
||||
func (t *table) HasAncient(kind string, number uint64) (bool, error) {
|
||||
return t.db.HasAncient(kind, number)
|
||||
}
|
||||
|
||||
// Ancient is a noop passthrough that just forwards the request to the underlying
|
||||
// database.
|
||||
func (t *table) Ancient(kind string, number uint64) ([]byte, error) {
|
||||
|
|
|
|||
|
|
@ -116,7 +116,7 @@ func TestTxIndexer(t *testing.T) {
|
|||
},
|
||||
}
|
||||
for _, c := range cases {
|
||||
db, _ := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), "", "", false)
|
||||
db, _ := rawdb.Open(rawdb.NewMemoryDatabase(), rawdb.OpenOptions{})
|
||||
rawdb.WriteAncientBlocks(db, append([]*types.Block{gspec.ToBlock()}, blocks...), types.EncodeBlockReceiptLists(append([]types.Receipts{{}}, receipts...)))
|
||||
|
||||
// Index the initial blocks from ancient store
|
||||
|
|
@ -235,7 +235,7 @@ func TestTxIndexerRepair(t *testing.T) {
|
|||
},
|
||||
}
|
||||
for _, c := range cases {
|
||||
db, _ := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), "", "", false)
|
||||
db, _ := rawdb.Open(rawdb.NewMemoryDatabase(), rawdb.OpenOptions{})
|
||||
encReceipts := types.EncodeBlockReceiptLists(append([]types.Receipts{{}}, receipts...))
|
||||
rawdb.WriteAncientBlocks(db, append([]*types.Block{gspec.ToBlock()}, blocks...), encReceipts)
|
||||
|
||||
|
|
@ -426,7 +426,7 @@ func TestTxIndexerReport(t *testing.T) {
|
|||
},
|
||||
}
|
||||
for _, c := range cases {
|
||||
db, _ := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), "", "", false)
|
||||
db, _ := rawdb.Open(rawdb.NewMemoryDatabase(), rawdb.OpenOptions{})
|
||||
encReceipts := types.EncodeBlockReceiptLists(append([]types.Receipts{{}}, receipts...))
|
||||
rawdb.WriteAncientBlocks(db, append([]*types.Block{gspec.ToBlock()}, blocks...), encReceipts)
|
||||
|
||||
|
|
|
|||
|
|
@ -128,7 +128,14 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) {
|
|||
}
|
||||
log.Info("Allocated trie memory caches", "clean", common.StorageSize(config.TrieCleanCache)*1024*1024, "dirty", common.StorageSize(config.TrieDirtyCache)*1024*1024)
|
||||
|
||||
chainDb, err := stack.OpenDatabaseWithFreezer("chaindata", config.DatabaseCache, config.DatabaseHandles, config.DatabaseFreezer, "eth/db/chaindata/", false)
|
||||
dbOptions := node.DatabaseOptions{
|
||||
Cache: config.DatabaseCache,
|
||||
Handles: config.DatabaseHandles,
|
||||
AncientsDirectory: config.DatabaseFreezer,
|
||||
EraDirectory: config.DatabaseEra,
|
||||
MetricsNamespace: "eth/db/chaindata/",
|
||||
}
|
||||
chainDb, err := stack.OpenDatabaseWithOptions("chaindata", dbOptions)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
|
|
|||
|
|
@ -56,7 +56,7 @@ func newTester(t *testing.T) *downloadTester {
|
|||
|
||||
// newTesterWithNotification creates a new downloader test mocker.
|
||||
func newTesterWithNotification(t *testing.T, success func()) *downloadTester {
|
||||
db, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), "", "", false)
|
||||
db, err := rawdb.Open(rawdb.NewMemoryDatabase(), rawdb.OpenOptions{})
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
|
|
|||
|
|
@ -120,6 +120,7 @@ type Config struct {
|
|||
DatabaseHandles int `toml:"-"`
|
||||
DatabaseCache int
|
||||
DatabaseFreezer string
|
||||
DatabaseEra string
|
||||
|
||||
TrieCleanCache int
|
||||
TrieDirtyCache int
|
||||
|
|
|
|||
|
|
@ -37,6 +37,7 @@ func (c Config) MarshalTOML() (interface{}, error) {
|
|||
DatabaseHandles int `toml:"-"`
|
||||
DatabaseCache int
|
||||
DatabaseFreezer string
|
||||
DatabaseEra string
|
||||
TrieCleanCache int
|
||||
TrieDirtyCache int
|
||||
TrieTimeout time.Duration
|
||||
|
|
@ -77,6 +78,7 @@ func (c Config) MarshalTOML() (interface{}, error) {
|
|||
enc.DatabaseHandles = c.DatabaseHandles
|
||||
enc.DatabaseCache = c.DatabaseCache
|
||||
enc.DatabaseFreezer = c.DatabaseFreezer
|
||||
enc.DatabaseEra = c.DatabaseEra
|
||||
enc.TrieCleanCache = c.TrieCleanCache
|
||||
enc.TrieDirtyCache = c.TrieDirtyCache
|
||||
enc.TrieTimeout = c.TrieTimeout
|
||||
|
|
@ -121,6 +123,7 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error {
|
|||
DatabaseHandles *int `toml:"-"`
|
||||
DatabaseCache *int
|
||||
DatabaseFreezer *string
|
||||
DatabaseEra *string
|
||||
TrieCleanCache *int
|
||||
TrieDirtyCache *int
|
||||
TrieTimeout *time.Duration
|
||||
|
|
@ -204,6 +207,9 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error {
|
|||
if dec.DatabaseFreezer != nil {
|
||||
c.DatabaseFreezer = *dec.DatabaseFreezer
|
||||
}
|
||||
if dec.DatabaseEra != nil {
|
||||
c.DatabaseEra = *dec.DatabaseEra
|
||||
}
|
||||
if dec.TrieCleanCache != nil {
|
||||
c.TrieCleanCache = *dec.TrieCleanCache
|
||||
}
|
||||
|
|
|
|||
|
|
@ -92,10 +92,6 @@ type KeyValueStore interface {
|
|||
|
||||
// AncientReaderOp contains the methods required to read from immutable ancient data.
|
||||
type AncientReaderOp interface {
|
||||
// HasAncient returns an indicator whether the specified data exists in the
|
||||
// ancient store.
|
||||
HasAncient(kind string, number uint64) (bool, error)
|
||||
|
||||
// Ancient retrieves an ancient binary blob from the append-only immutable files.
|
||||
Ancient(kind string, number uint64) ([]byte, error)
|
||||
|
||||
|
|
|
|||
|
|
@ -48,13 +48,6 @@ func (db *Database) Get(key []byte) ([]byte, error) {
|
|||
return resp, nil
|
||||
}
|
||||
|
||||
func (db *Database) HasAncient(kind string, number uint64) (bool, error) {
|
||||
if _, err := db.Ancient(kind, number); err != nil {
|
||||
return false, err
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
|
||||
func (db *Database) Ancient(kind string, number uint64) ([]byte, error) {
|
||||
var resp hexutil.Bytes
|
||||
err := db.remote.Call(&resp, "debug_dbAncient", kind, number)
|
||||
|
|
|
|||
|
|
@ -18,7 +18,6 @@ package era
|
|||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"math/big"
|
||||
|
|
@ -126,32 +125,10 @@ func (e *Era) Close() error {
|
|||
return e.f.Close()
|
||||
}
|
||||
|
||||
// GetHeaderByNumber returns the header for the given block number.
|
||||
func (e *Era) GetHeaderByNumber(num uint64) (*types.Header, error) {
|
||||
if e.m.start > num || e.m.start+e.m.count <= num {
|
||||
return nil, errors.New("out-of-bounds")
|
||||
}
|
||||
off, err := e.readOffset(num)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Read and decompress header.
|
||||
r, _, err := newSnappyReader(e.s, TypeCompressedHeader, off)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var header types.Header
|
||||
if err := rlp.Decode(r, &header); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &header, nil
|
||||
}
|
||||
|
||||
// GetBlockByNumber returns the block for the given block number.
|
||||
func (e *Era) GetBlockByNumber(num uint64) (*types.Block, error) {
|
||||
if e.m.start > num || e.m.start+e.m.count <= num {
|
||||
return nil, errors.New("out-of-bounds")
|
||||
return nil, fmt.Errorf("out-of-bounds: %d not in [%d, %d)", num, e.m.start, e.m.start+e.m.count)
|
||||
}
|
||||
off, err := e.readOffset(num)
|
||||
if err != nil {
|
||||
|
|
@ -177,10 +154,30 @@ func (e *Era) GetBlockByNumber(num uint64) (*types.Block, error) {
|
|||
return types.NewBlockWithHeader(&header).WithBody(body), nil
|
||||
}
|
||||
|
||||
// GetReceiptsByNumber returns the receipts for the given block number.
|
||||
func (e *Era) GetReceiptsByNumber(num uint64) (types.Receipts, error) {
|
||||
// GetRawBodyByNumber returns the RLP-encoded body for the given block number.
|
||||
func (e *Era) GetRawBodyByNumber(num uint64) ([]byte, error) {
|
||||
if e.m.start > num || e.m.start+e.m.count <= num {
|
||||
return nil, errors.New("out-of-bounds")
|
||||
return nil, fmt.Errorf("out-of-bounds: %d not in [%d, %d)", num, e.m.start, e.m.start+e.m.count)
|
||||
}
|
||||
off, err := e.readOffset(num)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
off, err = e.s.SkipN(off, 1)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
r, _, err := newSnappyReader(e.s, TypeCompressedBody, off)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return io.ReadAll(r)
|
||||
}
|
||||
|
||||
// GetRawReceiptsByNumber returns the RLP-encoded receipts for the given block number.
|
||||
func (e *Era) GetRawReceiptsByNumber(num uint64) ([]byte, error) {
|
||||
if e.m.start > num || e.m.start+e.m.count <= num {
|
||||
return nil, fmt.Errorf("out-of-bounds: %d not in [%d, %d)", num, e.m.start, e.m.start+e.m.count)
|
||||
}
|
||||
off, err := e.readOffset(num)
|
||||
if err != nil {
|
||||
|
|
@ -193,16 +190,11 @@ func (e *Era) GetReceiptsByNumber(num uint64) (types.Receipts, error) {
|
|||
return nil, err
|
||||
}
|
||||
|
||||
// Read and decompress receipts.
|
||||
r, _, err := newSnappyReader(e.s, TypeCompressedReceipts, off)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var receipts types.Receipts
|
||||
if err := rlp.Decode(r, &receipts); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return receipts, nil
|
||||
return io.ReadAll(r)
|
||||
}
|
||||
|
||||
// Accumulator reads the accumulator entry in the Era1 file.
|
||||
|
|
|
|||
|
|
@ -101,17 +101,6 @@ func TestEra1Builder(t *testing.T) {
|
|||
if !bytes.Equal(rawHeader, chain.headers[i]) {
|
||||
t.Fatalf("mismatched header: want %s, got %s", chain.headers[i], rawHeader)
|
||||
}
|
||||
header, err := e.GetHeaderByNumber(i)
|
||||
if err != nil {
|
||||
t.Fatalf("error reading header: %v", err)
|
||||
}
|
||||
encHeader, err := rlp.EncodeToBytes(header)
|
||||
if err != nil {
|
||||
t.Fatalf("error encoding header: %v", err)
|
||||
}
|
||||
if !bytes.Equal(encHeader, chain.headers[i]) {
|
||||
t.Fatalf("mismatched header: want %s, got %s", chain.headers[i], encHeader)
|
||||
}
|
||||
|
||||
// Check bodies.
|
||||
body, err := io.ReadAll(it.Body)
|
||||
|
|
@ -130,7 +119,7 @@ func TestEra1Builder(t *testing.T) {
|
|||
if !bytes.Equal(rawReceipts, chain.receipts[i]) {
|
||||
t.Fatalf("mismatched receipts: want %s, got %s", chain.receipts[i], rawReceipts)
|
||||
}
|
||||
receipts, err := e.GetReceiptsByNumber(i)
|
||||
receipts, err := getReceiptsByNumber(e, i)
|
||||
if err != nil {
|
||||
t.Fatalf("error reading receipts: %v", err)
|
||||
}
|
||||
|
|
@ -179,3 +168,15 @@ func mustEncode(obj any) []byte {
|
|||
}
|
||||
return b
|
||||
}
|
||||
|
||||
func getReceiptsByNumber(e *Era, number uint64) (types.Receipts, error) {
|
||||
r, err := e.GetRawReceiptsByNumber(number)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var receipts types.Receipts
|
||||
if err := rlp.DecodeBytes(r, &receipts); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return receipts, nil
|
||||
}
|
||||
|
|
|
|||
|
|
@ -26,16 +26,25 @@ import (
|
|||
"github.com/ethereum/go-ethereum/log"
|
||||
)
|
||||
|
||||
// openOptions contains the options to apply when opening a database.
|
||||
// OBS: If AncientsDirectory is empty, it indicates that no freezer is to be used.
|
||||
type openOptions struct {
|
||||
Type string // "leveldb" | "pebble"
|
||||
Directory string // the datadir
|
||||
AncientsDirectory string // the ancients-dir
|
||||
Namespace string // the namespace for database relevant metrics
|
||||
// DatabaseOptions contains the options to apply when opening a database.
|
||||
type DatabaseOptions struct {
|
||||
// Directory for storing chain history ("freezer").
|
||||
AncientsDirectory string
|
||||
|
||||
// The optional Era folder, which can be either a subfolder under
|
||||
// ancient/chain or a directory specified via an absolute path.
|
||||
EraDirectory string
|
||||
|
||||
MetricsNamespace string // the namespace for database relevant metrics
|
||||
Cache int // the capacity(in megabytes) of the data caching
|
||||
Handles int // number of files to be open simultaneously
|
||||
ReadOnly bool
|
||||
ReadOnly bool // if true, no writes can be performed
|
||||
}
|
||||
|
||||
type internalOpenOptions struct {
|
||||
directory string
|
||||
dbEngine string // "leveldb" | "pebble"
|
||||
DatabaseOptions
|
||||
}
|
||||
|
||||
// openDatabase opens both a disk-based key-value database such as leveldb or pebble, but also
|
||||
|
|
@ -43,15 +52,18 @@ type openOptions struct {
|
|||
// set on the provided OpenOptions.
|
||||
// The passed o.AncientDir indicates the path of root ancient directory where
|
||||
// the chain freezer can be opened.
|
||||
func openDatabase(o openOptions) (ethdb.Database, error) {
|
||||
func openDatabase(o internalOpenOptions) (ethdb.Database, error) {
|
||||
kvdb, err := openKeyValueDatabase(o)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(o.AncientsDirectory) == 0 {
|
||||
return kvdb, nil
|
||||
opts := rawdb.OpenOptions{
|
||||
Ancient: o.AncientsDirectory,
|
||||
Era: o.EraDirectory,
|
||||
MetricsNamespace: o.MetricsNamespace,
|
||||
ReadOnly: o.ReadOnly,
|
||||
}
|
||||
frdb, err := rawdb.NewDatabaseWithFreezer(kvdb, o.AncientsDirectory, o.Namespace, o.ReadOnly)
|
||||
frdb, err := rawdb.Open(kvdb, opts)
|
||||
if err != nil {
|
||||
kvdb.Close()
|
||||
return nil, err
|
||||
|
|
@ -65,33 +77,33 @@ func openDatabase(o openOptions) (ethdb.Database, error) {
|
|||
// +----------------------------------------
|
||||
// db is non-existent | pebble default | specified type
|
||||
// db is existent | from db | specified type (if compatible)
|
||||
func openKeyValueDatabase(o openOptions) (ethdb.Database, error) {
|
||||
func openKeyValueDatabase(o internalOpenOptions) (ethdb.KeyValueStore, error) {
|
||||
// Reject any unsupported database type
|
||||
if len(o.Type) != 0 && o.Type != rawdb.DBLeveldb && o.Type != rawdb.DBPebble {
|
||||
return nil, fmt.Errorf("unknown db.engine %v", o.Type)
|
||||
if len(o.dbEngine) != 0 && o.dbEngine != rawdb.DBLeveldb && o.dbEngine != rawdb.DBPebble {
|
||||
return nil, fmt.Errorf("unknown db.engine %v", o.dbEngine)
|
||||
}
|
||||
// Retrieve any pre-existing database's type and use that or the requested one
|
||||
// as long as there's no conflict between the two types
|
||||
existingDb := rawdb.PreexistingDatabase(o.Directory)
|
||||
if len(existingDb) != 0 && len(o.Type) != 0 && o.Type != existingDb {
|
||||
return nil, fmt.Errorf("db.engine choice was %v but found pre-existing %v database in specified data directory", o.Type, existingDb)
|
||||
existingDb := rawdb.PreexistingDatabase(o.directory)
|
||||
if len(existingDb) != 0 && len(o.dbEngine) != 0 && o.dbEngine != existingDb {
|
||||
return nil, fmt.Errorf("db.engine choice was %v but found pre-existing %v database in specified data directory", o.dbEngine, existingDb)
|
||||
}
|
||||
if o.Type == rawdb.DBPebble || existingDb == rawdb.DBPebble {
|
||||
if o.dbEngine == rawdb.DBPebble || existingDb == rawdb.DBPebble {
|
||||
log.Info("Using pebble as the backing database")
|
||||
return newPebbleDBDatabase(o.Directory, o.Cache, o.Handles, o.Namespace, o.ReadOnly)
|
||||
return newPebbleDBDatabase(o.directory, o.Cache, o.Handles, o.MetricsNamespace, o.ReadOnly)
|
||||
}
|
||||
if o.Type == rawdb.DBLeveldb || existingDb == rawdb.DBLeveldb {
|
||||
if o.dbEngine == rawdb.DBLeveldb || existingDb == rawdb.DBLeveldb {
|
||||
log.Info("Using leveldb as the backing database")
|
||||
return newLevelDBDatabase(o.Directory, o.Cache, o.Handles, o.Namespace, o.ReadOnly)
|
||||
return newLevelDBDatabase(o.directory, o.Cache, o.Handles, o.MetricsNamespace, o.ReadOnly)
|
||||
}
|
||||
// No pre-existing database, no user-requested one either. Default to Pebble.
|
||||
log.Info("Defaulting to pebble as the backing database")
|
||||
return newPebbleDBDatabase(o.Directory, o.Cache, o.Handles, o.Namespace, o.ReadOnly)
|
||||
return newPebbleDBDatabase(o.directory, o.Cache, o.Handles, o.MetricsNamespace, o.ReadOnly)
|
||||
}
|
||||
|
||||
// newLevelDBDatabase creates a persistent key-value database without a freezer
|
||||
// moving immutable chain segments into cold storage.
|
||||
func newLevelDBDatabase(file string, cache int, handles int, namespace string, readonly bool) (ethdb.Database, error) {
|
||||
func newLevelDBDatabase(file string, cache int, handles int, namespace string, readonly bool) (ethdb.KeyValueStore, error) {
|
||||
db, err := leveldb.New(file, cache, handles, namespace, readonly)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
|
@ -102,7 +114,7 @@ func newLevelDBDatabase(file string, cache int, handles int, namespace string, r
|
|||
|
||||
// newPebbleDBDatabase creates a persistent key-value database without a freezer
|
||||
// moving immutable chain segments into cold storage.
|
||||
func newPebbleDBDatabase(file string, cache int, handles int, namespace string, readonly bool) (ethdb.Database, error) {
|
||||
func newPebbleDBDatabase(file string, cache int, handles int, namespace string, readonly bool) (ethdb.KeyValueStore, error) {
|
||||
db, err := pebble.New(file, cache, handles, namespace, readonly)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
|
|
|||
71
node/node.go
71
node/node.go
|
|
@ -697,27 +697,27 @@ func (n *Node) EventMux() *event.TypeMux {
|
|||
}
|
||||
|
||||
// OpenDatabase opens an existing database with the given name (or creates one if no
|
||||
// previous can be found) from within the node's instance directory. If the node is
|
||||
// ephemeral, a memory database is returned.
|
||||
func (n *Node) OpenDatabase(name string, cache, handles int, namespace string, readonly bool) (ethdb.Database, error) {
|
||||
// previous can be found) from within the node's instance directory. If the node has no
|
||||
// data directory, an in-memory database is returned.
|
||||
func (n *Node) OpenDatabaseWithOptions(name string, opt DatabaseOptions) (ethdb.Database, error) {
|
||||
n.lock.Lock()
|
||||
defer n.lock.Unlock()
|
||||
if n.state == closedState {
|
||||
return nil, ErrNodeStopped
|
||||
}
|
||||
|
||||
var db ethdb.Database
|
||||
var err error
|
||||
if n.config.DataDir == "" {
|
||||
db = rawdb.NewMemoryDatabase()
|
||||
db, _ = rawdb.Open(memorydb.New(), rawdb.OpenOptions{
|
||||
MetricsNamespace: opt.MetricsNamespace,
|
||||
ReadOnly: opt.ReadOnly,
|
||||
})
|
||||
} else {
|
||||
db, err = openDatabase(openOptions{
|
||||
Type: n.config.DBEngine,
|
||||
Directory: n.ResolvePath(name),
|
||||
Namespace: namespace,
|
||||
Cache: cache,
|
||||
Handles: handles,
|
||||
ReadOnly: readonly,
|
||||
opt.AncientsDirectory = n.ResolveAncient(name, opt.AncientsDirectory)
|
||||
db, err = openDatabase(internalOpenOptions{
|
||||
directory: n.ResolvePath(name),
|
||||
dbEngine: n.config.DBEngine,
|
||||
DatabaseOptions: opt,
|
||||
})
|
||||
}
|
||||
if err == nil {
|
||||
|
|
@ -726,36 +726,31 @@ func (n *Node) OpenDatabase(name string, cache, handles int, namespace string, r
|
|||
return db, err
|
||||
}
|
||||
|
||||
// OpenDatabaseWithFreezer opens an existing database with the given name (or
|
||||
// creates one if no previous can be found) from within the node's data directory,
|
||||
// also attaching a chain freezer to it that moves ancient chain data from the
|
||||
// database to immutable append-only files. If the node is an ephemeral one, a
|
||||
// memory database is returned.
|
||||
func (n *Node) OpenDatabaseWithFreezer(name string, cache, handles int, ancient string, namespace string, readonly bool) (ethdb.Database, error) {
|
||||
n.lock.Lock()
|
||||
defer n.lock.Unlock()
|
||||
if n.state == closedState {
|
||||
return nil, ErrNodeStopped
|
||||
}
|
||||
var db ethdb.Database
|
||||
var err error
|
||||
if n.config.DataDir == "" {
|
||||
db, err = rawdb.NewDatabaseWithFreezer(memorydb.New(), "", namespace, readonly)
|
||||
} else {
|
||||
db, err = openDatabase(openOptions{
|
||||
Type: n.config.DBEngine,
|
||||
Directory: n.ResolvePath(name),
|
||||
AncientsDirectory: n.ResolveAncient(name, ancient),
|
||||
Namespace: namespace,
|
||||
// OpenDatabase opens an existing database with the given name (or creates one if no
|
||||
// previous can be found) from within the node's instance directory.
|
||||
// If the node has no data directory, an in-memory database is returned.
|
||||
// Deprecated: use OpenDatabaseWithOptions instead.
|
||||
func (n *Node) OpenDatabase(name string, cache, handles int, namespace string, readonly bool) (ethdb.Database, error) {
|
||||
return n.OpenDatabaseWithOptions(name, DatabaseOptions{
|
||||
MetricsNamespace: namespace,
|
||||
Cache: cache,
|
||||
Handles: handles,
|
||||
ReadOnly: readonly,
|
||||
})
|
||||
}
|
||||
|
||||
// OpenDatabaseWithFreezer opens an existing database with the given name (or
|
||||
// creates one if no previous can be found) from within the node's data directory.
|
||||
// If the node has no data directory, an in-memory database is returned.
|
||||
// Deprecated: use OpenDatabaseWithOptions instead.
|
||||
func (n *Node) OpenDatabaseWithFreezer(name string, cache, handles int, ancient string, namespace string, readonly bool) (ethdb.Database, error) {
|
||||
return n.OpenDatabaseWithOptions(name, DatabaseOptions{
|
||||
AncientsDirectory: n.ResolveAncient(name, ancient),
|
||||
MetricsNamespace: namespace,
|
||||
Cache: cache,
|
||||
Handles: handles,
|
||||
ReadOnly: readonly,
|
||||
})
|
||||
}
|
||||
if err == nil {
|
||||
db = n.wrapDatabase(db)
|
||||
}
|
||||
return db, err
|
||||
}
|
||||
|
||||
// ResolvePath returns the absolute path of a resource in the instance directory.
|
||||
|
|
|
|||
|
|
@ -123,7 +123,7 @@ type tester struct {
|
|||
|
||||
func newTester(t *testing.T, historyLimit uint64, isVerkle bool, layers int) *tester {
|
||||
var (
|
||||
disk, _ = rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), t.TempDir(), "", false)
|
||||
disk, _ = rawdb.Open(rawdb.NewMemoryDatabase(), rawdb.OpenOptions{Ancient: t.TempDir()})
|
||||
db = New(disk, &Config{
|
||||
StateHistory: historyLimit,
|
||||
TrieCleanSize: 256 * 1024,
|
||||
|
|
|
|||
Loading…
Reference in a new issue