internal/era: New EraE implementation (#32157)
Some checks are pending
/ Linux Build (push) Waiting to run
/ Linux Build (arm) (push) Waiting to run
/ Keeper Build (push) Waiting to run
/ Windows Build (push) Waiting to run
/ Docker Image (push) Waiting to run

Here is a draft for the New EraE implementation. The code follows along
with the spec listed at https://hackmd.io/pIZlxnitSciV5wUgW6W20w.

---------

Co-authored-by: shantichanal <158101918+shantichanal@users.noreply.github.com>
Co-authored-by: lightclient <lightclient@protonmail.com>
Co-authored-by: MariusVanDerWijden <m.vanderwijden@live.de>
Co-authored-by: Sina Mahmoodi <itz.s1na@gmail.com>
This commit is contained in:
shazam8253 2026-02-09 10:30:19 -05:00 committed by GitHub
parent c12959dc8c
commit c9b7ae422c
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
21 changed files with 2151 additions and 545 deletions

View file

@ -30,6 +30,8 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/internal/era"
"github.com/ethereum/go-ethereum/internal/era/execdb"
"github.com/ethereum/go-ethereum/internal/era/onedb"
"github.com/ethereum/go-ethereum/internal/ethapi"
"github.com/ethereum/go-ethereum/internal/flags"
"github.com/ethereum/go-ethereum/params"
@ -53,7 +55,7 @@ var (
eraSizeFlag = &cli.IntFlag{
Name: "size",
Usage: "number of blocks per era",
Value: era.MaxEra1Size,
Value: era.MaxSize,
}
txsFlag = &cli.BoolFlag{
Name: "txs",
@ -131,7 +133,7 @@ func block(ctx *cli.Context) error {
return nil
}
// info prints some high-level information about the era1 file.
// info prints some high-level information about the era file.
func info(ctx *cli.Context) error {
epoch, err := strconv.ParseUint(ctx.Args().First(), 10, 64)
if err != nil {
@ -142,33 +144,34 @@ func info(ctx *cli.Context) error {
return err
}
defer e.Close()
acc, err := e.Accumulator()
if err != nil {
return fmt.Errorf("error reading accumulator: %w", err)
var (
accHex string
tdStr string
)
if acc, err := e.Accumulator(); err == nil {
accHex = acc.Hex()
}
td, err := e.InitialTD()
if err != nil {
return fmt.Errorf("error reading total difficulty: %w", err)
if td, err := e.InitialTD(); err == nil {
tdStr = td.String()
}
info := struct {
Accumulator common.Hash `json:"accumulator"`
TotalDifficulty *big.Int `json:"totalDifficulty"`
Accumulator string `json:"accumulator,omitempty"`
TotalDifficulty string `json:"totalDifficulty,omitempty"`
StartBlock uint64 `json:"startBlock"`
Count uint64 `json:"count"`
}{
acc, td, e.Start(), e.Count(),
accHex, tdStr, e.Start(), e.Count(),
}
b, _ := json.MarshalIndent(info, "", " ")
fmt.Println(string(b))
return nil
}
// open opens an era1 file at a certain epoch.
func open(ctx *cli.Context, epoch uint64) (*era.Era, error) {
var (
dir = ctx.String(dirFlag.Name)
network = ctx.String(networkFlag.Name)
)
// open opens an era file at a certain epoch.
func open(ctx *cli.Context, epoch uint64) (era.Era, error) {
dir := ctx.String(dirFlag.Name)
network := ctx.String(networkFlag.Name)
entries, err := era.ReadDir(dir, network)
if err != nil {
return nil, fmt.Errorf("error reading era dir: %w", err)
@ -176,7 +179,28 @@ func open(ctx *cli.Context, epoch uint64) (*era.Era, error) {
if epoch >= uint64(len(entries)) {
return nil, fmt.Errorf("epoch out-of-bounds: last %d, want %d", len(entries)-1, epoch)
}
return era.Open(filepath.Join(dir, entries[epoch]))
path := filepath.Join(dir, entries[epoch])
return openByPath(path)
}
// openByPath opens a single era file, selecting the reader by file
// extension: execdb for ".erae" files and onedb for ".era1" files.
// Files with any other extension are rejected. Note that there is no
// fallback from one reader to the other — the extension alone decides
// which format is attempted.
func openByPath(path string) (era.Era, error) {
	switch strings.ToLower(filepath.Ext(path)) {
	case ".erae":
		e, err := execdb.Open(path)
		if err != nil {
			return nil, err
		}
		return e, nil
	case ".era1":
		e, err := onedb.Open(path)
		if err != nil {
			return nil, err
		}
		return e, nil
	default:
		return nil, fmt.Errorf("unsupported or unreadable era file: %s", path)
	}
}
// verify checks each era1 file in a directory to ensure it is well-formed and
@ -203,18 +227,58 @@ func verify(ctx *cli.Context) error {
return fmt.Errorf("error reading %s: %w", dir, err)
}
if len(entries) != len(roots) {
return errors.New("number of era1 files should match the number of accumulator hashes")
// Build the verification list respecting the rule:
// era1: must have accumulator, always verify
// erae: verify only if accumulator exists (pre-merge)
// Build list of files to verify.
verify := make([]string, 0, len(entries))
for _, name := range entries {
path := filepath.Join(dir, name)
ext := strings.ToLower(filepath.Ext(name))
switch ext {
case ".era1":
e, err := onedb.Open(path)
if err != nil {
return fmt.Errorf("error opening era1 file %s: %w", name, err)
}
_, accErr := e.Accumulator()
e.Close()
if accErr != nil {
return fmt.Errorf("era1 file %s missing accumulator: %w", name, accErr)
}
verify = append(verify, path)
case ".erae":
e, err := execdb.Open(path)
if err != nil {
return fmt.Errorf("error opening erae file %s: %w", name, err)
}
_, accErr := e.Accumulator()
e.Close()
if accErr == nil {
verify = append(verify, path) // pre-merge only
}
default:
return fmt.Errorf("unsupported era file: %s", name)
}
}
if len(verify) != len(roots) {
return fmt.Errorf("mismatch between eras to verify (%d) and provided roots (%d)", len(verify), len(roots))
}
// Verify each epoch matches the expected root.
for i, want := range roots {
// Wrap in function so defers don't stack.
err := func() error {
name := entries[i]
e, err := era.Open(filepath.Join(dir, name))
path := verify[i]
name := filepath.Base(path)
e, err := openByPath(path)
if err != nil {
return fmt.Errorf("error opening era1 file %s: %w", name, err)
return fmt.Errorf("error opening era file %s: %w", name, err)
}
defer e.Close()
// Read accumulator and check against expected.
@ -243,7 +307,7 @@ func verify(ctx *cli.Context) error {
}
// checkAccumulator verifies the accumulator matches the data in the Era.
func checkAccumulator(e *era.Era) error {
func checkAccumulator(e era.Era) error {
var (
err error
want common.Hash
@ -257,7 +321,7 @@ func checkAccumulator(e *era.Era) error {
if td, err = e.InitialTD(); err != nil {
return fmt.Errorf("error reading total difficulty: %w", err)
}
it, err := era.NewIterator(e)
it, err := e.Iterator()
if err != nil {
return fmt.Errorf("error making era iterator: %w", err)
}
@ -290,10 +354,14 @@ func checkAccumulator(e *era.Era) error {
if rr != block.ReceiptHash() {
return fmt.Errorf("receipt root in block %d mismatch: want %s, got %s", block.NumberU64(), block.ReceiptHash(), rr)
}
// Only include pre-merge blocks in accumulator calculation.
// Post-merge blocks have difficulty == 0.
if block.Difficulty().Sign() > 0 {
hashes = append(hashes, block.Hash())
td.Add(td, block.Difficulty())
tds = append(tds, new(big.Int).Set(td))
}
}
if it.Error() != nil {
return fmt.Errorf("error reading block %d: %w", it.Number(), it.Error())
}

View file

@ -20,6 +20,7 @@ import (
"encoding/json"
"errors"
"fmt"
"io"
"os"
"path/filepath"
"regexp"
@ -43,6 +44,8 @@ import (
"github.com/ethereum/go-ethereum/internal/debug"
"github.com/ethereum/go-ethereum/internal/era"
"github.com/ethereum/go-ethereum/internal/era/eradl"
"github.com/ethereum/go-ethereum/internal/era/execdb"
"github.com/ethereum/go-ethereum/internal/era/onedb"
"github.com/ethereum/go-ethereum/internal/flags"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/node"
@ -153,7 +156,7 @@ be gzipped.`,
Name: "import-history",
Usage: "Import an Era archive",
ArgsUsage: "<dir>",
Flags: slices.Concat([]cli.Flag{utils.TxLookupLimitFlag, utils.TransactionHistoryFlag}, utils.DatabaseFlags, utils.NetworkFlags),
Flags: slices.Concat([]cli.Flag{utils.TxLookupLimitFlag, utils.TransactionHistoryFlag, utils.EraFormatFlag}, utils.DatabaseFlags, utils.NetworkFlags),
Description: `
The import-history command will import blocks and their corresponding receipts
from Era archives.
@ -164,7 +167,7 @@ from Era archives.
Name: "export-history",
Usage: "Export blockchain history to Era archives",
ArgsUsage: "<dir> <first> <last>",
Flags: utils.DatabaseFlags,
Flags: slices.Concat([]cli.Flag{utils.EraFormatFlag}, utils.DatabaseFlags),
Description: `
The export-history command will export blocks and their corresponding receipts
into Era archives. Eras are typically packaged in steps of 8192 blocks.
@ -516,15 +519,27 @@ func importHistory(ctx *cli.Context) error {
network = networks[0]
}
if err := utils.ImportHistory(chain, dir, network); err != nil {
var (
format = ctx.String(utils.EraFormatFlag.Name)
from func(era.ReadAtSeekCloser) (era.Era, error)
)
switch format {
case "era1", "era":
from = onedb.From
case "erae":
from = execdb.From
default:
return fmt.Errorf("unknown --era.format %q (expected 'era1' or 'erae')", format)
}
if err := utils.ImportHistory(chain, dir, network, from); err != nil {
return err
}
fmt.Printf("Import done in %v\n", time.Since(start))
return nil
}
// exportHistory exports chain history in Era archives at a specified
// directory.
// exportHistory exports chain history in Era archives at a specified directory.
func exportHistory(ctx *cli.Context) error {
if ctx.Args().Len() != 3 {
utils.Fatalf("usage: %s", ctx.Command.ArgsUsage)
@ -550,10 +565,26 @@ func exportHistory(ctx *cli.Context) error {
if head := chain.CurrentSnapBlock(); uint64(last) > head.Number.Uint64() {
utils.Fatalf("Export error: block number %d larger than head block %d\n", uint64(last), head.Number.Uint64())
}
err := utils.ExportHistory(chain, dir, uint64(first), uint64(last), uint64(era.MaxEra1Size))
if err != nil {
var (
format = ctx.String(utils.EraFormatFlag.Name)
filename func(network string, epoch int, root common.Hash) string
newBuilder func(w io.Writer) era.Builder
)
switch format {
case "era1", "era":
newBuilder = func(w io.Writer) era.Builder { return onedb.NewBuilder(w) }
filename = func(network string, epoch int, root common.Hash) string { return onedb.Filename(network, epoch, root) }
case "erae":
newBuilder = func(w io.Writer) era.Builder { return execdb.NewBuilder(w) }
filename = func(network string, epoch int, root common.Hash) string { return execdb.Filename(network, epoch, root) }
default:
return fmt.Errorf("unknown archive format %q (use 'era1' or 'erae')", format)
}
if err := utils.ExportHistory(chain, dir, uint64(first), uint64(last), newBuilder, filename); err != nil {
utils.Fatalf("Export error: %v\n", err)
}
fmt.Printf("Export done in %v\n", time.Since(start))
return nil
}

View file

@ -57,6 +57,8 @@ const (
importBatchSize = 2500
)
type EraFileFormat int
// ErrImportInterrupted is returned when the user interrupts the import process.
var ErrImportInterrupted = errors.New("interrupted")
@ -250,7 +252,7 @@ func readList(filename string) ([]string, error) {
// ImportHistory imports era files containing historical block information,
// starting from genesis. It assumes that the chain segments in the provided
// era files are all canonical and verified.
func ImportHistory(chain *core.BlockChain, dir string, network string) error {
func ImportHistory(chain *core.BlockChain, dir string, network string, from func(f era.ReadAtSeekCloser) (era.Era, error)) error {
if chain.CurrentSnapBlock().Number.BitLen() != 0 {
return errors.New("history import only supported when starting from genesis")
}
@ -263,42 +265,49 @@ func ImportHistory(chain *core.BlockChain, dir string, network string) error {
return fmt.Errorf("unable to read checksums.txt: %w", err)
}
if len(checksums) != len(entries) {
return fmt.Errorf("expected equal number of checksums and entries, have: %d checksums, %d entries", len(checksums), len(entries))
return fmt.Errorf("expected equal number of checksums and entries, have: %d checksums, %d entries",
len(checksums), len(entries))
}
var (
start = time.Now()
reported = time.Now()
imported = 0
h = sha256.New()
buf = bytes.NewBuffer(nil)
scratch = bytes.NewBuffer(nil)
)
for i, filename := range entries {
for i, file := range entries {
err := func() error {
f, err := os.Open(filepath.Join(dir, filename))
path := filepath.Join(dir, file)
// validate against checksum file in directory
f, err := os.Open(path)
if err != nil {
return fmt.Errorf("unable to open era: %w", err)
return fmt.Errorf("open %s: %w", path, err)
}
defer f.Close()
// Validate checksum.
if _, err := io.Copy(h, f); err != nil {
return fmt.Errorf("unable to recalculate checksum: %w", err)
}
if have, want := common.BytesToHash(h.Sum(buf.Bytes()[:])).Hex(), checksums[i]; have != want {
return fmt.Errorf("checksum mismatch: have %s, want %s", have, want)
return fmt.Errorf("checksum %s: %w", path, err)
}
got := common.BytesToHash(h.Sum(scratch.Bytes()[:])).Hex()
want := checksums[i]
h.Reset()
buf.Reset()
scratch.Reset()
if got != want {
return fmt.Errorf("%s checksum mismatch: have %s want %s", file, got, want)
}
// Import all block data from Era1.
e, err := era.From(f)
e, err := from(f)
if err != nil {
return fmt.Errorf("error opening era: %w", err)
}
it, err := era.NewIterator(e)
it, err := e.Iterator()
if err != nil {
return fmt.Errorf("error making era reader: %w", err)
return fmt.Errorf("error creating iterator: %w", err)
}
for it.Next() {
block, err := it.Block()
if err != nil {
@ -311,26 +320,28 @@ func ImportHistory(chain *core.BlockChain, dir string, network string) error {
if err != nil {
return fmt.Errorf("error reading receipts %d: %w", it.Number(), err)
}
encReceipts := types.EncodeBlockReceiptLists([]types.Receipts{receipts})
if _, err := chain.InsertReceiptChain([]*types.Block{block}, encReceipts, math.MaxUint64); err != nil {
enc := types.EncodeBlockReceiptLists([]types.Receipts{receipts})
if _, err := chain.InsertReceiptChain([]*types.Block{block}, enc, math.MaxUint64); err != nil {
return fmt.Errorf("error inserting body %d: %w", it.Number(), err)
}
imported += 1
imported++
// Give the user some feedback that something is happening.
if time.Since(reported) >= 8*time.Second {
log.Info("Importing Era files", "head", it.Number(), "imported", imported, "elapsed", common.PrettyDuration(time.Since(start)))
log.Info("Importing Era files", "head", it.Number(), "imported", imported,
"elapsed", common.PrettyDuration(time.Since(start)))
imported = 0
reported = time.Now()
}
}
if err := it.Error(); err != nil {
return err
}
return nil
}()
if err != nil {
return err
}
}
return nil
}
@ -389,7 +400,6 @@ func ExportAppendChain(blockchain *core.BlockChain, fn string, first uint64, las
return err
}
defer fh.Close()
var writer io.Writer = fh
if strings.HasSuffix(fn, ".gz") {
writer = gzip.NewWriter(writer)
@ -405,7 +415,7 @@ func ExportAppendChain(blockchain *core.BlockChain, fn string, first uint64, las
// ExportHistory exports blockchain history into the specified directory,
// following the Era format.
func ExportHistory(bc *core.BlockChain, dir string, first, last, step uint64) error {
func ExportHistory(bc *core.BlockChain, dir string, first, last uint64, newBuilder func(io.Writer) era.Builder, filename func(network string, epoch int, lastBlockHash common.Hash) string) error {
log.Info("Exporting blockchain history", "dir", dir)
if head := bc.CurrentBlock().Number.Uint64(); head < last {
log.Warn("Last block beyond head, setting last = head", "head", head, "last", last)
@ -418,76 +428,100 @@ func ExportHistory(bc *core.BlockChain, dir string, first, last, step uint64) er
if err := os.MkdirAll(dir, os.ModePerm); err != nil {
return fmt.Errorf("error creating output directory: %w", err)
}
var (
start = time.Now()
reported = time.Now()
h = sha256.New()
buf = bytes.NewBuffer(nil)
td = new(big.Int)
checksums []string
)
td := new(big.Int)
for i := uint64(0); i < first; i++ {
td.Add(td, bc.GetHeaderByNumber(i).Difficulty)
// Compute initial TD by accumulating difficulty from genesis to first-1.
// This is necessary because TD is no longer stored in the database. Only
// compute if a segment of the export is pre-merge.
b := bc.GetBlockByNumber(first)
if b == nil {
return fmt.Errorf("block #%d not found", first)
}
for i := first; i <= last; i += step {
err := func() error {
filename := filepath.Join(dir, era.Filename(network, int(i/step), common.Hash{}))
f, err := os.Create(filename)
if first > 0 && b.Difficulty().Sign() != 0 {
log.Info("Computing initial total difficulty", "from", 0, "to", first-1)
for i := uint64(0); i < first; i++ {
b := bc.GetBlockByNumber(i)
if b == nil {
return fmt.Errorf("block #%d not found while computing initial TD", i)
}
td.Add(td, b.Difficulty())
}
log.Info("Initial total difficulty computed", "td", td)
}
for batch := first; batch <= last; batch += uint64(era.MaxSize) {
idx := int(batch / uint64(era.MaxSize))
tmpPath := filepath.Join(dir, filename(network, idx, common.Hash{}))
if err := func() error {
f, err := os.Create(tmpPath)
if err != nil {
return fmt.Errorf("could not create era file: %w", err)
return err
}
defer f.Close()
w := era.NewBuilder(f)
for j := uint64(0); j < step && j <= last-i; j++ {
var (
n = i + j
block = bc.GetBlockByNumber(n)
)
builder := newBuilder(f)
for j := uint64(0); j < uint64(era.MaxSize) && batch+j <= last; j++ {
n := batch + j
block := bc.GetBlockByNumber(n)
if block == nil {
return fmt.Errorf("export failed on #%d: not found", n)
return fmt.Errorf("block #%d not found", n)
}
receipts := bc.GetReceiptsByHash(block.Hash())
if receipts == nil {
return fmt.Errorf("export failed on #%d: receipts not found", n)
receipt := bc.GetReceiptsByHash(block.Hash())
if receipt == nil {
return fmt.Errorf("receipts for #%d missing", n)
}
// For pre-merge blocks, pass accumulated TD.
// For post-merge blocks (difficulty == 0), pass nil TD.
var blockTD *big.Int
if block.Difficulty().Sign() != 0 {
td.Add(td, block.Difficulty())
if err := w.Add(block, receipts, new(big.Int).Set(td)); err != nil {
blockTD = new(big.Int).Set(td)
}
if err := builder.Add(block, receipt, blockTD); err != nil {
return err
}
}
root, err := w.Finalize()
id, err := builder.Finalize()
if err != nil {
return fmt.Errorf("export failed to finalize %d: %w", step/i, err)
return err
}
// Set correct filename with root.
os.Rename(filename, filepath.Join(dir, era.Filename(network, int(i/step), root)))
// Compute checksum of entire Era1.
if _, err := f.Seek(0, io.SeekStart); err != nil {
return err
}
if _, err := io.Copy(h, f); err != nil {
return fmt.Errorf("unable to calculate checksum: %w", err)
}
checksums = append(checksums, common.BytesToHash(h.Sum(buf.Bytes()[:])).Hex())
h.Reset()
buf.Reset()
return nil
}()
if err != nil {
if _, err := io.Copy(h, f); err != nil {
return err
}
checksums = append(checksums, common.BytesToHash(h.Sum(buf.Bytes()[:])).Hex())
// Close before rename. It's required on Windows.
f.Close()
final := filepath.Join(dir, filename(network, idx, id))
return os.Rename(tmpPath, final)
}(); err != nil {
return err
}
if time.Since(reported) >= 8*time.Second {
log.Info("Exporting blocks", "exported", i, "elapsed", common.PrettyDuration(time.Since(start)))
log.Info("export progress", "exported", batch, "elapsed", common.PrettyDuration(time.Since(start)))
reported = time.Now()
}
}
os.WriteFile(filepath.Join(dir, "checksums.txt"), []byte(strings.Join(checksums, "\n")), os.ModePerm)
log.Info("Exported blockchain to", "dir", dir)
_ = os.WriteFile(filepath.Join(dir, "checksums.txt"), []byte(strings.Join(checksums, "\n")), os.ModePerm)
return nil
}

View file

@ -1042,6 +1042,12 @@ Please note that --` + MetricsHTTPFlag.Name + ` must be set to start the server.
Value: metrics.DefaultConfig.InfluxDBOrganization,
Category: flags.MetricsCategory,
}
// EraFormatFlag selects the on-disk era archive format ('era1' or 'erae')
// used by history import and export.
EraFormatFlag = &cli.StringFlag{
Name: "era.format",
Usage: "Archive format: 'era1' or 'erae'",
}
)
var (

View file

@ -33,6 +33,8 @@ import (
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/internal/era"
"github.com/ethereum/go-ethereum/internal/era/execdb"
"github.com/ethereum/go-ethereum/internal/era/onedb"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/trie"
"github.com/ethereum/go-ethereum/triedb"
@ -44,6 +46,16 @@ var (
)
func TestHistoryImportAndExport(t *testing.T) {
for _, tt := range []struct {
name string
builder func(io.Writer) era.Builder
filename func(network string, epoch int, root common.Hash) string
from func(f era.ReadAtSeekCloser) (era.Era, error)
}{
{"era1", onedb.NewBuilder, onedb.Filename, onedb.From},
{"erae", execdb.NewBuilder, execdb.Filename, execdb.From},
} {
t.Run(tt.name, func(t *testing.T) {
var (
key, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
address = crypto.PubkeyToAddress(key.PublicKey)
@ -89,7 +101,7 @@ func TestHistoryImportAndExport(t *testing.T) {
dir := t.TempDir()
// Export history to temp directory.
if err := ExportHistory(chain, dir, 0, count, step); err != nil {
if err := ExportHistory(chain, dir, 0, count, tt.builder, tt.filename); err != nil {
t.Fatalf("error exporting history: %v", err)
}
@ -118,12 +130,12 @@ func TestHistoryImportAndExport(t *testing.T) {
if got, want := common.BytesToHash(h.Sum(buf.Bytes()[:])).Hex(), checksums[i]; got != want {
t.Fatalf("checksum %d does not match: got %s, want %s", i, got, want)
}
e, err := era.From(f)
e, err := tt.from(f)
if err != nil {
t.Fatalf("error opening era: %v", err)
}
defer e.Close()
it, err := era.NewIterator(e)
it, err := e.Iterator()
if err != nil {
t.Fatalf("error making era reader: %v", err)
}
@ -170,10 +182,12 @@ func TestHistoryImportAndExport(t *testing.T) {
if err != nil {
t.Fatalf("unable to initialize chain: %v", err)
}
if err := ImportHistory(imported, dir, "mainnet"); err != nil {
if err := ImportHistory(imported, dir, "mainnet", tt.from); err != nil {
t.Fatalf("failed to import chain: %v", err)
}
if have, want := imported.CurrentHeader(), chain.CurrentHeader(); have.Hash() != want.Hash() {
t.Fatalf("imported chain does not match expected, have (%d, %s) want (%d, %s)", have.Number, have.Hash(), want.Number, want.Hash())
}
})
}
}

View file

@ -27,6 +27,7 @@ import (
"github.com/ethereum/go-ethereum/common/lru"
"github.com/ethereum/go-ethereum/internal/era"
"github.com/ethereum/go-ethereum/internal/era/onedb"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/rlp"
)
@ -51,7 +52,7 @@ type Store struct {
type fileCacheEntry struct {
refcount int // reference count. This is protected by Store.mu!
opened chan struct{} // signals opening of file has completed
file *era.Era // the file
file *onedb.Era // the file
err error // error from opening the file
}
@ -102,7 +103,7 @@ func (db *Store) Close() {
// GetRawBody returns the raw body for a given block number.
func (db *Store) GetRawBody(number uint64) ([]byte, error) {
epoch := number / uint64(era.MaxEra1Size)
epoch := number / uint64(era.MaxSize)
entry := db.getEraByEpoch(epoch)
if entry.err != nil {
if errors.Is(entry.err, fs.ErrNotExist) {
@ -117,7 +118,7 @@ func (db *Store) GetRawBody(number uint64) ([]byte, error) {
// GetRawReceipts returns the raw receipts for a given block number.
func (db *Store) GetRawReceipts(number uint64) ([]byte, error) {
epoch := number / uint64(era.MaxEra1Size)
epoch := number / uint64(era.MaxSize)
entry := db.getEraByEpoch(epoch)
if entry.err != nil {
if errors.Is(entry.err, fs.ErrNotExist) {
@ -249,7 +250,7 @@ func (db *Store) getCacheEntry(epoch uint64) (stat fileCacheStatus, entry *fileC
}
// fileOpened is called after an era file has been successfully opened.
func (db *Store) fileOpened(epoch uint64, entry *fileCacheEntry, file *era.Era) {
func (db *Store) fileOpened(epoch uint64, entry *fileCacheEntry, file *onedb.Era) {
db.mu.Lock()
defer db.mu.Unlock()
@ -282,7 +283,7 @@ func (db *Store) fileFailedToOpen(epoch uint64, entry *fileCacheEntry, err error
entry.err = err
}
func (db *Store) openEraFile(epoch uint64) (*era.Era, error) {
func (db *Store) openEraFile(epoch uint64) (*onedb.Era, error) {
// File name scheme is <network>-<epoch>-<root>.
glob := fmt.Sprintf("*-%05d-*.era1", epoch)
matches, err := filepath.Glob(filepath.Join(db.datadir, glob))
@ -297,17 +298,17 @@ func (db *Store) openEraFile(epoch uint64) (*era.Era, error) {
}
filename := matches[0]
e, err := era.Open(filename)
e, err := onedb.Open(filename)
if err != nil {
return nil, err
}
// Sanity-check start block.
if e.Start()%uint64(era.MaxEra1Size) != 0 {
if e.Start()%uint64(era.MaxSize) != 0 {
e.Close()
return nil, fmt.Errorf("pre-merge era1 file has invalid boundary. %d %% %d != 0", e.Start(), era.MaxEra1Size)
return nil, fmt.Errorf("pre-merge era1 file has invalid boundary. %d %% %d != 0", e.Start(), era.MaxSize)
}
log.Debug("Opened era1 file", "epoch", epoch)
return e, nil
return e.(*onedb.Era), nil
}
// doneWithFile signals that the caller has finished using a file.

View file

@ -424,3 +424,33 @@ func EncodeBlockReceiptLists(receipts []Receipts) []rlp.RawValue {
}
return result
}
// SlimReceipt is a wrapper around a Receipt with RLP serialization that omits
// the Bloom field and includes the tx type. Used for era files.
type SlimReceipt Receipt

// slimReceiptRLP is the wire form used by SlimReceipt's RLP encoding:
// [tx-type, post-state-or-status, cumulative-gas, logs]. The Bloom filter is
// deliberately absent; it can be recomputed from Logs.
type slimReceiptRLP struct {
	Type              uint8
	StatusEncoding    []byte
	CumulativeGasUsed uint64
	Logs              []*Log
}
// EncodeRLP implements rlp.Encoder. The receipt is serialized as the
// four-tuple [tx-type, post-state-or-status, cumulative-gas, logs]; the
// Bloom field is omitted from the encoding.
func (r *SlimReceipt) EncodeRLP(w io.Writer) error {
	return rlp.Encode(w, &slimReceiptRLP{
		Type:              r.Type,
		StatusEncoding:    (*Receipt)(r).statusEncoding(),
		CumulativeGasUsed: r.CumulativeGasUsed,
		Logs:              r.Logs,
	})
}
// DecodeRLP implements rlp.Decoder. It reads the slim four-tuple form
// [tx-type, post-state-or-status, cumulative-gas, logs] and restores the
// status / post-state from its encoded representation.
func (r *SlimReceipt) DecodeRLP(s *rlp.Stream) error {
	var dec slimReceiptRLP
	if err := s.Decode(&dec); err != nil {
		return err
	}
	r.Type, r.CumulativeGasUsed, r.Logs = dec.Type, dec.CumulativeGasUsed, dec.Logs
	return (*Receipt)(r).setStatus(dec.StatusEncoding)
}

View file

@ -512,6 +512,45 @@ func TestReceiptUnmarshalBinary(t *testing.T) {
}
}
// TestSlimReceiptEncodingDecoding round-trips receipts of each supported tx
// type through the SlimReceipt RLP wrapper and checks that every field the
// slim form carries (type, status, post-state, cumulative gas, logs) survives.
// The Bloom field is intentionally not checked: the slim encoding omits it by
// design.
func TestSlimReceiptEncodingDecoding(t *testing.T) {
	// legacyReceipt, accessListReceipt and eip1559Receipt are package-level
	// fixtures declared elsewhere in this file.
	tests := []*Receipt{
		legacyReceipt,
		accessListReceipt,
		eip1559Receipt,
		{
			Type:              BlobTxType,
			Status:            ReceiptStatusSuccessful,
			CumulativeGasUsed: 100,
			Logs:              []*Log{},
		},
	}
	for i, want := range tests {
		// Encode through the SlimReceipt view of the receipt...
		enc, err := rlp.EncodeToBytes((*SlimReceipt)(want))
		if err != nil {
			t.Fatalf("test %d: encode error: %v", i, err)
		}
		// ...then decode into a fresh value and compare field by field.
		got := new(SlimReceipt)
		if err := rlp.DecodeBytes(enc, got); err != nil {
			t.Fatalf("test %d: decode error: %v", i, err)
		}
		if got.Type != want.Type {
			t.Errorf("test %d: Type mismatch: got %d, want %d", i, got.Type, want.Type)
		}
		if got.Status != want.Status {
			t.Errorf("test %d: Status mismatch: got %d, want %d", i, got.Status, want.Status)
		}
		if !bytes.Equal(got.PostState, want.PostState) {
			t.Errorf("test %d: PostState mismatch: got %x, want %x", i, got.PostState, want.PostState)
		}
		if got.CumulativeGasUsed != want.CumulativeGasUsed {
			t.Errorf("test %d: CumulativeGasUsed mismatch: got %d, want %d", i, got.CumulativeGasUsed, want.CumulativeGasUsed)
		}
		if len(got.Logs) != len(want.Logs) {
			t.Errorf("test %d: Logs length mismatch: got %d, want %d", i, len(got.Logs), len(want.Logs))
		}
	}
}
func clearComputedFieldsOnReceipts(receipts []*Receipt) []*Receipt {
r := make([]*Receipt, len(receipts))
for i, receipt := range receipts {

2
go.mod
View file

@ -44,6 +44,7 @@ require (
github.com/jackpal/go-nat-pmp v1.0.2
github.com/jedisct1/go-minisign v0.0.0-20230811132847-661be99b8267
github.com/karalabe/hid v1.0.1-0.20240306101548-573246063e52
github.com/klauspost/compress v1.17.8
github.com/kylelemons/godebug v1.1.0
github.com/mattn/go-colorable v0.1.13
github.com/mattn/go-isatty v0.0.20
@ -126,7 +127,6 @@ require (
github.com/influxdata/line-protocol v0.0.0-20200327222509-2487e7298839 // indirect
github.com/jmespath/go-jmespath v0.4.0 // indirect
github.com/kilic/bls12-381 v0.1.0 // indirect
github.com/klauspost/compress v1.17.8 // indirect
github.com/klauspost/cpuid/v2 v2.0.9 // indirect
github.com/kr/pretty v0.3.1 // indirect
github.com/kr/text v0.2.0 // indirect

View file

@ -20,6 +20,7 @@ import (
"errors"
"fmt"
"math/big"
"slices"
"github.com/ethereum/go-ethereum/common"
ssz "github.com/ferranbt/fastssz"
@ -31,8 +32,8 @@ func ComputeAccumulator(hashes []common.Hash, tds []*big.Int) (common.Hash, erro
if len(hashes) != len(tds) {
return common.Hash{}, errors.New("must have equal number hashes as td values")
}
if len(hashes) > MaxEra1Size {
return common.Hash{}, fmt.Errorf("too many records: have %d, max %d", len(hashes), MaxEra1Size)
if len(hashes) > MaxSize {
return common.Hash{}, fmt.Errorf("too many records: have %d, max %d", len(hashes), MaxSize)
}
hh := ssz.NewHasher()
for i := range hashes {
@ -43,7 +44,7 @@ func ComputeAccumulator(hashes []common.Hash, tds []*big.Int) (common.Hash, erro
}
hh.Append(root[:])
}
hh.MerkleizeWithMixin(0, uint64(len(hashes)), uint64(MaxEra1Size))
hh.MerkleizeWithMixin(0, uint64(len(hashes)), uint64(MaxSize))
return hh.HashRoot()
}
@ -69,23 +70,15 @@ func (h *headerRecord) HashTreeRoot() ([32]byte, error) {
// HashTreeRootWith ssz hashes the headerRecord object with a hasher.
func (h *headerRecord) HashTreeRootWith(hh ssz.HashWalker) (err error) {
hh.PutBytes(h.Hash[:])
td := bigToBytes32(h.TotalDifficulty)
td := BigToBytes32(h.TotalDifficulty)
hh.PutBytes(td[:])
hh.Merkleize(0)
return
}
// bigToBytes32 converts a big.Int into a little-endian 32-byte array.
func bigToBytes32(n *big.Int) (b [32]byte) {
func BigToBytes32(n *big.Int) (b [32]byte) {
n.FillBytes(b[:])
reverseOrder(b[:])
slices.Reverse(b[:])
return
}
// reverseOrder reverses the byte order of a slice.
func reverseOrder(b []byte) []byte {
for i := 0; i < 16; i++ {
b[i], b[32-i-1] = b[32-i-1], b[i]
}
return b
}

View file

@ -1,4 +1,4 @@
// Copyright 2024 The go-ethereum Authors
// Copyright 2025 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
@ -17,7 +17,6 @@
package era
import (
"encoding/binary"
"fmt"
"io"
"math/big"
@ -25,15 +24,12 @@ import (
"path"
"strconv"
"strings"
"sync"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/internal/era/e2store"
"github.com/ethereum/go-ethereum/rlp"
"github.com/golang/snappy"
)
// Type constants for the e2store entries in the Era1 and EraE formats.
var (
TypeVersion uint16 = 0x3265
TypeCompressedHeader uint16 = 0x03
@ -41,277 +37,119 @@ var (
TypeCompressedReceipts uint16 = 0x05
TypeTotalDifficulty uint16 = 0x06
TypeAccumulator uint16 = 0x07
TypeCompressedSlimReceipts uint16 = 0x08 // uses eth/69 encoding
TypeProof uint16 = 0x09
TypeBlockIndex uint16 = 0x3266
TypeComponentIndex uint16 = 0x3267
MaxEra1Size = 8192
MaxSize = 8192
// headerSize uint64 = 8
)
// Filename returns a recognizable Era1-formatted file name for the specified
// epoch and network.
func Filename(network string, epoch int, root common.Hash) string {
return fmt.Sprintf("%s-%05d-%s.era1", network, epoch, root.Hex()[2:10])
}
// ReadDir reads all the era1 files in a directory for a given network.
// Format: <network>-<epoch>-<hexroot>.era1
func ReadDir(dir, network string) ([]string, error) {
entries, err := os.ReadDir(dir)
if err != nil {
return nil, fmt.Errorf("error reading directory %s: %w", dir, err)
}
var (
next = uint64(0)
eras []string
)
for _, entry := range entries {
if path.Ext(entry.Name()) != ".era1" {
continue
}
parts := strings.Split(entry.Name(), "-")
if len(parts) != 3 || parts[0] != network {
// Invalid era1 filename, skip.
continue
}
if epoch, err := strconv.ParseUint(parts[1], 10, 64); err != nil {
return nil, fmt.Errorf("malformed era1 filename: %s", entry.Name())
} else if epoch != next {
return nil, fmt.Errorf("missing epoch %d", next)
}
next += 1
eras = append(eras, entry.Name())
}
return eras, nil
}
// ReadAtSeekCloser bundles the random-access read, seek and close operations
// required of a backing era file. Both the era1 and eraE readers are
// constructed from a value of this type.
type ReadAtSeekCloser interface {
	io.ReaderAt
	io.Seeker
	io.Closer
}
// Era reads an Era1 file.
type Era struct {
f ReadAtSeekCloser // backing era1 file
s *e2store.Reader // e2store reader over f
m metadata // start, count, length info
mu *sync.Mutex // lock for buf
buf [8]byte // buffer reading entry offsets
// Iterator provides sequential access to blocks in an era file. It is the
// format-neutral interface; per the Builder docs below, both the Era1 (onedb)
// and EraE (execdb) formats are supported.
type Iterator interface {
	// Next advances to the next block. Returns true if a block is available,
	// false when iteration is complete or an error occurred.
	Next() bool
	// Number returns the block number of the current block.
	Number() uint64
	// Block returns the current block.
	Block() (*types.Block, error)
	// BlockAndReceipts returns the current block and its receipts.
	BlockAndReceipts() (*types.Block, types.Receipts, error)
	// Receipts returns the receipts for the current block.
	Receipts() (types.Receipts, error)
	// Error returns any error encountered during iteration.
	Error() error
}
// From returns an Era backed by f.
func From(f ReadAtSeekCloser) (*Era, error) {
m, err := readMetadata(f)
if err != nil {
return nil, err
}
return &Era{
f: f,
s: e2store.NewReader(f),
m: m,
mu: new(sync.Mutex),
}, nil
// Builder constructs era files from blocks and receipts.
//
// Builders handle three epoch types automatically:
//   - Pre-merge: all blocks have difficulty > 0, TD is stored for each block
//   - Transition: starts pre-merge, ends post-merge; TD stored for all blocks
//   - Post-merge: all blocks have difficulty == 0, no TD stored
type Builder interface {
	// Add appends a block and its receipts to the era file.
	// For pre-merge blocks, td must be provided.
	// For post-merge blocks, td should be nil.
	Add(block *types.Block, receipts types.Receipts, td *big.Int) error
	// AddRLP appends RLP-encoded block components to the era file.
	// For pre-merge blocks, td and difficulty must be provided.
	// For post-merge blocks, td and difficulty should be nil.
	AddRLP(header, body, receipts []byte, number uint64, hash common.Hash, td, difficulty *big.Int) error
	// Finalize writes all collected entries and returns the epoch identifier.
	// For Era1 (onedb): returns the accumulator root.
	// For EraE (execdb): returns the last block hash.
	Finalize() (common.Hash, error)
	// Accumulator returns the accumulator root after Finalize has been called.
	// Returns nil for post-merge epochs where no accumulator exists.
	Accumulator() *common.Hash
}
// Open returns an Era backed by the given filename.
func Open(filename string) (*Era, error) {
f, err := os.Open(filename)
if err != nil {
return nil, err
}
return From(f)
// Era represents the interface for reading era data, independent of the
// on-disk format (Era1 or EraE, per the Builder docs above).
type Era interface {
	// Close releases the backing file handle.
	Close() error
	// Start returns the number of the first block stored in the file.
	Start() uint64
	// Count returns the number of blocks stored in the file.
	Count() uint64
	// Iterator returns a sequential iterator over all blocks in the file.
	Iterator() (Iterator, error)
	// GetBlockByNumber returns the decoded block for the given block number.
	GetBlockByNumber(num uint64) (*types.Block, error)
	// GetRawBodyByNumber returns the RLP-encoded body for the given number.
	GetRawBodyByNumber(num uint64) ([]byte, error)
	// GetRawReceiptsByNumber returns the RLP-encoded receipts for the given number.
	GetRawReceiptsByNumber(num uint64) ([]byte, error)
	// InitialTD returns the total difficulty before the first block's own
	// difficulty is applied.
	InitialTD() (*big.Int, error)
	// Accumulator returns the accumulator root stored in the file.
	Accumulator() (common.Hash, error)
}
// Close closes the backing era1 file.
func (e *Era) Close() error {
	return e.f.Close()
}
// ReadDir reads all the era files in a directory for a given network.
// Format: <network>-<epoch>-<hexroot>.erae or <network>-<epoch>-<hexroot>.era1
func ReadDir(dir, network string) ([]string, error) {
entries, err := os.ReadDir(dir)
// GetBlockByNumber returns the block for the given block number. The header
// and body are read and decoded from their consecutive compressed entries.
// (The stray unreachable return referencing an undefined variable — diff
// residue — has been removed.)
func (e *Era) GetBlockByNumber(num uint64) (*types.Block, error) {
	if e.m.start > num || e.m.start+e.m.count <= num {
		return nil, fmt.Errorf("out-of-bounds: %d not in [%d, %d)", num, e.m.start, e.m.start+e.m.count)
	}
	off, err := e.readOffset(num)
	if err != nil {
		return nil, err
	}
	// Read and decode the header entry.
	r, n, err := newSnappyReader(e.s, TypeCompressedHeader, off)
	if err != nil {
		return nil, err
	}
	var header types.Header
	if err := rlp.Decode(r, &header); err != nil {
		return nil, err
	}
	// The body entry immediately follows the header entry.
	off += n
	r, _, err = newSnappyReader(e.s, TypeCompressedBody, off)
	if err != nil {
		return nil, err
	}
	var body types.Body
	if err := rlp.Decode(r, &body); err != nil {
		return nil, err
	}
	return types.NewBlockWithHeader(&header).WithBody(body), nil
}
// GetRawBodyByNumber returns the RLP-encoded body for the given block number.
func (e *Era) GetRawBodyByNumber(num uint64) ([]byte, error) {
	if num < e.m.start || num >= e.m.start+e.m.count {
		return nil, fmt.Errorf("out-of-bounds: %d not in [%d, %d)", num, e.m.start, e.m.start+e.m.count)
	}
	offset, err := e.readOffset(num)
	if err != nil {
		return nil, err
	}
	// The body is the second component of a block; skip past the header entry.
	offset, err = e.s.SkipN(offset, 1)
	if err != nil {
		return nil, err
	}
	reader, _, err := newSnappyReader(e.s, TypeCompressedBody, offset)
	if err != nil {
		return nil, err
	}
	return io.ReadAll(reader)
}
// GetRawReceiptsByNumber returns the RLP-encoded receipts for the given block number.
func (e *Era) GetRawReceiptsByNumber(num uint64) ([]byte, error) {
	if num < e.m.start || num >= e.m.start+e.m.count {
		return nil, fmt.Errorf("out-of-bounds: %d not in [%d, %d)", num, e.m.start, e.m.start+e.m.count)
	}
	offset, err := e.readOffset(num)
	if err != nil {
		return nil, err
	}
	// Receipts are the third component; skip over header and body.
	offset, err = e.s.SkipN(offset, 2)
	if err != nil {
		return nil, err
	}
	reader, _, err := newSnappyReader(e.s, TypeCompressedReceipts, offset)
	if err != nil {
		return nil, err
	}
	return io.ReadAll(reader)
}
// Accumulator reads the accumulator entry in the Era1 file.
func (e *Era) Accumulator() (common.Hash, error) {
	acc, err := e.s.Find(TypeAccumulator)
	if err != nil {
		return common.Hash{}, err
	}
	root := common.BytesToHash(acc.Value)
	return root, nil
}
// InitialTD returns initial total difficulty before the difficulty of the
// first block of the Era1 is applied.
func (e *Era) InitialTD() (*big.Int, error) {
var (
r io.Reader
header types.Header
rawTd []byte
n int64
off int64
err error
next = uint64(0)
eras []string
dirType string
)
// Read first header.
if off, err = e.readOffset(e.m.start); err != nil {
return nil, err
for _, entry := range entries {
ext := path.Ext(entry.Name())
if ext != ".erae" && ext != ".era1" {
continue
}
if r, n, err = newSnappyReader(e.s, TypeCompressedHeader, off); err != nil {
return nil, err
if dirType == "" {
dirType = ext
}
if err := rlp.Decode(r, &header); err != nil {
return nil, err
parts := strings.Split(entry.Name(), "-")
if len(parts) != 3 || parts[0] != network {
// Invalid era filename, skip.
continue
}
off += n
// Skip over header and body.
off, err = e.s.SkipN(off, 2)
if err != nil {
return nil, err
if epoch, err := strconv.ParseUint(parts[1], 10, 64); err != nil {
return nil, fmt.Errorf("malformed era filenames: %s", entry.Name())
} else if epoch != next {
return nil, fmt.Errorf("missing epoch %d", next)
}
// Read total difficulty after first block.
if r, _, err = e.s.ReaderAt(TypeTotalDifficulty, off); err != nil {
return nil, err
if dirType != ext {
return nil, fmt.Errorf("directory %s contains mixed era file formats: want %s, have %s", dir, dirType, ext)
}
rawTd, err = io.ReadAll(r)
if err != nil {
return nil, err
next += 1
eras = append(eras, entry.Name())
}
td := new(big.Int).SetBytes(reverseOrder(rawTd))
return td.Sub(td, header.Difficulty), nil
}
// Start returns the listed start block, i.e. the number of the first block
// stored in the file (taken from the block index).
func (e *Era) Start() uint64 {
	return e.m.start
}
// Count returns the total number of blocks in the Era1 (taken from the
// block index).
func (e *Era) Count() uint64 {
	return e.m.count
}
// readOffset reads a specific block's offset from the block index. The value n
// is the absolute block number desired. The returned offset is absolute within
// the file.
func (e *Era) readOffset(n uint64) (int64, error) {
	var (
		blockIndexRecordOffset = e.m.length - 24 - int64(e.m.count)*8 // skips start, count, and header
		firstIndex             = blockIndexRecordOffset + 16          // first index after header / start-num
		indexOffset            = int64(n-e.m.start) * 8               // desired index * size of indexes
		offOffset              = firstIndex + indexOffset             // offset of block offset
	)
	// Serialize access to the shared read buffer e.buf.
	e.mu.Lock()
	defer e.mu.Unlock()
	clear(e.buf[:])
	if _, err := e.f.ReadAt(e.buf[:], offOffset); err != nil {
		return 0, err
	}
	// Since the block offset is relative from the start of the block index record
	// we need to add the record offset to its offset to get the block's absolute
	// offset.
	return blockIndexRecordOffset + int64(binary.LittleEndian.Uint64(e.buf[:])), nil
}
// newSnappyReader returns a snappy.Reader over the e2store entry value at off.
// The returned int64 is the total encoded length of the entry, so callers can
// advance past it.
func newSnappyReader(e *e2store.Reader, expectedType uint16, off int64) (io.Reader, int64, error) {
	r, n, err := e.ReaderAt(expectedType, off)
	if err != nil {
		return nil, 0, err
	}
	// err is necessarily nil here; return nil explicitly rather than the
	// stale variable, which misleadingly suggested it could be non-nil.
	return snappy.NewReader(r), int64(n), nil
}
// metadata wraps the metadata in the block index.
type metadata struct {
	start  uint64 // number of the first block in the file
	count  uint64 // number of blocks stored
	length int64  // total length of the backing file in bytes
}
// readMetadata reads the metadata stored in an Era1 file's block index: the
// file length, block count, and starting block number. (A stray unreachable
// `return eras, nil` — diff residue — has been removed.)
func readMetadata(f ReadAtSeekCloser) (m metadata, err error) {
	// Determine length of reader.
	if m.length, err = f.Seek(0, io.SeekEnd); err != nil {
		return
	}
	b := make([]byte, 16)
	// Read count. It's the last 8 bytes of the file.
	if _, err = f.ReadAt(b[:8], m.length-8); err != nil {
		return
	}
	m.count = binary.LittleEndian.Uint64(b)
	// Read start. It's at the offset -sizeof(m.count) -
	// count*sizeof(indexEntry) - sizeof(m.start)
	if _, err = f.ReadAt(b[8:], m.length-16-int64(m.count*8)); err != nil {
		return
	}
	m.start = binary.LittleEndian.Uint64(b[8:])
	return
}

View file

@ -86,8 +86,8 @@ func (l *Loader) DownloadAll(destDir string) error {
// DownloadBlockRange fetches the era1 files for the given block range.
func (l *Loader) DownloadBlockRange(start, end uint64, destDir string) error {
startEpoch := start / uint64(era.MaxEra1Size)
endEpoch := end / uint64(era.MaxEra1Size)
startEpoch := start / uint64(era.MaxSize)
endEpoch := end / uint64(era.MaxSize)
return l.DownloadEpochRange(startEpoch, endEpoch, destDir)
}

View file

@ -0,0 +1,332 @@
// Copyright 2025 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package execdb
// EraE file format specification.
//
// The format can be summarized with the following expression:
//
// eraE := Version | CompressedHeader* | CompressedBody* | CompressedSlimReceipts* | TotalDifficulty* | other-entries* | Accumulator? | ComponentIndex
//
// Each basic element is its own e2store entry:
//
// Version = { type: 0x3265, data: nil }
// CompressedHeader = { type: 0x03, data: snappyFramed(rlp(header)) }
// CompressedBody = { type: 0x04, data: snappyFramed(rlp(body)) }
// CompressedSlimReceipts = { type: 0x08, data: snappyFramed(rlp([tx-type, post-state-or-status, cumulative-gas, logs])) }
// TotalDifficulty = { type: 0x06, data: uint256 (header.total_difficulty) }
// AccumulatorRoot = { type: 0x07, data: hash_tree_root(List(HeaderRecord, 8192)) }
// ComponentIndex = { type: 0x3267, data: component-index }
//
// Notes:
// - TotalDifficulty is present for pre-merge and merge transition epochs.
// For pure post-merge epochs, TotalDifficulty is omitted entirely.
// - In merge transition epochs, post-merge blocks store the final total
// difficulty (the TD at which the merge occurred).
// - AccumulatorRoot is only written for pre-merge epochs.
// - HeaderRecord is defined in the Portal Network specification.
// - Proofs (type 0x09) are defined in the spec but not yet supported in this implementation.
//
// ComponentIndex stores relative offsets to each block's components:
//
// component-index := starting-number | indexes | indexes | ... | component-count | count
// indexes := header-offset | body-offset | receipts-offset | td-offset?
//
// All values are little-endian uint64.
//
// Due to the accumulator size limit of 8192, the maximum number of blocks in an
// EraE file is also 8192.
import (
"bytes"
"encoding/binary"
"errors"
"fmt"
"io"
"math/big"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/internal/era"
"github.com/ethereum/go-ethereum/internal/era/e2store"
"github.com/ethereum/go-ethereum/rlp"
"github.com/golang/snappy"
)
// Builder is used to build an EraE e2store file. It collects block entries and
// writes them to the underlying e2store.Writer when Finalize is called.
type Builder struct {
	w           *e2store.Writer
	headers     [][]byte       // RLP-encoded headers, in order of addition
	hashes      []common.Hash  // only pre-merge block hashes, for accumulator
	bodies      [][]byte       // RLP-encoded bodies
	receipts    [][]byte       // RLP-encoded slim receipts
	tds         []*big.Int     // total difficulties; empty for pure post-merge epochs
	startNum    *uint64        // number of first block added; nil until first AddRLP
	ttd         *big.Int       // terminal total difficulty
	last        common.Hash    // hash of last block added
	accumulator *common.Hash   // accumulator root, set by Finalize (nil for post-merge)
	written     uint64         // bytes written so far; used for index offsets
	buf         *bytes.Buffer  // scratch buffer for snappy compression
	snappy      *snappy.Writer // reused framed snappy writer over buf
}
// NewBuilder returns a new Builder instance writing entries to w.
func NewBuilder(w io.Writer) era.Builder {
	b := &Builder{w: e2store.NewWriter(w)}
	return b
}
// Add writes a block entry and its receipts into the e2store file. The block
// components are RLP-encoded here and handed off to AddRLP.
func (b *Builder) Add(block *types.Block, receipts types.Receipts, td *big.Int) error {
	encHeader, err := rlp.EncodeToBytes(block.Header())
	if err != nil {
		return fmt.Errorf("encode header: %w", err)
	}
	encBody, err := rlp.EncodeToBytes(block.Body())
	if err != nil {
		return fmt.Errorf("encode body: %w", err)
	}
	// Receipts are stored in the slim (eth/69) representation.
	slim := make([]*types.SlimReceipt, len(receipts))
	for i := range receipts {
		slim[i] = (*types.SlimReceipt)(receipts[i])
	}
	encReceipts, err := rlp.EncodeToBytes(slim)
	if err != nil {
		return fmt.Errorf("encode receipts: %w", err)
	}
	return b.AddRLP(encHeader, encBody, encReceipts, block.Number().Uint64(), block.Hash(), td, block.Difficulty())
}
// AddRLP takes the RLP encoded block components and writes them to the underlying e2store file.
// The builder automatically handles transition epochs where both pre and post-merge blocks exist.
//
// For pre-merge blocks (difficulty > 0), td must be non-nil; for post-merge
// blocks (difficulty == 0), td must be nil.
func (b *Builder) AddRLP(header, body, receipts []byte, number uint64, blockHash common.Hash, td, difficulty *big.Int) error {
	if len(b.headers) >= era.MaxSize {
		return fmt.Errorf("exceeds max size %d", era.MaxSize)
	}
	// Set starting block number on first add.
	// NOTE(review): startNum is recorded before validation, so a rejected
	// first block still pins the start number — confirm this is intended.
	if b.startNum == nil {
		b.startNum = new(uint64)
		*b.startNum = number
	}
	if difficulty == nil {
		return fmt.Errorf("invalid block: difficulty is nil")
	}
	hasDifficulty := difficulty.Sign() > 0
	// Expect td to be nil for post-merge blocks
	// and non-nil for pre-merge blocks.
	if hasDifficulty != (td != nil) {
		return fmt.Errorf("TD and difficulty mismatch: expected both nil or both non-nil")
	}
	// After the merge, difficulty must be nil. The epoch is known to be
	// post-merge when blocks were added without TD, or when the transition
	// TD has already been recorded.
	post := (b.tds == nil && len(b.headers) > 0) || b.ttd != nil
	if post && hasDifficulty {
		return fmt.Errorf("post-merge epoch: cannot accept total difficulty for block %d", number)
	}
	// If this marks the start of the transition, record final total
	// difficulty value.
	if b.ttd == nil && len(b.tds) > 0 && !hasDifficulty {
		b.ttd = new(big.Int).Set(b.tds[len(b.tds)-1])
	}
	// Record block data.
	b.headers = append(b.headers, header)
	b.bodies = append(b.bodies, body)
	b.receipts = append(b.receipts, receipts)
	b.last = blockHash
	// Conditionally record the total difficulty and block hashes:
	// - Pre-merge: store total difficulty and block hashes.
	// - Transition: only store total difficulty (the final pre-merge TD).
	// - Post-merge: store neither.
	if hasDifficulty {
		b.hashes = append(b.hashes, blockHash)
		b.tds = append(b.tds, new(big.Int).Set(td))
	} else if b.ttd != nil {
		b.tds = append(b.tds, new(big.Int).Set(b.ttd))
	}
	return nil
}
// Accumulator returns the accumulator root after Finalize has been called.
// Returns nil for post-merge epochs where no accumulator exists, and nil
// before Finalize has run (the field is only set there).
func (b *Builder) Accumulator() *common.Hash {
	return b.accumulator
}
// offsets records the absolute file offset of each written component, per
// block, so writeIndex can later emit index-relative offsets.
type offsets struct {
	headers  []uint64
	bodies   []uint64
	receipts []uint64
	tds      []uint64 // empty for pure post-merge epochs
}
// Finalize writes all collected block entries to the e2store file.
// For pre-merge or transition epochs, the accumulator root is computed over
// pre-merge blocks and written. For pure post-merge epochs, no accumulator
// is written. Always returns the last block hash as the epoch identifier.
func (b *Builder) Finalize() (common.Hash, error) {
	if b.startNum == nil {
		return common.Hash{}, errors.New("no blocks added, cannot finalize")
	}
	// Write version before writing any blocks.
	if n, err := b.w.Write(era.TypeVersion, nil); err != nil {
		return common.Hash{}, fmt.Errorf("write version entry: %w", err)
	} else {
		b.written += uint64(n)
	}
	// Convert TD values to byte-level LE representation.
	var tds [][]byte
	for _, td := range b.tds {
		tds = append(tds, uint256LE(td))
	}
	// Create snappy writer, reused by snappyWrite for every compressed entry.
	b.buf = bytes.NewBuffer(nil)
	b.snappy = snappy.NewBufferedWriter(b.buf)
	// Write all components grouped by type, recording the absolute offset of
	// each entry for the component index.
	var o offsets
	for _, section := range []struct {
		typ        uint16
		data       [][]byte
		compressed bool
		offsets    *[]uint64
	}{
		{era.TypeCompressedHeader, b.headers, true, &o.headers},
		{era.TypeCompressedBody, b.bodies, true, &o.bodies},
		{era.TypeCompressedSlimReceipts, b.receipts, true, &o.receipts},
		{era.TypeTotalDifficulty, tds, false, &o.tds},
	} {
		for _, data := range section.data {
			*section.offsets = append(*section.offsets, b.written)
			if section.compressed {
				// Write snappy compressed data.
				if err := b.snappyWrite(section.typ, data); err != nil {
					return common.Hash{}, err
				}
			} else {
				// Directly write uncompressed data.
				n, err := b.w.Write(section.typ, data)
				if err != nil {
					return common.Hash{}, err
				}
				b.written += uint64(n)
			}
		}
	}
	// Compute and write accumulator root only for epochs that started pre-merge.
	// The accumulator is computed over only the pre-merge blocks (b.hashes).
	// Pure post-merge epochs have no accumulator.
	if len(b.tds) > 0 {
		accRoot, err := era.ComputeAccumulator(b.hashes, b.tds[:len(b.hashes)])
		if err != nil {
			return common.Hash{}, fmt.Errorf("compute accumulator: %w", err)
		}
		if n, err := b.w.Write(era.TypeAccumulator, accRoot[:]); err != nil {
			return common.Hash{}, fmt.Errorf("write accumulator: %w", err)
		} else {
			b.written += uint64(n)
		}
		b.accumulator = &accRoot
	}
	// Write the component index and return the last block hash as the epoch
	// identifier. (Previously duplicated in both the accumulator and
	// non-accumulator branches; now a single shared tail.)
	if err := b.writeIndex(&o); err != nil {
		return common.Hash{}, err
	}
	return b.last, nil
}
// uin256LE writes 32 byte big integers to little endian.
func uint256LE(v *big.Int) []byte {
b := v.FillBytes(make([]byte, 32))
for i := 0; i < 16; i++ {
b[i], b[31-i] = b[31-i], b[i]
}
return b
}
// snappyWrite compresses the input data using snappy framing and writes it to
// the e2store file as an entry of the given type. (Doc comment previously
// started with "SnappyWrite", violating the Go doc-comment convention.)
func (b *Builder) snappyWrite(typ uint16, in []byte) error {
	// Reuse the builder's scratch buffer and framed snappy writer.
	b.buf.Reset()
	b.snappy.Reset(b.buf)
	if _, err := b.snappy.Write(in); err != nil {
		return fmt.Errorf("error snappy encoding: %w", err)
	}
	if err := b.snappy.Flush(); err != nil {
		return fmt.Errorf("error flushing snappy encoding: %w", err)
	}
	// Account for written bytes even when Write errors, since partial data
	// may have reached the underlying writer.
	n, err := b.w.Write(typ, b.buf.Bytes())
	b.written += uint64(n)
	if err != nil {
		return fmt.Errorf("error writing e2store entry: %w", err)
	}
	return nil
}
// writeIndex writes the component index to the file.
//
// Layout (all values little-endian uint64):
//
//	starting-number | per-block indexes... | component-count | count
//
// where each per-block group holds the header, body, receipts and (when
// present) TD offsets, stored relative to the index position.
func (b *Builder) writeIndex(o *offsets) error {
	count := len(o.headers)
	// Post-merge, we only index headers, bodies, and receipts. Pre-merge, we also
	// need to index the total difficulties.
	componentCount := 3
	if len(o.tds) > 0 {
		componentCount++
	}
	// Offsets are stored relative to the index position (negative, stored as uint64).
	base := int64(b.written)
	rel := func(abs uint64) uint64 { return uint64(int64(abs) - base) }
	var buf bytes.Buffer
	// binary.Write to a bytes.Buffer cannot fail for fixed-size values, so
	// the error is intentionally ignored here.
	write := func(v uint64) { binary.Write(&buf, binary.LittleEndian, v) }
	write(*b.startNum)
	for i := range o.headers {
		write(rel(o.headers[i]))
		write(rel(o.bodies[i]))
		write(rel(o.receipts[i]))
		if len(o.tds) > 0 {
			write(rel(o.tds[i]))
		}
	}
	write(uint64(componentCount))
	write(uint64(count))
	n, err := b.w.Write(era.TypeComponentIndex, buf.Bytes())
	b.written += uint64(n)
	return err
}

View file

@ -0,0 +1,348 @@
// Copyright 2025 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package execdb
import (
"bytes"
"fmt"
"io"
"math/big"
"os"
"slices"
"testing"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/rlp"
)
// TestEraE exercises the EraE builder and reader end-to-end for the three
// epoch shapes (pre-merge, post-merge, merge transition) plus a non-zero
// starting block: it builds a file, then verifies metadata, accumulator
// presence, raw iteration, random access, bounds checks, and the high-level
// iterator.
func TestEraE(t *testing.T) {
	t.Parallel()
	tests := []struct {
		name        string
		start       uint64
		preMerge    int
		postMerge   int
		accumulator bool // whether accumulator should exist
	}{
		{
			name:        "pre-merge",
			start:       0,
			preMerge:    128,
			postMerge:   0,
			accumulator: true,
		},
		{
			name:        "post-merge",
			start:       0,
			preMerge:    0,
			postMerge:   64,
			accumulator: false,
		},
		{
			name:        "transition",
			start:       0,
			preMerge:    32,
			postMerge:   32,
			accumulator: true,
		},
		{
			name:        "non-zero-start",
			start:       8192,
			preMerge:    64,
			postMerge:   0,
			accumulator: true,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			t.Parallel()
			f, err := os.CreateTemp(t.TempDir(), "erae-test")
			if err != nil {
				t.Fatalf("error creating temp file: %v", err)
			}
			defer f.Close()
			// Build test data.
			type blockData struct {
				header, body, receipts []byte
				hash                   common.Hash
				td                     *big.Int
				difficulty             *big.Int
			}
			var (
				builder     = NewBuilder(f)
				blocks      []blockData
				totalBlocks = tt.preMerge + tt.postMerge
				// Final TD equals preMerge since each pre-merge block has difficulty 1.
				finalTD = big.NewInt(int64(tt.preMerge))
			)
			// Add pre-merge blocks (difficulty 1, cumulative TD i+1).
			for i := 0; i < tt.preMerge; i++ {
				num := tt.start + uint64(i)
				blk := blockData{
					header:     mustEncode(&types.Header{Number: big.NewInt(int64(num)), Difficulty: big.NewInt(1)}),
					body:       mustEncode(&types.Body{Transactions: []*types.Transaction{types.NewTransaction(0, common.Address{byte(i)}, nil, 0, nil, nil)}}),
					receipts:   mustEncode([]types.SlimReceipt{{CumulativeGasUsed: uint64(i)}}),
					hash:       common.Hash{byte(i)},
					td:         big.NewInt(int64(i + 1)),
					difficulty: big.NewInt(1),
				}
				blocks = append(blocks, blk)
				if err := builder.AddRLP(blk.header, blk.body, blk.receipts, num, blk.hash, blk.td, blk.difficulty); err != nil {
					t.Fatalf("error adding pre-merge block %d: %v", i, err)
				}
			}
			// Add post-merge blocks (difficulty 0, nil TD).
			for i := 0; i < tt.postMerge; i++ {
				idx := tt.preMerge + i
				num := tt.start + uint64(idx)
				blk := blockData{
					header:     mustEncode(&types.Header{Number: big.NewInt(int64(num)), Difficulty: big.NewInt(0)}),
					body:       mustEncode(&types.Body{}),
					receipts:   mustEncode([]types.SlimReceipt{}),
					hash:       common.Hash{byte(idx)},
					difficulty: big.NewInt(0),
				}
				blocks = append(blocks, blk)
				if err := builder.AddRLP(blk.header, blk.body, blk.receipts, num, blk.hash, nil, big.NewInt(0)); err != nil {
					t.Fatalf("error adding post-merge block %d: %v", idx, err)
				}
			}
			// Finalize and check return values.
			epochID, err := builder.Finalize()
			if err != nil {
				t.Fatalf("error finalizing: %v", err)
			}
			// Verify epoch ID is always the last block hash.
			expectedLastHash := blocks[len(blocks)-1].hash
			if epochID != expectedLastHash {
				t.Fatalf("wrong epoch ID: want %s, got %s", expectedLastHash.Hex(), epochID.Hex())
			}
			// Verify accumulator presence.
			if tt.accumulator {
				if builder.Accumulator() == nil {
					t.Fatal("expected non-nil accumulator")
				}
			} else {
				if builder.Accumulator() != nil {
					t.Fatalf("expected nil accumulator, got %s", builder.Accumulator().Hex())
				}
			}
			// Open and verify the era file.
			e, err := Open(f.Name())
			if err != nil {
				t.Fatalf("failed to open era: %v", err)
			}
			defer e.Close()
			// Verify metadata.
			if e.Start() != tt.start {
				t.Fatalf("wrong start block: want %d, got %d", tt.start, e.Start())
			}
			if e.Count() != uint64(totalBlocks) {
				t.Fatalf("wrong block count: want %d, got %d", totalBlocks, e.Count())
			}
			// Verify accumulator in file.
			if tt.accumulator {
				accRoot, err := e.Accumulator()
				if err != nil {
					t.Fatalf("error getting accumulator: %v", err)
				}
				if accRoot != *builder.Accumulator() {
					t.Fatalf("accumulator mismatch: builder has %s, file contains %s",
						builder.Accumulator().Hex(), accRoot.Hex())
				}
			} else {
				if _, err := e.Accumulator(); err == nil {
					t.Fatal("expected error when reading accumulator from post-merge epoch")
				}
			}
			// Verify blocks via raw iterator.
			it, err := NewRawIterator(e)
			if err != nil {
				t.Fatalf("failed to make iterator: %v", err)
			}
			for i := 0; i < totalBlocks; i++ {
				if !it.Next() {
					t.Fatalf("expected more entries at %d", i)
				}
				if it.Error() != nil {
					t.Fatalf("unexpected error: %v", it.Error())
				}
				// Check header.
				rawHeader, err := io.ReadAll(it.Header)
				if err != nil {
					t.Fatalf("error reading header: %v", err)
				}
				if !bytes.Equal(rawHeader, blocks[i].header) {
					t.Fatalf("mismatched header at %d", i)
				}
				// Check body.
				rawBody, err := io.ReadAll(it.Body)
				if err != nil {
					t.Fatalf("error reading body: %v", err)
				}
				if !bytes.Equal(rawBody, blocks[i].body) {
					t.Fatalf("mismatched body at %d", i)
				}
				// Check receipts.
				rawReceipts, err := io.ReadAll(it.Receipts)
				if err != nil {
					t.Fatalf("error reading receipts: %v", err)
				}
				if !bytes.Equal(rawReceipts, blocks[i].receipts) {
					t.Fatalf("mismatched receipts at %d", i)
				}
				// Check TD (only for epochs that have TD stored).
				if tt.preMerge > 0 && it.TotalDifficulty != nil {
					rawTd, err := io.ReadAll(it.TotalDifficulty)
					if err != nil {
						t.Fatalf("error reading TD: %v", err)
					}
					// Stored little-endian; reverse before SetBytes (big-endian).
					slices.Reverse(rawTd)
					td := new(big.Int).SetBytes(rawTd)
					var expectedTD *big.Int
					if i < tt.preMerge {
						expectedTD = blocks[i].td
					} else {
						// Post-merge blocks in transition epoch use final TD.
						expectedTD = finalTD
					}
					if td.Cmp(expectedTD) != 0 {
						t.Fatalf("mismatched TD at %d: want %s, got %s", i, expectedTD, td)
					}
				}
			}
			// Verify random access at both boundaries.
			for _, blockNum := range []uint64{tt.start, tt.start + uint64(totalBlocks) - 1} {
				blk, err := e.GetBlockByNumber(blockNum)
				if err != nil {
					t.Fatalf("error getting block %d: %v", blockNum, err)
				}
				if blk.Number().Uint64() != blockNum {
					t.Fatalf("wrong block number: want %d, got %d", blockNum, blk.Number().Uint64())
				}
			}
			// Verify out-of-range access fails.
			if _, err := e.GetBlockByNumber(tt.start + uint64(totalBlocks)); err == nil {
				t.Fatal("expected error for out-of-range block")
			}
			if tt.start > 0 {
				if _, err := e.GetBlockByNumber(tt.start - 1); err == nil {
					t.Fatal("expected error for block before start")
				}
			}
			// Verify high-level iterator.
			hlIt, err := e.Iterator()
			if err != nil {
				t.Fatalf("failed to create iterator: %v", err)
			}
			count := 0
			for hlIt.Next() {
				blk, err := hlIt.Block()
				if err != nil {
					t.Fatalf("error getting block: %v", err)
				}
				if blk.Number().Uint64() != tt.start+uint64(count) {
					t.Fatalf("wrong block number: want %d, got %d", tt.start+uint64(count), blk.Number().Uint64())
				}
				count++
			}
			if hlIt.Error() != nil {
				t.Fatalf("iterator error: %v", hlIt.Error())
			}
			if count != totalBlocks {
				t.Fatalf("wrong iteration count: want %d, got %d", totalBlocks, count)
			}
		})
	}
}
// TestInitialTD tests the InitialTD calculation separately since it requires
// specific TD/difficulty values. InitialTD should return the TD of the first
// block minus that block's own difficulty.
func TestInitialTD(t *testing.T) {
	t.Parallel()
	f, err := os.CreateTemp(t.TempDir(), "erae-initial-td-test")
	if err != nil {
		t.Fatalf("error creating temp file: %v", err)
	}
	defer f.Close()
	builder := NewBuilder(f)
	// First block: difficulty=5, TD=10, so initial TD = 10-5 = 5.
	header := mustEncode(&types.Header{Number: big.NewInt(0), Difficulty: big.NewInt(5)})
	body := mustEncode(&types.Body{})
	receipts := mustEncode([]types.SlimReceipt{})
	if err := builder.AddRLP(header, body, receipts, 0, common.Hash{0}, big.NewInt(10), big.NewInt(5)); err != nil {
		t.Fatalf("error adding block: %v", err)
	}
	// Second block: difficulty=3, TD=13.
	header2 := mustEncode(&types.Header{Number: big.NewInt(1), Difficulty: big.NewInt(3)})
	if err := builder.AddRLP(header2, body, receipts, 1, common.Hash{1}, big.NewInt(13), big.NewInt(3)); err != nil {
		t.Fatalf("error adding block: %v", err)
	}
	if _, err := builder.Finalize(); err != nil {
		t.Fatalf("error finalizing: %v", err)
	}
	e, err := Open(f.Name())
	if err != nil {
		t.Fatalf("failed to open era: %v", err)
	}
	defer e.Close()
	initialTD, err := e.InitialTD()
	if err != nil {
		t.Fatalf("error getting initial TD: %v", err)
	}
	// Initial TD should be TD[0] - Difficulty[0] = 10 - 5 = 5.
	if initialTD.Cmp(big.NewInt(5)) != 0 {
		t.Fatalf("wrong initial TD: want 5, got %s", initialTD)
	}
}
// mustEncode RLP-encodes obj, panicking on failure. Test helper only.
func mustEncode(obj any) []byte {
	encoded, err := rlp.EncodeToBytes(obj)
	if err != nil {
		panic(fmt.Sprintf("failed to encode obj: %v", err))
	}
	return encoded
}

View file

@ -0,0 +1,240 @@
// Copyright 2025 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package execdb
import (
"errors"
"io"
"math/big"
"slices"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/internal/era"
"github.com/ethereum/go-ethereum/internal/era/e2store"
"github.com/ethereum/go-ethereum/rlp"
"github.com/klauspost/compress/snappy"
)
// Iterator provides sequential, decoded access to the blocks of an EraE file.
// It wraps RawIterator, decoding components on demand.
type Iterator struct {
	inner *RawIterator
	block *types.Block // cache for decoded block; reset on Next
}
// NewIterator returns a header/body/receipt iterator over the archive.
// Call Next immediately to position on the first block.
//
// The era must be an execdb *Era; other implementations yield an error
// instead of the panic the previous unchecked type assertion caused.
func NewIterator(e era.Era) (era.Iterator, error) {
	ee, ok := e.(*Era)
	if !ok {
		return nil, errors.New("execdb: era is not an EraE archive")
	}
	inner, err := NewRawIterator(ee)
	if err != nil {
		return nil, err
	}
	return &Iterator{inner: inner}, nil
}
// Next advances to the next block entry, invalidating the cached decoded
// block. Returns false when iteration is complete or an error occurred.
func (it *Iterator) Next() bool {
	it.block = nil
	return it.inner.Next()
}
// Number is the number of the block currently loaded; the raw iterator's
// next counter points one past the current block, hence the -1.
func (it *Iterator) Number() uint64 { return it.inner.next - 1 }
// Error returns any iteration error (EOF is reported as nil, identical
// to the Era1 iterator behaviour). It delegates to the raw iterator.
func (it *Iterator) Error() error { return it.inner.Error() }
// Block decodes the current header+body into a *types.Block. The decoded
// block is cached until the next call to Next.
func (it *Iterator) Block() (*types.Block, error) {
	if it.block == nil {
		if it.inner.Header == nil || it.inner.Body == nil {
			return nil, errors.New("header and body must be nonnil")
		}
		var (
			header types.Header
			body   types.Body
		)
		if err := rlp.Decode(it.inner.Header, &header); err != nil {
			return nil, err
		}
		if err := rlp.Decode(it.inner.Body, &body); err != nil {
			return nil, err
		}
		it.block = types.NewBlockWithHeader(&header).WithBody(body)
	}
	return it.block, nil
}
// Receipts decodes receipts for the current block from the slim (eth/69)
// encoding and validates the count against the block's transaction count.
func (it *Iterator) Receipts() (types.Receipts, error) {
	// Decode the block first; its transaction count is checked below.
	block, err := it.Block()
	if err != nil {
		return nil, err
	}
	if it.inner.Receipts == nil {
		return nil, errors.New("receipts must be nonnil")
	}
	var rs []*types.SlimReceipt
	if err := rlp.Decode(it.inner.Receipts, &rs); err != nil {
		return nil, err
	}
	if len(rs) != len(block.Transactions()) {
		return nil, errors.New("number of txs does not match receipts")
	}
	receipts := make([]*types.Receipt, len(rs))
	for i, receipt := range rs {
		receipts[i] = (*types.Receipt)(receipt)
		// NOTE(review): bloom is recomputed here from the single receipt;
		// assumes types.CreateBloom accepts one receipt — confirm against
		// the types package API.
		receipts[i].Bloom = types.CreateBloom(receipts[i])
	}
	return receipts, nil
}
// BlockAndReceipts is a convenience wrapper returning the current block
// together with its receipts.
func (it *Iterator) BlockAndReceipts() (*types.Block, types.Receipts, error) {
	block, err := it.Block()
	if err != nil {
		return nil, nil, err
	}
	receipts, err := it.Receipts()
	if err != nil {
		return nil, nil, err
	}
	return block, receipts, nil
}
// TotalDifficulty returns the TD at the current position (if present).
func (it *Iterator) TotalDifficulty() (*big.Int, error) {
	src := it.inner.TotalDifficulty
	if src == nil {
		return nil, errors.New("totaldifficulty stream is nil")
	}
	raw, err := io.ReadAll(src)
	if err != nil {
		return nil, err
	}
	// The value is stored little-endian; big.Int expects big-endian.
	slices.Reverse(raw)
	return new(big.Int).SetBytes(raw), nil
}
// -----------------------------------------------------------------------------
// Lowlevel iterator (raw TLV/offset handling, no decoding)
// -----------------------------------------------------------------------------

// RawIterator walks an Era file block by block, exposing each component as a
// raw reader without decoding it. The component readers are replaced on every
// successful Next call and set to nil on exhaustion or error.
type RawIterator struct {
	e    *Era   // backing era file
	next uint64 // next block to pull
	err  error  // sticky error from the last Next call; read via Error

	Header          io.Reader // snappy-framed RLP header stream
	Body            io.Reader // snappy-framed RLP body stream
	Receipts        io.Reader // snappy-framed RLP slim-receipts stream
	TotalDifficulty io.Reader // nil when archive omits TD
}
// NewRawIterator creates an iterator positioned *before* the first block.
func NewRawIterator(e *Era) (*RawIterator, error) {
	it := &RawIterator{e: e, next: e.m.start}
	return it, nil
}
// Next loads the next blocks components; returns false on EOF or error.
// On success, Header, Body, Receipts (and TotalDifficulty, when the file
// stores it) are replaced with fresh readers for the new block.
func (it *RawIterator) Next() bool {
	it.err = nil // clear previous error
	// Stop once every block in [start, start+count) has been served.
	if it.next >= it.e.m.start+it.e.m.count {
		it.clear()
		return false
	}
	headerOffset, err := it.e.headerOff(it.next)
	if err != nil {
		it.setErr(err)
		return false
	}
	it.Header, _, err = newSnappyReader(it.e.s, era.TypeCompressedHeader, headerOffset)
	if err != nil {
		it.setErr(err)
		return false
	}
	bodyOffset, err := it.e.bodyOff(it.next)
	if err != nil {
		it.setErr(err)
		return false
	}
	it.Body, _, err = newSnappyReader(it.e.s, era.TypeCompressedBody, bodyOffset)
	if err != nil {
		it.setErr(err)
		return false
	}
	receiptsOffset, err := it.e.receiptOff(it.next)
	if err != nil {
		it.setErr(err)
		return false
	}
	it.Receipts, _, err = newSnappyReader(it.e.s, era.TypeCompressedSlimReceipts, receiptsOffset)
	if err != nil {
		it.setErr(err)
		return false
	}
	// Check if TD component is present in this file (pre-merge or merge-transition epoch).
	if int(td) < int(it.e.m.components) {
		tdOffset, err := it.e.tdOff(it.next)
		if err != nil {
			it.setErr(err)
			return false
		}
		// TD is stored uncompressed, so read it directly from the e2store.
		it.TotalDifficulty, _, err = it.e.s.ReaderAt(era.TypeTotalDifficulty, tdOffset)
		if err != nil {
			it.setErr(err)
			return false
		}
	} else {
		it.TotalDifficulty = nil
	}
	it.next++
	return true
}
func (it *RawIterator) Number() uint64 { return it.next - 1 }
// Error returns the iteration error, if any. io.EOF (including wrapped EOF)
// is not considered an error and is reported as nil.
func (it *RawIterator) Error() error {
	// Use errors.Is so that a wrapped io.EOF is also treated as clean EOF,
	// not just the sentinel value itself.
	if errors.Is(it.err, io.EOF) {
		return nil
	}
	return it.err
}
// setErr clears the component readers and records err for Error to report.
func (it *RawIterator) setErr(err error) {
	it.clear()
	it.err = err
}
// clear resets every component reader to nil.
func (it *RawIterator) clear() {
	it.Header = nil
	it.Body = nil
	it.Receipts = nil
	it.TotalDifficulty = nil
}
// newSnappyReader behaves like era.newSnappyReader: returns a snappy.Reader
// plus the length of the underlying TLV payload so callers can advance offsets.
func newSnappyReader(r *e2store.Reader, typ uint16, off int64) (io.Reader, int64, error) {
	raw, n, err := r.ReaderAt(typ, off)
	if err != nil {
		return nil, 0, err
	}
	sr := snappy.NewReader(raw)
	return sr, int64(n), nil
}

View file

@ -0,0 +1,296 @@
// Copyright 2025 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package execdb
import (
"encoding/binary"
"fmt"
"io"
"math/big"
"os"
"slices"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/internal/era"
"github.com/ethereum/go-ethereum/internal/era/e2store"
"github.com/ethereum/go-ethereum/rlp"
"github.com/klauspost/compress/snappy"
)
// Era object represents an era file that contains blocks and their components.
// The index at the tail of the file is parsed once on open (see loadIndex) and
// cached in m; all per-block reads then resolve offsets through that cache.
type Era struct {
	f era.ReadAtSeekCloser // backing file handle; nil after Close
	s *e2store.Reader      // e2store TLV reader over f
	m metadata // metadata for the Era file
}
// Filename returns a recognizable filename for an EraE file.
// The filename uses the last block hash to uniquely identify the epoch's content.
func Filename(network string, epoch int, lastBlockHash common.Hash) string {
	// Use the first four bytes (eight hex chars) of the hash as the suffix.
	hashPrefix := lastBlockHash.Hex()[2:10]
	return fmt.Sprintf("%s-%05d-%s.erae", network, epoch, hashPrefix)
}
// Open accesses the era file at path, parsing and caching its index.
func Open(path string) (*Era, error) {
	f, err := os.Open(path)
	if err != nil {
		return nil, err
	}
	e := &Era{f: f, s: e2store.NewReader(f)}
	if err = e.loadIndex(); err != nil {
		// Don't leak the handle when the index is unreadable.
		f.Close()
		return nil, err
	}
	return e, nil
}
// Close closes the era file safely; calling it more than once is a no-op.
func (e *Era) Close() error {
	if e.f == nil {
		return nil // already closed
	}
	f := e.f
	e.f = nil
	return f.Close()
}
// From returns an Era backed by f, parsing and caching the file's index.
func From(f era.ReadAtSeekCloser) (era.Era, error) {
	e := &Era{
		f: f,
		s: e2store.NewReader(f),
	}
	err := e.loadIndex()
	if err != nil {
		f.Close()
		return nil, err
	}
	return e, nil
}
// Start retrieves the starting block number.
func (e *Era) Start() uint64 { return e.m.start }
// Count retrieves the count of blocks present.
func (e *Era) Count() uint64 { return e.m.count }
// Iterator returns an iterator over the era file.
func (e *Era) Iterator() (era.Iterator, error) { return NewIterator(e) }
// GetBlockByNumber retrieves the block if present within the era file,
// assembling it from its separately stored header and body.
func (e *Era) GetBlockByNumber(blockNum uint64) (*types.Block, error) {
	header, err := e.GetHeader(blockNum)
	if err != nil {
		return nil, err
	}
	body, err := e.GetBody(blockNum)
	if err != nil {
		return nil, err
	}
	return types.NewBlockWithHeader(header).WithBody(*body), nil
}
// GetHeader retrieves the header from the era file through the cached offset table.
func (e *Era) GetHeader(num uint64) (*types.Header, error) {
	off, err := e.headerOff(num)
	if err != nil {
		return nil, err
	}
	raw, _, err := e.s.ReaderAt(era.TypeCompressedHeader, off)
	if err != nil {
		return nil, err
	}
	// Headers are snappy-framed RLP on disk.
	var header types.Header
	if err := rlp.Decode(snappy.NewReader(raw), &header); err != nil {
		return nil, err
	}
	return &header, nil
}
// GetBody retrieves the body from the era file through cached offset table.
func (e *Era) GetBody(num uint64) (*types.Body, error) {
	off, err := e.bodyOff(num)
	if err != nil {
		return nil, err
	}
	raw, _, err := e.s.ReaderAt(era.TypeCompressedBody, off)
	if err != nil {
		return nil, err
	}
	// Bodies are snappy-framed RLP on disk.
	var body types.Body
	if err := rlp.Decode(snappy.NewReader(raw), &body); err != nil {
		return nil, err
	}
	return &body, nil
}
// GetTD retrieves the total difficulty for the given block number from the
// era file through the cached offset table.
func (e *Era) GetTD(blockNum uint64) (*big.Int, error) {
	off, err := e.tdOff(blockNum)
	if err != nil {
		return nil, err
	}
	r, _, err := e.s.ReaderAt(era.TypeTotalDifficulty, off)
	if err != nil {
		return nil, err
	}
	// Previously the ReadAll error was silently discarded, which could turn a
	// truncated read into a bogus zero/partial TD. Propagate it instead.
	buf, err := io.ReadAll(r)
	if err != nil {
		return nil, err
	}
	// TD bytes are stored little-endian; reverse for big.Int (big-endian).
	slices.Reverse(buf)
	return new(big.Int).SetBytes(buf), nil
}
// GetRawBodyByNumber returns the RLP-encoded body for the given block number.
func (e *Era) GetRawBodyByNumber(blockNum uint64) ([]byte, error) {
	off, err := e.bodyOff(blockNum)
	if err != nil {
		return nil, err
	}
	raw, _, err := e.s.ReaderAt(era.TypeCompressedBody, off)
	if err != nil {
		return nil, err
	}
	// Decompress but do not decode: callers get the raw RLP bytes.
	return io.ReadAll(snappy.NewReader(raw))
}
// GetRawReceiptsByNumber returns the RLP-encoded receipts for the given block number.
func (e *Era) GetRawReceiptsByNumber(blockNum uint64) ([]byte, error) {
	off, err := e.receiptOff(blockNum)
	if err != nil {
		return nil, err
	}
	raw, _, err := e.s.ReaderAt(era.TypeCompressedSlimReceipts, off)
	if err != nil {
		return nil, err
	}
	// Decompress but do not decode: callers get the raw RLP bytes.
	return io.ReadAll(snappy.NewReader(raw))
}
// InitialTD returns initial total difficulty before the difficulty of the
// first block of the Era is applied. Returns an error if TD is not available
// (e.g., post-merge epoch).
func (e *Era) InitialTD() (*big.Int, error) {
	// Bail out early when this file does not store a TD component.
	if int(td) >= int(e.m.components) {
		return nil, fmt.Errorf("total difficulty not available in this epoch")
	}
	first := e.m.start
	// The TD *after* the first block is indexed; subtracting the first
	// block's own difficulty yields the TD before the era.
	header, err := e.GetHeader(first)
	if err != nil {
		return nil, fmt.Errorf("read first header: %w", err)
	}
	firstTD, err := e.GetTD(first)
	if err != nil {
		return nil, fmt.Errorf("read first TD: %w", err)
	}
	return new(big.Int).Sub(firstTD, header.Difficulty), nil
}
// Accumulator reads the accumulator entry in the EraE file if it exists.
// Note that only premerge erae files will contain an accumulator entry;
// for other files Find returns an error.
func (e *Era) Accumulator() (common.Hash, error) {
	entry, err := e.s.Find(era.TypeAccumulator)
	if err != nil {
		return common.Hash{}, err
	}
	return common.BytesToHash(entry.Value), nil
}
// loadIndex loads in the index table containing all offsets and caches it.
//
// The index lives at the tail of the file. The final 16 bytes hold the
// component count and block count; those determine the total index payload
// length, which locates the index TLV entry and the start block number
// stored as the first 8 bytes of its payload.
func (e *Era) loadIndex() error {
	var err error
	// Determine the file length so the tail can be addressed.
	e.m.length, err = e.f.Seek(0, io.SeekEnd)
	if err != nil {
		return err
	}
	b := make([]byte, 16)
	// Read component count and block count from the last 16 bytes.
	if _, err = e.f.ReadAt(b, e.m.length-16); err != nil {
		return err
	}
	e.m.components = binary.LittleEndian.Uint64(b[0:8])
	e.m.count = binary.LittleEndian.Uint64(b[8:16])
	payloadlen := 8 + 8*e.m.count*e.m.components + 16 // 8 for start block, 8 per property per block, 16 for the number of properties and the number of blocks
	// tlvstart is the offset of the index entry's TLV header (8 bytes),
	// so the payload — and the start block number — begins at tlvstart+8.
	tlvstart := e.m.length - int64(payloadlen) - 8
	_, err = e.f.ReadAt(b[:8], tlvstart+8)
	if err != nil {
		return err
	}
	e.m.start = binary.LittleEndian.Uint64(b[:8])
	return nil
}
// headerOff, bodyOff, receiptOff, and tdOff return the offsets of the respective components for a given block number.
// They are thin wrappers around indexOffset with the matching componentType.
func (e *Era) headerOff(num uint64) (int64, error) { return e.indexOffset(num, header) }
func (e *Era) bodyOff(num uint64) (int64, error) { return e.indexOffset(num, body) }
func (e *Era) receiptOff(num uint64) (int64, error) { return e.indexOffset(num, receipts) }
func (e *Era) tdOff(num uint64) (int64, error) { return e.indexOffset(num, td) }
// indexOffset calculates offset to a certain component for a block number within a file.
//
// Index records are stored one per component per block, 8 bytes each, after
// the start block number. Stored offsets are relative to the start of the
// index TLV entry, so the absolute file offset is rel + indstart.
func (e *Era) indexOffset(n uint64, component componentType) (int64, error) {
	// Reject block numbers outside [start, start+count).
	if n < e.m.start || n >= e.m.start+e.m.count {
		return 0, fmt.Errorf("block %d out of range [%d,%d)", n, e.m.start, e.m.start+e.m.count)
	}
	// Reject components that this file does not store (e.g. td in a
	// post-merge epoch).
	if int(component) >= int(e.m.components) {
		return 0, fmt.Errorf("component %d not present", component)
	}
	payloadlen := 8 + 8*e.m.count*e.m.components + 16 // 8 for start block, 8 per property per block, 16 for the number of properties and the number of blocks
	indstart := e.m.length - int64(payloadlen) - 8
	// rec is the flat record index of (block, component).
	rec := (n-e.m.start)*e.m.components + uint64(component)
	// Skip the TLV header (8 bytes) and the start block number (8 bytes).
	pos := indstart + 8 + 8 + int64(rec*8)
	var buf [8]byte
	if _, err := e.f.ReadAt(buf[:], pos); err != nil {
		return 0, err
	}
	rel := binary.LittleEndian.Uint64(buf[:])
	return int64(rel) + indstart, nil
}
// metadata contains the information about the era file that is written into the file.
type metadata struct {
	start uint64 // start block number
	count uint64 // number of blocks in the era
	components uint64 // number of properties stored per block
	length int64 // length of the file in bytes
}
// componentType represents the integer form of a specific type that can be present in the era file.
type componentType int

// header, body, receipts, td, and proof are the different types of components that can be present in the era file.
// The ordinal values double as the per-block record positions in the index,
// so the declaration order must match the on-disk layout.
const (
	header componentType = iota
	body
	receipts
	td
	proof
)

View file

@ -14,7 +14,7 @@
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package era
package onedb
import (
"bytes"
@ -26,6 +26,7 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/internal/era"
"github.com/ethereum/go-ethereum/internal/era/e2store"
"github.com/ethereum/go-ethereum/rlp"
"github.com/golang/snappy"
@ -78,6 +79,8 @@ type Builder struct {
indexes []uint64
hashes []common.Hash
tds []*big.Int
accumulator *common.Hash // accumulator root, set by Finalize
written int
buf *bytes.Buffer
@ -85,7 +88,7 @@ type Builder struct {
}
// NewBuilder returns a new Builder instance.
func NewBuilder(w io.Writer) *Builder {
func NewBuilder(w io.Writer) era.Builder {
buf := bytes.NewBuffer(nil)
return &Builder{
w: e2store.NewWriter(w),
@ -117,7 +120,7 @@ func (b *Builder) Add(block *types.Block, receipts types.Receipts, td *big.Int)
func (b *Builder) AddRLP(header, body, receipts []byte, number uint64, hash common.Hash, td, difficulty *big.Int) error {
// Write Era1 version entry before first block.
if b.startNum == nil {
n, err := b.w.Write(TypeVersion, nil)
n, err := b.w.Write(era.TypeVersion, nil)
if err != nil {
return err
}
@ -126,8 +129,8 @@ func (b *Builder) AddRLP(header, body, receipts []byte, number uint64, hash comm
b.startTd = new(big.Int).Sub(td, difficulty)
b.written += n
}
if len(b.indexes) >= MaxEra1Size {
return fmt.Errorf("exceeds maximum batch size of %d", MaxEra1Size)
if len(b.indexes) >= era.MaxSize {
return fmt.Errorf("exceeds maximum batch size of %d", era.MaxSize)
}
b.indexes = append(b.indexes, uint64(b.written))
@ -135,19 +138,19 @@ func (b *Builder) AddRLP(header, body, receipts []byte, number uint64, hash comm
b.tds = append(b.tds, td)
// Write block data.
if err := b.snappyWrite(TypeCompressedHeader, header); err != nil {
if err := b.snappyWrite(era.TypeCompressedHeader, header); err != nil {
return err
}
if err := b.snappyWrite(TypeCompressedBody, body); err != nil {
if err := b.snappyWrite(era.TypeCompressedBody, body); err != nil {
return err
}
if err := b.snappyWrite(TypeCompressedReceipts, receipts); err != nil {
if err := b.snappyWrite(era.TypeCompressedReceipts, receipts); err != nil {
return err
}
// Also write total difficulty, but don't snappy encode.
btd := bigToBytes32(td)
n, err := b.w.Write(TypeTotalDifficulty, btd[:])
btd := era.BigToBytes32(td)
n, err := b.w.Write(era.TypeTotalDifficulty, btd[:])
b.written += n
if err != nil {
return err
@ -157,21 +160,24 @@ func (b *Builder) AddRLP(header, body, receipts []byte, number uint64, hash comm
}
// Finalize computes the accumulator and block index values, then writes the
// corresponding e2store entries.
// corresponding e2store entries. Era1 always has an accumulator, so this
// always returns a valid hash.
func (b *Builder) Finalize() (common.Hash, error) {
if b.startNum == nil {
return common.Hash{}, errors.New("finalize called on empty builder")
}
// Compute accumulator root and write entry.
root, err := ComputeAccumulator(b.hashes, b.tds)
root, err := era.ComputeAccumulator(b.hashes, b.tds)
if err != nil {
return common.Hash{}, fmt.Errorf("error calculating accumulator root: %w", err)
}
n, err := b.w.Write(TypeAccumulator, root[:])
n, err := b.w.Write(era.TypeAccumulator, root[:])
b.written += n
if err != nil {
return common.Hash{}, fmt.Errorf("error writing accumulator: %w", err)
}
b.accumulator = &root
// Get beginning of index entry to calculate block relative offset.
base := int64(b.written)
@ -196,13 +202,19 @@ func (b *Builder) Finalize() (common.Hash, error) {
binary.LittleEndian.PutUint64(index[8+count*8:], uint64(count))
// Finally, write the block index entry.
if _, err := b.w.Write(TypeBlockIndex, index); err != nil {
if _, err := b.w.Write(era.TypeBlockIndex, index); err != nil {
return common.Hash{}, fmt.Errorf("unable to write block index: %w", err)
}
return root, nil
}
// Accumulator returns the accumulator root after Finalize has been called.
// For Era1, this always returns a non-nil value since all blocks are pre-merge.
func (b *Builder) Accumulator() *common.Hash {
return b.accumulator
}
// snappyWrite is a small helper to take care snappy encoding and writing an e2store entry.
func (b *Builder) snappyWrite(typ uint16, in []byte) error {
var (

View file

@ -14,7 +14,7 @@
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package era
package onedb
import (
"bytes"
@ -22,6 +22,7 @@ import (
"io"
"math/big"
"os"
"slices"
"testing"
"github.com/ethereum/go-ethereum/common"
@ -82,7 +83,11 @@ func TestEra1Builder(t *testing.T) {
t.Fatalf("failed to open era: %v", err)
}
defer e.Close()
it, err := NewRawIterator(e)
eraPtr, ok := e.(*Era)
if !ok {
t.Fatalf("failed to assert *Era type")
}
it, err := NewRawIterator(eraPtr)
if err != nil {
t.Fatalf("failed to make iterator: %s", err)
}
@ -119,7 +124,7 @@ func TestEra1Builder(t *testing.T) {
if !bytes.Equal(rawReceipts, chain.receipts[i]) {
t.Fatalf("mismatched receipts: want %s, got %s", chain.receipts[i], rawReceipts)
}
receipts, err := getReceiptsByNumber(e, i)
receipts, err := getReceiptsByNumber(eraPtr, i)
if err != nil {
t.Fatalf("error reading receipts: %v", err)
}
@ -136,7 +141,8 @@ func TestEra1Builder(t *testing.T) {
if err != nil {
t.Fatalf("error reading td: %v", err)
}
td := new(big.Int).SetBytes(reverseOrder(rawTd))
slices.Reverse(rawTd)
td := new(big.Int).SetBytes(rawTd)
if td.Cmp(chain.tds[i]) != 0 {
t.Fatalf("mismatched tds: want %s, got %s", chain.tds[i], td)
}

View file

@ -14,14 +14,16 @@
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package era
package onedb
import (
"errors"
"io"
"math/big"
"slices"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/internal/era"
"github.com/ethereum/go-ethereum/rlp"
)
@ -32,8 +34,8 @@ type Iterator struct {
// NewIterator returns a new Iterator instance. Next must be immediately
// called on new iterators to load the first item.
func NewIterator(e *Era) (*Iterator, error) {
inner, err := NewRawIterator(e)
func NewIterator(e era.Era) (era.Iterator, error) {
inner, err := NewRawIterator(e.(*Era))
if err != nil {
return nil, err
}
@ -107,7 +109,8 @@ func (it *Iterator) TotalDifficulty() (*big.Int, error) {
if err != nil {
return nil, err
}
return new(big.Int).SetBytes(reverseOrder(td)), nil
slices.Reverse(td)
return new(big.Int).SetBytes(td), nil
}
// RawIterator reads an RLP-encode Era1 entries.
@ -151,22 +154,22 @@ func (it *RawIterator) Next() bool {
return false
}
var n int64
if it.Header, n, it.err = newSnappyReader(it.e.s, TypeCompressedHeader, off); it.err != nil {
if it.Header, n, it.err = newSnappyReader(it.e.s, era.TypeCompressedHeader, off); it.err != nil {
it.clear()
return true
}
off += n
if it.Body, n, it.err = newSnappyReader(it.e.s, TypeCompressedBody, off); it.err != nil {
if it.Body, n, it.err = newSnappyReader(it.e.s, era.TypeCompressedBody, off); it.err != nil {
it.clear()
return true
}
off += n
if it.Receipts, n, it.err = newSnappyReader(it.e.s, TypeCompressedReceipts, off); it.err != nil {
if it.Receipts, n, it.err = newSnappyReader(it.e.s, era.TypeCompressedReceipts, off); it.err != nil {
it.clear()
return true
}
off += n
if it.TotalDifficulty, _, it.err = it.e.s.ReaderAt(TypeTotalDifficulty, off); it.err != nil {
if it.TotalDifficulty, _, it.err = it.e.s.ReaderAt(era.TypeTotalDifficulty, off); it.err != nil {
it.clear()
return true
}

View file

@ -0,0 +1,279 @@
// Copyright 2025 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package onedb
import (
"encoding/binary"
"fmt"
"io"
"math/big"
"os"
"slices"
"sync"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/internal/era"
"github.com/ethereum/go-ethereum/internal/era/e2store"
"github.com/ethereum/go-ethereum/rlp"
"github.com/golang/snappy"
)
// Filename returns a recognizable Era1-formatted file name for the specified
// epoch and network.
func Filename(network string, epoch int, root common.Hash) string {
	// Suffix is the first four bytes (eight hex chars) of the root hash.
	suffix := root.Hex()[2:10]
	return fmt.Sprintf("%s-%05d-%s.era1", network, epoch, suffix)
}
// ReadAtSeekCloser bundles the random-access read, seek and close operations
// needed to work with an Era1 file.
// NOTE(review): this duplicates era.ReadAtSeekCloser (used by From below) —
// consider reusing that definition instead.
type ReadAtSeekCloser interface {
	io.ReaderAt
	io.Seeker
	io.Closer
}
// Era reads an Era1 file.
type Era struct {
	f ReadAtSeekCloser // backing era1 file
	s *e2store.Reader // e2store reader over f
	m metadata // start, count, length info
	mu *sync.Mutex // lock for buf
	buf [8]byte // buffer reading entry offsets
}
// From returns an Era backed by f, reading and caching the file's metadata.
func From(f era.ReadAtSeekCloser) (era.Era, error) {
	meta, err := readMetadata(f)
	if err != nil {
		return nil, err
	}
	e := &Era{
		f:  f,
		s:  e2store.NewReader(f),
		m:  meta,
		mu: new(sync.Mutex),
	}
	return e, nil
}
// Open returns an Era backed by the given filename.
func Open(filename string) (era.Era, error) {
	f, err := os.Open(filename)
	if err != nil {
		return nil, err
	}
	e, err := From(f)
	if err != nil {
		// Previously the file handle leaked when metadata parsing failed;
		// close it here, matching the behaviour of the execdb Open.
		f.Close()
		return nil, err
	}
	return e, nil
}
// Close closes the backing file. Unlike the execdb implementation, this is
// not guarded against double-close; calling it twice closes f twice.
func (e *Era) Close() error {
	return e.f.Close()
}
// Iterator returns an iterator over the era file.
func (e *Era) Iterator() (era.Iterator, error) { return NewIterator(e) }
// GetBlockByNumber returns the block for the given block number.
// Era1 stores header, body, receipts and TD contiguously per block, so the
// body entry is located by advancing past the header entry's length.
func (e *Era) GetBlockByNumber(num uint64) (*types.Block, error) {
	if e.m.start > num || e.m.start+e.m.count <= num {
		return nil, fmt.Errorf("out-of-bounds: %d not in [%d, %d)", num, e.m.start, e.m.start+e.m.count)
	}
	// Resolve the block's first entry (the header) from the block index.
	off, err := e.readOffset(num)
	if err != nil {
		return nil, err
	}
	r, n, err := newSnappyReader(e.s, era.TypeCompressedHeader, off)
	if err != nil {
		return nil, err
	}
	var header types.Header
	if err := rlp.Decode(r, &header); err != nil {
		return nil, err
	}
	// Advance past the header entry to reach the body entry.
	off += n
	r, _, err = newSnappyReader(e.s, era.TypeCompressedBody, off)
	if err != nil {
		return nil, err
	}
	var body types.Body
	if err := rlp.Decode(r, &body); err != nil {
		return nil, err
	}
	return types.NewBlockWithHeader(&header).WithBody(body), nil
}
// GetRawBodyByNumber returns the RLP-encoded body for the given block number.
func (e *Era) GetRawBodyByNumber(num uint64) ([]byte, error) {
	if num < e.m.start || num >= e.m.start+e.m.count {
		return nil, fmt.Errorf("out-of-bounds: %d not in [%d, %d)", num, e.m.start, e.m.start+e.m.count)
	}
	off, err := e.readOffset(num)
	if err != nil {
		return nil, err
	}
	// Skip the header entry to land on the body.
	if off, err = e.s.SkipN(off, 1); err != nil {
		return nil, err
	}
	r, _, err := newSnappyReader(e.s, era.TypeCompressedBody, off)
	if err != nil {
		return nil, err
	}
	return io.ReadAll(r)
}
// GetRawReceiptsByNumber returns the RLP-encoded receipts for the given block number.
func (e *Era) GetRawReceiptsByNumber(num uint64) ([]byte, error) {
	if num < e.m.start || num >= e.m.start+e.m.count {
		return nil, fmt.Errorf("out-of-bounds: %d not in [%d, %d)", num, e.m.start, e.m.start+e.m.count)
	}
	off, err := e.readOffset(num)
	if err != nil {
		return nil, err
	}
	// Skip over header and body to land on the receipts.
	if off, err = e.s.SkipN(off, 2); err != nil {
		return nil, err
	}
	r, _, err := newSnappyReader(e.s, era.TypeCompressedReceipts, off)
	if err != nil {
		return nil, err
	}
	return io.ReadAll(r)
}
// Accumulator reads the accumulator entry in the Era1 file.
// The entry is located by scanning the e2store for its type tag.
func (e *Era) Accumulator() (common.Hash, error) {
	entry, err := e.s.Find(era.TypeAccumulator)
	if err != nil {
		return common.Hash{}, err
	}
	return common.BytesToHash(entry.Value), nil
}
// InitialTD returns initial total difficulty before the difficulty of the
// first block of the Era1 is applied.
// It reads the first block's header and the TD entry stored after that
// block, then subtracts the block's own difficulty.
func (e *Era) InitialTD() (*big.Int, error) {
	var (
		r io.Reader
		header types.Header
		rawTd []byte
		n int64
		off int64
		err error
	)
	// Read first header.
	if off, err = e.readOffset(e.m.start); err != nil {
		return nil, err
	}
	if r, n, err = newSnappyReader(e.s, era.TypeCompressedHeader, off); err != nil {
		return nil, err
	}
	if err := rlp.Decode(r, &header); err != nil {
		return nil, err
	}
	off += n
	// Skip over header and body.
	off, err = e.s.SkipN(off, 2)
	if err != nil {
		return nil, err
	}
	// Read total difficulty after first block.
	if r, _, err = e.s.ReaderAt(era.TypeTotalDifficulty, off); err != nil {
		return nil, err
	}
	rawTd, err = io.ReadAll(r)
	if err != nil {
		return nil, err
	}
	// TD bytes are stored little-endian; reverse for big.Int.
	slices.Reverse(rawTd)
	td := new(big.Int).SetBytes(rawTd)
	return td.Sub(td, header.Difficulty), nil
}
// Start returns the listed start block.
func (e *Era) Start() uint64 { return e.m.start }
// Count returns the total number of blocks in the Era1.
func (e *Era) Count() uint64 { return e.m.count }
// readOffset reads a specific block's offset from the block index. The value n
// is the absolute block number desired.
// The shared scratch buffer is guarded by e.mu so concurrent lookups don't
// interleave their reads.
func (e *Era) readOffset(n uint64) (int64, error) {
	var (
		blockIndexRecordOffset = e.m.length - 24 - int64(e.m.count)*8 // skips start, count, and header
		firstIndex = blockIndexRecordOffset + 16 // first index after header / start-num
		indexOffset = int64(n-e.m.start) * 8 // desired index * size of indexes
		offOffset = firstIndex + indexOffset // offset of block offset
	)
	e.mu.Lock()
	defer e.mu.Unlock()
	clear(e.buf[:])
	if _, err := e.f.ReadAt(e.buf[:], offOffset); err != nil {
		return 0, err
	}
	// Since the block offset is relative from the start of the block index record
	// we need to add the record offset to its offset to get the block's absolute
	// offset.
	return blockIndexRecordOffset + int64(binary.LittleEndian.Uint64(e.buf[:])), nil
}
// newSnappyReader returns a snappy.Reader for the e2store entry value at off.
// The second result is the full length of the underlying entry, letting the
// caller advance past it.
func newSnappyReader(e *e2store.Reader, expectedType uint16, off int64) (io.Reader, int64, error) {
	r, n, err := e.ReaderAt(expectedType, off)
	if err != nil {
		return nil, 0, err
	}
	return snappy.NewReader(r), int64(n), nil
}
// metadata wraps the metadata in the block index.
type metadata struct {
	start uint64 // number of the first block in the file
	count uint64 // number of blocks stored
	length int64 // total file length in bytes
}
// readMetadata reads the metadata stored in an Era1 file's block index.
// The index sits at the tail of the file: count is the last 8 bytes, and
// start precedes the count*8 bytes of per-block offsets.
func readMetadata(f ReadAtSeekCloser) (m metadata, err error) {
	// Determine length of reader.
	if m.length, err = f.Seek(0, io.SeekEnd); err != nil {
		return
	}
	b := make([]byte, 16)
	// Read count. It's the last 8 bytes of the file.
	if _, err = f.ReadAt(b[:8], m.length-8); err != nil {
		return
	}
	m.count = binary.LittleEndian.Uint64(b)
	// Read start. It's at the offset -sizeof(m.count) -
	// count*sizeof(indexEntry) - sizeof(m.start)
	// NOTE(review): a corrupt/huge count makes the computed offset negative
	// and ReadAt fail; no explicit bound is checked here.
	if _, err = f.ReadAt(b[8:], m.length-16-int64(m.count*8)); err != nil {
		return
	}
	m.start = binary.LittleEndian.Uint64(b[8:])
	return
}

36
internal/era/proof.go Normal file
View file

@ -0,0 +1,36 @@
// Copyright 2025 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package era
import (
"io"
"github.com/ethereum/go-ethereum/rlp"
)
// ProofVariant identifies which kind of block proof an entry carries.
type ProofVariant uint16

const (
	// ProofNone indicates that no proof is attached.
	ProofNone ProofVariant = iota
)
// Proof is the interface for all block proof types in the package.
// It's a stub for later integration into Era.
// Implementations must be RLP round-trippable and report their variant tag.
type Proof interface {
	EncodeRLP(w io.Writer) error
	DecodeRLP(s *rlp.Stream) error
	Variant() ProofVariant
}