* factor in BAL size when enforcing the block size limit while adding txs to the payload in the miner

* add support for three access list execution mode presets exposed via the --bal.executionmode flag:
- sequential: no performance acceleration
- full: parallel transaction execution, state root calculation, async warming of access list reads
- nobatchio: same as 'full', but without async warming of access list reads

* fix the way metrics are reported when executing access-list-containing blocks so that it is in line with how it's done for other blocks.

* fix blockchain tests runner
This commit is contained in:
Jared Wasinger 2026-02-09 14:04:45 -05:00
parent 7ddef250f7
commit 57c631e2a8
20 changed files with 406 additions and 178 deletions

View file

@ -239,6 +239,20 @@ func makeFullNode(ctx *cli.Context) *node.Node {
cfg.Eth.OverrideVerkle = &v
}
if ctx.IsSet(utils.BlockAccessListExecutionModeFlag.Name) {
val := ctx.String(utils.BlockAccessListExecutionModeFlag.Name)
switch val {
case utils.BalExecutionModeFull:
cfg.Eth.BALExecutionMode = 0
case utils.BalExecutionModeNoBatchIO:
cfg.Eth.BALExecutionMode = 1
case utils.BalExecutionModeSequential:
cfg.Eth.BALExecutionMode = 2
default:
utils.Fatalf("invalid option for --bal.executionmode: %s. acceptable values are full|nobatchio|sequential", val)
}
}
// Start metrics export if enabled
utils.SetupMetrics(&cfg.Metrics)

View file

@ -265,6 +265,7 @@ func init() {
consoleFlags,
debug.Flags,
metricsFlags,
[]cli.Flag{utils.BlockAccessListExecutionModeFlag},
)
flags.AutoEnvVars(app.Flags, "GETH")

View file

@ -1042,6 +1042,25 @@ Please note that --` + MetricsHTTPFlag.Name + ` must be set to start the server.
Value: metrics.DefaultConfig.InfluxDBOrganization,
Category: flags.MetricsCategory,
}
// block access list flags
BlockAccessListExecutionModeFlag = &cli.StringFlag{
Name: "bal.executionmode",
Usage: `
block access list execution type. possible inputs are:
- sequential: no performance acceleration
- full: parallel transaction execution, state root calculation, async warming of access list reads
- nobatchio: same as 'full', but without async warming of access list reads`,
Value: BalExecutionModeFull,
Category: flags.MiscCategory,
}
)
const (
BalExecutionModeFull = "full"
BalExecutionModeNoBatchIO = "nobatchio"
BalExecutionModeSequential = "sequential"
)
var (

View file

@ -79,6 +79,14 @@ func (a *BlockAccessListTracer) OnSystemCallEnd() {
}
}
func (a *BlockAccessListTracer) Checkpoint() {
a.builder.Checkpoint()
}
func (a *BlockAccessListTracer) ResetToCheckpoint() {
a.builder.ResetToCheckpoint()
}
func (a *BlockAccessListTracer) TxStartHook(vm *tracing.VMContext, tx *types.Transaction, from common.Address) {
a.builder.EnterTx(tx.Hash())
}

View file

@ -176,6 +176,12 @@ const (
BlockChainVersion uint64 = 9
)
const (
BALExecutionModeFull = 0
BALExecutionModeNoBatchIO = iota
BALExecutionModeSequential = iota
)
// BlockChainConfig contains the configuration of the BlockChain object.
type BlockChainConfig struct {
// Trie database related options
@ -231,6 +237,8 @@ type BlockChainConfig struct {
// SlowBlockThreshold is the block execution time threshold beyond which
// detailed statistics will be logged.
SlowBlockThreshold time.Duration
BALExecutionMode int
}
// DefaultConfig returns the default config.
@ -591,12 +599,13 @@ func (bc *BlockChain) processBlockWithAccessList(parentRoot common.Hash, block *
procTime time.Duration
)
reader, err := bc.statedb.Reader(parentRoot)
_, reader, err := bc.statedb.ReadersWithCacheStats(parentRoot)
if err != nil {
return nil, err
}
stateReader := state.NewBALReader(block, reader)
useAsyncReads := bc.cfg.BALExecutionMode != BALExecutionModeNoBatchIO
stateReader := state.NewBALReader(block, reader, useAsyncReads)
stateTransition, err := state.NewBALStateTransition(stateReader, bc.statedb, parentRoot)
if err != nil {
return nil, err
@ -631,7 +640,7 @@ func (bc *BlockChain) processBlockWithAccessList(parentRoot common.Hash, block *
}
procTime = time.Since(startTime)
writeStart := time.Now()
// Write the block to the chain and get the status.
var (
//wstart = time.Now()
@ -646,41 +655,52 @@ func (bc *BlockChain) processBlockWithAccessList(parentRoot common.Hash, block *
if err != nil {
return nil, err
}
writeTime := time.Since(writeStart)
var stats ExecuteStats
/*
// TODO: implement the gathering of this data
stats.AccountReads = statedb.AccountReads // Account reads are complete(in processing)
stats.StorageReads = statedb.StorageReads // Storage reads are complete(in processing)
stats.AccountUpdates = statedb.AccountUpdates // Account updates are complete(in validation)
stats.StorageUpdates = statedb.StorageUpdates // Storage updates are complete(in validation)
stats.AccountHashes = statedb.AccountHashes // Account hashes are complete(in validation)
stats.CodeReads = statedb.CodeReads
stats.AccountLoaded = statedb.AccountLoaded
stats.AccountUpdated = statedb.AccountUpdated
stats.AccountDeleted = statedb.AccountDeleted
stats.StorageLoaded = statedb.StorageLoaded
stats.StorageUpdated = int(statedb.StorageUpdated.Load())
stats.StorageDeleted = int(statedb.StorageDeleted.Load())
stats.CodeLoaded = statedb.CodeLoaded
stats.CodeLoadBytes = statedb.CodeLoadBytes
stats.Execution = ptime - (statedb.AccountReads + statedb.StorageReads + statedb.CodeReads) // The time spent on EVM processing
stats.Validation = vtime - (statedb.AccountHashes + statedb.AccountUpdates + statedb.StorageUpdates) // The time spent on block validation
*/
// Update the metrics touched during block commit
accountCommitTimer.Update(stateTransition.Metrics().AccountCommits) // Account commits are complete, we can mark them
storageCommitTimer.Update(stateTransition.Metrics().StorageCommits) // Storage commits are complete, we can mark them
snapshotCommitTimer.Update(stateTransition.Metrics().SnapshotCommits) // Snapshot commits are complete, we can mark them
triedbCommitTimer.Update(stateTransition.Metrics().TrieDBCommits) // Trie database commits are complete, we can mark them
stats.AccountCommits = stateTransition.Metrics().AccountCommits
stats.StorageCommits = stateTransition.Metrics().StorageCommits
stats.SnapshotCommit = stateTransition.Metrics().SnapshotCommits
stats.TrieDBCommit = stateTransition.Metrics().TrieDBCommits
stats.StateReadCacheStats = reader.GetStats()
// blockWriteTimer.Update(time.Since(wstart + max(stateTransition.Metrics().AccountCommits, stateTransition.Metrics().StorageCommits) /* concurrent */ statedb.SnapshotCommits + statedb.TrieDBCommits))
elapsed := time.Since(startTime) + 1 // prevent zero division
blockInsertTimer.Update(elapsed)
stats.TotalTime = elapsed
stats.MgasPerSecond = float64(res.ProcessResult.GasUsed) * 1000 / float64(elapsed)
stats.BlockWrite = writeTime
// TODO(rjl493456442) generalize the ResettingTimer
mgasps := float64(res.ProcessResult.GasUsed) * 1000 / float64(elapsed)
chainMgaspsMeter.Update(time.Duration(mgasps))
blockPreprocessingTimer.Update(res.PreProcessTime)
txExecutionTimer.Update(res.ExecTime)
// update the metrics from the block state root update
stateTriePrefetchTimer.Update(res.StateTransitionMetrics.StatePrefetch)
accountTriesUpdateTimer.Update(res.StateTransitionMetrics.AccountUpdate)
stateTrieUpdateTimer.Update(res.StateTransitionMetrics.StateUpdate)
stateTrieHashTimer.Update(res.StateTransitionMetrics.StateHash)
stateRootComputeTimer.Update(res.StateTransitionMetrics.AccountUpdate + res.StateTransitionMetrics.StateUpdate + res.StateTransitionMetrics.StateHash)
originStorageLoadTimer.Update(res.StateTransitionMetrics.OriginStorageLoadTime)
stateCommitTimer.Update(res.StateTransitionMetrics.TotalCommitTime)
blockPostprocessingTimer.Update(res.PostProcessTime)
stats.balTransitionStats = res.StateTransitionMetrics
return &blockProcessingResult{
usedGas: res.ProcessResult.GasUsed,
procTime: procTime,
status: status,
witness: nil,
stats: &ExecuteStats{}, // TODO: actually implement this in the future
stats: &stats,
}, nil
}
@ -2103,11 +2123,17 @@ func (bc *BlockChain) insertChain(chain types.Blocks, setHead bool, makeWitness
// The traced section of block import.
start := time.Now()
blockHasAccessList := block.AccessList() != nil
res, err := bc.ProcessBlock(parent.Root, block, setHead, makeWitness && len(chain) == 1)
if err != nil {
return nil, it.index, err
}
res.stats.reportMetrics()
if blockHasAccessList && bc.cfg.BALExecutionMode != BALExecutionModeSequential {
res.stats.reportBALMetrics()
} else {
res.stats.reportMetrics()
}
// Log slow block only if a single block is inserted (usually after the
// initial sync) to not overwhelm the users.
@ -2195,7 +2221,7 @@ func (bc *BlockChain) ProcessBlock(parentRoot common.Hash, block *types.Block, s
blockHasAccessList := block.AccessList() != nil
// optimized execution path for blocks which contain BALs
if blockHasAccessList {
if blockHasAccessList && bc.cfg.BALExecutionMode != BALExecutionModeSequential {
return bc.processBlockWithAccessList(parentRoot, block, setHead)
}

View file

@ -60,6 +60,9 @@ type ExecuteStats struct {
// Cache hit rates
StateReadCacheStats state.ReaderStats
StatePrefetchCacheStats state.ReaderStats
// Stats specific to BAL state update
balTransitionStats *state.BALStateTransitionMetrics
}
// reportMetrics uploads execution statistics to the metrics system.
@ -104,6 +107,56 @@ func (s *ExecuteStats) reportMetrics() {
storageCacheMissMeter.Mark(s.StateReadCacheStats.StorageCacheMiss)
}
func (s *ExecuteStats) reportBALMetrics() {
/*
if s.AccountLoaded != 0 {
accountReadTimer.Update(s.AccountReads)
accountReadSingleTimer.Update(s.AccountReads / time.Duration(s.AccountLoaded))
}
if s.StorageLoaded != 0 {
storageReadTimer.Update(s.StorageReads)
storageReadSingleTimer.Update(s.StorageReads / time.Duration(s.StorageLoaded))
}
if s.CodeLoaded != 0 {
codeReadTimer.Update(s.CodeReads)
codeReadSingleTimer.Update(s.CodeReads / time.Duration(s.CodeLoaded))
codeReadBytesTimer.Update(time.Duration(s.CodeLoadBytes))
}
// TODO: implement these ^
*/
//accountUpdateTimer.Update(s.AccountUpdates) // Account updates are complete(in validation)
//storageUpdateTimer.Update(s.StorageUpdates) // Storage updates are complete(in validation)
//accountHashTimer.Update(s.AccountHashes) // Account hashes are complete(in validation)
accountCommitTimer.Update(s.AccountCommits) // Account commits are complete, we can mark them
storageCommitTimer.Update(s.StorageCommits) // Storage commits are complete, we can mark them
stateTriePrefetchTimer.Update(s.balTransitionStats.StatePrefetch)
accountTriesUpdateTimer.Update(s.balTransitionStats.AccountUpdate)
stateTrieUpdateTimer.Update(s.balTransitionStats.StateUpdate)
stateTrieHashTimer.Update(s.balTransitionStats.StateHash)
stateRootComputeTimer.Update(s.balTransitionStats.AccountUpdate + s.balTransitionStats.StateUpdate + s.balTransitionStats.StateHash)
//blockExecutionTimer.Update(s.Execution) // The time spent on EVM processing
// ^basically impossible to get this metric with parallel execution
//blockValidationTimer.Update(s.Validation) // The time spent on block validation
//blockCrossValidationTimer.Update(s.CrossValidation) // The time spent on stateless cross validation
snapshotCommitTimer.Update(s.SnapshotCommit) // Snapshot commits are complete, we can mark them
triedbCommitTimer.Update(s.TrieDBCommit) // Trie database commits are complete, we can mark them
blockWriteTimer.Update(s.BlockWrite) // The time spent on block write
blockInsertTimer.Update(s.TotalTime) // The total time spent on block execution
chainMgaspsMeter.Update(time.Duration(s.MgasPerSecond)) // TODO(rjl493456442) generalize the ResettingTimer
// Cache hit rates
accountCacheHitMeter.Mark(s.StateReadCacheStats.AccountCacheHit)
accountCacheMissMeter.Mark(s.StateReadCacheStats.AccountCacheMiss)
storageCacheHitMeter.Mark(s.StateReadCacheStats.StorageCacheHit)
storageCacheMissMeter.Mark(s.StateReadCacheStats.StorageCacheMiss)
}
// logSlow prints the detailed execution statistics if the block is regarded as slow.
func (s *ExecuteStats) logSlow(block *types.Block, slowBlockThreshold time.Duration) {
if slowBlockThreshold == 0 {

View file

@ -248,16 +248,12 @@ type stateRootCalculationResult struct {
// calcAndVerifyRoot performs the post-state root hash calculation, verifying
// it against what is reported by the block and returning a result on resCh.
func (p *ParallelStateProcessor) calcAndVerifyRoot(preState *state.StateDB, block *types.Block, stateTransition *state.BALStateTransition, resCh chan stateRootCalculationResult) {
// calculate and apply the block state modifications
//root, prestateLoadTime, rootCalcTime := preState.BlockAccessList().StateRoot(preState)
root := stateTransition.IntermediateRoot(false)
res := stateRootCalculationResult{
// TODO: I think we can remove the root from this struct
metrics: stateTransition.Metrics(),
}
// TODO: validate state root in block validator?
if root != block.Root() {
res.err = fmt.Errorf("state root mismatch. local: %x. remote: %x", root, block.Root())
}
@ -328,7 +324,6 @@ func (p *ParallelStateProcessor) Process(block *types.Block, stateTransition *st
balTracer, hooks := NewBlockAccessListTracer()
tracingStateDB := state.NewHookedState(statedb, hooks)
// TODO: figure out exactly why we need to set the hooks on the TracingStateDB and the vm.Config
cfg.Tracer = hooks
context = NewEVMBlockContext(header, p.chain, nil)

View file

@ -9,6 +9,8 @@ import (
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/log"
"github.com/holiman/uint256"
"golang.org/x/sync/errgroup"
"maps"
"sync"
)
@ -22,38 +24,75 @@ import (
type prestateResolver struct {
inProgress map[common.Address]chan struct{}
resolved sync.Map
ctx context.Context
cancel func()
inProgressStorage map[common.Address]map[common.Hash]chan struct{}
resolvedStorage map[common.Address]*sync.Map
ctx context.Context
cancel func()
}
// schedule begins the retrieval of a set of state accounts running on
// a background goroutine.
func (p *prestateResolver) schedule(r Reader, addrs []common.Address) {
func (p *prestateResolver) schedule(r Reader, accounts []common.Address, storage map[common.Address][]common.Hash) {
p.inProgress = make(map[common.Address]chan struct{})
p.inProgressStorage = make(map[common.Address]map[common.Hash]chan struct{})
p.resolvedStorage = make(map[common.Address]*sync.Map)
p.ctx, p.cancel = context.WithCancel(context.Background())
for _, addr := range addrs {
var workers errgroup.Group
for _, addr := range accounts {
p.inProgress[addr] = make(chan struct{})
}
// TODO: probably we can retrieve these on a single go-routine
// the transaction execution will also load them
for _, addr := range addrs {
for addr, slots := range storage {
p.inProgressStorage[addr] = make(map[common.Hash]chan struct{})
for _, slot := range slots {
p.inProgressStorage[addr][slot] = make(chan struct{})
}
p.resolvedStorage[addr] = &sync.Map{}
}
for _, addr := range accounts {
resolveAddr := addr
go func() {
workers.Go(func() error {
select {
case <-p.ctx.Done():
return
return nil
default:
}
acct, err := r.Account(resolveAddr)
if err != nil {
// TODO: what do here?
return err
}
p.resolved.Store(resolveAddr, acct)
close(p.inProgress[resolveAddr])
}()
return nil
})
}
for addr, slots := range storage {
resolveAddr := addr
for _, s := range slots {
slot := s
workers.Go(func() error {
select {
case <-p.ctx.Done():
return nil
default:
}
value, err := r.Storage(resolveAddr, slot)
if err != nil {
// TODO: need to surface this error somehow so that execution can quit.
// right now, it's silently consumed because we don't block using workers.Wait() anywhere...
return err
}
p.resolvedStorage[resolveAddr].Store(slot, value)
close(p.inProgressStorage[resolveAddr][slot])
return nil
})
}
}
}
@ -61,8 +100,29 @@ func (p *prestateResolver) stop() {
p.cancel()
}
func (p *prestateResolver) storage(addr common.Address, key common.Hash) common.Hash {
return common.Hash{}
func (p *prestateResolver) storage(addr common.Address, key common.Hash) *common.Hash {
// check that the slot was actually scheduled
storages, ok := p.inProgressStorage[addr]
if !ok {
return nil
}
_, ok = storages[key]
if !ok {
return nil
}
// block if the value of the slot is still being fetched
select {
case <-p.inProgressStorage[addr][key]:
}
res, exist := p.resolvedStorage[addr].Load(key)
if !exist {
// storage was scheduled, attempted to retrieve, but not set.
// TODO: this is an error case that should be explicitly dealt with (the underlying reader failed to retrieve the storage slot)
return nil
}
hashRes := res.(common.Hash)
return &hashRes
}
// account returns the state account for the given address, blocking if it is
@ -128,15 +188,38 @@ type BALReader struct {
}
// NewBALReader constructs a new reader from an access list. db is expected to have been instantiated with a reader.
func NewBALReader(block *types.Block, reader Reader) *BALReader {
func NewBALReader(block *types.Block, reader Reader, useAsyncReads bool) *BALReader {
r := &BALReader{accesses: make(map[common.Address]*bal.AccountAccess), block: block}
finalIdx := len(block.Transactions()) + 1
for _, acctDiff := range *block.AccessList() {
r.accesses[acctDiff.Address] = &acctDiff
}
r.prestateReader.schedule(reader, r.ModifiedAccounts())
modifiedAccounts := r.ModifiedAccounts()
storage := make(map[common.Address][]common.Hash)
for _, addr := range modifiedAccounts {
diff := r.readAccountDiff(addr, finalIdx)
var scheduledStorageKeys []common.Hash
if len(diff.StorageWrites) > 0 {
writtenKeys := maps.Keys(diff.StorageWrites)
for key := range writtenKeys {
scheduledStorageKeys = append(scheduledStorageKeys, key)
}
}
if useAsyncReads {
scheduledStorageKeys = append(scheduledStorageKeys, r.accountStorageReads(addr)...)
}
if len(scheduledStorageKeys) > 0 {
storage[addr] = scheduledStorageKeys
}
}
r.prestateReader.schedule(reader, r.ModifiedAccounts(), storage)
return r
}
func (r *BALReader) Storage(addr common.Address, key common.Hash) *common.Hash {
return r.prestateReader.storage(addr, key)
}
// ModifiedAccounts returns a list of all accounts with mutations in the access list
func (r *BALReader) ModifiedAccounts() (res []common.Address) {
for addr, access := range r.accesses {
@ -307,6 +390,19 @@ func (r *BALReader) readAccount(db *StateDB, addr common.Address, idx int) *stat
return r.initObjFromDiff(db, addr, prestate, diff)
}
func (r *BALReader) accountStorageReads(addr common.Address) []common.Hash {
diff, exist := r.accesses[addr]
if !exist {
return []common.Hash{}
}
var reads []common.Hash
for _, key := range diff.StorageReads {
reads = append(reads, key.ToHash())
}
return reads
}
// readAccountDiff returns the accumulated state changes of an account up
// through, and including the given index.
func (r *BALReader) readAccountDiff(addr common.Address, idx int) *bal.AccountMutations {

View file

@ -39,9 +39,6 @@ type BALStateTransition struct {
tries sync.Map //map[common.Address]Trie
deletions map[common.Address]struct{}
originStorages map[common.Address]map[common.Hash]common.Hash
originStoragesWG sync.WaitGroup
accountDeleted int64
accountUpdated int64
storageDeleted atomic.Int64
@ -85,20 +82,18 @@ func NewBALStateTransition(accessList *BALReader, db Database, parentRoot common
}
return &BALStateTransition{
accessList: accessList,
db: db,
reader: reader,
stateTrie: stateTrie,
parentRoot: parentRoot,
rootHash: common.Hash{},
diffs: make(map[common.Address]*bal.AccountMutations),
prestates: sync.Map{},
postStates: make(map[common.Address]*types.StateAccount),
tries: sync.Map{},
deletions: make(map[common.Address]struct{}),
originStorages: make(map[common.Address]map[common.Hash]common.Hash),
originStoragesWG: sync.WaitGroup{},
stateUpdate: nil,
accessList: accessList,
db: db,
reader: reader,
stateTrie: stateTrie,
parentRoot: parentRoot,
rootHash: common.Hash{},
diffs: make(map[common.Address]*bal.AccountMutations),
prestates: sync.Map{},
postStates: make(map[common.Address]*types.StateAccount),
tries: sync.Map{},
deletions: make(map[common.Address]struct{}),
stateUpdate: nil,
}, nil
}
@ -204,10 +199,10 @@ func (s *BALStateTransition) commitAccount(addr common.Address) (*accountUpdate,
for key, value := range s.diffs[addr].StorageWrites {
hash := crypto.Keccak256Hash(key[:])
op.storages[hash] = encode(common.Hash(value))
origin := encode(s.originStorages[addr][common.Hash(key)])
op.storages[hash] = encode(value)
origin := encode(*s.accessList.Storage(addr, key))
op.storagesOriginByHash[hash] = origin
op.storagesOriginByKey[common.Hash(key)] = origin
op.storagesOriginByKey[key] = origin
}
tr, _ := s.tries.Load(addr)
root, nodes := tr.(Trie).Commit(false)
@ -305,8 +300,6 @@ func (s *BALStateTransition) CommitWithUpdate(block uint64, deleteEmptyObjects b
return nil
})
s.originStoragesWG.Wait()
// Schedule each of the storage tries that need to be updated, so they can
// run concurrently to one another.
//
@ -368,54 +361,6 @@ func (s *BALStateTransition) Commit(block uint64, deleteEmptyObjects bool, noSto
return root, err
}
func (s *BALStateTransition) loadOriginStorages() {
lastIdx := len(s.accessList.block.Transactions()) + 1
type originStorage struct {
address common.Address
key common.Hash
value common.Hash
}
originStoragesCh := make(chan *originStorage)
var pendingStorageCount int
for _, addr := range s.accessList.ModifiedAccounts() {
diff := s.accessList.readAccountDiff(addr, lastIdx)
pendingStorageCount += len(diff.StorageWrites)
s.originStorages[addr] = make(map[common.Hash]common.Hash)
for key := range diff.StorageWrites {
storageKey := key
go func() {
val, err := s.reader.Storage(addr, common.Hash(storageKey))
if err != nil {
s.setError(err)
return
}
originStoragesCh <- &originStorage{
addr,
common.Hash(storageKey),
val,
}
}()
}
}
if pendingStorageCount == 0 {
return
}
for {
select {
case acctStorage := <-originStoragesCh:
s.originStorages[acctStorage.address][acctStorage.key] = acctStorage.value
pendingStorageCount--
if pendingStorageCount == 0 {
return
}
}
}
}
// IntermediateRoot applies block state mutations and computes the updated state
// trie root.
func (s *BALStateTransition) IntermediateRoot(_ bool) common.Hash {
@ -425,7 +370,6 @@ func (s *BALStateTransition) IntermediateRoot(_ bool) common.Hash {
// State root calculation proceeds as follows:
// 1 (a): load the prestate state accounts for addresses which were modified in the block
// 1 (b): load the origin storage values for all slots which were modified during the block (this is needed for computing the stateUpdate)
// 1 (c): update each mutated account, producing the post-block state object by applying the state mutations to the prestate (retrieved in 1a).
// 1 (d): prefetch the intermediate trie nodes of the mutated state set from the account trie.
@ -437,14 +381,6 @@ func (s *BALStateTransition) IntermediateRoot(_ bool) common.Hash {
start := time.Now()
lastIdx := len(s.accessList.block.Transactions()) + 1
//1 (b): load the origin storage values for all slots which were modified during the block
s.originStoragesWG.Add(1)
go func() {
defer s.originStoragesWG.Done()
s.loadOriginStorages()
s.metrics.OriginStorageLoadTime = time.Since(start)
}()
var wg sync.WaitGroup
for _, addr := range s.accessList.ModifiedAccounts() {

View file

@ -23,7 +23,6 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/log"
"github.com/holiman/uint256"
"log/slog"
"maps"
"slices"
)
@ -40,18 +39,14 @@ type idxAccessListBuilder struct {
// and terminating a frame merges the accesses/modifications into the
// intermediate access list of the calling frame.
accessesStack []map[common.Address]*constructionAccountAccess
// context logger for instrumenting debug logging
logger *slog.Logger
}
func newAccessListBuilder(logger *slog.Logger) *idxAccessListBuilder {
func newAccessListBuilder() *idxAccessListBuilder {
return &idxAccessListBuilder{
make(map[common.Address]*accountIdxPrestate),
[]map[common.Address]*constructionAccountAccess{
make(map[common.Address]*constructionAccountAccess),
},
logger,
}
}
@ -239,7 +234,7 @@ func (a *idxAccessListBuilder) finalise() (*StateDiff, StateAccesses) {
}
func (c *AccessListBuilder) EnterTx(txHash common.Hash) {
c.idxBuilder = newAccessListBuilder(slog.New(slog.DiscardHandler))
c.idxBuilder = newAccessListBuilder()
}
// FinaliseIdxChanges records all pending state mutations/accesses in the
@ -247,7 +242,7 @@ func (c *AccessListBuilder) EnterTx(txHash common.Hash) {
// then emptied.
func (c *AccessListBuilder) FinaliseIdxChanges(idx uint16) {
pendingDiff, pendingAccesses := c.idxBuilder.finalise()
c.idxBuilder = newAccessListBuilder(slog.New(slog.DiscardHandler))
c.idxBuilder = newAccessListBuilder()
for addr, pendingAcctDiff := range pendingDiff.Mutations {
finalizedAcctChanges, ok := c.FinalizedAccesses[addr]
@ -382,6 +377,28 @@ type ConstructionAccountAccesses struct {
CodeChanges map[uint16]CodeChange
}
func (c *ConstructionAccountAccesses) Copy() (res ConstructionAccountAccesses) {
if c.StorageWrites != nil {
res.StorageWrites = make(map[common.Hash]map[uint16]common.Hash)
for slot, writes := range c.StorageWrites {
res.StorageWrites[slot] = maps.Clone(writes)
}
}
if c.StorageReads != nil {
res.StorageReads = maps.Clone(c.StorageReads)
}
if c.BalanceChanges != nil {
res.BalanceChanges = maps.Clone(c.BalanceChanges)
}
if c.NonceChanges != nil {
res.NonceChanges = maps.Clone(c.NonceChanges)
}
if c.CodeChanges != nil {
res.CodeChanges = maps.Clone(c.CodeChanges)
}
return res
}
// constructionAccountAccess contains fields for an account which were modified
// during execution of the current access list index.
// It also accumulates a set of storage slots which were accessed but not
@ -491,6 +508,15 @@ func (c *constructionAccountAccess) NonceChange(cur uint64) {
type ConstructionBlockAccessList map[common.Address]*ConstructionAccountAccesses
func (c ConstructionBlockAccessList) Copy() ConstructionBlockAccessList {
res := make(ConstructionBlockAccessList)
for addr, accountAccess := range c {
aaCopy := accountAccess.Copy()
res[addr] = &aaCopy
}
return res
}
// AccessListBuilder is used to build an EIP-7928 block access list
type AccessListBuilder struct {
FinalizedAccesses ConstructionBlockAccessList
@ -499,18 +525,46 @@ type AccessListBuilder struct {
lastFinalizedMutations *StateDiff
lastFinalizedAccesses StateAccesses
logger *slog.Logger
checkpointAccessList ConstructionBlockAccessList
checkpointMutations *StateDiff
checkpointAccesses StateAccesses
}
func (e *AccessListBuilder) Checkpoint() {
e.checkpointAccessList = e.FinalizedAccesses.Copy()
if e.lastFinalizedMutations != nil {
e.checkpointMutations = &StateDiff{
make(map[common.Address]*AccountMutations),
}
for addr, mut := range e.lastFinalizedMutations.Mutations {
e.checkpointMutations.Mutations[addr] = mut.Copy()
}
}
if e.lastFinalizedAccesses != nil {
e.checkpointAccesses = make(StateAccesses)
for addr, accesses := range e.lastFinalizedAccesses {
e.checkpointAccesses[addr] = maps.Clone(accesses)
}
}
}
func (e *AccessListBuilder) ResetToCheckpoint() {
e.FinalizedAccesses = e.checkpointAccessList
e.lastFinalizedAccesses = e.checkpointAccesses
e.lastFinalizedMutations = e.checkpointMutations
}
// NewAccessListBuilder instantiates an empty access list.
func NewAccessListBuilder() *AccessListBuilder {
logger := slog.New(slog.DiscardHandler)
return &AccessListBuilder{
make(map[common.Address]*ConstructionAccountAccesses),
newAccessListBuilder(logger),
newAccessListBuilder(),
nil,
nil,
nil,
nil,
nil,
logger,
}
}

View file

@ -23,6 +23,7 @@ import (
"encoding/json"
"errors"
"fmt"
"github.com/ethereum/go-ethereum/log"
"io"
"maps"
"slices"
@ -69,6 +70,15 @@ func (e *BlockAccessList) DecodeRLP(dec *rlp.Stream) error {
return nil
}
func (e *BlockAccessList) EncodedSize() int {
b, err := rlp.EncodeToBytes(e)
if err != nil {
// TODO: proper to crit here?
log.Crit("failed to rlp encode access list", "err", err)
}
return len(b)
}
func (e *BlockAccessList) JSONString() string {
res, _ := json.MarshalIndent(e.StringableRepresentation(), "", " ")
return string(res)

View file

@ -275,6 +275,7 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) {
overrides.OverrideVerkle = config.OverrideVerkle
}
options.Overrides = &overrides
options.BALExecutionMode = config.BALExecutionMode
eth.blockchain, err = core.NewBlockChain(chainDb, config.Genesis, eth.engine, options)
if err != nil {

View file

@ -88,10 +88,10 @@ func (q *bodyQueue) request(peer *peerConnection, req *fetchRequest, resCh chan
// deliver is responsible for taking a generic response packet from the concurrent
// fetcher, unpacking the body data and delivering it to the downloader's queue.
func (q *bodyQueue) deliver(peer *peerConnection, packet *eth.Response) (int, error) {
txs, uncles, withdrawals, accessLists := packet.Res.(*eth.BlockBodiesResponse).Unpack()
hashsets := packet.Meta.([][]common.Hash) // {txs hashes, uncle hashes, withdrawal hashes, access list hashes}
txs, uncles, withdrawals := packet.Res.(*eth.BlockBodiesResponse).Unpack()
hashsets := packet.Meta.([][]common.Hash) // {txs hashes, uncle hashes, withdrawal hashes}
accepted, err := q.queue.DeliverBodies(peer.id, txs, hashsets[0], uncles, hashsets[1], withdrawals, hashsets[2], accessLists, hashsets[3])
accepted, err := q.queue.DeliverBodies(peer.id, txs, hashsets[0], uncles, hashsets[1], withdrawals, hashsets[2])
switch {
case err == nil && len(txs) == 0:
peer.log.Trace("Requested bodies delivered")

View file

@ -22,7 +22,6 @@ package downloader
import (
"errors"
"fmt"
"github.com/ethereum/go-ethereum/core/types/bal"
"sync"
"sync/atomic"
"time"
@ -73,7 +72,6 @@ type fetchResult struct {
Transactions types.Transactions
Receipts rlp.RawValue
Withdrawals types.Withdrawals
AccessList *bal.BlockAccessList
}
func newFetchResult(header *types.Header, snapSync bool) *fetchResult {
@ -564,7 +562,6 @@ func (q *queue) expire(peer string, pendPool map[string]*fetchRequest, taskQueue
func (q *queue) DeliverBodies(id string, txLists [][]*types.Transaction, txListHashes []common.Hash,
uncleLists [][]*types.Header, uncleListHashes []common.Hash,
withdrawalLists [][]*types.Withdrawal, withdrawalListHashes []common.Hash,
blockAccessLists []*bal.BlockAccessList, accessListHashes []common.Hash,
) (int, error) {
q.lock.Lock()
defer q.lock.Unlock()
@ -589,19 +586,6 @@ func (q *queue) DeliverBodies(id string, txLists [][]*types.Transaction, txListH
return errInvalidBody
}
}
if header.BlockAccessListHash == nil {
// nil hash means that access list should not be present in body
if blockAccessLists[index] != nil {
return errInvalidBody
}
} else { // non-nil hash: body must have access list
if blockAccessLists[index] == nil {
return errInvalidBody
}
if accessListHashes[index] != *header.BlockAccessListHash {
return errInvalidBody
}
}
// Blocks must have a number of blobs corresponding to the header gas usage,
// and zero before the Cancun hardfork.
var blobs int
@ -640,7 +624,6 @@ func (q *queue) DeliverBodies(id string, txLists [][]*types.Transaction, txListH
result.Transactions = txLists[index]
result.Uncles = uncleLists[index]
result.Withdrawals = withdrawalLists[index]
result.AccessList = blockAccessLists[index]
result.SetBodyDone()
}
return q.deliver(id, q.blockTaskPool, q.blockTaskQueue, q.blockPendPool,

View file

@ -208,6 +208,8 @@ type Config struct {
// RangeLimit restricts the maximum range (end - start) for range queries.
RangeLimit uint64 `toml:",omitempty"`
BALExecutionMode int
}
// CreateConsensusEngine creates a consensus engine for the given chain config.

View file

@ -381,7 +381,6 @@ func handleBlockBodies(backend Backend, msg Decoder, peer *Peer) error {
txsHashes = make([]common.Hash, len(res.BlockBodiesResponse))
uncleHashes = make([]common.Hash, len(res.BlockBodiesResponse))
withdrawalHashes = make([]common.Hash, len(res.BlockBodiesResponse))
accessListHashes = make([]common.Hash, len(res.BlockBodiesResponse))
)
hasher := trie.NewStackTrie(nil)
for i, body := range res.BlockBodiesResponse {
@ -390,11 +389,8 @@ func handleBlockBodies(backend Backend, msg Decoder, peer *Peer) error {
if body.Withdrawals != nil {
withdrawalHashes[i] = types.DeriveSha(types.Withdrawals(body.Withdrawals), hasher)
}
if body.AccessList != nil {
accessListHashes[i] = body.AccessList.Hash()
}
}
return [][]common.Hash{txsHashes, uncleHashes, withdrawalHashes, accessListHashes}
return [][]common.Hash{txsHashes, uncleHashes, withdrawalHashes}
}
return peer.dispatchResponse(&Response{
id: res.RequestId,

View file

@ -19,7 +19,6 @@ package eth
import (
"errors"
"fmt"
"github.com/ethereum/go-ethereum/core/types/bal"
"io"
"math/big"
@ -240,22 +239,20 @@ type BlockBody struct {
Transactions []*types.Transaction // Transactions contained within a block
Uncles []*types.Header // Uncles contained within a block
Withdrawals []*types.Withdrawal `rlp:"optional"` // Withdrawals contained within a block
AccessList *bal.BlockAccessList `rlp:"optional"`
}
// Unpack retrieves the transactions and uncles from the range packet and returns
// them in a split flat format that's more consistent with the internal data structures.
func (p *BlockBodiesResponse) Unpack() ([][]*types.Transaction, [][]*types.Header, [][]*types.Withdrawal, []*bal.BlockAccessList) {
func (p *BlockBodiesResponse) Unpack() ([][]*types.Transaction, [][]*types.Header, [][]*types.Withdrawal) {
var (
txset = make([][]*types.Transaction, len(*p))
uncleset = make([][]*types.Header, len(*p))
withdrawalset = make([][]*types.Withdrawal, len(*p))
accessListSet = make([]*bal.BlockAccessList, len(*p))
)
for i, body := range *p {
txset[i], uncleset[i], withdrawalset[i], accessListSet[i] = body.Transactions, body.Uncles, body.Withdrawals, body.AccessList
txset[i], uncleset[i], withdrawalset[i] = body.Transactions, body.Uncles, body.Withdrawals
}
return txset, uncleset, withdrawalset, accessListSet
return txset, uncleset, withdrawalset
}
// GetReceiptsRequest represents a block receipts query.

View file

@ -335,14 +335,28 @@ func (miner *Miner) makeEnv(parent *types.Header, header *types.Header, coinbase
}, nil
}
func (miner *Miner) commitTransaction(env *environment, tx *types.Transaction) error {
var (
errAccessListOversized = errors.New("access list oversized")
)
func (miner *Miner) commitTransaction(env *environment, tx *types.Transaction) (err error) {
if tx.Type() == types.BlobTxType {
return miner.commitBlobTransaction(env, tx)
}
if env.alTracer != nil {
env.alTracer.Checkpoint()
defer func() {
if err != nil {
env.alTracer.ResetToCheckpoint()
}
}()
}
receipt, err := miner.applyTransaction(env, tx)
if err != nil {
return err
}
env.txs = append(env.txs, tx)
env.receipts = append(env.receipts, receipt)
env.size += tx.Size()
@ -350,7 +364,7 @@ func (miner *Miner) commitTransaction(env *environment, tx *types.Transaction) e
return nil
}
func (miner *Miner) commitBlobTransaction(env *environment, tx *types.Transaction) error {
func (miner *Miner) commitBlobTransaction(env *environment, tx *types.Transaction) (err error) {
sc := tx.BlobTxSidecar()
if sc == nil {
panic("blob transaction without blobs in miner")
@ -363,6 +377,14 @@ func (miner *Miner) commitBlobTransaction(env *environment, tx *types.Transactio
if env.blobs+len(sc.Blobs) > maxBlobs {
return errors.New("max data blobs reached")
}
if env.alTracer != nil {
env.alTracer.Checkpoint()
defer func() {
if err != nil {
env.alTracer.ResetToCheckpoint()
}
}()
}
receipt, err := miner.applyTransaction(env, tx)
if err != nil {
return err
@ -384,12 +406,19 @@ func (miner *Miner) applyTransaction(env *environment, tx *types.Transaction) (*
snap = env.state.Snapshot()
gp = env.gasPool.Gas()
)
receipt, cumulativeGas, err := core.ApplyTransaction(env.evm, env.gasPool, env.state, env.header, tx, env.cumulativeGas)
if err != nil {
env.state.RevertToSnapshot(snap)
env.gasPool.SetGas(gp)
return nil, err
}
isOversizedAccessList := env.alTracer != nil && env.size+tx.Size()+uint64(env.alTracer.AccessList().ToEncodingObj().EncodedSize()) >= params.MaxBlockSize-maxBlockSizeBufferZone
if isOversizedAccessList {
env.state.RevertToSnapshot(snap)
env.gasPool.SetGas(gp)
return nil, errAccessListOversized
}
env.cumulativeGas = cumulativeGas
env.header.GasUsed += receipt.GasUsed
return receipt, nil
@ -403,6 +432,7 @@ func (miner *Miner) commitTransactions(env *environment, plainTxs, blobTxs *tran
if env.gasPool == nil {
env.gasPool = new(core.GasPool).AddGas(gasLimit)
}
loop:
for {
// Check interruption signal and abort building if it's fired.
if interrupt != nil {
@ -501,7 +531,12 @@ func (miner *Miner) commitTransactions(env *environment, plainTxs, blobTxs *tran
case errors.Is(err, nil):
// Everything ok, collect the logs and shift in the next transaction from the same account
txs.Shift()
case errors.Is(err, errAccessListOversized):
// Transaction can't be applied because it would cause the block to be oversized due to the
// contribution of the state accesses/modifications it makes.
// terminate the payload construction as it's not guaranteed we will be able to find a transaction
// that can fit in a short amount of time.
break loop
default:
// Transaction is regarded as invalid, drop all consecutive transactions from
// the same sender because of `nonce-too-high` clause.

View file

@ -178,9 +178,11 @@ func execBlockTest(t *testing.T, bt *testMatcher, test *BlockTest, buildAndVerif
for _, dbscheme := range dbschemeConf {
if err := bt.checkFailure(t, test.Run(snapshot, dbscheme, true, buildAndVerifyBAL, nil, nil)); err != nil {
failures++
if failures > 10 {
panic("adsf")
}
/*
if failures > 10 {
panic("adsf")
}
*/
t.Errorf("test with config {snapshotter:%v, scheme:%v} failed: %v", snapshot, dbscheme, err)
return
}

View file

@ -226,7 +226,7 @@ func (t *BlockTest) Run(snapshotter bool, scheme string, witness bool, createAnd
var blocksWithBAL types.Blocks
for i := uint64(1); i <= chain.CurrentBlock().Number.Uint64(); i++ {
block := chain.GetBlockByNumber(i)
if block.AccessList() == nil {
if chain.Config().IsAmsterdam(block.Number(), block.Time()) && block.AccessList() == nil {
return fmt.Errorf("block %d missing BAL", block.NumberU64())
}
blocksWithBAL = append(blocksWithBAL, block)