all: add block access list construction via flag --experimental.bal. When enabled, post-Cancun blocks which lack access lists will have them constructed on execution during import. When importing blocks which contain access lists, transaction execution and state root calculation are performed in parallel.

This commit is contained in:
Jared Wasinger 2025-09-29 14:44:19 +08:00
parent 0a2c21acd5
commit b33cf06ce7
103 changed files with 13473 additions and 835 deletions

View file

@ -10,6 +10,7 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/types/bal"
)
var _ = (*executableDataMarshaling)(nil)
@ -34,6 +35,7 @@ func (e ExecutableData) MarshalJSON() ([]byte, error) {
Withdrawals []*types.Withdrawal `json:"withdrawals"`
BlobGasUsed *hexutil.Uint64 `json:"blobGasUsed"`
ExcessBlobGas *hexutil.Uint64 `json:"excessBlobGas"`
BlockAccessList *bal.BlockAccessList `json:"blockAccessList"`
ExecutionWitness *types.ExecutionWitness `json:"executionWitness,omitempty"`
}
var enc ExecutableData
@ -59,6 +61,7 @@ func (e ExecutableData) MarshalJSON() ([]byte, error) {
enc.Withdrawals = e.Withdrawals
enc.BlobGasUsed = (*hexutil.Uint64)(e.BlobGasUsed)
enc.ExcessBlobGas = (*hexutil.Uint64)(e.ExcessBlobGas)
enc.BlockAccessList = e.BlockAccessList
enc.ExecutionWitness = e.ExecutionWitness
return json.Marshal(&enc)
}
@ -83,6 +86,7 @@ func (e *ExecutableData) UnmarshalJSON(input []byte) error {
Withdrawals []*types.Withdrawal `json:"withdrawals"`
BlobGasUsed *hexutil.Uint64 `json:"blobGasUsed"`
ExcessBlobGas *hexutil.Uint64 `json:"excessBlobGas"`
BlockAccessList *bal.BlockAccessList `json:"blockAccessList"`
ExecutionWitness *types.ExecutionWitness `json:"executionWitness,omitempty"`
}
var dec ExecutableData
@ -157,6 +161,9 @@ func (e *ExecutableData) UnmarshalJSON(input []byte) error {
if dec.ExcessBlobGas != nil {
e.ExcessBlobGas = (*uint64)(dec.ExcessBlobGas)
}
if dec.BlockAccessList != nil {
e.BlockAccessList = dec.BlockAccessList
}
if dec.ExecutionWitness != nil {
e.ExecutionWitness = dec.ExecutionWitness
}

View file

@ -18,6 +18,7 @@ package engine
import (
"fmt"
"github.com/ethereum/go-ethereum/core/types/bal"
"math/big"
"slices"
@ -50,6 +51,7 @@ var (
// ExecutionPayloadV3 has the syntax of ExecutionPayloadV2 and appends the new
// fields: blobGasUsed and excessBlobGas.
PayloadV3 PayloadVersion = 0x3
PayloadV4 PayloadVersion = 0x4
)
//go:generate go run github.com/fjl/gencodec -type PayloadAttributes -field-override payloadAttributesMarshaling -out gen_blockparams.go
@ -90,6 +92,7 @@ type ExecutableData struct {
Withdrawals []*types.Withdrawal `json:"withdrawals"`
BlobGasUsed *uint64 `json:"blobGasUsed"`
ExcessBlobGas *uint64 `json:"excessBlobGas"`
BlockAccessList *bal.BlockAccessList `json:"blockAccessList"`
ExecutionWitness *types.ExecutionWitness `json:"executionWitness,omitempty"`
}
@ -293,30 +296,39 @@ func ExecutableDataToBlockNoHash(data ExecutableData, versionedHashes []common.H
requestsHash = &h
}
var blockAccessListHash *common.Hash
body := types.Body{Transactions: txs, Uncles: nil, Withdrawals: data.Withdrawals}
if data.BlockAccessList != nil {
body.AccessList = data.BlockAccessList
balHash := data.BlockAccessList.Hash()
blockAccessListHash = &balHash
}
header := &types.Header{
ParentHash: data.ParentHash,
UncleHash: types.EmptyUncleHash,
Coinbase: data.FeeRecipient,
Root: data.StateRoot,
TxHash: types.DeriveSha(types.Transactions(txs), trie.NewStackTrie(nil)),
ReceiptHash: data.ReceiptsRoot,
Bloom: types.BytesToBloom(data.LogsBloom),
Difficulty: common.Big0,
Number: new(big.Int).SetUint64(data.Number),
GasLimit: data.GasLimit,
GasUsed: data.GasUsed,
Time: data.Timestamp,
BaseFee: data.BaseFeePerGas,
Extra: data.ExtraData,
MixDigest: data.Random,
WithdrawalsHash: withdrawalsRoot,
ExcessBlobGas: data.ExcessBlobGas,
BlobGasUsed: data.BlobGasUsed,
ParentBeaconRoot: beaconRoot,
RequestsHash: requestsHash,
ParentHash: data.ParentHash,
UncleHash: types.EmptyUncleHash,
Coinbase: data.FeeRecipient,
Root: data.StateRoot,
TxHash: types.DeriveSha(types.Transactions(txs), trie.NewStackTrie(nil)),
ReceiptHash: data.ReceiptsRoot,
Bloom: types.BytesToBloom(data.LogsBloom),
Difficulty: common.Big0,
Number: new(big.Int).SetUint64(data.Number),
GasLimit: data.GasLimit,
GasUsed: data.GasUsed,
Time: data.Timestamp,
BaseFee: data.BaseFeePerGas,
Extra: data.ExtraData,
MixDigest: data.Random,
WithdrawalsHash: withdrawalsRoot,
ExcessBlobGas: data.ExcessBlobGas,
BlobGasUsed: data.BlobGasUsed,
ParentBeaconRoot: beaconRoot,
RequestsHash: requestsHash,
BlockAccessListHash: blockAccessListHash,
}
return types.NewBlockWithHeader(header).
WithBody(types.Body{Transactions: txs, Uncles: nil, Withdrawals: data.Withdrawals}).
WithBody(body).
WithWitness(data.ExecutionWitness),
nil
}
@ -343,6 +355,7 @@ func BlockToExecutableData(block *types.Block, fees *big.Int, sidecars []*types.
BlobGasUsed: block.BlobGasUsed(),
ExcessBlobGas: block.ExcessBlobGas(),
ExecutionWitness: block.ExecutionWitness(),
BlockAccessList: block.Body().AccessList,
}
// Add blobs.

View file

@ -89,7 +89,7 @@ func runBlockTest(ctx *cli.Context, fname string) ([]testResult, error) {
continue
}
result := &testResult{Name: name, Pass: true}
if err := tests[name].Run(false, rawdb.PathScheme, ctx.Bool(WitnessCrossCheckFlag.Name), tracer, func(res error, chain *core.BlockChain) {
if err := tests[name].Run(false, rawdb.PathScheme, ctx.Bool(WitnessCrossCheckFlag.Name), false, tracer, func(res error, chain *core.BlockChain) {
if ctx.Bool(DumpFlag.Name) {
if s, _ := chain.State(); s != nil {
result.State = dump(s)

View file

@ -316,11 +316,11 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig,
return nil, nil, nil, NewError(ErrorEVM, fmt.Errorf("could not parse requests logs: %v", err))
}
// EIP-7002
if err := core.ProcessWithdrawalQueue(&requests, evm); err != nil {
if _, _, err := core.ProcessWithdrawalQueue(&requests, evm); err != nil {
return nil, nil, nil, NewError(ErrorEVM, fmt.Errorf("could not process withdrawal requests: %v", err))
}
// EIP-7251
if err := core.ProcessConsolidationQueue(&requests, evm); err != nil {
if _, _, err := core.ProcessConsolidationQueue(&requests, evm); err != nil {
return nil, nil, nil, NewError(ErrorEVM, fmt.Errorf("could not process consolidation requests: %v", err))
}
}

View file

@ -316,7 +316,7 @@ func runCmd(ctx *cli.Context) error {
input = append(code, input...)
execFunc = func() ([]byte, uint64, error) {
// don't mutate the state!
runtimeConfig.State = prestate.Copy()
runtimeConfig.State = prestate.Copy().(*state.StateDB)
output, _, gasLeft, err := runtime.Create(input, &runtimeConfig)
return output, gasLeft, err
}
@ -326,7 +326,7 @@ func runCmd(ctx *cli.Context) error {
}
execFunc = func() ([]byte, uint64, error) {
// don't mutate the state!
runtimeConfig.State = prestate.Copy()
runtimeConfig.State = prestate.Copy().(*state.StateDB)
output, gasLeft, err := runtime.Call(receiver, input, &runtimeConfig)
return output, initialGas - gasLeft, err
}

View file

@ -155,6 +155,7 @@ var (
utils.BeaconGenesisTimeFlag,
utils.BeaconCheckpointFlag,
utils.BeaconCheckpointFileFlag,
utils.ExperimentalBALFlag,
}, utils.NetworkFlags, utils.DatabaseFlags)
rpcFlags = []cli.Flag{

View file

@ -1009,6 +1009,14 @@ Please note that --` + MetricsHTTPFlag.Name + ` must be set to start the server.
Value: metrics.DefaultConfig.InfluxDBOrganization,
Category: flags.MetricsCategory,
}
// Block Access List flags
ExperimentalBALFlag = &cli.BoolFlag{
Name: "experimental.bal",
Usage: "Enable block-access-list building when importing post-Cancun blocks, and validation that access lists contained in post-Cancun blocks correctly correspond to the state changes in those blocks. This is used for development purposes only. Do not enable it otherwise.",
Category: flags.MiscCategory,
}
)
var (
@ -1917,6 +1925,8 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) {
cfg.VMTraceJsonConfig = ctx.String(VMTraceJsonConfigFlag.Name)
}
}
cfg.ExperimentalBAL = ctx.Bool(ExperimentalBALFlag.Name)
}
// MakeBeaconLightConfig constructs a beacon light client config based on the
@ -2319,6 +2329,7 @@ func MakeChain(ctx *cli.Context, stack *node.Node, readonly bool) (*core.BlockCh
}
options.VmConfig = vmcfg
options.EnableBALForTesting = ctx.Bool(ExperimentalBALFlag.Name)
chain, err := core.NewBlockChain(chainDb, gspec, engine, options)
if err != nil {
Fatalf("Can't create BlockChain: %v", err)

View file

@ -272,6 +272,14 @@ func (beacon *Beacon) verifyHeader(chain consensus.ChainHeaderReader, header, pa
return err
}
}
if chain.Config().IsAmsterdam(header.Number, header.Time) {
if header.BlockAccessListHash == nil {
// TODO: (self study). figure out if any of the header checks here overlap with the
// other location that I validate the block access list hash.
return fmt.Errorf("block access list hash must be set in header post-Amsterdam")
}
}
return nil
}
@ -343,9 +351,9 @@ func (beacon *Beacon) Finalize(chain consensus.ChainHeaderReader, header *types.
// FinalizeAndAssemble implements consensus.Engine, setting the final state and
// assembling the block.
func (beacon *Beacon) FinalizeAndAssemble(chain consensus.ChainHeaderReader, header *types.Header, state *state.StateDB, body *types.Body, receipts []*types.Receipt) (*types.Block, error) {
func (beacon *Beacon) FinalizeAndAssemble(chain consensus.ChainHeaderReader, header *types.Header, state *state.StateDB, body *types.Body, receipts []*types.Receipt, onFinalization func()) (*types.Block, error) {
if !beacon.IsPoSHeader(header) {
return beacon.ethone.FinalizeAndAssemble(chain, header, state, body, receipts)
return beacon.ethone.FinalizeAndAssemble(chain, header, state, body, receipts, onFinalization)
}
shanghai := chain.Config().IsShanghai(header.Number, header.Time)
if shanghai {
@ -364,6 +372,10 @@ func (beacon *Beacon) FinalizeAndAssemble(chain consensus.ChainHeaderReader, hea
// Assign the final state root to header.
header.Root = state.IntermediateRoot(true)
if onFinalization != nil {
onFinalization()
}
// Assemble the final block.
block := types.NewBlock(header, body, receipts, trie.NewStackTrie(nil))

View file

@ -579,7 +579,7 @@ func (c *Clique) Finalize(chain consensus.ChainHeaderReader, header *types.Heade
// FinalizeAndAssemble implements consensus.Engine, ensuring no uncles are set,
// nor block rewards given, and returns the final block.
func (c *Clique) FinalizeAndAssemble(chain consensus.ChainHeaderReader, header *types.Header, state *state.StateDB, body *types.Body, receipts []*types.Receipt) (*types.Block, error) {
func (c *Clique) FinalizeAndAssemble(chain consensus.ChainHeaderReader, header *types.Header, state *state.StateDB, body *types.Body, receipts []*types.Receipt, onFinalize func()) (*types.Block, error) {
if len(body.Withdrawals) > 0 {
return nil, errors.New("clique does not support withdrawals")
}
@ -589,6 +589,10 @@ func (c *Clique) FinalizeAndAssemble(chain consensus.ChainHeaderReader, header *
// Assign the final state root to header.
header.Root = state.IntermediateRoot(chain.Config().IsEIP158(header.Number))
if onFinalize != nil {
onFinalize()
}
// Assemble and return the final block for sealing.
return types.NewBlock(header, &types.Body{Transactions: body.Transactions}, receipts, trie.NewStackTrie(nil)), nil
}

View file

@ -92,7 +92,7 @@ type Engine interface {
//
// Note: The block header and state database might be updated to reflect any
// consensus rules that happen at finalization (e.g. block rewards).
FinalizeAndAssemble(chain ChainHeaderReader, header *types.Header, state *state.StateDB, body *types.Body, receipts []*types.Receipt) (*types.Block, error)
FinalizeAndAssemble(chain ChainHeaderReader, header *types.Header, state *state.StateDB, body *types.Body, receipts []*types.Receipt, onFinalization func()) (*types.Block, error)
// Seal generates a new sealing request for the given input block and pushes
// the result into the given channel.

View file

@ -511,7 +511,7 @@ func (ethash *Ethash) Finalize(chain consensus.ChainHeaderReader, header *types.
// FinalizeAndAssemble implements consensus.Engine, accumulating the block and
// uncle rewards, setting the final state and assembling the block.
func (ethash *Ethash) FinalizeAndAssemble(chain consensus.ChainHeaderReader, header *types.Header, state *state.StateDB, body *types.Body, receipts []*types.Receipt) (*types.Block, error) {
func (ethash *Ethash) FinalizeAndAssemble(chain consensus.ChainHeaderReader, header *types.Header, state *state.StateDB, body *types.Body, receipts []*types.Receipt, onFinalize func()) (*types.Block, error) {
if len(body.Withdrawals) > 0 {
return nil, errors.New("ethash does not support withdrawals")
}
@ -521,6 +521,9 @@ func (ethash *Ethash) FinalizeAndAssemble(chain consensus.ChainHeaderReader, hea
// Assign the final state root to header.
header.Root = state.IntermediateRoot(chain.Config().IsEIP158(header.Number))
if onFinalize != nil {
onFinalize()
}
// Header seems complete, assemble into a block and return
return types.NewBlock(header, &types.Body{Transactions: body.Transactions, Uncles: body.Uncles}, receipts, trie.NewStackTrie(nil)), nil
}

View file

@ -63,6 +63,7 @@ func latestBlobConfig(cfg *params.ChainConfig, time uint64) *BlobConfig {
bc *params.BlobConfig
)
switch {
case cfg.IsBPO5(london, time) && s.BPO5 != nil:
bc = s.BPO5
case cfg.IsBPO4(london, time) && s.BPO4 != nil:
@ -73,6 +74,8 @@ func latestBlobConfig(cfg *params.ChainConfig, time uint64) *BlobConfig {
bc = s.BPO2
case cfg.IsBPO1(london, time) && s.BPO1 != nil:
bc = s.BPO1
case cfg.IsAmsterdam(london, time) && s.Amsterdam != nil:
bc = s.BPO1
case cfg.IsOsaka(london, time) && s.Osaka != nil:
bc = s.Osaka
case cfg.IsPrague(london, time) && s.Prague != nil:

View file

@ -0,0 +1,140 @@
package core
import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/tracing"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/types/bal"
"github.com/holiman/uint256"
"math/big"
)
// accountPrestate holds an account's pre-execution values (balance, nonce,
// and code).
// NOTE(review): not referenced anywhere in this file — confirm it is used
// elsewhere in the package or remove it.
type accountPrestate struct {
	balance *uint256.Int
	nonce   *uint64
	code    []byte
}
// BlockAccessListTracer constructs an EIP-7928 block access list from the
// execution of a block.
type BlockAccessListTracer struct {
	// blockTxCount is not read or written anywhere in this file.
	// NOTE(review): confirm it is needed, or remove it.
	blockTxCount int

	// accessList accumulates the finalised per-index changes for the whole
	// block; see Apply calls in OnPreTxExecutionDone/TxEndHook/OnBlockFinalization.
	accessList *bal.ConstructionBlockAccessList

	// balIdx is the access-list index currently being recorded. It is advanced
	// after the pre-transaction system operations and after each transaction.
	balIdx uint16

	// accessListBuilder collects the reads/writes of the current index and is
	// replaced with a fresh builder each time an index is finalised.
	accessListBuilder *bal.AccessListBuilder

	// mutations and state reads from the most recently finalised bal index
	idxMutations *bal.StateDiff
	idxReads     bal.StateAccesses
}
// NewBlockAccessListTracer returns a BlockAccessListTracer together with the
// journal-wrapped tracing hooks that drive it.
//
// NOTE(review): startIdx is currently ignored — balIdx always starts at 0
// (the assignment below is commented out). Confirm whether any caller relies
// on a non-zero starting index.
func NewBlockAccessListTracer(startIdx int) (*BlockAccessListTracer, *tracing.Hooks) {
	tracer := &BlockAccessListTracer{
		accessList:        bal.NewConstructionBlockAccessList(),
		accessListBuilder: bal.NewAccessListBuilder(),
		//balIdx: uint16(startIdx),
	}
	hooks := &tracing.Hooks{
		OnBlockFinalization:  tracer.OnBlockFinalization,
		OnPreTxExecutionDone: tracer.OnPreTxExecutionDone,
		OnTxEnd:              tracer.TxEndHook,
		OnEnter:              tracer.OnEnter,
		OnExit:               tracer.OnExit,
		OnCodeChangeV2:       tracer.OnCodeChange,
		OnBalanceChange:      tracer.OnBalanceChange,
		OnNonceChangeV2:      tracer.OnNonceChange,
		OnStorageChange:      tracer.OnStorageChange,
		OnColdAccountRead:    tracer.OnColdAccountRead,
		OnColdStorageRead:    tracer.OnColdStorageRead,
		OnSelfDestructChange: tracer.OnSelfDestruct,
	}
	journaled, err := tracing.WrapWithJournal(hooks)
	if err != nil {
		// A failure here means the hook set itself is malformed — a programmer
		// error rather than a runtime condition.
		panic(err) // TODO: consider returning the error instead
	}
	return tracer, journaled
}
// AccessList returns the constructed access list.
// It is assumed that this is only called after all the block state changes
// have been executed and the block has been finalized (i.e. after
// OnBlockFinalization has run); the returned value is not a copy.
func (a *BlockAccessListTracer) AccessList() *bal.ConstructionBlockAccessList {
	return a.accessList
}
// OnPreTxExecutionDone finalises the state changes recorded before any
// transaction executed, stores them at access-list index 0, and resets the
// builder so index 1 starts clean for the first transaction.
func (a *BlockAccessListTracer) OnPreTxExecutionDone() {
	a.idxMutations, a.idxReads = a.accessListBuilder.FinaliseIdxChanges()
	a.accessList.Apply(0, a.idxMutations, a.idxReads)
	a.accessListBuilder = bal.NewAccessListBuilder()
	a.balIdx++
}
// TODO: I don't like that AccessList and this do slightly different things,
// and that they mutate the access list builder (not apparent in the naming of the methods)
//
// ^ idea: add Finalize() which returns the diff/accesses, also accumulating them in the BAL.
// AccessList just returns the constructed BAL.

// IdxChanges returns the mutations and reads captured for the most recently
// finalised access-list index. This method itself does not mutate the
// builder; the fields are set by OnPreTxExecutionDone/TxEndHook/OnBlockFinalization.
func (a *BlockAccessListTracer) IdxChanges() (*bal.StateDiff, bal.StateAccesses) {
	return a.idxMutations, a.idxReads
}
// TxEndHook finalises the access-list entry for the transaction that just
// completed, folds it into the block access list at the current index, and
// advances to the next index with a fresh builder.
// The receipt and error arguments are unused.
func (a *BlockAccessListTracer) TxEndHook(receipt *types.Receipt, err error) {
	a.idxMutations, a.idxReads = a.accessListBuilder.FinaliseIdxChanges()
	a.accessList.Apply(a.balIdx, a.idxMutations, a.idxReads)
	a.accessListBuilder = bal.NewAccessListBuilder()
	a.balIdx++
}
// OnEnter is invoked when a new call scope begins; it pushes a scope onto the
// builder. Paired with OnExit, which pops the scope and reports whether it
// reverted. All other arguments are unused.
func (a *BlockAccessListTracer) OnEnter(depth int, typ byte, from common.Address, to common.Address, input []byte, gas uint64, value *big.Int) {
	a.accessListBuilder.EnterScope()
}
// OnExit is invoked when a call scope terminates; it pops the scope from the
// builder, passing along whether the scope reverted so the builder can decide
// how to treat the scope's changes. The other arguments are unused.
func (a *BlockAccessListTracer) OnExit(depth int, output []byte, gasUsed uint64, err error, reverted bool) {
	a.accessListBuilder.ExitScope(reverted)
}
// OnCodeChange records a contract code change in the builder. Invocations
// where the code hash did not actually change are ignored.
func (a *BlockAccessListTracer) OnCodeChange(addr common.Address, prevCodeHash common.Hash, prevCode []byte, codeHash common.Hash, code []byte, reason tracing.CodeChangeReason) {
	// TODO: if we don't have this equality check, some tests fail. should be investigated.
	// probably the tracer shouldn't invoke code change if the code didn't actually change tho.
	if prevCodeHash != codeHash {
		a.accessListBuilder.CodeChange(addr, prevCode, code)
	}
}
// OnSelfDestruct records the self-destruction of addr in the builder.
func (a *BlockAccessListTracer) OnSelfDestruct(addr common.Address) {
	a.accessListBuilder.SelfDestruct(addr)
}
// OnBlockFinalization finalises the state changes made after the last
// transaction (block finalisation) and folds them into the access list at the
// current index. balIdx is intentionally not advanced: no further entries
// follow within this block.
func (a *BlockAccessListTracer) OnBlockFinalization() {
	a.idxMutations, a.idxReads = a.accessListBuilder.FinaliseIdxChanges()
	a.accessList.Apply(a.balIdx, a.idxMutations, a.idxReads)
	a.accessListBuilder = bal.NewAccessListBuilder()
}
// OnBalanceChange records a balance change for addr in the builder. The
// *big.Int values supplied by the tracing hooks are converted to uint256 via
// their big-endian byte representation; the change reason is unused.
func (a *BlockAccessListTracer) OnBalanceChange(addr common.Address, prevBalance, newBalance *big.Int, _ tracing.BalanceChangeReason) {
	var before, after uint256.Int
	before.SetBytes(prevBalance.Bytes())
	after.SetBytes(newBalance.Bytes())
	a.accessListBuilder.BalanceChange(addr, &before, &after)
}
// OnNonceChange records a nonce change for addr in the builder.
// The reason argument is unused.
func (a *BlockAccessListTracer) OnNonceChange(addr common.Address, prev uint64, new uint64, reason tracing.NonceChangeReason) {
	a.accessListBuilder.NonceChange(addr, prev, new)
}
// OnColdStorageRead records a cold (first-touch) read of a storage slot of
// addr in the builder.
func (a *BlockAccessListTracer) OnColdStorageRead(addr common.Address, key common.Hash) {
	a.accessListBuilder.StorageRead(addr, key)
}
// OnColdAccountRead records a cold (first-touch) read of the account at addr
// in the builder.
func (a *BlockAccessListTracer) OnColdAccountRead(addr common.Address) {
	a.accessListBuilder.AccountRead(addr)
}
// OnStorageChange records a write to a storage slot of addr, with both the
// previous and the new value, in the builder.
func (a *BlockAccessListTracer) OnStorageChange(addr common.Address, slot common.Hash, prev common.Hash, new common.Hash) {
	a.accessListBuilder.StorageWrite(addr, slot, prev, new)
}

View file

@ -19,7 +19,6 @@ package core
import (
"errors"
"fmt"
"github.com/ethereum/go-ethereum/consensus"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
@ -111,6 +110,33 @@ func (v *BlockValidator) ValidateBody(block *types.Block) error {
}
}
// block access lists must be present after the Amsterdam hard fork
if v.config.IsAmsterdam(block.Number(), block.Time()) {
if block.Body().AccessList == nil {
return fmt.Errorf("access list not present in block body")
} else if block.Header().BlockAccessListHash == nil {
return fmt.Errorf("access list hash not present in block header")
} else if *block.Header().BlockAccessListHash != block.Body().AccessList.Hash() {
return fmt.Errorf("access list hash mismatch. local: %x. remote: %x\n", block.Body().AccessList.Hash(), *block.Header().BlockAccessListHash)
} else if err := block.Body().AccessList.Validate(); err != nil {
return fmt.Errorf("invalid block access list: %v", err)
}
} else if !v.bc.cfg.EnableBALForTesting {
// if --experimental.bal is not enabled, block headers cannot have access list hash and bodies cannot have access lists.
if block.Body().AccessList != nil {
return fmt.Errorf("access list not allowed in block body if not in amsterdam or --experimental.bal is set")
} else if block.Header().BlockAccessListHash != nil {
return fmt.Errorf("access list hash in block header not allowed when --experimental.bal is set")
}
} else {
// if --experimental.bal is enabled, the BAL hash is not allowed in the header.
// this is in order that Geth can import pre-existing chains augmented with BALs
// and not have a hash mismatch.
if block.Header().BlockAccessListHash != nil {
return fmt.Errorf("access list hash in block header not allowed pre-amsterdam")
}
}
// Ancestor block must be known.
if !v.bc.HasBlockAndState(block.ParentHash(), block.NumberU64()-1) {
if !v.bc.HasBlock(block.ParentHash(), block.NumberU64()-1) {
@ -123,7 +149,7 @@ func (v *BlockValidator) ValidateBody(block *types.Block) error {
// ValidateState validates the various changes that happen after a state transition,
// such as amount of used gas, the receipt roots and the state root itself.
func (v *BlockValidator) ValidateState(block *types.Block, statedb *state.StateDB, res *ProcessResult, stateless bool) error {
func (v *BlockValidator) ValidateState(block *types.Block, statedb *state.StateDB, res *ProcessResult, validateStateRoot, stateless bool) error {
if res == nil {
return errors.New("nil ProcessResult value")
}
@ -160,10 +186,13 @@ func (v *BlockValidator) ValidateState(block *types.Block, statedb *state.StateD
} else if res.Requests != nil {
return errors.New("block has requests before prague fork")
}
// Validate the state root against the received state root and throw
// an error if they don't match.
if root := statedb.IntermediateRoot(v.config.IsEIP158(header.Number)); header.Root != root {
return fmt.Errorf("invalid merkle root (remote: %x local: %x) dberr: %w", header.Root, root, statedb.Error())
if validateStateRoot {
// Validate the state root against the received state root and throw
// an error if they don't match.
if root := statedb.IntermediateRoot(v.config.IsEIP158(header.Number)); header.Root != root {
return fmt.Errorf("invalid merkle root (remote: %x local: %x) dberr: %w", header.Root, root, statedb.Error())
}
}
return nil
}

View file

@ -98,6 +98,13 @@ var (
blockExecutionTimer = metrics.NewRegisteredResettingTimer("chain/execution", nil)
blockWriteTimer = metrics.NewRegisteredResettingTimer("chain/write", nil)
// BAL-specific timers
blockPreprocessingTimer = metrics.NewRegisteredResettingTimer("chain/preprocess", nil)
blockPrestateLoadTimer = metrics.NewRegisteredResettingTimer("chain/prestateload", nil)
txExecutionTimer = metrics.NewRegisteredResettingTimer("chain/txexecution", nil)
stateRootCalctimer = metrics.NewRegisteredResettingTimer("chain/rootcalculation", nil)
blockPostprocessingTimer = metrics.NewRegisteredResettingTimer("chain/postprocess", nil)
blockReorgMeter = metrics.NewRegisteredMeter("chain/reorg/executes", nil)
blockReorgAddMeter = metrics.NewRegisteredMeter("chain/reorg/add", nil)
blockReorgDropMeter = metrics.NewRegisteredMeter("chain/reorg/drop", nil)
@ -196,6 +203,11 @@ type BlockChainConfig struct {
// If the value is -1, indexing is disabled.
TxLookupLimit int64
// If EnableBALForTesting is enabled, block access lists will be created as part of
// block processing and embedded in the block body. The block access list hash will
// not be set in the header.
EnableBALForTesting bool
// StateSizeTracking indicates whether the state size tracking is enabled.
StateSizeTracking bool
}
@ -330,14 +342,14 @@ type BlockChain struct {
stopping atomic.Bool // false if chain is running, true when stopped
procInterrupt atomic.Bool // interrupt signaler for block processing
engine consensus.Engine
validator Validator // Block and state validator interface
prefetcher Prefetcher
processor Processor // Block transaction processor interface
logger *tracing.Hooks
stateSizer *state.SizeTracker // State size tracking
lastForkReadyAlert time.Time // Last time there was a fork readiness print out
engine consensus.Engine
validator Validator // Block and state validator interface
prefetcher Prefetcher
processor Processor // Block transaction processor interface
parallelProcessor ParallelStateProcessor
logger *tracing.Hooks
stateSizer *state.SizeTracker // State size tracking
lastForkReadyAlert time.Time // Last time there was a fork readiness print out
}
// NewBlockChain returns a fully initialised block chain using information
@ -395,6 +407,7 @@ func NewBlockChain(db ethdb.Database, genesis *Genesis, engine consensus.Engine,
bc.validator = NewBlockValidator(chainConfig, bc)
bc.prefetcher = newStatePrefetcher(chainConfig, bc.hc)
bc.processor = NewStateProcessor(bc.hc)
bc.parallelProcessor = NewParallelStateProcessor(bc.hc, bc.GetVMConfig())
genesisHeader := bc.GetHeaderByNumber(0)
if genesisHeader == nil {
@ -1909,9 +1922,17 @@ func (bc *BlockChain) insertChain(chain types.Blocks, setHead bool, makeWitness
if parent == nil {
parent = bc.GetHeader(block.ParentHash(), block.NumberU64()-1)
}
// The traced section of block import.
start := time.Now()
res, err := bc.ProcessBlock(parent.Root, block, setHead, makeWitness && len(chain) == 1)
// construct or verify block access lists if BALs are enabled and
// we are post-selfdestruct removal fork.
enableBAL := (bc.cfg.EnableBALForTesting && bc.chainConfig.IsCancun(block.Number(), block.Time())) || bc.chainConfig.IsAmsterdam(block.Number(), block.Time())
blockHasAccessList := block.Body().AccessList != nil
makeBAL := enableBAL && !blockHasAccessList
validateBAL := enableBAL && blockHasAccessList
res, err := bc.ProcessBlock(parent.Root, block, setHead, makeWitness && len(chain) == 1, makeBAL, validateBAL)
if err != nil {
return nil, it.index, err
}
@ -1983,7 +2004,7 @@ func (bpr *blockProcessingResult) Witness() *stateless.Witness {
// ProcessBlock executes and validates the given block. If there was no error
// it writes the block and associated state to database.
func (bc *BlockChain) ProcessBlock(parentRoot common.Hash, block *types.Block, setHead bool, makeWitness bool) (_ *blockProcessingResult, blockEndErr error) {
func (bc *BlockChain) ProcessBlock(parentRoot common.Hash, block *types.Block, setHead bool, makeWitness bool, constructBALForTesting bool, validateBAL bool) (_ *blockProcessingResult, blockEndErr error) {
var (
err error
startTime = time.Now()
@ -1994,6 +2015,7 @@ func (bc *BlockChain) ProcessBlock(parentRoot common.Hash, block *types.Block, s
if bc.cfg.NoPrefetch {
statedb, err = state.New(parentRoot, bc.statedb)
if err != nil {
return nil, err
}
@ -2033,7 +2055,10 @@ func (bc *BlockChain) ProcessBlock(parentRoot common.Hash, block *types.Block, s
// Disable tracing for prefetcher executions.
vmCfg := bc.cfg.VmConfig
vmCfg.Tracer = nil
bc.prefetcher.Prefetch(block, throwaway, vmCfg, &interrupt)
if block.Body().AccessList == nil {
// only use the state prefetcher for non-BAL blocks.
bc.prefetcher.Prefetch(block, throwaway, vmCfg, &interrupt)
}
blockPrefetchExecuteTimer.Update(time.Since(start))
if interrupt.Load() {
@ -2062,8 +2087,15 @@ func (bc *BlockChain) ProcessBlock(parentRoot common.Hash, block *types.Block, s
witnessStats = stateless.NewWitnessStats()
}
}
statedb.StartPrefetcher("chain", witness, witnessStats)
defer statedb.StopPrefetcher()
// access-list containing blocks don't use the prefetcher because
// state root computation proceeds concurrently with transaction
// execution, meaning the prefetcher doesn't have any time to run
// before the trie nodes are needed for state root computation.
if block.Body().AccessList == nil {
statedb.StartPrefetcher("chain", witness, witnessStats)
defer statedb.StopPrefetcher()
}
}
if bc.logger != nil && bc.logger.OnBlockStart != nil {
@ -2079,21 +2111,82 @@ func (bc *BlockChain) ProcessBlock(parentRoot common.Hash, block *types.Block, s
}()
}
// Process block using the parent state as reference point
pstart := time.Now()
res, err := bc.processor.Process(block, statedb, bc.cfg.VmConfig)
if err != nil {
bc.reportBlock(block, res, err)
return nil, err
}
ptime := time.Since(pstart)
blockHadBAL := block.Body().AccessList != nil
var res *ProcessResult
var resWithMetrics *ProcessResultWithMetrics
var ptime, vtime time.Duration
if block.Body().AccessList != nil {
if block.NumberU64() == 0 {
return nil, fmt.Errorf("genesis block cannot have a block access list")
}
// TODO: rename 'validateBAL' to indicate that it's for validating that the BAL
// is present and we are after amsterdam fork. validateBAL=false is only used for
// testing BALs in pre-Amsterdam blocks.
if !validateBAL && !bc.chainConfig.IsAmsterdam(block.Number(), block.Time()) {
bc.reportBlock(block, res, fmt.Errorf("received block containing access list before glamsterdam activated"))
return nil, err
}
// Process block using the parent state as reference point
pstart := time.Now()
resWithMetrics, err = bc.parallelProcessor.Process(block, statedb, bc.cfg.VmConfig)
if err != nil {
// TODO: okay to pass nil here as execution result?
bc.reportBlock(block, nil, err)
return nil, err
}
ptime = time.Since(pstart)
vstart := time.Now()
if err := bc.validator.ValidateState(block, statedb, res, false); err != nil {
bc.reportBlock(block, res, err)
return nil, err
vstart := time.Now()
var err error
err = bc.validator.ValidateState(block, statedb, resWithMetrics.ProcessResult, false, false)
if err != nil {
// TODO: okay to pass nil here as execution result?
bc.reportBlock(block, nil, err)
return nil, err
}
res = resWithMetrics.ProcessResult
vtime = time.Since(vstart)
} else {
var balTracer *BlockAccessListTracer
// Process block using the parent state as reference point
if constructBALForTesting {
balTracer, bc.cfg.VmConfig.Tracer = NewBlockAccessListTracer(0)
}
// Process block using the parent state as reference point
pstart := time.Now()
res, err = bc.processor.Process(block, statedb, bc.cfg.VmConfig)
if err != nil {
bc.reportBlock(block, res, err)
return nil, err
}
ptime = time.Since(pstart)
// TODO: if I remove this check before executing balTracer.Finalise, the following test fails:
// ExecutionSpecBlocktests/shanghai/eip3855_push0/push0/push0_storage_overwrite.json
if constructBALForTesting {
balTracer.OnBlockFinalization()
}
// unset the BAL-creation tracer (dirty)
bc.cfg.VmConfig.Tracer = nil
vstart := time.Now()
if err := bc.validator.ValidateState(block, statedb, res, true, false); err != nil {
bc.reportBlock(block, res, err)
return nil, err
}
vtime = time.Since(vstart)
if constructBALForTesting {
// very ugly... deep-copy the block body before setting the block access
// list on it to prevent mutating the block instance passed by the caller.
existingBody := block.Body()
block = block.WithBody(*existingBody)
existingBody = block.Body()
existingBody.AccessList = balTracer.AccessList().ToEncodingObj()
block = block.WithBody(*existingBody)
}
}
vtime := time.Since(vstart)
// If witnesses was generated and stateless self-validation requested, do
// that now. Self validation should *never* run in production, it's more of
@ -2124,26 +2217,37 @@ func (bc *BlockChain) ProcessBlock(parentRoot common.Hash, block *types.Block, s
}
}
xvtime := time.Since(xvstart)
proctime := time.Since(startTime) // processing + validation + cross validation
var proctime time.Duration
if blockHadBAL {
blockPreprocessingTimer.Update(resWithMetrics.PreProcessTime)
blockPrestateLoadTimer.Update(resWithMetrics.PrestateLoadTime)
txExecutionTimer.Update(resWithMetrics.ExecTime)
stateRootCalctimer.Update(resWithMetrics.RootCalcTime)
blockPostprocessingTimer.Update(resWithMetrics.PostProcessTime)
// Update the metrics touched during block processing and validation
accountReadTimer.Update(statedb.AccountReads) // Account reads are complete(in processing)
storageReadTimer.Update(statedb.StorageReads) // Storage reads are complete(in processing)
if statedb.AccountLoaded != 0 {
accountReadSingleTimer.Update(statedb.AccountReads / time.Duration(statedb.AccountLoaded))
accountHashTimer.Update(statedb.AccountHashes)
} else {
xvtime := time.Since(xvstart)
proctime = time.Since(startTime) // processing + validation + cross validation
// Update the metrics touched during block processing and validation
accountReadTimer.Update(statedb.AccountReads) // Account reads are complete(in processing)
storageReadTimer.Update(statedb.StorageReads) // Storage reads are complete(in processing)
if statedb.AccountLoaded != 0 {
accountReadSingleTimer.Update(statedb.AccountReads / time.Duration(statedb.AccountLoaded))
}
if statedb.StorageLoaded != 0 {
storageReadSingleTimer.Update(statedb.StorageReads / time.Duration(statedb.StorageLoaded))
}
accountUpdateTimer.Update(statedb.AccountUpdates) // Account updates are complete(in validation)
storageUpdateTimer.Update(statedb.StorageUpdates) // Storage updates are complete(in validation)
accountHashTimer.Update(statedb.AccountHashes) // Account hashes are complete(in validation)
triehash := statedb.AccountHashes // The time spent on tries hashing
trieUpdate := statedb.AccountUpdates + statedb.StorageUpdates // The time spent on tries update
blockExecutionTimer.Update(ptime - (statedb.AccountReads + statedb.StorageReads)) // The time spent on EVM processing
blockValidationTimer.Update(vtime - (triehash + trieUpdate)) // The time spent on block validation
blockCrossValidationTimer.Update(xvtime) // The time spent on stateless cross validation
}
if statedb.StorageLoaded != 0 {
storageReadSingleTimer.Update(statedb.StorageReads / time.Duration(statedb.StorageLoaded))
}
accountUpdateTimer.Update(statedb.AccountUpdates) // Account updates are complete(in validation)
storageUpdateTimer.Update(statedb.StorageUpdates) // Storage updates are complete(in validation)
accountHashTimer.Update(statedb.AccountHashes) // Account hashes are complete(in validation)
triehash := statedb.AccountHashes // The time spent on tries hashing
trieUpdate := statedb.AccountUpdates + statedb.StorageUpdates // The time spent on tries update
blockExecutionTimer.Update(ptime - (statedb.AccountReads + statedb.StorageReads)) // The time spent on EVM processing
blockValidationTimer.Update(vtime - (triehash + trieUpdate)) // The time spent on block validation
blockCrossValidationTimer.Update(xvtime) // The time spent on stateless cross validation
// Write the block to the chain and get the status.
var (

View file

@ -410,7 +410,7 @@ func GenerateChain(config *params.ChainConfig, parent *types.Block, engine conse
}
body := types.Body{Transactions: b.txs, Uncles: b.uncles, Withdrawals: b.withdrawals}
block, err := b.engine.FinalizeAndAssemble(cm, b.header, statedb, &body, b.receipts)
block, err := b.engine.FinalizeAndAssemble(cm, b.header, statedb, &body, b.receipts, nil)
if err != nil {
panic(err)
}
@ -520,7 +520,7 @@ func GenerateVerkleChain(config *params.ChainConfig, parent *types.Block, engine
Uncles: b.uncles,
Withdrawals: b.withdrawals,
}
block, err := b.engine.FinalizeAndAssemble(cm, b.header, statedb, body, b.receipts)
block, err := b.engine.FinalizeAndAssemble(cm, b.header, statedb, body, b.receipts, nil)
if err != nil {
panic(err)
}

View file

@ -5,6 +5,7 @@ package core
import (
"encoding/json"
"errors"
"fmt"
"math/big"
"github.com/ethereum/go-ethereum/common"
@ -19,21 +20,22 @@ var _ = (*genesisSpecMarshaling)(nil)
// MarshalJSON marshals as JSON.
func (g Genesis) MarshalJSON() ([]byte, error) {
type Genesis struct {
Config *params.ChainConfig `json:"config"`
Nonce math.HexOrDecimal64 `json:"nonce"`
Timestamp math.HexOrDecimal64 `json:"timestamp"`
ExtraData hexutil.Bytes `json:"extraData"`
GasLimit math.HexOrDecimal64 `json:"gasLimit" gencodec:"required"`
Difficulty *math.HexOrDecimal256 `json:"difficulty" gencodec:"required"`
Mixhash common.Hash `json:"mixHash"`
Coinbase common.Address `json:"coinbase"`
Alloc map[common.UnprefixedAddress]types.Account `json:"alloc" gencodec:"required"`
Number math.HexOrDecimal64 `json:"number"`
GasUsed math.HexOrDecimal64 `json:"gasUsed"`
ParentHash common.Hash `json:"parentHash"`
BaseFee *math.HexOrDecimal256 `json:"baseFeePerGas"`
ExcessBlobGas *math.HexOrDecimal64 `json:"excessBlobGas"`
BlobGasUsed *math.HexOrDecimal64 `json:"blobGasUsed"`
Config *params.ChainConfig `json:"config"`
Nonce math.HexOrDecimal64 `json:"nonce"`
Timestamp math.HexOrDecimal64 `json:"timestamp"`
ExtraData hexutil.Bytes `json:"extraData"`
GasLimit math.HexOrDecimal64 `json:"gasLimit" gencodec:"required"`
Difficulty *math.HexOrDecimal256 `json:"difficulty" gencodec:"required"`
Mixhash common.Hash `json:"mixHash"`
Coinbase common.Address `json:"coinbase"`
Alloc map[common.UnprefixedAddress]types.Account `json:"alloc" gencodec:"required"`
Number math.HexOrDecimal64 `json:"number"`
GasUsed math.HexOrDecimal64 `json:"gasUsed"`
ParentHash common.Hash `json:"parentHash"`
BaseFee *math.HexOrDecimal256 `json:"baseFeePerGas"`
ExcessBlobGas *math.HexOrDecimal64 `json:"excessBlobGas"`
BlobGasUsed *math.HexOrDecimal64 `json:"blobGasUsed"`
BlockAccessListHash *common.Hash `json:"blockAccessListHash,omitempty"`
}
var enc Genesis
enc.Config = g.Config
@ -56,27 +58,29 @@ func (g Genesis) MarshalJSON() ([]byte, error) {
enc.BaseFee = (*math.HexOrDecimal256)(g.BaseFee)
enc.ExcessBlobGas = (*math.HexOrDecimal64)(g.ExcessBlobGas)
enc.BlobGasUsed = (*math.HexOrDecimal64)(g.BlobGasUsed)
enc.BlockAccessListHash = g.BlockAccessListHash
return json.Marshal(&enc)
}
// UnmarshalJSON unmarshals from JSON.
func (g *Genesis) UnmarshalJSON(input []byte) error {
type Genesis struct {
Config *params.ChainConfig `json:"config"`
Nonce *math.HexOrDecimal64 `json:"nonce"`
Timestamp *math.HexOrDecimal64 `json:"timestamp"`
ExtraData *hexutil.Bytes `json:"extraData"`
GasLimit *math.HexOrDecimal64 `json:"gasLimit" gencodec:"required"`
Difficulty *math.HexOrDecimal256 `json:"difficulty" gencodec:"required"`
Mixhash *common.Hash `json:"mixHash"`
Coinbase *common.Address `json:"coinbase"`
Alloc map[common.UnprefixedAddress]types.Account `json:"alloc" gencodec:"required"`
Number *math.HexOrDecimal64 `json:"number"`
GasUsed *math.HexOrDecimal64 `json:"gasUsed"`
ParentHash *common.Hash `json:"parentHash"`
BaseFee *math.HexOrDecimal256 `json:"baseFeePerGas"`
ExcessBlobGas *math.HexOrDecimal64 `json:"excessBlobGas"`
BlobGasUsed *math.HexOrDecimal64 `json:"blobGasUsed"`
Config *params.ChainConfig `json:"config"`
Nonce *math.HexOrDecimal64 `json:"nonce"`
Timestamp *math.HexOrDecimal64 `json:"timestamp"`
ExtraData *hexutil.Bytes `json:"extraData"`
GasLimit *math.HexOrDecimal64 `json:"gasLimit" gencodec:"required"`
Difficulty *math.HexOrDecimal256 `json:"difficulty" gencodec:"required"`
Mixhash *common.Hash `json:"mixHash"`
Coinbase *common.Address `json:"coinbase"`
Alloc map[common.UnprefixedAddress]types.Account `json:"alloc" gencodec:"required"`
Number *math.HexOrDecimal64 `json:"number"`
GasUsed *math.HexOrDecimal64 `json:"gasUsed"`
ParentHash *common.Hash `json:"parentHash"`
BaseFee *math.HexOrDecimal256 `json:"baseFeePerGas"`
ExcessBlobGas *math.HexOrDecimal64 `json:"excessBlobGas"`
BlobGasUsed *math.HexOrDecimal64 `json:"blobGasUsed"`
BlockAccessListHash *common.Hash `json:"blockAccessListHash,omitempty"`
}
var dec Genesis
if err := json.Unmarshal(input, &dec); err != nil {
@ -133,5 +137,9 @@ func (g *Genesis) UnmarshalJSON(input []byte) error {
if dec.BlobGasUsed != nil {
g.BlobGasUsed = (*uint64)(dec.BlobGasUsed)
}
fmt.Printf("dec al hash is %v\n", dec.BlockAccessListHash)
if dec.BlockAccessListHash != nil {
g.BlockAccessListHash = dec.BlockAccessListHash
}
return nil
}

View file

@ -67,12 +67,13 @@ type Genesis struct {
// These fields are used for consensus tests. Please don't use them
// in actual genesis blocks.
Number uint64 `json:"number"`
GasUsed uint64 `json:"gasUsed"`
ParentHash common.Hash `json:"parentHash"`
BaseFee *big.Int `json:"baseFeePerGas"` // EIP-1559
ExcessBlobGas *uint64 `json:"excessBlobGas"` // EIP-4844
BlobGasUsed *uint64 `json:"blobGasUsed"` // EIP-4844
Number uint64 `json:"number"`
GasUsed uint64 `json:"gasUsed"`
ParentHash common.Hash `json:"parentHash"`
BaseFee *big.Int `json:"baseFeePerGas"` // EIP-1559
ExcessBlobGas *uint64 `json:"excessBlobGas"` // EIP-4844
BlobGasUsed *uint64 `json:"blobGasUsed"` // EIP-4844
BlockAccessListHash *common.Hash `json:"blockAccessListHash,omitempty"` // EIP-7928
}
// copy copies the genesis.
@ -122,6 +123,7 @@ func ReadGenesis(db ethdb.Database) (*Genesis, error) {
genesis.BaseFee = genesisHeader.BaseFee
genesis.ExcessBlobGas = genesisHeader.ExcessBlobGas
genesis.BlobGasUsed = genesisHeader.BlobGasUsed
genesis.BlockAccessListHash = genesisHeader.BlockAccessListHash
return &genesis, nil
}
@ -469,18 +471,19 @@ func (g *Genesis) ToBlock() *types.Block {
// toBlockWithRoot constructs the genesis block with the given genesis state root.
func (g *Genesis) toBlockWithRoot(root common.Hash) *types.Block {
head := &types.Header{
Number: new(big.Int).SetUint64(g.Number),
Nonce: types.EncodeNonce(g.Nonce),
Time: g.Timestamp,
ParentHash: g.ParentHash,
Extra: g.ExtraData,
GasLimit: g.GasLimit,
GasUsed: g.GasUsed,
BaseFee: g.BaseFee,
Difficulty: g.Difficulty,
MixDigest: g.Mixhash,
Coinbase: g.Coinbase,
Root: root,
Number: new(big.Int).SetUint64(g.Number),
Nonce: types.EncodeNonce(g.Nonce),
Time: g.Timestamp,
ParentHash: g.ParentHash,
Extra: g.ExtraData,
GasLimit: g.GasLimit,
GasUsed: g.GasUsed,
BaseFee: g.BaseFee,
Difficulty: g.Difficulty,
MixDigest: g.Mixhash,
Coinbase: g.Coinbase,
BlockAccessListHash: g.BlockAccessListHash,
Root: root,
}
if g.GasLimit == 0 {
head.GasLimit = params.GenesisGasLimit

View file

@ -0,0 +1,376 @@
package core
import (
"cmp"
"fmt"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/consensus/misc"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/types/bal"
"github.com/ethereum/go-ethereum/core/vm"
"golang.org/x/sync/errgroup"
"slices"
"time"
)
// ProcessResultWithMetrics wraps ProcessResult with some metrics that are
// emitted when executing blocks containing access lists.
type ProcessResultWithMetrics struct {
	ProcessResult *ProcessResult
	// PreProcessTime is the time it took to load modified prestate accounts
	// from disk and instantiate statedbs for execution.
	PreProcessTime time.Duration
	// PostProcessTime is the time it took to validate the block post
	// transaction execution and state root calculation.
	PostProcessTime time.Duration
	// RootCalcTime is the time it took to hash the state root, including
	// intermediate node reads.
	RootCalcTime time.Duration
	// PrestateLoadTime is the time that it took to load the prestate for
	// accounts that were updated as part of the state root update.
	PrestateLoadTime time.Duration
	// ExecTime is the time it took to execute all txs in the block.
	ExecTime time.Duration
}
// ParallelStateProcessor is used to execute and verify blocks containing
// access lists. It embeds StateProcessor for shared processing logic.
type ParallelStateProcessor struct {
	*StateProcessor
	// vmCfg is the template configuration cloned into each per-transaction
	// EVM instance created during parallel execution.
	vmCfg *vm.Config
}
// NewParallelStateProcessor returns a new ParallelStateProcessor instance
// wrapping the standard StateProcessor for the given header chain.
func NewParallelStateProcessor(chain *HeaderChain, vmConfig *vm.Config) ParallelStateProcessor {
	return ParallelStateProcessor{
		StateProcessor: NewStateProcessor(chain),
		vmCfg:          vmConfig,
	}
}
// prepareExecResult is called by resultHandler when all transactions have
// successfully executed. It performs the post-tx state transition (system
// contracts and withdrawals), validates the resulting state diff and read set
// against the block access list, and calculates the ProcessResult, returning
// it to be sent on resCh by resultHandler.
func (p *ParallelStateProcessor) prepareExecResult(block *types.Block, allStateReads *bal.StateAccesses, tExecStart time.Time, postTxState *state.StateDB, receipts types.Receipts) *ProcessResultWithMetrics {
	tExec := time.Since(tExecStart)
	var requests [][]byte
	tPostprocessStart := time.Now()
	header := block.Header()
	// The post-tx system operations occupy index len(txs)+1 in the access
	// list (slot 0 is the pre-tx system transition, slots 1..len(txs) are
	// the transactions).
	balTracer, hooks := NewBlockAccessListTracer(len(block.Transactions()) + 1)
	tracingStateDB := state.NewHookedState(postTxState, hooks)
	// blockCtx: renamed from "context" to avoid shadowing the stdlib package name.
	blockCtx := NewEVMBlockContext(header, p.chain, nil)
	postTxState.SetAccessListIndex(len(block.Transactions()) + 1)
	cfg := vm.Config{
		Tracer:                  hooks,
		NoBaseFee:               p.vmCfg.NoBaseFee,
		EnablePreimageRecording: p.vmCfg.EnablePreimageRecording,
		ExtraEips:               slices.Clone(p.vmCfg.ExtraEips),
		StatelessSelfValidation: p.vmCfg.StatelessSelfValidation,
		EnableWitnessStats:      p.vmCfg.EnableWitnessStats,
	}
	// NOTE: the redundant second assignment of cfg.Tracer was removed; it is
	// already set in the composite literal above.
	evm := vm.NewEVM(blockCtx, tracingStateDB, p.chainConfig(), cfg)
	// 1. order the receipts by tx index
	// 2. correctly calculate the cumulative gas used per receipt, returning
	//    a bad-block error if it goes over the allowed block gas limit
	slices.SortFunc(receipts, func(a, b *types.Receipt) int {
		return cmp.Compare(a.TransactionIndex, b.TransactionIndex)
	})
	var cumulativeGasUsed uint64
	var allLogs []*types.Log
	for _, receipt := range receipts {
		receipt.CumulativeGasUsed = cumulativeGasUsed + receipt.GasUsed
		cumulativeGasUsed += receipt.GasUsed
		if receipt.CumulativeGasUsed > header.GasLimit {
			return &ProcessResultWithMetrics{
				ProcessResult: &ProcessResult{Error: fmt.Errorf("gas limit exceeded")},
			}
		}
		allLogs = append(allLogs, receipt.Logs...)
	}
	// Read requests if Prague is enabled.
	if p.chainConfig().IsPrague(block.Number(), block.Time()) {
		requests = [][]byte{}
		// EIP-6110
		if err := ParseDepositLogs(&requests, allLogs, p.chainConfig()); err != nil {
			return &ProcessResultWithMetrics{
				ProcessResult: &ProcessResult{Error: err},
			}
		}
		// EIP-7002
		if err := ProcessWithdrawalQueue(&requests, evm); err != nil {
			return &ProcessResultWithMetrics{
				ProcessResult: &ProcessResult{Error: err},
			}
		}
		// EIP-7251
		if err := ProcessConsolidationQueue(&requests, evm); err != nil {
			return &ProcessResultWithMetrics{
				ProcessResult: &ProcessResult{Error: err},
			}
		}
	}
	// Finalize the block, applying any consensus engine specific extras (e.g. block rewards)
	p.chain.Engine().Finalize(p.chain, header, tracingStateDB, block.Body())
	// invoke Finalise so that withdrawals are accounted for in the state diff
	postTxState.Finalise(true)
	balTracer.OnBlockFinalization()
	diff, stateReads := balTracer.IdxChanges()
	allStateReads.Merge(stateReads)
	balIdx := len(block.Transactions()) + 1
	if err := postTxState.BlockAccessList().ValidateStateDiff(balIdx, diff); err != nil {
		return &ProcessResultWithMetrics{
			ProcessResult: &ProcessResult{Error: err},
		}
	}
	if err := postTxState.BlockAccessList().ValidateStateReads(*allStateReads); err != nil {
		return &ProcessResultWithMetrics{
			ProcessResult: &ProcessResult{Error: err},
		}
	}
	tPostprocess := time.Since(tPostprocessStart)
	return &ProcessResultWithMetrics{
		ProcessResult: &ProcessResult{
			Receipts: receipts,
			Requests: requests,
			Logs:     allLogs,
			GasUsed:  cumulativeGasUsed,
		},
		PostProcessTime: tPostprocess,
		ExecTime:        tExec,
	}
}
// txExecResult is the outcome of executing a single transaction in isolation,
// sent from a worker goroutine to resultHandler.
type txExecResult struct {
	idx     int            // transaction index
	receipt *types.Receipt // receipt for the executed transaction; nil when err is set
	err     error          // non-EVM error which would render the block invalid
	// stateReads records the state accessed by the transaction; merged into
	// the block-wide read set for access-list validation.
	stateReads bal.StateAccesses
}
// resultHandler polls until all transactions have finished executing and the
// state root calculation is complete. The result is emitted on resCh.
//
// It consumes exactly len(block.Transactions()) results from txResCh and
// exactly one result from stateRootCalcResCh, so neither the per-tx worker
// goroutines nor the root-calculation goroutine can block forever on their
// sends (both channels are unbuffered).
func (p *ParallelStateProcessor) resultHandler(block *types.Block, preTxStateReads bal.StateAccesses, postTxState *state.StateDB, tExecStart time.Time, txResCh <-chan txExecResult, stateRootCalcResCh <-chan stateRootCalculationResult, resCh chan *ProcessResultWithMetrics) {
	var (
		receipts []*types.Receipt
		execErr  error
	)
	gp := new(GasPool)
	gp.SetGas(block.GasLimit())
	allReads := make(bal.StateAccesses)
	allReads.Merge(preTxStateReads)
	// Drain one result per transaction. On the first failure, remember the
	// error but keep draining so every worker goroutine can complete its send.
	for done := 0; done < len(block.Transactions()); done++ {
		res := <-txResCh
		if execErr != nil {
			continue
		}
		if res.err != nil {
			execErr = res.err
			continue
		}
		if err := gp.SubGas(res.receipt.GasUsed); err != nil {
			execErr = err
			continue
		}
		receipts = append(receipts, res.receipt)
		allReads.Merge(res.stateReads)
	}
	if execErr != nil {
		// Drain the root-calculation result so calcAndVerifyRoot is not
		// leaked blocked on its unbuffered send. (The original returned
		// without receiving here.)
		<-stateRootCalcResCh
		resCh <- &ProcessResultWithMetrics{ProcessResult: &ProcessResult{Error: execErr}}
		return
	}
	execResults := p.prepareExecResult(block, &allReads, tExecStart, postTxState, receipts)
	rootCalcRes := <-stateRootCalcResCh
	if execResults.ProcessResult.Error != nil {
		resCh <- execResults
	} else if rootCalcRes.err != nil {
		resCh <- &ProcessResultWithMetrics{ProcessResult: &ProcessResult{Error: rootCalcRes.err}}
	} else {
		execResults.RootCalcTime = rootCalcRes.rootCalcTime
		execResults.PrestateLoadTime = rootCalcRes.prestateLoadTime
		resCh <- execResults
	}
}
// stateRootCalculationResult carries the outcome of the asynchronous state
// root computation performed by calcAndVerifyRoot.
type stateRootCalculationResult struct {
	err              error         // set when the computed root does not match the block header's root
	prestateLoadTime time.Duration // time spent loading prestate accounts for the root update
	rootCalcTime     time.Duration // time spent on the trie root hashing itself
	root             common.Hash   // the locally computed state root
}
// calcAndVerifyRoot performs the post-state root hash calculation, verifying
// it against what is reported by the block and returning a result on resCh.
func (p *ParallelStateProcessor) calcAndVerifyRoot(preState *state.StateDB, block *types.Block, resCh chan stateRootCalculationResult) {
	// Apply the access list's block state modifications and hash the root.
	computedRoot, loadDur, hashDur := preState.BlockAccessList().StateRoot(preState)
	result := stateRootCalculationResult{
		root:             computedRoot,
		prestateLoadTime: loadDur,
		rootCalcTime:     hashDur,
	}
	if computedRoot != block.Root() {
		result.err = fmt.Errorf("state root mismatch. local: %x. remote: %x", computedRoot, block.Root())
	}
	resCh <- result
}
// execTx executes a single transaction against db, returning a result which
// includes the state accessed/modified by the transaction. Errors in the
// returned result are non-EVM errors which would render the block invalid.
func (p *ParallelStateProcessor) execTx(block *types.Block, tx *types.Transaction, txIdx int, db *state.StateDB, signer types.Signer) *txExecResult {
	header := block.Header()
	// Transaction txIdx occupies slot txIdx+1 in the access list (slot 0 is
	// the pre-tx system state transition).
	balTracer, hooks := NewBlockAccessListTracer(txIdx + 1)
	tracingStateDB := state.NewHookedState(db, hooks)
	// blockCtx: renamed from "context" to avoid shadowing the stdlib package name.
	blockCtx := NewEVMBlockContext(header, p.chain, nil)
	cfg := vm.Config{
		Tracer:                  hooks,
		NoBaseFee:               p.vmCfg.NoBaseFee,
		EnablePreimageRecording: p.vmCfg.EnablePreimageRecording,
		ExtraEips:               slices.Clone(p.vmCfg.ExtraEips),
		StatelessSelfValidation: p.vmCfg.StatelessSelfValidation,
		EnableWitnessStats:      p.vmCfg.EnableWitnessStats,
	}
	// NOTE: the redundant second assignment of cfg.Tracer was removed; it is
	// already set in the composite literal above.
	evm := vm.NewEVM(blockCtx, tracingStateDB, p.chainConfig(), cfg)
	msg, err := TransactionToMessage(tx, signer, header.BaseFee)
	if err != nil {
		return &txExecResult{err: fmt.Errorf("could not apply tx %d [%v]: %w", txIdx, tx.Hash().Hex(), err)}
	}
	// Each transaction gets its own gas pool sized to the whole block; the
	// block-wide cumulative gas accounting is performed by resultHandler.
	gp := new(GasPool)
	gp.SetGas(block.GasLimit())
	db.SetTxContext(tx.Hash(), txIdx)
	var gasUsed uint64
	receipt, err := ApplyTransactionWithEVM(msg, gp, db, block.Number(), block.Hash(), blockCtx.Time, tx, &gasUsed, evm)
	if err != nil {
		return &txExecResult{err: fmt.Errorf("could not apply tx %d [%v]: %w", txIdx, tx.Hash().Hex(), err)}
	}
	// Validate the observed state diff against what the BAL declares for
	// this transaction's slot.
	diff, accesses := balTracer.IdxChanges()
	if err := db.BlockAccessList().ValidateStateDiff(txIdx+1, diff); err != nil {
		return &txExecResult{err: err}
	}
	return &txExecResult{
		idx:        txIdx,
		receipt:    receipt,
		stateReads: accesses,
	}
}
// Process performs EVM execution and state root computation for a block which is known
// to contain an access list.
//
// Orchestration: after the pre-tx system calls are executed and validated
// against slot 0 of the BAL, every transaction is executed concurrently
// against its own copy of the starting state, while the state root is
// computed in parallel from the BAL's declared final diffs. resultHandler
// gathers all per-tx results plus the root-calculation result and emits the
// final ProcessResultWithMetrics on resCh.
func (p *ParallelStateProcessor) Process(block *types.Block, statedb *state.StateDB, cfg vm.Config) (*ProcessResultWithMetrics, error) {
	var (
		header = block.Header()
		resCh  = make(chan *ProcessResultWithMetrics)
		signer = types.MakeSigner(p.chainConfig(), header.Number, header.Time)
	)
	txResCh := make(chan txExecResult)
	pStart := time.Now()
	var (
		tPreprocess time.Duration // time to create a set of prestates for parallel transaction execution
		tExecStart  time.Time
		rootCalcResultCh = make(chan stateRootCalculationResult)
	)
	// Mutate the block and state according to any hard-fork specs
	if p.chainConfig().DAOForkSupport && p.chainConfig().DAOForkBlock != nil && p.chainConfig().DAOForkBlock.Cmp(block.Number()) == 0 {
		misc.ApplyDAOHardFork(statedb)
	}
	var (
		context vm.BlockContext
	)
	// Install the BAL reader on the statedb so downstream validation can
	// compare observed diffs against the block's declared access list.
	alReader := state.NewBALReader(block, statedb)
	statedb.SetBlockAccessList(alReader)
	// Slot 0 traces the pre-tx system state transition.
	balTracer, hooks := NewBlockAccessListTracer(0)
	tracingStateDB := state.NewHookedState(statedb, hooks)
	// TODO: figure out exactly why we need to set the hooks on the TracingStateDB and the vm.Config
	cfg.Tracer = hooks
	context = NewEVMBlockContext(header, p.chain, nil)
	evm := vm.NewEVM(context, tracingStateDB, p.chainConfig(), cfg)
	// Pre-tx system calls: EIP-4788 beacon root and EIP-2935 parent hash.
	if beaconRoot := block.BeaconRoot(); beaconRoot != nil {
		ProcessBeaconBlockRoot(*beaconRoot, evm)
	}
	if p.chainConfig().IsPrague(block.Number(), block.Time()) || p.chainConfig().IsVerkle(block.Number(), block.Time()) {
		ProcessParentBlockHash(block.ParentHash(), evm)
	}
	// TODO: weird that I have to manually call finalize here
	balTracer.OnPreTxExecutionDone()
	diff, stateReads := balTracer.IdxChanges()
	// Validate the pre-tx system transition against BAL slot 0.
	if err := statedb.BlockAccessList().ValidateStateDiff(0, diff); err != nil {
		return nil, err
	}
	// compute the post-tx state prestate (before applying final block system calls and eip-4895 withdrawals)
	// the post-tx state transition is verified by resultHandler
	postTxState := statedb.Copy()
	tPreprocess = time.Since(pStart)
	// execute transactions and state root calculation in parallel
	// TODO: figure out how to funnel the state reads from the bal tracer through to the post-block-exec state/slot read
	// validation
	tExecStart = time.Now()
	go p.resultHandler(block, stateReads, postTxState, tExecStart, txResCh, rootCalcResultCh, resCh)
	// NOTE(review): workers.Wait() is never called; completion is tracked by
	// resultHandler counting one txResCh result per transaction — confirm
	// that is the intended lifecycle for these goroutines.
	var workers errgroup.Group
	startingState := statedb.Copy()
	for i, tx := range block.Transactions() {
		tx := tx // per-iteration copies for the closure (pre-Go1.22 semantics)
		i := i
		workers.Go(func() error {
			// Each tx executes against its own copy of the pre-tx state; the
			// BAL supplies the values it would otherwise need from earlier txs.
			res := p.execTx(block, tx, i, startingState.Copy(), signer)
			txResCh <- *res
			return nil
		})
	}
	go p.calcAndVerifyRoot(statedb, block, rootCalcResultCh)
	res := <-resCh
	if res.ProcessResult.Error != nil {
		return nil, res.ProcessResult.Error
	}
	res.PreProcessTime = tPreprocess
	// res.PreProcessLoadTime = tPreprocessLoad
	return res, nil
}

376
core/state/bal_reader.go Normal file
View file

@ -0,0 +1,376 @@
package state
import (
"context"
"fmt"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/types/bal"
"github.com/ethereum/go-ethereum/crypto"
"github.com/holiman/uint256"
"sync"
"time"
)
// TODO: probably unnecessary to cache the resolved state object here as it will already be in the db cache?
// ^ experiment with the performance of keeping this as-is vs just using the db cache.

// prestateResolver concurrently fetches the prestate of a set of accounts so
// that later lookups via account() block only until the corresponding fetch
// has completed.
type prestateResolver struct {
	// inProgress maps each address being resolved to a channel that is closed
	// once the account has been fetched and stored in resolved.
	inProgress map[common.Address]chan struct{}
	// resolved holds the fetched accounts keyed by address; values are
	// *types.StateAccount (possibly nil).
	resolved sync.Map
	ctx      context.Context
	cancel   func()
}
// resolve kicks off one goroutine per address that fetches the account from r
// and publishes it in p.resolved, closing the address's inProgress channel to
// signal completion. It must be called before account() or stop().
func (p *prestateResolver) resolve(r Reader, addrs []common.Address) {
	p.inProgress = make(map[common.Address]chan struct{})
	p.ctx, p.cancel = context.WithCancel(context.Background())
	// Create all signal channels up-front so account() can safely look them
	// up while fetches are still in flight.
	for _, addr := range addrs {
		p.inProgress[addr] = make(chan struct{})
	}
	for _, addr := range addrs {
		resolveAddr := addr // per-iteration copy for the goroutine closure
		go func() {
			// NOTE(review): if the context is cancelled before this point the
			// goroutine returns WITHOUT closing inProgress[resolveAddr]; a
			// later account() call for that address would block forever.
			// Confirm stop() is only invoked once no further account() calls
			// can occur.
			select {
			case <-p.ctx.Done():
				return
			default:
			}
			acct, err := r.Account(resolveAddr)
			if err != nil {
				// TODO: what do here?
				// NOTE(review): the read error is silently dropped and a
				// possibly-nil account is stored — verify callers tolerate a
				// nil account standing in for "failed to load".
			}
			p.resolved.Store(resolveAddr, acct)
			close(p.inProgress[resolveAddr])
		}()
	}
}
// stop cancels any in-flight prestate fetches started by resolve.
func (p *prestateResolver) stop() {
	p.cancel()
}
// account returns the resolved prestate account for addr, blocking until the
// fetch started by resolve has completed. It returns nil when addr was never
// scheduled for resolution, or when no account could be loaded for it.
func (p *prestateResolver) account(addr common.Address) *types.StateAccount {
	done, ok := p.inProgress[addr]
	if !ok {
		return nil
	}
	// Wait for the resolving goroutine to publish the account. (A plain
	// receive replaces the original single-case select — staticcheck S1000 —
	// and the map is looked up once instead of twice.)
	<-done
	res, exist := p.resolved.Load(addr)
	if !exist {
		return nil
	}
	return res.(*types.StateAccount)
}
// initObjFromDiff builds a state object for addr by layering the account-level
// changes from diff on top of the prestate account a. A nil prestate is
// treated as a fresh, empty account; a nil diff yields the prestate untouched.
// Returns nil when the resulting object is empty.
func (r *BALReader) initObjFromDiff(db *StateDB, addr common.Address, a *types.StateAccount, diff *bal.AccountState) *stateObject {
	var base *types.StateAccount
	if a != nil {
		base = a.Copy()
	} else {
		base = &types.StateAccount{
			Nonce:    0,
			Balance:  uint256.NewInt(0),
			Root:     types.EmptyRootHash,
			CodeHash: types.EmptyCodeHash[:],
		}
	}
	if diff == nil {
		return newObject(db, addr, base)
	}
	if diff.Nonce != nil {
		base.Nonce = *diff.Nonce
	}
	if diff.Balance != nil {
		base.Balance = new(uint256.Int).Set(diff.Balance)
	}
	obj := newObject(db, addr, base)
	if diff.Code != nil {
		obj.setCode(crypto.Keccak256Hash(diff.Code), diff.Code)
	}
	// Ranging over a nil StorageWrites map is a no-op, so no guard is needed.
	for key, val := range diff.StorageWrites {
		obj.pendingStorage[key] = val
	}
	if obj.empty() {
		return nil
	}
	return obj
}
// initMutatedObjFromDiff builds a state object for addr from the prestate
// account a (a fresh empty account when a is nil) and applies the changes in
// diff through the object's mutating setters, so the writes are journaled.
func (r *BALReader) initMutatedObjFromDiff(db *StateDB, addr common.Address, a *types.StateAccount, diff *bal.AccountState) *stateObject {
	var base *types.StateAccount
	if a != nil {
		base = a.Copy()
	} else {
		base = &types.StateAccount{
			Nonce:    0,
			Balance:  uint256.NewInt(0),
			Root:     types.EmptyRootHash,
			CodeHash: types.EmptyCodeHash[:],
		}
	}
	obj := newObject(db, addr, base)
	if diff.Nonce != nil {
		obj.SetNonce(*diff.Nonce)
	}
	if diff.Balance != nil {
		obj.SetBalance(new(uint256.Int).Set(diff.Balance))
	}
	if diff.Code != nil {
		obj.SetCode(crypto.Keccak256Hash(diff.Code), diff.Code)
	}
	// Ranging over a nil StorageWrites map is a no-op, so no guard is needed.
	for key, val := range diff.StorageWrites {
		obj.SetState(key, val)
	}
	return obj
}
// BALReader provides methods for reading account state from a block access
// list. State values returned from the Reader methods must not be modified.
type BALReader struct {
	block *types.Block
	// accesses indexes the block's access list entries by account address.
	accesses map[common.Address]*bal.AccountAccess
	// prestateReader asynchronously resolves the prestate of every account
	// the access list reports as modified.
	prestateReader prestateResolver
}
// NewBALReader constructs a new reader from an access list. db is expected to
// have been instantiated with a reader. It also kicks off asynchronous
// prestate resolution for every account the access list reports as modified.
func NewBALReader(block *types.Block, db *StateDB) *BALReader {
	r := &BALReader{accesses: make(map[common.Address]*bal.AccountAccess), block: block}
	// Index entries by address. Indexing into the slice (rather than taking
	// the address of the range variable) avoids copying each entry per
	// iteration and is correct regardless of the per-iteration loop-variable
	// semantics that changed in Go 1.22.
	accessList := *block.Body().AccessList
	for i := range accessList {
		r.accesses[accessList[i].Address] = &accessList[i]
	}
	r.prestateReader.resolve(db.Reader(), r.ModifiedAccounts())
	return r
}
// ModifiedAccounts returns a list of all accounts with mutations in the
// access list, i.e. any account with at least one nonce, code, storage or
// balance change.
func (r *BALReader) ModifiedAccounts() []common.Address {
	var modified []common.Address
	for addr, access := range r.accesses {
		mutated := len(access.NonceChanges) > 0 ||
			len(access.CodeChanges) > 0 ||
			len(access.StorageChanges) > 0 ||
			len(access.BalanceChanges) > 0
		if mutated {
			modified = append(modified, addr)
		}
	}
	return modified
}
// ValidateStateReads checks that the set of storage reads observed during
// execution (allReads) matches the read set declared in the block access list
// exactly, after discounting slots the BAL reports as written.
//
// Note: the per-account read maps inside allReads are mutated (written slots
// are deleted from them).
func (r *BALReader) ValidateStateReads(allReads bal.StateAccesses) error {
	// 1. remove any slots from 'allReads' which were written
	// 2. validate that the read set in the BAL matches 'allReads' exactly
	for addr, reads := range allReads {
		access, ok := r.accesses[addr]
		if !ok {
			return fmt.Errorf("account %x accessed during execution wasn't in BAL", addr)
		}
		balAcctDiff := r.readAccountDiff(addr, len(r.block.Transactions())+2)
		if balAcctDiff != nil {
			for writeSlot := range balAcctDiff.StorageWrites {
				delete(reads, writeSlot)
			}
		}
		expectedReads := access.StorageReads
		if len(reads) != len(expectedReads) {
			return fmt.Errorf("account %x: computed %d storage reads but BAL declares %d", addr, len(reads), len(expectedReads))
		}
		// Equal sizes plus containment of every declared read implies the
		// two sets are identical.
		for _, slot := range expectedReads {
			if _, ok := reads[slot]; !ok {
				return fmt.Errorf("account %x: BAL-declared read of slot %x missing from computed reads", addr, slot)
			}
		}
	}
	// TODO: where do we validate that the storage read/write sets are distinct?
	return nil
}
// AccessedState returns, per account, the set of storage slots the access
// list declares as read. Accounts with no declared reads and no declared
// mutations at all are included with an empty slot set.
func (r *BALReader) AccessedState() map[common.Address]map[common.Hash]struct{} {
	out := make(map[common.Address]map[common.Hash]struct{})
	for addr, acc := range r.accesses {
		switch {
		case len(acc.StorageReads) > 0:
			slots := make(map[common.Hash]struct{}, len(acc.StorageReads))
			for _, slot := range acc.StorageReads {
				slots[slot] = struct{}{}
			}
			out[addr] = slots
		case len(acc.BalanceChanges) == 0 && len(acc.NonceChanges) == 0 && len(acc.StorageChanges) == 0 && len(acc.CodeChanges) == 0:
			out[addr] = make(map[common.Hash]struct{})
		}
	}
	return out
}
// TODO: it feels weird that this modifies the prestate instance. However, it's needed because it will
// subsequently be used in Commit.

// StateRoot layers the access list's accumulated final diffs onto prestate
// and computes the resulting state root, reporting how long the prestate
// loading and the trie hashing each took.
func (r *BALReader) StateRoot(prestate *StateDB) (root common.Hash, prestateLoadTime time.Duration, rootUpdateTime time.Duration) {
	finalIdx := len(r.block.Transactions()) + 1
	loadStart := time.Now()
	for _, addr := range r.ModifiedAccounts() {
		acctDiff := r.readAccountDiff(addr, finalIdx)
		prestateAcct := r.prestateReader.account(addr)
		if obj := r.initMutatedObjFromDiff(prestate, addr, prestateAcct, acctDiff); obj != nil {
			prestate.setStateObject(obj)
		}
	}
	prestateLoadTime = time.Since(loadStart)
	hashStart := time.Now()
	root = prestate.IntermediateRoot(true)
	rootUpdateTime = time.Since(hashStart)
	return root, prestateLoadTime, rootUpdateTime
}
// changesAt returns all state changes at the given index, aggregated over
// every account present in the access list.
func (r *BALReader) changesAt(idx int) *bal.StateDiff {
	// Keyed composite literal (go vet composites) instead of the original
	// positional one.
	res := &bal.StateDiff{Mutations: make(map[common.Address]*bal.AccountState)}
	for addr := range r.accesses {
		if accountChanges := r.accountChangesAt(addr, idx); accountChanges != nil {
			res.Mutations[addr] = accountChanges
		}
	}
	return res
}
// accountChangesAt returns the state changes of an account at a given index,
// or nil if there are no changes.
func (r *BALReader) accountChangesAt(addr common.Address, idx int) *bal.AccountState {
	acct, exist := r.accesses[addr]
	if !exist {
		return nil
	}
	var res bal.AccountState
	// Each change list is scanned from the back; iteration stops once TxIdx
	// drops below idx.
	for i := len(acct.BalanceChanges) - 1; i >= 0; i-- {
		if acct.BalanceChanges[i].TxIdx == uint16(idx) {
			res.Balance = acct.BalanceChanges[i].Balance
		}
		if acct.BalanceChanges[i].TxIdx < uint16(idx) {
			break
		}
	}
	for i := len(acct.CodeChanges) - 1; i >= 0; i-- {
		if acct.CodeChanges[i].TxIdx == uint16(idx) {
			res.Code = acct.CodeChanges[i].Code
			break
		}
		if acct.CodeChanges[i].TxIdx < uint16(idx) {
			break
		}
	}
	for i := len(acct.NonceChanges) - 1; i >= 0; i-- {
		if acct.NonceChanges[i].TxIdx == uint16(idx) {
			res.Nonce = &acct.NonceChanges[i].Nonce
			break
		}
		if acct.NonceChanges[i].TxIdx < uint16(idx) {
			break
		}
	}
	for i := len(acct.StorageChanges) - 1; i >= 0; i-- {
		slotWrites := acct.StorageChanges[i]
		for j := len(slotWrites.Accesses) - 1; j >= 0; j-- {
			if slotWrites.Accesses[j].TxIdx == uint16(idx) {
				// Allocate the map lazily on the first match; the original
				// eagerly created (and re-nil'ed) a map per storage entry.
				if res.StorageWrites == nil {
					res.StorageWrites = make(map[common.Hash]common.Hash)
				}
				res.StorageWrites[slotWrites.Slot] = slotWrites.Accesses[j].ValueAfter
				break
			}
			if slotWrites.Accesses[j].TxIdx < uint16(idx) {
				break
			}
		}
	}
	if res.Code == nil && res.Nonce == nil && len(res.StorageWrites) == 0 && res.Balance == nil {
		return nil
	}
	return &res
}
// isModified reports whether the access list declares any mutation (storage,
// balance, code or nonce change) for addr.
func (r *BALReader) isModified(addr common.Address) bool {
	access, ok := r.accesses[addr]
	if !ok {
		return false
	}
	return len(access.StorageChanges) > 0 ||
		len(access.BalanceChanges) > 0 ||
		len(access.CodeChanges) > 0 ||
		len(access.NonceChanges) > 0
}
// readAccount materializes a state object for addr as of access-list index
// idx, layering the accumulated BAL diff on top of the resolved prestate.
// It blocks until the prestate fetch for addr (if one was scheduled) is done.
func (r *BALReader) readAccount(db *StateDB, addr common.Address, idx int) *stateObject {
	diff := r.readAccountDiff(addr, idx)
	prestate := r.prestateReader.account(addr)
	return r.initObjFromDiff(db, addr, prestate, diff)
}
// readAccountDiff returns the accumulated state changes of an account up
// through idx, or nil when the account is absent from the access list.
func (r *BALReader) readAccountDiff(addr common.Address, idx int) *bal.AccountState {
	access, exist := r.accesses[addr]
	if !exist {
		return nil
	}
	limit := uint16(idx)
	var cum bal.AccountState
	// Change lists are ordered by TxIdx; walk forward so the last qualifying
	// entry (TxIdx <= limit) wins.
	for i := range access.BalanceChanges {
		if access.BalanceChanges[i].TxIdx > limit {
			break
		}
		cum.Balance = access.BalanceChanges[i].Balance
	}
	for i := range access.CodeChanges {
		if access.CodeChanges[i].TxIdx > limit {
			break
		}
		cum.Code = access.CodeChanges[i].Code
	}
	for i := range access.NonceChanges {
		if access.NonceChanges[i].TxIdx > limit {
			break
		}
		cum.Nonce = &access.NonceChanges[i].Nonce
	}
	if len(access.StorageChanges) > 0 {
		cum.StorageWrites = make(map[common.Hash]common.Hash)
		for _, slotWrites := range access.StorageChanges {
			for i := range slotWrites.Accesses {
				if slotWrites.Accesses[i].TxIdx > limit {
					break
				}
				cum.StorageWrites[slotWrites.Slot] = slotWrites.Accesses[i].ValueAfter
			}
		}
	}
	return &cum
}
// ValidateStateDiff returns an error if the computed state diff is not equal
// to the diff reported from the access list at the given index. Equality is
// established by per-account comparison plus a size check in each direction.
func (r *BALReader) ValidateStateDiff(idx int, computedDiff *bal.StateDiff) error {
	expected := r.changesAt(idx)
	for addr, expectedState := range expected.Mutations {
		got, ok := computedDiff.Mutations[addr]
		if !ok {
			return fmt.Errorf("BAL contained account %x which wasn't present in computed state diff", addr)
		}
		if !expectedState.Eq(got) {
			return fmt.Errorf("difference between computed state diff and BAL entry for account %x", addr)
		}
	}
	if len(expected.Mutations) != len(computedDiff.Mutations) {
		return fmt.Errorf("computed state diff contained mutated accounts which weren't reported in BAL")
	}
	return nil
}

View file

@ -99,12 +99,18 @@ type Trie interface {
// in the trie with provided address.
UpdateAccount(address common.Address, account *types.StateAccount, codeLen int) error
// UpdateAccountBatch attempts to update a list accounts in the batch manner.
UpdateAccountBatch(addresses []common.Address, accounts []*types.StateAccount, _ []int) error
// UpdateStorage associates key with value in the trie. If value has length zero,
// any existing value is deleted from the trie. The value bytes must not be modified
// by the caller while they are stored in the trie. If a node was not found in the
// database, a trie.MissingNodeError is returned.
UpdateStorage(addr common.Address, key, value []byte) error
// UpdateStorageBatch attempts to update a list storages in the batch manner.
UpdateStorageBatch(_ common.Address, keys [][]byte, values [][]byte) error
// DeleteAccount abstracts an account deletion from the trie.
DeleteAccount(address common.Address) error

View file

@ -381,7 +381,7 @@ func (ch nonceChange) copy() journalEntry {
}
func (ch codeChange) revert(s *StateDB) {
s.getStateObject(ch.account).setCode(crypto.Keccak256Hash(ch.prevCode), ch.prevCode)
s.getStateObject(ch.account).setCodeModified(crypto.Keccak256Hash(ch.prevCode), ch.prevCode)
}
func (ch codeChange) dirtied() *common.Address {

View file

@ -83,6 +83,8 @@ type stateObject struct {
// the contract is just created within the current transaction, or when the
// object was previously existent and is being deployed as a contract within
// the current transaction.
//
// the flag is set upon beginning of contract initcode execution, not when the code is actually deployed to the address.
newContract bool
}
@ -176,6 +178,7 @@ func (s *stateObject) GetCommittedState(key common.Hash) common.Hash {
if value, pending := s.pendingStorage[key]; pending {
return value
}
if value, cached := s.originStorage[key]; cached {
return value
}
@ -244,9 +247,10 @@ func (s *stateObject) finalise() {
// The slot is reverted to its original value, delete the entry
// to avoid thrashing the data structures.
delete(s.uncommittedStorage, key)
} else if exist {
// The slot is modified to another value and the slot has been
// tracked for commit, do nothing here.
// tracked for commit in uncommittedStorage.
} else {
// The slot is different from its original value and hasn't been
// tracked for commit yet.
@ -323,8 +327,10 @@ func (s *stateObject) updateTrie() (Trie, error) {
// into a shortnode. This requires `B` to be resolved from disk.
// Whereas if the created node is handled first, then the collapse is avoided, and `B` is not resolved.
var (
deletions []common.Hash
used = make([]common.Hash, 0, len(s.uncommittedStorage))
deletions []common.Hash
used = make([]common.Hash, 0, len(s.uncommittedStorage))
updateKeys [][]byte
updateValues [][]byte
)
for key, origin := range s.uncommittedStorage {
// Skip noop changes, persist actual changes
@ -338,10 +344,8 @@ func (s *stateObject) updateTrie() (Trie, error) {
continue
}
if (value != common.Hash{}) {
if err := tr.UpdateStorage(s.address, key[:], common.TrimLeftZeroes(value[:])); err != nil {
s.db.setError(err)
return nil, err
}
updateKeys = append(updateKeys, key[:])
updateValues = append(updateValues, common.TrimLeftZeroes(value[:]))
s.db.StorageUpdated.Add(1)
} else {
deletions = append(deletions, key)
@ -349,6 +353,12 @@ func (s *stateObject) updateTrie() (Trie, error) {
// Cache the items for preloading
used = append(used, key) // Copy needed for closure
}
if len(updateKeys) > 0 {
if err := tr.UpdateStorageBatch(common.Address{}, updateKeys, updateValues); err != nil {
s.db.setError(err)
return nil, err
}
}
for _, key := range deletions {
if err := tr.DeleteStorage(s.address, key[:]); err != nil {
s.db.setError(err)
@ -564,13 +574,18 @@ func (s *stateObject) CodeSize() int {
func (s *stateObject) SetCode(codeHash common.Hash, code []byte) (prev []byte) {
prev = slices.Clone(s.code)
s.db.journal.setCode(s.address, prev)
s.setCode(codeHash, code)
s.setCodeModified(codeHash, code)
return prev
}
// setCode stores the code and its hash on the object without marking the
// code as dirty; use setCodeModified when the change must be committed.
func (s *stateObject) setCode(codeHash common.Hash, code []byte) {
	s.data.CodeHash = codeHash[:]
	s.code = code
}
// setCodeModified sets the code and hash, and additionally flags the code as
// dirty so it is written out on commit.
func (s *stateObject) setCodeModified(codeHash common.Hash, code []byte) {
	s.dirtyCode = true
	s.setCode(codeHash, code)
}

View file

@ -118,6 +118,14 @@ type StateDB struct {
// The tx context and all occurred logs in the scope of transaction.
thash common.Hash
txIndex int
sender common.Address
// block access list modifications will be recorded with this index.
// 0 - state access before transaction execution
// 1 -> len(block txs) - state access of each transaction
// len(block txs) + 1 - state access after transaction execution.
balIndex int
logs map[common.Hash][]*types.Log
logSize uint
@ -139,6 +147,8 @@ type StateDB struct {
witness *stateless.Witness
witnessStats *stateless.WitnessStats
blockAccessList *BALReader
// Measurements gathered during execution for debugging purposes
AccountReads time.Duration
AccountHashes time.Duration
@ -158,6 +168,10 @@ type StateDB struct {
StorageDeleted atomic.Int64 // Number of storage slots deleted during the state transition
}
// BlockAccessList returns the block access list reader configured via
// SetBlockAccessList, or nil when BAL-based execution is not in use.
func (s *StateDB) BlockAccessList() *BALReader {
	return s.blockAccessList
}
// New creates a new state from a given trie.
func New(root common.Hash, db Database) (*StateDB, error) {
reader, err := db.Reader(root)
@ -285,6 +299,38 @@ func (s *StateDB) AddRefund(gas uint64) {
s.refund += gas
}
// SetBlockAccessList attaches a block access list reader to the state.
// When set, account reads may be served from the access-list diffs instead
// of the underlying state reader (see getStateObject).
func (s *StateDB) SetBlockAccessList(al *BALReader) {
	s.blockAccessList = al
}
// LoadModifiedPrestate instantiates the live object based on accounts
// which appeared in the total state diff of a block, and were also preexisting.
func (s *StateDB) LoadModifiedPrestate(addrs []common.Address) (res map[common.Address]*types.StateAccount) {
stateAccounts := new(sync.Map)
wg := new(sync.WaitGroup)
res = make(map[common.Address]*types.StateAccount)
for _, addr := range addrs {
wg.Add(1)
go func(addr common.Address) {
acct, err := s.reader.Account(addr)
if err == nil && acct != nil { // TODO: what should we do if the error is not nil?
stateAccounts.Store(addr, acct)
}
wg.Done()
}(addr)
}
wg.Wait()
stateAccounts.Range(func(addr any, val any) bool {
address := addr.(common.Address)
stateAccount := val.(*types.StateAccount)
res[address] = stateAccount
return true
})
return res
}
// SubRefund removes gas from the refund counter.
// This method will panic if the refund counter goes below zero
func (s *StateDB) SubRefund(gas uint64) {
@ -301,6 +347,11 @@ func (s *StateDB) Exist(addr common.Address) bool {
return s.getStateObject(addr) != nil
}
// ExistBeforeCurTx reports whether the account exists in state and was not
// freshly created within the current transaction (newContract unset).
func (s *StateDB) ExistBeforeCurTx(addr common.Address) bool {
	if obj := s.getStateObject(addr); obj != nil {
		return !obj.newContract
	}
	return false
}
// Empty returns whether the state object is either non-existent
// or empty according to the EIP161 specification (balance = nonce = code = 0)
func (s *StateDB) Empty(addr common.Address) bool {
@ -576,6 +627,25 @@ func (s *StateDB) updateStateObject(obj *stateObject) {
s.trie.UpdateContractCode(obj.Address(), common.BytesToHash(obj.CodeHash()), obj.code)
}
}
// updateStateObjects writes the given objects into the account trie in a
// single batch, then flushes any dirty contract code. It is the batched
// counterpart of updateStateObject.
func (s *StateDB) updateStateObjects(objs []*stateObject) {
	// Pre-size both slices; the batch always contains one entry per object.
	addrs := make([]common.Address, 0, len(objs))
	accts := make([]*types.StateAccount, 0, len(objs))
	for _, obj := range objs {
		addrs = append(addrs, obj.Address())
		accts = append(accts, &obj.data)
	}
	// Wrap with %w so callers inspecting the db error can unwrap the cause.
	if err := s.trie.UpdateAccountBatch(addrs, accts, nil); err != nil {
		s.setError(fmt.Errorf("updateStateObjects error: %w", err))
	}
	for _, obj := range objs {
		if obj.dirtyCode {
			s.trie.UpdateContractCode(obj.Address(), common.BytesToHash(obj.CodeHash()), obj.code)
		}
	}
}
// deleteStateObject removes the given object from the state trie.
func (s *StateDB) deleteStateObject(addr common.Address) {
@ -595,6 +665,24 @@ func (s *StateDB) getStateObject(addr common.Address) *stateObject {
if _, ok := s.stateObjectsDestruct[addr]; ok {
return nil
}
// if we are executing against a block access list, construct the account
// state at the current tx index by applying the access-list diff on top
// of the prestate value for the account.
if s.blockAccessList != nil && s.balIndex != 0 && s.blockAccessList.isModified(addr) {
acct := s.blockAccessList.readAccount(s, addr, s.balIndex-1)
if acct != nil {
s.setStateObject(acct)
return acct
}
return nil
// if the acct was nil, it might be non-existent or was not explicitly requested for loading from the blockAccessList object.
// try to load it from the snapshot.
// TODO: if the acct was non-existent because it was deleted, we should just return nil here.
}
s.AccountLoaded++
start := time.Now()
@ -631,6 +719,7 @@ func (s *StateDB) getOrNewStateObject(addr common.Address) *stateObject {
if obj == nil {
obj = s.createObject(addr)
}
return obj
}
@ -679,10 +768,14 @@ func (s *StateDB) Copy() *StateDB {
refund: s.refund,
thash: s.thash,
txIndex: s.txIndex,
balIndex: s.txIndex,
logs: make(map[common.Hash][]*types.Log, len(s.logs)),
logSize: s.logSize,
preimages: maps.Clone(s.preimages),
// don't deep-copy these
blockAccessList: s.blockAccessList,
// Do we need to copy the access list and transient storage?
// In practice: No. At the start of a transaction, these two lists are empty.
// In practice, we only ever copy state _between_ transactions/blocks, never
@ -744,6 +837,9 @@ func (s *StateDB) GetRefund() uint64 {
// Finalise finalises the state by removing the destructed objects and clears
// the journal as well as the refunds. Finalise, however, will not push any updates
// into the tries just yet. Only IntermediateRoot or Commit will do that.
//
// If EnableStateDiffRecording has been called, it returns a state diff containing
// the state which was mutated since the previous invocation of Finalise. Otherwise, nil.
func (s *StateDB) Finalise(deleteEmptyObjects bool) {
addressesToPrefetch := make([]common.Address, 0, len(s.journal.dirties))
for addr := range s.journal.dirties {
@ -769,8 +865,7 @@ func (s *StateDB) Finalise(deleteEmptyObjects bool) {
} else {
obj.finalise()
s.markUpdate(addr)
}
// At this point, also ship the address off to the precacher. The precacher
} // At this point, also ship the address off to the precacher. The precacher
// will start loading tries, and when the change is eventually committed,
// the commit-phase will be a lot faster
addressesToPrefetch = append(addressesToPrefetch, addr) // Copy needed for closure
@ -780,6 +875,7 @@ func (s *StateDB) Finalise(deleteEmptyObjects bool) {
log.Error("Failed to prefetch addresses", "addresses", len(addressesToPrefetch), "err", err)
}
}
// Invalidate journal because reverting across transactions is not allowed.
s.clearJournalAndRefund()
}
@ -788,7 +884,7 @@ func (s *StateDB) Finalise(deleteEmptyObjects bool) {
// It is called in between transactions to get the root hash that
// goes into transaction receipts.
func (s *StateDB) IntermediateRoot(deleteEmptyObjects bool) common.Hash {
// Finalise all the dirty storage states and write them into the tries
// Finalise all the dirty storage states and write them into the tries
s.Finalise(deleteEmptyObjects)
// Initialize the trie if it's not constructed yet. If the prefetch
@ -929,6 +1025,7 @@ func (s *StateDB) IntermediateRoot(deleteEmptyObjects bool) common.Hash {
var (
usedAddrs []common.Address
deletedAddrs []common.Address
updatedObjs []*stateObject
)
for addr, op := range s.mutations {
if op.applied {
@ -939,11 +1036,14 @@ func (s *StateDB) IntermediateRoot(deleteEmptyObjects bool) common.Hash {
if op.isDelete() {
deletedAddrs = append(deletedAddrs, addr)
} else {
s.updateStateObject(s.stateObjects[addr])
updatedObjs = append(updatedObjs, s.stateObjects[addr])
s.AccountUpdated += 1
}
usedAddrs = append(usedAddrs, addr) // Copy needed for closure
}
if len(updatedObjs) > 0 {
s.updateStateObjects(updatedObjs)
}
for _, deletedAddr := range deletedAddrs {
s.deleteStateObject(deletedAddr)
s.AccountDeleted += 1
@ -955,9 +1055,21 @@ func (s *StateDB) IntermediateRoot(deleteEmptyObjects bool) common.Hash {
}
// Track the amount of time wasted on hashing the account trie
defer func(start time.Time) { s.AccountHashes += time.Since(start) }(time.Now())
hash := s.trie.Hash()
/*
it, err := s.trie.NodeIterator([]byte{})
if err != nil {
panic(err)
}
fmt.Println("state trie")
for it.Next(true) {
if it.Leaf() {
fmt.Printf("%x: %x\n", it.Path(), it.LeafBlob())
} else {
fmt.Printf("%x: %x\n", it.Path(), it.Hash())
}
}
*/
// If witness building is enabled, gather the account trie witness
if s.witness != nil {
witness := s.trie.Witness()
@ -966,6 +1078,7 @@ func (s *StateDB) IntermediateRoot(deleteEmptyObjects bool) common.Hash {
s.witnessStats.Add(witness, common.Hash{})
}
}
return hash
}
@ -975,6 +1088,19 @@ func (s *StateDB) IntermediateRoot(deleteEmptyObjects bool) common.Hash {
// SetTxContext sets the current transaction hash and index for logging and
// block access list bookkeeping.
func (s *StateDB) SetTxContext(thash common.Hash, ti int) {
	s.thash = thash
	s.txIndex = ti
	// BAL index 0 is reserved for state accessed before transaction
	// execution (pre-tx system calls), so transaction ti records its
	// accesses at index ti+1.
	s.balIndex = ti + 1
}
// SetAccessListIndex sets the current index that state mutations will
// be reported as in the BAL. It is only relevant if this StateDB instance
// is being used in the BAL construction path, and overrides the index
// derived from SetTxContext (which records tx ti at BAL index ti+1).
func (s *StateDB) SetAccessListIndex(idx int) {
	s.balIndex = idx
}
// SetTxSender sets the sender of the currently-executing transaction.
// NOTE(review): only the setter is visible here — confirm where s.sender is
// consumed (presumably by the BAL construction path).
func (s *StateDB) SetTxSender(sender common.Address) {
	s.sender = sender
}
func (s *StateDB) clearJournalAndRefund() {
@ -1160,6 +1286,7 @@ func (s *StateDB) commit(deleteEmptyObjects bool, noStorageWiping bool, blockNum
if s.dbErr != nil {
return nil, fmt.Errorf("commit aborted due to earlier error: %v", s.dbErr)
}
// Finalize any pending changes and merge everything into the tries
s.IntermediateRoot(deleteEmptyObjects)

View file

@ -17,8 +17,6 @@
package state
import (
"math/big"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/stateless"
"github.com/ethereum/go-ethereum/core/tracing"
@ -27,6 +25,7 @@ import (
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/trie/utils"
"github.com/holiman/uint256"
"math/big"
)
// hookedStateDB represents a statedb which emits calls to tracing-hooks
@ -241,32 +240,16 @@ func (s *hookedStateDB) SelfDestruct(address common.Address) uint256.Int {
return prev
}
func (s *hookedStateDB) SelfDestruct6780(address common.Address) (uint256.Int, bool) {
var prevCode []byte
var prevCodeHash common.Hash
if s.hooks.OnCodeChange != nil {
prevCodeHash = s.inner.GetCodeHash(address)
prevCode = s.inner.GetCode(address)
}
prev, changed := s.inner.SelfDestruct6780(address)
if s.hooks.OnBalanceChange != nil && !prev.IsZero() {
s.hooks.OnBalanceChange(address, prev.ToBig(), new(big.Int), tracing.BalanceDecreaseSelfdestruct)
}
if changed && len(prevCode) > 0 {
if s.hooks.OnCodeChangeV2 != nil {
s.hooks.OnCodeChangeV2(address, prevCodeHash, prevCode, types.EmptyCodeHash, nil, tracing.CodeChangeSelfDestruct)
} else if s.hooks.OnCodeChange != nil {
s.hooks.OnCodeChange(address, prevCodeHash, prevCode, types.EmptyCodeHash, nil)
}
}
// SelfDestruct6780 forwards the EIP-6780 self-destruct to the wrapped state
// and returns its result unchanged.
func (s *hookedStateDB) SelfDestruct6780(src common.Address) (uint256.Int, bool) {
	return s.inner.SelfDestruct6780(src)
}
// ExistBeforeCurTx reports whether addr existed prior to the current
// transaction, delegating to the wrapped StateDB.
func (s *hookedStateDB) ExistBeforeCurTx(addr common.Address) bool {
	return s.inner.ExistBeforeCurTx(addr)
}
func (s *hookedStateDB) AddLog(log *types.Log) {
// The inner will modify the log (add fields), so invoke that first
s.inner.AddLog(log)
@ -277,16 +260,37 @@ func (s *hookedStateDB) AddLog(log *types.Log) {
func (s *hookedStateDB) Finalise(deleteEmptyObjects bool) {
defer s.inner.Finalise(deleteEmptyObjects)
if s.hooks.OnBalanceChange == nil {
return
}
for addr := range s.inner.journal.dirties {
obj := s.inner.stateObjects[addr]
if obj != nil && obj.selfDestructed {
// If ether was sent to account post-selfdestruct it is burnt.
if bal := obj.Balance(); bal.Sign() != 0 {
s.hooks.OnBalanceChange(addr, bal.ToBig(), new(big.Int), tracing.BalanceDecreaseSelfdestructBurn)
if s.hooks.OnSelfDestructChange != nil || s.hooks.OnBalanceChange != nil || s.hooks.OnNonceChangeV2 != nil || s.hooks.OnCodeChangeV2 != nil || s.hooks.OnCodeChange != nil {
for addr := range s.inner.journal.dirties {
obj := s.inner.stateObjects[addr]
if obj != nil && obj.selfDestructed {
if obj.selfDestructed && s.hooks.OnSelfDestructChange != nil {
// when executing, can we tell the difference between
s.hooks.OnSelfDestructChange(obj.address)
}
// If ether was sent to account post-selfdestruct it is burnt.
if s.hooks.OnBalanceChange != nil {
if bal := obj.Balance(); bal.Sign() != 0 {
s.hooks.OnBalanceChange(addr, bal.ToBig(), new(big.Int), tracing.BalanceDecreaseSelfdestructBurn)
}
}
if s.hooks.OnNonceChangeV2 != nil {
prevNonce := obj.Nonce()
s.hooks.OnNonceChangeV2(addr, prevNonce, 0, tracing.NonceChangeSelfdestruct)
}
prevCodeHash := s.inner.GetCodeHash(addr)
prevCode := s.inner.GetCode(addr)
if s.hooks.OnCodeChangeV2 != nil {
s.hooks.OnCodeChangeV2(addr, prevCodeHash, prevCode, types.EmptyCodeHash, nil, tracing.CodeChangeSelfDestruct)
} else if s.hooks.OnCodeChange != nil {
s.hooks.OnCodeChange(addr, prevCodeHash, prevCode, types.EmptyCodeHash, nil)
}
}
}
}
}
func (s *hookedStateDB) TxIndex() int {
return s.inner.TxIndex()
}

View file

@ -175,10 +175,10 @@ func TestCopy(t *testing.T) {
orig.Finalise(false)
// Copy the state
copy := orig.Copy()
copy := orig.Copy().(*StateDB)
// Copy the copy state
ccopy := copy.Copy()
ccopy := copy.Copy().(*StateDB)
// modify all in memory
for i := byte(0); i < 255; i++ {
@ -191,7 +191,7 @@ func TestCopy(t *testing.T) {
ccopyObj.AddBalance(uint256.NewInt(4 * uint64(i)))
}
// Finalise the changes on all concurrently
// Finalise the changes on all concurrently
finalise := func(wg *sync.WaitGroup, db *StateDB) {
defer wg.Done()
db.Finalise(true)
@ -243,7 +243,7 @@ func TestCopyWithDirtyJournal(t *testing.T) {
amount := uint256.NewInt(uint64(i))
obj.SetBalance(new(uint256.Int).Sub(obj.Balance(), amount))
}
cpy := orig.Copy()
cpy := orig.Copy().(*StateDB)
orig.Finalise(true)
for i := byte(0); i < 255; i++ {
@ -278,7 +278,7 @@ func TestCopyObjectState(t *testing.T) {
obj.data.Root = common.HexToHash("0xdeadbeef")
}
orig.Finalise(true)
cpy := orig.Copy()
cpy := orig.Copy().(*StateDB)
for _, op := range cpy.mutations {
if have, want := op.applied, false; have != want {
t.Fatalf("Error in test itself, the 'done' flag should not be set before Commit, have %v want %v", have, want)
@ -528,7 +528,7 @@ func (test *snapshotTest) run() bool {
for i, action := range test.actions {
if len(test.snapshots) > sindex && i == test.snapshots[sindex] {
snapshotRevs[sindex] = state.Snapshot()
checkstates[sindex] = state.Copy()
checkstates[sindex] = state.Copy().(*StateDB)
sindex++
}
action.fn(action, state)
@ -747,7 +747,7 @@ func TestCopyCommitCopy(t *testing.T) {
t.Fatalf("initial committed storage slot mismatch: have %x, want %x", val, common.Hash{})
}
// Copy the non-committed state database and check pre/post commit balance
copyOne := state.Copy()
copyOne := state.Copy().(*StateDB)
if balance := copyOne.GetBalance(addr); balance.Cmp(uint256.NewInt(42)) != 0 {
t.Fatalf("first copy pre-commit balance mismatch: have %v, want %v", balance, 42)
}
@ -761,7 +761,7 @@ func TestCopyCommitCopy(t *testing.T) {
t.Fatalf("first copy pre-commit committed storage slot mismatch: have %x, want %x", val, common.Hash{})
}
// Copy the copy and check the balance once more
copyTwo := copyOne.Copy()
copyTwo := copyOne.Copy().(*StateDB)
if balance := copyTwo.GetBalance(addr); balance.Cmp(uint256.NewInt(42)) != 0 {
t.Fatalf("second copy balance mismatch: have %v, want %v", balance, 42)
}
@ -820,7 +820,7 @@ func TestCopyCopyCommitCopy(t *testing.T) {
t.Fatalf("initial committed storage slot mismatch: have %x, want %x", val, common.Hash{})
}
// Copy the non-committed state database and check pre/post commit balance
copyOne := state.Copy()
copyOne := state.Copy().(*StateDB)
if balance := copyOne.GetBalance(addr); balance.Cmp(uint256.NewInt(42)) != 0 {
t.Fatalf("first copy balance mismatch: have %v, want %v", balance, 42)
}
@ -834,7 +834,7 @@ func TestCopyCopyCommitCopy(t *testing.T) {
t.Fatalf("first copy committed storage slot mismatch: have %x, want %x", val, common.Hash{})
}
// Copy the copy and check the balance once more
copyTwo := copyOne.Copy()
copyTwo := copyOne.Copy().(*StateDB)
if balance := copyTwo.GetBalance(addr); balance.Cmp(uint256.NewInt(42)) != 0 {
t.Fatalf("second copy pre-commit balance mismatch: have %v, want %v", balance, 42)
}
@ -848,7 +848,7 @@ func TestCopyCopyCommitCopy(t *testing.T) {
t.Fatalf("second copy pre-commit committed storage slot mismatch: have %x, want %x", val, common.Hash{})
}
// Copy the copy-copy and check the balance once more
copyThree := copyTwo.Copy()
copyThree := copyTwo.Copy().(*StateDB)
if balance := copyThree.GetBalance(addr); balance.Cmp(uint256.NewInt(42)) != 0 {
t.Fatalf("third copy balance mismatch: have %v, want %v", balance, 42)
}
@ -896,7 +896,7 @@ func TestCommitCopy(t *testing.T) {
state.Commit(1, true, false)
// Copy the committed state database, the copied one is not fully functional.
copied := state.Copy()
copied := state.Copy().(*StateDB)
if balance := copied.GetBalance(addr); balance.Cmp(uint256.NewInt(42)) != 0 {
t.Fatalf("unexpected balance: have %v", balance)
}
@ -1098,7 +1098,7 @@ func TestStateDBAccessList(t *testing.T) {
verifySlots("bb", "01", "02")
// Make a copy
stateCopy1 := state.Copy()
stateCopy1 := state.Copy().(*StateDB)
if exp, got := 4, state.journal.length(); exp != got {
t.Fatalf("journal length mismatch: have %d, want %d", got, exp)
}

View file

@ -79,7 +79,7 @@ func (p *StateProcessor) Process(block *types.Block, statedb *state.StateDB, cfg
)
// Apply pre-execution system calls.
var tracingStateDB = vm.StateDB(statedb)
var tracingStateDB vm.StateDB = statedb
if hooks := cfg.Tracer; hooks != nil {
tracingStateDB = state.NewHookedState(statedb, hooks)
}
@ -93,6 +93,10 @@ func (p *StateProcessor) Process(block *types.Block, statedb *state.StateDB, cfg
ProcessParentBlockHash(block.ParentHash(), evm)
}
if hooks := cfg.Tracer; hooks != nil {
hooks.OnPreTxExecutionDone()
}
// Iterate over and process the individual transactions
for i, tx := range block.Transactions() {
msg, err := TransactionToMessage(tx, signer, header.BaseFee)
@ -108,6 +112,10 @@ func (p *StateProcessor) Process(block *types.Block, statedb *state.StateDB, cfg
receipts = append(receipts, receipt)
allLogs = append(allLogs, receipt.Logs...)
}
// TODO: how do we signal to the BAL tracer that we are computing post-tx state changes here?
// if there are no txs in the block, then it will just record these state diffs at idx 0
// Read requests if Prague is enabled.
var requests [][]byte
if config.IsPrague(block.Number(), block.Time()) {
@ -118,17 +126,21 @@ func (p *StateProcessor) Process(block *types.Block, statedb *state.StateDB, cfg
}
// EIP-7002
if err := ProcessWithdrawalQueue(&requests, evm); err != nil {
return nil, fmt.Errorf("failed to process withdrawal queue: %w", err)
return nil, err
}
// EIP-7251
if err := ProcessConsolidationQueue(&requests, evm); err != nil {
return nil, fmt.Errorf("failed to process consolidation queue: %w", err)
return nil, err
}
}
// Finalize the block, applying any consensus engine specific extras (e.g. block rewards)
p.chain.Engine().Finalize(p.chain, header, tracingStateDB, block.Body())
if hooks := cfg.Tracer; hooks != nil {
hooks.OnBlockFinalization()
}
return &ProcessResult{
Receipts: receipts,
Requests: requests,
@ -154,6 +166,7 @@ func ApplyTransactionWithEVM(msg *Message, gp *GasPool, statedb *state.StateDB,
if err != nil {
return nil, err
}
// Update the state with pending changes.
var root []byte
if evm.ChainConfig().IsByzantium(blockNumber) {
@ -213,7 +226,8 @@ func ApplyTransaction(evm *vm.EVM, gp *GasPool, statedb *state.StateDB, header *
return nil, err
}
// Create a new context to be used in the EVM environment
return ApplyTransactionWithEVM(msg, gp, statedb, header.Number, header.Hash(), header.Time, tx, usedGas, evm)
receipts, err := ApplyTransactionWithEVM(msg, gp, statedb, header.Number, header.Hash(), header.Time, tx, usedGas, evm)
return receipts, err
}
// ProcessBeaconBlockRoot applies the EIP-4788 system call to the beacon block root

View file

@ -19,9 +19,6 @@ package core
import (
"bytes"
"fmt"
"math"
"math/big"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/tracing"
"github.com/ethereum/go-ethereum/core/types"
@ -29,6 +26,8 @@ import (
"github.com/ethereum/go-ethereum/crypto/kzg4844"
"github.com/ethereum/go-ethereum/params"
"github.com/holiman/uint256"
"math"
"math/big"
)
// ExecutionResult includes all output after executing given evm

View file

@ -70,7 +70,7 @@ func ExecuteStateless(config *params.ChainConfig, vmconfig vm.Config, block *typ
if err != nil {
return common.Hash{}, common.Hash{}, err
}
if err = validator.ValidateState(block, db, res, true); err != nil {
if err = validator.ValidateState(block, db, res, true, true); err != nil {
return common.Hash{}, common.Hash{}, err
}
// Almost everything validated, but receipt and state root needs to be returned

View file

@ -183,6 +183,14 @@ type (
// StorageChangeHook is called when the storage of an account changes.
StorageChangeHook = func(addr common.Address, slot common.Hash, prev, new common.Hash)
// ColdStorageReadHook is called before a previously-unread storage slot is read.
ColdStorageReadHook = func(common.Address, common.Hash)
// ColdAccountReadHook is called before a previously-unread account is read.
ColdAccountReadHook = func(address common.Address)
SelfDestructHook = func(address common.Address)
// LogHook is called when a log is emitted.
LogHook = func(log *types.Log)
@ -209,14 +217,22 @@ type Hooks struct {
OnSystemCallStart OnSystemCallStartHook
OnSystemCallStartV2 OnSystemCallStartHookV2
OnSystemCallEnd OnSystemCallEndHook
OnPreTxExecutionDone func() // called after pre-tx system contracts are invoked
OnBlockFinalization func() // called after post-tx system contracts and consensus finalization are invoked
// State events
OnBalanceChange BalanceChangeHook
OnNonceChange NonceChangeHook
OnNonceChangeV2 NonceChangeHookV2
OnCodeChange CodeChangeHook
OnCodeChangeV2 CodeChangeHookV2
OnStorageChange StorageChangeHook
OnLog LogHook
OnBalanceChange BalanceChangeHook
OnNonceChange NonceChangeHook
OnNonceChangeV2 NonceChangeHookV2
OnCodeChange CodeChangeHook
OnCodeChangeV2 CodeChangeHookV2
OnStorageChange StorageChangeHook
OnLog LogHook
OnSelfDestructChange SelfDestructHook
//State read events
OnColdStorageRead ColdStorageReadHook
OnColdAccountRead ColdAccountReadHook
// Block hash read
OnBlockHashRead BlockHashReadHook
}
@ -375,6 +391,9 @@ const (
// NonceChangeRevert is emitted when the nonce is reverted back to a previous value due to call failure.
// It is only emitted when the tracer has opted in to use the journaling wrapper (WrapWithJournal).
NonceChangeRevert NonceChangeReason = 6
// NonceChangeSelfdestruct is emitted when the nonce is reset to zero due to a self-destruct
NonceChangeSelfdestruct NonceChangeReason = 7
)
// CodeChangeReason is used to indicate the reason for a code change.

View file

@ -147,6 +147,18 @@ func (j *journal) OnExit(depth int, output []byte, gasUsed uint64, err error, re
}
}
// OnColdStorageLoad forwards a cold storage read to the wrapped
// OnColdStorageRead hook, if one is installed. Unlike the mutation hooks,
// no journal entry is appended: a read leaves nothing to revert.
func (j *journal) OnColdStorageLoad(address common.Address, key common.Hash) {
	if j.hooks.OnColdStorageRead != nil {
		j.hooks.OnColdStorageRead(address, key)
	}
}
// OnColdAccountLoad forwards a cold account read to the wrapped
// OnColdAccountRead hook, if one is installed. No journal entry is
// appended since a read leaves nothing to revert.
func (j *journal) OnColdAccountLoad(address common.Address) {
	if j.hooks.OnColdAccountRead != nil {
		j.hooks.OnColdAccountRead(address)
	}
}
func (j *journal) OnBalanceChange(addr common.Address, prev, new *big.Int, reason BalanceChangeReason) {
j.entries = append(j.entries, balanceChange{addr: addr, prev: prev, new: new})
if j.hooks.OnBalanceChange != nil {

View file

@ -32,7 +32,7 @@ type Validator interface {
ValidateBody(block *types.Block) error
// ValidateState validates the given statedb and optionally the process result.
ValidateState(block *types.Block, state *state.StateDB, res *ProcessResult, stateless bool) error
ValidateState(block *types.Block, state *state.StateDB, res *ProcessResult, validateStateRoot, stateless bool) error
}
// Prefetcher is an interface for pre-caching transaction signatures and state.
@ -57,4 +57,5 @@ type ProcessResult struct {
Requests [][]byte
Logs []*types.Log
GasUsed uint64
Error error
}

View file

@ -18,143 +18,512 @@ package bal
import (
"bytes"
"maps"
"encoding/json"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/params"
"github.com/holiman/uint256"
"maps"
)
/*
BAL Building rework
type BALBuilder
* hold state for the current execution context:
* the state mutations that have already been finalized (previous completed txs)
* state reads that have been finalized
* the pending state reads/mutations of the current tx
pending state:
* a stack (pushing/popping as new execution frames are entered/exited),
each item is a map (address -> accountStateAndModifications{})
finalized state:
* the ConstructionBlockAccessList type (sans the pending state stuff that I have added there)
Verification Path:
* only validate single "transition" at a time:
* only need the component which collects pending state and finalizes it for one step.
TLDR:
* break the pending state into its own struct, out of ConstructionBlockAccessList
* create a 'BALBuilder' type that encompasses the 'finalized' ConstructionBlockAccessList and pending state
* ensure that this new model fits nicely with the BAL validation code path
*/
// AccessListBuilder accumulates the state reads and mutations observed during
// execution for block access list construction.
//
// TODO: maybe rename this to StateDiffBuilder (?)
type AccessListBuilder struct {
	// prestates stores the tx-prestate values of any account/storage values
	// which were modified.
	//
	// NOTE: AccountState.StorageWrites is reused here to hold the *prestate*
	// values of keys which were written rather than the mutated values, so the
	// meaning of that field depends on context. TODO: find a cleaner solution
	// than overloading the field.
	prestates map[common.Address]*AccountState
	// accessesStack collects pending accesses, one map per execution frame
	// (pushed/popped as frames are entered/exited); the innermost frame is
	// the last element.
	accessesStack []map[common.Address]*constructionAccountAccess
}
// NewAccessListBuilder returns an empty builder with a single (outermost)
// access frame already pushed, ready to record execution.
func NewAccessListBuilder() *AccessListBuilder {
	// Use keyed fields: the unkeyed literal silently breaks if the struct's
	// field order ever changes.
	return &AccessListBuilder{
		prestates: make(map[common.Address]*AccountState),
		accessesStack: []map[common.Address]*constructionAccountAccess{
			make(map[common.Address]*constructionAccountAccess),
		},
	}
}
// StorageRead records a read of the given storage slot in the innermost
// access frame, creating the account entry on first touch.
func (c *AccessListBuilder) StorageRead(address common.Address, key common.Hash) {
	frame := c.accessesStack[len(c.accessesStack)-1]
	access, ok := frame[address]
	if !ok {
		access = &constructionAccountAccess{}
		frame[address] = access
	}
	access.StorageRead(key)
}
// AccountRead records that the account was touched in the innermost access
// frame, creating an empty entry if none exists yet.
func (c *AccessListBuilder) AccountRead(address common.Address) {
	frame := c.accessesStack[len(c.accessesStack)-1]
	if _, ok := frame[address]; !ok {
		frame[address] = &constructionAccountAccess{}
	}
}
// StorageWrite records a storage mutation: the slot's pre-transaction value is
// captured once in the prestate map (first write wins), and the write itself
// is recorded in the innermost access frame.
func (c *AccessListBuilder) StorageWrite(address common.Address, key, prevVal, newVal common.Hash) {
	pre, ok := c.prestates[address]
	if !ok {
		pre = &AccountState{}
		c.prestates[address] = pre
	}
	if pre.StorageWrites == nil {
		pre.StorageWrites = make(map[common.Hash]common.Hash)
	}
	if _, seen := pre.StorageWrites[key]; !seen {
		pre.StorageWrites[key] = prevVal
	}
	frame := c.accessesStack[len(c.accessesStack)-1]
	access, ok := frame[address]
	if !ok {
		access = &constructionAccountAccess{}
		frame[address] = access
	}
	access.StorageWrite(key, prevVal, newVal)
}
// BalanceChange records a balance change in the current scope. The first
// change for an account captures prev as its tx-prestate balance.
func (c *AccessListBuilder) BalanceChange(address common.Address, prev, cur *uint256.Int) {
	prestate, ok := c.prestates[address]
	if !ok {
		prestate = &AccountState{}
		c.prestates[address] = prestate
	}
	if prestate.Balance == nil {
		prestate.Balance = prev
	}
	scope := c.accessesStack[len(c.accessesStack)-1]
	access, ok := scope[address]
	if !ok {
		access = &constructionAccountAccess{}
		scope[address] = access
	}
	access.BalanceChange(cur)
}
// CodeChange records a code change in the current scope. The first change
// for an account captures prev as its tx-prestate code.
//
// Auth unset and selfdestruct pass the new code as nil; internally, in the
// access list accumulation of state changes, a nil field on an account means
// it was never modified in the block, so nil is normalized to an empty slice.
func (c *AccessListBuilder) CodeChange(address common.Address, prev, cur []byte) {
	if cur == nil {
		cur = []byte{}
	}
	prestate, ok := c.prestates[address]
	if !ok {
		prestate = &AccountState{}
		c.prestates[address] = prestate
	}
	if prestate.Code == nil {
		prestate.Code = prev
	}
	scope := c.accessesStack[len(c.accessesStack)-1]
	access, ok := scope[address]
	if !ok {
		access = &constructionAccountAccess{}
		scope[address] = access
	}
	access.CodeChange(cur)
}
// SelfDestruct demotes all storage writes recorded for the account in the
// current scope to reads, preserving any existing reads.
//
// TODO: rename this hook to "deleted" (CREATE2 + initcode + CALL empties account case)
func (c *AccessListBuilder) SelfDestruct(address common.Address) {
	access, ok := c.accessesStack[len(c.accessesStack)-1][address]
	if !ok {
		// TODO: figure out exactly which situations cause this case.
		// It has to do with an account becoming empty and deleted,
		// but why was it created as a stateObject without also having
		// any access/modification events on it?
		return
	}
	// convert all the account storage writes to reads, preserving existing reads
	for key := range access.storageMutations {
		if access.storageReads == nil {
			access.storageReads = make(map[common.Hash]struct{})
		}
		access.storageReads[key] = struct{}{}
	}
	access.storageMutations = nil
	/*
		access.nonce = nil
		// TODO: should this be set to zero? the semantics are that nil means unmodified since the prestate of the block.
		access.balance = nil
		access.code = nil
	*/
}
// NonceChange records a nonce change in the current scope. The first change
// for an account captures prev as its tx-prestate nonce.
func (c *AccessListBuilder) NonceChange(address common.Address, prev, cur uint64) {
	prestate, ok := c.prestates[address]
	if !ok {
		prestate = &AccountState{}
		c.prestates[address] = prestate
	}
	if prestate.Nonce == nil {
		prestate.Nonce = &prev
	}
	scope := c.accessesStack[len(c.accessesStack)-1]
	access, ok := scope[address]
	if !ok {
		access = &constructionAccountAccess{}
		scope[address] = access
	}
	access.NonceChange(cur)
}
// EnterScope pushes a fresh access scope onto the stack. Together with
// ExitScope it brackets a nested execution frame whose accesses are later
// merged into the parent either in full or as reads-only (on revert).
func (c *AccessListBuilder) EnterScope() {
	c.accessesStack = append(c.accessesStack, make(map[common.Address]*constructionAccountAccess))
}
// ExitScope pops the innermost scope and merges it into its parent.
//
// If the scope reverted, all storage writes in the child scope are converted
// into reads; if there were no storage writes, the account is still reported
// in the BAL as a read (if it wasn't already in the BAL and/or mutated
// previously). On success the child accesses are merged in full.
func (c *AccessListBuilder) ExitScope(reverted bool) {
	childAccessList := c.accessesStack[len(c.accessesStack)-1]
	parentAccessList := c.accessesStack[len(c.accessesStack)-2]
	for addr, childAccess := range childAccessList {
		parentAccess, ok := parentAccessList[addr]
		if !ok {
			parentAccess = &constructionAccountAccess{}
			parentAccessList[addr] = parentAccess
		}
		if reverted {
			parentAccess.MergeReads(childAccess)
		} else {
			parentAccess.Merge(childAccess)
		}
	}
	c.accessesStack = c.accessesStack[:len(c.accessesStack)-1]
}
// FinaliseIdxChanges computes the net state changes of the current access
// list index from the outermost scope. Any mutation whose final value equals
// the recorded tx-prestate value is dropped and demoted to a read. It
// returns the net mutations and the set of state reads.
func (c *AccessListBuilder) FinaliseIdxChanges() (*StateDiff, StateAccesses) {
	diff := &StateDiff{make(map[common.Address]*AccountState)}
	stateAccesses := make(StateAccesses)
	for addr, access := range c.accessesStack[0] {
		// remove any mutations from the access list with no net difference vs the tx prestate value
		if access.nonce != nil && *c.prestates[addr].Nonce == *access.nonce {
			access.nonce = nil
		}
		if access.balance != nil && c.prestates[addr].Balance.Eq(access.balance) {
			access.balance = nil
		}
		if access.code != nil && bytes.Equal(access.code, c.prestates[addr].Code) {
			access.code = nil
		}
		if access.storageMutations != nil {
			for key, val := range access.storageMutations {
				if c.prestates[addr].StorageWrites[key] == val {
					delete(access.storageMutations, key)
					// the read set may still be nil when the account only saw
					// writes in this scope; writing to a nil map panics
					if access.storageReads == nil {
						access.storageReads = make(map[common.Hash]struct{})
					}
					access.storageReads[key] = struct{}{}
				}
			}
			if len(access.storageMutations) == 0 {
				access.storageMutations = nil
			}
		}
		// if the account has no net mutations against the tx prestate, only include
		// it in the state read set
		if len(access.code) == 0 && access.nonce == nil && access.balance == nil && len(access.storageMutations) == 0 {
			stateAccesses[addr] = make(map[common.Hash]struct{})
			if access.storageReads != nil {
				stateAccesses[addr] = access.storageReads
			}
			continue
		}
		stateAccesses[addr] = access.storageReads
		diff.Mutations[addr] = &AccountState{
			Balance:       access.balance,
			Nonce:         access.nonce,
			Code:          access.code,
			StorageWrites: access.storageMutations,
		}
	}
	return diff, stateAccesses
}
// Apply folds one index's (transaction's) net state diff and read accesses
// into the under-construction block access list. Mutations are recorded in
// the per-kind change maps keyed by idx; a storage key written here is
// removed from the account's accumulated read set, and a read is only
// recorded for keys not written by any earlier index.
func (c *ConstructionBlockAccessList) Apply(idx uint16, diff *StateDiff, accesses StateAccesses) {
	for addr, stateDiff := range diff.Mutations {
		acctChanges, ok := c.Accounts[addr]
		if !ok {
			acctChanges = &ConstructionAccountAccesses{}
			c.Accounts[addr] = acctChanges
		}
		if stateDiff.Nonce != nil {
			if acctChanges.NonceChanges == nil {
				acctChanges.NonceChanges = make(map[uint16]uint64)
			}
			acctChanges.NonceChanges[idx] = *stateDiff.Nonce
		}
		if stateDiff.Balance != nil {
			if acctChanges.BalanceChanges == nil {
				acctChanges.BalanceChanges = make(map[uint16]*uint256.Int)
			}
			acctChanges.BalanceChanges[idx] = stateDiff.Balance
		}
		if stateDiff.Code != nil {
			if acctChanges.CodeChanges == nil {
				acctChanges.CodeChanges = make(map[uint16]CodeChange)
			}
			acctChanges.CodeChanges[idx] = CodeChange{idx, stateDiff.Code}
		}
		if stateDiff.StorageWrites != nil {
			if acctChanges.StorageWrites == nil {
				acctChanges.StorageWrites = make(map[common.Hash]map[uint16]common.Hash)
			}
			for key, val := range stateDiff.StorageWrites {
				if _, ok := acctChanges.StorageWrites[key]; !ok {
					acctChanges.StorageWrites[key] = make(map[uint16]common.Hash)
				}
				acctChanges.StorageWrites[key][idx] = val
				// TODO: investigate why commenting out the check here, and the corresponding
				// check under accesses causes GeneralStateTests blockchain tests to fail.
				// They should only contain one tx per test.
				//
				// key could have been read in a previous tx, delete it from the read set here
				if _, ok := acctChanges.StorageReads[key]; ok {
					delete(acctChanges.StorageReads, key)
				}
			}
		}
	}
	for addr, stateAccesses := range accesses {
		acctAccess, ok := c.Accounts[addr]
		if !ok {
			acctAccess = &ConstructionAccountAccesses{}
			c.Accounts[addr] = acctAccess
		}
		for key := range stateAccesses {
			// if key was written in a previous tx, but only read in this one:
			// don't include it in the storage reads set
			if _, ok := acctAccess.StorageWrites[key]; ok {
				continue
			}
			if acctAccess.StorageReads == nil {
				acctAccess.StorageReads = make(map[common.Hash]struct{})
			}
			acctAccess.StorageReads[key] = struct{}{}
		}
	}
}
// TODO: the BalReader Validation method should accept the computed values as
// an index/StateDiff/StateAccesses trio.
//
// Design sketch: the BAL tracer maintains a ConstructionBlockAccessList.
// For each BAL index it instantiates an AccessListBuilder and appends the
// result to the access list where appropriate.
// ---- the notes above are a design sketch; the actual code follows below ----
// CodeChange contains the runtime bytecode deployed at an address and the
// transaction index where the deployment took place.
type CodeChange struct {
TxIndex uint16
Code []byte `json:"code,omitempty"`
TxIdx uint16
Code []byte `json:"code,omitempty"`
}
// ConstructionAccountAccess contains post-block account state for mutations as well as
// IgnoredBALAddresses is the set of addresses excluded from block access
// lists: the EIP system address plus addresses 0x01 through 0x11
// (presumably the precompiled-contract range — confirm against the active
// fork's precompile set).
//
// TODO: make use of this
var IgnoredBALAddresses = map[common.Address]struct{}{
	params.SystemAddress:                {},
	common.BytesToAddress([]byte{0x01}): {},
	common.BytesToAddress([]byte{0x02}): {},
	common.BytesToAddress([]byte{0x03}): {},
	common.BytesToAddress([]byte{0x04}): {},
	common.BytesToAddress([]byte{0x05}): {},
	common.BytesToAddress([]byte{0x06}): {},
	common.BytesToAddress([]byte{0x07}): {},
	common.BytesToAddress([]byte{0x08}): {},
	common.BytesToAddress([]byte{0x09}): {},
	common.BytesToAddress([]byte{0x0a}): {},
	common.BytesToAddress([]byte{0x0b}): {},
	common.BytesToAddress([]byte{0x0c}): {},
	common.BytesToAddress([]byte{0x0d}): {},
	common.BytesToAddress([]byte{0x0e}): {},
	common.BytesToAddress([]byte{0x0f}): {},
	common.BytesToAddress([]byte{0x10}): {},
	common.BytesToAddress([]byte{0x11}): {},
}
// ConstructionAccountAccesses contains post-block account state for mutations as well as
// all storage keys that were read during execution. It is used when building block
// access list during execution.
type ConstructionAccountAccess struct {
type ConstructionAccountAccesses struct {
// StorageWrites is the post-state values of an account's storage slots
// that were modified in a block, keyed by the slot key and the tx index
// where the modification occurred.
StorageWrites map[common.Hash]map[uint16]common.Hash `json:"storageWrites,omitempty"`
StorageWrites map[common.Hash]map[uint16]common.Hash
// StorageReads is the set of slot keys that were accessed during block
// execution.
//
// Storage slots which are both read and written (with changed values)
// appear only in StorageWrites.
StorageReads map[common.Hash]struct{} `json:"storageReads,omitempty"`
StorageReads map[common.Hash]struct{}
// BalanceChanges contains the post-transaction balances of an account,
// keyed by transaction indices where it was changed.
BalanceChanges map[uint16]*uint256.Int `json:"balanceChanges,omitempty"`
BalanceChanges map[uint16]*uint256.Int
// NonceChanges contains the post-state nonce values of an account keyed
// by tx index.
NonceChanges map[uint16]uint64 `json:"nonceChanges,omitempty"`
NonceChanges map[uint16]uint64
// CodeChange is only set for contract accounts which were deployed in
// the block.
CodeChange *CodeChange `json:"codeChange,omitempty"`
CodeChanges map[uint16]CodeChange
}
// NewConstructionAccountAccess initializes the account access object.
func NewConstructionAccountAccess() *ConstructionAccountAccess {
return &ConstructionAccountAccess{
// constructionAccountAccess accumulates the reads and mutations of a single
// account within one access-list builder scope. A nil field means the value
// was not modified in the scope.
type constructionAccountAccess struct {
	code             []byte                      // latest code written in this scope (empty slice = cleared)
	nonce            *uint64                     // latest nonce written in this scope
	balance          *uint256.Int                // latest balance written in this scope
	storageMutations map[common.Hash]common.Hash // latest value written per slot
	storageReads     map[common.Hash]struct{}    // slots read (and not also written) in this scope
}
// Merge folds the accesses of other into c. Non-nil scalar fields in other
// override c's; storage writes in other override c's and evict the matching
// keys from c's read set; other's reads are added to c's read set.
func (c *constructionAccountAccess) Merge(other *constructionAccountAccess) {
	if other.code != nil {
		c.code = other.code
	}
	if other.nonce != nil {
		c.nonce = other.nonce
	}
	if other.balance != nil {
		c.balance = other.balance
	}
	if other.storageMutations != nil {
		if c.storageMutations == nil {
			c.storageMutations = make(map[common.Hash]common.Hash)
		}
		for key, newVal := range other.storageMutations {
			c.storageMutations[key] = newVal
			delete(c.storageReads, key)
		}
	}
	if other.storageReads != nil {
		if c.storageReads == nil {
			c.storageReads = make(map[common.Hash]struct{}, len(other.storageReads))
		}
		maps.Copy(c.storageReads, other.storageReads)
	}
}
// MergeReads merges accesses from a reverted execution frame: any reads or
// writes from the reverted frame which weren't mutated in the current frame
// are merged into the current frame as reads.
func (c *constructionAccountAccess) MergeReads(other *constructionAccountAccess) {
	if other.storageMutations != nil {
		if c.storageReads == nil {
			c.storageReads = make(map[common.Hash]struct{})
		}
		for key := range other.storageMutations {
			if _, written := c.storageMutations[key]; !written {
				c.storageReads[key] = struct{}{}
			}
		}
	}
	if other.storageReads != nil {
		if c.storageReads == nil {
			c.storageReads = make(map[common.Hash]struct{})
		}
		for key := range other.storageReads {
			if _, written := c.storageMutations[key]; !written {
				c.storageReads[key] = struct{}{}
			}
		}
	}
}
// StorageRead records key in the account's read set. A slot that has already
// been written in this scope is not demoted: per the access list rules a key
// must appear in either the read set or the write set, never both, and the
// write entry subsumes the read.
func (c *constructionAccountAccess) StorageRead(key common.Hash) {
	// Callers should not read-record a slot already written in this scope
	// (see StorageWrite); tolerate it by keeping the write entry only,
	// instead of the previous debug panic which crashed the import.
	if _, written := c.storageMutations[key]; written {
		return
	}
	if c.storageReads == nil {
		c.storageReads = make(map[common.Hash]struct{})
	}
	// TODO: if a key is written in tx A, and later on read in tx B, it shouldn't be in the read set.
	// ^ same for account.
	c.storageReads[key] = struct{}{}
}
// StorageWrite records newVal as the latest value written to key in this
// scope. A key can be first read and later written, but it must only show up
// in either the read or the write set, not both, so the key is evicted from
// the read set here. (The caller should not call StorageRead on a slot that
// was already written.)
func (c *constructionAccountAccess) StorageWrite(key, prevVal, newVal common.Hash) {
	if c.storageMutations == nil {
		c.storageMutations = make(map[common.Hash]common.Hash)
	}
	c.storageMutations[key] = newVal
	delete(c.storageReads, key)
}
// BalanceChange records the latest post-change balance observed in this scope.
func (c *constructionAccountAccess) BalanceChange(cur *uint256.Int) {
	c.balance = cur
}

// CodeChange records the latest post-change code observed in this scope.
func (c *constructionAccountAccess) CodeChange(cur []byte) {
	c.code = cur
}

// NonceChange records the latest post-change nonce observed in this scope.
func (c *constructionAccountAccess) NonceChange(cur uint64) {
	c.nonce = &cur
}
// NewConstructionAccountAccesses initializes the account access object with
// every change map pre-allocated.
func NewConstructionAccountAccesses() *ConstructionAccountAccesses {
	res := &ConstructionAccountAccesses{}
	res.StorageWrites = make(map[common.Hash]map[uint16]common.Hash)
	res.StorageReads = make(map[common.Hash]struct{})
	res.BalanceChanges = make(map[uint16]*uint256.Int)
	res.NonceChanges = make(map[uint16]uint64)
	res.CodeChanges = make(map[uint16]CodeChange)
	return res
}
// ConstructionBlockAccessList contains post-block modified state and some state accessed
// in execution (account addresses and storage keys).
type ConstructionBlockAccessList struct {
Accounts map[common.Address]*ConstructionAccountAccess
Accounts map[common.Address]*ConstructionAccountAccesses
curIdx uint16
}
// NewConstructionBlockAccessList instantiates an empty access list.
func NewConstructionBlockAccessList() ConstructionBlockAccessList {
return ConstructionBlockAccessList{
Accounts: make(map[common.Address]*ConstructionAccountAccess),
func NewConstructionBlockAccessList() *ConstructionBlockAccessList {
return &ConstructionBlockAccessList{
make(map[common.Address]*ConstructionAccountAccesses),
0,
}
}
// AccountRead records the address of an account that has been read during execution.
func (b *ConstructionBlockAccessList) AccountRead(addr common.Address) {
if _, ok := b.Accounts[addr]; !ok {
b.Accounts[addr] = NewConstructionAccountAccess()
}
}
// StorageRead records a storage key read during execution.
func (b *ConstructionBlockAccessList) StorageRead(address common.Address, key common.Hash) {
if _, ok := b.Accounts[address]; !ok {
b.Accounts[address] = NewConstructionAccountAccess()
}
if _, ok := b.Accounts[address].StorageWrites[key]; ok {
return
}
b.Accounts[address].StorageReads[key] = struct{}{}
}
// StorageWrite records the post-transaction value of a mutated storage slot.
// The storage slot is removed from the list of read slots.
func (b *ConstructionBlockAccessList) StorageWrite(txIdx uint16, address common.Address, key, value common.Hash) {
if _, ok := b.Accounts[address]; !ok {
b.Accounts[address] = NewConstructionAccountAccess()
}
if _, ok := b.Accounts[address].StorageWrites[key]; !ok {
b.Accounts[address].StorageWrites[key] = make(map[uint16]common.Hash)
}
b.Accounts[address].StorageWrites[key][txIdx] = value
delete(b.Accounts[address].StorageReads, key)
}
// CodeChange records the code of a newly-created contract.
func (b *ConstructionBlockAccessList) CodeChange(address common.Address, txIndex uint16, code []byte) {
if _, ok := b.Accounts[address]; !ok {
b.Accounts[address] = NewConstructionAccountAccess()
}
b.Accounts[address].CodeChange = &CodeChange{
TxIndex: txIndex,
Code: bytes.Clone(code),
}
}
// NonceChange records tx post-state nonce of any contract-like accounts whose
// nonce was incremented.
func (b *ConstructionBlockAccessList) NonceChange(address common.Address, txIdx uint16, postNonce uint64) {
if _, ok := b.Accounts[address]; !ok {
b.Accounts[address] = NewConstructionAccountAccess()
}
b.Accounts[address].NonceChanges[txIdx] = postNonce
}
// BalanceChange records the post-transaction balance of an account whose
// balance changed.
func (b *ConstructionBlockAccessList) BalanceChange(txIdx uint16, address common.Address, balance *uint256.Int) {
if _, ok := b.Accounts[address]; !ok {
b.Accounts[address] = NewConstructionAccountAccess()
}
b.Accounts[address].BalanceChanges[txIdx] = balance.Clone()
}
// PrettyPrint returns a human-readable representation of the access list
func (b *ConstructionBlockAccessList) PrettyPrint() string {
enc := b.toEncodingObj()
return enc.PrettyPrint()
}
// Copy returns a deep copy of the access list.
func (b *ConstructionBlockAccessList) Copy() *ConstructionBlockAccessList {
func (c *ConstructionBlockAccessList) Copy() *ConstructionBlockAccessList {
res := NewConstructionBlockAccessList()
for addr, aa := range b.Accounts {
var aaCopy ConstructionAccountAccess
for addr, aa := range c.Accounts {
var aaCopy ConstructionAccountAccesses
slotWrites := make(map[common.Hash]map[uint16]common.Hash, len(aa.StorageWrites))
for key, m := range aa.StorageWrites {
@ -170,13 +539,191 @@ func (b *ConstructionBlockAccessList) Copy() *ConstructionBlockAccessList {
aaCopy.BalanceChanges = balances
aaCopy.NonceChanges = maps.Clone(aa.NonceChanges)
if aa.CodeChange != nil {
aaCopy.CodeChange = &CodeChange{
TxIndex: aa.CodeChange.TxIndex,
Code: bytes.Clone(aa.CodeChange.Code),
codeChangesCopy := make(map[uint16]CodeChange)
for idx, codeChange := range aa.CodeChanges {
codeChangesCopy[idx] = CodeChange{
TxIdx: idx,
Code: bytes.Clone(codeChange.Code),
}
}
res.Accounts[addr] = &aaCopy
}
return &res
return res
}
// StateDiff is the set of net account mutations produced by one access-list
// index (transaction), keyed by account address.
type StateDiff struct {
	Mutations map[common.Address]*AccountState `json:"Mutations,omitempty"`
}

// StateAccesses records, per account, the set of storage slots read; an
// entry with an empty slot set marks an account-level read.
type StateAccesses map[common.Address]map[common.Hash]struct{}
// Merge adds every address and slot access from other into s.
func (s *StateAccesses) Merge(other StateAccesses) {
	for addr, accesses := range other {
		dst, ok := (*s)[addr]
		if !ok {
			dst = make(map[common.Hash]struct{})
			(*s)[addr] = dst
		}
		for slot := range accesses {
			dst[slot] = struct{}{}
		}
	}
}
// AccountState describes the changed portions of a single account; nil
// fields were untouched. Depending on context the fields hold either
// post-change values or prestate values (see the note on
// AccessListBuilder.prestates about StorageWrites being reused for both).
type AccountState struct {
	Balance       *uint256.Int                `json:"Balance,omitempty"`
	Nonce         *uint64                     `json:"Nonce,omitempty"`
	Code          ContractCode                `json:"Code,omitempty"`
	StorageWrites map[common.Hash]common.Hash `json:"StorageWrites,omitempty"`
}
// Empty reports whether the account state carries no changes at all.
func (a *AccountState) Empty() bool {
	if a.Balance != nil || a.Nonce != nil || a.Code != nil {
		return false
	}
	return len(a.StorageWrites) == 0
}
// String returns an indented JSON rendering of the account state.
func (a *AccountState) String() string {
	var buf bytes.Buffer
	enc := json.NewEncoder(&buf)
	enc.SetIndent("", " ")
	// Encoding a plain value type; the error is deliberately ignored.
	_ = enc.Encode(a)
	return buf.String()
}
// Merge the changes of a future AccountState into the caller, resulting in
// the combined state changes through next.
func (a *AccountState) Merge(next *AccountState) {
	if next.Balance != nil {
		a.Balance = next.Balance
	}
	if next.Nonce != nil {
		a.Nonce = next.Nonce
	}
	if next.Code != nil {
		a.Code = next.Code
	}
	switch {
	case next.StorageWrites == nil:
		// nothing to merge
	case a.StorageWrites == nil:
		a.StorageWrites = maps.Clone(next.StorageWrites)
	default:
		maps.Copy(a.StorageWrites, next.StorageWrites)
	}
}
// NewEmptyAccountState returns an AccountState with no recorded changes
// (every field nil).
func NewEmptyAccountState() *AccountState {
	return &AccountState{}
}
// Eq reports whether two account states describe identical changes. A nil
// field only matches another nil field, except Code, where nil and empty are
// treated alike.
func (a *AccountState) Eq(other *AccountState) bool {
	switch {
	case a.Balance == nil && other.Balance == nil:
		// both unset: equal so far
	case a.Balance == nil || other.Balance == nil:
		return false
	case !a.Balance.Eq(other.Balance):
		return false
	}
	if (len(a.Code) != 0 || len(other.Code) != 0) && !bytes.Equal(a.Code, other.Code) {
		return false
	}
	switch {
	case a.Nonce == nil && other.Nonce == nil:
	case a.Nonce == nil || other.Nonce == nil:
		return false
	case *a.Nonce != *other.Nonce:
		return false
	}
	switch {
	case a.StorageWrites == nil && other.StorageWrites == nil:
	case a.StorageWrites == nil || other.StorageWrites == nil:
		return false
	case !maps.Equal(a.StorageWrites, other.StorageWrites):
		return false
	}
	return true
}
// Copy returns a deep copy of the account state.
func (a *AccountState) Copy() *AccountState {
	cpy := NewEmptyAccountState()
	if a.Nonce != nil {
		nonce := *a.Nonce
		cpy.Nonce = &nonce
	}
	if a.Code != nil {
		cpy.Code = bytes.Clone(a.Code)
	}
	if a.Balance != nil {
		cpy.Balance = new(uint256.Int).Set(a.Balance)
	}
	if a.StorageWrites != nil {
		cpy.StorageWrites = maps.Clone(a.StorageWrites)
	}
	return cpy
}
// String returns an indented JSON rendering of the state diff.
func (s *StateDiff) String() string {
	var buf bytes.Buffer
	enc := json.NewEncoder(&buf)
	enc.SetIndent("", " ")
	// Encoding a plain value type; the error is deliberately ignored.
	_ = enc.Encode(s)
	return buf.String()
}
// Merge merges the state changes present in next into the caller. After,
// the state of the caller is the aggregate diff through next.
func (s *StateDiff) Merge(next *StateDiff) {
	for account, diff := range next.Mutations {
		mut, ok := s.Mutations[account]
		if !ok {
			s.Mutations[account] = diff.Copy()
			continue
		}
		if diff.Balance != nil {
			mut.Balance = diff.Balance
		}
		if diff.Code != nil {
			mut.Code = diff.Code
		}
		if diff.Nonce != nil {
			mut.Nonce = diff.Nonce
		}
		if len(diff.StorageWrites) == 0 {
			continue
		}
		if mut.StorageWrites == nil {
			mut.StorageWrites = maps.Clone(diff.StorageWrites)
		} else {
			maps.Copy(mut.StorageWrites, diff.StorageWrites)
		}
	}
}
// Copy returns a deep copy of the state diff.
func (s *StateDiff) Copy() *StateDiff {
	cpy := &StateDiff{Mutations: make(map[common.Address]*AccountState, len(s.Mutations))}
	for addr, acct := range s.Mutations {
		cpy.Mutations[addr] = acct.Copy()
	}
	return cpy
}
// Copy returns a deep copy of the access list
func (e BlockAccessList) Copy() BlockAccessList {
	var cpy BlockAccessList
	for _, accountAccess := range e {
		cpy = append(cpy, accountAccess.Copy())
	}
	return cpy
}

File diff suppressed because one or more lines are too long

View file

@ -19,12 +19,12 @@ package bal
import (
"bytes"
"cmp"
"encoding/json"
"errors"
"fmt"
"io"
"maps"
"slices"
"strings"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
@ -33,26 +33,59 @@ import (
"github.com/holiman/uint256"
)
//go:generate go run github.com/ethereum/go-ethereum/rlp/rlpgen -out bal_encoding_rlp_generated.go -type BlockAccessList -decoder
//go:generate go run github.com/ethereum/go-ethereum/rlp/rlpgen -out bal_encoding_rlp_generated.go -type AccountAccess -decoder
// These are objects used as input for the access list encoding. They mirror
// the spec format.
// BlockAccessList is the encoding format of ConstructionBlockAccessList.
type BlockAccessList struct {
Accesses []AccountAccess `ssz-max:"300000"`
type BlockAccessList []AccountAccess
func (e BlockAccessList) EncodeRLP(_w io.Writer) error {
w := rlp.NewEncoderBuffer(_w)
l := w.List()
for _, access := range e {
access.EncodeRLP(w)
}
w.ListEnd(l)
return w.Flush()
}
func (e *BlockAccessList) DecodeRLP(dec *rlp.Stream) error {
if _, err := dec.List(); err != nil {
return err
}
*e = (*e)[:0]
for dec.MoreDataInList() {
var access AccountAccess
if err := access.DecodeRLP(dec); err != nil {
return err
}
*e = append(*e, access)
}
dec.ListEnd()
return nil
}
func (e *BlockAccessList) String() string {
var res bytes.Buffer
enc := json.NewEncoder(&res)
enc.SetIndent("", " ")
// TODO: check error
enc.Encode(e)
return res.String()
}
// Validate returns an error if the contents of the access list are not ordered
// according to the spec or any code changes are contained which exceed protocol
// max code size.
func (e *BlockAccessList) Validate() error {
if !slices.IsSortedFunc(e.Accesses, func(a, b AccountAccess) int {
func (e BlockAccessList) Validate() error {
if !slices.IsSortedFunc(e, func(a, b AccountAccess) int {
return bytes.Compare(a.Address[:], b.Address[:])
}) {
return errors.New("block access list accounts not in lexicographic order")
}
for _, entry := range e.Accesses {
for _, entry := range e {
if err := entry.validate(); err != nil {
return err
}
@ -73,39 +106,28 @@ func (e *BlockAccessList) Hash() common.Hash {
return crypto.Keccak256Hash(enc.Bytes())
}
// encodeBalance encodes the provided balance into 16-bytes.
func encodeBalance(val *uint256.Int) [16]byte {
valBytes := val.Bytes()
if len(valBytes) > 16 {
panic("can't encode value that is greater than 16 bytes in size")
}
var enc [16]byte
copy(enc[16-len(valBytes):], valBytes[:])
return enc
}
// encodingBalanceChange is the encoding format of BalanceChange.
type encodingBalanceChange struct {
TxIdx uint16 `ssz-size:"2"`
Balance [16]byte `ssz-size:"16"`
TxIdx uint16 `json:"txIndex"`
Balance *uint256.Int `json:"balance"`
}
// encodingAccountNonce is the encoding format of NonceChange.
type encodingAccountNonce struct {
TxIdx uint16 `ssz-size:"2"`
Nonce uint64 `ssz-size:"8"`
TxIdx uint16 `json:"txIndex"`
Nonce uint64 `json:"nonce"`
}
// encodingStorageWrite is the encoding format of StorageWrites.
type encodingStorageWrite struct {
TxIdx uint16
ValueAfter [32]byte `ssz-size:"32"`
TxIdx uint16 `json:"txIndex"`
ValueAfter common.Hash `json:"valueAfter"`
}
// encodingStorageWrite is the encoding format of SlotWrites.
type encodingSlotWrites struct {
Slot [32]byte `ssz-size:"32"`
Accesses []encodingStorageWrite `ssz-max:"300000"`
Slot common.Hash `json:"slot"`
Accesses []encodingStorageWrite `json:"accesses"`
}
// validate returns an instance of the encoding-representation slot writes in
@ -119,14 +141,14 @@ func (e *encodingSlotWrites) validate() error {
return errors.New("storage write tx indices not in order")
}
// AccountAccess is the encoding format of ConstructionAccountAccess.
// AccountAccess is the encoding format of ConstructionAccountAccesses.
type AccountAccess struct {
Address [20]byte `ssz-size:"20"` // 20-byte Ethereum address
StorageWrites []encodingSlotWrites `ssz-max:"300000"` // Storage changes (slot -> [tx_index -> new_value])
StorageReads [][32]byte `ssz-max:"300000"` // Read-only storage keys
BalanceChanges []encodingBalanceChange `ssz-max:"300000"` // Balance changes ([tx_index -> post_balance])
NonceChanges []encodingAccountNonce `ssz-max:"300000"` // Nonce changes ([tx_index -> new_nonce])
Code []CodeChange `ssz-max:"1"` // Code changes ([tx_index -> new_code])
Address common.Address `json:"address,omitempty"` // 20-byte Ethereum address
StorageChanges []encodingSlotWrites `json:"storageChanges,omitempty"` // Storage changes (slot -> [tx_index -> new_value])
StorageReads []common.Hash `json:"storageReads,omitempty"` // Read-only storage keys
BalanceChanges []encodingBalanceChange `json:"balanceChanges,omitempty"` // Balance changes ([tx_index -> post_balance])
NonceChanges []encodingAccountNonce `json:"nonceChanges,omitempty"` // Nonce changes ([tx_index -> new_nonce])
CodeChanges []CodeChange `json:"code,omitempty"` // CodeChanges changes ([tx_index -> new_code])
}
// validate converts the account accesses out of encoding format.
@ -134,19 +156,42 @@ type AccountAccess struct {
// spec, an error is returned.
func (e *AccountAccess) validate() error {
// Check the storage write slots are sorted in order
if !slices.IsSortedFunc(e.StorageWrites, func(a, b encodingSlotWrites) int {
if !slices.IsSortedFunc(e.StorageChanges, func(a, b encodingSlotWrites) int {
return bytes.Compare(a.Slot[:], b.Slot[:])
}) {
return errors.New("storage writes slots not in lexicographic order")
}
for _, write := range e.StorageWrites {
for _, write := range e.StorageChanges {
if err := write.validate(); err != nil {
return err
}
}
// test case ideas: keys in both read/writes, duplicate keys in either read/writes
// ensure that the read and write key sets are distinct
readKeys := make(map[common.Hash]struct{})
writeKeys := make(map[common.Hash]struct{})
for _, readKey := range e.StorageReads {
if _, ok := readKeys[readKey]; ok {
return errors.New("duplicate read key")
}
readKeys[readKey] = struct{}{}
}
for _, write := range e.StorageChanges {
writeKey := write.Slot
if _, ok := writeKeys[writeKey]; ok {
return errors.New("duplicate write key")
}
writeKeys[writeKey] = struct{}{}
}
for readKey := range readKeys {
if _, ok := writeKeys[readKey]; ok {
return errors.New("storage key reported in both read/write sets")
}
}
// Check the storage read slots are sorted in order
if !slices.IsSortedFunc(e.StorageReads, func(a, b [32]byte) int {
if !slices.IsSortedFunc(e.StorageReads, func(a, b common.Hash) int {
return bytes.Compare(a[:], b[:])
}) {
return errors.New("storage read slots not in lexicographic order")
@ -167,9 +212,9 @@ func (e *AccountAccess) validate() error {
}
// Convert code change
if len(e.Code) == 1 {
if len(e.Code[0].Code) > params.MaxCodeSize {
return errors.New("code change contained oversized code")
for _, codeChange := range e.CodeChanges {
if len(codeChange.Code) > params.MaxCodeSize {
return fmt.Errorf("code change contained oversized code")
}
}
return nil
@ -183,40 +228,39 @@ func (e *AccountAccess) Copy() AccountAccess {
BalanceChanges: slices.Clone(e.BalanceChanges),
NonceChanges: slices.Clone(e.NonceChanges),
}
for _, storageWrite := range e.StorageWrites {
res.StorageWrites = append(res.StorageWrites, encodingSlotWrites{
for _, storageWrite := range e.StorageChanges {
res.StorageChanges = append(res.StorageChanges, encodingSlotWrites{
Slot: storageWrite.Slot,
Accesses: slices.Clone(storageWrite.Accesses),
})
}
if len(e.Code) == 1 {
res.Code = []CodeChange{
{
e.Code[0].TxIndex,
bytes.Clone(e.Code[0].Code),
},
}
for _, codeChange := range e.CodeChanges {
res.CodeChanges = append(res.CodeChanges,
CodeChange{
codeChange.TxIdx,
bytes.Clone(codeChange.Code),
})
}
return res
}
// EncodeRLP returns the RLP-encoded access list
func (b *ConstructionBlockAccessList) EncodeRLP(wr io.Writer) error {
return b.toEncodingObj().EncodeRLP(wr)
func (c *ConstructionBlockAccessList) EncodeRLP(wr io.Writer) error {
return c.ToEncodingObj().EncodeRLP(wr)
}
var _ rlp.Encoder = &ConstructionBlockAccessList{}
// toEncodingObj creates an instance of the ConstructionAccountAccess of the type that is
// toEncodingObj creates an instance of the ConstructionAccountAccesses of the type that is
// used as input for the encoding.
func (a *ConstructionAccountAccess) toEncodingObj(addr common.Address) AccountAccess {
func (a *ConstructionAccountAccesses) toEncodingObj(addr common.Address) AccountAccess {
res := AccountAccess{
Address: addr,
StorageWrites: make([]encodingSlotWrites, 0),
StorageReads: make([][32]byte, 0),
StorageChanges: make([]encodingSlotWrites, 0),
StorageReads: make([]common.Hash, 0),
BalanceChanges: make([]encodingBalanceChange, 0),
NonceChanges: make([]encodingAccountNonce, 0),
Code: nil,
CodeChanges: make([]CodeChange, 0),
}
// Convert write slots
@ -237,7 +281,7 @@ func (a *ConstructionAccountAccess) toEncodingObj(addr common.Address) AccountAc
ValueAfter: slotWrites[index],
})
}
res.StorageWrites = append(res.StorageWrites, obj)
res.StorageChanges = append(res.StorageChanges, obj)
}
// Convert read slots
@ -253,7 +297,7 @@ func (a *ConstructionAccountAccess) toEncodingObj(addr common.Address) AccountAc
for _, idx := range balanceIndices {
res.BalanceChanges = append(res.BalanceChanges, encodingBalanceChange{
TxIdx: idx,
Balance: encodeBalance(a.BalanceChanges[idx]),
Balance: new(uint256.Int).Set(a.BalanceChanges[idx]),
})
}
@ -268,77 +312,31 @@ func (a *ConstructionAccountAccess) toEncodingObj(addr common.Address) AccountAc
}
// Convert code change
if a.CodeChange != nil {
res.Code = []CodeChange{
{
a.CodeChange.TxIndex,
bytes.Clone(a.CodeChange.Code),
},
}
codeChangeIdxs := slices.Collect(maps.Keys(a.CodeChanges))
slices.SortFunc(codeChangeIdxs, cmp.Compare[uint16])
for _, idx := range codeChangeIdxs {
res.CodeChanges = append(res.CodeChanges, CodeChange{
idx,
bytes.Clone(a.CodeChanges[idx].Code),
})
}
return res
}
// toEncodingObj returns an instance of the access list expressed as the type
// ToEncodingObj returns an instance of the access list expressed as the type
// which is used as input for the encoding/decoding.
func (b *ConstructionBlockAccessList) toEncodingObj() *BlockAccessList {
func (c *ConstructionBlockAccessList) ToEncodingObj() *BlockAccessList {
var addresses []common.Address
for addr := range b.Accounts {
for addr := range c.Accounts {
addresses = append(addresses, addr)
}
slices.SortFunc(addresses, common.Address.Cmp)
var res BlockAccessList
for _, addr := range addresses {
res.Accesses = append(res.Accesses, b.Accounts[addr].toEncodingObj(addr))
res = append(res, c.Accounts[addr].toEncodingObj(addr))
}
return &res
}
// PrettyPrint returns a human-readable, indented rendering of the block
// access list for debugging: one section per account, listing storage
// writes, storage reads, balance changes, nonce changes and code changes.
func (e *BlockAccessList) PrettyPrint() string {
	var res bytes.Buffer
	// printWithIndent writes one line prefixed by indent levels of padding.
	printWithIndent := func(indent int, text string) {
		fmt.Fprintf(&res, "%s%s\n", strings.Repeat(" ", indent), text)
	}
	for _, accountDiff := range e.Accesses {
		printWithIndent(0, fmt.Sprintf("%x:", accountDiff.Address))
		printWithIndent(1, "storage writes:")
		for _, sWrite := range accountDiff.StorageWrites {
			printWithIndent(2, fmt.Sprintf("%x:", sWrite.Slot))
			for _, access := range sWrite.Accesses {
				printWithIndent(3, fmt.Sprintf("%d: %x", access.TxIdx, access.ValueAfter))
			}
		}
		printWithIndent(1, "storage reads:")
		for _, slot := range accountDiff.StorageReads {
			printWithIndent(2, fmt.Sprintf("%x", slot))
		}
		printWithIndent(1, "balance changes:")
		for _, change := range accountDiff.BalanceChanges {
			balance := new(uint256.Int).SetBytes(change.Balance[:]).String()
			printWithIndent(2, fmt.Sprintf("%d: %s", change.TxIdx, balance))
		}
		printWithIndent(1, "nonce changes:")
		for _, change := range accountDiff.NonceChanges {
			printWithIndent(2, fmt.Sprintf("%d: %d", change.TxIdx, change.Nonce))
		}
		if len(accountDiff.Code) > 0 {
			printWithIndent(1, "code:")
			// Print every recorded code change, not just the first entry:
			// Code is a slice and may hold more than one change.
			for _, cc := range accountDiff.Code {
				printWithIndent(2, fmt.Sprintf("%d: %x", cc.TxIndex, cc.Code))
			}
		}
	}
	return res.String()
}
// Copy returns a deep copy of the access list, duplicating every contained
// account access so mutations on the copy cannot leak back to the original.
func (e *BlockAccessList) Copy() BlockAccessList {
	var out BlockAccessList
	for i := range e.Accesses {
		out.Accesses = append(out.Accesses, e.Accesses[i].Copy())
	}
	return out
}
type ContractCode []byte

View file

@ -0,0 +1,107 @@
package bal
import (
"encoding/json"
"fmt"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/rlp"
)
// MarshalJSON encodes the contract code as a 0x-prefixed lowercase hex
// string, matching the 0x-prefixed convention used by every other JSON
// encoder in this package (previously the prefix was missing).
//
// NOTE(review): there is no matching UnmarshalJSON for ContractCode in this
// file; the default []byte decoding expects base64, so JSON round-tripping
// of this type should be confirmed by callers.
func (c *ContractCode) MarshalJSON() ([]byte, error) {
	return json.Marshal(hexutil.Bytes(*c))
}
// MarshalJSON encodes the balance change with its transaction index
// rendered as a 0x-prefixed hex string; the remaining fields use the
// default encoding rules of the underlying struct.
func (e encodingBalanceChange) MarshalJSON() ([]byte, error) {
	type Alias encodingBalanceChange
	wrapper := struct {
		TxIdx string `json:"txIndex"`
		*Alias
	}{
		TxIdx: fmt.Sprintf("0x%x", e.TxIdx),
		Alias: (*Alias)(&e),
	}
	return json.Marshal(&wrapper)
}
// UnmarshalJSON decodes a balance change whose transaction index is encoded
// as a 0x-prefixed hex string. The remaining fields use the default
// decoding rules of the underlying struct.
func (e *encodingBalanceChange) UnmarshalJSON(data []byte) error {
	type Alias encodingBalanceChange
	aux := &struct {
		TxIdx string `json:"txIndex"`
		*Alias
	}{
		Alias: (*Alias)(e),
	}
	if err := json.Unmarshal(data, &aux); err != nil {
		return err
	}
	// An absent field decodes to the empty string and keeps TxIdx at zero.
	if aux.TxIdx == "" {
		return nil
	}
	// Reject a present but unprefixed value instead of silently ignoring it
	// (the previous behavior left TxIdx at zero without reporting an error).
	if len(aux.TxIdx) < 2 || aux.TxIdx[:2] != "0x" {
		return fmt.Errorf("txIndex %q is not 0x-prefixed hex", aux.TxIdx)
	}
	if _, err := fmt.Sscanf(aux.TxIdx, "0x%x", &e.TxIdx); err != nil {
		return err
	}
	return nil
}
// MarshalJSON encodes the nonce change with both the transaction index and
// the nonce rendered as 0x-prefixed hex strings; the remaining fields use
// the default encoding rules of the underlying struct.
func (e encodingAccountNonce) MarshalJSON() ([]byte, error) {
	type Alias encodingAccountNonce
	wrapper := struct {
		TxIdx string `json:"txIndex"`
		Nonce string `json:"nonce"`
		*Alias
	}{
		TxIdx: fmt.Sprintf("0x%x", e.TxIdx),
		Nonce: fmt.Sprintf("0x%x", e.Nonce),
		Alias: (*Alias)(&e),
	}
	return json.Marshal(&wrapper)
}
// UnmarshalJSON decodes a nonce change whose transaction index and nonce
// are encoded as 0x-prefixed hex strings. The remaining fields use the
// default decoding rules of the underlying struct.
func (e *encodingAccountNonce) UnmarshalJSON(data []byte) error {
	type Alias encodingAccountNonce
	aux := &struct {
		TxIdx string `json:"txIndex"`
		Nonce string `json:"nonce"`
		*Alias
	}{
		Alias: (*Alias)(e),
	}
	if err := json.Unmarshal(data, &aux); err != nil {
		return err
	}
	// parseHexField parses a 0x-prefixed hex field into dst. An absent field
	// (empty string) is skipped; a present but unprefixed value is rejected
	// instead of being silently ignored (the previous behavior left the
	// target at zero without reporting an error).
	parseHexField := func(name, value string, dst any) error {
		if value == "" {
			return nil
		}
		if len(value) < 2 || value[:2] != "0x" {
			return fmt.Errorf("%s %q is not 0x-prefixed hex", name, value)
		}
		_, err := fmt.Sscanf(value, "0x%x", dst)
		return err
	}
	if err := parseHexField("txIndex", aux.TxIdx, &e.TxIdx); err != nil {
		return err
	}
	return parseHexField("nonce", aux.Nonce, &e.Nonce)
}
// UnmarshalJSON implements json.Unmarshaler. It accepts either of two JSON
// forms: a hex string holding the RLP encoding of the list, or a structured
// JSON array of account accesses.
func (b *BlockAccessList) UnmarshalJSON(input []byte) error {
	var encoded hexutil.Bytes
	if json.Unmarshal(input, &encoded) == nil {
		// Hex-string form: the payload is the RLP encoding of the list.
		return rlp.DecodeBytes(encoded, b)
	}
	// Fall back to the structured array form.
	var accesses []AccountAccess
	if err := json.Unmarshal(input, &accesses); err != nil {
		return err
	}
	*b = BlockAccessList(accesses)
	return nil
}
// MarshalJSON implements json.Marshaler. The list is serialized to its RLP
// encoding and emitted as a hex string, the inverse of the hex-string form
// accepted by UnmarshalJSON.
func (b BlockAccessList) MarshalJSON() ([]byte, error) {
	encoded, err := rlp.EncodeToBytes(b)
	if err != nil {
		return nil, err
	}
	return json.Marshal(hexutil.Bytes(encoded))
}

View file

@ -2,275 +2,254 @@
package bal
import "github.com/ethereum/go-ethereum/common"
import "github.com/ethereum/go-ethereum/rlp"
import "github.com/holiman/uint256"
import "io"
func (obj *BlockAccessList) EncodeRLP(_w io.Writer) error {
func (obj *AccountAccess) EncodeRLP(_w io.Writer) error {
w := rlp.NewEncoderBuffer(_w)
_tmp0 := w.List()
w.WriteBytes(obj.Address[:])
_tmp1 := w.List()
for _, _tmp2 := range obj.Accesses {
for _, _tmp2 := range obj.StorageChanges {
_tmp3 := w.List()
w.WriteBytes(_tmp2.Address[:])
w.WriteBytes(_tmp2.Slot[:])
_tmp4 := w.List()
for _, _tmp5 := range _tmp2.StorageWrites {
for _, _tmp5 := range _tmp2.Accesses {
_tmp6 := w.List()
w.WriteBytes(_tmp5.Slot[:])
_tmp7 := w.List()
for _, _tmp8 := range _tmp5.Accesses {
_tmp9 := w.List()
w.WriteUint64(uint64(_tmp8.TxIdx))
w.WriteBytes(_tmp8.ValueAfter[:])
w.ListEnd(_tmp9)
}
w.ListEnd(_tmp7)
w.WriteUint64(uint64(_tmp5.TxIdx))
w.WriteBytes(_tmp5.ValueAfter[:])
w.ListEnd(_tmp6)
}
w.ListEnd(_tmp4)
_tmp10 := w.List()
for _, _tmp11 := range _tmp2.StorageReads {
w.WriteBytes(_tmp11[:])
}
w.ListEnd(_tmp10)
_tmp12 := w.List()
for _, _tmp13 := range _tmp2.BalanceChanges {
_tmp14 := w.List()
w.WriteUint64(uint64(_tmp13.TxIdx))
w.WriteBytes(_tmp13.Balance[:])
w.ListEnd(_tmp14)
}
w.ListEnd(_tmp12)
_tmp15 := w.List()
for _, _tmp16 := range _tmp2.NonceChanges {
_tmp17 := w.List()
w.WriteUint64(uint64(_tmp16.TxIdx))
w.WriteUint64(_tmp16.Nonce)
w.ListEnd(_tmp17)
}
w.ListEnd(_tmp15)
_tmp18 := w.List()
for _, _tmp19 := range _tmp2.Code {
_tmp20 := w.List()
w.WriteUint64(uint64(_tmp19.TxIndex))
w.WriteBytes(_tmp19.Code)
w.ListEnd(_tmp20)
}
w.ListEnd(_tmp18)
w.ListEnd(_tmp3)
}
w.ListEnd(_tmp1)
_tmp7 := w.List()
for _, _tmp8 := range obj.StorageReads {
w.WriteBytes(_tmp8[:])
}
w.ListEnd(_tmp7)
_tmp9 := w.List()
for _, _tmp10 := range obj.BalanceChanges {
_tmp11 := w.List()
w.WriteUint64(uint64(_tmp10.TxIdx))
if _tmp10.Balance == nil {
w.Write(rlp.EmptyString)
} else {
w.WriteUint256(_tmp10.Balance)
}
w.ListEnd(_tmp11)
}
w.ListEnd(_tmp9)
_tmp12 := w.List()
for _, _tmp13 := range obj.NonceChanges {
_tmp14 := w.List()
w.WriteUint64(uint64(_tmp13.TxIdx))
w.WriteUint64(_tmp13.Nonce)
w.ListEnd(_tmp14)
}
w.ListEnd(_tmp12)
_tmp15 := w.List()
for _, _tmp16 := range obj.CodeChanges {
_tmp17 := w.List()
w.WriteUint64(uint64(_tmp16.TxIdx))
w.WriteBytes(_tmp16.Code)
w.ListEnd(_tmp17)
}
w.ListEnd(_tmp15)
w.ListEnd(_tmp0)
return w.Flush()
}
func (obj *BlockAccessList) DecodeRLP(dec *rlp.Stream) error {
var _tmp0 BlockAccessList
func (obj *AccountAccess) DecodeRLP(dec *rlp.Stream) error {
var _tmp0 AccountAccess
{
if _, err := dec.List(); err != nil {
return err
}
// Accesses:
var _tmp1 []AccountAccess
// Address:
var _tmp1 common.Address
if err := dec.ReadBytes(_tmp1[:]); err != nil {
return err
}
_tmp0.Address = _tmp1
// StorageChanges:
var _tmp2 []encodingSlotWrites
if _, err := dec.List(); err != nil {
return err
}
for dec.MoreDataInList() {
var _tmp2 AccountAccess
var _tmp3 encodingSlotWrites
{
if _, err := dec.List(); err != nil {
return err
}
// Address:
var _tmp3 [20]byte
if err := dec.ReadBytes(_tmp3[:]); err != nil {
// Slot:
var _tmp4 common.Hash
if err := dec.ReadBytes(_tmp4[:]); err != nil {
return err
}
_tmp2.Address = _tmp3
// StorageWrites:
var _tmp4 []encodingSlotWrites
_tmp3.Slot = _tmp4
// Accesses:
var _tmp5 []encodingStorageWrite
if _, err := dec.List(); err != nil {
return err
}
for dec.MoreDataInList() {
var _tmp5 encodingSlotWrites
{
if _, err := dec.List(); err != nil {
return err
}
// Slot:
var _tmp6 [32]byte
if err := dec.ReadBytes(_tmp6[:]); err != nil {
return err
}
_tmp5.Slot = _tmp6
// Accesses:
var _tmp7 []encodingStorageWrite
if _, err := dec.List(); err != nil {
return err
}
for dec.MoreDataInList() {
var _tmp8 encodingStorageWrite
{
if _, err := dec.List(); err != nil {
return err
}
// TxIdx:
_tmp9, err := dec.Uint16()
if err != nil {
return err
}
_tmp8.TxIdx = _tmp9
// ValueAfter:
var _tmp10 [32]byte
if err := dec.ReadBytes(_tmp10[:]); err != nil {
return err
}
_tmp8.ValueAfter = _tmp10
if err := dec.ListEnd(); err != nil {
return err
}
}
_tmp7 = append(_tmp7, _tmp8)
}
if err := dec.ListEnd(); err != nil {
return err
}
_tmp5.Accesses = _tmp7
if err := dec.ListEnd(); err != nil {
return err
}
}
_tmp4 = append(_tmp4, _tmp5)
}
if err := dec.ListEnd(); err != nil {
return err
}
_tmp2.StorageWrites = _tmp4
// StorageReads:
var _tmp11 [][32]byte
if _, err := dec.List(); err != nil {
return err
}
for dec.MoreDataInList() {
var _tmp12 [32]byte
if err := dec.ReadBytes(_tmp12[:]); err != nil {
return err
}
_tmp11 = append(_tmp11, _tmp12)
}
if err := dec.ListEnd(); err != nil {
return err
}
_tmp2.StorageReads = _tmp11
// BalanceChanges:
var _tmp13 []encodingBalanceChange
if _, err := dec.List(); err != nil {
return err
}
for dec.MoreDataInList() {
var _tmp14 encodingBalanceChange
var _tmp6 encodingStorageWrite
{
if _, err := dec.List(); err != nil {
return err
}
// TxIdx:
_tmp15, err := dec.Uint16()
_tmp7, err := dec.Uint16()
if err != nil {
return err
}
_tmp14.TxIdx = _tmp15
// Balance:
var _tmp16 [16]byte
if err := dec.ReadBytes(_tmp16[:]); err != nil {
_tmp6.TxIdx = _tmp7
// ValueAfter:
var _tmp8 common.Hash
if err := dec.ReadBytes(_tmp8[:]); err != nil {
return err
}
_tmp14.Balance = _tmp16
_tmp6.ValueAfter = _tmp8
if err := dec.ListEnd(); err != nil {
return err
}
}
_tmp13 = append(_tmp13, _tmp14)
_tmp5 = append(_tmp5, _tmp6)
}
if err := dec.ListEnd(); err != nil {
return err
}
_tmp2.BalanceChanges = _tmp13
// NonceChanges:
var _tmp17 []encodingAccountNonce
if _, err := dec.List(); err != nil {
return err
}
for dec.MoreDataInList() {
var _tmp18 encodingAccountNonce
{
if _, err := dec.List(); err != nil {
return err
}
// TxIdx:
_tmp19, err := dec.Uint16()
if err != nil {
return err
}
_tmp18.TxIdx = _tmp19
// Nonce:
_tmp20, err := dec.Uint64()
if err != nil {
return err
}
_tmp18.Nonce = _tmp20
if err := dec.ListEnd(); err != nil {
return err
}
}
_tmp17 = append(_tmp17, _tmp18)
}
if err := dec.ListEnd(); err != nil {
return err
}
_tmp2.NonceChanges = _tmp17
// Code:
var _tmp21 []CodeChange
if _, err := dec.List(); err != nil {
return err
}
for dec.MoreDataInList() {
var _tmp22 CodeChange
{
if _, err := dec.List(); err != nil {
return err
}
// TxIndex:
_tmp23, err := dec.Uint16()
if err != nil {
return err
}
_tmp22.TxIndex = _tmp23
// Code:
_tmp24, err := dec.Bytes()
if err != nil {
return err
}
_tmp22.Code = _tmp24
if err := dec.ListEnd(); err != nil {
return err
}
}
_tmp21 = append(_tmp21, _tmp22)
}
if err := dec.ListEnd(); err != nil {
return err
}
_tmp2.Code = _tmp21
_tmp3.Accesses = _tmp5
if err := dec.ListEnd(); err != nil {
return err
}
}
_tmp1 = append(_tmp1, _tmp2)
_tmp2 = append(_tmp2, _tmp3)
}
if err := dec.ListEnd(); err != nil {
return err
}
_tmp0.Accesses = _tmp1
_tmp0.StorageChanges = _tmp2
// StorageReads:
var _tmp9 []common.Hash
if _, err := dec.List(); err != nil {
return err
}
for dec.MoreDataInList() {
var _tmp10 common.Hash
if err := dec.ReadBytes(_tmp10[:]); err != nil {
return err
}
_tmp9 = append(_tmp9, _tmp10)
}
if err := dec.ListEnd(); err != nil {
return err
}
_tmp0.StorageReads = _tmp9
// BalanceChanges:
var _tmp11 []encodingBalanceChange
if _, err := dec.List(); err != nil {
return err
}
for dec.MoreDataInList() {
var _tmp12 encodingBalanceChange
{
if _, err := dec.List(); err != nil {
return err
}
// TxIdx:
_tmp13, err := dec.Uint16()
if err != nil {
return err
}
_tmp12.TxIdx = _tmp13
// Balance:
var _tmp14 uint256.Int
if err := dec.ReadUint256(&_tmp14); err != nil {
return err
}
_tmp12.Balance = &_tmp14
if err := dec.ListEnd(); err != nil {
return err
}
}
_tmp11 = append(_tmp11, _tmp12)
}
if err := dec.ListEnd(); err != nil {
return err
}
_tmp0.BalanceChanges = _tmp11
// NonceChanges:
var _tmp15 []encodingAccountNonce
if _, err := dec.List(); err != nil {
return err
}
for dec.MoreDataInList() {
var _tmp16 encodingAccountNonce
{
if _, err := dec.List(); err != nil {
return err
}
// TxIdx:
_tmp17, err := dec.Uint16()
if err != nil {
return err
}
_tmp16.TxIdx = _tmp17
// Nonce:
_tmp18, err := dec.Uint64()
if err != nil {
return err
}
_tmp16.Nonce = _tmp18
if err := dec.ListEnd(); err != nil {
return err
}
}
_tmp15 = append(_tmp15, _tmp16)
}
if err := dec.ListEnd(); err != nil {
return err
}
_tmp0.NonceChanges = _tmp15
// CodeChanges:
var _tmp19 []CodeChange
if _, err := dec.List(); err != nil {
return err
}
for dec.MoreDataInList() {
var _tmp20 CodeChange
{
if _, err := dec.List(); err != nil {
return err
}
// TxIdx:
_tmp21, err := dec.Uint16()
if err != nil {
return err
}
_tmp20.TxIdx = _tmp21
// Code:
_tmp22, err := dec.Bytes()
if err != nil {
return err
}
_tmp20.Code = _tmp22
if err := dec.ListEnd(); err != nil {
return err
}
}
_tmp19 = append(_tmp19, _tmp20)
}
if err := dec.ListEnd(); err != nil {
return err
}
_tmp0.CodeChanges = _tmp19
if err := dec.ListEnd(); err != nil {
return err
}

View file

@ -38,7 +38,7 @@ func equalBALs(a *BlockAccessList, b *BlockAccessList) bool {
func makeTestConstructionBAL() *ConstructionBlockAccessList {
return &ConstructionBlockAccessList{
map[common.Address]*ConstructionAccountAccess{
map[common.Address]*ConstructionAccountAccesses{
common.BytesToAddress([]byte{0xff, 0xff}): {
StorageWrites: map[common.Hash]map[uint16]common.Hash{
common.BytesToHash([]byte{0x01}): {
@ -60,10 +60,10 @@ func makeTestConstructionBAL() *ConstructionBlockAccessList {
1: 2,
2: 6,
},
CodeChange: &CodeChange{
TxIndex: 0,
Code: common.Hex2Bytes("deadbeef"),
},
CodeChanges: map[uint16]CodeChange{0: {
TxIdx: 0,
Code: common.Hex2Bytes("deadbeef"),
}},
},
common.BytesToAddress([]byte{0xff, 0xff, 0xff}): {
StorageWrites: map[common.Hash]map[uint16]common.Hash{
@ -102,10 +102,10 @@ func TestBALEncoding(t *testing.T) {
if err := dec.DecodeRLP(rlp.NewStream(bytes.NewReader(buf.Bytes()), 10000000)); err != nil {
t.Fatalf("decoding failed: %v\n", err)
}
if dec.Hash() != bal.toEncodingObj().Hash() {
if dec.Hash() != bal.ToEncodingObj().Hash() {
t.Fatalf("encoded block hash doesn't match decoded")
}
if !equalBALs(bal.toEncodingObj(), &dec) {
if !equalBALs(bal.ToEncodingObj(), &dec) {
t.Fatal("decoded BAL doesn't match")
}
}
@ -113,7 +113,7 @@ func TestBALEncoding(t *testing.T) {
func makeTestAccountAccess(sort bool) AccountAccess {
var (
storageWrites []encodingSlotWrites
storageReads [][32]byte
storageReads []common.Hash
balances []encodingBalanceChange
nonces []encodingAccountNonce
)
@ -144,7 +144,7 @@ func makeTestAccountAccess(sort bool) AccountAccess {
storageReads = append(storageReads, testrand.Hash())
}
if sort {
slices.SortFunc(storageReads, func(a, b [32]byte) int {
slices.SortFunc(storageReads, func(a, b common.Hash) int {
return bytes.Compare(a[:], b[:])
})
}
@ -152,7 +152,7 @@ func makeTestAccountAccess(sort bool) AccountAccess {
for i := 0; i < 5; i++ {
balances = append(balances, encodingBalanceChange{
TxIdx: uint16(2 * i),
Balance: [16]byte(testrand.Bytes(16)),
Balance: new(uint256.Int).SetBytes(testrand.Bytes(32)),
})
}
if sort {
@ -175,14 +175,14 @@ func makeTestAccountAccess(sort bool) AccountAccess {
return AccountAccess{
Address: [20]byte(testrand.Bytes(20)),
StorageWrites: storageWrites,
StorageChanges: storageWrites,
StorageReads: storageReads,
BalanceChanges: balances,
NonceChanges: nonces,
Code: []CodeChange{
CodeChanges: []CodeChange{
{
TxIndex: 100,
Code: testrand.Bytes(256),
TxIdx: 100,
Code: testrand.Bytes(256),
},
},
}
@ -191,10 +191,10 @@ func makeTestAccountAccess(sort bool) AccountAccess {
func makeTestBAL(sort bool) BlockAccessList {
list := BlockAccessList{}
for i := 0; i < 5; i++ {
list.Accesses = append(list.Accesses, makeTestAccountAccess(sort))
list = append(list, makeTestAccountAccess(sort))
}
if sort {
slices.SortFunc(list.Accesses, func(a, b AccountAccess) int {
slices.SortFunc(list, func(a, b AccountAccess) int {
return bytes.Compare(a.Address[:], b.Address[:])
})
}
@ -214,7 +214,7 @@ func TestBlockAccessListCopy(t *testing.T) {
}
// Make sure the mutations on copy won't affect the origin
for _, aa := range cpyCpy.Accesses {
for _, aa := range cpyCpy {
for i := 0; i < len(aa.StorageReads); i++ {
aa.StorageReads[i] = [32]byte(testrand.Bytes(32))
}
@ -245,8 +245,11 @@ func TestBlockAccessListValidation(t *testing.T) {
// Validate the derived block access list
cBAL := makeTestConstructionBAL()
listB := cBAL.toEncodingObj()
listB := cBAL.ToEncodingObj()
if err := listB.Validate(); err != nil {
t.Fatalf("Unexpected validation error: %v", err)
}
}
// BALReader test ideas
// * BAL which doesn't have any pre-tx system contracts should return an empty state diff at idx 0

View file

@ -0,0 +1,32 @@
package types
import (
"bytes"
"fmt"
"github.com/ethereum/go-ethereum/rlp"
"io"
"os"
"testing"
)
// TestBALDecoding decodes a stream of RLP-encoded blocks (carrying block
// access lists) from a fixture file and checks that every block decodes
// cleanly until the stream is exhausted.
func TestBALDecoding(t *testing.T) {
	data, err := os.ReadFile("blocks_bal_one.rlp")
	if err != nil {
		t.Fatalf("error opening file: %v", err)
	}
	stream := rlp.NewStream(bytes.NewReader(data), 0)
	for i := 0; ; i++ {
		// Decode into a fresh Block each iteration so optional fields from a
		// previous block cannot persist into the next one.
		var block Block
		err := stream.Decode(&block)
		if err == io.EOF {
			// End of the fixture stream: all blocks decoded. Break before
			// logging, so the previous block is not reported a second time.
			break
		}
		if err != nil {
			t.Fatalf("error decoding blocks: %v", err)
		}
		// t.Logf keeps the output attached to the test instead of stdout.
		t.Logf("decode %d: block number is %d", i, block.NumberU64())
	}
}

View file

@ -28,6 +28,8 @@ import (
"sync/atomic"
"time"
"github.com/ethereum/go-ethereum/core/types/bal"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/rlp"
@ -106,6 +108,9 @@ type Header struct {
// RequestsHash was added by EIP-7685 and is ignored in legacy headers.
RequestsHash *common.Hash `json:"requestsHash" rlp:"optional"`
// BlockAccessListHash was added by EIP-7928 and is ignored in legacy headers.
BlockAccessListHash *common.Hash `json:"balHash" rlp:"optional"`
}
// field type overrides for gencodec
@ -183,7 +188,8 @@ func (h *Header) EmptyReceipts() bool {
type Body struct {
Transactions []*Transaction
Uncles []*Header
Withdrawals []*Withdrawal `rlp:"optional"`
Withdrawals []*Withdrawal `rlp:"optional"`
AccessList *bal.BlockAccessList `rlp:"optional,nil"`
}
// Block represents an Ethereum block.
@ -214,6 +220,8 @@ type Block struct {
// that process it.
witness *ExecutionWitness
accessList *bal.BlockAccessList
// caches
hash atomic.Pointer[common.Hash]
size atomic.Uint64
@ -229,7 +237,8 @@ type extblock struct {
Header *Header
Txs []*Transaction
Uncles []*Header
Withdrawals []*Withdrawal `rlp:"optional"`
Withdrawals []*Withdrawal `rlp:"optional"`
AccessList *bal.BlockAccessList `rlp:"optional"`
}
// NewBlock creates a new block. The input data is copied, changes to header and to the
@ -290,6 +299,12 @@ func NewBlock(header *Header, body *Body, receipts []*Receipt, hasher ListHasher
b.withdrawals = slices.Clone(withdrawals)
}
if body.AccessList != nil {
balHash := body.AccessList.Hash()
b.header.BlockAccessListHash = &balHash
b.accessList = body.AccessList
}
return b
}
@ -334,12 +349,14 @@ func CopyHeader(h *Header) *Header {
// DecodeRLP decodes a block from RLP.
func (b *Block) DecodeRLP(s *rlp.Stream) error {
var eb extblock
var (
eb extblock
)
_, size, _ := s.Kind()
if err := s.Decode(&eb); err != nil {
return err
}
b.header, b.uncles, b.transactions, b.withdrawals = eb.Header, eb.Uncles, eb.Txs, eb.Withdrawals
b.header, b.uncles, b.transactions, b.withdrawals, b.accessList = eb.Header, eb.Uncles, eb.Txs, eb.Withdrawals, eb.AccessList
b.size.Store(rlp.ListSize(size))
return nil
}
@ -351,13 +368,14 @@ func (b *Block) EncodeRLP(w io.Writer) error {
Txs: b.transactions,
Uncles: b.uncles,
Withdrawals: b.withdrawals,
AccessList: b.accessList,
})
}
// Body returns the non-header content of the block.
// Note the returned data is not an independent copy.
func (b *Block) Body() *Body {
return &Body{b.transactions, b.uncles, b.withdrawals}
return &Body{b.transactions, b.uncles, b.withdrawals, b.accessList}
}
// Accessors for body data. These do not return a copy because the content
@ -508,6 +526,10 @@ func (b *Block) WithBody(body Body) *Block {
withdrawals: slices.Clone(body.Withdrawals),
witness: b.witness,
}
if body.AccessList != nil {
balCopy := body.AccessList.Copy()
block.accessList = &balCopy
}
for i := range body.Uncles {
block.uncles[i] = CopyHeader(body.Uncles[i])
}
@ -520,6 +542,7 @@ func (b *Block) WithWitness(witness *ExecutionWitness) *Block {
transactions: b.transactions,
uncles: b.uncles,
withdrawals: b.withdrawals,
accessList: b.accessList,
witness: witness,
}
}

View file

@ -16,28 +16,29 @@ var _ = (*headerMarshaling)(nil)
// MarshalJSON marshals as JSON.
func (h Header) MarshalJSON() ([]byte, error) {
type Header struct {
ParentHash common.Hash `json:"parentHash" gencodec:"required"`
UncleHash common.Hash `json:"sha3Uncles" gencodec:"required"`
Coinbase common.Address `json:"miner"`
Root common.Hash `json:"stateRoot" gencodec:"required"`
TxHash common.Hash `json:"transactionsRoot" gencodec:"required"`
ReceiptHash common.Hash `json:"receiptsRoot" gencodec:"required"`
Bloom Bloom `json:"logsBloom" gencodec:"required"`
Difficulty *hexutil.Big `json:"difficulty" gencodec:"required"`
Number *hexutil.Big `json:"number" gencodec:"required"`
GasLimit hexutil.Uint64 `json:"gasLimit" gencodec:"required"`
GasUsed hexutil.Uint64 `json:"gasUsed" gencodec:"required"`
Time hexutil.Uint64 `json:"timestamp" gencodec:"required"`
Extra hexutil.Bytes `json:"extraData" gencodec:"required"`
MixDigest common.Hash `json:"mixHash"`
Nonce BlockNonce `json:"nonce"`
BaseFee *hexutil.Big `json:"baseFeePerGas" rlp:"optional"`
WithdrawalsHash *common.Hash `json:"withdrawalsRoot" rlp:"optional"`
BlobGasUsed *hexutil.Uint64 `json:"blobGasUsed" rlp:"optional"`
ExcessBlobGas *hexutil.Uint64 `json:"excessBlobGas" rlp:"optional"`
ParentBeaconRoot *common.Hash `json:"parentBeaconBlockRoot" rlp:"optional"`
RequestsHash *common.Hash `json:"requestsHash" rlp:"optional"`
Hash common.Hash `json:"hash"`
ParentHash common.Hash `json:"parentHash" gencodec:"required"`
UncleHash common.Hash `json:"sha3Uncles" gencodec:"required"`
Coinbase common.Address `json:"miner"`
Root common.Hash `json:"stateRoot" gencodec:"required"`
TxHash common.Hash `json:"transactionsRoot" gencodec:"required"`
ReceiptHash common.Hash `json:"receiptsRoot" gencodec:"required"`
Bloom Bloom `json:"logsBloom" gencodec:"required"`
Difficulty *hexutil.Big `json:"difficulty" gencodec:"required"`
Number *hexutil.Big `json:"number" gencodec:"required"`
GasLimit hexutil.Uint64 `json:"gasLimit" gencodec:"required"`
GasUsed hexutil.Uint64 `json:"gasUsed" gencodec:"required"`
Time hexutil.Uint64 `json:"timestamp" gencodec:"required"`
Extra hexutil.Bytes `json:"extraData" gencodec:"required"`
MixDigest common.Hash `json:"mixHash"`
Nonce BlockNonce `json:"nonce"`
BaseFee *hexutil.Big `json:"baseFeePerGas" rlp:"optional"`
WithdrawalsHash *common.Hash `json:"withdrawalsRoot" rlp:"optional"`
BlobGasUsed *hexutil.Uint64 `json:"blobGasUsed" rlp:"optional"`
ExcessBlobGas *hexutil.Uint64 `json:"excessBlobGas" rlp:"optional"`
ParentBeaconRoot *common.Hash `json:"parentBeaconBlockRoot" rlp:"optional"`
RequestsHash *common.Hash `json:"requestsHash" rlp:"optional"`
BlockAccessListHash *common.Hash `json:"balHash" rlp:"optional"`
Hash common.Hash `json:"hash"`
}
var enc Header
enc.ParentHash = h.ParentHash
@ -61,6 +62,7 @@ func (h Header) MarshalJSON() ([]byte, error) {
enc.ExcessBlobGas = (*hexutil.Uint64)(h.ExcessBlobGas)
enc.ParentBeaconRoot = h.ParentBeaconRoot
enc.RequestsHash = h.RequestsHash
enc.BlockAccessListHash = h.BlockAccessListHash
enc.Hash = h.Hash()
return json.Marshal(&enc)
}
@ -68,27 +70,28 @@ func (h Header) MarshalJSON() ([]byte, error) {
// UnmarshalJSON unmarshals from JSON.
func (h *Header) UnmarshalJSON(input []byte) error {
type Header struct {
ParentHash *common.Hash `json:"parentHash" gencodec:"required"`
UncleHash *common.Hash `json:"sha3Uncles" gencodec:"required"`
Coinbase *common.Address `json:"miner"`
Root *common.Hash `json:"stateRoot" gencodec:"required"`
TxHash *common.Hash `json:"transactionsRoot" gencodec:"required"`
ReceiptHash *common.Hash `json:"receiptsRoot" gencodec:"required"`
Bloom *Bloom `json:"logsBloom" gencodec:"required"`
Difficulty *hexutil.Big `json:"difficulty" gencodec:"required"`
Number *hexutil.Big `json:"number" gencodec:"required"`
GasLimit *hexutil.Uint64 `json:"gasLimit" gencodec:"required"`
GasUsed *hexutil.Uint64 `json:"gasUsed" gencodec:"required"`
Time *hexutil.Uint64 `json:"timestamp" gencodec:"required"`
Extra *hexutil.Bytes `json:"extraData" gencodec:"required"`
MixDigest *common.Hash `json:"mixHash"`
Nonce *BlockNonce `json:"nonce"`
BaseFee *hexutil.Big `json:"baseFeePerGas" rlp:"optional"`
WithdrawalsHash *common.Hash `json:"withdrawalsRoot" rlp:"optional"`
BlobGasUsed *hexutil.Uint64 `json:"blobGasUsed" rlp:"optional"`
ExcessBlobGas *hexutil.Uint64 `json:"excessBlobGas" rlp:"optional"`
ParentBeaconRoot *common.Hash `json:"parentBeaconBlockRoot" rlp:"optional"`
RequestsHash *common.Hash `json:"requestsHash" rlp:"optional"`
ParentHash *common.Hash `json:"parentHash" gencodec:"required"`
UncleHash *common.Hash `json:"sha3Uncles" gencodec:"required"`
Coinbase *common.Address `json:"miner"`
Root *common.Hash `json:"stateRoot" gencodec:"required"`
TxHash *common.Hash `json:"transactionsRoot" gencodec:"required"`
ReceiptHash *common.Hash `json:"receiptsRoot" gencodec:"required"`
Bloom *Bloom `json:"logsBloom" gencodec:"required"`
Difficulty *hexutil.Big `json:"difficulty" gencodec:"required"`
Number *hexutil.Big `json:"number" gencodec:"required"`
GasLimit *hexutil.Uint64 `json:"gasLimit" gencodec:"required"`
GasUsed *hexutil.Uint64 `json:"gasUsed" gencodec:"required"`
Time *hexutil.Uint64 `json:"timestamp" gencodec:"required"`
Extra *hexutil.Bytes `json:"extraData" gencodec:"required"`
MixDigest *common.Hash `json:"mixHash"`
Nonce *BlockNonce `json:"nonce"`
BaseFee *hexutil.Big `json:"baseFeePerGas" rlp:"optional"`
WithdrawalsHash *common.Hash `json:"withdrawalsRoot" rlp:"optional"`
BlobGasUsed *hexutil.Uint64 `json:"blobGasUsed" rlp:"optional"`
ExcessBlobGas *hexutil.Uint64 `json:"excessBlobGas" rlp:"optional"`
ParentBeaconRoot *common.Hash `json:"parentBeaconBlockRoot" rlp:"optional"`
RequestsHash *common.Hash `json:"requestsHash" rlp:"optional"`
BlockAccessListHash *common.Hash `json:"balHash" rlp:"optional"`
}
var dec Header
if err := json.Unmarshal(input, &dec); err != nil {
@ -169,5 +172,8 @@ func (h *Header) UnmarshalJSON(input []byte) error {
if dec.RequestsHash != nil {
h.RequestsHash = dec.RequestsHash
}
if dec.BlockAccessListHash != nil {
h.BlockAccessListHash = dec.BlockAccessListHash
}
return nil
}

View file

@ -43,7 +43,8 @@ func (obj *Header) EncodeRLP(_w io.Writer) error {
_tmp4 := obj.ExcessBlobGas != nil
_tmp5 := obj.ParentBeaconRoot != nil
_tmp6 := obj.RequestsHash != nil
if _tmp1 || _tmp2 || _tmp3 || _tmp4 || _tmp5 || _tmp6 {
_tmp7 := obj.BlockAccessListHash != nil
if _tmp1 || _tmp2 || _tmp3 || _tmp4 || _tmp5 || _tmp6 || _tmp7 {
if obj.BaseFee == nil {
w.Write(rlp.EmptyString)
} else {
@ -53,41 +54,48 @@ func (obj *Header) EncodeRLP(_w io.Writer) error {
w.WriteBigInt(obj.BaseFee)
}
}
if _tmp2 || _tmp3 || _tmp4 || _tmp5 || _tmp6 {
if _tmp2 || _tmp3 || _tmp4 || _tmp5 || _tmp6 || _tmp7 {
if obj.WithdrawalsHash == nil {
w.Write([]byte{0x80})
} else {
w.WriteBytes(obj.WithdrawalsHash[:])
}
}
if _tmp3 || _tmp4 || _tmp5 || _tmp6 {
if _tmp3 || _tmp4 || _tmp5 || _tmp6 || _tmp7 {
if obj.BlobGasUsed == nil {
w.Write([]byte{0x80})
} else {
w.WriteUint64((*obj.BlobGasUsed))
}
}
if _tmp4 || _tmp5 || _tmp6 {
if _tmp4 || _tmp5 || _tmp6 || _tmp7 {
if obj.ExcessBlobGas == nil {
w.Write([]byte{0x80})
} else {
w.WriteUint64((*obj.ExcessBlobGas))
}
}
if _tmp5 || _tmp6 {
if _tmp5 || _tmp6 || _tmp7 {
if obj.ParentBeaconRoot == nil {
w.Write([]byte{0x80})
} else {
w.WriteBytes(obj.ParentBeaconRoot[:])
}
}
if _tmp6 {
if _tmp6 || _tmp7 {
if obj.RequestsHash == nil {
w.Write([]byte{0x80})
} else {
w.WriteBytes(obj.RequestsHash[:])
}
}
if _tmp7 {
if obj.BlockAccessListHash == nil {
w.Write([]byte{0x80})
} else {
w.WriteBytes(obj.BlockAccessListHash[:])
}
}
w.ListEnd(_tmp0)
return w.Flush()
}

View file

@ -77,6 +77,7 @@ type TxContext struct {
BlobHashes []common.Hash // Provides information for BLOBHASH
BlobFeeCap *big.Int // Is used to zero the blobbasefee if NoBaseFee is set
AccessEvents *state.AccessEvents // Capture all state accesses for this tx
Index uint64 // the index of the transaction within the block being executed (0 if executing a standalone call)
}
// EVM is the Ethereum Virtual Machine base object and provides
@ -470,25 +471,32 @@ func (evm *EVM) StaticCall(caller common.Address, addr common.Address, input []b
// create creates a new contract using code as deployment code.
func (evm *EVM) create(caller common.Address, code []byte, gas uint64, value *uint256.Int, address common.Address, typ OpCode) (ret []byte, createAddress common.Address, leftOverGas uint64, err error) {
// Depth check execution. Fail if we're trying to execute above the
// limit.
var nonce uint64
if evm.depth > int(params.CallCreateDepth) {
err = ErrDepth
} else if !evm.Context.CanTransfer(evm.StateDB, caller, value) {
err = ErrInsufficientBalance
} else {
nonce = evm.StateDB.GetNonce(caller)
if nonce+1 < nonce {
err = ErrNonceUintOverflow
}
}
if err == nil {
evm.StateDB.SetNonce(caller, nonce+1, tracing.NonceChangeContractCreator)
}
if evm.Config.Tracer != nil {
evm.captureBegin(evm.depth, typ, caller, address, code, gas, value.ToBig())
defer func(startGas uint64) {
evm.captureEnd(evm.depth, startGas, leftOverGas, ret, err)
}(gas)
}
// Depth check execution. Fail if we're trying to execute above the
// limit.
if evm.depth > int(params.CallCreateDepth) {
return nil, common.Address{}, gas, ErrDepth
if err != nil {
return nil, common.Address{}, gas, err
}
if !evm.Context.CanTransfer(evm.StateDB, caller, value) {
return nil, common.Address{}, gas, ErrInsufficientBalance
}
nonce := evm.StateDB.GetNonce(caller)
if nonce+1 < nonce {
return nil, common.Address{}, gas, ErrNonceUintOverflow
}
evm.StateDB.SetNonce(caller, nonce+1, tracing.NonceChangeContractCreator)
// Charge the contract creation init gas in verkle mode
if evm.chainRules.IsEIP4762 {
@ -514,6 +522,7 @@ func (evm *EVM) create(caller common.Address, code []byte, gas uint64, value *ui
// - the storage is non-empty
contractHash := evm.StateDB.GetCodeHash(address)
storageRoot := evm.StateDB.GetStorageRoot(address)
if evm.StateDB.GetNonce(address) != 0 ||
(contractHash != (common.Hash{}) && contractHash != types.EmptyCodeHash) || // non-empty code
(storageRoot != (common.Hash{}) && storageRoot != types.EmptyRootHash) { // non-empty storage

View file

@ -887,7 +887,9 @@ func opSelfdestruct(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
}
beneficiary := scope.Stack.pop()
balance := evm.StateDB.GetBalance(scope.Contract.Address())
evm.StateDB.AddBalance(beneficiary.Bytes20(), balance, tracing.BalanceIncreaseSelfdestruct)
if scope.Contract.Address() != common.BytesToAddress(beneficiary.Bytes()) {
evm.StateDB.AddBalance(beneficiary.Bytes20(), balance, tracing.BalanceIncreaseSelfdestruct)
}
evm.StateDB.SelfDestruct(scope.Contract.Address())
if tracer := evm.Config.Tracer; tracer != nil {
if tracer.OnEnter != nil {
@ -906,8 +908,23 @@ func opSelfdestruct6780(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, erro
}
beneficiary := scope.Stack.pop()
balance := evm.StateDB.GetBalance(scope.Contract.Address())
evm.StateDB.SubBalance(scope.Contract.Address(), balance, tracing.BalanceDecreaseSelfdestruct)
evm.StateDB.AddBalance(beneficiary.Bytes20(), balance, tracing.BalanceIncreaseSelfdestruct)
createdInTx := !evm.StateDB.ExistBeforeCurTx(scope.Contract.Address())
if createdInTx {
// if the contract is not preexisting, the balance is immediately burned on selfdestruct-to-self
evm.StateDB.SubBalance(scope.Contract.Address(), balance, tracing.BalanceDecreaseSelfdestruct)
if scope.Contract.Address() != common.BytesToAddress(beneficiary.Bytes()) {
evm.StateDB.AddBalance(beneficiary.Bytes20(), balance, tracing.BalanceIncreaseSelfdestruct)
}
} else {
// if the contract is preexisting, the balance isn't burned on selfdestruct-to-self
if scope.Contract.Address() != common.BytesToAddress(beneficiary.Bytes()) {
evm.StateDB.SubBalance(scope.Contract.Address(), balance, tracing.BalanceDecreaseSelfdestruct)
evm.StateDB.AddBalance(beneficiary.Bytes20(), balance, tracing.BalanceIncreaseSelfdestruct)
}
}
evm.StateDB.SelfDestruct6780(scope.Contract.Address())
if tracer := evm.Config.Tracer; tracer != nil {
if tracer.OnEnter != nil {

View file

@ -71,6 +71,8 @@ type StateDB interface {
// Exist reports whether the given account exists in state.
// Notably this also returns true for self-destructed accounts within the current transaction.
Exist(common.Address) bool
ExistBeforeCurTx(addr common.Address) bool
// Empty returns whether the given account is empty. Empty
// is defined according to EIP161 (balance = nonce = code = 0).
Empty(common.Address) bool
@ -99,6 +101,8 @@ type StateDB interface {
AccessEvents() *state.AccessEvents
TxIndex() int
// Finalise must be invoked at the end of a transaction
Finalise(bool)
}

View file

@ -28,6 +28,8 @@ func LookupInstructionSet(rules params.Rules) (JumpTable, error) {
switch {
case rules.IsVerkle:
return newCancunInstructionSet(), errors.New("verkle-fork not defined yet")
case rules.IsAmsterdam:
return newPragueInstructionSet(), errors.New("glamsterdam-fork not defined yet")
case rules.IsOsaka:
return newOsakaInstructionSet(), nil
case rules.IsPrague:

View file

@ -18,7 +18,6 @@ package vm
import (
"errors"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/math"
"github.com/ethereum/go-ethereum/core/tracing"
@ -44,6 +43,12 @@ func makeGasSStoreFunc(clearingRefund uint64) gasFunc {
cost = params.ColdSloadCostEIP2929
// If the caller cannot afford the cost, this change will be rolled back
evm.StateDB.AddSlotToAccessList(contract.Address(), slot)
if evm.Config.Tracer != nil && evm.Config.Tracer.OnColdStorageRead != nil {
// TODO: should these only be called if the cold storage read didn't go OOG?
// it's harder to implement, but I lean towards "yes".
// need to clarify this in the spec.
evm.Config.Tracer.OnColdStorageRead(contract.Address(), slot)
}
}
value := common.Hash(y.Bytes32())
@ -103,6 +108,12 @@ func gasSLoadEIP2929(evm *EVM, contract *Contract, stack *Stack, mem *Memory, me
// If the caller cannot afford the cost, this change will be rolled back
// If he does afford it, we can skip checking the same thing later on, during execution
evm.StateDB.AddSlotToAccessList(contract.Address(), slot)
if evm.Config.Tracer != nil && evm.Config.Tracer.OnColdStorageRead != nil {
// TODO: should these only be called if the cold storage read didn't go OOG?
// it's harder to implement, but I lean towards "yes".
// need to clarify this in the spec.
evm.Config.Tracer.OnColdStorageRead(contract.Address(), slot)
}
return params.ColdSloadCostEIP2929, nil
}
return params.WarmStorageReadCostEIP2929, nil
@ -123,6 +134,12 @@ func gasExtCodeCopyEIP2929(evm *EVM, contract *Contract, stack *Stack, mem *Memo
// Check slot presence in the access list
if !evm.StateDB.AddressInAccessList(addr) {
evm.StateDB.AddAddressToAccessList(addr)
// TODO: same issue as OnColdSStorageRead. See the TODO above near OnColdStorageRead
if evm.Config.Tracer != nil && evm.Config.Tracer.OnColdAccountRead != nil {
evm.Config.Tracer.OnColdAccountRead(addr)
}
var overflow bool
// We charge (cold-warm), since 'warm' is already charged as constantGas
if gas, overflow = math.SafeAdd(gas, params.ColdAccountAccessCostEIP2929-params.WarmStorageReadCostEIP2929); overflow {
@ -144,6 +161,9 @@ func gasEip2929AccountCheck(evm *EVM, contract *Contract, stack *Stack, mem *Mem
addr := common.Address(stack.peek().Bytes20())
// Check slot presence in the access list
if !evm.StateDB.AddressInAccessList(addr) {
if evm.Config.Tracer != nil && evm.Config.Tracer.OnColdAccountRead != nil {
evm.Config.Tracer.OnColdAccountRead(addr)
}
// If the caller cannot afford the cost, this change will be rolled back
evm.StateDB.AddAddressToAccessList(addr)
// The warm storage read cost is already charged as constantGas
@ -161,6 +181,9 @@ func makeCallVariantGasCallEIP2929(oldCalculator gasFunc, addressPosition int) g
// the cost to charge for cold access, if any, is Cold - Warm
coldCost := params.ColdAccountAccessCostEIP2929 - params.WarmStorageReadCostEIP2929
if !warmAccess {
if evm.Config.Tracer != nil && evm.Config.Tracer.OnColdAccountRead != nil {
evm.Config.Tracer.OnColdAccountRead(addr)
}
evm.StateDB.AddAddressToAccessList(addr)
// Charge the remaining difference here already, to correctly calculate available
// gas for call
@ -227,6 +250,9 @@ func makeSelfdestructGasFn(refundsEnabled bool) gasFunc {
address = common.Address(stack.peek().Bytes20())
)
if !evm.StateDB.AddressInAccessList(address) {
if evm.Config.Tracer != nil && evm.Config.Tracer.OnColdAccountRead != nil {
evm.Config.Tracer.OnColdAccountRead(address)
}
// If the caller cannot afford the cost, this change will be rolled back
evm.StateDB.AddAddressToAccessList(address)
gas = params.ColdAccountAccessCostEIP2929
@ -259,6 +285,9 @@ func makeCallVariantGasCallEIP7702(oldCalculator gasFunc) gasFunc {
// Check slot presence in the access list
if !evm.StateDB.AddressInAccessList(addr) {
if evm.Config.Tracer != nil && evm.Config.Tracer.OnColdAccountRead != nil {
evm.Config.Tracer.OnColdAccountRead(addr)
}
evm.StateDB.AddAddressToAccessList(addr)
// The WarmStorageReadCostEIP2929 (100) is already deducted in the form of a constant cost, so
// the cost to charge for cold access, if any, is Cold - Warm
@ -277,6 +306,9 @@ func makeCallVariantGasCallEIP7702(oldCalculator gasFunc) gasFunc {
if evm.StateDB.AddressInAccessList(target) {
cost = params.WarmStorageReadCostEIP2929
} else {
if evm.Config.Tracer != nil && evm.Config.Tracer.OnColdAccountRead != nil {
evm.Config.Tracer.OnColdAccountRead(target)
}
evm.StateDB.AddAddressToAccessList(target)
cost = params.ColdAccountAccessCostEIP2929
}

View file

@ -676,7 +676,7 @@ func TestColdAccountAccessCost(t *testing.T) {
Tracer: &tracing.Hooks{
OnOpcode: func(pc uint64, op byte, gas, cost uint64, scope tracing.OpContext, rData []byte, depth int, err error) {
// Uncomment to investigate failures:
//t.Logf("%d: %v %d", step, vm.OpCode(op).String(), cost)
//t.Logf("%d: %v %d", step, vm.OpCode(op).PrettyPrint(), cost)
if step == tc.step {
have = cost
}

View file

@ -17,9 +17,11 @@
package eth
import (
"bytes"
"context"
"errors"
"fmt"
"github.com/ethereum/go-ethereum/core/types/bal"
"time"
"github.com/ethereum/go-ethereum/common"
@ -505,7 +507,7 @@ func (api *DebugAPI) ExecutionWitness(bn rpc.BlockNumber) (*stateless.ExtWitness
return &stateless.ExtWitness{}, fmt.Errorf("block number %v found, but parent missing", bn)
}
result, err := bc.ProcessBlock(parent.Root, block, false, true)
result, err := bc.ProcessBlock(parent.Root, block, false, true, false, false)
if err != nil {
return nil, err
}
@ -525,10 +527,40 @@ func (api *DebugAPI) ExecutionWitnessByHash(hash common.Hash) (*stateless.ExtWit
return &stateless.ExtWitness{}, fmt.Errorf("block number %x found, but parent missing", hash)
}
result, err := bc.ProcessBlock(parent.Root, block, false, true)
result, err := bc.ProcessBlock(parent.Root, block, false, true, false, false)
if err != nil {
return nil, err
}
return result.Witness().ToExtWitness(), nil
}
// GetBlockAccessList returns the block access list for the given block
// number or hash, or nil if the block does not carry one (pre-Amsterdam
// blocks imported without --experimentalbal).
//
// An error is returned only when the block itself cannot be found.
func (api *DebugAPI) GetBlockAccessList(number rpc.BlockNumberOrHash) (*bal.BlockAccessList, error) {
	// Resolve the block from whichever identifier the caller supplied.
	var block *types.Block
	if num := number.BlockNumber; num != nil {
		block = api.eth.blockchain.GetBlockByNumber(uint64(num.Int64()))
	} else if hash := number.BlockHash; hash != nil {
		block = api.eth.blockchain.GetBlockByHash(*hash)
	}
	if block == nil {
		// errors.New instead of fmt.Errorf: the message has no format verbs.
		return nil, errors.New("block not found")
	}
	return block.Body().AccessList, nil
}
// GetEncodedBlockAccessList returns a block access list corresponding to a
// block number/hash in RLP-encoded form. It returns nil if one does not exist.
func (api *DebugAPI) GetEncodedBlockAccessList(number rpc.BlockNumberOrHash) ([]byte, error) {
	// Deliberately not named "bal": that would shadow the core/types/bal
	// package import used elsewhere in this file.
	list, err := api.GetBlockAccessList(number)
	if err != nil {
		return nil, err
	}
	if list == nil {
		// Block exists but carries no access list; honour the documented
		// "returns nil" contract instead of RLP-encoding a nil pointer.
		return nil, nil
	}
	var enc bytes.Buffer
	if err = list.EncodeRLP(&enc); err != nil {
		return nil, err
	}
	return enc.Bytes(), nil
}

View file

@ -244,6 +244,7 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) {
// - DATADIR/triedb/verkle.journal
TrieJournalDirectory: stack.ResolvePath("triedb"),
StateSizeTracking: config.EnableStateSizeTracking,
EnableBALForTesting: config.ExperimentalBAL,
}
)
if config.VMTrace != "" {

View file

@ -81,6 +81,43 @@ const (
beaconUpdateWarnFrequency = 5 * time.Minute
)
// All methods provided over the engine endpoint.
//
// NOTE(review): presumably this list is what the node advertises to the
// consensus layer as its supported Engine API surface — confirm against the
// handler that serves capability exchange.
var caps = []string{
	"engine_forkchoiceUpdatedV1",
	"engine_forkchoiceUpdatedV2",
	"engine_forkchoiceUpdatedV3",
	"engine_forkchoiceUpdatedWithWitnessV1",
	"engine_forkchoiceUpdatedWithWitnessV2",
	"engine_forkchoiceUpdatedWithWitnessV3",
	"engine_exchangeTransitionConfigurationV1",
	"engine_getPayloadV1",
	"engine_getPayloadV2",
	"engine_getPayloadV3",
	"engine_getPayloadV4",
	"engine_getPayloadV5",
	"engine_getPayloadV6",
	"engine_getBlobsV1",
	"engine_getBlobsV2",
	"engine_newPayloadV1",
	"engine_newPayloadV2",
	"engine_newPayloadV3",
	"engine_newPayloadV4",
	"engine_newPayloadV5",
	"engine_newPayloadWithWitnessV1",
	"engine_newPayloadWithWitnessV2",
	"engine_newPayloadWithWitnessV3",
	"engine_newPayloadWithWitnessV4",
	"engine_executeStatelessPayloadV1",
	"engine_executeStatelessPayloadV2",
	"engine_executeStatelessPayloadV3",
	"engine_executeStatelessPayloadV4",
	"engine_getPayloadBodiesByHashV1",
	"engine_getPayloadBodiesByHashV2",
	"engine_getPayloadBodiesByRangeV1",
	"engine_getPayloadBodiesByRangeV2",
	"engine_getClientVersionV1",
}
var (
// Number of blobs requested via getBlobsV2
getBlobsRequestedCounter = metrics.NewRegisteredCounter("engine/getblobs/requested", nil)
@ -212,9 +249,13 @@ func (api *ConsensusAPI) ForkchoiceUpdatedV3(update engine.ForkchoiceStateV1, pa
return engine.STATUS_INVALID, attributesErr("missing withdrawals")
case params.BeaconRoot == nil:
return engine.STATUS_INVALID, attributesErr("missing beacon root")
case !api.checkFork(params.Timestamp, forks.Cancun, forks.Prague, forks.Osaka, forks.BPO1, forks.BPO2, forks.BPO3, forks.BPO4, forks.BPO5):
case !api.checkFork(params.Timestamp, forks.Cancun, forks.Prague, forks.Osaka, forks.BPO1, forks.BPO2, forks.BPO3, forks.BPO4, forks.BPO5, forks.Amsterdam):
return engine.STATUS_INVALID, unsupportedForkErr("fcuV3 must only be called for cancun/prague/osaka payloads")
}
if api.checkFork(params.Timestamp, forks.Amsterdam) {
return api.forkchoiceUpdated(update, params, engine.PayloadV4, false)
}
}
// TODO(matt): the spec requires that fcu is applied when called on a valid
// hash, even if params are wrong. To do this we need to split up
@ -460,6 +501,14 @@ func (api *ConsensusAPI) GetPayloadV5(payloadID engine.PayloadID) (*engine.Execu
return api.getPayload(payloadID, false)
}
// GetPayloadV6 returns a cached payload by id.
func (api *ConsensusAPI) GetPayloadV6(payloadID engine.PayloadID) (*engine.ExecutionPayloadEnvelope, error) {
	// Only payload ids stamped with the V4 version byte are servable here.
	if payloadID.Is(engine.PayloadV4) {
		return api.getPayload(payloadID, false)
	}
	return nil, engine.UnsupportedFork
}
func (api *ConsensusAPI) getPayload(payloadID engine.PayloadID, full bool) (*engine.ExecutionPayloadEnvelope, error) {
log.Trace("Engine API request received", "method", "GetPayload", "id", payloadID)
data := api.localBlocks.get(payloadID, full)
@ -665,6 +714,33 @@ func (api *ConsensusAPI) NewPayloadV4(params engine.ExecutableData, versionedHas
return api.newPayload(params, versionedHashes, beaconRoot, requests, false)
}
// NewPayloadV5 creates an Eth1 block, inserts it in the chain, and returns the status of the chain.
func (api *ConsensusAPI) NewPayloadV5(params engine.ExecutableData, versionedHashes []common.Hash, beaconRoot *common.Hash, executionRequests []hexutil.Bytes) (engine.PayloadStatusV1, error) {
	// Reject payloads missing any field made mandatory by the
	// shanghai..amsterdam forks before attempting insertion.
	switch {
	case params.Withdrawals == nil:
		return invalidStatus, paramsErr("nil withdrawals post-shanghai")
	case params.ExcessBlobGas == nil:
		return invalidStatus, paramsErr("nil excessBlobGas post-cancun")
	case params.BlobGasUsed == nil:
		return invalidStatus, paramsErr("nil blobGasUsed post-cancun")
	case versionedHashes == nil:
		return invalidStatus, paramsErr("nil versionedHashes post-cancun")
	case beaconRoot == nil:
		return invalidStatus, paramsErr("nil beaconRoot post-cancun")
	case executionRequests == nil:
		return invalidStatus, paramsErr("nil executionRequests post-prague")
	case params.BlockAccessList == nil:
		return invalidStatus, paramsErr("nil block access list post-amsterdam")
	// NOTE(review): the fork check below admits prague/osaka payloads, yet
	// the error message — and the mandatory BlockAccessList above — suggest
	// this endpoint is amsterdam-only. Confirm which is intended.
	case !api.checkFork(params.Timestamp, forks.Prague, forks.Osaka, forks.Amsterdam):
		return invalidStatus, unsupportedForkErr("newPayloadV5 must only be called for amsterdam payloads")
	}
	// Convert the consensus-layer execution requests and sanity-check them
	// before handing the payload to the shared insertion path.
	requests := convertRequests(executionRequests)
	if err := validateRequests(requests); err != nil {
		return engine.PayloadStatusV1{Status: engine.INVALID}, engine.InvalidParams.With(err)
	}
	return api.newPayload(params, versionedHashes, beaconRoot, requests, false)
}
func (api *ConsensusAPI) newPayload(params engine.ExecutableData, versionedHashes []common.Hash, beaconRoot *common.Hash, requests [][]byte, witness bool) (engine.PayloadStatusV1, error) {
// The locking here is, strictly, not required. Without these locks, this can happen:
//

View file

@ -88,10 +88,10 @@ func (q *bodyQueue) request(peer *peerConnection, req *fetchRequest, resCh chan
// deliver is responsible for taking a generic response packet from the concurrent
// fetcher, unpacking the body data and delivering it to the downloader's queue.
func (q *bodyQueue) deliver(peer *peerConnection, packet *eth.Response) (int, error) {
txs, uncles, withdrawals := packet.Res.(*eth.BlockBodiesResponse).Unpack()
hashsets := packet.Meta.([][]common.Hash) // {txs hashes, uncle hashes, withdrawal hashes}
txs, uncles, withdrawals, accessLists := packet.Res.(*eth.BlockBodiesResponse).Unpack()
hashsets := packet.Meta.([][]common.Hash) // {txs hashes, uncle hashes, withdrawal hashes, access list hashes}
accepted, err := q.queue.DeliverBodies(peer.id, txs, hashsets[0], uncles, hashsets[1], withdrawals, hashsets[2])
accepted, err := q.queue.DeliverBodies(peer.id, txs, hashsets[0], uncles, hashsets[1], withdrawals, hashsets[2], accessLists, hashsets[3])
switch {
case err == nil && len(txs) == 0:
peer.log.Trace("Requested bodies delivered")

View file

@ -22,6 +22,7 @@ package downloader
import (
"errors"
"fmt"
"github.com/ethereum/go-ethereum/core/types/bal"
"sync"
"sync/atomic"
"time"
@ -564,6 +565,7 @@ func (q *queue) expire(peer string, pendPool map[string]*fetchRequest, taskQueue
func (q *queue) DeliverBodies(id string, txLists [][]*types.Transaction, txListHashes []common.Hash,
uncleLists [][]*types.Header, uncleListHashes []common.Hash,
withdrawalLists [][]*types.Withdrawal, withdrawalListHashes []common.Hash,
blockAccessLists []*bal.BlockAccessList, accessListHashes []common.Hash,
) (int, error) {
q.lock.Lock()
defer q.lock.Unlock()
@ -588,6 +590,19 @@ func (q *queue) DeliverBodies(id string, txLists [][]*types.Transaction, txListH
return errInvalidBody
}
}
if header.BlockAccessListHash == nil {
// nil hash means that access list should not be present in body
if blockAccessLists[index] != nil {
return errInvalidBody
}
} else { // non-nil hash: body must have access list
if blockAccessLists[index] == nil {
return errInvalidBody
}
if accessListHashes[index] != header.Hash() {
return errInvalidBody
}
}
// Blocks must have a number of blobs corresponding to the header gas usage,
// and zero before the Cancun hardfork.
var blobs int

View file

@ -189,6 +189,13 @@ type Config struct {
// EIP-7966: eth_sendRawTransactionSync timeouts
TxSyncDefaultTimeout time.Duration `toml:",omitempty"`
TxSyncMaxTimeout time.Duration `toml:",omitempty"`
// ExperimentalBAL enables EIP-7928 block access list creation during execution
// of post Cancun blocks, and persistence via embedding the BAL in the block body.
//
// TODO: also note that it will cause execution of blocks with access lists to base
// their execution on the BAL.
ExperimentalBAL bool `toml:",omitempty"`
}
// CreateConsensusEngine creates a consensus engine for the given chain config.

View file

@ -381,6 +381,7 @@ func handleBlockBodies(backend Backend, msg Decoder, peer *Peer) error {
txsHashes = make([]common.Hash, len(res.BlockBodiesResponse))
uncleHashes = make([]common.Hash, len(res.BlockBodiesResponse))
withdrawalHashes = make([]common.Hash, len(res.BlockBodiesResponse))
accessListHashes = make([]common.Hash, len(res.BlockBodiesResponse))
)
hasher := trie.NewStackTrie(nil)
for i, body := range res.BlockBodiesResponse {
@ -389,8 +390,11 @@ func handleBlockBodies(backend Backend, msg Decoder, peer *Peer) error {
if body.Withdrawals != nil {
withdrawalHashes[i] = types.DeriveSha(types.Withdrawals(body.Withdrawals), hasher)
}
if body.AccessList != nil {
accessListHashes[i] = body.AccessList.Hash()
}
}
return [][]common.Hash{txsHashes, uncleHashes, withdrawalHashes}
return [][]common.Hash{txsHashes, uncleHashes, withdrawalHashes, accessListHashes}
}
return peer.dispatchResponse(&Response{
id: res.RequestId,

View file

@ -19,6 +19,7 @@ package eth
import (
"errors"
"fmt"
"github.com/ethereum/go-ethereum/core/types/bal"
"io"
"math/big"
@ -239,20 +240,22 @@ type BlockBody struct {
Transactions []*types.Transaction // Transactions contained within a block
Uncles []*types.Header // Uncles contained within a block
Withdrawals []*types.Withdrawal `rlp:"optional"` // Withdrawals contained within a block
AccessList *bal.BlockAccessList `rlp:"optional"`
}
// Unpack retrieves the transactions and uncles from the range packet and returns
// them in a split flat format that's more consistent with the internal data structures.
func (p *BlockBodiesResponse) Unpack() ([][]*types.Transaction, [][]*types.Header, [][]*types.Withdrawal) {
func (p *BlockBodiesResponse) Unpack() ([][]*types.Transaction, [][]*types.Header, [][]*types.Withdrawal, []*bal.BlockAccessList) {
var (
txset = make([][]*types.Transaction, len(*p))
uncleset = make([][]*types.Header, len(*p))
withdrawalset = make([][]*types.Withdrawal, len(*p))
accessListSet = make([]*bal.BlockAccessList, len(*p))
)
for i, body := range *p {
txset[i], uncleset[i], withdrawalset[i] = body.Transactions, body.Uncles, body.Withdrawals
txset[i], uncleset[i], withdrawalset[i], accessListSet[i] = body.Transactions, body.Uncles, body.Withdrawals, body.AccessList
}
return txset, uncleset, withdrawalset
return txset, uncleset, withdrawalset, accessListSet
}
// GetReceiptsRequest represents a block receipts query.

View file

@ -975,6 +975,9 @@ func RPCMarshalBlock(block *types.Block, inclTx bool, fullTx bool, config *param
if block.Withdrawals() != nil {
fields["withdrawals"] = block.Withdrawals()
}
if block.Body().AccessList != nil {
fields["accessList"] = block.Body().AccessList
}
return fields
}

View file

@ -347,7 +347,7 @@ func (sim *simulator) processBlock(ctx context.Context, block *simBlock, header,
}
blockBody := &types.Body{Transactions: txes, Withdrawals: *block.BlockOverrides.Withdrawals}
chainHeadReader := &simChainHeadReader{ctx, sim.b}
b, err := sim.b.Engine().FinalizeAndAssemble(chainHeadReader, header, sim.state, blockBody, receipts)
b, err := sim.b.Engine().FinalizeAndAssemble(chainHeadReader, header, sim.state, blockBody, receipts, nil)
if err != nil {
return nil, nil, nil, err
}

View file

@ -474,6 +474,16 @@ web3._extend({
params: 1,
inputFormatter: [null],
}),
new web3._extend.Method({
name: 'getBlockAccessList',
call: 'debug_getBlockAccessList',
params: 1
}),
new web3._extend.Method({
name: 'getEncodedBlockAccessList',
call: 'debug_getEncodedBlockAccessList',
params: 1
}),
],
properties: []
});

View file

@ -19,6 +19,7 @@ package miner
import (
"errors"
"fmt"
"github.com/ethereum/go-ethereum/core/tracing"
"math/big"
"sync/atomic"
"time"
@ -60,7 +61,8 @@ type environment struct {
sidecars []*types.BlobTxSidecar
blobs int
witness *stateless.Witness
witness *stateless.Witness
alTracer *core.BlockAccessListTracer
}
// txFits reports whether the transaction fits into the block size limit.
@ -134,6 +136,9 @@ func (miner *Miner) generateWork(genParam *generateParams, witness bool) *newPay
}
}
body := types.Body{Transactions: work.txs, Withdrawals: genParam.withdrawals}
if work.alTracer != nil {
body.AccessList = work.alTracer.AccessList().ToEncodingObj()
}
allLogs := make([]*types.Log, 0)
for _, r := range work.receipts {
@ -162,10 +167,24 @@ func (miner *Miner) generateWork(genParam *generateParams, witness bool) *newPay
work.header.RequestsHash = &reqHash
}
block, err := miner.engine.FinalizeAndAssemble(miner.chain, work.header, work.state, &body, work.receipts)
// set the block access list on the body after the block has finished executing
// but before the header hash is computed (in FinalizeAndAssemble).
//
// I considered trying to instantiate the beacon consensus engine with a tracer.
// however, the BAL tracer instance is used once per block, while the engine object
// lives for the entire time the client is running.
onBlockFinalization := func() {
if miner.chainConfig.IsAmsterdam(work.header.Number, work.header.Time) {
work.alTracer.OnBlockFinalization()
body.AccessList = work.alTracer.AccessList().ToEncodingObj()
}
}
block, err := miner.engine.FinalizeAndAssemble(miner.chain, work.header, work.state, &body, work.receipts, onBlockFinalization)
if err != nil {
return &newPayloadResult{err: err}
}
return &newPayloadResult{
block: block,
fees: totalFees(block, work.receipts),
@ -256,13 +275,16 @@ func (miner *Miner) prepareWork(genParams *generateParams, witness bool) (*envir
if miner.chainConfig.IsPrague(header.Number, header.Time) {
core.ProcessParentBlockHash(header.ParentHash, env.evm)
}
if miner.chainConfig.IsAmsterdam(header.Number, header.Time) {
env.alTracer.OnPreTxExecutionDone()
}
return env, nil
}
// makeEnv creates a new environment for the sealing block.
func (miner *Miner) makeEnv(parent *types.Header, header *types.Header, coinbase common.Address, witness bool) (*environment, error) {
// Retrieve the parent state to execute on top.
state, err := miner.chain.StateAt(parent.Root)
sdb, err := miner.chain.StateAt(parent.Root)
if err != nil {
return nil, err
}
@ -271,17 +293,27 @@ func (miner *Miner) makeEnv(parent *types.Header, header *types.Header, coinbase
if err != nil {
return nil, err
}
state.StartPrefetcher("miner", bundle, nil)
sdb.StartPrefetcher("miner", bundle, nil)
}
var alTracer *core.BlockAccessListTracer
var hooks *tracing.Hooks
var hookedState vm.StateDB = sdb
var vmConfig vm.Config
if miner.chainConfig.IsAmsterdam(header.Number, header.Time) {
alTracer, hooks = core.NewBlockAccessListTracer(0)
hookedState = state.NewHookedState(sdb, hooks)
vmConfig.Tracer = hooks
}
// Note the passed coinbase may be different with header.Coinbase.
return &environment{
signer: types.MakeSigner(miner.chainConfig, header.Number, header.Time),
state: state,
state: sdb,
size: uint64(header.Size()),
coinbase: coinbase,
header: header,
witness: state.Witness(),
evm: vm.NewEVM(core.NewEVMBlockContext(header, miner.chain, &coinbase), state, miner.chainConfig, vm.Config{}),
witness: sdb.Witness(),
evm: vm.NewEVM(core.NewEVMBlockContext(header, miner.chain, &coinbase), hookedState, miner.chainConfig, vmConfig),
alTracer: alTracer,
}, nil
}

View file

@ -1007,9 +1007,11 @@ func (c *ChainConfig) CheckConfigForkOrder() error {
}
if cur.timestamp != nil {
// If the fork is configured, a blob schedule must be defined for it.
if cur.config == nil {
return fmt.Errorf("invalid chain configuration: missing entry for fork %q in blobSchedule", cur.name)
}
/*
if cur.config == nil {
return fmt.Errorf("invalid chain configuration: missing entry for fork %q in blobSchedule", cur.name)
}
*/
}
}
return nil
@ -1164,6 +1166,9 @@ func (c *ChainConfig) LatestFork(time uint64) forks.Fork {
// BlobConfig returns the blob config associated with the provided fork.
func (c *ChainConfig) BlobConfig(fork forks.Fork) *BlobConfig {
switch fork {
case forks.Amsterdam:
// TODO: (????)
return c.BlobScheduleConfig.BPO2
case forks.BPO5:
return c.BlobScheduleConfig.BPO5
case forks.BPO4:
@ -1209,6 +1214,8 @@ func (c *ChainConfig) ActiveSystemContracts(time uint64) map[string]common.Addre
// the fork isn't defined or isn't a time-based fork.
func (c *ChainConfig) Timestamp(fork forks.Fork) *uint64 {
switch {
case fork == forks.Amsterdam:
return c.AmsterdamTime
case fork == forks.BPO5:
return c.BPO5Time
case fork == forks.BPO4:

View file

@ -18,6 +18,7 @@ package tests
import (
"math/rand"
"path/filepath"
"testing"
"github.com/ethereum/go-ethereum/common"
@ -67,13 +68,119 @@ func TestBlockchain(t *testing.T) {
bt.skipLoad(`.*\.meta/.*`)
bt.walk(t, blockTestDir, func(t *testing.T, name string, test *BlockTest) {
execBlockTest(t, bt, test)
execBlockTest(t, bt, test, false)
})
// There is also a LegacyTests folder, containing blockchain tests generated
// prior to Istanbul. However, they are all derived from GeneralStateTests,
// which run natively, so there's no reason to run them here.
}
// TestBlockchainBAL reruns the blockchain test fixtures with block access
// list construction and verification enabled, restricted to fixtures whose
// Cancun fork activates at genesis.
func TestBlockchainBAL(t *testing.T) {
	bt := new(testMatcher)
	// We are running most of GeneralStateTests to test BAL support, even
	// though they are run as state tests too. Still, the performance tests are
	// less about state and more about EVM number crunching, so skip those.
	bt.skipLoad(`^GeneralStateTests/VMTests/vmPerformance`)
	// Skip random failures due to selfish mining test
	bt.skipLoad(`.*bcForgedTest/bcForkUncle\.json`)

	// Slow tests
	bt.slow(`.*bcExploitTest/DelegateCallSpam.json`)
	bt.slow(`.*bcExploitTest/ShanghaiLove.json`)
	bt.slow(`.*bcExploitTest/SuicideIssue.json`)
	bt.slow(`.*/bcForkStressTest/`)
	bt.slow(`.*/bcGasPricerTest/RPC_API_Test.json`)
	bt.slow(`.*/bcWalletTest/`)

	// Very slow test
	bt.skipLoad(`.*/stTimeConsuming/.*`)
	// test takes a lot of time and goes easily OOM because of sha3 calculation on a huge range,
	// using 4.6 TGas
	bt.skipLoad(`.*randomStatetest94.json.*`)

	// After the merge we would accept side chains as canonical even if they have lower td
	bt.skipLoad(`.*bcMultiChainTest/ChainAtoChainB_difficultyB.json`)
	bt.skipLoad(`.*bcMultiChainTest/CallContractFromNotBestBlock.json`)
	bt.skipLoad(`.*bcTotalDifficultyTest/uncleBlockAtBlock3afterBlock4.json`)
	bt.skipLoad(`.*bcTotalDifficultyTest/lotsOfBranchesOverrideAtTheMiddle.json`)
	bt.skipLoad(`.*bcTotalDifficultyTest/sideChainWithMoreTransactions.json`)
	bt.skipLoad(`.*bcForkStressTest/ForkStressTest.json`)
	bt.skipLoad(`.*bcMultiChainTest/lotsOfLeafs.json`)
	bt.skipLoad(`.*bcFrontierToHomestead/blockChainFrontierWithLargerTDvsHomesteadBlockchain.json`)
	bt.skipLoad(`.*bcFrontierToHomestead/blockChainFrontierWithLargerTDvsHomesteadBlockchain2.json`)

	// With chain history removal, TDs become unavailable, this transition tests based on TTD are unrunnable
	bt.skipLoad(`.*bcArrowGlacierToParis/powToPosBlockRejection.json`)

	// This directory contains no test.
	bt.skipLoad(`.*\.meta/.*`)

	bt.walk(t, blockTestDir, func(t *testing.T, name string, test *BlockTest) {
		config, ok := Forks[test.json.Network]
		if !ok {
			t.Fatalf("unsupported fork: %s\n", test.json.Network)
		}
		gspec := test.genesis(config)
		// skip any tests which are not past the cancun fork (selfdestruct removal)
		if gspec.Config.CancunTime == nil || *gspec.Config.CancunTime != 0 {
			return
		}
		execBlockTest(t, bt, test, true)
	})
	// There is also a LegacyTests folder, containing blockchain tests generated
	// prior to Istanbul. However, they are all derived from GeneralStateTests,
	// which run natively, so there's no reason to run them here.
}
// TestExecutionSpecBlocktestsBAL runs the execution-spec-tests fixtures with
// block access list construction/verification enabled, for fixtures whose
// Cancun fork activates at genesis.
// TODO: rename this to reflect that it tests creating/verifying BALs on pre-amsterdam tests
func TestExecutionSpecBlocktestsBAL(t *testing.T) {
	if !common.FileExist(executionSpecBlockchainTestDir) {
		t.Skipf("directory %s does not exist", executionSpecBlockchainTestDir)
	}
	bt := new(testMatcher)

	bt.skipLoad(".*prague/eip7251_consolidations/contract_deployment/system_contract_deployment.json")
	bt.skipLoad(".*prague/eip7002_el_triggerable_withdrawals/contract_deployment/system_contract_deployment.json")

	bt.walk(t, executionSpecBlockchainTestDir, func(t *testing.T, name string, test *BlockTest) {
		config, ok := Forks[test.json.Network]
		if !ok {
			t.Fatalf("unsupported fork: %s\n", test.json.Network)
		}
		gspec := test.genesis(config)
		// skip any tests which are not past the cancun fork (selfdestruct removal)
		if gspec.Config.CancunTime == nil || *gspec.Config.CancunTime != 0 {
			return
		}
		execBlockTest(t, bt, test, true)
	})
}
// TestExecutionSpecBlocktestsAmsterdam runs the Amsterdam BAL fixtures from
// execution-spec-tests, skipping entirely when the fixture directory is absent.
func TestExecutionSpecBlocktestsAmsterdam(t *testing.T) {
	var executionSpecAmsterdamBlockchainTestDir = filepath.Join(".", "fixtures-amsterdam-bal", "blockchain_tests")
	if !common.FileExist(executionSpecAmsterdamBlockchainTestDir) {
		t.Skipf("directory %s does not exist", executionSpecAmsterdamBlockchainTestDir)
	}
	bt := new(testMatcher)

	bt.walk(t, executionSpecAmsterdamBlockchainTestDir, func(t *testing.T, name string, test *BlockTest) {
		config, ok := Forks[test.json.Network]
		if !ok {
			t.Fatalf("unsupported fork: %s\n", test.json.Network)
		}
		gspec := test.genesis(config)
		// skip any tests which are not past the cancun fork (selfdestruct removal)
		if gspec.Config.CancunTime == nil || *gspec.Config.CancunTime != 0 {
			return
		}
		// TODO: skip any tests that aren't amsterdam
		// NOTE(review): buildAndVerifyBAL is false here, unlike the other BAL
		// tests — presumably because amsterdam fixtures already embed access
		// lists in their blocks. Confirm this is intentional.
		execBlockTest(t, bt, test, false)
	})
}
// TestExecutionSpecBlocktests runs the test fixtures from execution-spec-tests.
func TestExecutionSpecBlocktests(t *testing.T) {
if !common.FileExist(executionSpecBlockchainTestDir) {
@ -86,11 +193,11 @@ func TestExecutionSpecBlocktests(t *testing.T) {
bt.skipLoad(".*prague/eip7002_el_triggerable_withdrawals/test_system_contract_deployment.json")
bt.walk(t, executionSpecBlockchainTestDir, func(t *testing.T, name string, test *BlockTest) {
execBlockTest(t, bt, test)
execBlockTest(t, bt, test, false)
})
}
func execBlockTest(t *testing.T, bt *testMatcher, test *BlockTest) {
func execBlockTest(t *testing.T, bt *testMatcher, test *BlockTest, buildAndVerifyBAL bool) {
// Define all the different flag combinations we should run the tests with,
// picking only one for short tests.
//
@ -104,9 +211,11 @@ func execBlockTest(t *testing.T, bt *testMatcher, test *BlockTest) {
snapshotConf = []bool{snapshotConf[rand.Int()%2]}
dbschemeConf = []string{dbschemeConf[rand.Int()%2]}
}
for _, snapshot := range snapshotConf {
for _, dbscheme := range dbschemeConf {
if err := bt.checkFailure(t, test.Run(snapshot, dbscheme, true, nil, nil)); err != nil {
//tracer := logger.NewJSONLogger(&logger.Config{}, os.Stdout)
if err := bt.checkFailure(t, test.Run(snapshot, dbscheme, false, buildAndVerifyBAL, nil, nil)); err != nil {
t.Errorf("test with config {snapshotter:%v, scheme:%v} failed: %v", snapshot, dbscheme, err)
return
}

View file

@ -22,6 +22,7 @@ import (
"encoding/hex"
"encoding/json"
"fmt"
"github.com/ethereum/go-ethereum/core/types/bal"
stdmath "math"
"math/big"
"os"
@ -71,6 +72,7 @@ type btBlock struct {
ExpectException string
Rlp string
UncleHeaders []*btHeader
AccessList *bal.BlockAccessList `json:"blockAccessList,omitempty"`
}
//go:generate go run github.com/fjl/gencodec -type btHeader -field-override btHeaderMarshaling -out gen_btheader.go
@ -97,6 +99,7 @@ type btHeader struct {
BlobGasUsed *uint64
ExcessBlobGas *uint64
ParentBeaconBlockRoot *common.Hash
BlockAccessListHash *common.Hash
}
type btHeaderMarshaling struct {
@ -111,11 +114,7 @@ type btHeaderMarshaling struct {
ExcessBlobGas *math.HexOrDecimal64
}
func (t *BlockTest) Run(snapshotter bool, scheme string, witness bool, tracer *tracing.Hooks, postCheck func(error, *core.BlockChain)) (result error) {
config, ok := Forks[t.json.Network]
if !ok {
return UnsupportedForkError{t.json.Network}
}
func (t *BlockTest) createTestBlockChain(config *params.ChainConfig, snapshotter bool, scheme string, witness, createAndVerifyBAL bool, tracer *tracing.Hooks) (*core.BlockChain, error) {
// import pre accounts & construct test genesis block & state root
var (
db = rawdb.NewMemoryDatabase()
@ -128,7 +127,6 @@ func (t *BlockTest) Run(snapshotter bool, scheme string, witness bool, tracer *t
} else {
tconf.HashDB = hashdb.Defaults
}
// Commit genesis state
gspec := t.genesis(config)
// if ttd is not specified, set an arbitrary huge value
@ -138,15 +136,15 @@ func (t *BlockTest) Run(snapshotter bool, scheme string, witness bool, tracer *t
triedb := triedb.NewDatabase(db, tconf)
gblock, err := gspec.Commit(db, triedb)
if err != nil {
return err
return nil, err
}
triedb.Close() // close the db to prevent memory leak
if gblock.Hash() != t.json.Genesis.Hash {
return fmt.Errorf("genesis block hash doesn't match test: computed=%x, test=%x", gblock.Hash().Bytes()[:6], t.json.Genesis.Hash[:6])
return nil, fmt.Errorf("genesis block hash doesn't match test: computed=%x, test=%x", gblock.Hash().Bytes()[:6], t.json.Genesis.Hash[:6])
}
if gblock.Root() != t.json.Genesis.StateRoot {
return fmt.Errorf("genesis block state root does not match test: computed=%x, test=%x", gblock.Root().Bytes()[:6], t.json.Genesis.StateRoot[:6])
return nil, fmt.Errorf("genesis block state root does not match test: computed=%x, test=%x", gblock.Root().Bytes()[:6], t.json.Genesis.StateRoot[:6])
}
// Wrap the original engine within the beacon-engine
engine := beacon.New(ethash.NewFaker())
@ -160,12 +158,28 @@ func (t *BlockTest) Run(snapshotter bool, scheme string, witness bool, tracer *t
Tracer: tracer,
StatelessSelfValidation: witness,
},
NoPrefetch: true,
EnableBALForTesting: createAndVerifyBAL,
}
if snapshotter {
options.SnapshotLimit = 1
options.SnapshotWait = true
}
chain, err := core.NewBlockChain(db, gspec, engine, options)
if err != nil {
return nil, err
}
return chain, nil
}
func (t *BlockTest) Run(snapshotter bool, scheme string, witness bool, createAndVerifyBAL bool, tracer *tracing.Hooks, postCheck func(error, *core.BlockChain)) (result error) {
config, ok := Forks[t.json.Network]
if !ok {
return UnsupportedForkError{t.json.Network}
}
// import pre accounts & construct test genesis block & state root
chain, err := t.createTestBlockChain(config, snapshotter, scheme, witness, createAndVerifyBAL, tracer)
if err != nil {
return err
}
@ -199,25 +213,69 @@ func (t *BlockTest) Run(snapshotter bool, scheme string, witness bool, tracer *t
}
}
}
return t.validateImportedHeaders(chain, validBlocks)
err = t.validateImportedHeaders(chain, validBlocks)
if err != nil {
return err
}
if createAndVerifyBAL {
newChain, _ := t.createTestBlockChain(config, snapshotter, scheme, witness, createAndVerifyBAL, tracer)
defer newChain.Stop()
var blocksWithBAL types.Blocks
for i := uint64(1); i <= chain.CurrentBlock().Number.Uint64(); i++ {
block := chain.GetBlockByNumber(i)
if block.Body().AccessList == nil {
return fmt.Errorf("block %d missing BAL", block.NumberU64())
}
blocksWithBAL = append(blocksWithBAL, block)
}
amt, err := newChain.InsertChain(blocksWithBAL)
if err != nil {
return err
}
_ = amt
newDB, err := newChain.State()
if err != nil {
return err
}
if err = t.validatePostState(newDB); err != nil {
return fmt.Errorf("post state validation failed: %v", err)
}
// Cross-check the snapshot-to-hash against the trie hash
if snapshotter {
if newChain.Snapshots() != nil {
if err := chain.Snapshots().Verify(chain.CurrentBlock().Root); err != nil {
return err
}
}
}
err = t.validateImportedHeaders(newChain, validBlocks)
if err != nil {
return err
}
}
return nil
}
func (t *BlockTest) genesis(config *params.ChainConfig) *core.Genesis {
return &core.Genesis{
Config: config,
Nonce: t.json.Genesis.Nonce.Uint64(),
Timestamp: t.json.Genesis.Timestamp,
ParentHash: t.json.Genesis.ParentHash,
ExtraData: t.json.Genesis.ExtraData,
GasLimit: t.json.Genesis.GasLimit,
GasUsed: t.json.Genesis.GasUsed,
Difficulty: t.json.Genesis.Difficulty,
Mixhash: t.json.Genesis.MixHash,
Coinbase: t.json.Genesis.Coinbase,
Alloc: t.json.Pre,
BaseFee: t.json.Genesis.BaseFeePerGas,
BlobGasUsed: t.json.Genesis.BlobGasUsed,
ExcessBlobGas: t.json.Genesis.ExcessBlobGas,
Config: config,
Nonce: t.json.Genesis.Nonce.Uint64(),
Timestamp: t.json.Genesis.Timestamp,
ParentHash: t.json.Genesis.ParentHash,
ExtraData: t.json.Genesis.ExtraData,
GasLimit: t.json.Genesis.GasLimit,
GasUsed: t.json.Genesis.GasUsed,
Difficulty: t.json.Genesis.Difficulty,
Mixhash: t.json.Genesis.MixHash,
Coinbase: t.json.Genesis.Coinbase,
Alloc: t.json.Pre,
BaseFee: t.json.Genesis.BaseFeePerGas,
BlobGasUsed: t.json.Genesis.BlobGasUsed,
ExcessBlobGas: t.json.Genesis.ExcessBlobGas,
BlockAccessListHash: t.json.Genesis.BlockAccessListHash,
}
}

View file

@ -0,0 +1,27 @@
; This file describes fixture build properties
[fixtures]
timestamp = 2025-09-15T15:45:15.672258
command_line_args = fill -c /Users/jwasinger/projects/execution-spec-tests/src/cli/pytest_commands/pytest_ini_files/pytest-fill.ini --rootdir . --until=Amsterdam tests/amsterdam
[packages]
pytest = 8.4.1
pluggy = 1.6.0
[plugins]
regex = 0.2.0
html = 4.1.1
xdist = 3.8.0
json-report = 1.5.0
metadata = 3.1.1
cov = 4.1.0
custom-report = 1.0.1
[tools]
eels resolutions = /Users/jwasinger/projects/execution-spec-tests/src/pytest_plugins/eels_resolutions.json
t8n = ethereum-spec-evm-resolver 0.0.5
[environment]
python = 3.11.13
platform = macOS-13.0-arm64-arm-64bit

View file

@ -0,0 +1,254 @@
{
"root_hash": "0x4bbf7997c738c7f87e8d08aa8d2c43bc51daf72c82b9f7b25d36791a6ef48638",
"created_at": "2025-09-15T15:45:17.133354",
"test_count": 30,
"forks": [
"Amsterdam"
],
"fixture_formats": [
"blockchain_test_engine",
"blockchain_test"
],
"test_cases": [
{
"id": "tests/amsterdam/eip7928_block_level_access_lists/test_block_access_lists_invalid.py::test_bal_invalid_complex_corruption[fork_Amsterdam-blockchain_test_engine]",
"fixture_hash": "0x91d4d6d462a761950c83d3b30a947b915cfabf6051078efe014ff8a216477045",
"fork": "Amsterdam",
"format": "blockchain_test_engine",
"pre_hash": null,
"json_path": "blockchain_tests_engine/amsterdam/eip7928_block_level_access_lists/block_access_lists_invalid/bal_invalid_complex_corruption.json"
},
{
"id": "tests/amsterdam/eip7928_block_level_access_lists/test_block_access_lists_invalid.py::test_bal_invalid_duplicate_account[fork_Amsterdam-blockchain_test_engine]",
"fixture_hash": "0x8cc2569fedefe68987b7e5e0490c56247dc140c742a4c1eef0876964e122123e",
"fork": "Amsterdam",
"format": "blockchain_test_engine",
"pre_hash": null,
"json_path": "blockchain_tests_engine/amsterdam/eip7928_block_level_access_lists/block_access_lists_invalid/bal_invalid_duplicate_account.json"
},
{
"id": "tests/amsterdam/eip7928_block_level_access_lists/test_block_access_lists_invalid.py::test_bal_invalid_tx_order[fork_Amsterdam-blockchain_test_engine]",
"fixture_hash": "0x3edf472ff5b91642ac66276c7993ba31fe53a08b289cd69d92386bf855e73f61",
"fork": "Amsterdam",
"format": "blockchain_test_engine",
"pre_hash": null,
"json_path": "blockchain_tests_engine/amsterdam/eip7928_block_level_access_lists/block_access_lists_invalid/bal_invalid_tx_order.json"
},
{
"id": "tests/amsterdam/eip7928_block_level_access_lists/test_block_access_lists_invalid.py::test_bal_invalid_missing_nonce[fork_Amsterdam-blockchain_test_engine]",
"fixture_hash": "0x14c40aa5f07c4252937bffad8a6f7ad196d4c00fddfccca2858fa9ebd00d9cbf",
"fork": "Amsterdam",
"format": "blockchain_test_engine",
"pre_hash": null,
"json_path": "blockchain_tests_engine/amsterdam/eip7928_block_level_access_lists/block_access_lists_invalid/bal_invalid_missing_nonce.json"
},
{
"id": "tests/amsterdam/eip7928_block_level_access_lists/test_block_access_lists_invalid.py::test_bal_invalid_balance_value[fork_Amsterdam-blockchain_test_engine]",
"fixture_hash": "0xb6c1da9621633f00db1bd84120283762f020bc0b0f1da9a5eef05c300a07d956",
"fork": "Amsterdam",
"format": "blockchain_test_engine",
"pre_hash": null,
"json_path": "blockchain_tests_engine/amsterdam/eip7928_block_level_access_lists/block_access_lists_invalid/bal_invalid_balance_value.json"
},
{
"id": "tests/amsterdam/eip7928_block_level_access_lists/test_block_access_lists_invalid.py::test_bal_invalid_storage_value[fork_Amsterdam-blockchain_test_engine]",
"fixture_hash": "0x60b01e4ace6086973fafdac8cab9cd114c08c9755b0b9894cb4a2a6e4c136fd4",
"fork": "Amsterdam",
"format": "blockchain_test_engine",
"pre_hash": null,
"json_path": "blockchain_tests_engine/amsterdam/eip7928_block_level_access_lists/block_access_lists_invalid/bal_invalid_storage_value.json"
},
{
"id": "tests/amsterdam/eip7928_block_level_access_lists/test_block_access_lists_invalid.py::test_bal_invalid_account[fork_Amsterdam-blockchain_test_engine]",
"fixture_hash": "0xd331e205d1f148f873f06bda9adb51245f1f7bb7280ca2274d7cc02c9ade8806",
"fork": "Amsterdam",
"format": "blockchain_test_engine",
"pre_hash": null,
"json_path": "blockchain_tests_engine/amsterdam/eip7928_block_level_access_lists/block_access_lists_invalid/bal_invalid_account.json"
},
{
"id": "tests/amsterdam/eip7928_block_level_access_lists/test_block_access_lists_invalid.py::test_bal_invalid_account_order[fork_Amsterdam-blockchain_test_engine]",
"fixture_hash": "0x909b911f94a6162ef47c46398c22852ebacfad49285bef18029b80cc027aac46",
"fork": "Amsterdam",
"format": "blockchain_test_engine",
"pre_hash": null,
"json_path": "blockchain_tests_engine/amsterdam/eip7928_block_level_access_lists/block_access_lists_invalid/bal_invalid_account_order.json"
},
{
"id": "tests/amsterdam/eip7928_block_level_access_lists/test_block_access_lists_invalid.py::test_bal_invalid_nonce_value[fork_Amsterdam-blockchain_test_engine]",
"fixture_hash": "0x186280e177e67f5b4de00e9037b64b5eeb27298a82960cc73a0dffa74fd01d4b",
"fork": "Amsterdam",
"format": "blockchain_test_engine",
"pre_hash": null,
"json_path": "blockchain_tests_engine/amsterdam/eip7928_block_level_access_lists/block_access_lists_invalid/bal_invalid_nonce_value.json"
},
{
"id": "tests/amsterdam/eip7928_block_level_access_lists/test_block_access_lists_invalid.py::test_bal_invalid_missing_account[fork_Amsterdam-blockchain_test_engine]",
"fixture_hash": "0xd20ae4fc80b9ab084830436f81446fa69cd75e483524fa7b4c686ac1ae726fbf",
"fork": "Amsterdam",
"format": "blockchain_test_engine",
"pre_hash": null,
"json_path": "blockchain_tests_engine/amsterdam/eip7928_block_level_access_lists/block_access_lists_invalid/bal_invalid_missing_account.json"
},
{
"id": "tests/amsterdam/eip7928_block_level_access_lists/test_block_access_lists.py::test_bal_code_changes[fork_Amsterdam-blockchain_test_engine]",
"fixture_hash": "0xd3784eba180519598f28c99a7702e75ef1f395da4a57522acdae9633a3f6222d",
"fork": "Amsterdam",
"format": "blockchain_test_engine",
"pre_hash": null,
"json_path": "blockchain_tests_engine/amsterdam/eip7928_block_level_access_lists/block_access_lists/bal_code_changes.json"
},
{
"id": "tests/amsterdam/eip7928_block_level_access_lists/test_block_access_lists.py::test_bal_storage_reads[fork_Amsterdam-blockchain_test_engine]",
"fixture_hash": "0xf4b38f2a673943b71e12d18102fef37769fc50e16ec6729bd1a4aaf21b38dd27",
"fork": "Amsterdam",
"format": "blockchain_test_engine",
"pre_hash": null,
"json_path": "blockchain_tests_engine/amsterdam/eip7928_block_level_access_lists/block_access_lists/bal_storage_reads.json"
},
{
"id": "tests/amsterdam/eip7928_block_level_access_lists/test_block_access_lists.py::test_bal_storage_writes[fork_Amsterdam-blockchain_test_engine]",
"fixture_hash": "0xa44a63948175b6311a68138cdd79c04728a7f08f8130273f96d7eae2766235c",
"fork": "Amsterdam",
"format": "blockchain_test_engine",
"pre_hash": null,
"json_path": "blockchain_tests_engine/amsterdam/eip7928_block_level_access_lists/block_access_lists/bal_storage_writes.json"
},
{
"id": "tests/amsterdam/eip7928_block_level_access_lists/test_block_access_lists.py::test_bal_balance_changes[fork_Amsterdam-blockchain_test_engine]",
"fixture_hash": "0xb06ec3b0851444b6519080364b182fec83be1a1b084a0445eaea846b2e095443",
"fork": "Amsterdam",
"format": "blockchain_test_engine",
"pre_hash": null,
"json_path": "blockchain_tests_engine/amsterdam/eip7928_block_level_access_lists/block_access_lists/bal_balance_changes.json"
},
{
"id": "tests/amsterdam/eip7928_block_level_access_lists/test_block_access_lists.py::test_bal_nonce_changes[fork_Amsterdam-blockchain_test_engine]",
"fixture_hash": "0x558fec2c1d0d7d544804011044c936bc378262d30b42a7c623c63825df54d10c",
"fork": "Amsterdam",
"format": "blockchain_test_engine",
"pre_hash": null,
"json_path": "blockchain_tests_engine/amsterdam/eip7928_block_level_access_lists/block_access_lists/bal_nonce_changes.json"
},
{
"id": "tests/amsterdam/eip7928_block_level_access_lists/test_block_access_lists_invalid.py::test_bal_invalid_complex_corruption[fork_Amsterdam-blockchain_test]",
"fixture_hash": "0x6cbd5ee299221511528f6916c5e6df0369cebd8dceba4225c8eb85dbd24d93ad",
"fork": "Amsterdam",
"format": "blockchain_test",
"pre_hash": null,
"json_path": "blockchain_tests/amsterdam/eip7928_block_level_access_lists/block_access_lists_invalid/bal_invalid_complex_corruption.json"
},
{
"id": "tests/amsterdam/eip7928_block_level_access_lists/test_block_access_lists_invalid.py::test_bal_invalid_duplicate_account[fork_Amsterdam-blockchain_test]",
"fixture_hash": "0x684d8557b425f5b73c8bd4573719f5c464519d9783d6b69059381b64415503a8",
"fork": "Amsterdam",
"format": "blockchain_test",
"pre_hash": null,
"json_path": "blockchain_tests/amsterdam/eip7928_block_level_access_lists/block_access_lists_invalid/bal_invalid_duplicate_account.json"
},
{
"id": "tests/amsterdam/eip7928_block_level_access_lists/test_block_access_lists_invalid.py::test_bal_invalid_tx_order[fork_Amsterdam-blockchain_test]",
"fixture_hash": "0xf177592b49c585371cb2129b65ce7d992ce9f443cac65e7db44bb268daaf281b",
"fork": "Amsterdam",
"format": "blockchain_test",
"pre_hash": null,
"json_path": "blockchain_tests/amsterdam/eip7928_block_level_access_lists/block_access_lists_invalid/bal_invalid_tx_order.json"
},
{
"id": "tests/amsterdam/eip7928_block_level_access_lists/test_block_access_lists_invalid.py::test_bal_invalid_missing_nonce[fork_Amsterdam-blockchain_test]",
"fixture_hash": "0x6602720a2523736926724a6dcb51520fafe13d36fc2c70fffeba8575d2511f57",
"fork": "Amsterdam",
"format": "blockchain_test",
"pre_hash": null,
"json_path": "blockchain_tests/amsterdam/eip7928_block_level_access_lists/block_access_lists_invalid/bal_invalid_missing_nonce.json"
},
{
"id": "tests/amsterdam/eip7928_block_level_access_lists/test_block_access_lists_invalid.py::test_bal_invalid_balance_value[fork_Amsterdam-blockchain_test]",
"fixture_hash": "0x5b8c21d88d684e37705f5af59acb6ecc5098c6e4526d172f7670e45f236bc3d9",
"fork": "Amsterdam",
"format": "blockchain_test",
"pre_hash": null,
"json_path": "blockchain_tests/amsterdam/eip7928_block_level_access_lists/block_access_lists_invalid/bal_invalid_balance_value.json"
},
{
"id": "tests/amsterdam/eip7928_block_level_access_lists/test_block_access_lists_invalid.py::test_bal_invalid_storage_value[fork_Amsterdam-blockchain_test]",
"fixture_hash": "0x85bea9663f1bf3b20cd80e6e1ca7bbf310ebe2da80cf87ff5b171f612fd05ed4",
"fork": "Amsterdam",
"format": "blockchain_test",
"pre_hash": null,
"json_path": "blockchain_tests/amsterdam/eip7928_block_level_access_lists/block_access_lists_invalid/bal_invalid_storage_value.json"
},
{
"id": "tests/amsterdam/eip7928_block_level_access_lists/test_block_access_lists_invalid.py::test_bal_invalid_account[fork_Amsterdam-blockchain_test]",
"fixture_hash": "0xf254712914d6689c42f84940edf1e4eedb621f172f9c5fd25e5a36ca3d7bab57",
"fork": "Amsterdam",
"format": "blockchain_test",
"pre_hash": null,
"json_path": "blockchain_tests/amsterdam/eip7928_block_level_access_lists/block_access_lists_invalid/bal_invalid_account.json"
},
{
"id": "tests/amsterdam/eip7928_block_level_access_lists/test_block_access_lists_invalid.py::test_bal_invalid_account_order[fork_Amsterdam-blockchain_test]",
"fixture_hash": "0x58d6e7bebe449d438cf216123b8850a7d20958a41cc11493807b768d18f1608e",
"fork": "Amsterdam",
"format": "blockchain_test",
"pre_hash": null,
"json_path": "blockchain_tests/amsterdam/eip7928_block_level_access_lists/block_access_lists_invalid/bal_invalid_account_order.json"
},
{
"id": "tests/amsterdam/eip7928_block_level_access_lists/test_block_access_lists_invalid.py::test_bal_invalid_nonce_value[fork_Amsterdam-blockchain_test]",
"fixture_hash": "0x7e5909fd80c7352b970fde17619b15627c822798ac7dbe712465b484771a754b",
"fork": "Amsterdam",
"format": "blockchain_test",
"pre_hash": null,
"json_path": "blockchain_tests/amsterdam/eip7928_block_level_access_lists/block_access_lists_invalid/bal_invalid_nonce_value.json"
},
{
"id": "tests/amsterdam/eip7928_block_level_access_lists/test_block_access_lists_invalid.py::test_bal_invalid_missing_account[fork_Amsterdam-blockchain_test]",
"fixture_hash": "0x52443eb0df938af8ec0f007e8e54c05004a1cf3e04bace7d666b44c2990779b8",
"fork": "Amsterdam",
"format": "blockchain_test",
"pre_hash": null,
"json_path": "blockchain_tests/amsterdam/eip7928_block_level_access_lists/block_access_lists_invalid/bal_invalid_missing_account.json"
},
{
"id": "tests/amsterdam/eip7928_block_level_access_lists/test_block_access_lists.py::test_bal_code_changes[fork_Amsterdam-blockchain_test]",
"fixture_hash": "0x47353aad1d1868a5a1d7acc4251cd26bef11ac90ff64c0ba63f19800224674c9",
"fork": "Amsterdam",
"format": "blockchain_test",
"pre_hash": null,
"json_path": "blockchain_tests/amsterdam/eip7928_block_level_access_lists/block_access_lists/bal_code_changes.json"
},
{
"id": "tests/amsterdam/eip7928_block_level_access_lists/test_block_access_lists.py::test_bal_storage_reads[fork_Amsterdam-blockchain_test]",
"fixture_hash": "0x89cf1419f76bd33195b33bd5b3012a8216f6c86207f378cecb9e461be2432a9a",
"fork": "Amsterdam",
"format": "blockchain_test",
"pre_hash": null,
"json_path": "blockchain_tests/amsterdam/eip7928_block_level_access_lists/block_access_lists/bal_storage_reads.json"
},
{
"id": "tests/amsterdam/eip7928_block_level_access_lists/test_block_access_lists.py::test_bal_storage_writes[fork_Amsterdam-blockchain_test]",
"fixture_hash": "0x4b103ea30781985f7e1e8bde58b5957499348aeed901f46088ba6d7c3eb9842a",
"fork": "Amsterdam",
"format": "blockchain_test",
"pre_hash": null,
"json_path": "blockchain_tests/amsterdam/eip7928_block_level_access_lists/block_access_lists/bal_storage_writes.json"
},
{
"id": "tests/amsterdam/eip7928_block_level_access_lists/test_block_access_lists.py::test_bal_balance_changes[fork_Amsterdam-blockchain_test]",
"fixture_hash": "0xbb44eb925afd3e3ea06255284950d73820364c366d475a5dc6d040fc8624daa9",
"fork": "Amsterdam",
"format": "blockchain_test",
"pre_hash": null,
"json_path": "blockchain_tests/amsterdam/eip7928_block_level_access_lists/block_access_lists/bal_balance_changes.json"
},
{
"id": "tests/amsterdam/eip7928_block_level_access_lists/test_block_access_lists.py::test_bal_nonce_changes[fork_Amsterdam-blockchain_test]",
"fixture_hash": "0xb23fd1678ba8f66079f4dd4b3481817f3322562cc3c60aa4d1278d932cbf1165",
"fork": "Amsterdam",
"format": "blockchain_test",
"pre_hash": null,
"json_path": "blockchain_tests/amsterdam/eip7928_block_level_access_lists/block_access_lists/bal_nonce_changes.json"
}
]
}

View file

@ -38,6 +38,7 @@ func (b btHeader) MarshalJSON() ([]byte, error) {
BlobGasUsed *math.HexOrDecimal64
ExcessBlobGas *math.HexOrDecimal64
ParentBeaconBlockRoot *common.Hash
BlockAccessListHash *common.Hash
}
var enc btHeader
enc.Bloom = b.Bloom
@ -61,6 +62,7 @@ func (b btHeader) MarshalJSON() ([]byte, error) {
enc.BlobGasUsed = (*math.HexOrDecimal64)(b.BlobGasUsed)
enc.ExcessBlobGas = (*math.HexOrDecimal64)(b.ExcessBlobGas)
enc.ParentBeaconBlockRoot = b.ParentBeaconBlockRoot
enc.BlockAccessListHash = b.BlockAccessListHash
return json.Marshal(&enc)
}
@ -88,6 +90,7 @@ func (b *btHeader) UnmarshalJSON(input []byte) error {
BlobGasUsed *math.HexOrDecimal64
ExcessBlobGas *math.HexOrDecimal64
ParentBeaconBlockRoot *common.Hash
BlockAccessListHash *common.Hash
}
var dec btHeader
if err := json.Unmarshal(input, &dec); err != nil {
@ -156,5 +159,8 @@ func (b *btHeader) UnmarshalJSON(input []byte) error {
if dec.ParentBeaconBlockRoot != nil {
b.ParentBeaconBlockRoot = dec.ParentBeaconBlockRoot
}
if dec.BlockAccessListHash != nil {
b.BlockAccessListHash = dec.BlockAccessListHash
}
return nil
}

View file

@ -493,6 +493,38 @@ var Forks = map[string]*params.ChainConfig{
BPO1: bpo1BlobConfig,
},
},
"Amsterdam": {
ChainID: big.NewInt(1),
HomesteadBlock: big.NewInt(0),
EIP150Block: big.NewInt(0),
EIP155Block: big.NewInt(0),
EIP158Block: big.NewInt(0),
ByzantiumBlock: big.NewInt(0),
ConstantinopleBlock: big.NewInt(0),
PetersburgBlock: big.NewInt(0),
IstanbulBlock: big.NewInt(0),
MuirGlacierBlock: big.NewInt(0),
BerlinBlock: big.NewInt(0),
LondonBlock: big.NewInt(0),
ArrowGlacierBlock: big.NewInt(0),
MergeNetsplitBlock: big.NewInt(0),
TerminalTotalDifficulty: big.NewInt(0),
ShanghaiTime: u64(0),
CancunTime: u64(0),
PragueTime: u64(0),
OsakaTime: u64(0),
BPO1Time: u64(0),
BPO2Time: u64(0),
AmsterdamTime: u64(0),
DepositContractAddress: params.MainnetChainConfig.DepositContractAddress,
BlobScheduleConfig: &params.BlobScheduleConfig{
Cancun: params.DefaultCancunBlobConfig,
Prague: params.DefaultPragueBlobConfig,
Osaka: params.DefaultOsakaBlobConfig,
BPO1: bpo1BlobConfig,
BPO2: bpo2BlobConfig,
},
},
"OsakaToBPO1AtTime15k": {
ChainID: big.NewInt(1),
HomesteadBlock: big.NewInt(0),

View file

@ -210,6 +210,29 @@ func (t *StateTrie) UpdateStorage(_ common.Address, key, value []byte) error {
return nil
}
// UpdateStorageBatch attempts to update a list storages in the batch manner.
func (t *StateTrie) UpdateStorageBatch(_ common.Address, keys [][]byte, values [][]byte) error {
var (
hkeys = make([][]byte, 0, len(keys))
evals = make([][]byte, 0, len(values))
)
for _, key := range keys {
hk := crypto.Keccak256(key)
if t.preimages != nil {
t.secKeyCache[common.Hash(hk)] = key
}
hkeys = append(hkeys, hk)
}
for _, val := range values {
data, err := rlp.EncodeToBytes(val)
if err != nil {
return err
}
evals = append(evals, data)
}
return t.trie.UpdateBatch(hkeys, evals)
}
// UpdateAccount will abstract the write of an account to the secure trie.
func (t *StateTrie) UpdateAccount(address common.Address, acc *types.StateAccount, _ int) error {
hk := crypto.Keccak256(address.Bytes())
@ -226,6 +249,29 @@ func (t *StateTrie) UpdateAccount(address common.Address, acc *types.StateAccoun
return nil
}
// UpdateAccountBatch attempts to update a list accounts in the batch manner.
func (t *StateTrie) UpdateAccountBatch(addresses []common.Address, accounts []*types.StateAccount, _ []int) error {
var (
hkeys = make([][]byte, 0, len(addresses))
values = make([][]byte, 0, len(accounts))
)
for _, addr := range addresses {
hk := crypto.Keccak256(addr.Bytes())
if t.preimages != nil {
t.secKeyCache[common.Hash(hk)] = addr.Bytes()
}
hkeys = append(hkeys, hk)
}
for _, acc := range accounts {
data, err := rlp.EncodeToBytes(acc)
if err != nil {
return err
}
values = append(values, data)
}
return t.trie.UpdateBatch(hkeys, values)
}
func (t *StateTrie) UpdateContractCode(_ common.Address, _ common.Hash, _ []byte) error {
return nil
}

View file

@ -33,12 +33,10 @@ import (
// while the latter is inserted/deleted in order to follow the rule of trie.
// This tool can track all of them no matter the node is embedded in its
// parent or not, but valueNode is never tracked.
//
// Note opTracer is not thread-safe, callers should be responsible for handling
// the concurrency issues by themselves.
type opTracer struct {
inserts map[string]struct{}
deletes map[string]struct{}
lock sync.RWMutex
}
// newOpTracer initializes the tracer for capturing trie changes.
@ -53,6 +51,9 @@ func newOpTracer() *opTracer {
// in the deletion set (resurrected node), then just wipe it from
// the deletion set as it's "untouched".
func (t *opTracer) onInsert(path []byte) {
t.lock.Lock()
defer t.lock.Unlock()
if _, present := t.deletes[string(path)]; present {
delete(t.deletes, string(path))
return
@ -64,6 +65,9 @@ func (t *opTracer) onInsert(path []byte) {
// in the addition set, then just wipe it from the addition set
// as it's untouched.
func (t *opTracer) onDelete(path []byte) {
t.lock.Lock()
defer t.lock.Unlock()
if _, present := t.inserts[string(path)]; present {
delete(t.inserts, string(path))
return
@ -73,12 +77,18 @@ func (t *opTracer) onDelete(path []byte) {
// reset clears the content tracked by tracer.
func (t *opTracer) reset() {
t.lock.Lock()
defer t.lock.Unlock()
clear(t.inserts)
clear(t.deletes)
}
// copy returns a deep copied tracer instance.
func (t *opTracer) copy() *opTracer {
t.lock.RLock()
defer t.lock.RUnlock()
return &opTracer{
inserts: maps.Clone(t.inserts),
deletes: maps.Clone(t.deletes),
@ -87,6 +97,9 @@ func (t *opTracer) copy() *opTracer {
// deletedList returns a list of node paths which are deleted from the trie.
func (t *opTracer) deletedList() [][]byte {
t.lock.RLock()
defer t.lock.RUnlock()
paths := make([][]byte, 0, len(t.deletes))
for path := range t.deletes {
paths = append(paths, []byte(path))

View file

@ -45,6 +45,14 @@ func NewTransitionTrie(base *SecureTrie, overlay *VerkleTrie, st bool) *Transiti
}
}
func (t *TransitionTrie) UpdateAccountBatch(addresses []common.Address, accounts []*types.StateAccount, _ []int) error {
panic("not implemented")
}
func (t *TransitionTrie) UpdateStorageBatch(_ common.Address, keys [][]byte, values [][]byte) error {
panic("not implemented")
}
// Base returns the base trie.
func (t *TransitionTrie) Base() *SecureTrie {
return t.base

Some files were not shown because too many files have changed in this diff Show more