all: all block access list changes up through devnet 1 including perf changes

This commit is contained in:
Jared Wasinger 2025-09-29 14:44:19 +08:00
parent 181a3ae9e0
commit 4c36eaf532
77 changed files with 4637 additions and 1098 deletions

View file

@ -10,6 +10,7 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/types/bal"
)
var _ = (*executableDataMarshaling)(nil)
@ -17,23 +18,24 @@ var _ = (*executableDataMarshaling)(nil)
// MarshalJSON marshals as JSON.
func (e ExecutableData) MarshalJSON() ([]byte, error) {
type ExecutableData struct {
ParentHash common.Hash `json:"parentHash" gencodec:"required"`
FeeRecipient common.Address `json:"feeRecipient" gencodec:"required"`
StateRoot common.Hash `json:"stateRoot" gencodec:"required"`
ReceiptsRoot common.Hash `json:"receiptsRoot" gencodec:"required"`
LogsBloom hexutil.Bytes `json:"logsBloom" gencodec:"required"`
Random common.Hash `json:"prevRandao" gencodec:"required"`
Number hexutil.Uint64 `json:"blockNumber" gencodec:"required"`
GasLimit hexutil.Uint64 `json:"gasLimit" gencodec:"required"`
GasUsed hexutil.Uint64 `json:"gasUsed" gencodec:"required"`
Timestamp hexutil.Uint64 `json:"timestamp" gencodec:"required"`
ExtraData hexutil.Bytes `json:"extraData" gencodec:"required"`
BaseFeePerGas *hexutil.Big `json:"baseFeePerGas" gencodec:"required"`
BlockHash common.Hash `json:"blockHash" gencodec:"required"`
Transactions []hexutil.Bytes `json:"transactions" gencodec:"required"`
Withdrawals []*types.Withdrawal `json:"withdrawals"`
BlobGasUsed *hexutil.Uint64 `json:"blobGasUsed"`
ExcessBlobGas *hexutil.Uint64 `json:"excessBlobGas"`
ParentHash common.Hash `json:"parentHash" gencodec:"required"`
FeeRecipient common.Address `json:"feeRecipient" gencodec:"required"`
StateRoot common.Hash `json:"stateRoot" gencodec:"required"`
ReceiptsRoot common.Hash `json:"receiptsRoot" gencodec:"required"`
LogsBloom hexutil.Bytes `json:"logsBloom" gencodec:"required"`
Random common.Hash `json:"prevRandao" gencodec:"required"`
Number hexutil.Uint64 `json:"blockNumber" gencodec:"required"`
GasLimit hexutil.Uint64 `json:"gasLimit" gencodec:"required"`
GasUsed hexutil.Uint64 `json:"gasUsed" gencodec:"required"`
Timestamp hexutil.Uint64 `json:"timestamp" gencodec:"required"`
ExtraData hexutil.Bytes `json:"extraData" gencodec:"required"`
BaseFeePerGas *hexutil.Big `json:"baseFeePerGas" gencodec:"required"`
BlockHash common.Hash `json:"blockHash" gencodec:"required"`
Transactions []hexutil.Bytes `json:"transactions" gencodec:"required"`
Withdrawals []*types.Withdrawal `json:"withdrawals"`
BlobGasUsed *hexutil.Uint64 `json:"blobGasUsed"`
ExcessBlobGas *hexutil.Uint64 `json:"excessBlobGas"`
BlockAccessList *bal.BlockAccessList `json:"blockAccessList"`
}
var enc ExecutableData
enc.ParentHash = e.ParentHash
@ -58,29 +60,31 @@ func (e ExecutableData) MarshalJSON() ([]byte, error) {
enc.Withdrawals = e.Withdrawals
enc.BlobGasUsed = (*hexutil.Uint64)(e.BlobGasUsed)
enc.ExcessBlobGas = (*hexutil.Uint64)(e.ExcessBlobGas)
enc.BlockAccessList = e.BlockAccessList
return json.Marshal(&enc)
}
// UnmarshalJSON unmarshals from JSON.
func (e *ExecutableData) UnmarshalJSON(input []byte) error {
type ExecutableData struct {
ParentHash *common.Hash `json:"parentHash" gencodec:"required"`
FeeRecipient *common.Address `json:"feeRecipient" gencodec:"required"`
StateRoot *common.Hash `json:"stateRoot" gencodec:"required"`
ReceiptsRoot *common.Hash `json:"receiptsRoot" gencodec:"required"`
LogsBloom *hexutil.Bytes `json:"logsBloom" gencodec:"required"`
Random *common.Hash `json:"prevRandao" gencodec:"required"`
Number *hexutil.Uint64 `json:"blockNumber" gencodec:"required"`
GasLimit *hexutil.Uint64 `json:"gasLimit" gencodec:"required"`
GasUsed *hexutil.Uint64 `json:"gasUsed" gencodec:"required"`
Timestamp *hexutil.Uint64 `json:"timestamp" gencodec:"required"`
ExtraData *hexutil.Bytes `json:"extraData" gencodec:"required"`
BaseFeePerGas *hexutil.Big `json:"baseFeePerGas" gencodec:"required"`
BlockHash *common.Hash `json:"blockHash" gencodec:"required"`
Transactions []hexutil.Bytes `json:"transactions" gencodec:"required"`
Withdrawals []*types.Withdrawal `json:"withdrawals"`
BlobGasUsed *hexutil.Uint64 `json:"blobGasUsed"`
ExcessBlobGas *hexutil.Uint64 `json:"excessBlobGas"`
ParentHash *common.Hash `json:"parentHash" gencodec:"required"`
FeeRecipient *common.Address `json:"feeRecipient" gencodec:"required"`
StateRoot *common.Hash `json:"stateRoot" gencodec:"required"`
ReceiptsRoot *common.Hash `json:"receiptsRoot" gencodec:"required"`
LogsBloom *hexutil.Bytes `json:"logsBloom" gencodec:"required"`
Random *common.Hash `json:"prevRandao" gencodec:"required"`
Number *hexutil.Uint64 `json:"blockNumber" gencodec:"required"`
GasLimit *hexutil.Uint64 `json:"gasLimit" gencodec:"required"`
GasUsed *hexutil.Uint64 `json:"gasUsed" gencodec:"required"`
Timestamp *hexutil.Uint64 `json:"timestamp" gencodec:"required"`
ExtraData *hexutil.Bytes `json:"extraData" gencodec:"required"`
BaseFeePerGas *hexutil.Big `json:"baseFeePerGas" gencodec:"required"`
BlockHash *common.Hash `json:"blockHash" gencodec:"required"`
Transactions []hexutil.Bytes `json:"transactions" gencodec:"required"`
Withdrawals []*types.Withdrawal `json:"withdrawals"`
BlobGasUsed *hexutil.Uint64 `json:"blobGasUsed"`
ExcessBlobGas *hexutil.Uint64 `json:"excessBlobGas"`
BlockAccessList *bal.BlockAccessList `json:"blockAccessList"`
}
var dec ExecutableData
if err := json.Unmarshal(input, &dec); err != nil {
@ -154,5 +158,8 @@ func (e *ExecutableData) UnmarshalJSON(input []byte) error {
if dec.ExcessBlobGas != nil {
e.ExcessBlobGas = (*uint64)(dec.ExcessBlobGas)
}
if dec.BlockAccessList != nil {
e.BlockAccessList = dec.BlockAccessList
}
return nil
}

View file

@ -18,6 +18,7 @@ package engine
import (
"fmt"
"github.com/ethereum/go-ethereum/core/types/bal"
"math/big"
"slices"
@ -50,6 +51,7 @@ var (
// ExecutionPayloadV3 has the syntax of ExecutionPayloadV2 and appends the new
// fields: blobGasUsed and excessBlobGas.
PayloadV3 PayloadVersion = 0x3
PayloadV4 PayloadVersion = 0x4
)
//go:generate go run github.com/fjl/gencodec -type PayloadAttributes -field-override payloadAttributesMarshaling -out gen_blockparams.go
@ -73,23 +75,24 @@ type payloadAttributesMarshaling struct {
// ExecutableData is the data necessary to execute an EL payload.
type ExecutableData struct {
ParentHash common.Hash `json:"parentHash" gencodec:"required"`
FeeRecipient common.Address `json:"feeRecipient" gencodec:"required"`
StateRoot common.Hash `json:"stateRoot" gencodec:"required"`
ReceiptsRoot common.Hash `json:"receiptsRoot" gencodec:"required"`
LogsBloom []byte `json:"logsBloom" gencodec:"required"`
Random common.Hash `json:"prevRandao" gencodec:"required"`
Number uint64 `json:"blockNumber" gencodec:"required"`
GasLimit uint64 `json:"gasLimit" gencodec:"required"`
GasUsed uint64 `json:"gasUsed" gencodec:"required"`
Timestamp uint64 `json:"timestamp" gencodec:"required"`
ExtraData []byte `json:"extraData" gencodec:"required"`
BaseFeePerGas *big.Int `json:"baseFeePerGas" gencodec:"required"`
BlockHash common.Hash `json:"blockHash" gencodec:"required"`
Transactions [][]byte `json:"transactions" gencodec:"required"`
Withdrawals []*types.Withdrawal `json:"withdrawals"`
BlobGasUsed *uint64 `json:"blobGasUsed"`
ExcessBlobGas *uint64 `json:"excessBlobGas"`
ParentHash common.Hash `json:"parentHash" gencodec:"required"`
FeeRecipient common.Address `json:"feeRecipient" gencodec:"required"`
StateRoot common.Hash `json:"stateRoot" gencodec:"required"`
ReceiptsRoot common.Hash `json:"receiptsRoot" gencodec:"required"`
LogsBloom []byte `json:"logsBloom" gencodec:"required"`
Random common.Hash `json:"prevRandao" gencodec:"required"`
Number uint64 `json:"blockNumber" gencodec:"required"`
GasLimit uint64 `json:"gasLimit" gencodec:"required"`
GasUsed uint64 `json:"gasUsed" gencodec:"required"`
Timestamp uint64 `json:"timestamp" gencodec:"required"`
ExtraData []byte `json:"extraData" gencodec:"required"`
BaseFeePerGas *big.Int `json:"baseFeePerGas" gencodec:"required"`
BlockHash common.Hash `json:"blockHash" gencodec:"required"`
Transactions [][]byte `json:"transactions" gencodec:"required"`
Withdrawals []*types.Withdrawal `json:"withdrawals"`
BlobGasUsed *uint64 `json:"blobGasUsed"`
ExcessBlobGas *uint64 `json:"excessBlobGas"`
BlockAccessList *bal.BlockAccessList `json:"blockAccessList"`
}
// JSON type overrides for executableData.
@ -292,54 +295,62 @@ func ExecutableDataToBlockNoHash(data ExecutableData, versionedHashes []common.H
requestsHash = &h
}
header := &types.Header{
ParentHash: data.ParentHash,
UncleHash: types.EmptyUncleHash,
Coinbase: data.FeeRecipient,
Root: data.StateRoot,
TxHash: types.DeriveSha(types.Transactions(txs), trie.NewStackTrie(nil)),
ReceiptHash: data.ReceiptsRoot,
Bloom: types.BytesToBloom(data.LogsBloom),
Difficulty: common.Big0,
Number: new(big.Int).SetUint64(data.Number),
GasLimit: data.GasLimit,
GasUsed: data.GasUsed,
Time: data.Timestamp,
BaseFee: data.BaseFeePerGas,
Extra: data.ExtraData,
MixDigest: data.Random,
WithdrawalsHash: withdrawalsRoot,
ExcessBlobGas: data.ExcessBlobGas,
BlobGasUsed: data.BlobGasUsed,
ParentBeaconRoot: beaconRoot,
RequestsHash: requestsHash,
var blockAccessListHash *common.Hash
body := types.Body{Transactions: txs, Uncles: nil, Withdrawals: data.Withdrawals}
if data.BlockAccessList != nil {
body.AccessList = data.BlockAccessList
balHash := data.BlockAccessList.Hash()
blockAccessListHash = &balHash
}
return types.NewBlockWithHeader(header).
WithBody(types.Body{Transactions: txs, Uncles: nil, Withdrawals: data.Withdrawals}),
nil
header := &types.Header{
ParentHash: data.ParentHash,
UncleHash: types.EmptyUncleHash,
Coinbase: data.FeeRecipient,
Root: data.StateRoot,
TxHash: types.DeriveSha(types.Transactions(txs), trie.NewStackTrie(nil)),
ReceiptHash: data.ReceiptsRoot,
Bloom: types.BytesToBloom(data.LogsBloom),
Difficulty: common.Big0,
Number: new(big.Int).SetUint64(data.Number),
GasLimit: data.GasLimit,
GasUsed: data.GasUsed,
Time: data.Timestamp,
BaseFee: data.BaseFeePerGas,
Extra: data.ExtraData,
MixDigest: data.Random,
WithdrawalsHash: withdrawalsRoot,
ExcessBlobGas: data.ExcessBlobGas,
BlobGasUsed: data.BlobGasUsed,
ParentBeaconRoot: beaconRoot,
RequestsHash: requestsHash,
BlockAccessListHash: blockAccessListHash,
}
return types.NewBlockWithHeader(header).WithBody(body), nil
}
// BlockToExecutableData constructs the ExecutableData structure by filling the
// fields from the given block. It assumes the given block is post-merge block.
func BlockToExecutableData(block *types.Block, fees *big.Int, sidecars []*types.BlobTxSidecar, requests [][]byte) *ExecutionPayloadEnvelope {
data := &ExecutableData{
BlockHash: block.Hash(),
ParentHash: block.ParentHash(),
FeeRecipient: block.Coinbase(),
StateRoot: block.Root(),
Number: block.NumberU64(),
GasLimit: block.GasLimit(),
GasUsed: block.GasUsed(),
BaseFeePerGas: block.BaseFee(),
Timestamp: block.Time(),
ReceiptsRoot: block.ReceiptHash(),
LogsBloom: block.Bloom().Bytes(),
Transactions: encodeTransactions(block.Transactions()),
Random: block.MixDigest(),
ExtraData: block.Extra(),
Withdrawals: block.Withdrawals(),
BlobGasUsed: block.BlobGasUsed(),
ExcessBlobGas: block.ExcessBlobGas(),
BlockHash: block.Hash(),
ParentHash: block.ParentHash(),
FeeRecipient: block.Coinbase(),
StateRoot: block.Root(),
Number: block.NumberU64(),
GasLimit: block.GasLimit(),
GasUsed: block.GasUsed(),
BaseFeePerGas: block.BaseFee(),
Timestamp: block.Time(),
ReceiptsRoot: block.ReceiptHash(),
LogsBloom: block.Bloom().Bytes(),
Transactions: encodeTransactions(block.Transactions()),
Random: block.MixDigest(),
ExtraData: block.Extra(),
Withdrawals: block.Withdrawals(),
BlobGasUsed: block.BlobGasUsed(),
ExcessBlobGas: block.ExcessBlobGas(),
BlockAccessList: block.Body().AccessList,
}
// Add blobs.

View file

@ -5,6 +5,11 @@
# https://github.com/ethereum/execution-spec-tests/releases/download/v5.1.0
a3192784375acec7eaec492799d5c5d0c47a2909a3cc40178898e4ecd20cc416 fixtures_develop.tar.gz
# version:spec-tests-bal v3.0.1
# https://github.com/ethereum/execution-spec-tests/releases
# https://github.com/ethereum/execution-spec-tests/releases/download/bal%40v3.0.1
57d0f109f0557ec33d6ecd6cbd77b55415a658aefe583c4035157e1021ae512a fixtures_bal.tar.gz
# version:golang 1.25.1
# https://go.dev/dl/
d010c109cee94d80efe681eab46bdea491ac906bf46583c32e9f0dbb0bd1a594 go1.25.1.src.tar.gz

View file

@ -174,6 +174,9 @@ var (
// This is where the tests should be unpacked.
executionSpecTestsDir = "tests/spec-tests"
// This is where the bal-specific release of the tests should be unpacked.
executionSpecTestsBALDir = "tests/spec-tests-bal"
)
var GOBIN, _ = filepath.Abs(filepath.Join("build", "bin"))
@ -382,6 +385,7 @@ func doTest(cmdline []string) {
// Get test fixtures.
if !*short {
downloadSpecTestFixtures(csdb, *cachedir)
downloadBALSpecTestFixtures(csdb, *cachedir)
}
// Configure the toolchain.
@ -447,6 +451,19 @@ func downloadSpecTestFixtures(csdb *download.ChecksumDB, cachedir string) string
return filepath.Join(cachedir, base)
}
// downloadBALSpecTestFixtures fetches the BAL-specific execution-spec-tests
// fixture archive (verified against the checksum database) and unpacks it
// into executionSpecTestsBALDir. It returns the cached archive path with the
// extension stripped.
func downloadBALSpecTestFixtures(csdb *download.ChecksumDB, cachedir string) string {
	const (
		base = "fixtures_bal"
		ext  = ".tar.gz"
	)
	archive := filepath.Join(cachedir, base+ext)
	if err := csdb.DownloadFileFromKnownURL(archive); err != nil {
		log.Fatal(err)
	}
	if err := build.ExtractArchive(archive, executionSpecTestsBALDir); err != nil {
		log.Fatal(err)
	}
	return filepath.Join(cachedir, base)
}
// doCheckGenerate ensures that re-generating generated files does not cause
// any mutations in the source file tree.
func doCheckGenerate() {

View file

@ -20,6 +20,7 @@ import (
"bufio"
"encoding/json"
"fmt"
"github.com/ethereum/go-ethereum/cmd/utils"
"maps"
"os"
"regexp"
@ -117,7 +118,7 @@ func runBlockTest(ctx *cli.Context, fname string) ([]testResult, error) {
test := tests[name]
result := &testResult{Name: name, Pass: true}
var finalRoot *common.Hash
if err := test.Run(false, rawdb.PathScheme, ctx.Bool(WitnessCrossCheckFlag.Name), tracer, func(res error, chain *core.BlockChain) {
if err := test.Run(false, rawdb.PathScheme, ctx.Bool(WitnessCrossCheckFlag.Name), ctx.Bool(utils.ExperimentalBALFlag.Name), tracer, func(res error, chain *core.BlockChain) {
if ctx.Bool(DumpFlag.Name) {
if s, _ := chain.State(); s != nil {
result.State = dump(s)

View file

@ -160,6 +160,7 @@ var (
utils.BeaconCheckpointFlag,
utils.BeaconCheckpointFileFlag,
utils.LogSlowBlockFlag,
utils.ExperimentalBALFlag,
}, utils.NetworkFlags, utils.DatabaseFlags)
rpcFlags = []cli.Flag{

View file

@ -1042,6 +1042,14 @@ Please note that --` + MetricsHTTPFlag.Name + ` must be set to start the server.
Value: metrics.DefaultConfig.InfluxDBOrganization,
Category: flags.MetricsCategory,
}
// Block Access List flags
ExperimentalBALFlag = &cli.BoolFlag{
Name: "experimental.bal",
Usage: "Enable generation of EIP-7928 block access lists when importing post-Cancun blocks which lack them. When this flag is specified, importing blocks containing access lists triggers validation of their correctness and execution based off them. The header block access list field is not set with blocks created when this flag is specified, nor is it validated when importing blocks that contain access lists. This is used for development purposes only. Do not enable it otherwise.",
Category: flags.MiscCategory,
}
)
var (
@ -1982,6 +1990,8 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) {
cfg.VMTraceJsonConfig = ctx.String(VMTraceJsonConfigFlag.Name)
}
}
cfg.ExperimentalBAL = ctx.Bool(ExperimentalBALFlag.Name)
}
// MakeBeaconLightConfig constructs a beacon light client config based on the
@ -2391,6 +2401,7 @@ func MakeChain(ctx *cli.Context, stack *node.Node, readonly bool) (*core.BlockCh
}
options.VmConfig = vmcfg
options.EnableBALForTesting = ctx.Bool(ExperimentalBALFlag.Name)
chain, err := core.NewBlockChain(chainDb, gspec, engine, options)
if err != nil {
Fatalf("Can't create BlockChain: %v", err)

View file

@ -272,6 +272,9 @@ func (beacon *Beacon) verifyHeader(chain consensus.ChainHeaderReader, header, pa
return err
}
}
if chain.Config().IsAmsterdam(header.Number, header.Time) && header.BlockAccessListHash == nil {
return fmt.Errorf("block access list hash must be set post-Amsterdam")
}
return nil
}
@ -333,6 +336,9 @@ func (beacon *Beacon) Finalize(chain consensus.ChainHeaderReader, header *types.
}
// Withdrawals processing.
for _, w := range body.Withdrawals {
// Always read the target account, regardless of the withdrawal amount, so
// that it is included in the block access list (BAL).
state.GetBalance(w.Address)
// Convert amount from gwei to wei.
amount := new(uint256.Int).SetUint64(w.Amount)
amount = amount.Mul(amount, uint256.NewInt(params.GWei))
@ -343,9 +349,9 @@ func (beacon *Beacon) Finalize(chain consensus.ChainHeaderReader, header *types.
// FinalizeAndAssemble implements consensus.Engine, setting the final state and
// assembling the block.
func (beacon *Beacon) FinalizeAndAssemble(chain consensus.ChainHeaderReader, header *types.Header, state *state.StateDB, body *types.Body, receipts []*types.Receipt) (*types.Block, error) {
func (beacon *Beacon) FinalizeAndAssemble(chain consensus.ChainHeaderReader, header *types.Header, state *state.StateDB, body *types.Body, receipts []*types.Receipt, onFinalization func()) (*types.Block, error) {
if !beacon.IsPoSHeader(header) {
return beacon.ethone.FinalizeAndAssemble(chain, header, state, body, receipts)
return beacon.ethone.FinalizeAndAssemble(chain, header, state, body, receipts, onFinalization)
}
shanghai := chain.Config().IsShanghai(header.Number, header.Time)
if shanghai {
@ -358,12 +364,17 @@ func (beacon *Beacon) FinalizeAndAssemble(chain consensus.ChainHeaderReader, hea
return nil, errors.New("withdrawals set before Shanghai activation")
}
}
// Finalize and assemble the block.
beacon.Finalize(chain, header, state, body)
// Assign the final state root to header.
header.Root = state.IntermediateRoot(true)
if onFinalization != nil {
onFinalization()
}
// Assemble the final block.
return types.NewBlock(header, body, receipts, trie.NewStackTrie(nil)), nil
}

View file

@ -579,7 +579,7 @@ func (c *Clique) Finalize(chain consensus.ChainHeaderReader, header *types.Heade
// FinalizeAndAssemble implements consensus.Engine, ensuring no uncles are set,
// nor block rewards given, and returns the final block.
func (c *Clique) FinalizeAndAssemble(chain consensus.ChainHeaderReader, header *types.Header, state *state.StateDB, body *types.Body, receipts []*types.Receipt) (*types.Block, error) {
func (c *Clique) FinalizeAndAssemble(chain consensus.ChainHeaderReader, header *types.Header, state *state.StateDB, body *types.Body, receipts []*types.Receipt, onFinalize func()) (*types.Block, error) {
if len(body.Withdrawals) > 0 {
return nil, errors.New("clique does not support withdrawals")
}
@ -589,6 +589,10 @@ func (c *Clique) FinalizeAndAssemble(chain consensus.ChainHeaderReader, header *
// Assign the final state root to header.
header.Root = state.IntermediateRoot(chain.Config().IsEIP158(header.Number))
if onFinalize != nil {
onFinalize()
}
// Assemble and return the final block for sealing.
return types.NewBlock(header, &types.Body{Transactions: body.Transactions}, receipts, trie.NewStackTrie(nil)), nil
}

View file

@ -92,7 +92,7 @@ type Engine interface {
//
// Note: The block header and state database might be updated to reflect any
// consensus rules that happen at finalization (e.g. block rewards).
FinalizeAndAssemble(chain ChainHeaderReader, header *types.Header, state *state.StateDB, body *types.Body, receipts []*types.Receipt) (*types.Block, error)
FinalizeAndAssemble(chain ChainHeaderReader, header *types.Header, state *state.StateDB, body *types.Body, receipts []*types.Receipt, onFinalization func()) (*types.Block, error)
// Seal generates a new sealing request for the given input block and pushes
// the result into the given channel.

View file

@ -511,7 +511,7 @@ func (ethash *Ethash) Finalize(chain consensus.ChainHeaderReader, header *types.
// FinalizeAndAssemble implements consensus.Engine, accumulating the block and
// uncle rewards, setting the final state and assembling the block.
func (ethash *Ethash) FinalizeAndAssemble(chain consensus.ChainHeaderReader, header *types.Header, state *state.StateDB, body *types.Body, receipts []*types.Receipt) (*types.Block, error) {
func (ethash *Ethash) FinalizeAndAssemble(chain consensus.ChainHeaderReader, header *types.Header, state *state.StateDB, body *types.Body, receipts []*types.Receipt, onFinalize func()) (*types.Block, error) {
if len(body.Withdrawals) > 0 {
return nil, errors.New("ethash does not support withdrawals")
}
@ -521,6 +521,9 @@ func (ethash *Ethash) FinalizeAndAssemble(chain consensus.ChainHeaderReader, hea
// Assign the final state root to header.
header.Root = state.IntermediateRoot(chain.Config().IsEIP158(header.Number))
if onFinalize != nil {
onFinalize()
}
// Header seems complete, assemble into a block and return
return types.NewBlock(header, &types.Body{Transactions: body.Transactions, Uncles: body.Uncles}, receipts, trie.NewStackTrie(nil)), nil
}

View file

@ -69,6 +69,8 @@ func latestBlobConfig(cfg *params.ChainConfig, time uint64) *BlobConfig {
bc = s.BPO4
case cfg.IsBPO3(london, time) && s.BPO3 != nil:
bc = s.BPO3
case cfg.IsAmsterdam(london, time) && s.Amsterdam != nil:
bc = s.BPO2
case cfg.IsBPO2(london, time) && s.BPO2 != nil:
bc = s.BPO2
case cfg.IsBPO1(london, time) && s.BPO1 != nil:

View file

@ -0,0 +1,131 @@
package core
import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/tracing"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/types/bal"
"github.com/holiman/uint256"
"math/big"
)
// BlockAccessListTracer is a tracer which gathers state accesses/mutations
// from the execution of a block. It is used for constructing and verifying
// EIP-7928 block access lists.
type BlockAccessListTracer struct {
	// builder accumulates the account/storage reads and writes reported by
	// the tracing hooks.
	builder *bal.AccessListBuilder

	// balIdx is the access list index that changes are currently being
	// recorded into (advanced after each transaction and after the
	// pre-transaction system calls).
	balIdx uint16

	// sysCallCount is the number of system calls that have been invoked,
	// used when building an access list to determine if the system calls
	// being executed are before/after the block transactions.
	sysCallCount int

	// isPostTx is true if the tracer is processing post-tx state changes.
	// In this case we won't record the final index after the end of the
	// second post-tx system contract but after the finalization of the
	// block. This is because we have EIP-4895 withdrawals which are
	// processed after the last system contracts execute and must be
	// included in the BAL.
	isPostTx bool
}
// NewBlockAccessListTracer returns a BlockAccessListTracer together with the
// set of tracing hooks through which it observes block execution. The hooks
// are wrapped via tracing.WrapWithJournal before being returned.
func NewBlockAccessListTracer() (*BlockAccessListTracer, *tracing.Hooks) {
	balTracer := &BlockAccessListTracer{
		builder: bal.NewAccessListBuilder(),
	}
	hooks := &tracing.Hooks{
		OnBlockFinalization:  balTracer.OnBlockFinalization,
		OnTxEnd:              balTracer.TxEndHook,
		OnTxStart:            balTracer.TxStartHook,
		OnEnter:              balTracer.OnEnter,
		OnExit:               balTracer.OnExit,
		OnCodeChangeV2:       balTracer.OnCodeChange,
		OnBalanceChange:      balTracer.OnBalanceChange,
		OnNonceChangeV2:      balTracer.OnNonceChange,
		OnStorageChange:      balTracer.OnStorageChange,
		OnStorageRead:        balTracer.OnStorageRead,
		OnAccountRead:        balTracer.OnAcountRead,
		OnSelfDestructChange: balTracer.OnSelfDestruct,
		OnSystemCallEnd:      balTracer.OnSystemCallEnd,
	}
	// NOTE(review): the error returned by WrapWithJournal is silently
	// discarded here — confirm it cannot fail for this hook set, or
	// propagate it to the caller.
	wrappedHooks, _ := tracing.WrapWithJournal(hooks)
	return balTracer, wrappedHooks
}
// SetPostTx marks the tracer as having entered the post-transaction phase of
// block processing. From this point on OnSystemCallEnd no longer finalises
// the current access-list index; that is deferred to OnBlockFinalization.
func (a *BlockAccessListTracer) SetPostTx() {
	a.isPostTx = true
}

// AccessList returns the constructed access list.
// It is assumed that this is only called after all the block state changes
// have been executed and the block has been finalized.
func (a *BlockAccessListTracer) AccessList() *bal.ConstructionBlockAccessList {
	return &a.builder.FinalizedAccesses
}
// OnSystemCallEnd is invoked when a system-contract call finishes. In the
// pre-transaction phase, the completion of the second system call closes the
// first access-list index. In the post-transaction phase finalisation is
// instead deferred to OnBlockFinalization, so that EIP-4895 withdrawals
// (which run after the last system contracts) are still captured in the BAL.
func (a *BlockAccessListTracer) OnSystemCallEnd() {
	if a.isPostTx {
		// Post-block changes are finalised in OnBlockFinalization instead.
		return
	}
	a.sysCallCount++
	if a.sysCallCount != 2 {
		return
	}
	a.builder.FinaliseIdxChanges(a.balIdx)
	a.balIdx++
}
// TxStartHook records the start of a new transaction so that subsequent
// state accesses are attributed to it in the builder.
func (a *BlockAccessListTracer) TxStartHook(vm *tracing.VMContext, tx *types.Transaction, from common.Address) {
	a.builder.EnterTx(tx.Hash())
}

// TxEndHook finalises the changes recorded for the transaction that just
// ended and advances to the next access-list index.
func (a *BlockAccessListTracer) TxEndHook(receipt *types.Receipt, err error) {
	a.builder.FinaliseIdxChanges(a.balIdx)
	a.balIdx++
}

// OnEnter opens a new builder scope for the call frame being entered.
func (a *BlockAccessListTracer) OnEnter(depth int, typ byte, from common.Address, to common.Address, input []byte, gas uint64, value *big.Int) {
	a.builder.EnterScope()
}

// OnExit closes the current call-frame scope, informing the builder whether
// the frame reverted.
func (a *BlockAccessListTracer) OnExit(depth int, output []byte, gasUsed uint64, err error, reverted bool) {
	a.builder.ExitScope(reverted)
}
// OnCodeChange records a change of the code stored at addr, passing both the
// previous and the new code to the builder.
func (a *BlockAccessListTracer) OnCodeChange(addr common.Address, prevCodeHash common.Hash, prevCode []byte, codeHash common.Hash, code []byte, reason tracing.CodeChangeReason) {
	a.builder.CodeChange(addr, prevCode, code)
}

// OnSelfDestruct records the self-destruction of the account at addr.
func (a *BlockAccessListTracer) OnSelfDestruct(addr common.Address) {
	a.builder.SelfDestruct(addr)
}

// OnBlockFinalization finalises the last access-list index once the block is
// fully processed, capturing post-transaction changes such as EIP-4895
// withdrawals.
func (a *BlockAccessListTracer) OnBlockFinalization() {
	a.builder.FinaliseIdxChanges(a.balIdx)
}
// OnBalanceChange records a balance change for addr. The big.Int balances
// supplied by the tracing hooks are converted to uint256 before being handed
// to the builder; note that SetBytes works on the absolute byte form, so any
// sign on the inputs is dropped.
func (a *BlockAccessListTracer) OnBalanceChange(addr common.Address, prevBalance, newBalance *big.Int, _ tracing.BalanceChangeReason) {
	var before, after uint256.Int
	before.SetBytes(prevBalance.Bytes())
	after.SetBytes(newBalance.Bytes())
	a.builder.BalanceChange(addr, &before, &after)
}
// OnNonceChange records a nonce change for addr.
func (a *BlockAccessListTracer) OnNonceChange(addr common.Address, prev uint64, new uint64, reason tracing.NonceChangeReason) {
	a.builder.NonceChange(addr, prev, new)
}

// OnStorageRead records a read of the given storage key of addr.
func (a *BlockAccessListTracer) OnStorageRead(addr common.Address, key common.Hash) {
	a.builder.StorageRead(addr, key)
}

// OnAcountRead records that the account at addr was read.
// NOTE(review): the method name is misspelled ("Acount"); renaming it to
// OnAccountRead would also require updating the hook wiring in
// NewBlockAccessListTracer and any external callers, so it is left as-is
// here.
func (a *BlockAccessListTracer) OnAcountRead(addr common.Address) {
	a.builder.AccountRead(addr)
}

// OnStorageChange records a write to the given storage slot of addr,
// capturing both the previous and the new value.
func (a *BlockAccessListTracer) OnStorageChange(addr common.Address, slot common.Hash, prev common.Hash, new common.Hash) {
	a.builder.StorageWrite(addr, slot, prev, new)
}

View file

@ -19,7 +19,6 @@ package core
import (
"errors"
"fmt"
"github.com/ethereum/go-ethereum/consensus"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
@ -111,6 +110,36 @@ func (v *BlockValidator) ValidateBody(block *types.Block) error {
}
}
// block access lists must be present after the Amsterdam hard fork
if v.config.IsAmsterdam(block.Number(), block.Time()) {
if block.Header().BlockAccessListHash == nil {
// TODO: verify that this check isn't also done elsewhere
return fmt.Errorf("block access list hash not set in header")
}
if block.Body().AccessList != nil {
if *block.Header().BlockAccessListHash != block.Body().AccessList.Hash() {
return fmt.Errorf("access list hash mismatch. local: %x. remote: %x\n", block.Body().AccessList.Hash(), *block.Header().BlockAccessListHash)
} else if err := block.Body().AccessList.Validate(len(block.Transactions())); err != nil {
return fmt.Errorf("invalid block access list: %v", err)
}
}
} else if v.bc.cfg.EnableBALForTesting {
// If experimental.bal is enabled, the BAL hash is not allowed in the header
// but can optionally be in the body.
// This is in order that Geth can import preexisting chains augmented with BALs
// and not have a hash mismatch.
if block.Header().BlockAccessListHash != nil {
return fmt.Errorf("access list hash in block header not allowed preamsterdam")
}
} else {
// if experimental.bal is not enabled, block headers cannot have access list hash and bodies cannot have access lists.
if block.Body().AccessList != nil {
return fmt.Errorf("access list not allowed in block body if not in amsterdam or experimental.bal is set")
} else if block.Header().BlockAccessListHash != nil {
return fmt.Errorf("access list hash in block header not allowed when experimental.bal is set")
}
}
// Ancestor block must be known.
if !v.bc.HasBlockAndState(block.ParentHash(), block.NumberU64()-1) {
if !v.bc.HasBlock(block.ParentHash(), block.NumberU64()-1) {
@ -123,7 +152,7 @@ func (v *BlockValidator) ValidateBody(block *types.Block) error {
// ValidateState validates the various changes that happen after a state transition,
// such as amount of used gas, the receipt roots and the state root itself.
func (v *BlockValidator) ValidateState(block *types.Block, statedb *state.StateDB, res *ProcessResult, stateless bool) error {
func (v *BlockValidator) ValidateState(block *types.Block, stateTransition state.BlockStateTransition, res *ProcessResult, stateless bool) error {
if res == nil {
return errors.New("nil ProcessResult value")
}
@ -160,10 +189,11 @@ func (v *BlockValidator) ValidateState(block *types.Block, statedb *state.StateD
} else if res.Requests != nil {
return errors.New("block has requests before prague fork")
}
// Validate the state root against the received state root and throw
// an error if they don't match.
if root := statedb.IntermediateRoot(v.config.IsEIP158(header.Number)); header.Root != root {
return fmt.Errorf("invalid merkle root (remote: %x local: %x) dberr: %w", header.Root, root, statedb.Error())
if root := stateTransition.IntermediateRoot(v.config.IsEIP158(header.Number)); header.Root != root {
return fmt.Errorf("invalid merkle root (remote: %x local: %x) dberr: %w", header.Root, root, stateTransition.Error())
}
return nil
}

View file

@ -101,6 +101,21 @@ var (
blockExecutionTimer = metrics.NewRegisteredResettingTimer("chain/execution", nil)
blockWriteTimer = metrics.NewRegisteredResettingTimer("chain/write", nil)
// BALspecific timers
blockPreprocessingTimer = metrics.NewRegisteredResettingTimer("chain/preprocess", nil)
txExecutionTimer = metrics.NewRegisteredResettingTimer("chain/txexecution", nil)
stateTrieHashTimer = metrics.NewRegisteredResettingTimer("chain/statetriehash", nil)
accountTriesUpdateTimer = metrics.NewRegisteredResettingTimer("chain/accounttriesupdate", nil)
stateTriePrefetchTimer = metrics.NewRegisteredResettingTimer("chain/statetrieprefetch", nil)
stateTrieUpdateTimer = metrics.NewRegisteredResettingTimer("chain/statetrieupdate", nil)
originStorageLoadTimer = metrics.NewRegisteredResettingTimer("chain/originstorageload", nil)
stateRootComputeTimer = metrics.NewRegisteredResettingTimer("chain/staterootcompute", nil)
stateCommitTimer = metrics.NewRegisteredResettingTimer("chain/statetriecommit", nil)
blockPostprocessingTimer = metrics.NewRegisteredResettingTimer("chain/postprocess", nil)
blockReorgMeter = metrics.NewRegisteredMeter("chain/reorg/executes", nil)
blockReorgAddMeter = metrics.NewRegisteredMeter("chain/reorg/add", nil)
blockReorgDropMeter = metrics.NewRegisteredMeter("chain/reorg/drop", nil)
@ -216,6 +231,11 @@ type BlockChainConfig struct {
// SlowBlockThreshold is the block execution time threshold beyond which
// detailed statistics will be logged.
SlowBlockThreshold time.Duration
// If EnableBALForTesting is enabled, block access lists will be created
// from block execution and embedded in the body. The block access list
// hash will not be set in the header.
EnableBALForTesting bool
}
// DefaultConfig returns the default config.
@ -354,12 +374,13 @@ type BlockChain struct {
stopping atomic.Bool // false if chain is running, true when stopped
procInterrupt atomic.Bool // interrupt signaler for block processing
engine consensus.Engine
validator Validator // Block and state validator interface
prefetcher Prefetcher
processor Processor // Block transaction processor interface
logger *tracing.Hooks
stateSizer *state.SizeTracker // State size tracking
engine consensus.Engine
validator Validator // Block and state validator interface
prefetcher Prefetcher
processor Processor // Block transaction processor interface
parallelProcessor ParallelStateProcessor
logger *tracing.Hooks
stateSizer *state.SizeTracker // State size tracking
lastForkReadyAlert time.Time // Last time there was a fork readiness print out
slowBlockThreshold time.Duration // Block execution time threshold beyond which detailed statistics will be logged
@ -568,6 +589,104 @@ func NewBlockChain(db ethdb.Database, genesis *Genesis, engine consensus.Engine,
}
return bc, nil
}
// processBlockWithAccessList executes and validates a block that carries an
// EIP-7928 block access list (BAL) using the parallel processor, then writes
// the block (and optionally the new head) to the database.
//
// The named return blockEndErr is observed by the deferred OnBlockEnd tracing
// hook, so it must carry any error produced after the hook is armed.
func (bc *BlockChain) processBlockWithAccessList(parentRoot common.Hash, block *types.Block, setHead bool) (procRes *blockProcessingResult, blockEndErr error) {
	startTime := time.Now()

	reader, err := bc.statedb.Reader(parentRoot)
	if err != nil {
		return nil, err
	}
	stateReader := state.NewBALReader(block, reader)
	stateTransition, err := state.NewBALStateTransition(stateReader, bc.statedb, parentRoot)
	if err != nil {
		return nil, err
	}
	statedb, err := state.New(parentRoot, bc.statedb)
	if err != nil {
		return nil, err
	}
	statedb.SetBlockAccessList(stateReader)

	if bc.logger != nil && bc.logger.OnBlockStart != nil {
		bc.logger.OnBlockStart(tracing.BlockEvent{
			Block:     block,
			Finalized: bc.CurrentFinalBlock(),
			Safe:      bc.CurrentSafeBlock(),
		})
	}
	if bc.logger != nil && bc.logger.OnBlockEnd != nil {
		defer func() {
			bc.logger.OnBlockEnd(blockEndErr)
		}()
	}
	res, err := bc.parallelProcessor.Process(block, stateTransition, statedb, bc.cfg.VmConfig)
	if err != nil {
		return nil, err
	}
	if err := bc.validator.ValidateState(block, stateTransition, res.ProcessResult, false); err != nil {
		return nil, err
	}
	procTime := time.Since(startTime)

	// Write the block to the chain and get the status.
	var status WriteStatus
	if !setHead {
		// Don't set the head, only insert the block.
		err = bc.writeBlockWithState(block, res.ProcessResult.Receipts, stateTransition)
	} else {
		status, err = bc.writeBlockAndSetHead(block, res.ProcessResult.Receipts, res.ProcessResult.Logs, stateTransition, false)
	}
	if err != nil {
		return nil, err
	}
	// Update the metrics touched during block commit. Commits are complete at
	// this point, so the recorded durations are final.
	accountCommitTimer.Update(stateTransition.Metrics().AccountCommits)
	storageCommitTimer.Update(stateTransition.Metrics().StorageCommits)
	snapshotCommitTimer.Update(stateTransition.Metrics().SnapshotCommits)
	triedbCommitTimer.Update(stateTransition.Metrics().TrieDBCommits)

	elapsed := time.Since(startTime) + 1 // prevent zero division
	blockInsertTimer.Update(elapsed)
	// TODO(rjl493456442) generalize the ResettingTimer
	mgasps := float64(res.ProcessResult.GasUsed) * 1000 / float64(elapsed)
	chainMgaspsMeter.Update(time.Duration(mgasps))
	blockPreprocessingTimer.Update(res.PreProcessTime)
	txExecutionTimer.Update(res.ExecTime)

	// Update the metrics from the block state root computation.
	stateTriePrefetchTimer.Update(res.StateTransitionMetrics.StatePrefetch)
	accountTriesUpdateTimer.Update(res.StateTransitionMetrics.AccountUpdate)
	stateTrieUpdateTimer.Update(res.StateTransitionMetrics.StateUpdate)
	stateTrieHashTimer.Update(res.StateTransitionMetrics.StateHash)
	stateRootComputeTimer.Update(res.StateTransitionMetrics.AccountUpdate + res.StateTransitionMetrics.StateUpdate + res.StateTransitionMetrics.StateHash)
	originStorageLoadTimer.Update(res.StateTransitionMetrics.OriginStorageLoadTime)
	stateCommitTimer.Update(res.StateTransitionMetrics.TotalCommitTime)
	blockPostprocessingTimer.Update(res.PostProcessTime)

	return &blockProcessingResult{
		usedGas:  res.ProcessResult.GasUsed,
		procTime: procTime,
		status:   status,
		witness:  nil,
		stats:    &ExecuteStats{}, // TODO: actually implement this in the future
	}, nil
}
func (bc *BlockChain) setupSnapshot() {
// Short circuit if the chain is established with path scheme, as the
@ -1638,7 +1757,7 @@ func (bc *BlockChain) writeKnownBlock(block *types.Block) error {
// writeBlockWithState writes block, metadata and corresponding state data to the
// database.
func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types.Receipt, statedb *state.StateDB) error {
func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types.Receipt, transition state.BlockStateTransition) error {
if !bc.HasHeader(block.ParentHash(), block.NumberU64()-1) {
return consensus.ErrUnknownAncestor
}
@ -1652,7 +1771,7 @@ func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types.
)
rawdb.WriteBlock(batch, block)
rawdb.WriteReceipts(batch, block.Hash(), block.NumberU64(), receipts)
rawdb.WritePreimages(batch, statedb.Preimages())
rawdb.WritePreimages(batch, transition.Preimages())
if err := batch.Write(); err != nil {
log.Crit("Failed to write block into disk", "err", err)
}
@ -1667,7 +1786,7 @@ func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types.
hasStateSizer = bc.stateSizer != nil
)
if hasStateHook || hasStateSizer {
r, update, err := statedb.CommitWithUpdate(block.NumberU64(), isEIP158, isCancun)
r, update, err := transition.CommitWithUpdate(block.NumberU64(), isEIP158, isCancun)
if err != nil {
return err
}
@ -1683,7 +1802,7 @@ func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types.
}
root = r
} else {
root, err = statedb.Commit(block.NumberU64(), isEIP158, isCancun)
root, err = transition.Commit(block.NumberU64(), isEIP158, isCancun)
if err != nil {
return err
}
@ -1750,7 +1869,7 @@ func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types.
// writeBlockAndSetHead is the internal implementation of WriteBlockAndSetHead.
// This function expects the chain mutex to be held.
func (bc *BlockChain) writeBlockAndSetHead(block *types.Block, receipts []*types.Receipt, logs []*types.Log, state *state.StateDB, emitHeadEvent bool) (status WriteStatus, err error) {
func (bc *BlockChain) writeBlockAndSetHead(block *types.Block, receipts []*types.Receipt, logs []*types.Log, state state.BlockStateTransition, emitHeadEvent bool) (status WriteStatus, err error) {
if err := bc.writeBlockWithState(block, receipts, state); err != nil {
return NonStatTy, err
}
@ -1987,6 +2106,7 @@ func (bc *BlockChain) insertChain(chain types.Blocks, setHead bool, makeWitness
}
// The traced section of block import.
start := time.Now()
res, err := bc.ProcessBlock(parent.Root, block, setHead, makeWitness && len(chain) == 1)
if err != nil {
return nil, it.index, err
@ -2073,6 +2193,33 @@ func (bpr *blockProcessingResult) Stats() *ExecuteStats {
// ProcessBlock executes and validates the given block. If there was no error
// it writes the block and associated state to database.
func (bc *BlockChain) ProcessBlock(parentRoot common.Hash, block *types.Block, setHead bool, makeWitness bool) (result *blockProcessingResult, blockEndErr error) {
var constructBALForTesting bool
enableBALFork := bc.chainConfig.IsAmsterdam(block.Number(), block.Time())
if enableBALFork || !bc.chainConfig.IsCancun(block.Number(), block.Time()) {
// disable testmode construction of BALs if we are not in the range [cancun, amsterdam)
constructBALForTesting = false
}
// TODO: need to check that the block is also postcancun if it contained an access list?
// this should be checked during decoding (?)
blockHasAccessList := block.Body().AccessList != nil
// only construct and embed BALs in the block if:
// * it has been enabled for testing purposes (preAmsterdam/postCancun blocks with experimental.bal)
// * we are after Amsterdam and the block was provided with bal omitted
// (importing any historical block not near the chain head)
constructBAL := constructBALForTesting || (enableBALFork && !blockHasAccessList)
// do not verify the integrity of the BAL hash wrt the headerreported value
// for any nonAmsterdam blocks: if the block being imported has been created
// via experimental.bal, the block access list hash is unset in the header
// to keep the block hash unchanged (allow for importing historical blocks
// with BALs for testing purposes).
verifyBALHeader := enableBALFork
// optimized execution path for blocks which contain BALs
if blockHasAccessList {
panic("TODO: strip bal from body before committing it to disk")
return bc.processBlockWithAccessList(parentRoot, block, setHead)
}
var (
err error
startTime = time.Now()
@ -2083,6 +2230,7 @@ func (bc *BlockChain) ProcessBlock(parentRoot common.Hash, block *types.Block, s
if bc.cfg.NoPrefetch {
statedb, err = state.New(parentRoot, bc.statedb)
if err != nil {
return nil, err
}
@ -2128,7 +2276,10 @@ func (bc *BlockChain) ProcessBlock(parentRoot common.Hash, block *types.Block, s
// Disable tracing for prefetcher executions.
vmCfg := bc.cfg.VmConfig
vmCfg.Tracer = nil
bc.prefetcher.Prefetch(block, throwaway, vmCfg, &interrupt)
if block.Body().AccessList == nil {
// only use the state prefetcher for non-BAL blocks.
bc.prefetcher.Prefetch(block, throwaway, vmCfg, &interrupt)
}
blockPrefetchExecuteTimer.Update(time.Since(start))
if interrupt.Load() {
@ -2157,6 +2308,7 @@ func (bc *BlockChain) ProcessBlock(parentRoot common.Hash, block *types.Block, s
witnessStats = stateless.NewWitnessStats()
}
}
statedb.StartPrefetcher("chain", witness, witnessStats)
defer statedb.StopPrefetcher()
}
@ -2174,21 +2326,81 @@ func (bc *BlockChain) ProcessBlock(parentRoot common.Hash, block *types.Block, s
}()
}
var res *ProcessResult
var ptime, vtime time.Duration
// BAL Tracer used for creating BALs in ProcessBlock in testing path only
var balTracer *BlockAccessListTracer
// Process block using the parent state as reference point
if enableBALFork {
balTracer, bc.cfg.VmConfig.Tracer = NewBlockAccessListTracer()
defer func() {
bc.cfg.VmConfig.Tracer = nil
}()
}
// Process block using the parent state as reference point
pstart := time.Now()
res, err := bc.processor.Process(block, statedb, bc.cfg.VmConfig)
res, err = bc.processor.Process(block, statedb, bc.cfg.VmConfig)
if err != nil {
bc.reportBadBlock(block, res, err)
return nil, err
}
ptime := time.Since(pstart)
ptime = time.Since(pstart)
if enableBALFork {
balTracer.OnBlockFinalization()
}
// unset the BAL-creation tracer (dirty)
bc.cfg.VmConfig.Tracer = nil
vstart := time.Now()
if err := bc.validator.ValidateState(block, statedb, res, false); err != nil {
bc.reportBadBlock(block, res, err)
return nil, err
}
vtime := time.Since(vstart)
vtime = time.Since(vstart)
if constructBAL {
if verifyBALHeader && *block.Header().BlockAccessListHash != balTracer.AccessList().ToEncodingObj().Hash() {
err := fmt.Errorf("block access list hash mismatch (reported=%x, computed=%x)", *block.Header().BlockAccessListHash, balTracer.AccessList().ToEncodingObj().Hash())
bc.reportBadBlock(block, res, err)
return nil, err
}
// very ugly... deepcopy the block body before setting the block access
// list on it to prevent mutating the block instance passed by the caller.
existingBody := block.Body()
block = block.WithBody(*existingBody)
existingBody = block.Body()
existingBody.AccessList = balTracer.AccessList().ToEncodingObj()
block = block.WithBody(*existingBody)
} else if enableBALFork {
computedAccessList := balTracer.AccessList().ToEncodingObj()
computedAccessListHash := computedAccessList.Hash()
if *block.Header().BlockAccessListHash != computedAccessListHash {
//fmt.Printf("remote:\n%s\nlocal:\n%s\n", block.Body().AccessList.JSONString(), computedAccessList.JSONString())
err := fmt.Errorf("block header access list hash mismatch with computed (header=%x computed=%x)", *block.Header().BlockAccessListHash, computedAccessListHash)
bc.reportBadBlock(block, res, err)
return nil, err
}
if block.Body().AccessList == nil {
// very ugly... deep copy the block body before setting the block access
// list on it to prevent mutating the block instance passed by the caller.
existingBody := block.Body()
block = block.WithBody(*existingBody)
existingBody = block.Body()
existingBody.AccessList = computedAccessList
block = block.WithBody(*existingBody)
} else if block.Body().AccessList.Hash() != computedAccessListHash {
err := fmt.Errorf("block access list hash mismatch (remote=%x computed=%x)", block.Body().AccessList.Hash(), computedAccessListHash)
bc.reportBadBlock(block, res, err)
return nil, err
}
}
// If witnesses was generated and stateless self-validation requested, do
// that now. Self validation should *never* run in production, it's more of
@ -2775,6 +2987,10 @@ func (bc *BlockChain) reportBadBlock(block *types.Block, res *ProcessResult, err
log.Error(summarizeBadBlock(block, receipts, bc.Config(), err))
}
// reportBALBlock is a placeholder for reporting blocks that fail block access
// list (BAL) validation. It is currently a no-op.
// NOTE(review): unimplemented stub — presumably intended to mirror
// reportBadBlock for BAL-specific failures; confirm before wiring it up.
func (bc *BlockChain) reportBALBlock(block *types.Block, res *ProcessResult, err error) {
}
// logForkReadiness will write a log when a future fork is scheduled, but not
// active. This is useful so operators know their client is ready for the fork.
func (bc *BlockChain) logForkReadiness(block *types.Block) {

View file

@ -165,7 +165,7 @@ func testBlockChainImport(chain types.Blocks, blockchain *BlockChain) error {
blockchain.reportBadBlock(block, res, err)
return err
}
err = blockchain.validator.ValidateState(block, statedb, res, false)
err = blockchain.validator.ValidateState(block, statedb, res, true, false)
if err != nil {
blockchain.reportBadBlock(block, res, err)
return err

View file

@ -409,7 +409,7 @@ func GenerateChain(config *params.ChainConfig, parent *types.Block, engine conse
}
body := types.Body{Transactions: b.txs, Uncles: b.uncles, Withdrawals: b.withdrawals}
block, err := b.engine.FinalizeAndAssemble(cm, b.header, statedb, &body, b.receipts)
block, err := b.engine.FinalizeAndAssemble(cm, b.header, statedb, &body, b.receipts, nil)
if err != nil {
panic(err)
}

View file

@ -5,6 +5,7 @@ package core
import (
"encoding/json"
"errors"
"fmt"
"math/big"
"github.com/ethereum/go-ethereum/common"
@ -19,21 +20,22 @@ var _ = (*genesisSpecMarshaling)(nil)
// MarshalJSON marshals as JSON.
func (g Genesis) MarshalJSON() ([]byte, error) {
type Genesis struct {
Config *params.ChainConfig `json:"config"`
Nonce math.HexOrDecimal64 `json:"nonce"`
Timestamp math.HexOrDecimal64 `json:"timestamp"`
ExtraData hexutil.Bytes `json:"extraData"`
GasLimit math.HexOrDecimal64 `json:"gasLimit" gencodec:"required"`
Difficulty *math.HexOrDecimal256 `json:"difficulty" gencodec:"required"`
Mixhash common.Hash `json:"mixHash"`
Coinbase common.Address `json:"coinbase"`
Alloc map[common.UnprefixedAddress]types.Account `json:"alloc" gencodec:"required"`
Number math.HexOrDecimal64 `json:"number"`
GasUsed math.HexOrDecimal64 `json:"gasUsed"`
ParentHash common.Hash `json:"parentHash"`
BaseFee *math.HexOrDecimal256 `json:"baseFeePerGas"`
ExcessBlobGas *math.HexOrDecimal64 `json:"excessBlobGas"`
BlobGasUsed *math.HexOrDecimal64 `json:"blobGasUsed"`
Config *params.ChainConfig `json:"config"`
Nonce math.HexOrDecimal64 `json:"nonce"`
Timestamp math.HexOrDecimal64 `json:"timestamp"`
ExtraData hexutil.Bytes `json:"extraData"`
GasLimit math.HexOrDecimal64 `json:"gasLimit" gencodec:"required"`
Difficulty *math.HexOrDecimal256 `json:"difficulty" gencodec:"required"`
Mixhash common.Hash `json:"mixHash"`
Coinbase common.Address `json:"coinbase"`
Alloc map[common.UnprefixedAddress]types.Account `json:"alloc" gencodec:"required"`
Number math.HexOrDecimal64 `json:"number"`
GasUsed math.HexOrDecimal64 `json:"gasUsed"`
ParentHash common.Hash `json:"parentHash"`
BaseFee *math.HexOrDecimal256 `json:"baseFeePerGas"`
ExcessBlobGas *math.HexOrDecimal64 `json:"excessBlobGas"`
BlobGasUsed *math.HexOrDecimal64 `json:"blobGasUsed"`
BlockAccessListHash *common.Hash `json:"blockAccessListHash,omitempty"`
}
var enc Genesis
enc.Config = g.Config
@ -56,27 +58,29 @@ func (g Genesis) MarshalJSON() ([]byte, error) {
enc.BaseFee = (*math.HexOrDecimal256)(g.BaseFee)
enc.ExcessBlobGas = (*math.HexOrDecimal64)(g.ExcessBlobGas)
enc.BlobGasUsed = (*math.HexOrDecimal64)(g.BlobGasUsed)
enc.BlockAccessListHash = g.BlockAccessListHash
return json.Marshal(&enc)
}
// UnmarshalJSON unmarshals from JSON.
func (g *Genesis) UnmarshalJSON(input []byte) error {
type Genesis struct {
Config *params.ChainConfig `json:"config"`
Nonce *math.HexOrDecimal64 `json:"nonce"`
Timestamp *math.HexOrDecimal64 `json:"timestamp"`
ExtraData *hexutil.Bytes `json:"extraData"`
GasLimit *math.HexOrDecimal64 `json:"gasLimit" gencodec:"required"`
Difficulty *math.HexOrDecimal256 `json:"difficulty" gencodec:"required"`
Mixhash *common.Hash `json:"mixHash"`
Coinbase *common.Address `json:"coinbase"`
Alloc map[common.UnprefixedAddress]types.Account `json:"alloc" gencodec:"required"`
Number *math.HexOrDecimal64 `json:"number"`
GasUsed *math.HexOrDecimal64 `json:"gasUsed"`
ParentHash *common.Hash `json:"parentHash"`
BaseFee *math.HexOrDecimal256 `json:"baseFeePerGas"`
ExcessBlobGas *math.HexOrDecimal64 `json:"excessBlobGas"`
BlobGasUsed *math.HexOrDecimal64 `json:"blobGasUsed"`
Config *params.ChainConfig `json:"config"`
Nonce *math.HexOrDecimal64 `json:"nonce"`
Timestamp *math.HexOrDecimal64 `json:"timestamp"`
ExtraData *hexutil.Bytes `json:"extraData"`
GasLimit *math.HexOrDecimal64 `json:"gasLimit" gencodec:"required"`
Difficulty *math.HexOrDecimal256 `json:"difficulty" gencodec:"required"`
Mixhash *common.Hash `json:"mixHash"`
Coinbase *common.Address `json:"coinbase"`
Alloc map[common.UnprefixedAddress]types.Account `json:"alloc" gencodec:"required"`
Number *math.HexOrDecimal64 `json:"number"`
GasUsed *math.HexOrDecimal64 `json:"gasUsed"`
ParentHash *common.Hash `json:"parentHash"`
BaseFee *math.HexOrDecimal256 `json:"baseFeePerGas"`
ExcessBlobGas *math.HexOrDecimal64 `json:"excessBlobGas"`
BlobGasUsed *math.HexOrDecimal64 `json:"blobGasUsed"`
BlockAccessListHash *common.Hash `json:"blockAccessListHash,omitempty"`
}
var dec Genesis
if err := json.Unmarshal(input, &dec); err != nil {
@ -133,5 +137,9 @@ func (g *Genesis) UnmarshalJSON(input []byte) error {
if dec.BlobGasUsed != nil {
g.BlobGasUsed = (*uint64)(dec.BlobGasUsed)
}
fmt.Printf("dec al hash is %v\n", dec.BlockAccessListHash)
if dec.BlockAccessListHash != nil {
g.BlockAccessListHash = dec.BlockAccessListHash
}
return nil
}

View file

@ -67,12 +67,13 @@ type Genesis struct {
// These fields are used for consensus tests. Please don't use them
// in actual genesis blocks.
Number uint64 `json:"number"`
GasUsed uint64 `json:"gasUsed"`
ParentHash common.Hash `json:"parentHash"`
BaseFee *big.Int `json:"baseFeePerGas"` // EIP-1559
ExcessBlobGas *uint64 `json:"excessBlobGas"` // EIP-4844
BlobGasUsed *uint64 `json:"blobGasUsed"` // EIP-4844
Number uint64 `json:"number"`
GasUsed uint64 `json:"gasUsed"`
ParentHash common.Hash `json:"parentHash"`
BaseFee *big.Int `json:"baseFeePerGas"` // EIP-1559
ExcessBlobGas *uint64 `json:"excessBlobGas"` // EIP-4844
BlobGasUsed *uint64 `json:"blobGasUsed"` // EIP-4844
BlockAccessListHash *common.Hash `json:"blockAccessListHash,omitempty"` // EIP-7928
}
// copy copies the genesis.
@ -122,6 +123,7 @@ func ReadGenesis(db ethdb.Database) (*Genesis, error) {
genesis.BaseFee = genesisHeader.BaseFee
genesis.ExcessBlobGas = genesisHeader.ExcessBlobGas
genesis.BlobGasUsed = genesisHeader.BlobGasUsed
genesis.BlockAccessListHash = genesisHeader.BlockAccessListHash
return &genesis, nil
}
@ -485,18 +487,19 @@ func (g *Genesis) ToBlock() *types.Block {
// toBlockWithRoot constructs the genesis block with the given genesis state root.
func (g *Genesis) toBlockWithRoot(root common.Hash) *types.Block {
head := &types.Header{
Number: new(big.Int).SetUint64(g.Number),
Nonce: types.EncodeNonce(g.Nonce),
Time: g.Timestamp,
ParentHash: g.ParentHash,
Extra: g.ExtraData,
GasLimit: g.GasLimit,
GasUsed: g.GasUsed,
BaseFee: g.BaseFee,
Difficulty: g.Difficulty,
MixDigest: g.Mixhash,
Coinbase: g.Coinbase,
Root: root,
Number: new(big.Int).SetUint64(g.Number),
Nonce: types.EncodeNonce(g.Nonce),
Time: g.Timestamp,
ParentHash: g.ParentHash,
Extra: g.ExtraData,
GasLimit: g.GasLimit,
GasUsed: g.GasUsed,
BaseFee: g.BaseFee,
Difficulty: g.Difficulty,
MixDigest: g.Mixhash,
Coinbase: g.Coinbase,
BlockAccessListHash: g.BlockAccessListHash,
Root: root,
}
if g.GasLimit == 0 {
head.GasLimit = params.GenesisGasLimit

View file

@ -0,0 +1,358 @@
package core
import (
"cmp"
"fmt"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/types/bal"
"github.com/ethereum/go-ethereum/core/vm"
"golang.org/x/sync/errgroup"
"runtime"
"slices"
"time"
)
// ProcessResultWithMetrics wraps ProcessResult with some metrics that are
// emitted when executing blocks containing access lists.
type ProcessResultWithMetrics struct {
	// ProcessResult holds the receipts, logs, requests and gas used of the
	// executed block, or the execution error.
	ProcessResult *ProcessResult
	// PreProcessTime is the time spent preparing prestates before parallel
	// transaction execution begins.
	PreProcessTime time.Duration
	// StateTransitionMetrics carries the timing breakdown of the state root
	// computation performed alongside execution.
	StateTransitionMetrics *state.BALStateTransitionMetrics
	// the time it took to execute all txs in the block
	ExecTime time.Duration
	// PostProcessTime covers post-tx system calls, finalization and BAL
	// validation.
	PostProcessTime time.Duration
}
// ParallelStateProcessor is used to execute and verify blocks containing
// access lists. It embeds StateProcessor for the shared chain machinery.
type ParallelStateProcessor struct {
	*StateProcessor
	// vmCfg is the template vm.Config; per-transaction configs are derived
	// from it with a BAL tracer attached.
	vmCfg *vm.Config
}
// NewParallelStateProcessor returns a new ParallelStateProcessor instance
// built on top of a regular StateProcessor for the given header chain.
func NewParallelStateProcessor(chain *HeaderChain, vmConfig *vm.Config) ParallelStateProcessor {
	// Use keyed fields so the literal stays valid if the struct grows.
	return ParallelStateProcessor{
		StateProcessor: NewStateProcessor(chain),
		vmCfg:          vmConfig,
	}
}
// prepareExecResult is called by resultHandler when all transactions have
// successfully executed. It performs the post-tx state transition (system
// contracts, consensus finalization and withdrawals), validates the resulting
// state diff and reads against the block's access list, and builds the
// ProcessResult that resultHandler will send on resCh.
func (p *ParallelStateProcessor) prepareExecResult(block *types.Block, allStateReads *bal.StateAccesses, tExecStart time.Time, postTxState *state.StateDB, receipts types.Receipts) *ProcessResultWithMetrics {
	tExec := time.Since(tExecStart)

	var requests [][]byte
	tPostprocessStart := time.Now()
	header := block.Header()

	// Trace the post-tx system operations with a fresh BAL tracer so their
	// state accesses can be validated against the final access list index.
	balTracer, hooks := NewBlockAccessListTracer()
	balTracer.SetPostTx()
	tracingStateDB := state.NewHookedState(postTxState, hooks)
	context := NewEVMBlockContext(header, p.chain, nil)

	// The post-tx state transition occupies the last BAL index, one past the
	// final transaction (index 0 is the pre-tx system transition).
	lastBALIdx := len(block.Transactions()) + 1
	postTxState.SetAccessListIndex(lastBALIdx)

	cfg := vm.Config{
		Tracer:                  hooks,
		NoBaseFee:               p.vmCfg.NoBaseFee,
		EnablePreimageRecording: p.vmCfg.EnablePreimageRecording,
		ExtraEips:               slices.Clone(p.vmCfg.ExtraEips),
		StatelessSelfValidation: p.vmCfg.StatelessSelfValidation,
		EnableWitnessStats:      p.vmCfg.EnableWitnessStats,
	}
	evm := vm.NewEVM(context, tracingStateDB, p.chainConfig(), cfg)

	// Receipts arrive in completion order from the parallel workers:
	// 1. order the receipts by tx index
	// 2. compute the cumulative gas used per receipt, failing the block if it
	//    exceeds the gas limit
	slices.SortFunc(receipts, func(a, b *types.Receipt) int {
		return cmp.Compare(a.TransactionIndex, b.TransactionIndex)
	})
	var (
		cumulativeGasUsed uint64
		allLogs           []*types.Log
	)
	for _, receipt := range receipts {
		cumulativeGasUsed += receipt.GasUsed
		receipt.CumulativeGasUsed = cumulativeGasUsed
		if receipt.CumulativeGasUsed > header.GasLimit {
			return &ProcessResultWithMetrics{
				ProcessResult: &ProcessResult{Error: fmt.Errorf("gas limit exceeded")},
			}
		}
		allLogs = append(allLogs, receipt.Logs...)
	}
	// Read requests if Prague is enabled.
	if p.chainConfig().IsPrague(block.Number(), block.Time()) {
		requests = [][]byte{}
		// EIP-6110
		if err := ParseDepositLogs(&requests, allLogs, p.chainConfig()); err != nil {
			return &ProcessResultWithMetrics{
				ProcessResult: &ProcessResult{Error: err},
			}
		}
		// EIP-7002
		if err := ProcessWithdrawalQueue(&requests, evm); err != nil {
			return &ProcessResultWithMetrics{
				ProcessResult: &ProcessResult{Error: err},
			}
		}
		// EIP-7251
		if err := ProcessConsolidationQueue(&requests, evm); err != nil {
			return &ProcessResultWithMetrics{
				ProcessResult: &ProcessResult{Error: err},
			}
		}
	}
	// Finalize the block, applying any consensus engine specific extras
	// (e.g. block rewards).
	p.chain.Engine().Finalize(p.chain, header, tracingStateDB, block.Body())

	// Invoke Finalise so that withdrawals are accounted for in the state diff.
	postTxState.Finalise(true)
	balTracer.OnBlockFinalization()

	diff, stateReads := balTracer.builder.FinalizedIdxChanges()
	allStateReads.Merge(stateReads)

	// TODO: if there is a failure, we need to print out the detailed logs
	// explaining why the BAL validation failed, but logs are disabled when we
	// are running tests to prevent a ton of output.
	if !postTxState.BlockAccessList().ValidateStateDiff(lastBALIdx, diff) {
		return &ProcessResultWithMetrics{
			ProcessResult: &ProcessResult{Error: fmt.Errorf("BAL validation failure: post-tx state diff mismatch")},
		}
	}
	if !postTxState.BlockAccessList().ValidateStateReads(lastBALIdx, *allStateReads) {
		return &ProcessResultWithMetrics{
			ProcessResult: &ProcessResult{Error: fmt.Errorf("BAL validation failure: state reads mismatch")},
		}
	}
	tPostprocess := time.Since(tPostprocessStart)

	return &ProcessResultWithMetrics{
		ProcessResult: &ProcessResult{
			Receipts: receipts,
			Requests: requests,
			Logs:     allLogs,
			GasUsed:  cumulativeGasUsed,
		},
		PostProcessTime: tPostprocess,
		ExecTime:        tExec,
	}
}
// txExecResult carries the outcome of a single parallel transaction
// execution back to resultHandler.
type txExecResult struct {
	idx     int            // transaction index
	receipt *types.Receipt // receipt produced by the execution (nil on error)
	err     error          // non-EVM error which would render the block invalid
	// stateReads is the state read during execution, merged by resultHandler
	// for whole-block BAL read validation.
	stateReads bal.StateAccesses
}
// resultHandler collects the per-transaction execution results, performs the
// post-tx state transition and waits for the state root calculation, emitting
// the final result (or the first error encountered) on resCh.
//
// It always drains both txResCh (one result per transaction) and
// stateRootCalcResCh (exactly one result), so none of the goroutines feeding
// those unbuffered channels can block forever.
func (p *ParallelStateProcessor) resultHandler(block *types.Block, preTxStateReads bal.StateAccesses, postTxState *state.StateDB, tExecStart time.Time, txResCh <-chan txExecResult, stateRootCalcResCh <-chan stateRootCalculationResult, resCh chan *ProcessResultWithMetrics) {
	var (
		receipts []*types.Receipt
		execErr  error
	)
	gp := new(GasPool)
	gp.SetGas(block.GasLimit())

	allReads := make(bal.StateAccesses)
	allReads.Merge(preTxStateReads)

	// Receive exactly one result per transaction. Only the first error is
	// retained, but every result is awaited so no worker blocks on send.
	for done := 0; done < len(block.Transactions()); done++ {
		res := <-txResCh
		if execErr != nil {
			continue
		}
		if res.err != nil {
			execErr = res.err
			continue
		}
		if err := gp.SubGas(res.receipt.GasUsed); err != nil {
			execErr = err
			continue
		}
		receipts = append(receipts, res.receipt)
		allReads.Merge(res.stateReads)
	}
	if execErr != nil {
		// Drain the root calculation result so calcAndVerifyRoot does not
		// leak, blocked on its unbuffered channel.
		<-stateRootCalcResCh
		resCh <- &ProcessResultWithMetrics{ProcessResult: &ProcessResult{Error: execErr}}
		return
	}
	execResults := p.prepareExecResult(block, &allReads, tExecStart, postTxState, receipts)
	rootCalcRes := <-stateRootCalcResCh
	switch {
	case execResults.ProcessResult.Error != nil:
		resCh <- execResults
	case rootCalcRes.err != nil:
		resCh <- &ProcessResultWithMetrics{ProcessResult: &ProcessResult{Error: rootCalcRes.err}}
	default:
		execResults.StateTransitionMetrics = rootCalcRes.metrics
		resCh <- execResults
	}
}
// stateRootCalculationResult is the outcome of the asynchronous state root
// computation performed by calcAndVerifyRoot.
type stateRootCalculationResult struct {
	err     error                            // set when the computed root does not match the block header
	metrics *state.BALStateTransitionMetrics // timing breakdown of the root computation
	// NOTE(review): root is never populated by calcAndVerifyRoot — looks like
	// a candidate for removal (see the TODO there); confirm before deleting.
	root common.Hash
}
// calcAndVerifyRoot performs the post-state root hash calculation, verifying
// it against the root reported in the block header, and sends exactly one
// result on resCh.
//
// preState is currently unused by the body; the signature is kept stable for
// callers.
func (p *ParallelStateProcessor) calcAndVerifyRoot(preState *state.StateDB, block *types.Block, stateTransition *state.BALStateTransition, resCh chan stateRootCalculationResult) {
	// Apply the access-list-derived state modifications and compute the root.
	root := stateTransition.IntermediateRoot(false)
	res := stateRootCalculationResult{
		metrics: stateTransition.Metrics(),
	}
	if root != block.Root() {
		res.err = fmt.Errorf("state root mismatch. local: %x. remote: %x", root, block.Root())
	}
	resCh <- res
}
// execTx executes a single transaction against its own copy of the block
// prestate, validating the resulting state diff against the block access
// list entry for this transaction and returning the receipt together with
// the state that was read.
func (p *ParallelStateProcessor) execTx(block *types.Block, tx *types.Transaction, txIdx int, db *state.StateDB, signer types.Signer) *txExecResult {
	header := block.Header()
	balTracer, hooks := NewBlockAccessListTracer()
	tracingStateDB := state.NewHookedState(db, hooks)
	context := NewEVMBlockContext(header, p.chain, nil)
	cfg := vm.Config{
		Tracer:                  hooks,
		NoBaseFee:               p.vmCfg.NoBaseFee,
		EnablePreimageRecording: p.vmCfg.EnablePreimageRecording,
		ExtraEips:               slices.Clone(p.vmCfg.ExtraEips),
		StatelessSelfValidation: p.vmCfg.StatelessSelfValidation,
		EnableWitnessStats:      p.vmCfg.EnableWitnessStats,
	}
	evm := vm.NewEVM(context, tracingStateDB, p.chainConfig(), cfg)

	msg, err := TransactionToMessage(tx, signer, header.BaseFee)
	if err != nil {
		return &txExecResult{err: fmt.Errorf("could not apply tx %d [%v]: %w", txIdx, tx.Hash().Hex(), err)}
	}
	// Each transaction gets a full block-sized gas pool here; aggregate gas
	// accounting against the block limit is enforced later by resultHandler.
	gp := new(GasPool)
	gp.SetGas(block.GasLimit())
	db.SetTxContext(tx.Hash(), txIdx)

	var gasUsed uint64
	receipt, err := ApplyTransactionWithEVM(msg, gp, db, block.Number(), block.Hash(), context.Time, tx, &gasUsed, evm)
	if err != nil {
		return &txExecResult{err: fmt.Errorf("could not apply tx %d [%v]: %w", txIdx, tx.Hash().Hex(), err)}
	}
	// BAL indices are offset by one: index 0 is the pre-tx system transition.
	diff, accesses := balTracer.builder.FinalizedIdxChanges()
	if !db.BlockAccessList().ValidateStateDiff(txIdx+1, diff) {
		return &txExecResult{err: fmt.Errorf("bal validation failure")}
	}
	return &txExecResult{
		idx:        txIdx,
		receipt:    receipt,
		stateReads: accesses,
	}
}
// Process performs EVM execution and state root computation for a block which is known
// to contain an access list.
//
// Orchestration: the pre-tx system transition runs inline and is validated
// against BAL index 0; transactions then execute in parallel (each on its own
// copy of the prestate) while the state root is computed concurrently from
// the access list. resultHandler joins both and delivers the final result on
// resCh. Statement order is load-bearing: postTxState must be copied after
// the pre-tx transition but before any transaction executes.
func (p *ParallelStateProcessor) Process(block *types.Block, stateTransition *state.BALStateTransition, statedb *state.StateDB, cfg vm.Config) (*ProcessResultWithMetrics, error) {
	var (
		header           = block.Header()
		resCh            = make(chan *ProcessResultWithMetrics)
		signer           = types.MakeSigner(p.chainConfig(), header.Number, header.Time)
		rootCalcResultCh = make(chan stateRootCalculationResult)
		context          vm.BlockContext
		txResCh          = make(chan txExecResult)
		pStart           = time.Now()
		tExecStart       time.Time
		tPreprocess      time.Duration // time to create a set of prestates for parallel transaction execution
	)
	balTracer, hooks := NewBlockAccessListTracer()
	tracingStateDB := state.NewHookedState(statedb, hooks)
	// TODO: figure out exactly why we need to set the hooks on the TracingStateDB and the vm.Config
	cfg.Tracer = hooks
	context = NewEVMBlockContext(header, p.chain, nil)
	evm := vm.NewEVM(context, tracingStateDB, p.chainConfig(), cfg)
	// Pre-tx system transitions (EIP-4788 beacon root, EIP-2935 parent hash).
	if beaconRoot := block.BeaconRoot(); beaconRoot != nil {
		ProcessBeaconBlockRoot(*beaconRoot, evm)
	}
	if p.chainConfig().IsPrague(block.Number(), block.Time()) || p.chainConfig().IsVerkle(block.Number(), block.Time()) {
		ProcessParentBlockHash(block.ParentHash(), evm)
	}
	// Validate the pre-tx system transition against BAL index 0.
	diff, stateReads := balTracer.builder.FinalizedIdxChanges()
	if !statedb.BlockAccessList().ValidateStateDiff(0, diff) {
		return nil, fmt.Errorf("BAL validation failure")
	}
	// compute the post-tx state prestate (before applying final block system calls and eip-4895 withdrawals)
	// the post-tx state transition is verified by resultHandler
	postTxState := statedb.Copy()
	tPreprocess = time.Since(pStart)

	// execute transactions and state root calculation in parallel
	tExecStart = time.Now()
	go p.resultHandler(block, stateReads, postTxState, tExecStart, txResCh, rootCalcResultCh, resCh)
	var workers errgroup.Group
	workers.SetLimit(runtime.NumCPU())
	// Each worker executes against its own copy of the shared prestate, so
	// transactions cannot observe each other's writes.
	startingState := statedb.Copy()
	for i, tx := range block.Transactions() {
		tx := tx
		i := i
		workers.Go(func() error {
			res := p.execTx(block, tx, i, startingState.Copy(), signer)
			txResCh <- *res
			return nil
		})
	}
	go p.calcAndVerifyRoot(statedb, block, stateTransition, rootCalcResultCh)
	// resultHandler drains txResCh and rootCalcResultCh before sending here.
	res := <-resCh
	if res.ProcessResult.Error != nil {
		return nil, res.ProcessResult.Error
	}
	// TODO: remove preprocess metric ?
	res.PreProcessTime = tPreprocess
	return res, nil
}

406
core/state/bal_reader.go Normal file
View file

@ -0,0 +1,406 @@
package state
import (
"context"
"fmt"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/types/bal"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/log"
"github.com/holiman/uint256"
"sync"
)
// TODO: probably unnecessary to cache the resolved state object here as it will already be in the db cache?
// ^ experiment with the performance of keeping this as-is vs just using the db cache.

// prestateResolver asynchronously fetches the prestate state accounts of addresses
// which are reported as modified in EIP-7928 access lists in order to produce the full
// updated state account (including fields that weren't modified in the BAL) for the
// state root update.
type prestateResolver struct {
	// inProgress maps each scheduled address to a channel that is closed once
	// its account has been resolved. It is written only by schedule; account
	// reads it without locking afterwards.
	inProgress map[common.Address]chan struct{}
	// resolved holds common.Address -> *types.StateAccount entries for
	// accounts whose retrieval has completed.
	resolved sync.Map
	// ctx/cancel abort resolution goroutines that have not yet started
	// fetching (see stop).
	ctx    context.Context
	cancel func()
}
// schedule begins the retrieval of a set of state accounts running on
// a background goroutine.
func (p *prestateResolver) schedule(r Reader, addrs []common.Address) {
	p.ctx, p.cancel = context.WithCancel(context.Background())
	// Populate the whole map before any goroutine starts, since the
	// goroutines (and later account calls) read it without locking.
	p.inProgress = make(map[common.Address]chan struct{}, len(addrs))
	for _, addr := range addrs {
		p.inProgress[addr] = make(chan struct{})
	}
	// TODO: probably we can retrieve these on a single go-routine
	// the transaction execution will also load them
	for _, addr := range addrs {
		go func(target common.Address, done chan struct{}) {
			// Bail out if the resolver was stopped before we started fetching.
			select {
			case <-p.ctx.Done():
				return
			default:
			}
			acct, err := r.Account(target)
			if err != nil {
				// TODO: what do here?
			}
			p.resolved.Store(target, acct)
			close(done)
		}(addr, p.inProgress[addr])
	}
}
// stop cancels resolution goroutines that have not yet begun fetching their
// account. Channels for such abandoned addresses are never closed, so account
// must not be called for them after stop.
func (p *prestateResolver) stop() {
	p.cancel()
}
// account returns the state account for the given address, blocking if it is
// still being resolved from disk. It returns nil if the address was never
// scheduled, or if no resolved entry exists for it.
//
// NOTE(review): if stop cancelled the resolution before the fetch started,
// the channel is never closed and this call blocks forever — confirm callers
// never race account against stop.
func (p *prestateResolver) account(addr common.Address) *types.StateAccount {
	ch, ok := p.inProgress[addr]
	if !ok {
		return nil
	}
	// A single-case select without default is just a blocking receive
	// (staticcheck S1000); wait for the background resolution to finish.
	<-ch
	res, exist := p.resolved.Load(addr)
	if !exist {
		return nil
	}
	return res.(*types.StateAccount)
}
// initObjFromDiff builds the post-state object for addr by layering the BAL
// account mutations on top of the given prestate account. A nil prestate is
// treated as a brand-new empty account; a nil diff yields the prestate
// unchanged. Returns nil if the resulting account is empty.
func (r *BALReader) initObjFromDiff(db *StateDB, addr common.Address, a *types.StateAccount, diff *bal.AccountMutations) *stateObject {
	var account *types.StateAccount
	if a != nil {
		account = a.Copy()
	} else {
		account = &types.StateAccount{
			Nonce:    0,
			Balance:  uint256.NewInt(0),
			Root:     types.EmptyRootHash,
			CodeHash: types.EmptyCodeHash[:],
		}
	}
	if diff == nil {
		return newObject(db, addr, account)
	}
	// Scalar mutations are folded into the account before object creation.
	if diff.Nonce != nil {
		account.Nonce = *diff.Nonce
	}
	if diff.Balance != nil {
		account.Balance = new(uint256.Int).Set(diff.Balance)
	}
	obj := newObject(db, addr, account)
	// Code and storage mutations are applied on the state object itself.
	if diff.Code != nil {
		obj.setCode(crypto.Keccak256Hash(diff.Code), diff.Code)
	}
	for key, val := range diff.StorageWrites {
		obj.pendingStorage[common.Hash(key)] = common.Hash(val)
	}
	if obj.empty() {
		return nil
	}
	return obj
}
// BALReader provides methods for reading account state from a block access
// list. State values returned from the Reader methods must not be modified.
type BALReader struct {
	// block is the block whose access list is being read.
	block *types.Block
	// accesses indexes the block's access-list entries by account address.
	accesses map[common.Address]*bal.AccountAccess
	// prestateReader asynchronously resolves the pre-block accounts of every
	// address the access list reports as modified.
	prestateReader prestateResolver
}
// NewBALReader constructs a new reader from an access list. db is expected to
// have been instantiated with a reader. It also starts background resolution
// of the prestate of every account the access list reports as modified.
func NewBALReader(block *types.Block, reader Reader) *BALReader {
	r := &BALReader{accesses: make(map[common.Address]*bal.AccountAccess), block: block}
	// Index entries by address via the slice rather than taking the address
	// of the range variable: this avoids copying each AccountAccess per
	// iteration and is safe regardless of the toolchain's per-iteration
	// loop-variable semantics (pre-Go-1.22, &acctDiff aliased one variable).
	accesses := *block.Body().AccessList
	for i := range accesses {
		r.accesses[accesses[i].Address] = &accesses[i]
	}
	r.prestateReader.schedule(reader, r.ModifiedAccounts())
	return r
}
// ModifiedAccounts returns a list of all accounts with mutations in the access list.
func (r *BALReader) ModifiedAccounts() []common.Address {
	var modified []common.Address
	for addr, access := range r.accesses {
		// An account counts as modified if any mutation category is non-empty.
		mutated := len(access.NonceChanges) != 0 ||
			len(access.CodeChanges) != 0 ||
			len(access.StorageChanges) != 0 ||
			len(access.BalanceChanges) != 0
		if mutated {
			modified = append(modified, addr)
		}
	}
	return modified
}
// logReadsDiff logs the per-slot difference between the storage reads
// observed during execution (computedReads) and the reads reported in the
// BAL for one account: slots read but not reported, and slots reported but
// never read.
func logReadsDiff(idx int, address common.Address, computedReads map[common.Hash]struct{}, expectedReads []*bal.EncodedStorage) {
	expectedReadsMap := make(map[common.Hash]struct{}, len(expectedReads))
	for _, er := range expectedReads {
		expectedReadsMap[er.ToHash()] = struct{}{}
	}
	// Build the union of both sets so each slot is classified exactly once.
	allReads := make(map[common.Hash]struct{}, len(expectedReadsMap)+len(computedReads))
	for er := range expectedReadsMap {
		allReads[er] = struct{}{}
	}
	for cr := range computedReads {
		allReads[cr] = struct{}{}
	}
	var missingExpected, missingComputed []common.Hash
	for storage := range allReads {
		_, hasComputed := computedReads[storage]
		_, hasExpected := expectedReadsMap[storage]
		if hasComputed && !hasExpected {
			missingExpected = append(missingExpected, storage)
		}
		if !hasComputed && hasExpected {
			missingComputed = append(missingComputed, storage)
		}
	}
	// log.Error context must be key/value pairs; the original calls passed
	// the slot slice without a key, producing malformed log context.
	if len(missingExpected) > 0 {
		log.Error("read storage slots which were not reported in the BAL", "index", idx, "address", address, "slots", missingExpected)
	}
	if len(missingComputed) > 0 {
		log.Error("did not read storage slots which were reported in the BAL", "index", idx, "address", address, "slots", missingComputed)
	}
}
// ValidateStateReads checks that the storage reads observed during execution
// (computedReads) exactly match the read set the access list reports for
// every touched account, logging any mismatch. It returns false on the first
// discrepancy.
//
// NOTE: this mutates computedReads — slots the BAL reports as written are
// deleted from each per-account read set before comparison.
func (r *BALReader) ValidateStateReads(idx int, computedReads bal.StateAccesses) bool {
	// 1. remove any slots from 'allReads' which were written
	// 2. validate that the read set in the BAL matches 'allReads' exactly
	for addr, reads := range computedReads {
		// len(transactions)+2 presumably spans all tx indices plus the
		// pre/post-block system-call slots — TODO confirm the index scheme.
		balAcctDiff := r.readAccountDiff(addr, len(r.block.Transactions())+2)
		if balAcctDiff != nil {
			// A slot that was written does not need to appear as a read.
			for writeSlot := range balAcctDiff.StorageWrites {
				delete(reads, writeSlot)
			}
		}
		if _, ok := r.accesses[addr]; !ok {
			log.Error(fmt.Sprintf("account %x was accessed during execution but is not present in the access list", addr))
			return false
		}
		expectedReads := r.accesses[addr].StorageReads
		// Equal length plus one-directional containment implies set equality.
		if len(reads) != len(expectedReads) {
			logReadsDiff(idx, addr, reads, expectedReads)
			return false
		}
		for _, slot := range expectedReads {
			if _, ok := reads[slot.ToHash()]; !ok {
				log.Error("expected read is missing from BAL")
				return false
			}
		}
	}
	return true
}
// changesAt returns all state changes occurring at the given index.
func (r *BALReader) changesAt(idx int) *bal.StateDiff {
	// Keyed composite literal: go vet flags unkeyed fields of imported
	// struct types.
	res := &bal.StateDiff{Mutations: make(map[common.Address]*bal.AccountMutations)}
	for addr := range r.accesses {
		// Only record accounts that actually changed at this index.
		if accountChanges := r.accountChangesAt(addr, idx); accountChanges != nil {
			res.Mutations[addr] = accountChanges
		}
	}
	return res
}
// accountChangesAt returns the state changes of an account at a given index,
// or nil if there are no changes.
//
// Each change list is scanned from the end; a change is only taken when its
// TxIdx equals idx exactly (this is a point lookup, unlike readAccountDiff
// which accumulates all changes up to an index).
func (r *BALReader) accountChangesAt(addr common.Address, idx int) *bal.AccountMutations {
	acct, exist := r.accesses[addr]
	if !exist {
		return nil
	}
	var res bal.AccountMutations
	// TODO: remove the reverse iteration here to clean the code up
	// NOTE(review): unlike the code/nonce loops below, this loop does not
	// break after a match at idx — harmless only if at most one balance
	// change exists per tx index; confirm the BAL encoding guarantees that.
	for i := len(acct.BalanceChanges) - 1; i >= 0; i-- {
		if acct.BalanceChanges[i].TxIdx == uint16(idx) {
			res.Balance = acct.BalanceChanges[i].Balance
		}
		if acct.BalanceChanges[i].TxIdx < uint16(idx) {
			break
		}
	}
	for i := len(acct.CodeChanges) - 1; i >= 0; i-- {
		if acct.CodeChanges[i].TxIdx == uint16(idx) {
			res.Code = acct.CodeChanges[i].Code
			break
		}
		if acct.CodeChanges[i].TxIdx < uint16(idx) {
			break
		}
	}
	for i := len(acct.NonceChanges) - 1; i >= 0; i-- {
		if acct.NonceChanges[i].TxIdx == uint16(idx) {
			res.Nonce = &acct.NonceChanges[i].Nonce
			break
		}
		if acct.NonceChanges[i].TxIdx < uint16(idx) {
			break
		}
	}
	for i := len(acct.StorageChanges) - 1; i >= 0; i-- {
		// Lazily allocate the map; it is reset to nil below if this slot
		// contributed nothing and no earlier slot wrote either.
		if res.StorageWrites == nil {
			res.StorageWrites = make(map[common.Hash]common.Hash)
		}
		slotWrites := acct.StorageChanges[i]
		for j := len(slotWrites.Accesses) - 1; j >= 0; j-- {
			if slotWrites.Accesses[j].TxIdx == uint16(idx) {
				res.StorageWrites[slotWrites.Slot.ToHash()] = slotWrites.Accesses[j].ValueAfter.ToHash()
				break
			}
			if slotWrites.Accesses[j].TxIdx < uint16(idx) {
				break
			}
		}
		if len(res.StorageWrites) == 0 {
			res.StorageWrites = nil
		}
	}
	// Nil result signals "no changes at this index" to changesAt.
	if res.Code == nil && res.Nonce == nil && len(res.StorageWrites) == 0 && res.Balance == nil {
		return nil
	}
	return &res
}
// isModified reports whether the access list records any mutation (storage,
// balance, code or nonce) for the given address.
func (r *BALReader) isModified(addr common.Address) bool {
	if access, ok := r.accesses[addr]; ok {
		return len(access.StorageChanges) > 0 ||
			len(access.BalanceChanges) > 0 ||
			len(access.CodeChanges) > 0 ||
			len(access.NonceChanges) > 0
	}
	return false
}
// readAccount returns the post-state object of addr at the given index,
// produced by applying the accumulated BAL mutations (through idx) on top of
// the asynchronously-resolved block prestate. It may block on prestate
// resolution, and returns nil if the resulting account is empty.
func (r *BALReader) readAccount(db *StateDB, addr common.Address, idx int) *stateObject {
	diff := r.readAccountDiff(addr, idx)
	prestate := r.prestateReader.account(addr)
	return r.initObjFromDiff(db, addr, prestate, diff)
}
// readAccountDiff returns the accumulated state changes of an account up
// through, and including the given index.
func (r *BALReader) readAccountDiff(addr common.Address, idx int) *bal.AccountMutations {
	access, exist := r.accesses[addr]
	if !exist {
		return nil
	}
	var (
		res   bal.AccountMutations
		bound = uint16(idx)
	)
	// The change lists are ordered by transaction index, so the last entry
	// with TxIdx <= bound holds the accumulated value for each field.
	for _, change := range access.BalanceChanges {
		if change.TxIdx > bound {
			break
		}
		res.Balance = change.Balance
	}
	for _, change := range access.CodeChanges {
		if change.TxIdx > bound {
			break
		}
		res.Code = change.Code
	}
	for i := 0; i < len(access.NonceChanges) && access.NonceChanges[i].TxIdx <= bound; i++ {
		res.Nonce = &access.NonceChanges[i].Nonce
	}
	if len(access.StorageChanges) > 0 {
		res.StorageWrites = make(map[common.Hash]common.Hash)
		for _, slotWrites := range access.StorageChanges {
			for _, write := range slotWrites.Accesses {
				if write.TxIdx > bound {
					break
				}
				res.StorageWrites[slotWrites.Slot.ToHash()] = write.ValueAfter.ToHash()
			}
		}
	}
	return &res
}
// mutationsLogfmt flattens an AccountMutations into log-context key/value
// pairs, with every key prefixed by prefix.
func mutationsLogfmt(prefix string, mutations *bal.AccountMutations) (logs []interface{}) {
	if mutations.Code != nil {
		logs = append(logs, fmt.Sprintf("%s-code", prefix), fmt.Sprintf("%x", mutations.Code))
	}
	if mutations.Balance != nil {
		logs = append(logs, fmt.Sprintf("%s-balance", prefix), mutations.Balance.String())
	}
	if mutations.Nonce != nil {
		logs = append(logs, fmt.Sprintf("%s-nonce", prefix), mutations.Nonce)
	}
	// The original storage branch called Sprintf("%s-storage-write-key") with
	// no argument, yielding "%!s(MISSING)..." keys; pass the prefix through.
	for key, val := range mutations.StorageWrites {
		logs = append(logs,
			fmt.Sprintf("%s-storage-write-key", prefix), key,
			fmt.Sprintf("%s-storage-write-value", prefix), val)
	}
	return logs
}
// logfmtMutationsDiff produces log-context pairs describing the accounts
// present in exactly one of the two mutation sets (local vs remote).
func logfmtMutationsDiff(local, remote map[common.Address]*bal.AccountMutations) (logs []interface{}) {
	keys := make(map[common.Address]struct{}, len(local)+len(remote))
	for addr := range local {
		keys[addr] = struct{}{}
	}
	for addr := range remote {
		keys[addr] = struct{}{}
	}
	for addr := range keys {
		localMut, hasLocal := local[addr]
		remoteMut, hasRemote := remote[addr]
		switch {
		case hasLocal && !hasRemote:
			logs = append(logs, mutationsLogfmt(fmt.Sprintf("local-%x", addr), localMut)...)
		case hasRemote && !hasLocal:
			logs = append(logs, mutationsLogfmt(fmt.Sprintf("remote-%x", addr), remoteMut)...)
		}
	}
	return logs
}
// ValidateStateDiff checks the computed state diff against the diff the
// access list reports at the given index, logging any mismatch. It returns
// false if they are not equal.
func (r *BALReader) ValidateStateDiff(idx int, computedDiff *bal.StateDiff) bool {
	balChanges := r.changesAt(idx)
	for addr, state := range balChanges.Mutations {
		computedAccountDiff, ok := computedDiff.Mutations[addr]
		if !ok {
			// TODO: print out the full fields here
			log.Error("BAL contained account which wasn't present in computed state diff", "address", addr)
			return false
		}
		if !state.Eq(computedAccountDiff) {
			state.LogDiff(addr, computedAccountDiff)
			return false
		}
	}
	// Every BAL account matched, so a length mismatch means the computed diff
	// has extra accounts.
	if len(balChanges.Mutations) != len(computedDiff.Mutations) {
		// Spread the key/value pairs: passing the slice as one argument left
		// log.Error with a single odd, unusable context value.
		log.Error("computed state diff contained accounts that weren't reported in BAL", logfmtMutationsDiff(computedDiff.Mutations, balChanges.Mutations)...)
		return false
	}
	return true
}

View file

@ -0,0 +1,570 @@
package state
import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/types/bal"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/trie/trienode"
"github.com/holiman/uint256"
"golang.org/x/sync/errgroup"
"maps"
"sync"
"sync/atomic"
"time"
)
// BALStateTransition is responsible for performing the state root update
// and commit for EIP 7928 access-list-containing blocks. An instance of
// this object is only used for a single block.
type BALStateTransition struct {
	// accessList exposes the block's BAL mutations and prestate resolver.
	accessList *BALReader
	db         Database
	// reader reads parent state (used for origin storage values).
	reader Reader
	// stateTrie is the account trie opened at parentRoot.
	stateTrie  Trie
	parentRoot common.Hash
	// the computed state root of the block
	rootHash common.Hash
	// the state modifications performed by the block
	diffs map[common.Address]*bal.AccountMutations
	// a map of common.Address -> *types.StateAccount containing the block
	// prestate of all accounts that will be modified
	prestates sync.Map
	// postStates holds the post-block accounts, filled by IntermediateRoot
	// and consumed by commitAccount.
	postStates map[common.Address]*types.StateAccount
	// a map of common.Address -> Trie containing the account tries for all
	// accounts with mutated storage
	tries sync.Map //map[common.Address]Trie
	// deletions marks addresses removed from the trie in this block.
	deletions map[common.Address]struct{}
	// originStorages holds the pre-block value of every slot written during
	// the block; populated asynchronously by loadOriginStorages.
	originStorages map[common.Address]map[common.Hash]common.Hash
	// originStoragesWG gates consumers of originStorages on the background load.
	originStoragesWG sync.WaitGroup
	// Counters feeding the account/storage meters during commit. The storage
	// counters are atomic because they are bumped from concurrent goroutines.
	// NOTE(review): accountDeleted/accountUpdated are only ever read in this
	// file — confirm they are meant to be incremented somewhere.
	accountDeleted int64
	accountUpdated int64
	storageDeleted atomic.Int64
	storageUpdated atomic.Int64
	// stateUpdate caches the resulting state update of the commit.
	stateUpdate *stateUpdate
	// metrics accumulates phase timings; see BALStateTransitionMetrics.
	metrics BALStateTransitionMetrics
	// err records a failure during the transition (see setError).
	err error
}
// Metrics returns a pointer to the timing metrics gathered during the state
// transition; the values are meaningful once IntermediateRoot/Commit have run.
func (s *BALStateTransition) Metrics() *BALStateTransitionMetrics {
	return &s.metrics
}
// BALStateTransitionMetrics collects wall-clock timings for the hashing and
// commit phases of a BAL-based state transition.
type BALStateTransitionMetrics struct {
	// trie hashing metrics
	AccountUpdate         time.Duration
	StatePrefetch         time.Duration
	StateUpdate           time.Duration
	StateHash             time.Duration
	OriginStorageLoadTime time.Duration
	// commit metrics
	AccountCommits  time.Duration
	StorageCommits  time.Duration
	SnapshotCommits time.Duration
	TrieDBCommits   time.Duration
	TotalCommitTime time.Duration
}
// NewBALStateTransition creates a state-transition helper for a single
// BAL-carrying block, opening a state reader and the account trie at the
// parent root. It returns an error if either cannot be opened.
func NewBALStateTransition(accessList *BALReader, db Database, parentRoot common.Hash) (*BALStateTransition, error) {
	reader, err := db.Reader(parentRoot)
	if err != nil {
		return nil, err
	}
	stateTrie, err := db.OpenTrie(parentRoot)
	if err != nil {
		return nil, err
	}
	// Fields not listed below (rootHash, the sync.Maps, the waitgroup,
	// counters and stateUpdate) intentionally start at their zero values.
	return &BALStateTransition{
		accessList:     accessList,
		db:             db,
		reader:         reader,
		stateTrie:      stateTrie,
		parentRoot:     parentRoot,
		diffs:          make(map[common.Address]*bal.AccountMutations),
		postStates:     make(map[common.Address]*types.StateAccount),
		deletions:      make(map[common.Address]struct{}),
		originStorages: make(map[common.Address]map[common.Hash]common.Hash),
	}, nil
}
// Error returns any error recorded via setError during the state transition.
func (s *BALStateTransition) Error() error {
	return s.err
}
// setError records the first error encountered during the state transition;
// later errors are dropped so the original failure is preserved for Error.
//
// The original condition (s.err != nil) could never be true on first call —
// s.err starts out nil — so no error was ever recorded; it must be inverted.
func (s *BALStateTransition) setError(err error) {
	if s.err == nil {
		s.err = err
	}
}
// TODO: refresh my knowledge of the storage-clearing EIP and ensure that my assumptions around
// an empty account which contains storage are valid here.
//
// isAccountDeleted checks whether the state account was deleted in this block. Post selfdestruct-removal,
// deletions can only occur if an account which has a balance becomes the target of a CREATE2 initcode
// which calls SENDALL, clearing the account and marking it for deletion.
//
// prestate must be non-nil (callers substitute an empty state account for a
// missing prestate); mutations must be non-nil as well.
func isAccountDeleted(prestate *types.StateAccount, mutations *bal.AccountMutations) bool {
	// TODO: figure out how to simplify this method
	// Any post-state code, nonzero nonce, or storage write means the account
	// still exists after the block.
	if mutations.Code != nil && len(mutations.Code) != 0 {
		return false
	}
	if mutations.Nonce != nil && *mutations.Nonce != 0 {
		return false
	}
	if mutations.StorageWrites != nil && len(mutations.StorageWrites) > 0 {
		return false
	}
	if mutations.Balance != nil {
		if mutations.Balance.IsZero() {
			// Deleted only if the balance dropped to zero from a previously
			// funded, nonce-0, code-less account. A prestate that was already
			// zero-balance is not treated as a deletion here.
			if prestate.Nonce != 0 || prestate.Balance.IsZero() || common.BytesToHash(prestate.CodeHash) != types.EmptyCodeHash {
				return false
			}
			// consider an empty account with storage to be deleted, so we don't check root here
			return true
		}
	}
	return false
}
// updateAccount applies the block state mutations to a given account,
// returning the updated state account and the new code (nil if the account
// code did not change). The prestate for addr must already be present in
// s.prestates.
func (s *BALStateTransition) updateAccount(addr common.Address) (*types.StateAccount, []byte) {
	prestate, _ := s.prestates.Load(addr)
	acct := prestate.(*types.StateAccount).Copy()
	diff := s.diffs[addr]
	if diff.Nonce != nil {
		acct.Nonce = *diff.Nonce
	}
	if diff.Balance != nil {
		acct.Balance = new(uint256.Int).Set(diff.Balance)
	}
	// Accounts with storage writes had a storage trie scheduled; fold in its
	// updated root.
	if tr, ok := s.tries.Load(addr); ok {
		acct.Root = tr.(Trie).Hash()
	}
	return acct, diff.Code
}
// commitAccount builds the accountUpdate for addr from its post-state and
// diff, and — when the account has storage writes — commits its storage trie,
// returning the dirty storage trie nodes. The returned update carries
// slim-RLP account data, origin data, code change and storage origin values.
func (s *BALStateTransition) commitAccount(addr common.Address) (*accountUpdate, *trienode.NodeSet, error) {
	var (
		// encode RLP-encodes a storage value in canonical trimmed form;
		// the zero hash encodes as nil (a deletion marker).
		encode = func(val common.Hash) []byte {
			if val == (common.Hash{}) {
				return nil
			}
			blob, _ := rlp.EncodeToBytes(common.TrimLeftZeroes(val[:]))
			return blob
		}
	)
	op := &accountUpdate{
		address: addr,
		data:    types.SlimAccountRLP(*s.postStates[addr]), // TODO: cache the updated state account somewhere
	}
	if prestate, exist := s.prestates.Load(addr); exist {
		prestate := prestate.(*types.StateAccount)
		op.origin = types.SlimAccountRLP(*prestate)
	}
	if s.diffs[addr].Code != nil {
		code := contractCode{
			hash: crypto.Keccak256Hash(s.diffs[addr].Code),
			blob: s.diffs[addr].Code,
		}
		if op.origin == nil {
			code.originHash = types.EmptyCodeHash
		} else {
			// NOTE(review): this hashes the slim account RLP, not the previous
			// contract code — presumably originHash should be the prestate's
			// CodeHash; confirm against accountUpdate's consumers.
			code.originHash = crypto.Keccak256Hash(op.origin)
		}
		op.code = &code
	}
	// No storage writes: account-only update, no storage trie to commit.
	if len(s.diffs[addr].StorageWrites) == 0 {
		return op, nil, nil
	}
	op.storages = make(map[common.Hash][]byte)
	op.storagesOriginByHash = make(map[common.Hash][]byte)
	op.storagesOriginByKey = make(map[common.Hash][]byte)
	for key, value := range s.diffs[addr].StorageWrites {
		hash := crypto.Keccak256Hash(key[:])
		op.storages[hash] = encode(common.Hash(value))
		// Origin values were preloaded by loadOriginStorages.
		origin := encode(s.originStorages[addr][common.Hash(key)])
		op.storagesOriginByHash[hash] = origin
		op.storagesOriginByKey[common.Hash(key)] = origin
	}
	tr, _ := s.tries.Load(addr)
	root, nodes := tr.(Trie).Commit(false)
	s.postStates[addr].Root = root
	return op, nodes, nil
}
// CommitWithUpdate flushes mutated trie nodes and state accounts to disk.
// It returns the committed state root and the resulting stateUpdate.
//
// NOTE(review): the deleteEmptyObjects parameter is unused in this body —
// confirm whether it is needed (it is part of the BlockStateTransition
// interface).
func (s *BALStateTransition) CommitWithUpdate(block uint64, deleteEmptyObjects bool, noStorageWiping bool) (common.Hash, *stateUpdate, error) {
	// 1) create a stateUpdate object
	// Commit objects to the trie, measuring the elapsed time
	var (
		commitStart             = time.Now()
		accountTrieNodesUpdated int
		accountTrieNodesDeleted int
		storageTrieNodesUpdated int
		storageTrieNodesDeleted int
		lock                    sync.Mutex                                   // protect two maps below
		nodes                   = trienode.NewMergedNodeSet()                // aggregated trie nodes
		updates                 = make(map[common.Hash]*accountUpdate, len(s.diffs)) // aggregated account updates
		// merge aggregates the dirty trie nodes into the global set.
		//
		// Given that some accounts may be destroyed and then recreated within
		// the same block, it's possible that a node set with the same owner
		// may already exist. In such cases, these two sets are combined, with
		// the later one overwriting the previous one if any nodes are modified
		// or deleted in both sets.
		//
		// merge run concurrently across all the state objects and account trie.
		merge = func(set *trienode.NodeSet) error {
			if set == nil {
				return nil
			}
			lock.Lock()
			defer lock.Unlock()
			updates, deletes := set.Size()
			if set.Owner == (common.Hash{}) {
				accountTrieNodesUpdated += updates
				accountTrieNodesDeleted += deletes
			} else {
				storageTrieNodesUpdated += updates
				storageTrieNodesDeleted += deletes
			}
			return nodes.Merge(set)
		}
	)
	// NOTE(review): this collects the prestate of *every* loaded account, not
	// only destructed ones; handleDestruction filters by s.deletions.
	destructedPrestates := make(map[common.Address]*types.StateAccount)
	s.prestates.Range(func(key, value any) bool {
		addr := key.(common.Address)
		acct := value.(*types.StateAccount)
		destructedPrestates[addr] = acct
		return true
	})
	deletes, delNodes, err := handleDestruction(s.db, s.stateTrie, noStorageWiping, maps.Keys(s.deletions), destructedPrestates)
	if err != nil {
		return common.Hash{}, nil, err
	}
	for _, set := range delNodes {
		if err := merge(set); err != nil {
			return common.Hash{}, nil, err
		}
	}
	// Handle all state updates afterwards, concurrently to one another to shave
	// off some milliseconds from the commit operation. Also accumulate the code
	// writes to run in parallel with the computations.
	var (
		start   = time.Now()
		root    common.Hash
		workers errgroup.Group
	)
	// Schedule the account trie first since that will be the biggest, so give
	// it the most time to crunch.
	//
	// TODO(karalabe): This account trie commit is *very* heavy. 5-6ms at chain
	// heads, which seems excessive given that it doesn't do hashing, it just
	// shuffles some data. For comparison, the *hashing* at chain head is 2-3ms.
	// We need to investigate what's happening as it seems something's wonky.
	// Obviously it's not an end of the world issue, just something the original
	// code didn't anticipate for.
	workers.Go(func() error {
		// Write the account trie changes, measuring the amount of wasted time
		newroot, set := s.stateTrie.Commit(true)
		root = newroot
		if err := merge(set); err != nil {
			return err
		}
		s.metrics.AccountCommits = time.Since(start)
		return nil
	})
	// Origin storage values must be fully loaded before commitAccount reads
	// s.originStorages below.
	s.originStoragesWG.Wait()
	// Schedule each of the storage tries that need to be updated, so they can
	// run concurrently to one another.
	//
	// TODO(karalabe): Experimentally, the account commit takes approximately the
	// same time as all the storage commits combined, so we could maybe only have
	// 2 threads in total. But that kind of depends on the account commit being
	// more expensive than it should be, so let's fix that and revisit this todo.
	for addr, _ := range s.diffs {
		if _, isDeleted := s.deletions[addr]; isDeleted {
			continue
		}
		address := addr
		// Run the storage updates concurrently to one another
		workers.Go(func() error {
			// Write any storage changes in the state object to its storage trie
			update, set, err := s.commitAccount(address)
			if err != nil {
				return err
			}
			if err := merge(set); err != nil {
				return err
			}
			lock.Lock()
			updates[crypto.Keccak256Hash(address[:])] = update
			s.metrics.StorageCommits = time.Since(start) // overwrite with the longest storage commit runtime
			lock.Unlock()
			return nil
		})
	}
	// Wait for everything to finish and update the metrics
	if err := workers.Wait(); err != nil {
		return common.Hash{}, nil, err
	}
	accountUpdatedMeter.Mark(s.accountUpdated)
	storageUpdatedMeter.Mark(s.storageUpdated.Load())
	accountDeletedMeter.Mark(s.accountDeleted)
	storageDeletedMeter.Mark(s.storageDeleted.Load())
	accountTrieUpdatedMeter.Mark(int64(accountTrieNodesUpdated))
	accountTrieDeletedMeter.Mark(int64(accountTrieNodesDeleted))
	storageTriesUpdatedMeter.Mark(int64(storageTrieNodesUpdated))
	storageTriesDeletedMeter.Mark(int64(storageTrieNodesDeleted))
	ret := newStateUpdate(noStorageWiping, s.parentRoot, root, block, deletes, updates, nodes)
	snapshotCommits, trieDBCommits, err := flushStateUpdate(s.db, block, ret)
	if err != nil {
		return common.Hash{}, nil, err
	}
	s.metrics.SnapshotCommits, s.metrics.TrieDBCommits = snapshotCommits, trieDBCommits
	s.metrics.TotalCommitTime = time.Since(commitStart)
	return root, ret, nil
}
// Commit flushes mutated trie nodes and state accounts to disk, returning the
// committed state root. It is a thin wrapper around CommitWithUpdate that
// discards the resulting stateUpdate.
func (s *BALStateTransition) Commit(block uint64, deleteEmptyObjects bool, noStorageWiping bool) (common.Hash, error) {
	root, _, err := s.CommitWithUpdate(block, deleteEmptyObjects, noStorageWiping)
	return root, err
}
// loadOriginStorages concurrently fetches the pre-block ("origin") value of
// every storage slot written during the block and records it in
// s.originStorages. Read failures are recorded via setError (leaving a zero
// origin value). The method returns once every scheduled fetch has reported.
func (s *BALStateTransition) loadOriginStorages() {
	lastIdx := len(s.accessList.block.Transactions()) + 1
	type originStorage struct {
		address common.Address
		key     common.Hash
		value   common.Hash
	}
	originStoragesCh := make(chan *originStorage)
	var pendingStorageCount int
	for _, addr := range s.accessList.ModifiedAccounts() {
		diff := s.accessList.readAccountDiff(addr, lastIdx)
		pendingStorageCount += len(diff.StorageWrites)
		s.originStorages[addr] = make(map[common.Hash]common.Hash)
		for key := range diff.StorageWrites {
			// Shadow both loop variables for the closure (the original only
			// shadowed the key, leaving addr shared on pre-1.22 toolchains).
			addr, key := addr, key
			go func() {
				val, err := s.reader.Storage(addr, common.Hash(key))
				if err != nil {
					s.setError(err)
					// Fall through and deliver the zero value: the original
					// returned here without sending, which left the drain
					// loop below blocked forever on any read error.
				}
				originStoragesCh <- &originStorage{addr, common.Hash(key), val}
			}()
		}
	}
	// Drain exactly one result per scheduled fetch (a bare receive; the
	// original used a single-case select inside an infinite loop).
	for ; pendingStorageCount > 0; pendingStorageCount-- {
		res := <-originStoragesCh
		s.originStorages[res.address][res.key] = res.value
	}
}
// IntermediateRoot applies block state mutations and computes the updated state
// trie root. The boolean parameter (deleteEmpty in the interface) is ignored.
// The result is cached: subsequent calls return the previously computed root.
func (s *BALStateTransition) IntermediateRoot(_ bool) common.Hash {
	if s.rootHash != (common.Hash{}) {
		return s.rootHash
	}
	// State root calculation proceeds as follows:
	// 1 (a): load the prestate state accounts for addresses which were modified in the block
	// 1 (b): load the origin storage values for all slots which were modified during the block (this is needed for computing the stateUpdate)
	// 1 (c): update each mutated account, producing the post-block state object by applying the state mutations to the prestate (retrieved in 1a).
	// 1 (d): prefetch the intermediate trie nodes of the mutated state set from the account trie.
	//
	// 2: compute the post-state root of the account trie
	//
	// Steps 1/2 are performed sequentially, with steps 1a-d performed in parallel
	start := time.Now()
	lastIdx := len(s.accessList.block.Transactions()) + 1
	//1 (b): load the origin storage values for all slots which were modified during the block
	// (awaited via originStoragesWG by CommitWithUpdate, not here)
	s.originStoragesWG.Add(1)
	go func() {
		defer s.originStoragesWG.Done()
		s.loadOriginStorages()
		s.metrics.OriginStorageLoadTime = time.Since(start)
	}()
	var wg sync.WaitGroup
	// Accumulate the full per-account diffs up-front so the goroutines below
	// only read s.diffs.
	for _, addr := range s.accessList.ModifiedAccounts() {
		diff := s.accessList.readAccountDiff(addr, lastIdx)
		s.diffs[addr] = diff
	}
	for _, addr := range s.accessList.ModifiedAccounts() {
		wg.Add(1)
		address := addr
		go func() {
			defer wg.Done()
			// 1 (c): update each mutated account, producing the post-block state object by applying the state mutations to the prestate (retrieved in 1a).
			acct := s.accessList.prestateReader.account(address)
			diff := s.diffs[address]
			if acct == nil {
				acct = types.NewEmptyStateAccount()
			}
			s.prestates.Store(address, acct)
			if len(diff.StorageWrites) > 0 {
				tr, err := s.db.OpenStorageTrie(s.parentRoot, address, acct.Root, s.stateTrie)
				if err != nil {
					s.setError(err)
					return
				}
				s.tries.Store(address, tr)
				// Partition the writes into updates and deletions (zero value
				// means delete).
				var (
					updateKeys, updateValues [][]byte
					deleteKeys               [][]byte
				)
				for key, val := range diff.StorageWrites {
					if val != (common.Hash{}) {
						updateKeys = append(updateKeys, key[:])
						updateValues = append(updateValues, common.TrimLeftZeroes(val[:]))
						s.storageUpdated.Add(1)
					} else {
						deleteKeys = append(deleteKeys, key[:])
						s.storageDeleted.Add(1)
					}
				}
				if err := tr.UpdateStorageBatch(address, updateKeys, updateValues); err != nil {
					s.setError(err)
					return
				}
				for _, key := range deleteKeys {
					if err := tr.DeleteStorage(address, key); err != nil {
						s.setError(err)
						return
					}
				}
				hashStart := time.Now()
				tr.Hash()
				// NOTE(review): this field is written concurrently by every
				// storage-hashing goroutine and overwritten again by the
				// account-trie hash at the bottom of this function — a data
				// race under -race; confirm the intended semantics.
				s.metrics.StateHash = time.Since(hashStart)
			}
		}()
	}
	wg.Add(1)
	// 1 (d): prefetch the intermediate trie nodes of the mutated state set from the account trie.
	go func() {
		defer wg.Done()
		prefetchStart := time.Now()
		if err := s.stateTrie.PrefetchAccount(s.accessList.ModifiedAccounts()); err != nil {
			s.setError(err)
			return
		}
		s.metrics.StatePrefetch = time.Since(prefetchStart)
	}()
	wg.Wait()
	s.metrics.AccountUpdate = time.Since(start)
	// 2: compute the post-state root of the account trie
	stateUpdateStart := time.Now()
	for mutatedAddr, _ := range s.diffs {
		p, _ := s.prestates.Load(mutatedAddr)
		prestate := p.(*types.StateAccount)
		isDeleted := isAccountDeleted(prestate, s.diffs[mutatedAddr])
		if isDeleted {
			if err := s.stateTrie.DeleteAccount(mutatedAddr); err != nil {
				s.setError(err)
				return common.Hash{}
			}
			s.deletions[mutatedAddr] = struct{}{}
		} else {
			acct, code := s.updateAccount(mutatedAddr)
			if code != nil {
				codeHash := crypto.Keccak256Hash(code)
				acct.CodeHash = codeHash.Bytes()
				if err := s.stateTrie.UpdateContractCode(mutatedAddr, codeHash, code); err != nil {
					s.setError(err)
					return common.Hash{}
				}
			}
			if err := s.stateTrie.UpdateAccount(mutatedAddr, acct, len(code)); err != nil {
				s.setError(err)
				return common.Hash{}
			}
			s.postStates[mutatedAddr] = acct
		}
	}
	s.metrics.StateUpdate = time.Since(stateUpdateStart)
	stateTrieHashStart := time.Now()
	s.rootHash = s.stateTrie.Hash()
	s.metrics.StateHash = time.Since(stateTrieHashStart)
	return s.rootHash
}
// Preimages returns the trie key preimages accumulated during the transition.
// Currently unimplemented: it always returns an empty map.
func (s *BALStateTransition) Preimages() map[common.Hash][]byte {
	// TODO: implement this
	return make(map[common.Hash][]byte)
}

View file

@ -76,7 +76,7 @@ type Trie interface {
// be returned.
GetAccount(address common.Address) (*types.StateAccount, error)
// PrefetchAccount attempts to resolve specific accounts from the database
// PrefetchAccount attempts to schedule specific accounts from the database
// to accelerate subsequent trie operations.
PrefetchAccount([]common.Address) error
@ -85,7 +85,7 @@ type Trie interface {
// a trie.MissingNodeError is returned.
GetStorage(addr common.Address, key []byte) ([]byte, error)
// PrefetchStorage attempts to resolve specific storage slots from the database
// PrefetchStorage attempts to schedule specific storage slots from the database
// to accelerate subsequent trie operations.
PrefetchStorage(addr common.Address, keys [][]byte) error
@ -94,12 +94,18 @@ type Trie interface {
// in the trie with provided address.
UpdateAccount(address common.Address, account *types.StateAccount, codeLen int) error
// UpdateAccountBatch attempts to update a list accounts in the batch manner.
UpdateAccountBatch(addresses []common.Address, accounts []*types.StateAccount, _ []int) error
// UpdateStorage associates key with value in the trie. If value has length zero,
// any existing value is deleted from the trie. The value bytes must not be modified
// by the caller while they are stored in the trie. If a node was not found in the
// database, a trie.MissingNodeError is returned.
UpdateStorage(addr common.Address, key, value []byte) error
// UpdateStorageBatch attempts to update a list storages in the batch manner.
UpdateStorageBatch(_ common.Address, keys [][]byte, values [][]byte) error
// DeleteAccount abstracts an account deletion from the trie.
DeleteAccount(address common.Address) error

View file

@ -29,7 +29,7 @@ import (
// nodeIterator is an iterator to traverse the entire state trie post-order,
// including all of the contract code and contract state tries. Preimage is
// required in order to resolve the contract address.
// required in order to resolve the contract address.
type nodeIterator struct {
state *StateDB // State being iterated
tr Trie // Primary account trie for traversal

View file

@ -381,7 +381,7 @@ func (ch nonceChange) copy() journalEntry {
}
func (ch codeChange) revert(s *StateDB) {
s.getStateObject(ch.account).setCode(crypto.Keccak256Hash(ch.prevCode), ch.prevCode)
s.getStateObject(ch.account).setCodeModified(crypto.Keccak256Hash(ch.prevCode), ch.prevCode)
}
func (ch codeChange) dirtied() *common.Address {

View file

@ -85,6 +85,8 @@ type stateObject struct {
// the contract is just created within the current transaction, or when the
// object was previously existent and is being deployed as a contract within
// the current transaction.
//
// the flag is set upon beginning of contract initcode execution, not when the code is actually deployed to the address.
newContract bool
}
@ -178,6 +180,7 @@ func (s *stateObject) GetCommittedState(key common.Hash) common.Hash {
if value, pending := s.pendingStorage[key]; pending {
return value
}
if value, cached := s.originStorage[key]; cached {
return value
}
@ -220,6 +223,7 @@ func (s *stateObject) SetState(key, value common.Hash) common.Hash {
if prev == value {
return prev
}
// New value is different, update and journal the change
s.db.journal.storageChange(s.address, key, prev, origin)
s.setState(key, value, origin)
@ -246,9 +250,10 @@ func (s *stateObject) finalise() {
// The slot is reverted to its original value, delete the entry
// to avoid thrashing the data structures.
delete(s.uncommittedStorage, key)
} else if exist {
// The slot is modified to another value and the slot has been
// tracked for commit, do nothing here.
// tracked for commit in uncommittedStorage.
} else {
// The slot is different from its original value and hasn't been
// tracked for commit yet.
@ -294,6 +299,7 @@ func (s *stateObject) updateTrie() (Trie, error) {
return s.trie, nil
}
}
// Retrieve a prefetcher-populated trie, or fall back to the database. This will
// block until all prefetch tasks are done, which are needed for witnesses even
// for unmodified state objects.
@ -325,8 +331,10 @@ func (s *stateObject) updateTrie() (Trie, error) {
// into a shortnode. This requires `B` to be resolved from disk.
// Whereas if the created node is handled first, then the collapse is avoided, and `B` is not resolved.
var (
deletions []common.Hash
used = make([]common.Hash, 0, len(s.uncommittedStorage))
deletions []common.Hash
used = make([]common.Hash, 0, len(s.uncommittedStorage))
updateKeys [][]byte
updateValues [][]byte
)
for key, origin := range s.uncommittedStorage {
// Skip noop changes, persist actual changes
@ -340,10 +348,8 @@ func (s *stateObject) updateTrie() (Trie, error) {
continue
}
if (value != common.Hash{}) {
if err := tr.UpdateStorage(s.address, key[:], common.TrimLeftZeroes(value[:])); err != nil {
s.db.setError(err)
return nil, err
}
updateKeys = append(updateKeys, key[:])
updateValues = append(updateValues, common.TrimLeftZeroes(value[:]))
s.db.StorageUpdated.Add(1)
} else {
deletions = append(deletions, key)
@ -351,6 +357,12 @@ func (s *stateObject) updateTrie() (Trie, error) {
// Cache the items for preloading
used = append(used, key) // Copy needed for closure
}
if len(updateKeys) > 0 {
if err := tr.UpdateStorageBatch(common.Address{}, updateKeys, updateValues); err != nil {
s.db.setError(err)
return nil, err
}
}
for _, key := range deletions {
if err := tr.DeleteStorage(s.address, key[:]); err != nil {
s.db.setError(err)
@ -583,13 +595,18 @@ func (s *stateObject) CodeSize() int {
// SetCode journals the account's previous code and installs the new code and
// code hash, returning the prior code bytes to the caller.
func (s *stateObject) SetCode(codeHash common.Hash, code []byte) (prev []byte) {
	prev = slices.Clone(s.code)
	s.db.journal.setCode(s.address, prev)
	// NOTE(review): setCode is immediately followed by setCodeModified, which
	// itself calls setCode — the first call looks redundant (possibly a merge
	// artifact); confirm and drop one of the two.
	s.setCode(codeHash, code)
	s.setCodeModified(codeHash, code)
	return prev
}
// setCode updates the in-memory code and the account's code hash without
// marking the code dirty (i.e. without scheduling it for persistence).
func (s *stateObject) setCode(codeHash common.Hash, code []byte) {
	s.code = code
	s.data.CodeHash = codeHash[:]
}
// setCodeModified sets the code and code hash and marks the code dirty so
// that it is written out when the state is committed.
func (s *stateObject) setCodeModified(codeHash common.Hash, code []byte) {
	s.setCode(codeHash, code)
	s.dirtyCode = true
}

View file

@ -20,6 +20,7 @@ package state
import (
"errors"
"fmt"
"iter"
"maps"
"slices"
"sort"
@ -65,6 +66,14 @@ func (m *mutation) isDelete() bool {
return m.typ == deletion
}
// BlockStateTransition is the subset of state functionality required to
// finalize and commit the state mutations of a block.
type BlockStateTransition interface {
	// CommitWithUpdate commits the state for the given block number and
	// additionally returns the computed state update describing the mutations.
	CommitWithUpdate(block uint64, deleteEmptyObjects bool, noStorageWiping bool) (common.Hash, *stateUpdate, error)
	// Commit writes the state mutations into the backing stores, returning
	// the resulting state root.
	Commit(block uint64, deleteEmptyObjects bool, noStorageWiping bool) (common.Hash, error)
	// IntermediateRoot computes the current root hash of the state trie.
	IntermediateRoot(deleteEmpty bool) common.Hash
	// Error returns any error recorded during state processing.
	Error() error
	// Preimages returns the collected hash preimages.
	Preimages() map[common.Hash][]byte
}
// StateDB structs within the ethereum protocol are used to store anything
// within the merkle trie. StateDBs take care of caching and storing
// nested states. It's the general query interface to retrieve:
@ -118,6 +127,13 @@ type StateDB struct {
// The tx context and all occurred logs in the scope of transaction.
thash common.Hash
txIndex int
// block access list modifications will be recorded with this index.
// 0 - state access before transaction execution
// 1 -> len(block txs) - state access of each transaction
// len(block txs) + 1 - state access after transaction execution.
balIndex int
logs map[common.Hash][]*types.Log
logSize uint
@ -139,6 +155,8 @@ type StateDB struct {
witness *stateless.Witness
witnessStats *stateless.WitnessStats
blockAccessList *BALReader
// Measurements gathered during execution for debugging purposes
AccountReads time.Duration
AccountHashes time.Duration
@ -167,6 +185,10 @@ type StateDB struct {
CodeLoadBytes int
}
// BlockAccessList returns the block access list reader attached to this
// StateDB, or nil if none has been set via SetBlockAccessList.
func (s *StateDB) BlockAccessList() *BALReader {
	return s.blockAccessList
}
// New creates a new state from a given trie.
func New(root common.Hash, db Database) (*StateDB, error) {
reader, err := db.Reader(root)
@ -297,6 +319,38 @@ func (s *StateDB) AddRefund(gas uint64) {
s.refund += gas
}
// SetBlockAccessList attaches a block access list reader to the StateDB.
// When set, account reads may be served from the access-list diffs (see
// getStateObject) instead of the underlying state reader.
func (s *StateDB) SetBlockAccessList(al *BALReader) {
	s.blockAccessList = al
}
// LoadModifiedPrestate concurrently loads the pre-block state of the given
// accounts from the state reader. It is used for accounts which appeared in
// the total state diff of a block and were also preexisting. Accounts that do
// not exist are omitted from the result.
//
// NOTE(review): read errors are silently treated as absence; a transient
// reader failure is indistinguishable from a non-existent account here.
func (s *StateDB) LoadModifiedPrestate(addrs []common.Address) (res map[common.Address]*types.StateAccount) {
	res = make(map[common.Address]*types.StateAccount, len(addrs))

	var (
		mu sync.Mutex // guards res across the loader goroutines
		wg sync.WaitGroup
	)
	for _, addr := range addrs {
		wg.Add(1)
		go func(addr common.Address) {
			defer wg.Done()
			acct, err := s.reader.Account(addr)
			if err != nil || acct == nil {
				// TODO: surface read errors instead of dropping them.
				return
			}
			mu.Lock()
			res[addr] = acct
			mu.Unlock()
		}(addr)
	}
	wg.Wait()
	return res
}
// SubRefund removes gas from the refund counter.
// This method will panic if the refund counter goes below zero
func (s *StateDB) SubRefund(gas uint64) {
@ -313,6 +367,11 @@ func (s *StateDB) Exist(addr common.Address) bool {
return s.getStateObject(addr) != nil
}
// ExistBeforeCurTx reports whether the account existed before the current
// transaction began executing: it must be present in the state and must not
// be a contract whose initcode started executing in the current transaction
// (see the stateObject.newContract flag).
func (s *StateDB) ExistBeforeCurTx(addr common.Address) bool {
	obj := s.getStateObject(addr)
	return obj != nil && !obj.newContract
}
// Empty returns whether the state object is either non-existent
// or empty according to the EIP161 specification (balance = nonce = code = 0)
func (s *StateDB) Empty(addr common.Address) bool {
@ -568,6 +627,25 @@ func (s *StateDB) updateStateObject(obj *stateObject) {
s.trie.UpdateContractCode(obj.Address(), common.BytesToHash(obj.CodeHash()), obj.code)
}
}
// updateStateObjects writes the given mutated objects into the account trie
// in a single batch, then records freshly-written contract code for any
// object flagged dirty. A batch-update failure is recorded via setError
// rather than returned, matching updateStateObject's behavior.
func (s *StateDB) updateStateObjects(objs []*stateObject) {
	// Pre-size both slices; the final length is known up front.
	addrs := make([]common.Address, 0, len(objs))
	accts := make([]*types.StateAccount, 0, len(objs))
	for _, obj := range objs {
		addrs = append(addrs, obj.Address())
		accts = append(accts, &obj.data)
	}
	if err := s.trie.UpdateAccountBatch(addrs, accts, nil); err != nil {
		s.setError(fmt.Errorf("updateStateObjects: %w", err))
	}
	for _, obj := range objs {
		if obj.dirtyCode {
			s.trie.UpdateContractCode(obj.Address(), common.BytesToHash(obj.CodeHash()), obj.code)
		}
	}
}
// deleteStateObject removes the given object from the state trie.
func (s *StateDB) deleteStateObject(addr common.Address) {
@ -587,6 +665,24 @@ func (s *StateDB) getStateObject(addr common.Address) *stateObject {
if _, ok := s.stateObjectsDestruct[addr]; ok {
return nil
}
// if we are executing against a block access list, construct the account
// state at the current tx index by applying the access-list diff on top
// of the prestate value for the account.
if s.blockAccessList != nil && s.balIndex != 0 && s.blockAccessList.isModified(addr) {
acct := s.blockAccessList.readAccount(s, addr, s.balIndex-1)
if acct != nil {
s.setStateObject(acct)
return acct
}
return nil
		// if the acct was nil, it might be non-existent or was not explicitly requested for loading from the blockAccessList object.
// try to load it from the snapshot.
		// TODO: if the acct was non-existent because it was deleted, we should just return nil here.
}
s.AccountLoaded++
start := time.Now()
@ -623,6 +719,7 @@ func (s *StateDB) getOrNewStateObject(addr common.Address) *stateObject {
if obj == nil {
obj = s.createObject(addr)
}
return obj
}
@ -681,10 +778,14 @@ func (s *StateDB) Copy() *StateDB {
refund: s.refund,
thash: s.thash,
txIndex: s.txIndex,
balIndex: s.txIndex,
logs: make(map[common.Hash][]*types.Log, len(s.logs)),
logSize: s.logSize,
preimages: maps.Clone(s.preimages),
// don't deep-copy these
blockAccessList: s.blockAccessList,
// Do we need to copy the access list and transient storage?
// In practice: No. At the start of a transaction, these two lists are empty.
// In practice, we only ever copy state _between_ transactions/blocks, never
@ -831,12 +932,18 @@ func (s *StateDB) IntermediateRoot(deleteEmptyObjects bool) common.Hash {
// later time.
workers.SetLimit(1)
}
var updatedAddrs []common.Address
for addr, op := range s.mutations {
if op.applied || op.isDelete() {
continue
}
obj := s.stateObjects[addr] // closure for the task runner below
updatedAddrs = append(updatedAddrs, addr)
}
for _, addr := range updatedAddrs {
workers.Go(func() error {
obj := s.stateObjects[addr] // closure for the task runner below
if s.db.TrieDB().IsVerkle() {
obj.updateTrie()
} else {
@ -931,6 +1038,7 @@ func (s *StateDB) IntermediateRoot(deleteEmptyObjects bool) common.Hash {
var (
usedAddrs []common.Address
deletedAddrs []common.Address
updatedObjs []*stateObject
)
for addr, op := range s.mutations {
if op.applied {
@ -941,11 +1049,14 @@ func (s *StateDB) IntermediateRoot(deleteEmptyObjects bool) common.Hash {
if op.isDelete() {
deletedAddrs = append(deletedAddrs, addr)
} else {
s.updateStateObject(s.stateObjects[addr])
updatedObjs = append(updatedObjs, s.stateObjects[addr])
s.AccountUpdated += 1
}
usedAddrs = append(usedAddrs, addr) // Copy needed for closure
}
if len(updatedObjs) > 0 {
s.updateStateObjects(updatedObjs)
}
for _, deletedAddr := range deletedAddrs {
s.deleteStateObject(deletedAddr)
s.AccountDeleted += 1
@ -957,9 +1068,7 @@ func (s *StateDB) IntermediateRoot(deleteEmptyObjects bool) common.Hash {
}
// Track the amount of time wasted on hashing the account trie
defer func(start time.Time) { s.AccountHashes += time.Since(start) }(time.Now())
hash := s.trie.Hash()
// If witness building is enabled, gather the account trie witness
if s.witness != nil {
witness := s.trie.Witness()
@ -968,6 +1077,7 @@ func (s *StateDB) IntermediateRoot(deleteEmptyObjects bool) common.Hash {
s.witnessStats.Add(witness, common.Hash{})
}
}
return hash
}
@ -977,6 +1087,14 @@ func (s *StateDB) IntermediateRoot(deleteEmptyObjects bool) common.Hash {
// SetTxContext sets the current transaction hash and index, which are used
// when the EVM emits new state logs. The BAL index is offset by one because
// index 0 is reserved for state access occurring before transaction
// execution (see the balIndex field documentation).
func (s *StateDB) SetTxContext(thash common.Hash, ti int) {
	s.thash = thash
	s.txIndex = ti
	s.balIndex = ti + 1
}
// SetAccessListIndex sets the current index that state mutations will
// be reported as in the BAL. It is only relevant if this StateDB instance
// is being used in the BAL construction path. Note that SetTxContext also
// overwrites this index (to tx index + 1).
func (s *StateDB) SetAccessListIndex(idx int) {
	s.balIndex = idx
}
func (s *StateDB) clearJournalAndRefund() {
@ -988,8 +1106,8 @@ func (s *StateDB) clearJournalAndRefund() {
// of a specific account. It leverages the associated state snapshot for fast
// storage iteration and constructs trie node deletion markers by creating
// stack trie with iterated slots.
func (s *StateDB) fastDeleteStorage(snaps *snapshot.Tree, addrHash common.Hash, root common.Hash) (map[common.Hash][]byte, map[common.Hash][]byte, *trienode.NodeSet, error) {
iter, err := snaps.StorageIterator(s.originalRoot, addrHash, common.Hash{})
func fastDeleteStorage(originalRoot common.Hash, snaps *snapshot.Tree, addrHash common.Hash, root common.Hash) (map[common.Hash][]byte, map[common.Hash][]byte, *trienode.NodeSet, error) {
iter, err := snaps.StorageIterator(originalRoot, addrHash, common.Hash{})
if err != nil {
return nil, nil, nil, err
}
@ -1028,8 +1146,8 @@ func (s *StateDB) fastDeleteStorage(snaps *snapshot.Tree, addrHash common.Hash,
// slowDeleteStorage serves as a less-efficient alternative to "fastDeleteStorage,"
// employed when the associated state snapshot is not available. It iterates the
// storage slots along with all internal trie nodes via trie directly.
func (s *StateDB) slowDeleteStorage(addr common.Address, addrHash common.Hash, root common.Hash) (map[common.Hash][]byte, map[common.Hash][]byte, *trienode.NodeSet, error) {
tr, err := s.db.OpenStorageTrie(s.originalRoot, addr, root, s.trie)
func slowDeleteStorage(db Database, trie Trie, originalRoot common.Hash, addr common.Address, addrHash common.Hash, root common.Hash) (map[common.Hash][]byte, map[common.Hash][]byte, *trienode.NodeSet, error) {
tr, err := db.OpenStorageTrie(originalRoot, addr, root, trie)
if err != nil {
return nil, nil, nil, fmt.Errorf("failed to open storage trie, err: %w", err)
}
@ -1064,7 +1182,7 @@ func (s *StateDB) slowDeleteStorage(addr common.Address, addrHash common.Hash, r
// The function will make an attempt to utilize an efficient strategy if the
// associated state snapshot is reachable; otherwise, it will resort to a less
// efficient approach.
func (s *StateDB) deleteStorage(addr common.Address, addrHash common.Hash, root common.Hash) (map[common.Hash][]byte, map[common.Hash][]byte, *trienode.NodeSet, error) {
func deleteStorage(db Database, trie Trie, addr common.Address, addrHash common.Hash, root, originalRoot common.Hash) (map[common.Hash][]byte, map[common.Hash][]byte, *trienode.NodeSet, error) {
var (
err error
nodes *trienode.NodeSet // the set for trie node mutations (value is nil)
@ -1074,12 +1192,12 @@ func (s *StateDB) deleteStorage(addr common.Address, addrHash common.Hash, root
// The fast approach can be failed if the snapshot is not fully
// generated, or it's internally corrupted. Fallback to the slow
// one just in case.
snaps := s.db.Snapshot()
snaps := db.Snapshot()
if snaps != nil {
storages, storageOrigins, nodes, err = s.fastDeleteStorage(snaps, addrHash, root)
storages, storageOrigins, nodes, err = fastDeleteStorage(originalRoot, snaps, addrHash, root)
}
if snaps == nil || err != nil {
storages, storageOrigins, nodes, err = s.slowDeleteStorage(addr, addrHash, root)
storages, storageOrigins, nodes, err = slowDeleteStorage(db, trie, originalRoot, addr, addrHash, root)
}
if err != nil {
return nil, nil, nil, err
@ -1105,39 +1223,38 @@ func (s *StateDB) deleteStorage(addr common.Address, addrHash common.Hash, root
// with their values be tracked as original value.
// In case (d), **original** account along with its storages should be deleted,
// with their values be tracked as original value.
func (s *StateDB) handleDestruction(noStorageWiping bool) (map[common.Hash]*accountDelete, []*trienode.NodeSet, error) {
func handleDestruction(db Database, trie Trie, noStorageWiping bool, destructions iter.Seq[common.Address], prestates map[common.Address]*types.StateAccount) (map[common.Hash]*accountDelete, []*trienode.NodeSet, error) {
var (
nodes []*trienode.NodeSet
deletes = make(map[common.Hash]*accountDelete)
)
for addr, prevObj := range s.stateObjectsDestruct {
prev := prevObj.origin
for addr := range destructions {
prestate := prestates[addr]
// The account was non-existent, and it's marked as destructed in the scope
// of block. It can be either case (a) or (b) and will be interpreted as
// null->null state transition.
// - for (a), skip it without doing anything
// - for (b), the resurrected account with nil as original will be handled afterwards
if prev == nil {
if prestate == nil {
continue
}
// The account was existent, it can be either case (c) or (d).
addrHash := crypto.Keccak256Hash(addr.Bytes())
op := &accountDelete{
address: addr,
origin: types.SlimAccountRLP(*prev),
origin: types.SlimAccountRLP(*prestate),
}
deletes[addrHash] = op
// Short circuit if the origin storage was empty.
if prev.Root == types.EmptyRootHash || s.db.TrieDB().IsVerkle() {
if prestate.Root == types.EmptyRootHash || db.TrieDB().IsVerkle() {
continue
}
if noStorageWiping {
return nil, nil, fmt.Errorf("unexpected storage wiping, %x", addr)
}
// Remove storage slots belonging to the account.
storages, storagesOrigin, set, err := s.deleteStorage(addr, addrHash, prev.Root)
storages, storagesOrigin, set, err := deleteStorage(db, trie, addr, addrHash, prestate.Root, prestate.Root)
if err != nil {
return nil, nil, fmt.Errorf("failed to delete storage, err: %w", err)
}
@ -1162,6 +1279,7 @@ func (s *StateDB) commit(deleteEmptyObjects bool, noStorageWiping bool, blockNum
if s.dbErr != nil {
return nil, fmt.Errorf("commit aborted due to earlier error: %v", s.dbErr)
}
// Finalize any pending changes and merge everything into the tries
s.IntermediateRoot(deleteEmptyObjects)
@ -1211,7 +1329,12 @@ func (s *StateDB) commit(deleteEmptyObjects bool, noStorageWiping bool, blockNum
// the same block, account deletions must be processed first. This ensures
// that the storage trie nodes deleted during destruction and recreated
// during subsequent resurrection can be combined correctly.
deletes, delNodes, err := s.handleDestruction(noStorageWiping)
var stateAccountsDestruct, destructAccountsOrigins = make(map[common.Address]*types.StateAccount), make(map[common.Address]*types.StateAccount)
for addr, obj := range s.stateObjectsDestruct {
stateAccountsDestruct[addr] = &obj.data
destructAccountsOrigins[addr] = obj.origin
}
deletes, delNodes, err := handleDestruction(s.db, s.trie, noStorageWiping, maps.Keys(stateAccountsDestruct), destructAccountsOrigins)
if err != nil {
return nil, err
}
@ -1312,6 +1435,44 @@ func (s *StateDB) commit(deleteEmptyObjects bool, noStorageWiping bool, blockNum
return newStateUpdate(noStorageWiping, origin, root, blockNumber, deletes, updates, nodes), nil
}
// flushStateUpdate persists a computed state update into the configured data
// stores: dirty contract code is written to the disk database, account and
// storage diffs to the snapshot tree, and trie node changes to the trie
// database. It returns the durations spent in the snapshot and trie-database
// commits so callers can record them as metrics.
func flushStateUpdate(d Database, block uint64, update *stateUpdate) (snapshotCommits, trieDBCommits time.Duration, err error) {
	// Commit dirty contract code, if any exists, straight to disk.
	if db := d.TrieDB().Disk(); db != nil && len(update.codes) > 0 {
		batch := db.NewBatch()
		for _, code := range update.codes {
			rawdb.WriteCode(batch, code.hash, code.blob)
		}
		if err := batch.Write(); err != nil {
			return 0, 0, err
		}
	}
	if !update.empty() {
		// If snapshotting is enabled, update the snapshot tree with this new version
		if snap := d.Snapshot(); snap != nil && snap.Snapshot(update.originRoot) != nil {
			start := time.Now()
			// Snapshot update failures are logged, not returned: the snapshot
			// can be regenerated, whereas aborting here would lose the commit.
			if err := snap.Update(update.root, update.originRoot, update.accounts, update.storages); err != nil {
				log.Warn("Failed to update snapshot tree", "from", update.originRoot, "to", update.root, "err", err)
			}
			// Keep 128 diff layers in the memory, persistent layer is 129th.
			// - head layer is paired with HEAD state
			// - head-1 layer is paired with HEAD-1 state
			// - head-127 layer(bottom-most diff layer) is paired with HEAD-127 state
			if err := snap.Cap(update.root, TriesInMemory); err != nil {
				log.Warn("Failed to cap snapshot tree", "root", update.root, "layers", TriesInMemory, "err", err)
			}
			snapshotCommits += time.Since(start)
		}
		// If trie database is enabled, commit the state update as a new layer
		if db := d.TrieDB(); db != nil {
			start := time.Now()
			if err := db.Update(update.root, update.originRoot, block, update.nodes, update.stateSet()); err != nil {
				return 0, 0, err
			}
			trieDBCommits += time.Since(start)
		}
	}
	return snapshotCommits, trieDBCommits, nil
}
// commitAndFlush is a wrapper of commit which also commits the state mutations
// to the configured data stores.
func (s *StateDB) commitAndFlush(block uint64, deleteEmptyObjects bool, noStorageWiping bool, deriveCodeFields bool) (*stateUpdate, error) {
@ -1319,46 +1480,18 @@ func (s *StateDB) commitAndFlush(block uint64, deleteEmptyObjects bool, noStorag
if err != nil {
return nil, err
}
// TODO: move this check inside flushStateUpdate?
if deriveCodeFields {
if err := ret.deriveCodeFields(s.reader); err != nil {
return nil, err
}
}
// Commit dirty contract code if any exists
if db := s.db.TrieDB().Disk(); db != nil && len(ret.codes) > 0 {
batch := db.NewBatch()
for _, code := range ret.codes {
rawdb.WriteCode(batch, code.hash, code.blob)
}
if err := batch.Write(); err != nil {
return nil, err
}
}
if !ret.empty() {
// If snapshotting is enabled, update the snapshot tree with this new version
if snap := s.db.Snapshot(); snap != nil && snap.Snapshot(ret.originRoot) != nil {
start := time.Now()
if err := snap.Update(ret.root, ret.originRoot, ret.accounts, ret.storages); err != nil {
log.Warn("Failed to update snapshot tree", "from", ret.originRoot, "to", ret.root, "err", err)
}
// Keep 128 diff layers in the memory, persistent layer is 129th.
// - head layer is paired with HEAD state
// - head-1 layer is paired with HEAD-1 state
// - head-127 layer(bottom-most diff layer) is paired with HEAD-127 state
if err := snap.Cap(ret.root, TriesInMemory); err != nil {
log.Warn("Failed to cap snapshot tree", "root", ret.root, "layers", TriesInMemory, "err", err)
}
s.SnapshotCommits += time.Since(start)
}
// If trie database is enabled, commit the state update as a new layer
if db := s.db.TrieDB(); db != nil {
start := time.Now()
if err := db.Update(ret.root, ret.originRoot, block, ret.nodes, ret.stateSet()); err != nil {
return nil, err
}
s.TrieDBCommits += time.Since(start)
}
snapshotCommits, trieDBCommits, err := flushStateUpdate(s.db, block, ret)
if err != nil {
return nil, err
}
s.SnapshotCommits = snapshotCommits
s.TrieDBCommits = trieDBCommits
s.reader, _ = s.db.Reader(s.originalRoot)
return ret, err
}

View file

@ -59,22 +59,37 @@ func (s *hookedStateDB) IsNewContract(addr common.Address) bool {
}
// GetBalance reports the account's balance, notifying the OnAccountRead
// hook (when configured) before delegating to the wrapped StateDB.
func (s *hookedStateDB) GetBalance(addr common.Address) *uint256.Int {
	if hook := s.hooks.OnAccountRead; hook != nil {
		hook(addr)
	}
	return s.inner.GetBalance(addr)
}
// GetNonce reports the account's nonce, notifying the OnAccountRead hook
// (when configured) before delegating to the wrapped StateDB.
func (s *hookedStateDB) GetNonce(addr common.Address) uint64 {
	if hook := s.hooks.OnAccountRead; hook != nil {
		hook(addr)
	}
	return s.inner.GetNonce(addr)
}
// GetCodeHash reports the account's code hash, notifying the OnAccountRead
// hook (when configured) before delegating to the wrapped StateDB.
func (s *hookedStateDB) GetCodeHash(addr common.Address) common.Hash {
	if hook := s.hooks.OnAccountRead; hook != nil {
		hook(addr)
	}
	return s.inner.GetCodeHash(addr)
}
// GetCode returns the account's code, notifying the OnAccountRead hook
// (when configured) before delegating to the wrapped StateDB.
func (s *hookedStateDB) GetCode(addr common.Address) []byte {
	if hook := s.hooks.OnAccountRead; hook != nil {
		hook(addr)
	}
	return s.inner.GetCode(addr)
}
// GetCodeSize reports the size of the account's code, notifying the
// OnAccountRead hook (when configured) before delegating to the wrapped
// StateDB.
func (s *hookedStateDB) GetCodeSize(addr common.Address) int {
	if hook := s.hooks.OnAccountRead; hook != nil {
		hook(addr)
	}
	return s.inner.GetCodeSize(addr)
}
@ -91,14 +106,23 @@ func (s *hookedStateDB) GetRefund() uint64 {
}
// GetStateAndCommittedState returns the current and committed value of the
// storage slot, notifying the OnStorageRead hook (when configured) before
// delegating to the wrapped StateDB.
func (s *hookedStateDB) GetStateAndCommittedState(addr common.Address, hash common.Hash) (common.Hash, common.Hash) {
	if hook := s.hooks.OnStorageRead; hook != nil {
		hook(addr, hash)
	}
	return s.inner.GetStateAndCommittedState(addr, hash)
}
// GetState returns the current value of the storage slot, notifying the
// OnStorageRead hook (when configured) before delegating to the wrapped
// StateDB.
func (s *hookedStateDB) GetState(addr common.Address, hash common.Hash) common.Hash {
	if hook := s.hooks.OnStorageRead; hook != nil {
		hook(addr, hash)
	}
	return s.inner.GetState(addr, hash)
}
// GetStorageRoot returns the account's storage root, notifying the
// OnAccountRead hook (when configured) before delegating to the wrapped
// StateDB.
func (s *hookedStateDB) GetStorageRoot(addr common.Address) common.Hash {
	if hook := s.hooks.OnAccountRead; hook != nil {
		hook(addr)
	}
	return s.inner.GetStorageRoot(addr)
}
@ -111,14 +135,23 @@ func (s *hookedStateDB) SetTransientState(addr common.Address, key, value common
}
// HasSelfDestructed reports whether the account has self-destructed in the
// current transaction, notifying the OnAccountRead hook (when configured)
// before delegating to the wrapped StateDB.
func (s *hookedStateDB) HasSelfDestructed(addr common.Address) bool {
	if hook := s.hooks.OnAccountRead; hook != nil {
		hook(addr)
	}
	return s.inner.HasSelfDestructed(addr)
}
// Exist reports whether the account is present in the state, notifying the
// OnAccountRead hook (when configured) before delegating to the wrapped
// StateDB.
func (s *hookedStateDB) Exist(addr common.Address) bool {
	if hook := s.hooks.OnAccountRead; hook != nil {
		hook(addr)
	}
	return s.inner.Exist(addr)
}
// Empty reports whether the account is empty per the inner StateDB's
// definition, notifying the OnAccountRead hook (when configured) first.
func (s *hookedStateDB) Empty(addr common.Address) bool {
	if hook := s.hooks.OnAccountRead; hook != nil {
		hook(addr)
	}
	return s.inner.Empty(addr)
}
@ -221,6 +254,10 @@ func (s *hookedStateDB) SelfDestruct(address common.Address) {
s.inner.SelfDestruct(address)
}
// ExistBeforeCurTx delegates to the wrapped StateDB. No read hook is fired
// here — NOTE(review): confirm this asymmetry with Exist/Empty is intended.
func (s *hookedStateDB) ExistBeforeCurTx(addr common.Address) bool {
	return s.inner.ExistBeforeCurTx(addr)
}
func (s *hookedStateDB) AddLog(log *types.Log) {
// The inner will modify the log (add fields), so invoke that first
s.inner.AddLog(log)
@ -285,3 +322,7 @@ func (s *hookedStateDB) Finalise(deleteEmptyObjects bool) {
s.inner.Finalise(deleteEmptyObjects)
}
// TxIndex returns the current transaction index of the wrapped StateDB.
func (s *hookedStateDB) TxIndex() int {
	return s.inner.TxIndex()
}

View file

@ -109,6 +109,7 @@ func (p *StateProcessor) Process(block *types.Block, statedb *state.StateDB, cfg
receipts = append(receipts, receipt)
allLogs = append(allLogs, receipt.Logs...)
}
// Read requests if Prague is enabled.
var requests [][]byte
if config.IsPrague(block.Number(), block.Time()) {
@ -130,6 +131,10 @@ func (p *StateProcessor) Process(block *types.Block, statedb *state.StateDB, cfg
// Finalize the block, applying any consensus engine specific extras (e.g. block rewards)
p.chain.Engine().Finalize(p.chain, header, tracingStateDB, block.Body())
if hooks := cfg.Tracer; hooks != nil && hooks.OnBlockFinalization != nil {
hooks.OnBlockFinalization()
}
return &ProcessResult{
Receipts: receipts,
Requests: requests,
@ -214,7 +219,8 @@ func ApplyTransaction(evm *vm.EVM, gp *GasPool, statedb *state.StateDB, header *
return nil, err
}
// Create a new context to be used in the EVM environment
return ApplyTransactionWithEVM(msg, gp, statedb, header.Number, header.Hash(), header.Time, tx, usedGas, evm)
receipts, err := ApplyTransactionWithEVM(msg, gp, statedb, header.Number, header.Hash(), header.Time, tx, usedGas, evm)
return receipts, err
}
// ProcessBeaconBlockRoot applies the EIP-4788 system call to the beacon block root

View file

@ -19,9 +19,6 @@ package core
import (
"bytes"
"fmt"
"math"
"math/big"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/tracing"
"github.com/ethereum/go-ethereum/core/types"
@ -29,6 +26,8 @@ import (
"github.com/ethereum/go-ethereum/crypto/kzg4844"
"github.com/ethereum/go-ethereum/params"
"github.com/holiman/uint256"
"math"
"math/big"
)
// ExecutionResult includes all output after executing given evm
@ -557,6 +556,10 @@ func (st *stateTransition) execute() (*ExecutionResult, error) {
} else {
fee := new(uint256.Int).SetUint64(st.gasUsed())
fee.Mul(fee, effectiveTipU256)
// always read the coinbase account to include it in the BAL (TODO check this is actually part of the spec)
st.state.GetBalance(st.evm.Context.Coinbase)
st.state.AddBalance(st.evm.Context.Coinbase, fee, tracing.BalanceIncreaseRewardTransactionFee)
// add the coinbase to the witness iff the fee is greater than 0
@ -617,16 +620,22 @@ func (st *stateTransition) applyAuthorization(auth *types.SetCodeAuthorization)
st.state.AddRefund(params.CallNewAccountGas - params.TxAuthTupleGas)
}
prevDelegation, isDelegated := types.ParseDelegation(st.state.GetCode(authority))
// Update nonce and account code.
st.state.SetNonce(authority, auth.Nonce+1, tracing.NonceChangeAuthorization)
if auth.Address == (common.Address{}) {
// Delegation to zero address means clear.
st.state.SetCode(authority, nil, tracing.CodeChangeAuthorizationClear)
if isDelegated {
st.state.SetCode(authority, nil, tracing.CodeChangeAuthorizationClear)
}
return nil
}
// Otherwise install delegation to auth.Address.
st.state.SetCode(authority, types.AddressToDelegation(auth.Address), tracing.CodeChangeAuthorization)
// install delegation to auth.Address if the delegation changed
if !isDelegated || auth.Address != prevDelegation {
st.state.SetCode(authority, types.AddressToDelegation(auth.Address), tracing.CodeChangeAuthorization)
}
return nil
}

View file

@ -178,7 +178,6 @@ type (
CloseHook = func()
// BlockStartHook is called before executing `block`.
// `td` is the total difficulty prior to `block`.
BlockStartHook = func(event BlockEvent)
// BlockEndHook is called after executing a block.
@ -192,24 +191,25 @@ type (
// GenesisBlockHook is called when the genesis block is being processed.
GenesisBlockHook = func(genesis *types.Block, alloc types.GenesisAlloc)
// OnSystemCallStartHook is called when a system call is about to be executed. Today,
// this hook is invoked when the EIP-4788 system call is about to be executed to set the
// beacon block root.
// OnSystemCallStartHook is called when a system call is about to be executed.
// Today, this hook is invoked when the EIP-4788 system call is about to be
// executed to set the beacon block root.
//
// After this hook, the EVM call tracing will happened as usual so you will receive a `OnEnter/OnExit`
// as well as state hooks between this hook and the `OnSystemCallEndHook`.
	// After this hook, the EVM call tracing will happen as usual, so you will
// receive a `OnEnter/OnExit` as well as state hooks between this hook and
// the `OnSystemCallEndHook`.
//
// Note that system call happens outside normal transaction execution, so the `OnTxStart/OnTxEnd` hooks
// will not be invoked.
// Note that system call happens outside normal transaction execution, so
// the `OnTxStart/OnTxEnd` hooks will not be invoked.
OnSystemCallStartHook = func()
// OnSystemCallStartHookV2 is called when a system call is about to be executed. Refer
// to `OnSystemCallStartHook` for more information.
// OnSystemCallStartHookV2 is called when a system call is about to be executed.
// Refer to `OnSystemCallStartHook` for more information.
OnSystemCallStartHookV2 = func(vm *VMContext)
// OnSystemCallEndHook is called when a system call has finished executing. Today,
// this hook is invoked when the EIP-4788 system call is about to be executed to set the
// beacon block root.
// OnSystemCallEndHook is called when a system call has finished executing.
// Today, this hook is invoked when the EIP-4788 system call is about to be
// executed to set the beacon block root.
OnSystemCallEndHook = func()
// StateUpdateHook is called after state is committed for a block.
@ -239,9 +239,17 @@ type (
// StorageChangeHook is called when the storage of an account changes.
StorageChangeHook = func(addr common.Address, slot common.Hash, prev, new common.Hash)
SelfDestructHook = func(address common.Address)
// LogHook is called when a log is emitted.
LogHook = func(log *types.Log)
// AccountReadHook is called when the account is accessed.
AccountReadHook = func(addr common.Address)
// StorageReadHook is called when the storage slot is accessed.
StorageReadHook = func(addr common.Address, slot common.Hash)
// BlockHashReadHook is called when EVM reads the blockhash of a block.
BlockHashReadHook = func(blockNumber uint64, hash common.Hash)
)
@ -255,6 +263,7 @@ type Hooks struct {
OnOpcode OpcodeHook
OnFault FaultHook
OnGasChange GasChangeHook
// Chain events
OnBlockchainInit BlockchainInitHook
OnClose CloseHook
@ -266,14 +275,23 @@ type Hooks struct {
OnSystemCallStartV2 OnSystemCallStartHookV2
OnSystemCallEnd OnSystemCallEndHook
OnStateUpdate StateUpdateHook
// State events
OnBalanceChange BalanceChangeHook
OnNonceChange NonceChangeHook
OnNonceChangeV2 NonceChangeHookV2
OnCodeChange CodeChangeHook
OnCodeChangeV2 CodeChangeHookV2
OnStorageChange StorageChangeHook
OnLog LogHook
OnBlockFinalization func() // called after post-tx system contracts and consensus finalization are invoked
// State mutation events
OnBalanceChange BalanceChangeHook
OnNonceChange NonceChangeHook
OnNonceChangeV2 NonceChangeHookV2
OnCodeChange CodeChangeHook
OnCodeChangeV2 CodeChangeHookV2
OnStorageChange StorageChangeHook
OnLog LogHook
OnSelfDestructChange SelfDestructHook
// State access events
OnAccountRead AccountReadHook
OnStorageRead StorageReadHook
// Block hash read
OnBlockHashRead BlockHashReadHook
}
@ -290,57 +308,74 @@ const (
// Issuance
// BalanceIncreaseRewardMineUncle is a reward for mining an uncle block.
BalanceIncreaseRewardMineUncle BalanceChangeReason = 1
// BalanceIncreaseRewardMineBlock is a reward for mining a block.
BalanceIncreaseRewardMineBlock BalanceChangeReason = 2
// BalanceIncreaseWithdrawal is ether withdrawn from the beacon chain.
BalanceIncreaseWithdrawal BalanceChangeReason = 3
// BalanceIncreaseGenesisBalance is ether allocated at the genesis block.
BalanceIncreaseGenesisBalance BalanceChangeReason = 4
// Transaction fees
// BalanceIncreaseRewardTransactionFee is the transaction tip increasing block builder's balance.
// BalanceIncreaseRewardTransactionFee is the transaction tip increasing
// block builder's balance.
BalanceIncreaseRewardTransactionFee BalanceChangeReason = 5
// BalanceDecreaseGasBuy is spent to purchase gas for execution a transaction.
// Part of this gas will be burnt as per EIP-1559 rules.
BalanceDecreaseGasBuy BalanceChangeReason = 6
// BalanceIncreaseGasReturn is ether returned for unused gas at the end of execution.
BalanceIncreaseGasReturn BalanceChangeReason = 7
// DAO fork
// BalanceIncreaseDaoContract is ether sent to the DAO refund contract.
BalanceIncreaseDaoContract BalanceChangeReason = 8
// BalanceDecreaseDaoAccount is ether taken from a DAO account to be moved to the refund contract.
// BalanceDecreaseDaoAccount is ether taken from a DAO account to be moved
// to the refund contract.
BalanceDecreaseDaoAccount BalanceChangeReason = 9
// BalanceChangeTransfer is ether transferred via a call.
// it is a decrease for the sender and an increase for the recipient.
BalanceChangeTransfer BalanceChangeReason = 10
// BalanceChangeTouchAccount is a transfer of zero value. It is only there to
// touch-create an account.
BalanceChangeTouchAccount BalanceChangeReason = 11
// BalanceIncreaseSelfdestruct is added to the recipient as indicated by a selfdestructing account.
// BalanceIncreaseSelfdestruct is added to the recipient as indicated by a
// selfdestructing account.
BalanceIncreaseSelfdestruct BalanceChangeReason = 12
// BalanceDecreaseSelfdestruct is deducted from a contract due to self-destruct.
BalanceDecreaseSelfdestruct BalanceChangeReason = 13
// BalanceDecreaseSelfdestructBurn is ether that is sent to an already self-destructed
// account within the same tx (captured at end of tx).
// Note it doesn't account for a self-destruct which appoints itself as recipient.
BalanceDecreaseSelfdestructBurn BalanceChangeReason = 14
// BalanceChangeRevert is emitted when the balance is reverted back to a previous value due to call failure.
// It is only emitted when the tracer has opted in to use the journaling wrapper (WrapWithJournal).
// BalanceChangeRevert is emitted when the balance is reverted back to a
// previous value due to call failure.
//
// It is only emitted when the tracer has opted in to use the journaling
// wrapper (WrapWithJournal).
BalanceChangeRevert BalanceChangeReason = 15
)
// GasChangeReason is used to indicate the reason for a gas change, useful
// for tracing and reporting.
//
// There is essentially two types of gas changes, those that can be emitted once per transaction
// and those that can be emitted on a call basis, so possibly multiple times per transaction.
// There are essentially two types of gas changes, those that can be emitted
// once per transaction and those that can be emitted on a call basis, so possibly
// multiple times per transaction.
//
// They can be recognized easily by their name, those that start with `GasChangeTx` are emitted
// once per transaction, while those that start with `GasChangeCall` are emitted on a call basis.
// They can be recognized easily by their name, those that start with `GasChangeTx`
// are emitted once per transaction, while those that start with `GasChangeCall`
// are emitted on a call basis.
type GasChangeReason byte
//go:generate go run golang.org/x/tools/cmd/stringer -type=GasChangeReason -trimprefix=GasChange -output gen_gas_change_reason_stringer.go
@ -348,61 +383,100 @@ type GasChangeReason byte
const (
GasChangeUnspecified GasChangeReason = 0
// GasChangeTxInitialBalance is the initial balance for the call which will be equal to the gasLimit of the call. There is only
// one such gas change per transaction.
// GasChangeTxInitialBalance is the initial balance for the call which will
// be equal to the gasLimit of the call. There is only one such gas change
// per transaction.
GasChangeTxInitialBalance GasChangeReason = 1
// GasChangeTxIntrinsicGas is the amount of gas that will be charged for the intrinsic cost of the transaction, there is
// always exactly one of those per transaction.
// GasChangeTxIntrinsicGas is the amount of gas that will be charged for the
// intrinsic cost of the transaction, there is always exactly one of those
// per transaction.
GasChangeTxIntrinsicGas GasChangeReason = 2
// GasChangeTxRefunds is the sum of all refunds which happened during the tx execution (e.g. storage slot being cleared)
// this generates an increase in gas. There is at most one of such gas change per transaction.
// GasChangeTxRefunds is the sum of all refunds which happened during the tx
// execution (e.g. storage slot being cleared). This generates an increase in
// gas. There is at most one of such gas change per transaction.
GasChangeTxRefunds GasChangeReason = 3
// GasChangeTxLeftOverReturned is the amount of gas left over at the end of transaction's execution that will be returned
// to the chain. This change will always be a negative change as we "drain" left over gas towards 0. If there was no gas
// left at the end of execution, no such even will be emitted. The returned gas's value in Wei is returned to caller.
// There is at most one of such gas change per transaction.
// GasChangeTxLeftOverReturned is the amount of gas left over at the end of
// transaction's execution that will be returned to the chain. This change
// will always be a negative change as we "drain" left over gas towards 0.
// If there was no gas left at the end of execution, no such event will be
// emitted. The returned gas's value in Wei is returned to caller. There is
// at most one of such gas change per transaction.
GasChangeTxLeftOverReturned GasChangeReason = 4
// GasChangeCallInitialBalance is the initial balance for the call which will be equal to the gasLimit of the call. There is only
// one such gas change per call.
// GasChangeCallInitialBalance is the initial balance for the call which
// will be equal to the gasLimit of the call. There is only one such gas
// change per call.
GasChangeCallInitialBalance GasChangeReason = 5
// GasChangeCallLeftOverReturned is the amount of gas left over that will be returned to the caller, this change will always
// be a negative change as we "drain" left over gas towards 0. If there was no gas left at the end of execution, no such even
// will be emitted.
// GasChangeCallLeftOverReturned is the amount of gas left over that will
// be returned to the caller, this change will always be a negative change
// as we "drain" left over gas towards 0. If there was no gas left at the
// end of execution, no such event will be emitted.
GasChangeCallLeftOverReturned GasChangeReason = 6
// GasChangeCallLeftOverRefunded is the amount of gas that will be refunded to the call after the child call execution it
// executed completed. This value is always positive as we are giving gas back to the you, the left over gas of the child.
// If there was no gas left to be refunded, no such even will be emitted.
// GasChangeCallLeftOverRefunded is the amount of gas that will be refunded
// to the caller after the child call it executed has completed. This
// value is always positive as we are giving gas back to you, the left over
// gas of the child. If there was no gas left to be refunded, no such event
// will be emitted.
GasChangeCallLeftOverRefunded GasChangeReason = 7
// GasChangeCallContractCreation is the amount of gas that will be burned for a CREATE.
// GasChangeCallContractCreation is the amount of gas that will be burned
// for a CREATE.
GasChangeCallContractCreation GasChangeReason = 8
// GasChangeCallContractCreation2 is the amount of gas that will be burned for a CREATE2.
// GasChangeCallContractCreation2 is the amount of gas that will be burned
// for a CREATE2.
GasChangeCallContractCreation2 GasChangeReason = 9
// GasChangeCallCodeStorage is the amount of gas that will be charged for code storage.
// GasChangeCallCodeStorage is the amount of gas that will be charged for
// code storage.
GasChangeCallCodeStorage GasChangeReason = 10
// GasChangeCallOpCode is the amount of gas that will be charged for an opcode executed by the EVM, exact opcode that was
// performed can be check by `OnOpcode` handling.
// GasChangeCallOpCode is the amount of gas that will be charged for an opcode
// executed by the EVM, exact opcode that was performed can be checked by
// `OnOpcode` handling.
GasChangeCallOpCode GasChangeReason = 11
// GasChangeCallPrecompiledContract is the amount of gas that will be charged for a precompiled contract execution.
// GasChangeCallPrecompiledContract is the amount of gas that will be charged
// for a precompiled contract execution.
GasChangeCallPrecompiledContract GasChangeReason = 12
// GasChangeCallStorageColdAccess is the amount of gas that will be charged for a cold storage access as controlled by EIP2929 rules.
// GasChangeCallStorageColdAccess is the amount of gas that will be charged
// for a cold storage access as controlled by EIP2929 rules.
GasChangeCallStorageColdAccess GasChangeReason = 13
// GasChangeCallFailedExecution is the burning of the remaining gas when the execution failed without a revert.
// GasChangeCallFailedExecution is the burning of the remaining gas when the
// execution failed without a revert.
GasChangeCallFailedExecution GasChangeReason = 14
// GasChangeWitnessContractInit flags the event of adding to the witness during the contract creation initialization step.
// GasChangeWitnessContractInit flags the event of adding to the witness
// during the contract creation initialization step.
GasChangeWitnessContractInit GasChangeReason = 15
// GasChangeWitnessContractCreation flags the event of adding to the witness during the contract creation finalization step.
// GasChangeWitnessContractCreation flags the event of adding to the witness
// during the contract creation finalization step.
GasChangeWitnessContractCreation GasChangeReason = 16
// GasChangeWitnessCodeChunk flags the event of adding one or more contract code chunks to the witness.
// GasChangeWitnessCodeChunk flags the event of adding one or more contract
// code chunks to the witness.
GasChangeWitnessCodeChunk GasChangeReason = 17
// GasChangeWitnessContractCollisionCheck flags the event of adding to the witness when checking for contract address collision.
// GasChangeWitnessContractCollisionCheck flags the event of adding to the
// witness when checking for contract address collision.
GasChangeWitnessContractCollisionCheck GasChangeReason = 18
// GasChangeTxDataFloor is the amount of extra gas the transaction has to pay to reach the minimum gas requirement for the
// transaction data. This change will always be a negative change.
// GasChangeTxDataFloor is the amount of extra gas the transaction has to
// pay to reach the minimum gas requirement for the transaction data.
// This change will always be a negative change.
GasChangeTxDataFloor GasChangeReason = 19
// GasChangeIgnored is a special value that can be used to indicate that the gas change should be ignored as
// it will be "manually" tracked by a direct emit of the gas change event.
// GasChangeIgnored is a special value that can be used to indicate that
// the gas change should be ignored as it will be "manually" tracked by
// a direct emit of the gas change event.
GasChangeIgnored GasChangeReason = 0xFF
)
@ -426,11 +500,12 @@ const (
// NonceChangeNewContract is the nonce change of a newly created contract.
NonceChangeNewContract NonceChangeReason = 4
// NonceChangeTransaction is the nonce change due to a EIP-7702 authorization.
// NonceChangeAuthorization is the nonce change due to a EIP-7702 authorization.
NonceChangeAuthorization NonceChangeReason = 5
// NonceChangeRevert is emitted when the nonce is reverted back to a previous value due to call failure.
// It is only emitted when the tracer has opted in to use the journaling wrapper (WrapWithJournal).
// NonceChangeRevert is emitted when the nonce is reverted back to a previous
// value due to call failure. It is only emitted when the tracer has opted in
// to use the journaling wrapper (WrapWithJournal).
NonceChangeRevert NonceChangeReason = 6
// NonceChangeSelfdestruct is emitted when the nonce is reset to zero due to a self-destruct
@ -445,22 +520,26 @@ type CodeChangeReason byte
const (
CodeChangeUnspecified CodeChangeReason = 0
// CodeChangeContractCreation is when a new contract is deployed via CREATE/CREATE2 operations.
// CodeChangeContractCreation is when a new contract is deployed via
// CREATE/CREATE2 operations.
CodeChangeContractCreation CodeChangeReason = 1
// CodeChangeGenesis is when contract code is set during blockchain genesis or initial setup.
// CodeChangeGenesis is when contract code is set during blockchain genesis
// or initial setup.
CodeChangeGenesis CodeChangeReason = 2
// CodeChangeAuthorization is when code is set via EIP-7702 Set Code Authorization.
CodeChangeAuthorization CodeChangeReason = 3
// CodeChangeAuthorizationClear is when EIP-7702 delegation is cleared by setting to zero address.
// CodeChangeAuthorizationClear is when EIP-7702 delegation is cleared by
// setting to zero address.
CodeChangeAuthorizationClear CodeChangeReason = 4
// CodeChangeSelfDestruct is when contract code is cleared due to self-destruct.
CodeChangeSelfDestruct CodeChangeReason = 5
// CodeChangeRevert is emitted when the code is reverted back to a previous value due to call failure.
// It is only emitted when the tracer has opted in to use the journaling wrapper (WrapWithJournal).
// CodeChangeRevert is emitted when the code is reverted back to a previous
// value due to call failure. It is only emitted when the tracer has opted
// in to use the journaling wrapper (WrapWithJournal).
CodeChangeRevert CodeChangeReason = 6
)

View file

@ -42,7 +42,9 @@ func WrapWithJournal(hooks *Hooks) (*Hooks, error) {
return nil, errors.New("wrapping nil tracer")
}
// No state change to journal, return the wrapped hooks as is
if hooks.OnBalanceChange == nil && hooks.OnNonceChange == nil && hooks.OnNonceChangeV2 == nil && hooks.OnCodeChange == nil && hooks.OnCodeChangeV2 == nil && hooks.OnStorageChange == nil {
if hooks.OnBalanceChange == nil && hooks.OnNonceChange == nil && hooks.OnNonceChangeV2 == nil &&
hooks.OnCodeChange == nil && hooks.OnCodeChangeV2 == nil && hooks.OnStorageChange == nil {
// TODO(sina) hooks.OnLog should also be handled here
return hooks, nil
}
if hooks.OnNonceChange != nil && hooks.OnNonceChangeV2 != nil {
@ -56,11 +58,14 @@ func WrapWithJournal(hooks *Hooks) (*Hooks, error) {
wrapped := *hooks
// Create journal
j := &journal{hooks: hooks}
j := &journal{
hooks: hooks,
}
// Scope hooks need to be re-implemented.
wrapped.OnTxEnd = j.OnTxEnd
wrapped.OnEnter = j.OnEnter
wrapped.OnExit = j.OnExit
// Wrap state change hooks.
if hooks.OnBalanceChange != nil {
wrapped.OnBalanceChange = j.OnBalanceChange
@ -69,6 +74,7 @@ func WrapWithJournal(hooks *Hooks) (*Hooks, error) {
// Regardless of which hook version is used in the tracer,
// the journal will want to capture the nonce change reason.
wrapped.OnNonceChangeV2 = j.OnNonceChangeV2
// A precaution to ensure EVM doesn't call both hooks.
wrapped.OnNonceChange = nil
}
@ -81,7 +87,6 @@ func WrapWithJournal(hooks *Hooks) (*Hooks, error) {
if hooks.OnStorageChange != nil {
wrapped.OnStorageChange = j.OnStorageChange
}
return &wrapped, nil
}
@ -148,7 +153,11 @@ func (j *journal) OnExit(depth int, output []byte, gasUsed uint64, err error, re
}
func (j *journal) OnBalanceChange(addr common.Address, prev, new *big.Int, reason BalanceChangeReason) {
j.entries = append(j.entries, balanceChange{addr: addr, prev: prev, new: new})
j.entries = append(j.entries, balanceChange{
addr: addr,
prev: prev,
new: new,
})
if j.hooks.OnBalanceChange != nil {
j.hooks.OnBalanceChange(addr, prev, new, reason)
}
@ -158,7 +167,11 @@ func (j *journal) OnNonceChangeV2(addr common.Address, prev, new uint64, reason
// When a contract is created, the nonce of the creator is incremented.
// This change is not reverted when the creation fails.
if reason != NonceChangeContractCreator {
j.entries = append(j.entries, nonceChange{addr: addr, prev: prev, new: new})
j.entries = append(j.entries, nonceChange{
addr: addr,
prev: prev,
new: new,
})
}
if j.hooks.OnNonceChangeV2 != nil {
j.hooks.OnNonceChangeV2(addr, prev, new, reason)
@ -194,7 +207,12 @@ func (j *journal) OnCodeChangeV2(addr common.Address, prevCodeHash common.Hash,
}
func (j *journal) OnStorageChange(addr common.Address, slot common.Hash, prev, new common.Hash) {
j.entries = append(j.entries, storageChange{addr: addr, slot: slot, prev: prev, new: new})
j.entries = append(j.entries, storageChange{
addr: addr,
slot: slot,
prev: prev,
new: new,
})
if j.hooks.OnStorageChange != nil {
j.hooks.OnStorageChange(addr, slot, prev, new)
}

View file

@ -63,7 +63,7 @@ func (t *testTracer) OnCodeChangeV2(addr common.Address, prevCodeHash common.Has
}
func (t *testTracer) OnStorageChange(addr common.Address, slot common.Hash, prev common.Hash, new common.Hash) {
t.t.Logf("OnStorageCodeChange(%v, %v, %v -> %v)", addr, slot, prev, new)
t.t.Logf("OnStorageChange(%v, %v, %v -> %v)", addr, slot, prev, new)
if t.storage == nil {
t.storage = make(map[common.Hash]common.Hash)
}
@ -76,7 +76,12 @@ func (t *testTracer) OnStorageChange(addr common.Address, slot common.Hash, prev
func TestJournalIntegration(t *testing.T) {
tr := &testTracer{t: t}
wr, err := WrapWithJournal(&Hooks{OnBalanceChange: tr.OnBalanceChange, OnNonceChange: tr.OnNonceChange, OnCodeChange: tr.OnCodeChange, OnStorageChange: tr.OnStorageChange})
wr, err := WrapWithJournal(&Hooks{
OnBalanceChange: tr.OnBalanceChange,
OnNonceChange: tr.OnNonceChange,
OnCodeChange: tr.OnCodeChange,
OnStorageChange: tr.OnStorageChange,
})
if err != nil {
t.Fatalf("failed to wrap test tracer: %v", err)
}

View file

@ -32,7 +32,7 @@ type Validator interface {
ValidateBody(block *types.Block) error
// ValidateState validates the given statedb and optionally the process result.
ValidateState(block *types.Block, state *state.StateDB, res *ProcessResult, stateless bool) error
ValidateState(block *types.Block, state state.BlockStateTransition, res *ProcessResult, stateless bool) error
}
// Prefetcher is an interface for pre-caching transaction signatures and state.
@ -57,4 +57,5 @@ type ProcessResult struct {
Requests [][]byte
Logs []*types.Log
GasUsed uint64
Error error
}

View file

@ -18,143 +18,507 @@ package bal
import (
"bytes"
"maps"
"encoding/json"
"fmt"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/log"
"github.com/holiman/uint256"
"log/slog"
"maps"
"slices"
)
// idxAccessListBuilder is responsible for producing the state accesses and
// reads recorded within the scope of a single index in the access list.
type idxAccessListBuilder struct {
// stores the previous values of any account data that was modified in the
// current index.
prestates map[common.Address]*accountIdxPrestate
// a stack which maintains a set of state mutations/reads for each EVM
// execution frame. Entering a frame appends an intermediate access list
// and terminating a frame merges the accesses/modifications into the
// intermediate access list of the calling frame.
accessesStack []map[common.Address]*constructionAccountAccess
// context logger for instrumenting debug logging
logger *slog.Logger
}
func newAccessListBuilder(logger *slog.Logger) *idxAccessListBuilder {
return &idxAccessListBuilder{
make(map[common.Address]*accountIdxPrestate),
[]map[common.Address]*constructionAccountAccess{
make(map[common.Address]*constructionAccountAccess),
},
logger,
}
}
func (c *idxAccessListBuilder) storageRead(address common.Address, key common.Hash) {
if _, ok := c.accessesStack[len(c.accessesStack)-1][address]; !ok {
c.accessesStack[len(c.accessesStack)-1][address] = &constructionAccountAccess{}
}
acctAccesses := c.accessesStack[len(c.accessesStack)-1][address]
acctAccesses.StorageRead(key)
}
func (c *idxAccessListBuilder) accountRead(address common.Address) {
if _, ok := c.accessesStack[len(c.accessesStack)-1][address]; !ok {
c.accessesStack[len(c.accessesStack)-1][address] = &constructionAccountAccess{}
}
}
func (c *idxAccessListBuilder) storageWrite(address common.Address, key, prevVal, newVal common.Hash) {
if _, ok := c.prestates[address]; !ok {
c.prestates[address] = &accountIdxPrestate{}
}
if c.prestates[address].storage == nil {
c.prestates[address].storage = make(map[common.Hash]common.Hash)
}
if _, ok := c.prestates[address].storage[key]; !ok {
c.prestates[address].storage[key] = prevVal
}
if _, ok := c.accessesStack[len(c.accessesStack)-1][address]; !ok {
c.accessesStack[len(c.accessesStack)-1][address] = &constructionAccountAccess{}
}
acctAccesses := c.accessesStack[len(c.accessesStack)-1][address]
acctAccesses.StorageWrite(key, prevVal, newVal)
}
func (c *idxAccessListBuilder) balanceChange(address common.Address, prev, cur *uint256.Int) {
if _, ok := c.prestates[address]; !ok {
c.prestates[address] = &accountIdxPrestate{}
}
if c.prestates[address].balance == nil {
c.prestates[address].balance = prev
}
if _, ok := c.accessesStack[len(c.accessesStack)-1][address]; !ok {
c.accessesStack[len(c.accessesStack)-1][address] = &constructionAccountAccess{}
}
acctAccesses := c.accessesStack[len(c.accessesStack)-1][address]
acctAccesses.BalanceChange(cur)
}
func (c *idxAccessListBuilder) codeChange(address common.Address, prev, cur []byte) {
// auth unset and selfdestruct pass code change as 'nil'
// however, internally in the access list accumulation of state changes,
// a nil field on an account means that it was never modified in the block.
if cur == nil {
cur = []byte{}
}
if _, ok := c.prestates[address]; !ok {
c.prestates[address] = &accountIdxPrestate{}
}
if c.prestates[address].code == nil {
if prev == nil {
prev = []byte{}
}
c.prestates[address].code = prev
}
if _, ok := c.accessesStack[len(c.accessesStack)-1][address]; !ok {
c.accessesStack[len(c.accessesStack)-1][address] = &constructionAccountAccess{}
}
acctAccesses := c.accessesStack[len(c.accessesStack)-1][address]
acctAccesses.CodeChange(cur)
}
// selfDestruct is invoked when an account which has been created and invoked
// SENDALL in the same transaction is removed as part of transaction finalization.
//
// Any storage accesses/modifications performed at the contract during execution
// of the current call are retained in the block access list as state reads.
func (c *idxAccessListBuilder) selfDestruct(address common.Address) {
access := c.accessesStack[len(c.accessesStack)-1][address]
if len(access.storageMutations) != 0 && access.storageReads == nil {
access.storageReads = make(map[common.Hash]struct{})
}
for key, _ := range access.storageMutations {
access.storageReads[key] = struct{}{}
}
access.storageMutations = nil
}
func (c *idxAccessListBuilder) nonceChange(address common.Address, prev, cur uint64) {
if _, ok := c.prestates[address]; !ok {
c.prestates[address] = &accountIdxPrestate{}
}
if c.prestates[address].nonce == nil {
c.prestates[address].nonce = &prev
}
if _, ok := c.accessesStack[len(c.accessesStack)-1][address]; !ok {
c.accessesStack[len(c.accessesStack)-1][address] = &constructionAccountAccess{}
}
acctAccesses := c.accessesStack[len(c.accessesStack)-1][address]
acctAccesses.NonceChange(cur)
}
// enterScope is called after a new EVM call frame has been entered.
func (c *idxAccessListBuilder) enterScope() {
c.accessesStack = append(c.accessesStack, make(map[common.Address]*constructionAccountAccess))
}
// exitScope is called after an EVM call scope terminates. If the call scope
// terminates with an error:
// * the scope's state accesses are added to the calling scope's access list
// * mutated accounts/storage are added into the calling scope's access list as state accesses
func (c *idxAccessListBuilder) exitScope(evmErr bool) {
childAccessList := c.accessesStack[len(c.accessesStack)-1]
parentAccessList := c.accessesStack[len(c.accessesStack)-2]
for addr, childAccess := range childAccessList {
if _, ok := parentAccessList[addr]; ok {
} else {
parentAccessList[addr] = &constructionAccountAccess{}
}
if evmErr {
// all storage writes in the child scope are converted into reads
// if there were no storage writes, the account is reported in the BAL as a read (if it wasn't already in the BAL and/or mutated previously)
parentAccessList[addr].MergeReads(childAccess)
} else {
parentAccessList[addr].Merge(childAccess)
}
}
c.accessesStack = c.accessesStack[:len(c.accessesStack)-1]
}
// finalise returns the net state mutations at the access list index as well as
// state which was accessed. The idxAccessListBuilder instance should be discarded
// after calling finalise.
func (a *idxAccessListBuilder) finalise() (*StateDiff, StateAccesses) {
diff := &StateDiff{make(map[common.Address]*AccountMutations)}
stateAccesses := make(StateAccesses)
for addr, access := range a.accessesStack[0] {
// remove any reported mutations from the access list with no net difference vs the index prestate value
if access.nonce != nil && *a.prestates[addr].nonce == *access.nonce {
access.nonce = nil
}
if access.balance != nil && a.prestates[addr].balance.Eq(access.balance) {
access.balance = nil
}
if access.code != nil && bytes.Equal(access.code, a.prestates[addr].code) {
access.code = nil
}
if access.storageMutations != nil {
for key, val := range access.storageMutations {
if a.prestates[addr].storage[key] == val {
delete(access.storageMutations, key)
access.storageReads[key] = struct{}{}
}
}
if len(access.storageMutations) == 0 {
access.storageMutations = nil
}
}
// if the account has no net mutations against the index prestate, only include
// it in the state read set
if len(access.code) == 0 && access.nonce == nil && access.balance == nil && len(access.storageMutations) == 0 {
stateAccesses[addr] = make(map[common.Hash]struct{})
if access.storageReads != nil {
stateAccesses[addr] = access.storageReads
}
continue
}
stateAccesses[addr] = access.storageReads
diff.Mutations[addr] = &AccountMutations{
Balance: access.balance,
Nonce: access.nonce,
Code: access.code,
StorageWrites: access.storageMutations,
}
}
return diff, stateAccesses
}
func (c *AccessListBuilder) EnterTx(txHash common.Hash) {
c.idxBuilder = newAccessListBuilder(slog.New(slog.DiscardHandler))
}
// FinaliseIdxChanges records all pending state mutations/accesses in the
// access list at the given index. The set of pending state mutations/accesses is
// then emptied.
func (c *AccessListBuilder) FinaliseIdxChanges(idx uint16) {
pendingDiff, pendingAccesses := c.idxBuilder.finalise()
c.idxBuilder = newAccessListBuilder(slog.New(slog.DiscardHandler))
for addr, pendingAcctDiff := range pendingDiff.Mutations {
finalizedAcctChanges, ok := c.FinalizedAccesses[addr]
if !ok {
finalizedAcctChanges = &ConstructionAccountAccesses{}
c.FinalizedAccesses[addr] = finalizedAcctChanges
}
if pendingAcctDiff.Nonce != nil {
if finalizedAcctChanges.NonceChanges == nil {
finalizedAcctChanges.NonceChanges = make(map[uint16]uint64)
}
finalizedAcctChanges.NonceChanges[idx] = *pendingAcctDiff.Nonce
}
if pendingAcctDiff.Balance != nil {
if finalizedAcctChanges.BalanceChanges == nil {
finalizedAcctChanges.BalanceChanges = make(map[uint16]*uint256.Int)
}
finalizedAcctChanges.BalanceChanges[idx] = pendingAcctDiff.Balance
}
if pendingAcctDiff.Code != nil {
if finalizedAcctChanges.CodeChanges == nil {
finalizedAcctChanges.CodeChanges = make(map[uint16]CodeChange)
}
finalizedAcctChanges.CodeChanges[idx] = CodeChange{idx, pendingAcctDiff.Code}
}
if pendingAcctDiff.StorageWrites != nil {
if finalizedAcctChanges.StorageWrites == nil {
finalizedAcctChanges.StorageWrites = make(map[common.Hash]map[uint16]common.Hash)
}
for key, val := range pendingAcctDiff.StorageWrites {
if _, ok := finalizedAcctChanges.StorageWrites[key]; !ok {
finalizedAcctChanges.StorageWrites[key] = make(map[uint16]common.Hash)
}
finalizedAcctChanges.StorageWrites[key][idx] = val
// if any of the newly-written storage slots were previously
// accessed, they must be removed from the accessed state set.
// TODO: commenting this 'if' results in no test failures.
// double-check that this edge-case was fixed by a future
// release of the eest BAL tests.
if _, ok := finalizedAcctChanges.StorageReads[key]; ok {
delete(finalizedAcctChanges.StorageReads, key)
}
}
}
}
// record pending accesses in the BAL access set unless they were
// already written in a previous index
for addr, pendingAccountAccesses := range pendingAccesses {
finalizedAcctAccesses, ok := c.FinalizedAccesses[addr]
if !ok {
finalizedAcctAccesses = &ConstructionAccountAccesses{}
c.FinalizedAccesses[addr] = finalizedAcctAccesses
}
for key := range pendingAccountAccesses {
if _, ok := finalizedAcctAccesses.StorageWrites[key]; ok {
continue
}
if finalizedAcctAccesses.StorageReads == nil {
finalizedAcctAccesses.StorageReads = make(map[common.Hash]struct{})
}
finalizedAcctAccesses.StorageReads[key] = struct{}{}
}
}
c.lastFinalizedMutations = pendingDiff
c.lastFinalizedAccesses = pendingAccesses
}
func (c *AccessListBuilder) StorageRead(address common.Address, key common.Hash) {
c.idxBuilder.storageRead(address, key)
}
func (c *AccessListBuilder) AccountRead(address common.Address) {
c.idxBuilder.accountRead(address)
}
func (c *AccessListBuilder) StorageWrite(address common.Address, key, prevVal, newVal common.Hash) {
c.idxBuilder.storageWrite(address, key, prevVal, newVal)
}
func (c *AccessListBuilder) BalanceChange(address common.Address, prev, cur *uint256.Int) {
c.idxBuilder.balanceChange(address, prev, cur)
}
func (c *AccessListBuilder) NonceChange(address common.Address, prev, cur uint64) {
c.idxBuilder.nonceChange(address, prev, cur)
}
func (c *AccessListBuilder) CodeChange(address common.Address, prev, cur []byte) {
c.idxBuilder.codeChange(address, prev, cur)
}
func (c *AccessListBuilder) SelfDestruct(address common.Address) {
c.idxBuilder.selfDestruct(address)
}
func (c *AccessListBuilder) EnterScope() {
c.idxBuilder.enterScope()
}
func (c *AccessListBuilder) ExitScope(executionErr bool) {
c.idxBuilder.exitScope(executionErr)
}
// CodeChange contains the runtime bytecode deployed at an address and the
// transaction index where the deployment took place.
type CodeChange struct {
TxIndex uint16
Code []byte `json:"code,omitempty"`
TxIdx uint16
Code []byte `json:"code,omitempty"`
}
// ConstructionAccountAccess contains post-block account state for mutations as well as
// ConstructionAccountAccesses contains post-block account state for mutations as well as
// all storage keys that were read during execution. It is used when building block
// access list during execution.
type ConstructionAccountAccess struct {
type ConstructionAccountAccesses struct {
// StorageWrites is the post-state values of an account's storage slots
// that were modified in a block, keyed by the slot key and the tx index
// where the modification occurred.
StorageWrites map[common.Hash]map[uint16]common.Hash `json:"storageWrites,omitempty"`
StorageWrites map[common.Hash]map[uint16]common.Hash
// StorageReads is the set of slot keys that were accessed during block
// execution.
//
// Storage slots which are both read and written (with changed values)
// storage slots which are both read and written (with changed values)
// appear only in StorageWrites.
StorageReads map[common.Hash]struct{} `json:"storageReads,omitempty"`
StorageReads map[common.Hash]struct{}
// BalanceChanges contains the post-transaction balances of an account,
// keyed by transaction indices where it was changed.
BalanceChanges map[uint16]*uint256.Int `json:"balanceChanges,omitempty"`
BalanceChanges map[uint16]*uint256.Int
// NonceChanges contains the post-state nonce values of an account keyed
// by tx index.
NonceChanges map[uint16]uint64 `json:"nonceChanges,omitempty"`
NonceChanges map[uint16]uint64
// CodeChange is only set for contract accounts which were deployed in
// the block.
CodeChange *CodeChange `json:"codeChange,omitempty"`
CodeChanges map[uint16]CodeChange
}
// NewConstructionAccountAccess initializes the account access object.
func NewConstructionAccountAccess() *ConstructionAccountAccess {
return &ConstructionAccountAccess{
StorageWrites: make(map[common.Hash]map[uint16]common.Hash),
StorageReads: make(map[common.Hash]struct{}),
BalanceChanges: make(map[uint16]*uint256.Int),
NonceChanges: make(map[uint16]uint64),
// constructionAccountAccess contains fields for an account which were modified
// during execution of the current access list index.
// It also accumulates a set of storage slots which were accessed but not
// modified.
type constructionAccountAccess struct {
code []byte
nonce *uint64
balance *uint256.Int
storageMutations map[common.Hash]common.Hash
storageReads map[common.Hash]struct{}
}
// Merge adds the accesses/mutations from other into the calling instance.
func (c *constructionAccountAccess) Merge(other *constructionAccountAccess) {
if other.code != nil {
c.code = other.code
}
if other.nonce != nil {
c.nonce = other.nonce
}
if other.balance != nil {
c.balance = other.balance
}
if other.storageMutations != nil {
if c.storageMutations == nil {
c.storageMutations = make(map[common.Hash]common.Hash)
}
for key, val := range other.storageMutations {
c.storageMutations[key] = val
delete(c.storageReads, key)
}
}
if other.storageReads != nil {
if c.storageReads == nil {
c.storageReads = make(map[common.Hash]struct{})
}
// TODO: if the state was mutated in the caller, don't add it to the caller's reads.
// need to have a test case for this, verify it fails in the current state, and then fix this bug.
for key, val := range other.storageReads {
c.storageReads[key] = val
}
}
}
// ConstructionBlockAccessList contains post-block modified state and some state accessed
// in execution (account addresses and storage keys).
type ConstructionBlockAccessList struct {
	// Accounts maps each touched address to its accumulated accesses/mutations.
	Accounts map[common.Address]*ConstructionAccountAccess
}
// NewConstructionBlockAccessList instantiates an empty access list.
func NewConstructionBlockAccessList() ConstructionBlockAccessList {
return ConstructionBlockAccessList{
Accounts: make(map[common.Address]*ConstructionAccountAccess),
// MergeReads folds the accesses of a reverted execution frame into the
// current frame: every slot the reverted frame read or wrote is recorded
// here as a read, unless the current frame has already mutated that slot
// (mutated slots live exclusively in the write set).
func (c *constructionAccountAccess) MergeReads(other *constructionAccountAccess) {
	// recordRead adds key to the read set unless it is already mutated here.
	recordRead := func(key common.Hash) {
		if _, mutated := c.storageMutations[key]; mutated {
			return
		}
		if c.storageReads == nil {
			c.storageReads = make(map[common.Hash]struct{})
		}
		c.storageReads[key] = struct{}{}
	}
	// Writes of the reverted frame degrade to reads in the surviving frame.
	for key := range other.storageMutations {
		recordRead(key)
	}
	for key := range other.storageReads {
		recordRead(key)
	}
}
// AccountRead records the address of an account that has been read during execution.
func (b *ConstructionBlockAccessList) AccountRead(addr common.Address) {
if _, ok := b.Accounts[addr]; !ok {
b.Accounts[addr] = NewConstructionAccountAccess()
// StorageRead records key as a read-only storage access. A slot that has
// already been recorded as mutated is left in the write set only, keeping
// the read/write sets disjoint.
func (c *constructionAccountAccess) StorageRead(key common.Hash) {
	if _, written := c.storageMutations[key]; written {
		return
	}
	if c.storageReads == nil {
		c.storageReads = make(map[common.Hash]struct{})
	}
	c.storageReads[key] = struct{}{}
}
// StorageRead records a storage key read during execution.
func (b *ConstructionBlockAccessList) StorageRead(address common.Address, key common.Hash) {
if _, ok := b.Accounts[address]; !ok {
b.Accounts[address] = NewConstructionAccountAccess()
func (c *constructionAccountAccess) StorageWrite(key, prevVal, newVal common.Hash) {
if c.storageMutations == nil {
c.storageMutations = make(map[common.Hash]common.Hash)
}
if _, ok := b.Accounts[address].StorageWrites[key]; ok {
return
}
b.Accounts[address].StorageReads[key] = struct{}{}
c.storageMutations[key] = newVal
// a key can be first read and later written, but it must only show up
// in either read or write sets, not both.
//
// the caller should not
// call StorageRead on a slot that was already written
delete(c.storageReads, key)
}
// StorageWrite records the post-transaction value of a mutated storage slot.
// The storage slot is removed from the list of read slots.
func (b *ConstructionBlockAccessList) StorageWrite(txIdx uint16, address common.Address, key, value common.Hash) {
if _, ok := b.Accounts[address]; !ok {
b.Accounts[address] = NewConstructionAccountAccess()
}
if _, ok := b.Accounts[address].StorageWrites[key]; !ok {
b.Accounts[address].StorageWrites[key] = make(map[uint16]common.Hash)
}
b.Accounts[address].StorageWrites[key][txIdx] = value
delete(b.Accounts[address].StorageReads, key)
// BalanceChange records cur as the account's post-change balance for the
// current access list index. The pointer is retained as-is (not copied).
func (c *constructionAccountAccess) BalanceChange(cur *uint256.Int) {
	c.balance = cur
}
// CodeChange records the code of a newly-created contract.
func (b *ConstructionBlockAccessList) CodeChange(address common.Address, txIndex uint16, code []byte) {
if _, ok := b.Accounts[address]; !ok {
b.Accounts[address] = NewConstructionAccountAccess()
}
b.Accounts[address].CodeChange = &CodeChange{
TxIndex: txIndex,
Code: bytes.Clone(code),
}
// CodeChange records cur as the account's post-change code for the current
// access list index. The slice is retained without copying.
func (c *constructionAccountAccess) CodeChange(cur []byte) {
	c.code = cur
}
// NonceChange records tx post-state nonce of any contract-like accounts whose
// nonce was incremented.
func (b *ConstructionBlockAccessList) NonceChange(address common.Address, txIdx uint16, postNonce uint64) {
if _, ok := b.Accounts[address]; !ok {
b.Accounts[address] = NewConstructionAccountAccess()
}
b.Accounts[address].NonceChanges[txIdx] = postNonce
// NonceChange records cur as the account's post-change nonce for the
// current access list index.
func (c *constructionAccountAccess) NonceChange(cur uint64) {
	c.nonce = &cur
}
// BalanceChange records the post-transaction balance of an account whose
// balance changed.
func (b *ConstructionBlockAccessList) BalanceChange(txIdx uint16, address common.Address, balance *uint256.Int) {
if _, ok := b.Accounts[address]; !ok {
b.Accounts[address] = NewConstructionAccountAccess()
}
b.Accounts[address].BalanceChanges[txIdx] = balance.Clone()
// ConstructionBlockAccessList maps each account touched while building the
// block access list to its accumulated accesses/mutations.
type ConstructionBlockAccessList map[common.Address]*ConstructionAccountAccesses
// AccessListBuilder is used to build an EIP-7928 block access list.
type AccessListBuilder struct {
	// FinalizedAccesses holds accesses accumulated from all access list
	// indices finalized so far.
	FinalizedAccesses ConstructionBlockAccessList
	// idxBuilder presumably accumulates accesses for the in-progress index —
	// its definition is elsewhere; confirm.
	idxBuilder *idxAccessListBuilder
	// lastFinalizedMutations/lastFinalizedAccesses cache the result of the
	// most recently finalized index; nil until one is finalized.
	lastFinalizedMutations *StateDiff
	lastFinalizedAccesses  StateAccesses
	logger                 *slog.Logger
}
// PrettyPrint returns a human-readable representation of the access list
func (b *ConstructionBlockAccessList) PrettyPrint() string {
enc := b.toEncodingObj()
return enc.PrettyPrint()
// NewAccessListBuilder instantiates an empty access list builder whose
// logger discards all output. The last-finalized fields remain nil until
// an index is finalized.
func NewAccessListBuilder() *AccessListBuilder {
	discard := slog.New(slog.DiscardHandler)
	return &AccessListBuilder{
		FinalizedAccesses: make(map[common.Address]*ConstructionAccountAccesses),
		idxBuilder:        newAccessListBuilder(discard),
		logger:            discard,
	}
}
// Copy returns a deep copy of the access list.
func (b *ConstructionBlockAccessList) Copy() *ConstructionBlockAccessList {
res := NewConstructionBlockAccessList()
for addr, aa := range b.Accounts {
var aaCopy ConstructionAccountAccess
func (c *AccessListBuilder) Copy() *AccessListBuilder {
res := NewAccessListBuilder()
for addr, aa := range c.FinalizedAccesses {
var aaCopy ConstructionAccountAccesses
slotWrites := make(map[common.Hash]map[uint16]common.Hash, len(aa.StorageWrites))
for key, m := range aa.StorageWrites {
@ -170,13 +534,235 @@ func (b *ConstructionBlockAccessList) Copy() *ConstructionBlockAccessList {
aaCopy.BalanceChanges = balances
aaCopy.NonceChanges = maps.Clone(aa.NonceChanges)
if aa.CodeChange != nil {
aaCopy.CodeChange = &CodeChange{
TxIndex: aa.CodeChange.TxIndex,
Code: bytes.Clone(aa.CodeChange.Code),
codeChangesCopy := make(map[uint16]CodeChange)
for idx, codeChange := range aa.CodeChanges {
codeChangesCopy[idx] = CodeChange{
TxIdx: idx,
Code: bytes.Clone(codeChange.Code),
}
}
res.Accounts[addr] = &aaCopy
res.FinalizedAccesses[addr] = &aaCopy
}
return &res
return res
}
// FinalizedIdxChanges returns the state mutations and accesses recorded in the latest
// access list index that was finalized.
// Both return values are nil until an index has been finalized
// (NewAccessListBuilder leaves them nil).
func (c *AccessListBuilder) FinalizedIdxChanges() (*StateDiff, StateAccesses) {
	return c.lastFinalizedMutations, c.lastFinalizedAccesses
}
// StateDiff contains state mutations occurring over one or more access list
// indices.
type StateDiff struct {
	// Mutations maps each mutated account address to its aggregated changes.
	Mutations map[common.Address]*AccountMutations `json:"Mutations,omitempty"`
}
// StateAccesses contains a set of accounts/storage that were accessed during the
// execution of one or more access list indices. Each address maps to the set
// of storage slot keys touched under that account.
type StateAccesses map[common.Address]map[common.Hash]struct{}
// Merge adds the accesses from other into s.
func (s *StateAccesses) Merge(other StateAccesses) {
	// A map is a reference type, so mutating the dereferenced value
	// mutates the receiver's map directly.
	dst := *s
	for addr, accesses := range other {
		slots, ok := dst[addr]
		if !ok {
			slots = make(map[common.Hash]struct{})
			dst[addr] = slots
		}
		for slot := range accesses {
			slots[slot] = struct{}{}
		}
	}
}
// accountIdxPrestate records the account prestate at an access list index
// for components which were modified at that index.
type accountIdxPrestate struct {
	// balance before the index; nil when the balance was not modified.
	balance *uint256.Int
	// nonce before the index; nil when the nonce was not modified.
	nonce *uint64
	// code before the index; nil when the code was not modified.
	code ContractCode
	// storage maps modified slot keys to their values before the index.
	storage map[common.Hash]common.Hash
}
// AccountMutations contains mutations that were made to an account across
// one or more access list indices. A nil field means the corresponding
// component was not modified.
type AccountMutations struct {
	Balance *uint256.Int `json:"Balance,omitempty"` // post-mutation balance
	Nonce   *uint64      `json:"Nonce,omitempty"`   // post-mutation nonce
	Code    ContractCode `json:"Code,omitempty"`    // post-mutation code
	// StorageWrites maps slot key -> post-mutation value.
	StorageWrites map[common.Hash]common.Hash `json:"StorageWrites,omitempty"`
}
// String returns a human-readable JSON representation of the account
// mutations. The result carries the trailing newline json.Encoder appends.
func (a *AccountMutations) String() string {
	var out bytes.Buffer
	encoder := json.NewEncoder(&out)
	encoder.SetIndent("", " ")
	_ = encoder.Encode(a) // best effort: String cannot surface an error
	return out.String()
}
// LogDiff compares a ("local") against other ("remote") and logs every
// differing component via log.Error. Nothing is logged when they are equal.
func (a *AccountMutations) LogDiff(addr common.Address, other *AccountMutations) {
	var diff []any
	// Balance differs if exactly one side is set, or both are set but unequal.
	if a.Balance != nil || other.Balance != nil {
		if a.Balance == nil || other.Balance == nil || !a.Balance.Eq(other.Balance) {
			diff = append(diff, "local balance", a.Balance, "remote balance", other.Balance)
		}
	}
	// Code is compared byte-wise; nil and empty code are treated as equal.
	if (len(a.Code) != 0 || len(other.Code) != 0) && !bytes.Equal(a.Code, other.Code) {
		diff = append(diff, "local code", a.Code, "remote code", other.Code)
	}
	if a.Nonce != nil || other.Nonce != nil {
		if a.Nonce == nil || other.Nonce == nil || *a.Nonce != *other.Nonce {
			diff = append(diff, "local nonce", a.Nonce, "remote nonce", other.Nonce)
		}
	}
	if a.StorageWrites != nil || other.StorageWrites != nil {
		if !maps.Equal(a.StorageWrites, other.StorageWrites) {
			// Walk the union of slots in lexicographic order so the log
			// output is deterministic.
			union := make(map[common.Hash]struct{})
			for slot := range a.StorageWrites {
				union[slot] = struct{}{}
			}
			for slot := range other.StorageWrites {
				union[slot] = struct{}{}
			}
			orderedKeys := slices.SortedFunc(maps.Keys(union), func(h1 common.Hash, h2 common.Hash) int {
				return bytes.Compare(h1[:], h2[:])
			})
			for _, key := range orderedKeys {
				aVal, inA := a.StorageWrites[key]
				otherVal, inOther := other.StorageWrites[key]
				// Differs when present on only one side, or values diverge.
				if inA != inOther || !bytes.Equal(aVal[:], otherVal[:]) {
					diff = append(diff, fmt.Sprintf("storage-local-%x", key), aVal)
					diff = append(diff, fmt.Sprintf("storage-remote-%x", key), otherVal)
				}
			}
		}
	}
	if len(diff) > 0 {
		log.Error(fmt.Sprintf("diff between remote/local BAL for address %x", addr), diff...)
	}
}
// Copy returns a deep-copy of the instance: pointer fields are re-allocated
// and the code slice and storage map are cloned.
func (a *AccountMutations) Copy() *AccountMutations {
	cpy := new(AccountMutations)
	if a.Nonce != nil {
		nonce := *a.Nonce
		cpy.Nonce = &nonce
	}
	if a.Code != nil {
		cpy.Code = bytes.Clone(a.Code)
	}
	if a.Balance != nil {
		cpy.Balance = new(uint256.Int).Set(a.Balance)
	}
	if a.StorageWrites != nil {
		cpy.StorageWrites = maps.Clone(a.StorageWrites)
	}
	return cpy
}
// String returns the state diff as a formatted JSON string (including the
// trailing newline appended by json.Encoder).
func (s *StateDiff) String() string {
	var out bytes.Buffer
	encoder := json.NewEncoder(&out)
	encoder.SetIndent("", " ")
	_ = encoder.Encode(s) // best effort: String cannot surface an error
	return out.String()
}
// Merge merges the state changes present in next into the caller. After,
// the state of the caller is the aggregate diff through next.
//
// NOTE(review): for accounts already present in s, the Balance/Code/Nonce
// pointers and StorageWrites values from next are stored directly (only the
// first StorageWrites map is cloned), so later mutation of next would alias
// into s. Accounts new to s are deep-copied. Confirm callers treat next as
// frozen after merging.
func (s *StateDiff) Merge(next *StateDiff) {
	for account, diff := range next.Mutations {
		if mut, ok := s.Mutations[account]; ok {
			// Component-wise: later (next) values win.
			if diff.Balance != nil {
				mut.Balance = diff.Balance
			}
			if diff.Code != nil {
				mut.Code = diff.Code
			}
			if diff.Nonce != nil {
				mut.Nonce = diff.Nonce
			}
			if len(diff.StorageWrites) > 0 {
				if mut.StorageWrites == nil {
					mut.StorageWrites = maps.Clone(diff.StorageWrites)
				} else {
					for key, val := range diff.StorageWrites {
						mut.StorageWrites[key] = val
					}
				}
			}
		} else {
			// First sighting of this account: deep-copy so s owns its data.
			s.Mutations[account] = diff.Copy()
		}
	}
}
// Copy returns a deep copy of the StateDiff.
func (s *StateDiff) Copy() *StateDiff {
	out := &StateDiff{Mutations: make(map[common.Address]*AccountMutations, len(s.Mutations))}
	for addr, accountDiff := range s.Mutations {
		out.Mutations[addr] = accountDiff.Copy()
	}
	return out
}
// Copy returns a deep copy of the access list. An empty input yields a nil
// result, matching the original semantics.
func (e BlockAccessList) Copy() (res BlockAccessList) {
	for i := range e {
		res = append(res, e[i].Copy())
	}
	return
}
// Eq reports whether the calling instance is equal to the provided one.
// Balance, Nonce and StorageWrites are unequal if set on only one side
// (a nil map vs. an empty map counts as unequal); Code is compared
// byte-wise, so nil and empty code are equal.
func (a *AccountMutations) Eq(other *AccountMutations) bool {
	if (a.Balance == nil) != (other.Balance == nil) {
		return false
	}
	if a.Balance != nil && !a.Balance.Eq(other.Balance) {
		return false
	}
	if (len(a.Code) != 0 || len(other.Code) != 0) && !bytes.Equal(a.Code, other.Code) {
		return false
	}
	if (a.Nonce == nil) != (other.Nonce == nil) {
		return false
	}
	if a.Nonce != nil && *a.Nonce != *other.Nonce {
		return false
	}
	if (a.StorageWrites == nil) != (other.StorageWrites == nil) {
		return false
	}
	if !maps.Equal(a.StorageWrites, other.StorageWrites) {
		return false
	}
	return true
}

File diff suppressed because one or more lines are too long

View file

@ -19,6 +19,8 @@ package bal
import (
"bytes"
"cmp"
"encoding/hex"
"encoding/json"
"errors"
"fmt"
"io"
@ -33,27 +35,86 @@ import (
"github.com/holiman/uint256"
)
//go:generate go run github.com/ethereum/go-ethereum/rlp/rlpgen -out bal_encoding_rlp_generated.go -type BlockAccessList -decoder
//go:generate go run github.com/ethereum/go-ethereum/rlp/rlpgen -out bal_encoding_rlp_generated.go -type AccountAccess -decoder
// These are objects used as input for the access list encoding. They mirror
// the spec format.
// BlockAccessList is the encoding format of ConstructionBlockAccessList.
type BlockAccessList struct {
Accesses []AccountAccess `ssz-max:"300000"`
// BlockAccessList is the encoding format of AccessListBuilder.
type BlockAccessList []AccountAccess
// EncodeRLP writes the access list as an RLP list of account accesses.
func (e BlockAccessList) EncodeRLP(_w io.Writer) error {
	w := rlp.NewEncoderBuffer(_w)
	l := w.List()
	for _, access := range e {
		// AccountAccess.EncodeRLP can fail; propagate the error instead of
		// silently dropping it (previously the return value was ignored).
		if err := access.EncodeRLP(w); err != nil {
			return err
		}
	}
	w.ListEnd(l)
	return w.Flush()
}
// DecodeRLP decodes an RLP list of account accesses into e, replacing any
// existing contents (the backing array is reused).
func (e *BlockAccessList) DecodeRLP(dec *rlp.Stream) error {
	if _, err := dec.List(); err != nil {
		return err
	}
	*e = (*e)[:0]
	for dec.MoreDataInList() {
		var access AccountAccess
		if err := access.DecodeRLP(dec); err != nil {
			return err
		}
		*e = append(*e, access)
	}
	// ListEnd can fail (e.g. on malformed list framing); propagate its
	// error instead of discarding it as before.
	return dec.ListEnd()
}
// JSONString returns an indented JSON rendering of the access list's
// human-readable representation. Marshaling errors are ignored; on failure
// the result is the empty string.
func (e *BlockAccessList) JSONString() string {
	out, _ := json.MarshalIndent(e.StringableRepresentation(), "", " ")
	return string(out)
}
// StringableRepresentation returns an instance of the block access list
// which can be converted to a human-readable JSON representation: a plain
// []AccountAccess copy, which sidesteps BlockAccessList's own MarshalJSON
// (that one emits an RLP hex string).
func (e *BlockAccessList) StringableRepresentation() interface{} {
	res := append([]AccountAccess{}, *e...)
	return &res
}
// String returns the JSON encoding of the access list followed by the
// newline json.Encoder appends. Because BlockAccessList defines MarshalJSON,
// the output is its RLP-as-hex form rather than a structured object.
func (e *BlockAccessList) String() string {
	var out bytes.Buffer
	encoder := json.NewEncoder(&out)
	encoder.SetIndent("", " ")
	// TODO: check error (preserved from original)
	_ = encoder.Encode(e)
	return out.String()
}
// TODO: check that no fields are nil in Validate (unless it's valid for them to be nil)
// Validate returns an error if the contents of the access list are not ordered
// according to the spec or any code changes are contained which exceed protocol
// max code size.
func (e *BlockAccessList) Validate() error {
if !slices.IsSortedFunc(e.Accesses, func(a, b AccountAccess) int {
func (e BlockAccessList) Validate(blockTxCount int) error {
if !slices.IsSortedFunc(e, func(a, b AccountAccess) int {
return bytes.Compare(a.Address[:], b.Address[:])
}) {
return errors.New("block access list accounts not in lexicographic order")
}
for _, entry := range e.Accesses {
if err := entry.validate(); err != nil {
// check that the accounts are unique
addrs := make(map[common.Address]struct{})
for _, acct := range e {
addr := acct.Address
if _, ok := addrs[addr]; ok {
return fmt.Errorf("duplicate account in block access list: %x", addr)
}
addrs[addr] = struct{}{}
}
for _, entry := range e {
if err := entry.validate(blockTxCount); err != nil {
return err
}
}
@ -70,106 +131,220 @@ func (e *BlockAccessList) Hash() common.Hash {
// under reasonable conditions.
panic(err)
}
/*
bal, err := json.MarshalIndent(e.StringableRepresentation(), "", " ")
if err != nil {
panic(err)
}
*/
return crypto.Keccak256Hash(enc.Bytes())
}
// encodeBalance encodes the provided balance into 16 bytes, big-endian and
// left-padded with zeros. It panics when the value needs more than 16 bytes.
func encodeBalance(val *uint256.Int) [16]byte {
	var enc [16]byte
	raw := val.Bytes()
	if len(raw) > 16 {
		panic("can't encode value that is greater than 16 bytes in size")
	}
	copy(enc[16-len(raw):], raw)
	return enc
}
// encodingBalanceChange is the encoding format of BalanceChange.
type encodingBalanceChange struct {
TxIdx uint16 `ssz-size:"2"`
Balance [16]byte `ssz-size:"16"`
TxIdx uint16 `json:"txIndex"`
Balance *uint256.Int `json:"balance"`
}
// encodingAccountNonce is the encoding format of NonceChange.
type encodingAccountNonce struct {
TxIdx uint16 `ssz-size:"2"`
Nonce uint64 `ssz-size:"8"`
TxIdx uint16 `json:"txIndex"`
Nonce uint64 `json:"nonce"`
}
// encodingStorageWrite is the encoding format of StorageWrites.
type encodingStorageWrite struct {
TxIdx uint16
ValueAfter [32]byte `ssz-size:"32"`
TxIdx uint16 `json:"txIndex"`
ValueAfter *EncodedStorage `json:"valueAfter"`
}
// EncodedStorage can represent either a storage key or value. The 32-byte
// word is held as a big integer; convert back with ToHash.
type EncodedStorage struct {
	inner *uint256.Int
}

// Compile-time checks that EncodedStorage implements the rlp interfaces.
var _ rlp.Encoder = &EncodedStorage{}
var _ rlp.Decoder = &EncodedStorage{}
// ToHash returns the value as a left-padded 32-byte hash. A nil receiver
// yields the zero hash.
// NOTE(review): a non-nil receiver whose inner is nil will panic in
// Bytes32 — callers appear to always populate inner; confirm.
func (e *EncodedStorage) ToHash() common.Hash {
	if e == nil {
		return common.Hash{}
	}
	return e.inner.Bytes32()
}
// newEncodedStorageFromHash wraps a 32-byte hash in its EncodedStorage form.
func newEncodedStorageFromHash(hash common.Hash) *EncodedStorage {
	return &EncodedStorage{inner: new(uint256.Int).SetBytes(hash[:])}
}
// UnmarshalJSON decodes a hex string (optionally 0x-prefixed, at most 32
// bytes) into s. An empty string decodes to the zero value.
func (s *EncodedStorage) UnmarshalJSON(b []byte) error {
	var str string
	if err := json.Unmarshal(b, &str); err != nil {
		return err
	}
	// Use TrimPrefix: the previous TrimLeft(str, "0x") treated "0x" as a
	// character set, stripping any run of leading '0'/'x' characters and
	// mangling inputs such as "0x0".
	str = strings.TrimPrefix(str, "0x")
	if len(str)%2 == 1 {
		str = "0" + str
	}
	val, err := hex.DecodeString(str) // "" decodes to an empty slice
	if err != nil {
		return err
	}
	if len(val) > 32 {
		return fmt.Errorf("storage key/value cannot be greater than 32 bytes")
	}
	// Always populate inner — previously an empty input returned early and
	// left inner nil, making the value panic later in ToHash/MarshalJSON.
	s.inner = new(uint256.Int).SetBytes(val)
	return nil
}
// MarshalJSON encodes the value as a JSON string holding uint256.Int.Hex's
// minimal 0x-prefixed form.
// NOTE(review): panics when inner is nil (zero-value EncodedStorage).
func (s EncodedStorage) MarshalJSON() ([]byte, error) {
	return json.Marshal(s.inner.Hex())
}
// EncodeRLP writes the storage word by delegating to uint256.Int.EncodeRLP.
func (s *EncodedStorage) EncodeRLP(_w io.Writer) error {
	return s.inner.EncodeRLP(_w)
}
// DecodeRLP reads a uint256 from the stream into s, replacing any prior value.
func (s *EncodedStorage) DecodeRLP(dec *rlp.Stream) error {
	// The previous guard `if s == nil { *s = EncodedStorage{} }` was a
	// guaranteed nil-pointer dereference: a method cannot repair a nil
	// pointer receiver by assigning through it. It has been removed; a nil
	// receiver is a programmer error and will panic on the line below.
	s.inner = uint256.NewInt(0)
	return dec.ReadUint256(s.inner)
}
// encodingStorageWrite is the encoding format of SlotWrites.
type encodingSlotWrites struct {
Slot [32]byte `ssz-size:"32"`
Accesses []encodingStorageWrite `ssz-max:"300000"`
Slot *EncodedStorage `json:"slot"`
Accesses []encodingStorageWrite `json:"accesses"`
}
// validate returns an instance of the encoding-representation slot writes in
// working representation.
func (e *encodingSlotWrites) validate() error {
if slices.IsSortedFunc(e.Accesses, func(a, b encodingStorageWrite) int {
func (e *encodingSlotWrites) validate(blockTxCount int) error {
if !slices.IsSortedFunc(e.Accesses, func(a, b encodingStorageWrite) int {
return cmp.Compare[uint16](a.TxIdx, b.TxIdx)
}) {
return nil
return errors.New("storage write tx indices not in order")
}
return errors.New("storage write tx indices not in order")
// TODO: add test that covers there are actually storage modifications here
// if there aren't, it should be a bad block
if len(e.Accesses) == 0 {
return fmt.Errorf("empty storage writes")
} else if int(e.Accesses[len(e.Accesses)-1].TxIdx) >= blockTxCount+2 {
return fmt.Errorf("storage access reported index higher than allowed")
}
return nil
}
// AccountAccess is the encoding format of ConstructionAccountAccess.
// AccountAccess is the encoding format of ConstructionAccountAccesses.
type AccountAccess struct {
Address [20]byte `ssz-size:"20"` // 20-byte Ethereum address
StorageWrites []encodingSlotWrites `ssz-max:"300000"` // Storage changes (slot -> [tx_index -> new_value])
StorageReads [][32]byte `ssz-max:"300000"` // Read-only storage keys
BalanceChanges []encodingBalanceChange `ssz-max:"300000"` // Balance changes ([tx_index -> post_balance])
NonceChanges []encodingAccountNonce `ssz-max:"300000"` // Nonce changes ([tx_index -> new_nonce])
Code []CodeChange `ssz-max:"1"` // Code changes ([tx_index -> new_code])
Address common.Address `json:"address,omitempty"` // 20-byte Ethereum address
StorageChanges []encodingSlotWrites `json:"storageChanges,omitempty"` // EncodedStorage changes (slot -> [tx_index -> new_value])
StorageReads []*EncodedStorage `json:"storageReads,omitempty"` // Read-only storage keys
BalanceChanges []encodingBalanceChange `json:"balanceChanges,omitempty"` // Balance changes ([tx_index -> post_balance])
NonceChanges []encodingAccountNonce `json:"nonceChanges,omitempty"` // Nonce changes ([tx_index -> new_nonce])
CodeChanges []CodeChange `json:"code,omitempty"` // CodeChanges changes ([tx_index -> new_code])
}
// validate converts the account accesses out of encoding format.
// If any of the keys in the encoding object are not ordered according to the
// spec, an error is returned.
func (e *AccountAccess) validate() error {
func (e *AccountAccess) validate(blockTxCount int) error {
// Check the storage write slots are sorted in order
if !slices.IsSortedFunc(e.StorageWrites, func(a, b encodingSlotWrites) int {
return bytes.Compare(a.Slot[:], b.Slot[:])
if !slices.IsSortedFunc(e.StorageChanges, func(a, b encodingSlotWrites) int {
aHash, bHash := a.Slot.ToHash(), b.Slot.ToHash()
return bytes.Compare(aHash[:], bHash[:])
}) {
return errors.New("storage writes slots not in lexicographic order")
}
for _, write := range e.StorageWrites {
if err := write.validate(); err != nil {
for _, write := range e.StorageChanges {
if err := write.validate(blockTxCount); err != nil {
return err
}
}
readKeys := make(map[common.Hash]struct{})
writeKeys := make(map[common.Hash]struct{})
for _, readKey := range e.StorageReads {
if _, ok := readKeys[readKey.ToHash()]; ok {
return errors.New("duplicate read key")
}
readKeys[readKey.ToHash()] = struct{}{}
}
for _, write := range e.StorageChanges {
writeKey := write.Slot
if _, ok := writeKeys[writeKey.ToHash()]; ok {
return errors.New("duplicate write key")
}
writeKeys[writeKey.ToHash()] = struct{}{}
}
for readKey := range readKeys {
if _, ok := writeKeys[readKey]; ok {
return errors.New("storage key reported in both read/write sets")
}
}
// Check the storage read slots are sorted in order
if !slices.IsSortedFunc(e.StorageReads, func(a, b [32]byte) int {
return bytes.Compare(a[:], b[:])
if !slices.IsSortedFunc(e.StorageReads, func(a, b *EncodedStorage) int {
aHash, bHash := a.ToHash(), b.ToHash()
return bytes.Compare(aHash[:], bHash[:])
}) {
return errors.New("storage read slots not in lexicographic order")
}
// Check the balance changes are sorted in order
// and that none of them report an index above what is allowed
if !slices.IsSortedFunc(e.BalanceChanges, func(a, b encodingBalanceChange) int {
return cmp.Compare[uint16](a.TxIdx, b.TxIdx)
}) {
return errors.New("balance changes not in ascending order by tx index")
}
if len(e.BalanceChanges) > 0 && int(e.BalanceChanges[len(e.BalanceChanges)-1].TxIdx) > blockTxCount+2 {
return errors.New("highest balance change index beyond what is allowed")
}
// Check the nonce changes are sorted in order
// and that none of them report an index above what is allowed
if !slices.IsSortedFunc(e.NonceChanges, func(a, b encodingAccountNonce) int {
return cmp.Compare[uint16](a.TxIdx, b.TxIdx)
}) {
return errors.New("nonce changes not in ascending order by tx index")
}
if len(e.CodeChanges) > 0 && int(e.NonceChanges[len(e.NonceChanges)-1].TxIdx) >= blockTxCount+2 {
return errors.New("highest nonce change index beyond what is allowed")
}
// Convert code change
if len(e.Code) == 1 {
if len(e.Code[0].Code) > params.MaxCodeSize {
return errors.New("code change contained oversized code")
// TODO: contact testing team to add a test case which has the code changes out of order,
// as it wasn't checked here previously
if !slices.IsSortedFunc(e.CodeChanges, func(a, b CodeChange) int {
return cmp.Compare[uint16](a.TxIdx, b.TxIdx)
}) {
return errors.New("code changes not in ascending order")
}
if len(e.CodeChanges) > 0 && int(e.CodeChanges[len(e.CodeChanges)-1].TxIdx) >= blockTxCount+2 {
return errors.New("highest code change index beyond what is allowed")
}
// validate that code changes could plausibly be correct (none exceed
// max code size of a contract)
for _, codeChange := range e.CodeChanges {
if len(codeChange.Code) > params.MaxCodeSize {
return fmt.Errorf("code change contained oversized code")
}
}
return nil
@ -183,40 +358,39 @@ func (e *AccountAccess) Copy() AccountAccess {
BalanceChanges: slices.Clone(e.BalanceChanges),
NonceChanges: slices.Clone(e.NonceChanges),
}
for _, storageWrite := range e.StorageWrites {
res.StorageWrites = append(res.StorageWrites, encodingSlotWrites{
for _, storageWrite := range e.StorageChanges {
res.StorageChanges = append(res.StorageChanges, encodingSlotWrites{
Slot: storageWrite.Slot,
Accesses: slices.Clone(storageWrite.Accesses),
})
}
if len(e.Code) == 1 {
res.Code = []CodeChange{
{
e.Code[0].TxIndex,
bytes.Clone(e.Code[0].Code),
},
}
for _, codeChange := range e.CodeChanges {
res.CodeChanges = append(res.CodeChanges,
CodeChange{
codeChange.TxIdx,
bytes.Clone(codeChange.Code),
})
}
return res
}
// EncodeRLP returns the RLP-encoded access list
func (b *ConstructionBlockAccessList) EncodeRLP(wr io.Writer) error {
return b.toEncodingObj().EncodeRLP(wr)
func (c ConstructionBlockAccessList) EncodeRLP(wr io.Writer) error {
return c.ToEncodingObj().EncodeRLP(wr)
}
var _ rlp.Encoder = &ConstructionBlockAccessList{}
// toEncodingObj creates an instance of the ConstructionAccountAccess of the type that is
// toEncodingObj creates an instance of the ConstructionAccountAccesses of the type that is
// used as input for the encoding.
func (a *ConstructionAccountAccess) toEncodingObj(addr common.Address) AccountAccess {
func (a *ConstructionAccountAccesses) toEncodingObj(addr common.Address) AccountAccess {
res := AccountAccess{
Address: addr,
StorageWrites: make([]encodingSlotWrites, 0),
StorageReads: make([][32]byte, 0),
StorageChanges: make([]encodingSlotWrites, 0),
StorageReads: make([]*EncodedStorage, 0),
BalanceChanges: make([]encodingBalanceChange, 0),
NonceChanges: make([]encodingAccountNonce, 0),
Code: nil,
CodeChanges: make([]CodeChange, 0),
}
// Convert write slots
@ -224,7 +398,7 @@ func (a *ConstructionAccountAccess) toEncodingObj(addr common.Address) AccountAc
slices.SortFunc(writeSlots, common.Hash.Cmp)
for _, slot := range writeSlots {
var obj encodingSlotWrites
obj.Slot = slot
obj.Slot = newEncodedStorageFromHash(slot)
slotWrites := a.StorageWrites[slot]
obj.Accesses = make([]encodingStorageWrite, 0, len(slotWrites))
@ -234,17 +408,17 @@ func (a *ConstructionAccountAccess) toEncodingObj(addr common.Address) AccountAc
for _, index := range indices {
obj.Accesses = append(obj.Accesses, encodingStorageWrite{
TxIdx: index,
ValueAfter: slotWrites[index],
ValueAfter: newEncodedStorageFromHash(slotWrites[index]),
})
}
res.StorageWrites = append(res.StorageWrites, obj)
res.StorageChanges = append(res.StorageChanges, obj)
}
// Convert read slots
readSlots := slices.Collect(maps.Keys(a.StorageReads))
slices.SortFunc(readSlots, common.Hash.Cmp)
for _, slot := range readSlots {
res.StorageReads = append(res.StorageReads, slot)
res.StorageReads = append(res.StorageReads, newEncodedStorageFromHash(slot))
}
// Convert balance changes
@ -253,7 +427,7 @@ func (a *ConstructionAccountAccess) toEncodingObj(addr common.Address) AccountAc
for _, idx := range balanceIndices {
res.BalanceChanges = append(res.BalanceChanges, encodingBalanceChange{
TxIdx: idx,
Balance: encodeBalance(a.BalanceChanges[idx]),
Balance: new(uint256.Int).Set(a.BalanceChanges[idx]),
})
}
@ -268,77 +442,31 @@ func (a *ConstructionAccountAccess) toEncodingObj(addr common.Address) AccountAc
}
// Convert code change
if a.CodeChange != nil {
res.Code = []CodeChange{
{
a.CodeChange.TxIndex,
bytes.Clone(a.CodeChange.Code),
},
}
codeChangeIdxs := slices.Collect(maps.Keys(a.CodeChanges))
slices.SortFunc(codeChangeIdxs, cmp.Compare[uint16])
for _, idx := range codeChangeIdxs {
res.CodeChanges = append(res.CodeChanges, CodeChange{
idx,
bytes.Clone(a.CodeChanges[idx].Code),
})
}
return res
}
// toEncodingObj returns an instance of the access list expressed as the type
// ToEncodingObj returns an instance of the access list expressed as the type
// which is used as input for the encoding/decoding.
func (b *ConstructionBlockAccessList) toEncodingObj() *BlockAccessList {
func (c ConstructionBlockAccessList) ToEncodingObj() *BlockAccessList {
var addresses []common.Address
for addr := range b.Accounts {
for addr := range c {
addresses = append(addresses, addr)
}
slices.SortFunc(addresses, common.Address.Cmp)
var res BlockAccessList
for _, addr := range addresses {
res.Accesses = append(res.Accesses, b.Accounts[addr].toEncodingObj(addr))
res = append(res, c[addr].toEncodingObj(addr))
}
return &res
}
// PrettyPrint returns an indented, human-readable dump of the access list:
// per account, its storage writes, storage reads, balance changes, nonce
// changes and (if present) deployed code.
func (e *BlockAccessList) PrettyPrint() string {
	var sb strings.Builder
	line := func(indent int, text string) {
		fmt.Fprintf(&sb, "%s%s\n", strings.Repeat(" ", indent), text)
	}
	for _, accountDiff := range e.Accesses {
		line(0, fmt.Sprintf("%x:", accountDiff.Address))
		line(1, "storage writes:")
		for _, sWrite := range accountDiff.StorageWrites {
			line(2, fmt.Sprintf("%x:", sWrite.Slot))
			for _, access := range sWrite.Accesses {
				line(3, fmt.Sprintf("%d: %x", access.TxIdx, access.ValueAfter))
			}
		}
		line(1, "storage reads:")
		for _, slot := range accountDiff.StorageReads {
			line(2, fmt.Sprintf("%x", slot))
		}
		line(1, "balance changes:")
		for _, change := range accountDiff.BalanceChanges {
			// balances are stored as 16-byte big-endian values
			value := new(uint256.Int).SetBytes(change.Balance[:]).String()
			line(2, fmt.Sprintf("%d: %s", change.TxIdx, value))
		}
		line(1, "nonce changes:")
		for _, change := range accountDiff.NonceChanges {
			line(2, fmt.Sprintf("%d: %d", change.TxIdx, change.Nonce))
		}
		if len(accountDiff.Code) > 0 {
			line(1, "code:")
			line(2, fmt.Sprintf("%d: %x", accountDiff.Code[0].TxIndex, accountDiff.Code[0].Code))
		}
	}
	return sb.String()
}
// Copy returns a deep copy of the access list.
func (e *BlockAccessList) Copy() (res BlockAccessList) {
	for i := range e.Accesses {
		res.Accesses = append(res.Accesses, e.Accesses[i].Copy())
	}
	return
}
// ContractCode is raw contract bytecode, as recorded in code changes.
type ContractCode []byte

View file

@ -0,0 +1,107 @@
package bal
import (
"encoding/json"
"fmt"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/rlp"
)
// MarshalJSON encodes the code as a JSON string of unprefixed lowercase hex
// (note: no "0x" prefix, unlike hexutil conventions).
func (c *ContractCode) MarshalJSON() ([]byte, error) {
	return json.Marshal(fmt.Sprintf("%x", *c))
}
// MarshalJSON encodes the balance change with its tx index rendered as a
// 0x-prefixed hex string. The anonymous wrapper's TxIdx string shadows the
// embedded alias's numeric field of the same JSON name; the alias carries
// the remaining fields unchanged.
func (e encodingBalanceChange) MarshalJSON() ([]byte, error) {
	// Alias strips encodingBalanceChange's own methods to avoid recursion.
	type Alias encodingBalanceChange
	return json.Marshal(&struct {
		TxIdx string `json:"txIndex"`
		*Alias
	}{
		TxIdx: fmt.Sprintf("0x%x", e.TxIdx),
		Alias: (*Alias)(&e),
	})
}
// UnmarshalJSON decodes a balance change whose txIndex is a 0x-prefixed hex
// string; the remaining fields are decoded through the embedded alias.
// NOTE(review): a txIndex lacking the "0x" prefix is silently ignored,
// leaving e.TxIdx at its prior value — confirm this leniency is intended.
func (e *encodingBalanceChange) UnmarshalJSON(data []byte) error {
	// Alias strips methods so the nested Unmarshal does not recurse.
	type Alias encodingBalanceChange
	aux := &struct {
		TxIdx string `json:"txIndex"`
		*Alias
	}{
		Alias: (*Alias)(e),
	}
	if err := json.Unmarshal(data, &aux); err != nil {
		return err
	}
	// Parse the hex tx index back into the numeric field.
	if len(aux.TxIdx) >= 2 && aux.TxIdx[:2] == "0x" {
		if _, err := fmt.Sscanf(aux.TxIdx, "0x%x", &e.TxIdx); err != nil {
			return err
		}
	}
	return nil
}
// MarshalJSON encodes the nonce change with both txIndex and nonce rendered
// as 0x-prefixed hex strings. The wrapper's string fields shadow the
// embedded alias's numeric fields of the same JSON names.
func (e encodingAccountNonce) MarshalJSON() ([]byte, error) {
	// Alias strips encodingAccountNonce's own methods to avoid recursion.
	type Alias encodingAccountNonce
	return json.Marshal(&struct {
		TxIdx string `json:"txIndex"`
		Nonce string `json:"nonce"`
		*Alias
	}{
		TxIdx: fmt.Sprintf("0x%x", e.TxIdx),
		Nonce: fmt.Sprintf("0x%x", e.Nonce),
		Alias: (*Alias)(&e),
	})
}
// UnmarshalJSON decodes a nonce change whose txIndex and nonce are
// 0x-prefixed hex strings.
// NOTE(review): values lacking the "0x" prefix are silently ignored,
// leaving the corresponding field at its prior value — confirm intended.
func (e *encodingAccountNonce) UnmarshalJSON(data []byte) error {
	// Alias strips methods so the nested Unmarshal does not recurse.
	type Alias encodingAccountNonce
	aux := &struct {
		TxIdx string `json:"txIndex"`
		Nonce string `json:"nonce"`
		*Alias
	}{
		Alias: (*Alias)(e),
	}
	if err := json.Unmarshal(data, &aux); err != nil {
		return err
	}
	// Parse the hex strings back into the numeric fields.
	if len(aux.TxIdx) >= 2 && aux.TxIdx[:2] == "0x" {
		if _, err := fmt.Sscanf(aux.TxIdx, "0x%x", &e.TxIdx); err != nil {
			return err
		}
	}
	if len(aux.Nonce) >= 2 && aux.Nonce[:2] == "0x" {
		if _, err := fmt.Sscanf(aux.Nonce, "0x%x", &e.Nonce); err != nil {
			return err
		}
	}
	return nil
}
// UnmarshalJSON implements json.Unmarshaler, accepting either an RLP
// encoding wrapped in a hex string or a structured JSON array of account
// accesses.
func (b *BlockAccessList) UnmarshalJSON(input []byte) error {
	// First attempt: a hex string carrying the RLP encoding.
	var raw hexutil.Bytes
	if json.Unmarshal(input, &raw) == nil {
		return rlp.DecodeBytes(raw, b)
	}
	// Fallback: a plain JSON array.
	var accesses []AccountAccess
	if err := json.Unmarshal(input, &accesses); err != nil {
		return err
	}
	*b = accesses
	return nil
}
// MarshalJSON implements json.Marshaler, emitting the list's RLP
// serialization wrapped in a 0x-prefixed JSON hex string.
func (b BlockAccessList) MarshalJSON() ([]byte, error) {
	encoded, err := rlp.EncodeToBytes(b)
	if err != nil {
		return nil, err
	}
	return json.Marshal(hexutil.Bytes(encoded))
}

View file

@ -2,275 +2,260 @@
package bal
import "github.com/ethereum/go-ethereum/common"
import "github.com/ethereum/go-ethereum/rlp"
import "github.com/holiman/uint256"
import "io"
func (obj *BlockAccessList) EncodeRLP(_w io.Writer) error {
func (obj *AccountAccess) EncodeRLP(_w io.Writer) error {
w := rlp.NewEncoderBuffer(_w)
_tmp0 := w.List()
w.WriteBytes(obj.Address[:])
_tmp1 := w.List()
for _, _tmp2 := range obj.Accesses {
for _, _tmp2 := range obj.StorageChanges {
_tmp3 := w.List()
w.WriteBytes(_tmp2.Address[:])
if err := _tmp2.Slot.EncodeRLP(w); err != nil {
return err
}
_tmp4 := w.List()
for _, _tmp5 := range _tmp2.StorageWrites {
for _, _tmp5 := range _tmp2.Accesses {
_tmp6 := w.List()
w.WriteBytes(_tmp5.Slot[:])
_tmp7 := w.List()
for _, _tmp8 := range _tmp5.Accesses {
_tmp9 := w.List()
w.WriteUint64(uint64(_tmp8.TxIdx))
w.WriteBytes(_tmp8.ValueAfter[:])
w.ListEnd(_tmp9)
w.WriteUint64(uint64(_tmp5.TxIdx))
if err := _tmp5.ValueAfter.EncodeRLP(w); err != nil {
return err
}
w.ListEnd(_tmp7)
w.ListEnd(_tmp6)
}
w.ListEnd(_tmp4)
_tmp10 := w.List()
for _, _tmp11 := range _tmp2.StorageReads {
w.WriteBytes(_tmp11[:])
}
w.ListEnd(_tmp10)
_tmp12 := w.List()
for _, _tmp13 := range _tmp2.BalanceChanges {
_tmp14 := w.List()
w.WriteUint64(uint64(_tmp13.TxIdx))
w.WriteBytes(_tmp13.Balance[:])
w.ListEnd(_tmp14)
}
w.ListEnd(_tmp12)
_tmp15 := w.List()
for _, _tmp16 := range _tmp2.NonceChanges {
_tmp17 := w.List()
w.WriteUint64(uint64(_tmp16.TxIdx))
w.WriteUint64(_tmp16.Nonce)
w.ListEnd(_tmp17)
}
w.ListEnd(_tmp15)
_tmp18 := w.List()
for _, _tmp19 := range _tmp2.Code {
_tmp20 := w.List()
w.WriteUint64(uint64(_tmp19.TxIndex))
w.WriteBytes(_tmp19.Code)
w.ListEnd(_tmp20)
}
w.ListEnd(_tmp18)
w.ListEnd(_tmp3)
}
w.ListEnd(_tmp1)
_tmp7 := w.List()
for _, _tmp8 := range obj.StorageReads {
if err := _tmp8.EncodeRLP(w); err != nil {
return err
}
}
w.ListEnd(_tmp7)
_tmp9 := w.List()
for _, _tmp10 := range obj.BalanceChanges {
_tmp11 := w.List()
w.WriteUint64(uint64(_tmp10.TxIdx))
if _tmp10.Balance == nil {
w.Write(rlp.EmptyString)
} else {
w.WriteUint256(_tmp10.Balance)
}
w.ListEnd(_tmp11)
}
w.ListEnd(_tmp9)
_tmp12 := w.List()
for _, _tmp13 := range obj.NonceChanges {
_tmp14 := w.List()
w.WriteUint64(uint64(_tmp13.TxIdx))
w.WriteUint64(_tmp13.Nonce)
w.ListEnd(_tmp14)
}
w.ListEnd(_tmp12)
_tmp15 := w.List()
for _, _tmp16 := range obj.CodeChanges {
_tmp17 := w.List()
w.WriteUint64(uint64(_tmp16.TxIdx))
w.WriteBytes(_tmp16.Code)
w.ListEnd(_tmp17)
}
w.ListEnd(_tmp15)
w.ListEnd(_tmp0)
return w.Flush()
}
func (obj *BlockAccessList) DecodeRLP(dec *rlp.Stream) error {
var _tmp0 BlockAccessList
func (obj *AccountAccess) DecodeRLP(dec *rlp.Stream) error {
var _tmp0 AccountAccess
{
if _, err := dec.List(); err != nil {
return err
}
// Accesses:
var _tmp1 []AccountAccess
// Address:
var _tmp1 common.Address
if err := dec.ReadBytes(_tmp1[:]); err != nil {
return err
}
_tmp0.Address = _tmp1
// StorageChanges:
var _tmp2 []encodingSlotWrites
if _, err := dec.List(); err != nil {
return err
}
for dec.MoreDataInList() {
var _tmp2 AccountAccess
var _tmp3 encodingSlotWrites
{
if _, err := dec.List(); err != nil {
return err
}
// Address:
var _tmp3 [20]byte
if err := dec.ReadBytes(_tmp3[:]); err != nil {
// Slot:
_tmp4 := new(EncodedStorage)
if err := _tmp4.DecodeRLP(dec); err != nil {
return err
}
_tmp2.Address = _tmp3
// StorageWrites:
var _tmp4 []encodingSlotWrites
_tmp3.Slot = _tmp4
// Accesses:
var _tmp5 []encodingStorageWrite
if _, err := dec.List(); err != nil {
return err
}
for dec.MoreDataInList() {
var _tmp5 encodingSlotWrites
{
if _, err := dec.List(); err != nil {
return err
}
// Slot:
var _tmp6 [32]byte
if err := dec.ReadBytes(_tmp6[:]); err != nil {
return err
}
_tmp5.Slot = _tmp6
// Accesses:
var _tmp7 []encodingStorageWrite
if _, err := dec.List(); err != nil {
return err
}
for dec.MoreDataInList() {
var _tmp8 encodingStorageWrite
{
if _, err := dec.List(); err != nil {
return err
}
// TxIdx:
_tmp9, err := dec.Uint16()
if err != nil {
return err
}
_tmp8.TxIdx = _tmp9
// ValueAfter:
var _tmp10 [32]byte
if err := dec.ReadBytes(_tmp10[:]); err != nil {
return err
}
_tmp8.ValueAfter = _tmp10
if err := dec.ListEnd(); err != nil {
return err
}
}
_tmp7 = append(_tmp7, _tmp8)
}
if err := dec.ListEnd(); err != nil {
return err
}
_tmp5.Accesses = _tmp7
if err := dec.ListEnd(); err != nil {
return err
}
}
_tmp4 = append(_tmp4, _tmp5)
}
if err := dec.ListEnd(); err != nil {
return err
}
_tmp2.StorageWrites = _tmp4
// StorageReads:
var _tmp11 [][32]byte
if _, err := dec.List(); err != nil {
return err
}
for dec.MoreDataInList() {
var _tmp12 [32]byte
if err := dec.ReadBytes(_tmp12[:]); err != nil {
return err
}
_tmp11 = append(_tmp11, _tmp12)
}
if err := dec.ListEnd(); err != nil {
return err
}
_tmp2.StorageReads = _tmp11
// BalanceChanges:
var _tmp13 []encodingBalanceChange
if _, err := dec.List(); err != nil {
return err
}
for dec.MoreDataInList() {
var _tmp14 encodingBalanceChange
var _tmp6 encodingStorageWrite
{
if _, err := dec.List(); err != nil {
return err
}
// TxIdx:
_tmp15, err := dec.Uint16()
_tmp7, err := dec.Uint16()
if err != nil {
return err
}
_tmp14.TxIdx = _tmp15
// Balance:
var _tmp16 [16]byte
if err := dec.ReadBytes(_tmp16[:]); err != nil {
_tmp6.TxIdx = _tmp7
// ValueAfter:
_tmp8 := new(EncodedStorage)
if err := _tmp8.DecodeRLP(dec); err != nil {
return err
}
_tmp14.Balance = _tmp16
_tmp6.ValueAfter = _tmp8
if err := dec.ListEnd(); err != nil {
return err
}
}
_tmp13 = append(_tmp13, _tmp14)
_tmp5 = append(_tmp5, _tmp6)
}
if err := dec.ListEnd(); err != nil {
return err
}
_tmp2.BalanceChanges = _tmp13
// NonceChanges:
var _tmp17 []encodingAccountNonce
if _, err := dec.List(); err != nil {
return err
}
for dec.MoreDataInList() {
var _tmp18 encodingAccountNonce
{
if _, err := dec.List(); err != nil {
return err
}
// TxIdx:
_tmp19, err := dec.Uint16()
if err != nil {
return err
}
_tmp18.TxIdx = _tmp19
// Nonce:
_tmp20, err := dec.Uint64()
if err != nil {
return err
}
_tmp18.Nonce = _tmp20
if err := dec.ListEnd(); err != nil {
return err
}
}
_tmp17 = append(_tmp17, _tmp18)
}
if err := dec.ListEnd(); err != nil {
return err
}
_tmp2.NonceChanges = _tmp17
// Code:
var _tmp21 []CodeChange
if _, err := dec.List(); err != nil {
return err
}
for dec.MoreDataInList() {
var _tmp22 CodeChange
{
if _, err := dec.List(); err != nil {
return err
}
// TxIndex:
_tmp23, err := dec.Uint16()
if err != nil {
return err
}
_tmp22.TxIndex = _tmp23
// Code:
_tmp24, err := dec.Bytes()
if err != nil {
return err
}
_tmp22.Code = _tmp24
if err := dec.ListEnd(); err != nil {
return err
}
}
_tmp21 = append(_tmp21, _tmp22)
}
if err := dec.ListEnd(); err != nil {
return err
}
_tmp2.Code = _tmp21
_tmp3.Accesses = _tmp5
if err := dec.ListEnd(); err != nil {
return err
}
}
_tmp1 = append(_tmp1, _tmp2)
_tmp2 = append(_tmp2, _tmp3)
}
if err := dec.ListEnd(); err != nil {
return err
}
_tmp0.Accesses = _tmp1
_tmp0.StorageChanges = _tmp2
// StorageReads:
var _tmp9 []*EncodedStorage
if _, err := dec.List(); err != nil {
return err
}
for dec.MoreDataInList() {
_tmp10 := new(EncodedStorage)
if err := _tmp10.DecodeRLP(dec); err != nil {
return err
}
_tmp9 = append(_tmp9, _tmp10)
}
if err := dec.ListEnd(); err != nil {
return err
}
_tmp0.StorageReads = _tmp9
// BalanceChanges:
var _tmp11 []encodingBalanceChange
if _, err := dec.List(); err != nil {
return err
}
for dec.MoreDataInList() {
var _tmp12 encodingBalanceChange
{
if _, err := dec.List(); err != nil {
return err
}
// TxIdx:
_tmp13, err := dec.Uint16()
if err != nil {
return err
}
_tmp12.TxIdx = _tmp13
// Balance:
var _tmp14 uint256.Int
if err := dec.ReadUint256(&_tmp14); err != nil {
return err
}
_tmp12.Balance = &_tmp14
if err := dec.ListEnd(); err != nil {
return err
}
}
_tmp11 = append(_tmp11, _tmp12)
}
if err := dec.ListEnd(); err != nil {
return err
}
_tmp0.BalanceChanges = _tmp11
// NonceChanges:
var _tmp15 []encodingAccountNonce
if _, err := dec.List(); err != nil {
return err
}
for dec.MoreDataInList() {
var _tmp16 encodingAccountNonce
{
if _, err := dec.List(); err != nil {
return err
}
// TxIdx:
_tmp17, err := dec.Uint16()
if err != nil {
return err
}
_tmp16.TxIdx = _tmp17
// Nonce:
_tmp18, err := dec.Uint64()
if err != nil {
return err
}
_tmp16.Nonce = _tmp18
if err := dec.ListEnd(); err != nil {
return err
}
}
_tmp15 = append(_tmp15, _tmp16)
}
if err := dec.ListEnd(); err != nil {
return err
}
_tmp0.NonceChanges = _tmp15
// CodeChanges:
var _tmp19 []CodeChange
if _, err := dec.List(); err != nil {
return err
}
for dec.MoreDataInList() {
var _tmp20 CodeChange
{
if _, err := dec.List(); err != nil {
return err
}
// TxIdx:
_tmp21, err := dec.Uint16()
if err != nil {
return err
}
_tmp20.TxIdx = _tmp21
// Code:
_tmp22, err := dec.Bytes()
if err != nil {
return err
}
_tmp20.Code = _tmp22
if err := dec.ListEnd(); err != nil {
return err
}
}
_tmp19 = append(_tmp19, _tmp20)
}
if err := dec.ListEnd(); err != nil {
return err
}
_tmp0.CodeChanges = _tmp19
if err := dec.ListEnd(); err != nil {
return err
}

View file

@ -36,9 +36,9 @@ func equalBALs(a *BlockAccessList, b *BlockAccessList) bool {
return true
}
func makeTestConstructionBAL() *ConstructionBlockAccessList {
return &ConstructionBlockAccessList{
map[common.Address]*ConstructionAccountAccess{
func makeTestConstructionBAL() *AccessListBuilder {
return &AccessListBuilder{
FinalizedAccesses: map[common.Address]*ConstructionAccountAccesses{
common.BytesToAddress([]byte{0xff, 0xff}): {
StorageWrites: map[common.Hash]map[uint16]common.Hash{
common.BytesToHash([]byte{0x01}): {
@ -60,10 +60,10 @@ func makeTestConstructionBAL() *ConstructionBlockAccessList {
1: 2,
2: 6,
},
CodeChange: &CodeChange{
TxIndex: 0,
Code: common.Hex2Bytes("deadbeef"),
},
CodeChanges: map[uint16]CodeChange{0: {
TxIdx: 0,
Code: common.Hex2Bytes("deadbeef"),
}},
},
common.BytesToAddress([]byte{0xff, 0xff, 0xff}): {
StorageWrites: map[common.Hash]map[uint16]common.Hash{
@ -102,10 +102,10 @@ func TestBALEncoding(t *testing.T) {
if err := dec.DecodeRLP(rlp.NewStream(bytes.NewReader(buf.Bytes()), 10000000)); err != nil {
t.Fatalf("decoding failed: %v\n", err)
}
if dec.Hash() != bal.toEncodingObj().Hash() {
if dec.Hash() != bal.ToEncodingObj().Hash() {
t.Fatalf("encoded block hash doesn't match decoded")
}
if !equalBALs(bal.toEncodingObj(), &dec) {
if !equalBALs(bal.ToEncodingObj(), &dec) {
t.Fatal("decoded BAL doesn't match")
}
}
@ -113,18 +113,18 @@ func TestBALEncoding(t *testing.T) {
func makeTestAccountAccess(sort bool) AccountAccess {
var (
storageWrites []encodingSlotWrites
storageReads [][32]byte
storageReads []common.Hash
balances []encodingBalanceChange
nonces []encodingAccountNonce
)
for i := 0; i < 5; i++ {
slot := encodingSlotWrites{
Slot: testrand.Hash(),
Slot: newEncodedStorageFromHash(testrand.Hash()),
}
for j := 0; j < 3; j++ {
slot.Accesses = append(slot.Accesses, encodingStorageWrite{
TxIdx: uint16(2 * j),
ValueAfter: testrand.Hash(),
ValueAfter: newEncodedStorageFromHash(testrand.Hash()),
})
}
if sort {
@ -144,7 +144,7 @@ func makeTestAccountAccess(sort bool) AccountAccess {
storageReads = append(storageReads, testrand.Hash())
}
if sort {
slices.SortFunc(storageReads, func(a, b [32]byte) int {
slices.SortFunc(storageReads, func(a, b common.Hash) int {
return bytes.Compare(a[:], b[:])
})
}
@ -152,7 +152,7 @@ func makeTestAccountAccess(sort bool) AccountAccess {
for i := 0; i < 5; i++ {
balances = append(balances, encodingBalanceChange{
TxIdx: uint16(2 * i),
Balance: [16]byte(testrand.Bytes(16)),
Balance: new(uint256.Int).SetBytes(testrand.Bytes(32)),
})
}
if sort {
@ -173,16 +173,20 @@ func makeTestAccountAccess(sort bool) AccountAccess {
})
}
var encodedStorageReads []EncodedStorage
for _, slot := range storageReads {
encodedStorageReads = append(encodedStorageReads, newEncodedStorageFromHash(slot))
}
return AccountAccess{
Address: [20]byte(testrand.Bytes(20)),
StorageWrites: storageWrites,
StorageReads: storageReads,
StorageChanges: storageWrites,
StorageReads: encodedStorageReads,
BalanceChanges: balances,
NonceChanges: nonces,
Code: []CodeChange{
CodeChanges: []CodeChange{
{
TxIndex: 100,
Code: testrand.Bytes(256),
TxIdx: 100,
Code: testrand.Bytes(256),
},
},
}
@ -191,10 +195,10 @@ func makeTestAccountAccess(sort bool) AccountAccess {
func makeTestBAL(sort bool) BlockAccessList {
list := BlockAccessList{}
for i := 0; i < 5; i++ {
list.Accesses = append(list.Accesses, makeTestAccountAccess(sort))
list = append(list, makeTestAccountAccess(sort))
}
if sort {
slices.SortFunc(list.Accesses, func(a, b AccountAccess) int {
slices.SortFunc(list, func(a, b AccountAccess) int {
return bytes.Compare(a.Address[:], b.Address[:])
})
}
@ -214,9 +218,9 @@ func TestBlockAccessListCopy(t *testing.T) {
}
// Make sure the mutations on copy won't affect the origin
for _, aa := range cpyCpy.Accesses {
for _, aa := range cpyCpy {
for i := 0; i < len(aa.StorageReads); i++ {
aa.StorageReads[i] = [32]byte(testrand.Bytes(32))
aa.StorageReads[i] = testrand.Bytes(32)
}
}
if !reflect.DeepEqual(list, cpy) {
@ -245,8 +249,11 @@ func TestBlockAccessListValidation(t *testing.T) {
// Validate the derived block access list
cBAL := makeTestConstructionBAL()
listB := cBAL.toEncodingObj()
listB := cBAL.ToEncodingObj()
if err := listB.Validate(); err != nil {
t.Fatalf("Unexpected validation error: %v", err)
}
}
// BALReader test ideas
// * BAL which doesn't have any pre-tx system contracts should return an empty state diff at idx 0

View file

@ -0,0 +1,32 @@
package types
import (
"bytes"
"fmt"
"github.com/ethereum/go-ethereum/rlp"
"io"
"os"
"testing"
)
func TestBALDecoding(t *testing.T) {
var (
err error
data []byte
)
data, err = os.ReadFile("blocks_bal_one.rlp")
if err != nil {
t.Fatalf("error opening file: %v", err)
}
reader := bytes.NewReader(data)
stream := rlp.NewStream(reader, 0)
var blocks Block
for i := 0; err == nil; i++ {
fmt.Printf("decode %d\n", i)
err = stream.Decode(&blocks)
if err != nil && err != io.EOF {
t.Fatalf("error decoding blocks: %v", err)
}
fmt.Printf("block number is %d\n", blocks.NumberU64())
}
}

View file

@ -28,6 +28,8 @@ import (
"sync/atomic"
"time"
"github.com/ethereum/go-ethereum/core/types/bal"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/rlp"
@ -98,6 +100,9 @@ type Header struct {
// RequestsHash was added by EIP-7685 and is ignored in legacy headers.
RequestsHash *common.Hash `json:"requestsHash" rlp:"optional"`
// BlockAccessListHash was added by EIP-7928 and is ignored in legacy headers.
BlockAccessListHash *common.Hash `json:"balHash" rlp:"optional"`
}
// field type overrides for gencodec
@ -159,10 +164,8 @@ func (h *Header) SanityCheck() error {
// EmptyBody returns true if there is no additional 'body' to complete the header
// that is: no transactions, no uncles and no withdrawals.
func (h *Header) EmptyBody() bool {
var (
emptyWithdrawals = h.WithdrawalsHash == nil || *h.WithdrawalsHash == EmptyWithdrawalsHash
)
return h.TxHash == EmptyTxsHash && h.UncleHash == EmptyUncleHash && emptyWithdrawals
// quick hack to ensure that we download bodies for empty blocks so that we receive the BALs
return false
}
// EmptyReceipts returns true if there are no receipts for this header/block.
@ -175,7 +178,8 @@ func (h *Header) EmptyReceipts() bool {
type Body struct {
Transactions []*Transaction
Uncles []*Header
Withdrawals []*Withdrawal `rlp:"optional"`
Withdrawals []*Withdrawal `rlp:"optional"`
AccessList *bal.BlockAccessList `rlp:"optional,nil"`
}
// Block represents an Ethereum block.
@ -200,6 +204,7 @@ type Block struct {
uncles []*Header
transactions Transactions
withdrawals Withdrawals
accessList *bal.BlockAccessList
// caches
hash atomic.Pointer[common.Hash]
@ -216,7 +221,8 @@ type extblock struct {
Header *Header
Txs []*Transaction
Uncles []*Header
Withdrawals []*Withdrawal `rlp:"optional"`
Withdrawals []*Withdrawal `rlp:"optional"`
AccessList *bal.BlockAccessList `rlp:"optional"`
}
// NewBlock creates a new block. The input data is copied, changes to header and to the
@ -277,6 +283,12 @@ func NewBlock(header *Header, body *Body, receipts []*Receipt, hasher ListHasher
b.withdrawals = slices.Clone(withdrawals)
}
if body.AccessList != nil {
balHash := body.AccessList.Hash()
b.header.BlockAccessListHash = &balHash
b.accessList = body.AccessList
}
return b
}
@ -321,12 +333,14 @@ func CopyHeader(h *Header) *Header {
// DecodeRLP decodes a block from RLP.
func (b *Block) DecodeRLP(s *rlp.Stream) error {
var eb extblock
var (
eb extblock
)
_, size, _ := s.Kind()
if err := s.Decode(&eb); err != nil {
return err
}
b.header, b.uncles, b.transactions, b.withdrawals = eb.Header, eb.Uncles, eb.Txs, eb.Withdrawals
b.header, b.uncles, b.transactions, b.withdrawals, b.accessList = eb.Header, eb.Uncles, eb.Txs, eb.Withdrawals, eb.AccessList
b.size.Store(rlp.ListSize(size))
return nil
}
@ -338,13 +352,14 @@ func (b *Block) EncodeRLP(w io.Writer) error {
Txs: b.transactions,
Uncles: b.uncles,
Withdrawals: b.withdrawals,
AccessList: b.accessList,
})
}
// Body returns the non-header content of the block.
// Note the returned data is not an independent copy.
func (b *Block) Body() *Body {
return &Body{b.transactions, b.uncles, b.withdrawals}
return &Body{b.transactions, b.uncles, b.withdrawals, b.accessList}
}
// Accessors for body data. These do not return a copy because the content
@ -490,6 +505,10 @@ func (b *Block) WithBody(body Body) *Block {
uncles: make([]*Header, len(body.Uncles)),
withdrawals: slices.Clone(body.Withdrawals),
}
if body.AccessList != nil {
balCopy := body.AccessList.Copy()
block.accessList = &balCopy
}
for i := range body.Uncles {
block.uncles[i] = CopyHeader(body.Uncles[i])
}

View file

@ -16,28 +16,29 @@ var _ = (*headerMarshaling)(nil)
// MarshalJSON marshals as JSON.
func (h Header) MarshalJSON() ([]byte, error) {
type Header struct {
ParentHash common.Hash `json:"parentHash" gencodec:"required"`
UncleHash common.Hash `json:"sha3Uncles" gencodec:"required"`
Coinbase common.Address `json:"miner"`
Root common.Hash `json:"stateRoot" gencodec:"required"`
TxHash common.Hash `json:"transactionsRoot" gencodec:"required"`
ReceiptHash common.Hash `json:"receiptsRoot" gencodec:"required"`
Bloom Bloom `json:"logsBloom" gencodec:"required"`
Difficulty *hexutil.Big `json:"difficulty" gencodec:"required"`
Number *hexutil.Big `json:"number" gencodec:"required"`
GasLimit hexutil.Uint64 `json:"gasLimit" gencodec:"required"`
GasUsed hexutil.Uint64 `json:"gasUsed" gencodec:"required"`
Time hexutil.Uint64 `json:"timestamp" gencodec:"required"`
Extra hexutil.Bytes `json:"extraData" gencodec:"required"`
MixDigest common.Hash `json:"mixHash"`
Nonce BlockNonce `json:"nonce"`
BaseFee *hexutil.Big `json:"baseFeePerGas" rlp:"optional"`
WithdrawalsHash *common.Hash `json:"withdrawalsRoot" rlp:"optional"`
BlobGasUsed *hexutil.Uint64 `json:"blobGasUsed" rlp:"optional"`
ExcessBlobGas *hexutil.Uint64 `json:"excessBlobGas" rlp:"optional"`
ParentBeaconRoot *common.Hash `json:"parentBeaconBlockRoot" rlp:"optional"`
RequestsHash *common.Hash `json:"requestsHash" rlp:"optional"`
Hash common.Hash `json:"hash"`
ParentHash common.Hash `json:"parentHash" gencodec:"required"`
UncleHash common.Hash `json:"sha3Uncles" gencodec:"required"`
Coinbase common.Address `json:"miner"`
Root common.Hash `json:"stateRoot" gencodec:"required"`
TxHash common.Hash `json:"transactionsRoot" gencodec:"required"`
ReceiptHash common.Hash `json:"receiptsRoot" gencodec:"required"`
Bloom Bloom `json:"logsBloom" gencodec:"required"`
Difficulty *hexutil.Big `json:"difficulty" gencodec:"required"`
Number *hexutil.Big `json:"number" gencodec:"required"`
GasLimit hexutil.Uint64 `json:"gasLimit" gencodec:"required"`
GasUsed hexutil.Uint64 `json:"gasUsed" gencodec:"required"`
Time hexutil.Uint64 `json:"timestamp" gencodec:"required"`
Extra hexutil.Bytes `json:"extraData" gencodec:"required"`
MixDigest common.Hash `json:"mixHash"`
Nonce BlockNonce `json:"nonce"`
BaseFee *hexutil.Big `json:"baseFeePerGas" rlp:"optional"`
WithdrawalsHash *common.Hash `json:"withdrawalsRoot" rlp:"optional"`
BlobGasUsed *hexutil.Uint64 `json:"blobGasUsed" rlp:"optional"`
ExcessBlobGas *hexutil.Uint64 `json:"excessBlobGas" rlp:"optional"`
ParentBeaconRoot *common.Hash `json:"parentBeaconBlockRoot" rlp:"optional"`
RequestsHash *common.Hash `json:"requestsHash" rlp:"optional"`
BlockAccessListHash *common.Hash `json:"balHash" rlp:"optional"`
Hash common.Hash `json:"hash"`
}
var enc Header
enc.ParentHash = h.ParentHash
@ -61,6 +62,7 @@ func (h Header) MarshalJSON() ([]byte, error) {
enc.ExcessBlobGas = (*hexutil.Uint64)(h.ExcessBlobGas)
enc.ParentBeaconRoot = h.ParentBeaconRoot
enc.RequestsHash = h.RequestsHash
enc.BlockAccessListHash = h.BlockAccessListHash
enc.Hash = h.Hash()
return json.Marshal(&enc)
}
@ -68,27 +70,28 @@ func (h Header) MarshalJSON() ([]byte, error) {
// UnmarshalJSON unmarshals from JSON.
func (h *Header) UnmarshalJSON(input []byte) error {
type Header struct {
ParentHash *common.Hash `json:"parentHash" gencodec:"required"`
UncleHash *common.Hash `json:"sha3Uncles" gencodec:"required"`
Coinbase *common.Address `json:"miner"`
Root *common.Hash `json:"stateRoot" gencodec:"required"`
TxHash *common.Hash `json:"transactionsRoot" gencodec:"required"`
ReceiptHash *common.Hash `json:"receiptsRoot" gencodec:"required"`
Bloom *Bloom `json:"logsBloom" gencodec:"required"`
Difficulty *hexutil.Big `json:"difficulty" gencodec:"required"`
Number *hexutil.Big `json:"number" gencodec:"required"`
GasLimit *hexutil.Uint64 `json:"gasLimit" gencodec:"required"`
GasUsed *hexutil.Uint64 `json:"gasUsed" gencodec:"required"`
Time *hexutil.Uint64 `json:"timestamp" gencodec:"required"`
Extra *hexutil.Bytes `json:"extraData" gencodec:"required"`
MixDigest *common.Hash `json:"mixHash"`
Nonce *BlockNonce `json:"nonce"`
BaseFee *hexutil.Big `json:"baseFeePerGas" rlp:"optional"`
WithdrawalsHash *common.Hash `json:"withdrawalsRoot" rlp:"optional"`
BlobGasUsed *hexutil.Uint64 `json:"blobGasUsed" rlp:"optional"`
ExcessBlobGas *hexutil.Uint64 `json:"excessBlobGas" rlp:"optional"`
ParentBeaconRoot *common.Hash `json:"parentBeaconBlockRoot" rlp:"optional"`
RequestsHash *common.Hash `json:"requestsHash" rlp:"optional"`
ParentHash *common.Hash `json:"parentHash" gencodec:"required"`
UncleHash *common.Hash `json:"sha3Uncles" gencodec:"required"`
Coinbase *common.Address `json:"miner"`
Root *common.Hash `json:"stateRoot" gencodec:"required"`
TxHash *common.Hash `json:"transactionsRoot" gencodec:"required"`
ReceiptHash *common.Hash `json:"receiptsRoot" gencodec:"required"`
Bloom *Bloom `json:"logsBloom" gencodec:"required"`
Difficulty *hexutil.Big `json:"difficulty" gencodec:"required"`
Number *hexutil.Big `json:"number" gencodec:"required"`
GasLimit *hexutil.Uint64 `json:"gasLimit" gencodec:"required"`
GasUsed *hexutil.Uint64 `json:"gasUsed" gencodec:"required"`
Time *hexutil.Uint64 `json:"timestamp" gencodec:"required"`
Extra *hexutil.Bytes `json:"extraData" gencodec:"required"`
MixDigest *common.Hash `json:"mixHash"`
Nonce *BlockNonce `json:"nonce"`
BaseFee *hexutil.Big `json:"baseFeePerGas" rlp:"optional"`
WithdrawalsHash *common.Hash `json:"withdrawalsRoot" rlp:"optional"`
BlobGasUsed *hexutil.Uint64 `json:"blobGasUsed" rlp:"optional"`
ExcessBlobGas *hexutil.Uint64 `json:"excessBlobGas" rlp:"optional"`
ParentBeaconRoot *common.Hash `json:"parentBeaconBlockRoot" rlp:"optional"`
RequestsHash *common.Hash `json:"requestsHash" rlp:"optional"`
BlockAccessListHash *common.Hash `json:"balHash" rlp:"optional"`
}
var dec Header
if err := json.Unmarshal(input, &dec); err != nil {
@ -169,5 +172,8 @@ func (h *Header) UnmarshalJSON(input []byte) error {
if dec.RequestsHash != nil {
h.RequestsHash = dec.RequestsHash
}
if dec.BlockAccessListHash != nil {
h.BlockAccessListHash = dec.BlockAccessListHash
}
return nil
}

View file

@ -43,7 +43,8 @@ func (obj *Header) EncodeRLP(_w io.Writer) error {
_tmp4 := obj.ExcessBlobGas != nil
_tmp5 := obj.ParentBeaconRoot != nil
_tmp6 := obj.RequestsHash != nil
if _tmp1 || _tmp2 || _tmp3 || _tmp4 || _tmp5 || _tmp6 {
_tmp7 := obj.BlockAccessListHash != nil
if _tmp1 || _tmp2 || _tmp3 || _tmp4 || _tmp5 || _tmp6 || _tmp7 {
if obj.BaseFee == nil {
w.Write(rlp.EmptyString)
} else {
@ -53,41 +54,48 @@ func (obj *Header) EncodeRLP(_w io.Writer) error {
w.WriteBigInt(obj.BaseFee)
}
}
if _tmp2 || _tmp3 || _tmp4 || _tmp5 || _tmp6 {
if _tmp2 || _tmp3 || _tmp4 || _tmp5 || _tmp6 || _tmp7 {
if obj.WithdrawalsHash == nil {
w.Write([]byte{0x80})
} else {
w.WriteBytes(obj.WithdrawalsHash[:])
}
}
if _tmp3 || _tmp4 || _tmp5 || _tmp6 {
if _tmp3 || _tmp4 || _tmp5 || _tmp6 || _tmp7 {
if obj.BlobGasUsed == nil {
w.Write([]byte{0x80})
} else {
w.WriteUint64((*obj.BlobGasUsed))
}
}
if _tmp4 || _tmp5 || _tmp6 {
if _tmp4 || _tmp5 || _tmp6 || _tmp7 {
if obj.ExcessBlobGas == nil {
w.Write([]byte{0x80})
} else {
w.WriteUint64((*obj.ExcessBlobGas))
}
}
if _tmp5 || _tmp6 {
if _tmp5 || _tmp6 || _tmp7 {
if obj.ParentBeaconRoot == nil {
w.Write([]byte{0x80})
} else {
w.WriteBytes(obj.ParentBeaconRoot[:])
}
}
if _tmp6 {
if _tmp6 || _tmp7 {
if obj.RequestsHash == nil {
w.Write([]byte{0x80})
} else {
w.WriteBytes(obj.RequestsHash[:])
}
}
if _tmp7 {
if obj.BlockAccessListHash == nil {
w.Write([]byte{0x80})
} else {
w.WriteBytes(obj.BlockAccessListHash[:])
}
}
w.ListEnd(_tmp0)
return w.Flush()
}

View file

@ -470,25 +470,32 @@ func (evm *EVM) StaticCall(caller common.Address, addr common.Address, input []b
// create creates a new contract using code as deployment code.
func (evm *EVM) create(caller common.Address, code []byte, gas uint64, value *uint256.Int, address common.Address, typ OpCode) (ret []byte, createAddress common.Address, leftOverGas uint64, err error) {
// Depth check execution. Fail if we're trying to execute above the
// limit.
var nonce uint64
if evm.depth > int(params.CallCreateDepth) {
err = ErrDepth
} else if !evm.Context.CanTransfer(evm.StateDB, caller, value) {
err = ErrInsufficientBalance
} else {
nonce = evm.StateDB.GetNonce(caller)
if nonce+1 < nonce {
err = ErrNonceUintOverflow
}
}
if err == nil {
evm.StateDB.SetNonce(caller, nonce+1, tracing.NonceChangeContractCreator)
}
if evm.Config.Tracer != nil {
evm.captureBegin(evm.depth, typ, caller, address, code, gas, value.ToBig())
defer func(startGas uint64) {
evm.captureEnd(evm.depth, startGas, leftOverGas, ret, err)
}(gas)
}
// Depth check execution. Fail if we're trying to execute above the
// limit.
if evm.depth > int(params.CallCreateDepth) {
return nil, common.Address{}, gas, ErrDepth
if err != nil {
return nil, common.Address{}, gas, err
}
if !evm.Context.CanTransfer(evm.StateDB, caller, value) {
return nil, common.Address{}, gas, ErrInsufficientBalance
}
nonce := evm.StateDB.GetNonce(caller)
if nonce+1 < nonce {
return nil, common.Address{}, gas, ErrNonceUintOverflow
}
evm.StateDB.SetNonce(caller, nonce+1, tracing.NonceChangeContractCreator)
// Charge the contract creation init gas in verkle mode
if evm.chainRules.IsEIP4762 {
@ -514,6 +521,7 @@ func (evm *EVM) create(caller common.Address, code []byte, gas uint64, value *ui
// - the storage is non-empty
contractHash := evm.StateDB.GetCodeHash(address)
storageRoot := evm.StateDB.GetStorageRoot(address)
if evm.StateDB.GetNonce(address) != 0 ||
(contractHash != (common.Hash{}) && contractHash != types.EmptyCodeHash) || // non-empty code
(storageRoot != (common.Hash{}) && storageRoot != types.EmptyRootHash) { // non-empty storage

View file

@ -374,7 +374,33 @@ func gasExpEIP158(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memor
return gas, nil
}
func gasCall(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize uint64) (uint64, error) {
func gasCallStateless(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize uint64) (uint64, error) {
var (
gas uint64
transfersValue = !stack.Back(2).IsZero()
)
if transfersValue {
if evm.readOnly {
return 0, ErrWriteProtection
} else if !evm.chainRules.IsEIP4762 {
gas += params.CallValueTransferGas
}
}
memoryGas, err := memoryGasCost(mem, memorySize)
if err != nil {
return 0, err
}
var overflow bool
if gas, overflow = math.SafeAdd(gas, memoryGas); overflow {
return 0, ErrGasUintOverflow
}
return gas, nil
}
func gasCallStateful(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize uint64) (uint64, error) {
var (
gas uint64
transfersValue = !stack.Back(2).IsZero()
@ -391,15 +417,22 @@ func gasCall(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize
} else if !evm.StateDB.Exist(address) {
gas += params.CallNewAccountGas
}
if transfersValue && !evm.chainRules.IsEIP4762 {
gas += params.CallValueTransferGas
}
memoryGas, err := memoryGasCost(mem, memorySize)
return gas, nil
}
func gasCall(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize uint64) (uint64, error) {
stateless, err := gasCallStateless(evm, contract, stack, mem, memorySize)
if err != nil {
return 0, err
}
var overflow bool
if gas, overflow = math.SafeAdd(gas, memoryGas); overflow {
stateful, err := gasCallStateful(evm, contract, stack, mem, memorySize)
if err != nil {
return 0, err
}
gas, overflow := math.SafeAdd(stateless, stateful)
if overflow {
return 0, ErrGasUintOverflow
}
@ -410,25 +443,43 @@ func gasCall(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize
if gas, overflow = math.SafeAdd(gas, evm.callGasTemp); overflow {
return 0, ErrGasUintOverflow
}
return gas, nil
}
func gasCallCode(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize uint64) (uint64, error) {
func gasCallCodeStateful(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize uint64) (uint64, error) {
return 0, nil
}
func gasCallCodeStateless(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize uint64) (uint64, error) {
memoryGas, err := memoryGasCost(mem, memorySize)
if err != nil {
return 0, err
}
var (
gas uint64
overflow bool
gas uint64
overflow bool
transfersValue = !stack.Back(2).IsZero()
)
if stack.Back(2).Sign() != 0 && !evm.chainRules.IsEIP4762 {
gas += params.CallValueTransferGas
if transfersValue {
if evm.readOnly {
return 0, ErrWriteProtection
} else if !evm.chainRules.IsEIP4762 {
gas += params.CallValueTransferGas
}
}
if gas, overflow = math.SafeAdd(gas, memoryGas); overflow {
return 0, ErrGasUintOverflow
}
return gas, nil
}
func gasCallCode(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize uint64) (uint64, error) {
var overflow bool
gas, err := gasCallCodeStateless(evm, contract, stack, mem, memorySize)
if err != nil {
return 0, err
}
evm.callGasTemp, err = callGas(evm.chainRules.IsEIP150, contract.Gas, gas, stack.Back(0))
if err != nil {
return 0, err
@ -440,10 +491,16 @@ func gasCallCode(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memory
}
func gasDelegateCall(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize uint64) (uint64, error) {
gas, err := memoryGasCost(mem, memorySize)
var (
err error
gas uint64
)
gas, err = gasDelegateCallStateless(evm, contract, stack, mem, memorySize)
if err != nil {
return 0, err
}
evm.callGasTemp, err = callGas(evm.chainRules.IsEIP150, contract.Gas, gas, stack.Back(0))
if err != nil {
return 0, err
@ -455,11 +512,36 @@ func gasDelegateCall(evm *EVM, contract *Contract, stack *Stack, mem *Memory, me
return gas, nil
}
func gasStaticCall(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize uint64) (uint64, error) {
func gasDelegateCallStateful(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize uint64) (uint64, error) {
return 0, nil
}
func gasDelegateCallStateless(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize uint64) (uint64, error) {
gas, err := memoryGasCost(mem, memorySize)
if err != nil {
return 0, err
}
return gas, nil
}
// gasStaticCallStateless computes the state-independent portion of the
// STATICCALL dynamic gas, which is currently just the memory expansion
// cost for the requested size.
func gasStaticCallStateless(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize uint64) (uint64, error) {
	memGas, err := memoryGasCost(mem, memorySize)
	if err != nil {
		return 0, err
	}
	return memGas, nil
}
// gasStaticCallStateful computes the state-dependent portion of the
// STATICCALL dynamic gas. STATICCALL transfers no value and creates no new
// account, so there is currently no stateful component and the cost is
// always zero.
func gasStaticCallStateful(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize uint64) (uint64, error) {
	return 0, nil
}
func gasStaticCall(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize uint64) (uint64, error) {
gas, err := gasStaticCallStateless(evm, contract, stack, mem, memorySize)
if err != nil {
return 0, err
}
evm.callGasTemp, err = callGas(evm.chainRules.IsEIP150, contract.Gas, gas, stack.Back(0))
if err != nil {
return 0, err
@ -477,11 +559,16 @@ func gasSelfdestruct(evm *EVM, contract *Contract, stack *Stack, mem *Memory, me
}
var gas uint64
// EIP150 homestead gas reprice fork:
if evm.chainRules.IsEIP150 {
gas = params.SelfdestructGasEIP150
var address = common.Address(stack.Back(0).Bytes20())
if gas > contract.Gas {
return gas, nil
}
if evm.chainRules.IsEIP158 {
// if empty and transfers value
if evm.StateDB.Empty(address) && evm.StateDB.GetBalance(contract.Address()).Sign() != 0 {

View file

@ -518,9 +518,6 @@ func opSload(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
}
func opSstore(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
if evm.readOnly {
return nil, ErrWriteProtection
}
loc := scope.Stack.pop()
val := scope.Stack.pop()
evm.StateDB.SetState(scope.Contract.Address(), loc.Bytes32(), val.Bytes32())

View file

@ -28,6 +28,8 @@ func LookupInstructionSet(rules params.Rules) (JumpTable, error) {
switch {
case rules.IsVerkle:
return newCancunInstructionSet(), errors.New("verkle-fork not defined yet")
case rules.IsAmsterdam:
return newPragueInstructionSet(), nil
case rules.IsOsaka:
return newOsakaInstructionSet(), nil
case rules.IsPrague:

View file

@ -155,50 +155,12 @@ func gasEip2929AccountCheck(evm *EVM, contract *Contract, stack *Stack, mem *Mem
return 0, nil
}
// makeCallVariantGasCallEIP2929 wraps a pre-EIP-2929 call-family gas
// calculator (CALL/CALLCODE/DELEGATECALL/STATICCALL) with warm/cold
// account-access accounting. addressPosition is the stack slot holding the
// callee address for the wrapped opcode.
//
// The returned gasFunc deducts the cold-access surcharge up front, so that
// the wrapped calculator's 63/64ths computation sees the reduced gas. It then
// restores the surcharge to the contract and folds it into the returned
// dynamic gas, so the charge flows through the normal dynamic-gas path and is
// reported correctly to tracers.
func makeCallVariantGasCallEIP2929(oldCalculator gasFunc, addressPosition int) gasFunc {
	return func(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize uint64) (uint64, error) {
		addr := common.Address(stack.Back(addressPosition).Bytes20())
		// Check slot presence in the access list
		warmAccess := evm.StateDB.AddressInAccessList(addr)
		// The WarmStorageReadCostEIP2929 (100) is already deducted in the form of a constant cost, so
		// the cost to charge for cold access, if any, is Cold - Warm
		coldCost := params.ColdAccountAccessCostEIP2929 - params.WarmStorageReadCostEIP2929
		if !warmAccess {
			evm.StateDB.AddAddressToAccessList(addr)
			// Charge the remaining difference here already, to correctly calculate available
			// gas for call
			if !contract.UseGas(coldCost, evm.Config.Tracer, tracing.GasChangeCallStorageColdAccess) {
				return 0, ErrOutOfGas
			}
		}
		// Now call the old calculator, which takes into account
		// - create new account
		// - transfer value
		// - memory expansion
		// - 63/64ths rule
		gas, err := oldCalculator(evm, contract, stack, mem, memorySize)
		if warmAccess || err != nil {
			return gas, err
		}
		// In case of a cold access, we temporarily add the cold charge back, and also
		// add it to the returned gas. By adding it to the return, it will be charged
		// outside of this function, as part of the dynamic gas, and that will make it
		// also become correctly reported to tracers.
		contract.Gas += coldCost
		var overflow bool
		if gas, overflow = math.SafeAdd(gas, coldCost); overflow {
			return 0, ErrGasUintOverflow
		}
		return gas, nil
	}
}
var (
gasCallEIP2929 = makeCallVariantGasCallEIP2929(gasCall, 1)
gasDelegateCallEIP2929 = makeCallVariantGasCallEIP2929(gasDelegateCall, 1)
gasStaticCallEIP2929 = makeCallVariantGasCallEIP2929(gasStaticCall, 1)
gasCallCodeEIP2929 = makeCallVariantGasCallEIP2929(gasCallCode, 1)
// TODO: we can use the same functions already defined above for the 7702 gas handlers
gasCallEIP2929 = makeCallVariantGasCall(gasCallStateless, gasCallStateful)
gasDelegateCallEIP2929 = makeCallVariantGasCall(gasDelegateCallStateless, gasDelegateCallStateful)
gasStaticCallEIP2929 = makeCallVariantGasCall(gasStaticCallStateless, gasStaticCallStateful)
gasCallCodeEIP2929 = makeCallVariantGasCall(gasCallCodeStateless, gasCallCodeStateful)
gasSelfdestructEIP2929 = makeSelfdestructGasFn(true)
// gasSelfdestructEIP3529 implements the changes in EIP-3529 (no refunds)
gasSelfdestructEIP3529 = makeSelfdestructGasFn(false)
@ -243,6 +205,10 @@ func makeSelfdestructGasFn(refundsEnabled bool) gasFunc {
return 0, ErrOutOfGas
}
}
if contract.Gas < gas {
return gas, nil
}
// if empty and transfers value
if evm.StateDB.Empty(address) && evm.StateDB.GetBalance(contract.Address()).Sign() != 0 {
gas += params.CreateBySelfdestructGas
@ -256,33 +222,25 @@ func makeSelfdestructGasFn(refundsEnabled bool) gasFunc {
}
var (
innerGasCallEIP7702 = makeCallVariantGasCallEIP7702(gasCall)
gasDelegateCallEIP7702 = makeCallVariantGasCallEIP7702(gasDelegateCall)
gasStaticCallEIP7702 = makeCallVariantGasCallEIP7702(gasStaticCall)
gasCallCodeEIP7702 = makeCallVariantGasCallEIP7702(gasCallCode)
gasCallEIP7702 = makeCallVariantGasCall(gasCallStateful, gasCallStateless)
gasDelegateCallEIP7702 = makeCallVariantGasCall(gasDelegateCallStateful, gasDelegateCallStateless)
gasStaticCallEIP7702 = makeCallVariantGasCall(gasStaticCallStateful, gasStaticCallStateless)
gasCallCodeEIP7702 = makeCallVariantGasCall(gasCallCodeStateful, gasCallCodeStateless)
)
// gasCallEIP7702 wraps the EIP-7702 CALL gas calculator with an early
// write-protection check.
//
// Although the static-context value-transfer check is also performed in
// `gasCall`, EIP-7702 resolves the target's code (to detect a delegation)
// before that check runs. Bailing out here avoids recording the target in
// the block access list (BAL) for a call that is doomed to fail anyway.
func gasCallEIP7702(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize uint64) (uint64, error) {
	if evm.readOnly && stack.Back(2).Sign() != 0 {
		return 0, ErrWriteProtection
	}
	return innerGasCallEIP7702(evm, contract, stack, mem, memorySize)
}
func makeCallVariantGasCallEIP7702(oldCalculator gasFunc) gasFunc {
func makeCallVariantGasCall(oldCalculatorStateful, oldCalculatorStateless gasFunc) gasFunc {
return func(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize uint64) (uint64, error) {
var (
total uint64 // total dynamic gas used
addr = common.Address(stack.Back(1).Bytes20())
eip150BaseGas uint64 // gas used for memory expansion, transfer costs -> input to the 63/64 bounding
eip7702Gas uint64
eip2929Gas uint64
addr = common.Address(stack.Back(1).Bytes20())
overflow bool
err error
)
// Check slot presence in the access list
if !evm.StateDB.AddressInAccessList(addr) {
if evm.chainRules.IsEIP2929 && !evm.StateDB.AddressInAccessList(addr) {
evm.StateDB.AddAddressToAccessList(addr)
// The WarmStorageReadCostEIP2929 (100) is already deducted in the form of a constant cost, so
// the cost to charge for cold access, if any, is Cold - Warm
@ -292,44 +250,87 @@ func makeCallVariantGasCallEIP7702(oldCalculator gasFunc) gasFunc {
if !contract.UseGas(coldCost, evm.Config.Tracer, tracing.GasChangeCallStorageColdAccess) {
return 0, ErrOutOfGas
}
total += coldCost
eip2929Gas = coldCost
}
// Check if code is a delegation and if so, charge for resolution.
if target, ok := types.ParseDelegation(evm.StateDB.GetCode(addr)); ok {
var cost uint64
if evm.StateDB.AddressInAccessList(target) {
cost = params.WarmStorageReadCostEIP2929
} else {
evm.StateDB.AddAddressToAccessList(target)
cost = params.ColdAccountAccessCostEIP2929
}
if !contract.UseGas(cost, evm.Config.Tracer, tracing.GasChangeCallStorageColdAccess) {
return 0, ErrOutOfGas
}
total += cost
}
// Now call the old calculator, which takes into account
// - create new account
// - transfer value
// - memory expansion
// - 63/64ths rule
old, err := oldCalculator(evm, contract, stack, mem, memorySize)
eip150BaseGas, err = oldCalculatorStateless(evm, contract, stack, mem, memorySize)
if err != nil {
return old, err
return 0, err
}
// ensure the portion of the call cost which doesn't depend on state lookups
// is covered by the provided gas
if contract.Gas < eip150BaseGas {
return 0, ErrOutOfGas
}
oldStateful, err := oldCalculatorStateful(evm, contract, stack, mem, memorySize)
if err != nil {
return oldStateful, err
}
// this should cause BAL test failures if uncommented
baseCost, overflow := math.SafeAdd(eip150BaseGas, oldStateful)
if overflow {
return 0, ErrGasUintOverflow
} else if contract.Gas < baseCost {
return 0, ErrOutOfGas
}
if eip150BaseGas, overflow = math.SafeAdd(eip150BaseGas, oldStateful); overflow {
return 0, ErrOutOfGas
}
if evm.chainRules.IsPrague {
// Check if code is a delegation and if so, charge for resolution.
if target, ok := types.ParseDelegation(evm.StateDB.GetCode(addr)); ok {
if evm.StateDB.AddressInAccessList(target) {
eip7702Gas = params.WarmStorageReadCostEIP2929
} else {
evm.StateDB.AddAddressToAccessList(target)
eip7702Gas = params.ColdAccountAccessCostEIP2929
}
if !contract.UseGas(eip7702Gas, evm.Config.Tracer, tracing.GasChangeCallStorageColdAccess) {
return 0, ErrOutOfGas
}
}
}
evm.callGasTemp, err = callGas(evm.chainRules.IsEIP150, contract.Gas, eip150BaseGas, stack.Back(0))
if err != nil {
return 0, err
}
// TODO: it's not clear what happens if there is enough gas to cover the stateless component
// but not enough to cover the whole call: do all the state reads happen in this case, and
// we fail at the very end?
// Temporarily add the gas charge back to the contract and return value. By
// adding it to the return, it will be charged outside of this function, as
// part of the dynamic gas. This will ensure it is correctly reported to
// tracers.
contract.Gas += total
var overflow bool
if total, overflow = math.SafeAdd(old, total); overflow {
contract.Gas, overflow = math.SafeAdd(contract.Gas, eip2929Gas)
if overflow {
return 0, ErrGasUintOverflow
}
return total, nil
contract.Gas, overflow = math.SafeAdd(contract.Gas, eip7702Gas)
if overflow {
return 0, ErrGasUintOverflow
}
var totalCost uint64
totalCost, overflow = math.SafeAdd(eip2929Gas, eip7702Gas)
if overflow {
return 0, ErrGasUintOverflow
}
totalCost, overflow = math.SafeAdd(totalCost, evm.callGasTemp)
if overflow {
return 0, ErrGasUintOverflow
}
totalCost, overflow = math.SafeAdd(totalCost, eip150BaseGas)
if overflow {
return 0, ErrGasUintOverflow
}
return totalCost, nil
}
}

View file

@ -19,6 +19,7 @@ package eth
import (
"context"
"errors"
"fmt"
"math/big"
"time"
@ -499,3 +500,22 @@ func (b *EthAPIBackend) RPCTxSyncDefaultTimeout() time.Duration {
func (b *EthAPIBackend) RPCTxSyncMaxTimeout() time.Duration {
return b.eth.config.TxSyncMaxTimeout
}
// BlockAccessListByNumberOrHash returns the block access list for the block
// identified by the given number or hash. It returns (nil, nil) when the
// block exists but carries no access list, and an error when the block
// cannot be found.
func (b *EthAPIBackend) BlockAccessListByNumberOrHash(number rpc.BlockNumberOrHash) (interface{}, error) {
	// Resolve the block from whichever locator was supplied; if neither is
	// set, block stays nil and we report "not found" below.
	var block *types.Block
	if num := number.BlockNumber; num != nil {
		block = b.eth.blockchain.GetBlockByNumber(uint64(num.Int64()))
	} else if hash := number.BlockHash; hash != nil {
		block = b.eth.blockchain.GetBlockByHash(*hash)
	}
	if block == nil {
		return nil, errors.New("block not found")
	}
	// Fetch the body once; Body() rebuilds it on every call, so avoid
	// calling it twice.
	body := block.Body()
	if body.AccessList == nil {
		return nil, nil
	}
	return body.AccessList.StringableRepresentation(), nil
}

View file

@ -503,6 +503,7 @@ func (api *DebugAPI) ExecutionWitness(bn rpc.BlockNumber) (*stateless.ExtWitness
if parent == nil {
return &stateless.ExtWitness{}, fmt.Errorf("block number %v found, but parent missing", bn)
}
result, err := bc.ProcessBlock(parent.Root, block, false, true)
if err != nil {
return nil, err
@ -520,6 +521,7 @@ func (api *DebugAPI) ExecutionWitnessByHash(hash common.Hash) (*stateless.ExtWit
if parent == nil {
return &stateless.ExtWitness{}, fmt.Errorf("block number %x found, but parent missing", hash)
}
result, err := bc.ProcessBlock(parent.Root, block, false, true)
if err != nil {
return nil, err

View file

@ -198,9 +198,13 @@ func (api *ConsensusAPI) ForkchoiceUpdatedV3(update engine.ForkchoiceStateV1, pa
return engine.STATUS_INVALID, attributesErr("missing withdrawals")
case params.BeaconRoot == nil:
return engine.STATUS_INVALID, attributesErr("missing beacon root")
case !api.checkFork(params.Timestamp, forks.Cancun, forks.Prague, forks.Osaka, forks.BPO1, forks.BPO2, forks.BPO3, forks.BPO4, forks.BPO5):
case !api.checkFork(params.Timestamp, forks.Cancun, forks.Prague, forks.Osaka, forks.BPO1, forks.BPO2, forks.BPO3, forks.BPO4, forks.BPO5, forks.Amsterdam):
return engine.STATUS_INVALID, unsupportedForkErr("fcuV3 must only be called for cancun/prague/osaka payloads")
}
if api.checkFork(params.Timestamp, forks.Amsterdam) {
return api.forkchoiceUpdated(update, params, engine.PayloadV4, false)
}
}
// TODO(matt): the spec requires that fcu is applied when called on a valid
// hash, even if params are wrong. To do this we need to split up
@ -455,11 +459,23 @@ func (api *ConsensusAPI) GetPayloadV5(payloadID engine.PayloadID) (*engine.Execu
})
}
// GetPayloadV6 returns a cached payload by id. Only PayloadV4-versioned
// payload ids are accepted; any other version yields engine.UnsupportedFork.
func (api *ConsensusAPI) GetPayloadV6(payloadID engine.PayloadID) (*engine.ExecutionPayloadEnvelope, error) {
	allowed := []engine.PayloadVersion{engine.PayloadV4}
	if !payloadID.Is(allowed...) {
		return nil, engine.UnsupportedFork
	}
	return api.getPayload(payloadID, false, allowed, nil)
}
// getPayload will retrieve the specified payload and verify it conforms to the
// endpoint's allowed payload versions and forks.
//
// Note passing nil `forks`, `versions` disables the respective check.
func (api *ConsensusAPI) getPayload(payloadID engine.PayloadID, full bool, versions []engine.PayloadVersion, forks []forks.Fork) (*engine.ExecutionPayloadEnvelope, error) {
log.Trace("Engine API request received", "method", "GetPayload", "id", payloadID)
if versions != nil && !payloadID.Is(versions...) {
return nil, engine.UnsupportedFork
@ -697,6 +713,33 @@ func (api *ConsensusAPI) NewPayloadV4(params engine.ExecutableData, versionedHas
return api.newPayload(params, versionedHashes, beaconRoot, requests, false)
}
// NewPayloadV5 creates an Eth1 block, inserts it in the chain, and returns the status of the chain.
func (api *ConsensusAPI) NewPayloadV5(params engine.ExecutableData, versionedHashes []common.Hash, beaconRoot *common.Hash, executionRequests []hexutil.Bytes) (engine.PayloadStatusV1, error) {
	// Require every field introduced by forks up to this endpoint's version;
	// the check order mirrors NewPayloadV4, with the block access list
	// (EIP-7928) requirement appended.
	switch {
	case params.Withdrawals == nil:
		return invalidStatus, paramsErr("nil withdrawals post-shanghai")
	case params.ExcessBlobGas == nil:
		return invalidStatus, paramsErr("nil excessBlobGas post-cancun")
	case params.BlobGasUsed == nil:
		return invalidStatus, paramsErr("nil blobGasUsed post-cancun")
	case versionedHashes == nil:
		return invalidStatus, paramsErr("nil versionedHashes post-cancun")
	case beaconRoot == nil:
		return invalidStatus, paramsErr("nil beaconRoot post-cancun")
	case executionRequests == nil:
		return invalidStatus, paramsErr("nil executionRequests post-prague")
	case params.BlockAccessList == nil:
		return invalidStatus, paramsErr("nil block access list post-amsterdam")
	// NOTE(review): the fork list below admits prague/osaka payloads while the
	// error message claims amsterdam-only — confirm which side is intended.
	case !api.checkFork(params.Timestamp, forks.Prague, forks.Osaka, forks.Amsterdam):
		return invalidStatus, unsupportedForkErr("newPayloadV5 must only be called for amsterdam payloads")
	}
	// Convert and sanity-check the execution-layer requests before handing
	// the payload to the shared insertion path.
	requests := convertRequests(executionRequests)
	if err := validateRequests(requests); err != nil {
		return engine.PayloadStatusV1{Status: engine.INVALID}, engine.InvalidParams.With(err)
	}
	return api.newPayload(params, versionedHashes, beaconRoot, requests, false)
}
func (api *ConsensusAPI) newPayload(params engine.ExecutableData, versionedHashes []common.Hash, beaconRoot *common.Hash, requests [][]byte, witness bool) (engine.PayloadStatusV1, error) {
// The locking here is, strictly, not required. Without these locks, this can happen:
//

View file

@ -88,10 +88,10 @@ func (q *bodyQueue) request(peer *peerConnection, req *fetchRequest, resCh chan
// deliver is responsible for taking a generic response packet from the concurrent
// fetcher, unpacking the body data and delivering it to the downloader's queue.
func (q *bodyQueue) deliver(peer *peerConnection, packet *eth.Response) (int, error) {
txs, uncles, withdrawals := packet.Res.(*eth.BlockBodiesResponse).Unpack()
hashsets := packet.Meta.([][]common.Hash) // {txs hashes, uncle hashes, withdrawal hashes}
txs, uncles, withdrawals, accessLists := packet.Res.(*eth.BlockBodiesResponse).Unpack()
hashsets := packet.Meta.([][]common.Hash) // {txs hashes, uncle hashes, withdrawal hashes, access list hashes}
accepted, err := q.queue.DeliverBodies(peer.id, txs, hashsets[0], uncles, hashsets[1], withdrawals, hashsets[2])
accepted, err := q.queue.DeliverBodies(peer.id, txs, hashsets[0], uncles, hashsets[1], withdrawals, hashsets[2], accessLists, hashsets[3])
switch {
case err == nil && len(txs) == 0:
peer.log.Trace("Requested bodies delivered")

View file

@ -22,6 +22,7 @@ package downloader
import (
"errors"
"fmt"
"github.com/ethereum/go-ethereum/core/types/bal"
"sync"
"sync/atomic"
"time"
@ -72,6 +73,7 @@ type fetchResult struct {
Transactions types.Transactions
Receipts rlp.RawValue
Withdrawals types.Withdrawals
AccessList *bal.BlockAccessList
}
func newFetchResult(header *types.Header, snapSync bool) *fetchResult {
@ -100,6 +102,7 @@ func (f *fetchResult) body() types.Body {
Transactions: f.Transactions,
Uncles: f.Uncles,
Withdrawals: f.Withdrawals,
AccessList: f.AccessList,
}
}
@ -562,6 +565,7 @@ func (q *queue) expire(peer string, pendPool map[string]*fetchRequest, taskQueue
func (q *queue) DeliverBodies(id string, txLists [][]*types.Transaction, txListHashes []common.Hash,
uncleLists [][]*types.Header, uncleListHashes []common.Hash,
withdrawalLists [][]*types.Withdrawal, withdrawalListHashes []common.Hash,
blockAccessLists []*bal.BlockAccessList, accessListHashes []common.Hash,
) (int, error) {
q.lock.Lock()
defer q.lock.Unlock()
@ -586,6 +590,19 @@ func (q *queue) DeliverBodies(id string, txLists [][]*types.Transaction, txListH
return errInvalidBody
}
}
if header.BlockAccessListHash == nil {
// nil hash means that access list should not be present in body
if blockAccessLists[index] != nil {
return errInvalidBody
}
} else { // non-nil hash: body must have access list
if blockAccessLists[index] == nil {
return errInvalidBody
}
if accessListHashes[index] != *header.BlockAccessListHash {
return errInvalidBody
}
}
// Blocks must have a number of blobs corresponding to the header gas usage,
// and zero before the Cancun hardfork.
var blobs int
@ -624,6 +641,7 @@ func (q *queue) DeliverBodies(id string, txLists [][]*types.Transaction, txListH
result.Transactions = txLists[index]
result.Uncles = uncleLists[index]
result.Withdrawals = withdrawalLists[index]
result.AccessList = blockAccessLists[index]
result.SetBodyDone()
}
return q.deliver(id, q.blockTaskPool, q.blockTaskQueue, q.blockPendPool,

View file

@ -341,7 +341,7 @@ func XTestDelivery(t *testing.T) {
uncleHashes[i] = types.CalcUncleHash(uncles)
}
time.Sleep(100 * time.Millisecond)
_, err := q.DeliverBodies(peer.id, txset, txsHashes, uncleset, uncleHashes, nil, nil)
_, err := q.DeliverBodies(peer.id, txset, txsHashes, uncleset, uncleHashes, nil, nil, nil, nil)
if err != nil {
fmt.Printf("delivered %d bodies %v\n", len(txset), err)
}

View file

@ -208,6 +208,12 @@ type Config struct {
// RangeLimit restricts the maximum range (end - start) for range queries.
RangeLimit uint64 `toml:",omitempty"`
// ExperimentalBAL enables EIP-7928 block access list creation during execution
// of post Cancun blocks, and persistence via embedding the BAL in the block body.
//
// TODO: also note that it will cause execution of blocks with access lists to base
// their execution on the BAL.
ExperimentalBAL bool `toml:",omitempty"`
}
// CreateConsensusEngine creates a consensus engine for the given chain config.

View file

@ -381,6 +381,7 @@ func handleBlockBodies(backend Backend, msg Decoder, peer *Peer) error {
txsHashes = make([]common.Hash, len(res.BlockBodiesResponse))
uncleHashes = make([]common.Hash, len(res.BlockBodiesResponse))
withdrawalHashes = make([]common.Hash, len(res.BlockBodiesResponse))
accessListHashes = make([]common.Hash, len(res.BlockBodiesResponse))
)
hasher := trie.NewStackTrie(nil)
for i, body := range res.BlockBodiesResponse {
@ -389,8 +390,11 @@ func handleBlockBodies(backend Backend, msg Decoder, peer *Peer) error {
if body.Withdrawals != nil {
withdrawalHashes[i] = types.DeriveSha(types.Withdrawals(body.Withdrawals), hasher)
}
if body.AccessList != nil {
accessListHashes[i] = body.AccessList.Hash()
}
}
return [][]common.Hash{txsHashes, uncleHashes, withdrawalHashes}
return [][]common.Hash{txsHashes, uncleHashes, withdrawalHashes, accessListHashes}
}
return peer.dispatchResponse(&Response{
id: res.RequestId,

View file

@ -19,6 +19,7 @@ package eth
import (
"errors"
"fmt"
"github.com/ethereum/go-ethereum/core/types/bal"
"io"
"math/big"
@ -239,20 +240,22 @@ type BlockBody struct {
Transactions []*types.Transaction // Transactions contained within a block
Uncles []*types.Header // Uncles contained within a block
Withdrawals []*types.Withdrawal `rlp:"optional"` // Withdrawals contained within a block
AccessList *bal.BlockAccessList `rlp:"optional"`
}
// Unpack retrieves the transactions and uncles from the range packet and returns
// them in a split flat format that's more consistent with the internal data structures.
func (p *BlockBodiesResponse) Unpack() ([][]*types.Transaction, [][]*types.Header, [][]*types.Withdrawal) {
func (p *BlockBodiesResponse) Unpack() ([][]*types.Transaction, [][]*types.Header, [][]*types.Withdrawal, []*bal.BlockAccessList) {
var (
txset = make([][]*types.Transaction, len(*p))
uncleset = make([][]*types.Header, len(*p))
withdrawalset = make([][]*types.Withdrawal, len(*p))
accessListSet = make([]*bal.BlockAccessList, len(*p))
)
for i, body := range *p {
txset[i], uncleset[i], withdrawalset[i] = body.Transactions, body.Uncles, body.Withdrawals
txset[i], uncleset[i], withdrawalset[i], accessListSet[i] = body.Transactions, body.Uncles, body.Withdrawals, body.AccessList
}
return txset, uncleset, withdrawalset
return txset, uncleset, withdrawalset, accessListSet
}
// GetReceiptsRequest represents a block receipts query.

View file

@ -967,6 +967,9 @@ func RPCMarshalBlock(block *types.Block, inclTx bool, fullTx bool, config *param
if block.Withdrawals() != nil {
fields["withdrawals"] = block.Withdrawals()
}
if block.Body().AccessList != nil {
fields["accessList"] = block.Body().AccessList
}
return fields
}
@ -1336,6 +1339,18 @@ func AccessList(ctx context.Context, b Backend, blockNrOrHash rpc.BlockNumberOrH
}
}
// BlockAccessListByBlockNumber returns a block access list for the given block number
// or nil if one does not exist.
func (api *BlockChainAPI) BlockAccessListByBlockNumber(number rpc.BlockNumber) (interface{}, error) {
	locator := rpc.BlockNumberOrHash{BlockNumber: &number}
	return api.b.BlockAccessListByNumberOrHash(locator)
}
// BlockAccessListByBlockHash returns a block access list for the given block hash
// or nil if one does not exist.
func (api *BlockChainAPI) BlockAccessListByBlockHash(hash common.Hash) (interface{}, error) {
	locator := rpc.BlockNumberOrHash{BlockHash: &hash}
	return api.b.BlockAccessListByNumberOrHash(locator)
}
// TransactionAPI exposes methods for reading and creating transaction data.
type TransactionAPI struct {
b Backend

View file

@ -74,6 +74,7 @@ type Backend interface {
GetEVM(ctx context.Context, state *state.StateDB, header *types.Header, vmConfig *vm.Config, blockCtx *vm.BlockContext) *vm.EVM
SubscribeChainEvent(ch chan<- core.ChainEvent) event.Subscription
SubscribeChainHeadEvent(ch chan<- core.ChainHeadEvent) event.Subscription
BlockAccessListByNumberOrHash(number rpc.BlockNumberOrHash) (interface{}, error)
// Transaction pool API
SendTx(ctx context.Context, signedTx *types.Transaction) error

View file

@ -347,7 +347,7 @@ func (sim *simulator) processBlock(ctx context.Context, block *simBlock, header,
}
blockBody := &types.Body{Transactions: txes, Withdrawals: *block.BlockOverrides.Withdrawals}
chainHeadReader := &simChainHeadReader{ctx, sim.b}
b, err := sim.b.Engine().FinalizeAndAssemble(chainHeadReader, header, sim.state, blockBody, receipts)
b, err := sim.b.Engine().FinalizeAndAssemble(chainHeadReader, header, sim.state, blockBody, receipts, nil)
if err != nil {
return nil, nil, nil, err
}

View file

@ -474,6 +474,11 @@ web3._extend({
params: 1,
inputFormatter: [null],
}),
new web3._extend.Method({
name: 'getEncodedBlockAccessList',
call: 'debug_getEncodedBlockAccessList',
params: 1
}),
],
properties: []
});
@ -605,7 +610,17 @@ web3._extend({
name: 'config',
call: 'eth_config',
params: 0,
})
}),
new web3._extend.Method({
name: 'getBlockAccessListByBlockNumber',
call: 'eth_blockAccessListByBlockNumber',
params: 1,
}),
new web3._extend.Method({
name: 'getBlockAccessListByBlockHash',
call: 'eth_blockAccessListByBlockHash',
params: 1,
}),
],
properties: [
new web3._extend.Property({

View file

@ -19,6 +19,7 @@ package miner
import (
"errors"
"fmt"
"github.com/ethereum/go-ethereum/core/tracing"
"math/big"
"sync/atomic"
"time"
@ -70,7 +71,8 @@ type environment struct {
sidecars []*types.BlobTxSidecar
blobs int
witness *stateless.Witness
witness *stateless.Witness
alTracer *core.BlockAccessListTracer
}
// txFits reports whether the transaction fits into the block size limit.
@ -144,6 +146,9 @@ func (miner *Miner) generateWork(genParam *generateParams, witness bool) *newPay
}
}
body := types.Body{Transactions: work.txs, Withdrawals: genParam.withdrawals}
if work.alTracer != nil {
body.AccessList = work.alTracer.AccessList().ToEncodingObj()
}
allLogs := make([]*types.Log, 0)
for _, r := range work.receipts {
@ -172,10 +177,24 @@ func (miner *Miner) generateWork(genParam *generateParams, witness bool) *newPay
work.header.RequestsHash = &reqHash
}
block, err := miner.engine.FinalizeAndAssemble(miner.chain, work.header, work.state, &body, work.receipts)
// set the block access list on the body after the block has finished executing
// but before the header hash is computed (in FinalizeAndAssemble).
//
// I considered trying to instantiate the beacon consensus engine with a tracer.
// however, the BAL tracer instance is used once per block, while the engine object
// lives for the entire time the client is running.
onBlockFinalization := func() {
if miner.chainConfig.IsAmsterdam(work.header.Number, work.header.Time) {
work.alTracer.OnBlockFinalization()
body.AccessList = work.alTracer.AccessList().ToEncodingObj()
}
}
block, err := miner.engine.FinalizeAndAssemble(miner.chain, work.header, work.state, &body, work.receipts, onBlockFinalization)
if err != nil {
return &newPayloadResult{err: err}
}
return &newPayloadResult{
block: block,
fees: totalFees(block, work.receipts),
@ -266,13 +285,15 @@ func (miner *Miner) prepareWork(genParams *generateParams, witness bool) (*envir
if miner.chainConfig.IsPrague(header.Number, header.Time) {
core.ProcessParentBlockHash(header.ParentHash, env.evm)
}
// TODO: verify that we can make blocks that correctly record the pre-tx system calls
// TODO ^ comprehensive miner unit tests
return env, nil
}
// makeEnv creates a new environment for the sealing block.
func (miner *Miner) makeEnv(parent *types.Header, header *types.Header, coinbase common.Address, witness bool) (*environment, error) {
// Retrieve the parent state to execute on top.
state, err := miner.chain.StateAt(parent.Root)
sdb, err := miner.chain.StateAt(parent.Root)
if err != nil {
return nil, err
}
@ -281,17 +302,27 @@ func (miner *Miner) makeEnv(parent *types.Header, header *types.Header, coinbase
if err != nil {
return nil, err
}
state.StartPrefetcher("miner", bundle, nil)
sdb.StartPrefetcher("miner", bundle, nil)
}
var alTracer *core.BlockAccessListTracer
var hooks *tracing.Hooks
var hookedState vm.StateDB = sdb
var vmConfig vm.Config
if miner.chainConfig.IsAmsterdam(header.Number, header.Time) {
alTracer, hooks = core.NewBlockAccessListTracer()
hookedState = state.NewHookedState(sdb, hooks)
vmConfig.Tracer = hooks
}
// Note the passed coinbase may be different with header.Coinbase.
return &environment{
signer: types.MakeSigner(miner.chainConfig, header.Number, header.Time),
state: state,
state: sdb,
size: uint64(header.Size()),
coinbase: coinbase,
header: header,
witness: state.Witness(),
evm: vm.NewEVM(core.NewEVMBlockContext(header, miner.chain, &coinbase), state, miner.chainConfig, vm.Config{}),
witness: sdb.Witness(),
evm: vm.NewEVM(core.NewEVMBlockContext(header, miner.chain, &coinbase), hookedState, miner.chainConfig, vmConfig),
alTracer: alTracer,
}, nil
}

View file

@ -236,6 +236,8 @@ var (
Cancun: DefaultCancunBlobConfig,
Prague: DefaultPragueBlobConfig,
Osaka: DefaultOsakaBlobConfig,
BPO1: DefaultBPO1BlobConfig,
BPO2: DefaultBPO2BlobConfig,
},
}
@ -1015,9 +1017,11 @@ func (c *ChainConfig) CheckConfigForkOrder() error {
}
if cur.timestamp != nil {
// If the fork is configured, a blob schedule must be defined for it.
if cur.config == nil {
return fmt.Errorf("invalid chain configuration: missing entry for fork %q in blobSchedule", cur.name)
}
/*
if cur.config == nil {
return fmt.Errorf("invalid chain configuration: missing entry for fork %q in blobSchedule", cur.name)
}
*/
}
}
return nil
@ -1172,6 +1176,9 @@ func (c *ChainConfig) LatestFork(time uint64) forks.Fork {
// BlobConfig returns the blob config associated with the provided fork.
func (c *ChainConfig) BlobConfig(fork forks.Fork) *BlobConfig {
switch fork {
case forks.Amsterdam:
// TODO: (????)
return c.BlobScheduleConfig.BPO2
case forks.BPO5:
return c.BlobScheduleConfig.BPO5
case forks.BPO4:
@ -1217,6 +1224,8 @@ func (c *ChainConfig) ActiveSystemContracts(time uint64) map[string]common.Addre
// the fork isn't defined or isn't a time-based fork.
func (c *ChainConfig) Timestamp(fork forks.Fork) *uint64 {
switch {
case fork == forks.Amsterdam:
return c.AmsterdamTime
case fork == forks.BPO5:
return c.BPO5Time
case fork == forks.BPO4:

View file

@ -25,6 +25,7 @@ import (
"io"
"math/big"
"reflect"
"runtime/debug"
"strings"
"sync"
@ -672,6 +673,7 @@ func (s *Stream) ReadBytes(b []byte) error {
return nil
case String:
if uint64(len(b)) != size {
debug.PrintStack()
return fmt.Errorf("input value has wrong size %d, want %d", size, len(b))
}
if err = s.readFull(b); err != nil {

View file

@ -24,6 +24,65 @@ import (
"github.com/ethereum/go-ethereum/core/rawdb"
)
// TestBlockchainBAL runs the blockchain test fixtures with block access list
// recording enabled (the extra `true` argument to execBlockTest), restricted
// to networks where the Cancun fork is active from genesis.
func TestBlockchainBAL(t *testing.T) {
	bt := new(testMatcher)
	// We run most of GeneralStateTests here even though they are run as state
	// tests too. Still, the performance tests are less about state and more
	// about EVM number crunching, so skip those.
	bt.skipLoad(`^GeneralStateTests/VMTests/vmPerformance`)
	// Skip random failures due to selfish mining test
	bt.skipLoad(`.*bcForgedTest/bcForkUncle\.json`)
	// Slow tests
	bt.slow(`.*bcExploitTest/DelegateCallSpam.json`)
	bt.slow(`.*bcExploitTest/ShanghaiLove.json`)
	bt.slow(`.*bcExploitTest/SuicideIssue.json`)
	bt.slow(`.*/bcForkStressTest/`)
	bt.slow(`.*/bcGasPricerTest/RPC_API_Test.json`)
	bt.slow(`.*/bcWalletTest/`)
	// Very slow test
	bt.skipLoad(`.*/stTimeConsuming/.*`)
	// test takes a lot for time and goes easily OOM because of sha3 calculation on a huge range,
	// using 4.6 TGas
	bt.skipLoad(`.*randomStatetest94.json.*`)
	// After the merge we would accept side chains as canonical even if they have lower td
	bt.skipLoad(`.*bcMultiChainTest/ChainAtoChainB_difficultyB.json`)
	bt.skipLoad(`.*bcMultiChainTest/CallContractFromNotBestBlock.json`)
	bt.skipLoad(`.*bcTotalDifficultyTest/uncleBlockAtBlock3afterBlock4.json`)
	bt.skipLoad(`.*bcTotalDifficultyTest/lotsOfBranchesOverrideAtTheMiddle.json`)
	bt.skipLoad(`.*bcTotalDifficultyTest/sideChainWithMoreTransactions.json`)
	bt.skipLoad(`.*bcForkStressTest/ForkStressTest.json`)
	bt.skipLoad(`.*bcMultiChainTest/lotsOfLeafs.json`)
	bt.skipLoad(`.*bcFrontierToHomestead/blockChainFrontierWithLargerTDvsHomesteadBlockchain.json`)
	bt.skipLoad(`.*bcFrontierToHomestead/blockChainFrontierWithLargerTDvsHomesteadBlockchain2.json`)
	// With chain history removal, TDs become unavailable, this transition tests based on TTD are unrunnable
	bt.skipLoad(`.*bcArrowGlacierToParis/powToPosBlockRejection.json`)
	// This directory contains no test.
	bt.skipLoad(`.*\.meta/.*`)
	bt.walk(t, blockTestDir, func(t *testing.T, name string, test *BlockTest) {
		config, ok := Forks[test.json.Network]
		if !ok {
			t.Fatalf("unsupported fork: %s\n", test.json.Network)
		}
		gspec := test.genesis(config)
		// skip any tests which are not past the cancun fork (selfdestruct removal)
		if gspec.Config.CancunTime == nil || *gspec.Config.CancunTime != 0 {
			return
		}
		execBlockTest(t, bt, test, true)
	})
	// There is also a LegacyTests folder, containing blockchain tests generated
	// prior to Istanbul. However, they are all derived from GeneralStateTests,
	// which run natively, so there's no reason to run them here.
}
func TestBlockchain(t *testing.T) {
bt := new(testMatcher)
@ -74,10 +133,9 @@ func TestBlockchain(t *testing.T) {
// which run natively, so there's no reason to run them here.
}
// TestExecutionSpecBlocktests runs the test fixtures from execution-spec-tests.
func TestExecutionSpecBlocktests(t *testing.T) {
if !common.FileExist(executionSpecBlockchainTestDir) {
t.Skipf("directory %s does not exist", executionSpecBlockchainTestDir)
func testExecutionSpecBlocktests(t *testing.T, testDir string) {
if !common.FileExist(testDir) {
t.Skipf("directory %s does not exist", testDir)
}
bt := new(testMatcher)
@ -85,12 +143,22 @@ func TestExecutionSpecBlocktests(t *testing.T) {
bt.skipLoad(".*prague/eip7251_consolidations/test_system_contract_deployment.json")
bt.skipLoad(".*prague/eip7002_el_triggerable_withdrawals/test_system_contract_deployment.json")
bt.walk(t, executionSpecBlockchainTestDir, func(t *testing.T, name string, test *BlockTest) {
bt.walk(t, testDir, func(t *testing.T, name string, test *BlockTest) {
execBlockTest(t, bt, test)
})
}
func execBlockTest(t *testing.T, bt *testMatcher, test *BlockTest) {
// TestExecutionSpecBlocktests runs the standard blockchain test fixtures from
// execution-spec-tests (the executionSpecBlockchainTestDir directory).
func TestExecutionSpecBlocktests(t *testing.T) {
	testExecutionSpecBlocktests(t, executionSpecBlockchainTestDir)
}
// TestExecutionSpecBlocktestsBAL runs the BAL release test fixtures from
// execution-spec-tests (the executionSpecBALBlockchainTestDir directory).
func TestExecutionSpecBlocktestsBAL(t *testing.T) {
	testExecutionSpecBlocktests(t, executionSpecBALBlockchainTestDir)
}
func execBlockTest(t *testing.T, bt *testMatcher, test *BlockTest, buildAndVerifyBAL bool) {
// Define all the different flag combinations we should run the tests with,
// picking only one for short tests.
//
@ -106,7 +174,7 @@ func execBlockTest(t *testing.T, bt *testMatcher, test *BlockTest) {
}
for _, snapshot := range snapshotConf {
for _, dbscheme := range dbschemeConf {
if err := bt.checkFailure(t, test.Run(snapshot, dbscheme, true, nil, nil)); err != nil {
if err := bt.checkFailure(t, test.Run(snapshot, dbscheme, true, buildAndVerifyBAL, nil, nil)); err != nil {
t.Errorf("test with config {snapshotter:%v, scheme:%v} failed: %v", snapshot, dbscheme, err)
return
}

View file

@ -22,11 +22,6 @@ import (
"encoding/hex"
"encoding/json"
"fmt"
stdmath "math"
"math/big"
"os"
"reflect"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/common/math"
@ -37,6 +32,7 @@ import (
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/tracing"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/types/bal"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/params"
@ -44,6 +40,11 @@ import (
"github.com/ethereum/go-ethereum/triedb"
"github.com/ethereum/go-ethereum/triedb/hashdb"
"github.com/ethereum/go-ethereum/triedb/pathdb"
stdmath "math"
"math/big"
"os"
"reflect"
"strings"
)
// A BlockTest checks handling of entire blocks.
@ -71,6 +72,7 @@ type btBlock struct {
ExpectException string
Rlp string
UncleHeaders []*btHeader
AccessList *bal.BlockAccessList `json:"blockAccessList,omitempty"`
}
//go:generate go run github.com/fjl/gencodec -type btHeader -field-override btHeaderMarshaling -out gen_btheader.go
@ -97,6 +99,7 @@ type btHeader struct {
BlobGasUsed *uint64
ExcessBlobGas *uint64
ParentBeaconBlockRoot *common.Hash
BlockAccessListHash *common.Hash
}
type btHeaderMarshaling struct {
@ -111,27 +114,20 @@ type btHeaderMarshaling struct {
ExcessBlobGas *math.HexOrDecimal64
}
func (t *BlockTest) Run(snapshotter bool, scheme string, witness bool, tracer *tracing.Hooks, postCheck func(error, *core.BlockChain)) (result error) {
config, ok := Forks[t.json.Network]
if !ok {
return UnsupportedForkError{t.json.Network}
}
func (t *BlockTest) createTestBlockChain(config *params.ChainConfig, snapshotter bool, scheme string, witness, createAndVerifyBAL bool, tracer *tracing.Hooks) (*core.BlockChain, error) {
// import pre accounts & construct test genesis block & state root
// Commit genesis state
var (
gspec = t.genesis(config)
db = rawdb.NewMemoryDatabase()
tconf = &triedb.Config{
Preimages: true,
IsVerkle: gspec.Config.VerkleTime != nil && *gspec.Config.VerkleTime <= gspec.Timestamp,
}
)
if scheme == rawdb.PathScheme || tconf.IsVerkle {
if scheme == rawdb.PathScheme {
tconf.PathDB = pathdb.Defaults
} else {
tconf.HashDB = hashdb.Defaults
}
gspec := t.genesis(config)
// if ttd is not specified, set an arbitrary huge value
if gspec.Config.TerminalTotalDifficulty == nil {
@ -140,15 +136,15 @@ func (t *BlockTest) Run(snapshotter bool, scheme string, witness bool, tracer *t
triedb := triedb.NewDatabase(db, tconf)
gblock, err := gspec.Commit(db, triedb, nil)
if err != nil {
return err
return nil, err
}
triedb.Close() // close the db to prevent memory leak
if gblock.Hash() != t.json.Genesis.Hash {
return fmt.Errorf("genesis block hash doesn't match test: computed=%x, test=%x", gblock.Hash().Bytes()[:6], t.json.Genesis.Hash[:6])
return nil, fmt.Errorf("genesis block hash doesn't match test: computed=%x, test=%x", gblock.Hash().Bytes()[:6], t.json.Genesis.Hash[:6])
}
if gblock.Root() != t.json.Genesis.StateRoot {
return fmt.Errorf("genesis block state root does not match test: computed=%x, test=%x", gblock.Root().Bytes()[:6], t.json.Genesis.StateRoot[:6])
return nil, fmt.Errorf("genesis block state root does not match test: computed=%x, test=%x", gblock.Root().Bytes()[:6], t.json.Genesis.StateRoot[:6])
}
// Wrap the original engine within the beacon-engine
engine := beacon.New(ethash.NewFaker())
@ -162,12 +158,28 @@ func (t *BlockTest) Run(snapshotter bool, scheme string, witness bool, tracer *t
Tracer: tracer,
StatelessSelfValidation: witness,
},
NoPrefetch: true,
EnableBALForTesting: createAndVerifyBAL,
}
if snapshotter {
options.SnapshotLimit = 1
options.SnapshotWait = true
}
chain, err := core.NewBlockChain(db, gspec, engine, options)
if err != nil {
return nil, err
}
return chain, nil
}
func (t *BlockTest) Run(snapshotter bool, scheme string, witness bool, createAndVerifyBAL bool, tracer *tracing.Hooks, postCheck func(error, *core.BlockChain)) (result error) {
config, ok := Forks[t.json.Network]
if !ok {
return UnsupportedForkError{t.json.Network}
}
// import pre accounts & construct test genesis block & state root
chain, err := t.createTestBlockChain(config, snapshotter, scheme, witness, createAndVerifyBAL, tracer)
if err != nil {
return err
}
@ -201,7 +213,50 @@ func (t *BlockTest) Run(snapshotter bool, scheme string, witness bool, tracer *t
}
}
}
return t.validateImportedHeaders(chain, validBlocks)
err = t.validateImportedHeaders(chain, validBlocks)
if err != nil {
return err
}
if createAndVerifyBAL {
newChain, _ := t.createTestBlockChain(config, snapshotter, scheme, witness, createAndVerifyBAL, tracer)
defer newChain.Stop()
var blocksWithBAL types.Blocks
for i := uint64(1); i <= chain.CurrentBlock().Number.Uint64(); i++ {
block := chain.GetBlockByNumber(i)
if block.Body().AccessList == nil {
return fmt.Errorf("block %d missing BAL", block.NumberU64())
}
blocksWithBAL = append(blocksWithBAL, block)
}
amt, err := newChain.InsertChain(blocksWithBAL)
if err != nil {
return err
}
_ = amt
newDB, err := newChain.State()
if err != nil {
return err
}
if err = t.validatePostState(newDB); err != nil {
return fmt.Errorf("post state validation failed: %v", err)
}
// Cross-check the snapshot-to-hash against the trie hash
if snapshotter {
if newChain.Snapshots() != nil {
if err := chain.Snapshots().Verify(chain.CurrentBlock().Root); err != nil {
return err
}
}
}
err = t.validateImportedHeaders(newChain, validBlocks)
if err != nil {
return err
}
}
return nil
}
// Network returns the network/fork name for this test.
@ -211,20 +266,21 @@ func (t *BlockTest) Network() string {
func (t *BlockTest) genesis(config *params.ChainConfig) *core.Genesis {
return &core.Genesis{
Config: config,
Nonce: t.json.Genesis.Nonce.Uint64(),
Timestamp: t.json.Genesis.Timestamp,
ParentHash: t.json.Genesis.ParentHash,
ExtraData: t.json.Genesis.ExtraData,
GasLimit: t.json.Genesis.GasLimit,
GasUsed: t.json.Genesis.GasUsed,
Difficulty: t.json.Genesis.Difficulty,
Mixhash: t.json.Genesis.MixHash,
Coinbase: t.json.Genesis.Coinbase,
Alloc: t.json.Pre,
BaseFee: t.json.Genesis.BaseFeePerGas,
BlobGasUsed: t.json.Genesis.BlobGasUsed,
ExcessBlobGas: t.json.Genesis.ExcessBlobGas,
Config: config,
Nonce: t.json.Genesis.Nonce.Uint64(),
Timestamp: t.json.Genesis.Timestamp,
ParentHash: t.json.Genesis.ParentHash,
ExtraData: t.json.Genesis.ExtraData,
GasLimit: t.json.Genesis.GasLimit,
GasUsed: t.json.Genesis.GasUsed,
Difficulty: t.json.Genesis.Difficulty,
Mixhash: t.json.Genesis.MixHash,
Coinbase: t.json.Genesis.Coinbase,
Alloc: t.json.Pre,
BaseFee: t.json.Genesis.BaseFeePerGas,
BlobGasUsed: t.json.Genesis.BlobGasUsed,
ExcessBlobGas: t.json.Genesis.ExcessBlobGas,
BlockAccessListHash: t.json.Genesis.BlockAccessListHash,
}
}
@ -254,6 +310,16 @@ func (t *BlockTest) insertBlocks(blockchain *core.BlockChain) ([]btBlock, error)
return nil, fmt.Errorf("block RLP decoding failed when expected to succeed: %v", err)
}
}
// check that if we encode the same block, it will result in the same RLP
var enc bytes.Buffer
if err := rlp.Encode(&enc, cb); err != nil {
return nil, err
}
expected := common.Hex2Bytes(strings.TrimLeft(b.Rlp, "0x"))
if !bytes.Equal(enc.Bytes(), expected) {
return nil, fmt.Errorf("mismatch. expected\n%s\ngot\n%x\n", expected, enc.Bytes())
}
// RLP decoding worked, try to insert into chain:
blocks := types.Blocks{cb}
i, err := blockchain.InsertChain(blocks)
@ -266,7 +332,7 @@ func (t *BlockTest) insertBlocks(blockchain *core.BlockChain) ([]btBlock, error)
}
if b.BlockHeader == nil {
if data, err := json.MarshalIndent(cb.Header(), "", " "); err == nil {
fmt.Fprintf(os.Stdout, "block (index %d) insertion should have failed due to: %v:\n%v\n",
fmt.Fprintf(os.Stderr, "block (index %d) insertion should have failed due to: %v:\n%v\n",
bi, b.ExpectException, string(data))
}
return nil, fmt.Errorf("block (index %d) insertion should have failed due to: %v",

View file

@ -38,6 +38,7 @@ func (b btHeader) MarshalJSON() ([]byte, error) {
BlobGasUsed *math.HexOrDecimal64
ExcessBlobGas *math.HexOrDecimal64
ParentBeaconBlockRoot *common.Hash
BlockAccessListHash *common.Hash
}
var enc btHeader
enc.Bloom = b.Bloom
@ -88,6 +89,7 @@ func (b *btHeader) UnmarshalJSON(input []byte) error {
BlobGasUsed *math.HexOrDecimal64
ExcessBlobGas *math.HexOrDecimal64
ParentBeaconBlockRoot *common.Hash
BlockAccessListHash *common.Hash
}
var dec btHeader
if err := json.Unmarshal(input, &dec); err != nil {
@ -156,5 +158,8 @@ func (b *btHeader) UnmarshalJSON(input []byte) error {
if dec.ParentBeaconBlockRoot != nil {
b.ParentBeaconBlockRoot = dec.ParentBeaconBlockRoot
}
if dec.BlockAccessListHash != nil {
b.BlockAccessListHash = dec.BlockAccessListHash
}
return nil
}

View file

@ -493,6 +493,38 @@ var Forks = map[string]*params.ChainConfig{
BPO1: bpo1BlobConfig,
},
},
"Amsterdam": {
ChainID: big.NewInt(1),
HomesteadBlock: big.NewInt(0),
EIP150Block: big.NewInt(0),
EIP155Block: big.NewInt(0),
EIP158Block: big.NewInt(0),
ByzantiumBlock: big.NewInt(0),
ConstantinopleBlock: big.NewInt(0),
PetersburgBlock: big.NewInt(0),
IstanbulBlock: big.NewInt(0),
MuirGlacierBlock: big.NewInt(0),
BerlinBlock: big.NewInt(0),
LondonBlock: big.NewInt(0),
ArrowGlacierBlock: big.NewInt(0),
MergeNetsplitBlock: big.NewInt(0),
TerminalTotalDifficulty: big.NewInt(0),
ShanghaiTime: u64(0),
CancunTime: u64(0),
PragueTime: u64(0),
OsakaTime: u64(0),
BPO1Time: u64(0),
BPO2Time: u64(0),
AmsterdamTime: u64(0),
DepositContractAddress: params.MainnetChainConfig.DepositContractAddress,
BlobScheduleConfig: &params.BlobScheduleConfig{
Cancun: params.DefaultCancunBlobConfig,
Prague: params.DefaultPragueBlobConfig,
Osaka: params.DefaultOsakaBlobConfig,
BPO1: bpo1BlobConfig,
BPO2: bpo2BlobConfig,
},
},
"OsakaToBPO1AtTime15k": {
ChainID: big.NewInt(1),
HomesteadBlock: big.NewInt(0),

View file

@ -34,17 +34,18 @@ import (
)
var (
baseDir = filepath.Join(".", "testdata")
blockTestDir = filepath.Join(baseDir, "BlockchainTests")
stateTestDir = filepath.Join(baseDir, "GeneralStateTests")
legacyStateTestDir = filepath.Join(baseDir, "LegacyTests", "Constantinople", "GeneralStateTests")
transactionTestDir = filepath.Join(baseDir, "TransactionTests")
rlpTestDir = filepath.Join(baseDir, "RLPTests")
difficultyTestDir = filepath.Join(baseDir, "BasicTests")
executionSpecBlockchainTestDir = filepath.Join(".", "spec-tests", "fixtures", "blockchain_tests")
executionSpecStateTestDir = filepath.Join(".", "spec-tests", "fixtures", "state_tests")
executionSpecTransactionTestDir = filepath.Join(".", "spec-tests", "fixtures", "transaction_tests")
benchmarksDir = filepath.Join(".", "evm-benchmarks", "benchmarks")
baseDir = filepath.Join(".", "testdata")
blockTestDir = filepath.Join(baseDir, "BlockchainTests")
stateTestDir = filepath.Join(baseDir, "GeneralStateTests")
legacyStateTestDir = filepath.Join(baseDir, "LegacyTests", "Constantinople", "GeneralStateTests")
transactionTestDir = filepath.Join(baseDir, "TransactionTests")
rlpTestDir = filepath.Join(baseDir, "RLPTests")
difficultyTestDir = filepath.Join(baseDir, "BasicTests")
executionSpecBlockchainTestDir = filepath.Join(".", "spec-tests", "fixtures", "blockchain_tests")
executionSpecStateTestDir = filepath.Join(".", "spec-tests", "fixtures", "state_tests")
executionSpecTransactionTestDir = filepath.Join(".", "spec-tests", "fixtures", "transaction_tests")
benchmarksDir = filepath.Join(".", "evm-benchmarks", "benchmarks")
executionSpecBALBlockchainTestDir = filepath.Join(".", "spec-tests-bal", "fixtures", "blockchain_tests")
)
func readJSON(reader io.Reader, value interface{}) error {

View file

@ -239,6 +239,14 @@ func (t *BinaryTrie) GetStorage(addr common.Address, key []byte) ([]byte, error)
return t.root.Get(GetBinaryTreeKey(addr, key), t.nodeResolver)
}
// UpdateAccountBatch is not supported by the binary trie and always panics.
func (t *BinaryTrie) UpdateAccountBatch(addresses []common.Address, accounts []*types.StateAccount, _ []int) error {
	panic("not implemented")
}
// UpdateStorageBatch is not supported by the binary trie and always panics.
func (t *BinaryTrie) UpdateStorageBatch(_ common.Address, keys [][]byte, values [][]byte) error {
	panic("not implemented")
}
// UpdateAccount updates the account information for the given address.
func (t *BinaryTrie) UpdateAccount(addr common.Address, acc *types.StateAccount, codeLen int) error {
var (

View file

@ -210,6 +210,29 @@ func (t *StateTrie) UpdateStorage(_ common.Address, key, value []byte) error {
return nil
}
// UpdateStorageBatch applies a set of storage writes to the trie in one
// batched operation: every key is hashed with Keccak256 (recording the
// preimage when preimage tracking is enabled), every value is RLP-encoded,
// and the resulting pairs are handed to the underlying trie's UpdateBatch.
func (t *StateTrie) UpdateStorageBatch(_ common.Address, keys [][]byte, values [][]byte) error {
	hashed := make([][]byte, len(keys))
	for i, key := range keys {
		hk := crypto.Keccak256(key)
		if t.preimages != nil {
			t.secKeyCache[common.Hash(hk)] = key
		}
		hashed[i] = hk
	}
	encoded := make([][]byte, len(values))
	for i, val := range values {
		blob, err := rlp.EncodeToBytes(val)
		if err != nil {
			return err
		}
		encoded[i] = blob
	}
	return t.trie.UpdateBatch(hashed, encoded)
}
// UpdateAccount will abstract the write of an account to the secure trie.
func (t *StateTrie) UpdateAccount(address common.Address, acc *types.StateAccount, _ int) error {
hk := crypto.Keccak256(address.Bytes())
@ -226,6 +249,29 @@ func (t *StateTrie) UpdateAccount(address common.Address, acc *types.StateAccoun
return nil
}
// UpdateAccountBatch applies a set of account writes to the trie in one
// batched operation: every address is hashed with Keccak256 (recording the
// preimage when preimage tracking is enabled), every account is RLP-encoded,
// and the resulting pairs are handed to the underlying trie's UpdateBatch.
func (t *StateTrie) UpdateAccountBatch(addresses []common.Address, accounts []*types.StateAccount, _ []int) error {
	hashed := make([][]byte, len(addresses))
	for i, addr := range addresses {
		hk := crypto.Keccak256(addr.Bytes())
		if t.preimages != nil {
			t.secKeyCache[common.Hash(hk)] = addr.Bytes()
		}
		hashed[i] = hk
	}
	encoded := make([][]byte, len(accounts))
	for i, acc := range accounts {
		blob, err := rlp.EncodeToBytes(acc)
		if err != nil {
			return err
		}
		encoded[i] = blob
	}
	return t.trie.UpdateBatch(hashed, encoded)
}
// UpdateContractCode is a no-op for the state trie. NOTE(review): contract
// code is presumably persisted by another layer (the code database), so
// nothing needs to happen here — confirm against the Trie interface contract.
func (t *StateTrie) UpdateContractCode(_ common.Address, _ common.Hash, _ []byte) error {
	return nil
}

View file

@ -33,12 +33,10 @@ import (
// while the latter is inserted/deleted in order to follow the rule of trie.
// This tool can track all of them no matter the node is embedded in its
// parent or not, but valueNode is never tracked.
//
// Note opTracer is not thread-safe, callers should be responsible for handling
// the concurrency issues by themselves.
type opTracer struct {
	inserts map[string]struct{} // node paths inserted since the last reset
	deletes map[string]struct{} // node paths deleted since the last reset
	lock    sync.RWMutex        // guards inserts and deletes for concurrent access
}
// newOpTracer initializes the tracer for capturing trie changes.
@ -53,6 +51,9 @@ func newOpTracer() *opTracer {
// in the deletion set (resurrected node), then just wipe it from
// the deletion set as it's "untouched".
func (t *opTracer) onInsert(path []byte) {
t.lock.Lock()
defer t.lock.Unlock()
if _, present := t.deletes[string(path)]; present {
delete(t.deletes, string(path))
return
@ -64,6 +65,9 @@ func (t *opTracer) onInsert(path []byte) {
// in the addition set, then just wipe it from the addition set
// as it's untouched.
func (t *opTracer) onDelete(path []byte) {
t.lock.Lock()
defer t.lock.Unlock()
if _, present := t.inserts[string(path)]; present {
delete(t.inserts, string(path))
return
@ -73,12 +77,18 @@ func (t *opTracer) onDelete(path []byte) {
// reset drops everything the tracer has recorded so far. The maps are
// emptied in place (via clear) so their allocated storage is reused.
func (t *opTracer) reset() {
	t.lock.Lock()
	clear(t.deletes)
	clear(t.inserts)
	t.lock.Unlock()
}
// copy returns a deep copied tracer instance.
func (t *opTracer) copy() *opTracer {
t.lock.RLock()
defer t.lock.RUnlock()
return &opTracer{
inserts: maps.Clone(t.inserts),
deletes: maps.Clone(t.deletes),
@ -87,6 +97,9 @@ func (t *opTracer) copy() *opTracer {
// deletedList returns a list of node paths which are deleted from the trie.
func (t *opTracer) deletedList() [][]byte {
t.lock.RLock()
defer t.lock.RUnlock()
paths := make([][]byte, 0, len(t.deletes))
for path := range t.deletes {
paths = append(paths, []byte(path))

View file

@ -49,6 +49,14 @@ func NewTransitionTrie(base *trie.SecureTrie, overlay *bintrie.BinaryTrie, st bo
}
}
// UpdateAccountBatch is not implemented for the transition trie and panics if called.
func (t *TransitionTrie) UpdateAccountBatch(addresses []common.Address, accounts []*types.StateAccount, _ []int) error {
	panic("not implemented")
}
// UpdateStorageBatch is not implemented for the transition trie and panics if called.
func (t *TransitionTrie) UpdateStorageBatch(_ common.Address, keys [][]byte, values [][]byte) error {
	panic("not implemented")
}
// Base returns the base trie.
func (t *TransitionTrie) Base() *trie.SecureTrie {
return t.base

View file

@ -480,6 +480,72 @@ func (t *Trie) insert(n node, prefix, key []byte, value node) (bool, node, error
}
}
// UpdateBatch applies a batch of key/value updates to the trie, processing
// disjoint subtries concurrently.
//
// Entries are grouped by the first nibble of their hex-encoded key, so each
// group maps onto exactly one child of the root full node; the per-group
// goroutines therefore never touch the same subtree. An empty value requests
// deletion of the key, mirroring the Update/Delete convention. If the root is
// not a full node, or the batch is too small to amortize the goroutine
// overhead, the entries are applied sequentially instead.
func (t *Trie) UpdateBatch(keys [][]byte, values [][]byte) error {
	// Short circuit if the trie is already committed and unusable.
	if t.committed {
		return ErrCommitted
	}
	if len(keys) != len(values) {
		return fmt.Errorf("keys and values length mismatch: %d != %d", len(keys), len(values))
	}
	// Insert the entries sequentially if parallelism can't help: either the
	// root is not a full node (nothing to partition on) or the batch is tiny.
	fn, ok := t.root.(*fullNode)
	if !ok || len(keys) < 4 { // TODO(rjl493456442) the parallelism threshold should be tuned
		for i, key := range keys {
			if err := t.Update(key, values[i]); err != nil {
				return err
			}
		}
		return nil
	}
	// Partition the entries by the first nibble of their hex-encoded keys.
	var (
		ikeys = make(map[byte][][]byte)
		ivals = make(map[byte][][]byte)
		eg    errgroup.Group
	)
	for i, key := range keys {
		hkey := keybytesToHex(key)
		ikeys[hkey[0]] = append(ikeys[hkey[0]], hkey)
		ivals[hkey[0]] = append(ivals[hkey[0]], values[i])
	}
	// The root's children are mutated below, so mark it dirty. The batch is
	// known to be non-empty on this path (len(keys) >= 4), hence no guard.
	fn.flags = t.newFlag()

	for p, k := range ikeys {
		pos := p // capture loop variables for pre-Go-1.22 semantics
		ks := k
		eg.Go(func() error {
			// ikeys/ivals are only read once the partitioning loop above has
			// finished, so the concurrent map reads here are safe.
			vs := ivals[pos]
			for i, k := range ks {
				if len(vs[i]) != 0 {
					_, n, err := t.insert(fn.Children[pos], []byte{pos}, k[1:], valueNode(vs[i]))
					if err != nil {
						return err
					}
					fn.Children[pos] = n
				} else {
					// Empty value: delete the key instead of inserting.
					_, n, err := t.delete(fn.Children[pos], []byte{pos}, k[1:])
					if err != nil {
						return err
					}
					fn.Children[pos] = n
				}
			}
			return nil
		})
	}
	if err := eg.Wait(); err != nil {
		return err
	}
	t.unhashed += len(keys)
	t.uncommitted += len(keys)
	return nil
}
// MustDelete is a wrapper of Delete and will omit any encountered error but
// just print out an error message.
func (t *Trie) MustDelete(key []byte) {

View file

@ -1500,82 +1500,56 @@ func testTrieCopyNewTrie(t *testing.T, entries []kv) {
}
}
// goos: darwin
// goarch: arm64
// pkg: github.com/ethereum/go-ethereum/trie
// cpu: Apple M1 Pro
// BenchmarkTriePrefetch
// BenchmarkTriePrefetch-8 9961 100706 ns/op
func BenchmarkTriePrefetch(b *testing.B) {
db := newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme)
tr := NewEmpty(db)
vals := make(map[string]*kv)
for i := 0; i < 3000; i++ {
value := &kv{
k: randBytes(32),
v: randBytes(20),
t: false,
}
tr.MustUpdate(value.k, value.v)
vals[string(value.k)] = value
}
root, nodes := tr.Commit(false)
db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes))
b.ResetTimer()
func TestUpdateBatch(t *testing.T) {
testUpdateBatch(t, []kv{
{k: []byte("do"), v: []byte("verb")},
{k: []byte("ether"), v: []byte("wookiedoo")},
{k: []byte("horse"), v: []byte("stallion")},
{k: []byte("shaman"), v: []byte("horse")},
{k: []byte("doge"), v: []byte("coin")},
{k: []byte("dog"), v: []byte("puppy")},
})
for i := 0; i < b.N; i++ {
tr, err := New(TrieID(root), db)
if err != nil {
b.Fatalf("Failed to open the trie")
}
var keys [][]byte
for k := range vals {
keys = append(keys, []byte(k))
if len(keys) > 64 {
break
}
}
tr.Prefetch(keys)
var entries []kv
for i := 0; i < 256; i++ {
entries = append(entries, kv{k: testrand.Bytes(32), v: testrand.Bytes(32)})
}
testUpdateBatch(t, entries)
}
// goos: darwin
// goarch: arm64
// pkg: github.com/ethereum/go-ethereum/trie
// cpu: Apple M1 Pro
// BenchmarkTrieSeqPrefetch
// BenchmarkTrieSeqPrefetch-8 12879 96710 ns/op
func BenchmarkTrieSeqPrefetch(b *testing.B) {
db := newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme)
tr := NewEmpty(db)
vals := make(map[string]*kv)
for i := 0; i < 3000; i++ {
value := &kv{
k: randBytes(32),
v: randBytes(20),
t: false,
}
tr.MustUpdate(value.k, value.v)
vals[string(value.k)] = value
func testUpdateBatch(t *testing.T, entries []kv) {
var (
base = NewEmpty(nil)
keys [][]byte
vals [][]byte
)
for _, entry := range entries {
base.Update(entry.k, entry.v)
keys = append(keys, entry.k)
vals = append(vals, entry.v)
}
for i := 0; i < 10; i++ {
k, v := testrand.Bytes(32), testrand.Bytes(32)
base.Update(k, v)
keys = append(keys, k)
vals = append(vals, v)
}
root, nodes := tr.Commit(false)
db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes))
b.ResetTimer()
for i := 0; i < b.N; i++ {
tr, err := New(TrieID(root), db)
if err != nil {
b.Fatalf("Failed to open the trie")
}
var keys [][]byte
for k := range vals {
keys = append(keys, []byte(k))
if len(keys) > 64 {
break
}
}
for _, k := range keys {
tr.Get(k)
cmp := NewEmpty(nil)
if err := cmp.UpdateBatch(keys, vals); err != nil {
t.Fatalf("Failed to update batch, %v", err)
}
// Traverse the original tree, the changes made on the copy one shouldn't
// affect the old one
for _, key := range keys {
v1, _ := base.Get(key)
v2, _ := cmp.Get(key)
if !bytes.Equal(v1, v2) {
t.Errorf("Unexpected data, key: %v, want: %v, got: %v", key, v1, v2)
}
}
if base.Hash() != cmp.Hash() {
t.Errorf("Hash mismatch: want %x, got %x", base.Hash(), cmp.Hash())
}
}

View file

@ -18,7 +18,6 @@ package triedb
import (
"errors"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/ethdb"