all: implement eip 7928 block access lists

This commit is contained in:
Jared Wasinger 2026-03-02 23:13:16 -05:00
parent 8f1d2aee94
commit 3f8e9f5c02
82 changed files with 4958 additions and 1330 deletions

View file

@ -10,6 +10,7 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/types/bal"
)
var _ = (*executableDataMarshaling)(nil)
@ -17,24 +18,25 @@ var _ = (*executableDataMarshaling)(nil)
// MarshalJSON marshals as JSON.
func (e ExecutableData) MarshalJSON() ([]byte, error) {
type ExecutableData struct {
ParentHash common.Hash `json:"parentHash" gencodec:"required"`
FeeRecipient common.Address `json:"feeRecipient" gencodec:"required"`
StateRoot common.Hash `json:"stateRoot" gencodec:"required"`
ReceiptsRoot common.Hash `json:"receiptsRoot" gencodec:"required"`
LogsBloom hexutil.Bytes `json:"logsBloom" gencodec:"required"`
Random common.Hash `json:"prevRandao" gencodec:"required"`
Number hexutil.Uint64 `json:"blockNumber" gencodec:"required"`
GasLimit hexutil.Uint64 `json:"gasLimit" gencodec:"required"`
GasUsed hexutil.Uint64 `json:"gasUsed" gencodec:"required"`
Timestamp hexutil.Uint64 `json:"timestamp" gencodec:"required"`
ExtraData hexutil.Bytes `json:"extraData" gencodec:"required"`
BaseFeePerGas *hexutil.Big `json:"baseFeePerGas" gencodec:"required"`
BlockHash common.Hash `json:"blockHash" gencodec:"required"`
Transactions []hexutil.Bytes `json:"transactions" gencodec:"required"`
Withdrawals []*types.Withdrawal `json:"withdrawals"`
BlobGasUsed *hexutil.Uint64 `json:"blobGasUsed"`
ExcessBlobGas *hexutil.Uint64 `json:"excessBlobGas"`
SlotNumber *hexutil.Uint64 `json:"slotNumber"`
ParentHash common.Hash `json:"parentHash" gencodec:"required"`
FeeRecipient common.Address `json:"feeRecipient" gencodec:"required"`
StateRoot common.Hash `json:"stateRoot" gencodec:"required"`
ReceiptsRoot common.Hash `json:"receiptsRoot" gencodec:"required"`
LogsBloom hexutil.Bytes `json:"logsBloom" gencodec:"required"`
Random common.Hash `json:"prevRandao" gencodec:"required"`
Number hexutil.Uint64 `json:"blockNumber" gencodec:"required"`
GasLimit hexutil.Uint64 `json:"gasLimit" gencodec:"required"`
GasUsed hexutil.Uint64 `json:"gasUsed" gencodec:"required"`
Timestamp hexutil.Uint64 `json:"timestamp" gencodec:"required"`
ExtraData hexutil.Bytes `json:"extraData" gencodec:"required"`
BaseFeePerGas *hexutil.Big `json:"baseFeePerGas" gencodec:"required"`
BlockHash common.Hash `json:"blockHash" gencodec:"required"`
Transactions []hexutil.Bytes `json:"transactions" gencodec:"required"`
Withdrawals []*types.Withdrawal `json:"withdrawals"`
BlobGasUsed *hexutil.Uint64 `json:"blobGasUsed"`
ExcessBlobGas *hexutil.Uint64 `json:"excessBlobGas"`
BlockAccessList *bal.BlockAccessList `json:"blockAccessList"`
SlotNumber *hexutil.Uint64 `json:"slotNumber"`
}
var enc ExecutableData
enc.ParentHash = e.ParentHash
@ -59,6 +61,7 @@ func (e ExecutableData) MarshalJSON() ([]byte, error) {
enc.Withdrawals = e.Withdrawals
enc.BlobGasUsed = (*hexutil.Uint64)(e.BlobGasUsed)
enc.ExcessBlobGas = (*hexutil.Uint64)(e.ExcessBlobGas)
enc.BlockAccessList = e.BlockAccessList
enc.SlotNumber = (*hexutil.Uint64)(e.SlotNumber)
return json.Marshal(&enc)
}
@ -66,24 +69,25 @@ func (e ExecutableData) MarshalJSON() ([]byte, error) {
// UnmarshalJSON unmarshals from JSON.
func (e *ExecutableData) UnmarshalJSON(input []byte) error {
type ExecutableData struct {
ParentHash *common.Hash `json:"parentHash" gencodec:"required"`
FeeRecipient *common.Address `json:"feeRecipient" gencodec:"required"`
StateRoot *common.Hash `json:"stateRoot" gencodec:"required"`
ReceiptsRoot *common.Hash `json:"receiptsRoot" gencodec:"required"`
LogsBloom *hexutil.Bytes `json:"logsBloom" gencodec:"required"`
Random *common.Hash `json:"prevRandao" gencodec:"required"`
Number *hexutil.Uint64 `json:"blockNumber" gencodec:"required"`
GasLimit *hexutil.Uint64 `json:"gasLimit" gencodec:"required"`
GasUsed *hexutil.Uint64 `json:"gasUsed" gencodec:"required"`
Timestamp *hexutil.Uint64 `json:"timestamp" gencodec:"required"`
ExtraData *hexutil.Bytes `json:"extraData" gencodec:"required"`
BaseFeePerGas *hexutil.Big `json:"baseFeePerGas" gencodec:"required"`
BlockHash *common.Hash `json:"blockHash" gencodec:"required"`
Transactions []hexutil.Bytes `json:"transactions" gencodec:"required"`
Withdrawals []*types.Withdrawal `json:"withdrawals"`
BlobGasUsed *hexutil.Uint64 `json:"blobGasUsed"`
ExcessBlobGas *hexutil.Uint64 `json:"excessBlobGas"`
SlotNumber *hexutil.Uint64 `json:"slotNumber"`
ParentHash *common.Hash `json:"parentHash" gencodec:"required"`
FeeRecipient *common.Address `json:"feeRecipient" gencodec:"required"`
StateRoot *common.Hash `json:"stateRoot" gencodec:"required"`
ReceiptsRoot *common.Hash `json:"receiptsRoot" gencodec:"required"`
LogsBloom *hexutil.Bytes `json:"logsBloom" gencodec:"required"`
Random *common.Hash `json:"prevRandao" gencodec:"required"`
Number *hexutil.Uint64 `json:"blockNumber" gencodec:"required"`
GasLimit *hexutil.Uint64 `json:"gasLimit" gencodec:"required"`
GasUsed *hexutil.Uint64 `json:"gasUsed" gencodec:"required"`
Timestamp *hexutil.Uint64 `json:"timestamp" gencodec:"required"`
ExtraData *hexutil.Bytes `json:"extraData" gencodec:"required"`
BaseFeePerGas *hexutil.Big `json:"baseFeePerGas" gencodec:"required"`
BlockHash *common.Hash `json:"blockHash" gencodec:"required"`
Transactions []hexutil.Bytes `json:"transactions" gencodec:"required"`
Withdrawals []*types.Withdrawal `json:"withdrawals"`
BlobGasUsed *hexutil.Uint64 `json:"blobGasUsed"`
ExcessBlobGas *hexutil.Uint64 `json:"excessBlobGas"`
BlockAccessList *bal.BlockAccessList `json:"blockAccessList"`
SlotNumber *hexutil.Uint64 `json:"slotNumber"`
}
var dec ExecutableData
if err := json.Unmarshal(input, &dec); err != nil {
@ -157,6 +161,9 @@ func (e *ExecutableData) UnmarshalJSON(input []byte) error {
if dec.ExcessBlobGas != nil {
e.ExcessBlobGas = (*uint64)(dec.ExcessBlobGas)
}
if dec.BlockAccessList != nil {
e.BlockAccessList = dec.BlockAccessList
}
if dec.SlotNumber != nil {
e.SlotNumber = (*uint64)(dec.SlotNumber)
}

View file

@ -18,6 +18,7 @@ package engine
import (
"fmt"
"github.com/ethereum/go-ethereum/core/types/bal"
"math/big"
"slices"
@ -82,24 +83,25 @@ type payloadAttributesMarshaling struct {
// ExecutableData is the data necessary to execute an EL payload.
type ExecutableData struct {
ParentHash common.Hash `json:"parentHash" gencodec:"required"`
FeeRecipient common.Address `json:"feeRecipient" gencodec:"required"`
StateRoot common.Hash `json:"stateRoot" gencodec:"required"`
ReceiptsRoot common.Hash `json:"receiptsRoot" gencodec:"required"`
LogsBloom []byte `json:"logsBloom" gencodec:"required"`
Random common.Hash `json:"prevRandao" gencodec:"required"`
Number uint64 `json:"blockNumber" gencodec:"required"`
GasLimit uint64 `json:"gasLimit" gencodec:"required"`
GasUsed uint64 `json:"gasUsed" gencodec:"required"`
Timestamp uint64 `json:"timestamp" gencodec:"required"`
ExtraData []byte `json:"extraData" gencodec:"required"`
BaseFeePerGas *big.Int `json:"baseFeePerGas" gencodec:"required"`
BlockHash common.Hash `json:"blockHash" gencodec:"required"`
Transactions [][]byte `json:"transactions" gencodec:"required"`
Withdrawals []*types.Withdrawal `json:"withdrawals"`
BlobGasUsed *uint64 `json:"blobGasUsed"`
ExcessBlobGas *uint64 `json:"excessBlobGas"`
SlotNumber *uint64 `json:"slotNumber"`
ParentHash common.Hash `json:"parentHash" gencodec:"required"`
FeeRecipient common.Address `json:"feeRecipient" gencodec:"required"`
StateRoot common.Hash `json:"stateRoot" gencodec:"required"`
ReceiptsRoot common.Hash `json:"receiptsRoot" gencodec:"required"`
LogsBloom []byte `json:"logsBloom" gencodec:"required"`
Random common.Hash `json:"prevRandao" gencodec:"required"`
Number uint64 `json:"blockNumber" gencodec:"required"`
GasLimit uint64 `json:"gasLimit" gencodec:"required"`
GasUsed uint64 `json:"gasUsed" gencodec:"required"`
Timestamp uint64 `json:"timestamp" gencodec:"required"`
ExtraData []byte `json:"extraData" gencodec:"required"`
BaseFeePerGas *big.Int `json:"baseFeePerGas" gencodec:"required"`
BlockHash common.Hash `json:"blockHash" gencodec:"required"`
Transactions [][]byte `json:"transactions" gencodec:"required"`
Withdrawals []*types.Withdrawal `json:"withdrawals"`
BlobGasUsed *uint64 `json:"blobGasUsed"`
ExcessBlobGas *uint64 `json:"excessBlobGas"`
BlockAccessList *bal.BlockAccessList `json:"blockAccessList"`
SlotNumber *uint64 `json:"slotNumber"`
}
// JSON type overrides for executableData.
@ -303,6 +305,8 @@ func ExecutableDataToBlockNoHash(data ExecutableData, versionedHashes []common.H
requestsHash = &h
}
body := types.Body{Transactions: txs, Uncles: nil, Withdrawals: data.Withdrawals}
header := &types.Header{
ParentHash: data.ParentHash,
UncleHash: types.EmptyUncleHash,
@ -326,33 +330,41 @@ func ExecutableDataToBlockNoHash(data ExecutableData, versionedHashes []common.H
RequestsHash: requestsHash,
SlotNumber: data.SlotNumber,
}
return types.NewBlockWithHeader(header).
WithBody(types.Body{Transactions: txs, Uncles: nil, Withdrawals: data.Withdrawals}),
nil
if data.BlockAccessList != nil {
balHash := data.BlockAccessList.Hash()
header.BlockAccessListHash = &balHash
block := types.NewBlockWithHeader(header).WithBody(body).WithAccessList(data.BlockAccessList)
return block, nil
}
return types.NewBlockWithHeader(header).WithBody(body), nil
}
// BlockToExecutableData constructs the ExecutableData structure by filling the
// fields from the given block. It assumes the given block is post-merge block.
func BlockToExecutableData(block *types.Block, fees *big.Int, sidecars []*types.BlobTxSidecar, requests [][]byte) *ExecutionPayloadEnvelope {
data := &ExecutableData{
BlockHash: block.Hash(),
ParentHash: block.ParentHash(),
FeeRecipient: block.Coinbase(),
StateRoot: block.Root(),
Number: block.NumberU64(),
GasLimit: block.GasLimit(),
GasUsed: block.GasUsed(),
BaseFeePerGas: block.BaseFee(),
Timestamp: block.Time(),
ReceiptsRoot: block.ReceiptHash(),
LogsBloom: block.Bloom().Bytes(),
Transactions: encodeTransactions(block.Transactions()),
Random: block.MixDigest(),
ExtraData: block.Extra(),
Withdrawals: block.Withdrawals(),
BlobGasUsed: block.BlobGasUsed(),
ExcessBlobGas: block.ExcessBlobGas(),
SlotNumber: block.SlotNumber(),
BlockHash: block.Hash(),
ParentHash: block.ParentHash(),
FeeRecipient: block.Coinbase(),
StateRoot: block.Root(),
Number: block.NumberU64(),
GasLimit: block.GasLimit(),
GasUsed: block.GasUsed(),
BaseFeePerGas: block.BaseFee(),
Timestamp: block.Time(),
ReceiptsRoot: block.ReceiptHash(),
LogsBloom: block.Bloom().Bytes(),
Transactions: encodeTransactions(block.Transactions()),
Random: block.MixDigest(),
ExtraData: block.Extra(),
Withdrawals: block.Withdrawals(),
BlobGasUsed: block.BlobGasUsed(),
ExcessBlobGas: block.ExcessBlobGas(),
BlockAccessList: block.AccessList(),
SlotNumber: block.SlotNumber(),
}
// Add blobs.
@ -391,8 +403,9 @@ func BlockToExecutableData(block *types.Block, fees *big.Int, sidecars []*types.
// ExecutionPayloadBody is used in the response to GetPayloadBodiesByHash and GetPayloadBodiesByRange
type ExecutionPayloadBody struct {
TransactionData []hexutil.Bytes `json:"transactions"`
Withdrawals []*types.Withdrawal `json:"withdrawals"`
TransactionData []hexutil.Bytes `json:"transactions"`
Withdrawals []*types.Withdrawal `json:"withdrawals"`
AccessList *bal.BlockAccessList `json:"blockAccessList"`
}
// Client identifiers to support ClientVersionV1.

View file

@ -5,6 +5,11 @@
# https://github.com/ethereum/execution-spec-tests/releases/download/v5.1.0
a3192784375acec7eaec492799d5c5d0c47a2909a3cc40178898e4ecd20cc416 fixtures_develop.tar.gz
# version:spec-tests-bal v5.0.0
# https://github.com/ethereum/execution-spec-tests/releases
# https://github.com/ethereum/execution-spec-tests/releases/download/bal%40v5.1.0
c8a7406e6337c1dfd2540f0477afb8abe965c5ed2a63382d7a483eb818f79939 fixtures_bal.tar.gz
# version:golang 1.25.7
# https://go.dev/dl/
178f2832820274b43e177d32f06a3ebb0129e427dd20a5e4c88df2c1763cf10a go1.25.7.src.tar.gz

View file

@ -172,6 +172,9 @@ var (
// This is where the tests should be unpacked.
executionSpecTestsDir = "tests/spec-tests"
// This is where the bal-specific release of the tests should be unpacked.
executionSpecTestsBALDir = "tests/spec-tests-bal"
)
var GOBIN, _ = filepath.Abs(filepath.Join("build", "bin"))
@ -380,6 +383,7 @@ func doTest(cmdline []string) {
// Get test fixtures.
if !*short {
downloadSpecTestFixtures(csdb, *cachedir)
downloadBALSpecTestFixtures(csdb, *cachedir)
}
// Configure the toolchain.
@ -445,6 +449,19 @@ func downloadSpecTestFixtures(csdb *download.ChecksumDB, cachedir string) string
return filepath.Join(cachedir, base)
}
func downloadBALSpecTestFixtures(csdb *download.ChecksumDB, cachedir string) string {
ext := ".tar.gz"
base := "fixtures_bal"
archivePath := filepath.Join(cachedir, base+ext)
if err := csdb.DownloadFileFromKnownURL(archivePath); err != nil {
log.Fatal(err)
}
if err := build.ExtractArchive(archivePath, executionSpecTestsBALDir); err != nil {
log.Fatal(err)
}
return filepath.Join(cachedir, base)
}
// doCheckGenerate ensures that re-generating generated files does not cause
// any mutations in the source file tree.
func doCheckGenerate() {

View file

@ -117,7 +117,7 @@ func runBlockTest(ctx *cli.Context, fname string) ([]testResult, error) {
test := tests[name]
result := &testResult{Name: name, Pass: true}
var finalRoot *common.Hash
if err := test.Run(false, rawdb.PathScheme, ctx.Bool(WitnessCrossCheckFlag.Name), tracer, func(res error, chain *core.BlockChain) {
if err := test.Run(false, rawdb.PathScheme, ctx.Bool(WitnessCrossCheckFlag.Name), false, tracer, func(res error, chain *core.BlockChain) {
if ctx.Bool(DumpFlag.Name) {
if s, _ := chain.State(); s != nil {
result.State = dump(s)

View file

@ -240,7 +240,21 @@ func makeFullNode(ctx *cli.Context) *node.Node {
cfg.Eth.OverrideVerkle = &v
}
// Start metrics export if enabled.
if ctx.IsSet(utils.BlockAccessListExecutionModeFlag.Name) {
val := ctx.String(utils.BlockAccessListExecutionModeFlag.Name)
switch val {
case utils.BalExecutionModeFull:
cfg.Eth.BALExecutionMode = 0
case utils.BalExecutionModeNoBatchIO:
cfg.Eth.BALExecutionMode = 1
case utils.BalExecutionModeSequential:
cfg.Eth.BALExecutionMode = 2
default:
utils.Fatalf("invalid option for --bal.executionmode: %s. acceptable values are full|nobatchio|sequential", val)
}
}
// Start metrics export if enabled
utils.SetupMetrics(&cfg.Metrics)
// Setup OpenTelemetry reporting if enabled.

View file

@ -273,6 +273,7 @@ func init() {
consoleFlags,
debug.Flags,
metricsFlags,
[]cli.Flag{utils.BlockAccessListExecutionModeFlag},
)
flags.AutoEnvVars(app.Flags, "GETH")

View file

@ -1108,6 +1108,31 @@ Please note that --` + MetricsHTTPFlag.Name + ` must be set to start the server.
Name: "era.format",
Usage: "Archive format: 'era1' or 'erae'",
}
// Block Access List flags
ExperimentalBALFlag = &cli.BoolFlag{
Name: "experimental.bal",
Usage: "Enable generation of EIP-7928 block access lists when importing post-Cancun blocks which lack them. When this flag is specified, importing blocks containing access lists triggers validation of their correctness and execution based off them. The header block access list field is not set with blocks created when this flag is specified, nor is it validated when importing blocks that contain access lists. This is used for development purposes only. Do not enable it otherwise.",
Category: flags.MiscCategory,
}
// block access list flags
BlockAccessListExecutionModeFlag = &cli.StringFlag{
Name: "bal.executionmode",
Usage: `
block access list execution type. possible inputs are:
- sequential: no performance acceleration
- full: parallel transaction execution, state root calculation, async warming of access list reads
- nobatchio: same as 'full', but without async warming of access list reads`,
Value: BalExecutionModeFull,
Category: flags.MiscCategory,
}
)
const (
BalExecutionModeFull = "full"
BalExecutionModeNoBatchIO = "nobatchio"
BalExecutionModeSequential = "sequential"
)
var (

View file

@ -19,6 +19,7 @@ package beacon
import (
"errors"
"fmt"
"github.com/ethereum/go-ethereum/core/types/bal"
"math/big"
"github.com/ethereum/go-ethereum/common"
@ -280,6 +281,12 @@ func (beacon *Beacon) verifyHeader(chain consensus.ChainHeaderReader, header, pa
if !amsterdam && header.SlotNumber != nil {
return fmt.Errorf("invalid slotNumber: have %d, expected nil", *header.SlotNumber)
}
if !amsterdam && header.BlockAccessListHash != nil {
return fmt.Errorf("invalid block access list hash: have %x, expected nil", header.BlockAccessListHash)
}
if amsterdam && header.BlockAccessListHash == nil {
return fmt.Errorf("header is missing block access list hash")
}
return nil
}
@ -334,26 +341,29 @@ func (beacon *Beacon) Prepare(chain consensus.ChainHeaderReader, header *types.H
}
// Finalize implements consensus.Engine and processes withdrawals on top.
func (beacon *Beacon) Finalize(chain consensus.ChainHeaderReader, header *types.Header, state vm.StateDB, body *types.Body) {
func (beacon *Beacon) Finalize(chain consensus.ChainHeaderReader, header *types.Header, state vm.StateDB, body *types.Body) bal.StateMutations {
if !beacon.IsPoSHeader(header) {
beacon.ethone.Finalize(chain, header, state, body)
return
return beacon.ethone.Finalize(chain, header, state, body)
}
// Withdrawals processing.
for _, w := range body.Withdrawals {
// always read the target account regardless of withdrawal amt to include it in the BAL
state.GetBalance(w.Address)
// Convert amount from gwei to wei.
amount := new(uint256.Int).SetUint64(w.Amount)
amount = amount.Mul(amount, uint256.NewInt(params.GWei))
state.AddBalance(w.Address, amount, tracing.BalanceIncreaseWithdrawal)
}
return state.Finalise(true)
// No block reward which is issued by consensus layer instead.
}
// FinalizeAndAssemble implements consensus.Engine, setting the final state and
// assembling the block.
func (beacon *Beacon) FinalizeAndAssemble(chain consensus.ChainHeaderReader, header *types.Header, state *state.StateDB, body *types.Body, receipts []*types.Receipt) (*types.Block, error) {
func (beacon *Beacon) FinalizeAndAssemble(chain consensus.ChainHeaderReader, header *types.Header, state *state.StateDB, body *types.Body, receipts []*types.Receipt, onFinalizeAccessList func(postMut bal.StateMutations) *bal.BlockAccessList) (*types.Block, error) {
if !beacon.IsPoSHeader(header) {
return beacon.ethone.FinalizeAndAssemble(chain, header, state, body, receipts)
return beacon.ethone.FinalizeAndAssemble(chain, header, state, body, receipts, nil)
}
shanghai := chain.Config().IsShanghai(header.Number, header.Time)
if shanghai {
@ -366,14 +376,24 @@ func (beacon *Beacon) FinalizeAndAssemble(chain consensus.ChainHeaderReader, hea
return nil, errors.New("withdrawals set before Shanghai activation")
}
}
// Finalize and assemble the block.
beacon.Finalize(chain, header, state, body)
postMut := beacon.Finalize(chain, header, state, body)
// Assign the final state root to header.
header.Root = state.IntermediateRoot(true)
// Assemble the final block.
return types.NewBlock(header, body, receipts, trie.NewStackTrie(nil)), nil
if onFinalizeAccessList != nil {
al := onFinalizeAccessList(postMut)
alHash := al.Hash()
header.BlockAccessListHash = &alHash
block := types.NewBlock(header, body, receipts, trie.NewStackTrie(nil)).WithAccessList(al)
return block, nil
} else {
return types.NewBlock(header, body, receipts, trie.NewStackTrie(nil)), nil
}
}
// Seal generates a new sealing request for the given input block and pushes

View file

@ -21,6 +21,7 @@ import (
"bytes"
"errors"
"fmt"
"github.com/ethereum/go-ethereum/core/types/bal"
"io"
"math/big"
"math/rand"
@ -575,13 +576,14 @@ func (c *Clique) Prepare(chain consensus.ChainHeaderReader, header *types.Header
// Finalize implements consensus.Engine. There is no post-transaction
// consensus rules in clique, do nothing here.
func (c *Clique) Finalize(chain consensus.ChainHeaderReader, header *types.Header, state vm.StateDB, body *types.Body) {
func (c *Clique) Finalize(chain consensus.ChainHeaderReader, header *types.Header, state vm.StateDB, body *types.Body) (mut bal.StateMutations) {
// No block rewards in PoA, so the state remains as is
return
}
// FinalizeAndAssemble implements consensus.Engine, ensuring no uncles are set,
// nor block rewards given, and returns the final block.
func (c *Clique) FinalizeAndAssemble(chain consensus.ChainHeaderReader, header *types.Header, state *state.StateDB, body *types.Body, receipts []*types.Receipt) (*types.Block, error) {
func (c *Clique) FinalizeAndAssemble(chain consensus.ChainHeaderReader, header *types.Header, state *state.StateDB, body *types.Body, receipts []*types.Receipt, onFinalizeAccessList func(withdrawalMut bal.StateMutations) *bal.BlockAccessList) (*types.Block, error) {
if len(body.Withdrawals) > 0 {
return nil, errors.New("clique does not support withdrawals")
}
@ -591,6 +593,9 @@ func (c *Clique) FinalizeAndAssemble(chain consensus.ChainHeaderReader, header *
// Assign the final state root to header.
header.Root = state.IntermediateRoot(chain.Config().IsEIP158(header.Number))
if onFinalizeAccessList != nil {
panic("access list embedding not enabled for clique consensus")
}
// Assemble and return the final block for sealing.
return types.NewBlock(header, &types.Body{Transactions: body.Transactions}, receipts, trie.NewStackTrie(nil)), nil
}

View file

@ -18,6 +18,7 @@
package consensus
import (
"github.com/ethereum/go-ethereum/core/types/bal"
"math/big"
"github.com/ethereum/go-ethereum/common"
@ -85,14 +86,14 @@ type Engine interface {
//
// Note: The state database might be updated to reflect any consensus rules
// that happen at finalization (e.g. block rewards).
Finalize(chain ChainHeaderReader, header *types.Header, state vm.StateDB, body *types.Body)
Finalize(chain ChainHeaderReader, header *types.Header, state vm.StateDB, body *types.Body) bal.StateMutations
// FinalizeAndAssemble runs any post-transaction state modifications (e.g. block
// rewards or process withdrawals) and assembles the final block.
//
// Note: The block header and state database might be updated to reflect any
// consensus rules that happen at finalization (e.g. block rewards).
FinalizeAndAssemble(chain ChainHeaderReader, header *types.Header, state *state.StateDB, body *types.Body, receipts []*types.Receipt) (*types.Block, error)
FinalizeAndAssemble(chain ChainHeaderReader, header *types.Header, state *state.StateDB, body *types.Body, receipts []*types.Receipt, onFinalizeAccessList func(mutations bal.StateMutations) *bal.BlockAccessList) (*types.Block, error)
// Seal generates a new sealing request for the given input block and pushes
// the result into the given channel.

View file

@ -19,6 +19,7 @@ package ethash
import (
"errors"
"fmt"
"github.com/ethereum/go-ethereum/core/types/bal"
"math/big"
"time"
@ -506,14 +507,15 @@ func (ethash *Ethash) Prepare(chain consensus.ChainHeaderReader, header *types.H
}
// Finalize implements consensus.Engine, accumulating the block and uncle rewards.
func (ethash *Ethash) Finalize(chain consensus.ChainHeaderReader, header *types.Header, state vm.StateDB, body *types.Body) {
func (ethash *Ethash) Finalize(chain consensus.ChainHeaderReader, header *types.Header, state vm.StateDB, body *types.Body) (mut bal.StateMutations) {
// Accumulate any block and uncle rewards
accumulateRewards(chain.Config(), state, header, body.Uncles)
return
}
// FinalizeAndAssemble implements consensus.Engine, accumulating the block and
// uncle rewards, setting the final state and assembling the block.
func (ethash *Ethash) FinalizeAndAssemble(chain consensus.ChainHeaderReader, header *types.Header, state *state.StateDB, body *types.Body, receipts []*types.Receipt) (*types.Block, error) {
func (ethash *Ethash) FinalizeAndAssemble(chain consensus.ChainHeaderReader, header *types.Header, state *state.StateDB, body *types.Body, receipts []*types.Receipt, onFinalizeAccessList func(withdrawalMut bal.StateMutations) *bal.BlockAccessList) (*types.Block, error) {
if len(body.Withdrawals) > 0 {
return nil, errors.New("ethash does not support withdrawals")
}
@ -523,6 +525,9 @@ func (ethash *Ethash) FinalizeAndAssemble(chain consensus.ChainHeaderReader, hea
// Assign the final state root to header.
header.Root = state.IntermediateRoot(chain.Config().IsEIP158(header.Number))
if onFinalizeAccessList != nil {
panic("access list embedding not supported for ethash consenus")
}
// Header seems complete, assemble into a block and return
return types.NewBlock(header, &types.Body{Transactions: body.Transactions, Uncles: body.Uncles}, receipts, trie.NewStackTrie(nil)), nil
}

View file

@ -69,6 +69,8 @@ func latestBlobConfig(cfg *params.ChainConfig, time uint64) (BlobConfig, error)
bc = s.BPO4
case cfg.IsBPO3(london, time) && s.BPO3 != nil:
bc = s.BPO3
case cfg.IsAmsterdam(london, time) && s.Amsterdam != nil:
bc = s.Amsterdam
case cfg.IsBPO2(london, time) && s.BPO2 != nil:
bc = s.BPO2
case cfg.IsBPO1(london, time) && s.BPO1 != nil:

View file

@ -19,7 +19,6 @@ package core
import (
"errors"
"fmt"
"github.com/ethereum/go-ethereum/consensus"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
@ -111,6 +110,30 @@ func (v *BlockValidator) ValidateBody(block *types.Block) error {
}
}
// block access lists must be present after the Amsterdam hard fork
if v.config.IsAmsterdam(block.Number(), block.Time()) {
if block.Header().BlockAccessListHash == nil {
// TODO: verify that this check isn't also done elsewhere
return fmt.Errorf("block access list hash not set in header")
}
if block.AccessList() != nil {
if *block.Header().BlockAccessListHash != block.AccessList().Hash() {
return fmt.Errorf("access list hash mismatch. local: %x. remote: %x\n", block.AccessList().Hash(), *block.Header().BlockAccessListHash)
} else if err := block.AccessList().Validate(len(block.Transactions())); err != nil {
return fmt.Errorf("invalid block access list: %v", err)
}
} else {
//panic("TODO: implement local access list construction path if importing a block without an access list")
}
} else {
// if experimental.bal is not enabled, block headers cannot have access list hash and bodies cannot have access lists.
if block.AccessList() != nil {
return fmt.Errorf("access list not allowed in block body if not in amsterdam or experimental.bal is set")
} else if block.Header().BlockAccessListHash != nil {
return fmt.Errorf("access list hash in block header not allowed when experimental.bal is set")
}
}
// Ancestor block must be known.
if !v.bc.HasBlockAndState(block.ParentHash(), block.NumberU64()-1) {
if !v.bc.HasBlock(block.ParentHash(), block.NumberU64()-1) {
@ -123,7 +146,7 @@ func (v *BlockValidator) ValidateBody(block *types.Block) error {
// ValidateState validates the various changes that happen after a state transition,
// such as amount of used gas, the receipt roots and the state root itself.
func (v *BlockValidator) ValidateState(block *types.Block, statedb *state.StateDB, res *ProcessResult, stateless bool) error {
func (v *BlockValidator) ValidateState(block *types.Block, stateTransition state.BlockStateTransition, res *ProcessResult, stateless bool) error {
if res == nil {
return errors.New("nil ProcessResult value")
}
@ -160,10 +183,11 @@ func (v *BlockValidator) ValidateState(block *types.Block, statedb *state.StateD
} else if res.Requests != nil {
return errors.New("block has requests before prague fork")
}
// Validate the state root against the received state root and throw
// an error if they don't match.
if root := statedb.IntermediateRoot(v.config.IsEIP158(header.Number)); header.Root != root {
return fmt.Errorf("invalid merkle root (remote: %x local: %x) dberr: %w", header.Root, root, statedb.Error())
if root := stateTransition.IntermediateRoot(v.config.IsEIP158(header.Number)); header.Root != root {
return fmt.Errorf("invalid merkle root (remote: %x local: %x) dberr: %w", header.Root, root, stateTransition.Error())
}
return nil
}

View file

@ -21,6 +21,7 @@ import (
"context"
"errors"
"fmt"
"github.com/ethereum/go-ethereum/core/types/bal"
"io"
"math/big"
"runtime"
@ -103,6 +104,21 @@ var (
blockExecutionTimer = metrics.NewRegisteredResettingTimer("chain/execution", nil)
blockWriteTimer = metrics.NewRegisteredResettingTimer("chain/write", nil)
// BALspecific timers
blockPreprocessingTimer = metrics.NewRegisteredResettingTimer("chain/preprocess", nil)
txExecutionTimer = metrics.NewRegisteredResettingTimer("chain/txexecution", nil)
stateTrieHashTimer = metrics.NewRegisteredResettingTimer("chain/statetriehash", nil)
accountTriesUpdateTimer = metrics.NewRegisteredResettingTimer("chain/accounttriesupdate", nil)
stateTriePrefetchTimer = metrics.NewRegisteredResettingTimer("chain/statetrieprefetch", nil)
stateTrieUpdateTimer = metrics.NewRegisteredResettingTimer("chain/statetrieupdate", nil)
originStorageLoadTimer = metrics.NewRegisteredResettingTimer("chain/originstorageload", nil)
stateRootComputeTimer = metrics.NewRegisteredResettingTimer("chain/staterootcompute", nil)
stateCommitTimer = metrics.NewRegisteredResettingTimer("chain/statetriecommit", nil)
blockPostprocessingTimer = metrics.NewRegisteredResettingTimer("chain/postprocess", nil)
blockReorgMeter = metrics.NewRegisteredMeter("chain/reorg/executes", nil)
blockReorgAddMeter = metrics.NewRegisteredMeter("chain/reorg/add", nil)
blockReorgDropMeter = metrics.NewRegisteredMeter("chain/reorg/drop", nil)
@ -163,6 +179,12 @@ const (
BlockChainVersion uint64 = 9
)
const (
BALExecutionModeFull = 0
BALExecutionModeNoBatchIO = iota
BALExecutionModeSequential = iota
)
// BlockChainConfig contains the configuration of the BlockChain object.
type BlockChainConfig struct {
// Trie database related options
@ -219,6 +241,8 @@ type BlockChainConfig struct {
// detailed statistics will be logged. Negative value means disabled (default),
// zero logs all blocks, positive value filters blocks by execution time.
SlowBlockThreshold time.Duration
BALExecutionMode int
}
// DefaultConfig returns the default config.
@ -357,12 +381,13 @@ type BlockChain struct {
stopping atomic.Bool // false if chain is running, true when stopped
procInterrupt atomic.Bool // interrupt signaler for block processing
engine consensus.Engine
validator Validator // Block and state validator interface
prefetcher Prefetcher
processor Processor // Block transaction processor interface
logger *tracing.Hooks
stateSizer *state.SizeTracker // State size tracking
engine consensus.Engine
validator Validator // Block and state validator interface
prefetcher Prefetcher
processor Processor // Block transaction processor interface
parallelProcessor ParallelStateProcessor
logger *tracing.Hooks
stateSizer *state.SizeTracker // State size tracking
lastForkReadyAlert time.Time // Last time there was a fork readiness print out
slowBlockThreshold time.Duration // Block execution time threshold beyond which detailed statistics will be logged
@ -424,6 +449,7 @@ func NewBlockChain(db ethdb.Database, genesis *Genesis, engine consensus.Engine,
bc.validator = NewBlockValidator(chainConfig, bc)
bc.prefetcher = newStatePrefetcher(chainConfig, bc.hc)
bc.processor = NewStateProcessor(bc.hc)
bc.parallelProcessor = NewParallelStateProcessor(bc.hc, &cfg.VmConfig)
genesisHeader := bc.GetHeaderByNumber(0)
if genesisHeader == nil {
@ -571,6 +597,113 @@ func NewBlockChain(db ethdb.Database, genesis *Genesis, engine consensus.Engine,
}
return bc, nil
}
func (bc *BlockChain) processBlockWithAccessList(parentRoot common.Hash, block *types.Block, setHead bool) (procRes *blockProcessingResult, blockEndErr error) {
var (
startTime = time.Now()
procTime time.Duration
)
useAsyncReads := bc.cfg.BALExecutionMode != BALExecutionModeNoBatchIO
al := block.AccessList() // TODO: make the return of this method not be a pointer
accessListReader := bal.NewAccessListReader(*al)
prefetchReader, err := bc.statedb.ReaderEIP7928(parentRoot, accessListReader.StorageKeys(useAsyncReads), runtime.NumCPU())
if err != nil {
return nil, err
}
stateTransition, err := state.NewBALStateTransition(block, prefetchReader, bc.statedb, parentRoot)
if err != nil {
return nil, err
}
statedb, err := state.NewWithReader(parentRoot, bc.statedb, prefetchReader)
if bc.logger != nil && bc.logger.OnBlockStart != nil {
bc.logger.OnBlockStart(tracing.BlockEvent{
Block: block,
Finalized: bc.CurrentFinalBlock(),
Safe: bc.CurrentSafeBlock(),
})
}
if bc.logger != nil && bc.logger.OnBlockEnd != nil {
defer func() {
bc.logger.OnBlockEnd(blockEndErr)
}()
}
res, err := bc.parallelProcessor.Process(block, stateTransition, statedb, bc.cfg.VmConfig)
if err != nil {
return nil, err
}
if err := bc.validator.ValidateState(block, stateTransition, res.ProcessResult, false); err != nil {
return nil, err
}
procTime = time.Since(startTime)
writeStart := time.Now()
// Write the block to the chain and get the status.
var (
//wstart = time.Now()
status WriteStatus
)
if !setHead {
// Don't set the head, only insert the block
err = bc.writeBlockWithState(block, res.ProcessResult.Receipts, stateTransition)
} else {
status, err = bc.writeBlockAndSetHead(block, res.ProcessResult.Receipts, res.ProcessResult.Logs, stateTransition, false)
}
if err != nil {
return nil, err
}
writeTime := time.Since(writeStart)
var stats ExecuteStats
/*
// TODO: implement the gathering of this data
stats.AccountReads = statedb.AccountReads // Account reads are complete(in processing)
stats.StorageReads = statedb.StorageReads // Storage reads are complete(in processing)
stats.AccountUpdates = statedb.AccountUpdates // Account updates are complete(in validation)
stats.StorageUpdates = statedb.StorageUpdates // Storage updates are complete(in validation)
stats.AccountHashes = statedb.AccountHashes // Account hashes are complete(in validation)
stats.CodeReads = statedb.CodeReads
stats.AccountLoaded = statedb.AccountLoaded
stats.AccountUpdated = statedb.AccountUpdated
stats.AccountDeleted = statedb.AccountDeleted
stats.StorageLoaded = statedb.StorageLoaded
stats.StorageUpdated = int(statedb.StorageUpdated.Load())
stats.StorageDeleted = int(statedb.StorageDeleted.Load())
stats.CodeLoaded = statedb.CodeLoaded
stats.CodeLoadBytes = statedb.CodeLoadBytes
stats.Execution = ptime - (statedb.AccountReads + statedb.StorageReads + statedb.CodeReads) // The time spent on EVM processing
stats.Validation = vtime - (statedb.AccountHashes + statedb.AccountUpdates + statedb.StorageUpdates) // The time spent on block validation
*/
// Update the metrics touched during block commit
stats.AccountCommits = stateTransition.Metrics().AccountCommits
stats.StorageCommits = stateTransition.Metrics().StorageCommits
stats.SnapshotCommit = stateTransition.Metrics().SnapshotCommits
stats.TrieDBCommit = stateTransition.Metrics().TrieDBCommits
// stats.StateReadCacheStats = whichReader.GetStats()
// ^ TODO fix this
elapsed := time.Since(startTime) + 1 // prevent zero division
stats.TotalTime = elapsed
stats.MgasPerSecond = float64(res.ProcessResult.GasUsed) * 1000 / float64(elapsed)
stats.BlockWrite = writeTime
stats.balTransitionStats = res.StateTransitionMetrics
return &blockProcessingResult{
usedGas: res.ProcessResult.GasUsed,
procTime: procTime,
status: status,
witness: nil,
stats: &stats,
}, nil
}
func (bc *BlockChain) setupSnapshot() {
// Short circuit if the chain is established with path scheme, as the
@ -1641,7 +1774,7 @@ func (bc *BlockChain) writeKnownBlock(block *types.Block) error {
// writeBlockWithState writes block, metadata and corresponding state data to the
// database.
func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types.Receipt, statedb *state.StateDB) error {
func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types.Receipt, transition state.BlockStateTransition) error {
if !bc.HasHeader(block.ParentHash(), block.NumberU64()-1) {
return consensus.ErrUnknownAncestor
}
@ -1655,7 +1788,7 @@ func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types.
)
rawdb.WriteBlock(batch, block)
rawdb.WriteReceipts(batch, block.Hash(), block.NumberU64(), receipts)
rawdb.WritePreimages(batch, statedb.Preimages())
rawdb.WritePreimages(batch, transition.Preimages())
if err := batch.Write(); err != nil {
log.Crit("Failed to write block into disk", "err", err)
}
@ -1670,7 +1803,7 @@ func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types.
hasStateSizer = bc.stateSizer != nil
)
if hasStateHook || hasStateSizer {
r, update, err := statedb.CommitWithUpdate(block.NumberU64(), isEIP158, isCancun)
r, update, err := transition.CommitWithUpdate(block.NumberU64(), isEIP158, isCancun)
if err != nil {
return err
}
@ -1686,7 +1819,7 @@ func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types.
}
root = r
} else {
root, err = statedb.Commit(block.NumberU64(), isEIP158, isCancun)
root, err = transition.Commit(block.NumberU64(), isEIP158, isCancun)
if err != nil {
return err
}
@ -1753,7 +1886,7 @@ func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types.
// writeBlockAndSetHead is the internal implementation of WriteBlockAndSetHead.
// This function expects the chain mutex to be held.
func (bc *BlockChain) writeBlockAndSetHead(block *types.Block, receipts []*types.Receipt, logs []*types.Log, state *state.StateDB, emitHeadEvent bool) (status WriteStatus, err error) {
func (bc *BlockChain) writeBlockAndSetHead(block *types.Block, receipts []*types.Receipt, logs []*types.Log, state state.BlockStateTransition, emitHeadEvent bool) (status WriteStatus, err error) {
if err := bc.writeBlockWithState(block, receipts, state); err != nil {
return NonStatTy, err
}
@ -1990,11 +2123,16 @@ func (bc *BlockChain) insertChain(ctx context.Context, chain types.Blocks, setHe
}
// The traced section of block import.
start := time.Now()
blockHasAccessList := block.AccessList() != nil
res, err := bc.ProcessBlock(ctx, parent.Root, block, setHead, makeWitness && len(chain) == 1)
if err != nil {
return nil, it.index, err
}
res.stats.reportMetrics()
if blockHasAccessList && bc.cfg.BALExecutionMode != BALExecutionModeSequential {
res.stats.reportBALMetrics()
} else {
res.stats.reportMetrics()
}
// Log slow block only if a single block is inserted (usually after the
// initial sync) to not overwhelm the users.
@ -2076,6 +2214,16 @@ func (bpr *blockProcessingResult) Stats() *ExecuteStats {
// ProcessBlock executes and validates the given block. If there was no error
// it writes the block and associated state to database.
func (bc *BlockChain) ProcessBlock(ctx context.Context, parentRoot common.Hash, block *types.Block, setHead bool, makeWitness bool) (result *blockProcessingResult, blockEndErr error) {
isAmsterdam := bc.chainConfig.IsAmsterdam(block.Number(), block.Time())
// TODO: need to check that the block is also postcancun if it contained an access list?
// this should be checked during decoding (?)
blockHasAccessList := block.AccessList() != nil
// optimized execution path for blocks which contain BALs
if blockHasAccessList && bc.cfg.BALExecutionMode != BALExecutionModeSequential {
return bc.processBlockWithAccessList(parentRoot, block, setHead)
}
var (
err error
startTime = time.Now()
@ -2085,9 +2233,22 @@ func (bc *BlockChain) ProcessBlock(ctx context.Context, parentRoot common.Hash,
defer interrupt.Store(true) // terminate the prefetch at the end
if bc.cfg.NoPrefetch {
statedb, err = state.New(parentRoot, bc.statedb)
if err != nil {
return nil, err
if isAmsterdam {
reader, err := bc.statedb.Reader(parentRoot)
if err != nil {
return nil, err
}
readerTracker := state.NewReaderWithTracker(reader)
statedb, err = state.NewWithReader(parentRoot, bc.statedb, readerTracker)
if err != nil {
return nil, err
}
} else {
statedb, err = state.New(parentRoot, bc.statedb)
if err != nil {
return nil, err
}
}
} else {
// If prefetching is enabled, run that against the current state to pre-cache
@ -2095,7 +2256,7 @@ func (bc *BlockChain) ProcessBlock(ctx context.Context, parentRoot common.Hash,
//
// Note: the main processor and prefetcher share the same reader with a local
// cache for mitigating the overhead of state access.
prefetch, process, err := bc.statedb.ReadersWithCacheStats(parentRoot)
prefetch, process, err := bc.statedb.ReadersWithCache(parentRoot)
if err != nil {
return nil, err
}
@ -2103,6 +2264,9 @@ func (bc *BlockChain) ProcessBlock(ctx context.Context, parentRoot common.Hash,
if err != nil {
return nil, err
}
if isAmsterdam {
process = state.NewReaderWithTracker(process)
}
statedb, err = state.NewWithReader(parentRoot, bc.statedb, process)
if err != nil {
return nil, err
@ -2110,15 +2274,22 @@ func (bc *BlockChain) ProcessBlock(ctx context.Context, parentRoot common.Hash,
// Upload the statistics of reader at the end
defer func() {
if result != nil {
result.stats.StatePrefetchCacheStats = prefetch.GetStats()
result.stats.StateReadCacheStats = process.GetStats()
if stater, ok := prefetch.(state.ReaderStater); ok {
result.stats.StatePrefetchCacheStats = stater.GetStats()
}
if stater, ok := process.(state.ReaderStater); ok {
result.stats.StateReadCacheStats = stater.GetStats()
}
}
}()
go func(start time.Time, throwaway *state.StateDB, block *types.Block) {
// Disable tracing for prefetcher executions.
vmCfg := bc.cfg.VmConfig
vmCfg.Tracer = nil
bc.prefetcher.Prefetch(block, throwaway, vmCfg, &interrupt)
if block.AccessList() == nil {
// only use the state prefetcher for non-BAL blocks.
bc.prefetcher.Prefetch(block, throwaway, vmCfg, &interrupt)
}
blockPrefetchExecuteTimer.Update(time.Since(start))
if interrupt.Load() {
@ -2147,6 +2318,7 @@ func (bc *BlockChain) ProcessBlock(ctx context.Context, parentRoot common.Hash,
witnessStats = stateless.NewWitnessStats()
}
}
statedb.StartPrefetcher("chain", witness, witnessStats)
defer statedb.StopPrefetcher()
}
@ -2164,16 +2336,19 @@ func (bc *BlockChain) ProcessBlock(ctx context.Context, parentRoot common.Hash,
}()
}
var res *ProcessResult
var ptime, vtime time.Duration
// Process block using the parent state as reference point
pstart := time.Now()
pctx, _, spanEnd := telemetry.StartSpan(ctx, "bc.processor.Process")
res, err := bc.processor.Process(pctx, block, statedb, bc.cfg.VmConfig)
res, err = bc.processor.Process(pctx, block, statedb, bc.cfg.VmConfig)
spanEnd(&err)
if err != nil {
bc.reportBadBlock(block, res, err)
return nil, err
}
ptime := time.Since(pstart)
ptime = time.Since(pstart)
vstart := time.Now()
_, _, spanEnd = telemetry.StartSpan(ctx, "bc.validator.ValidateState")
@ -2183,7 +2358,28 @@ func (bc *BlockChain) ProcessBlock(ctx context.Context, parentRoot common.Hash,
bc.reportBadBlock(block, res, err)
return nil, err
}
vtime := time.Since(vstart)
vtime = time.Since(vstart)
if isAmsterdam {
computedAccessList := res.AccessList.ToEncodingObj()
computedAccessListHash := computedAccessList.Hash()
if *block.Header().BlockAccessListHash != computedAccessListHash {
//fmt.Printf("remote:\n%s\nlocal:\n%s\n", block.Body().AccessList.JSONString(), computedAccessList.JSONString())
err := fmt.Errorf("block header access list hash mismatch with computed (header=%x computed=%x)", *block.Header().BlockAccessListHash, computedAccessListHash)
bc.reportBadBlock(block, res, err)
return nil, err
}
if block.AccessList() == nil {
// attach the computed access list to the block so it gets persisted
// when the block is written to disk
block = block.WithAccessList(computedAccessList)
} else if block.AccessList().Hash() != computedAccessListHash {
err := fmt.Errorf("block access list hash mismatch (remote=%x computed=%x)", block.AccessList().Hash(), computedAccessListHash)
bc.reportBadBlock(block, res, err)
return nil, err
}
}
// If witnesses was generated and stateless self-validation requested, do
// that now. Self validation should *never* run in production, it's more of
@ -2775,6 +2971,10 @@ func (bc *BlockChain) reportBadBlock(block *types.Block, res *ProcessResult, err
log.Error(summarizeBadBlock(block, receipts, bc.Config(), err))
}
func (bc *BlockChain) reportBALBlock(block *types.Block, res *ProcessResult, err error) {
}
// logForkReadiness will write a log when a future fork is scheduled, but not
// active. This is useful so operators know their client is ready for the fork.
func (bc *BlockChain) logForkReadiness(block *types.Block) {

View file

@ -61,6 +61,9 @@ type ExecuteStats struct {
// Cache hit rates
StateReadCacheStats state.ReaderStats
StatePrefetchCacheStats state.ReaderStats
// Stats specific to BAL state update
balTransitionStats *state.BALStateTransitionMetrics
}
// reportMetrics uploads execution statistics to the metrics system.
@ -94,15 +97,15 @@ func (s *ExecuteStats) reportMetrics() {
chainMgaspsMeter.Update(time.Duration(s.MgasPerSecond)) // TODO(rjl493456442) generalize the ResettingTimer
// Cache hit rates
accountCacheHitPrefetchMeter.Mark(s.StatePrefetchCacheStats.AccountCacheHit)
accountCacheMissPrefetchMeter.Mark(s.StatePrefetchCacheStats.AccountCacheMiss)
storageCacheHitPrefetchMeter.Mark(s.StatePrefetchCacheStats.StorageCacheHit)
storageCacheMissPrefetchMeter.Mark(s.StatePrefetchCacheStats.StorageCacheMiss)
accountCacheHitPrefetchMeter.Mark(s.StatePrefetchCacheStats.StateStats.AccountCacheHit)
accountCacheMissPrefetchMeter.Mark(s.StatePrefetchCacheStats.StateStats.AccountCacheMiss)
storageCacheHitPrefetchMeter.Mark(s.StatePrefetchCacheStats.StateStats.StorageCacheHit)
storageCacheMissPrefetchMeter.Mark(s.StatePrefetchCacheStats.StateStats.StorageCacheMiss)
accountCacheHitMeter.Mark(s.StateReadCacheStats.AccountCacheHit)
accountCacheMissMeter.Mark(s.StateReadCacheStats.AccountCacheMiss)
storageCacheHitMeter.Mark(s.StateReadCacheStats.StorageCacheHit)
storageCacheMissMeter.Mark(s.StateReadCacheStats.StorageCacheMiss)
accountCacheHitMeter.Mark(s.StateReadCacheStats.StateStats.AccountCacheHit)
accountCacheMissMeter.Mark(s.StateReadCacheStats.StateStats.AccountCacheMiss)
storageCacheHitMeter.Mark(s.StateReadCacheStats.StateStats.StorageCacheHit)
storageCacheMissMeter.Mark(s.StateReadCacheStats.StateStats.StorageCacheMiss)
}
// slowBlockLog represents the JSON structure for slow block logging.
@ -238,14 +241,14 @@ func (s *ExecuteStats) logSlow(block *types.Block, slowBlockThreshold time.Durat
},
Cache: slowBlockCache{
Account: slowBlockCacheEntry{
Hits: s.StateReadCacheStats.AccountCacheHit,
Misses: s.StateReadCacheStats.AccountCacheMiss,
HitRate: calculateHitRate(s.StateReadCacheStats.AccountCacheHit, s.StateReadCacheStats.AccountCacheMiss),
Hits: s.StateReadCacheStats.StateStats.AccountCacheHit,
Misses: s.StateReadCacheStats.StateStats.AccountCacheMiss,
HitRate: calculateHitRate(s.StateReadCacheStats.StateStats.AccountCacheHit, s.StateReadCacheStats.StateStats.AccountCacheMiss),
},
Storage: slowBlockCacheEntry{
Hits: s.StateReadCacheStats.StorageCacheHit,
Misses: s.StateReadCacheStats.StorageCacheMiss,
HitRate: calculateHitRate(s.StateReadCacheStats.StorageCacheHit, s.StateReadCacheStats.StorageCacheMiss),
Hits: s.StateReadCacheStats.StateStats.StorageCacheHit,
Misses: s.StateReadCacheStats.StateStats.StorageCacheMiss,
HitRate: calculateHitRate(s.StateReadCacheStats.StateStats.StorageCacheHit, s.StateReadCacheStats.StateStats.StorageCacheMiss),
},
Code: slowBlockCodeCacheEntry{
Hits: s.StateReadCacheStats.CodeStats.CacheHit,
@ -263,3 +266,53 @@ func (s *ExecuteStats) logSlow(block *types.Block, slowBlockThreshold time.Durat
}
log.Warn(string(jsonBytes))
}
func (s *ExecuteStats) reportBALMetrics() {
/*
if s.AccountLoaded != 0 {
accountReadTimer.Update(s.AccountReads)
accountReadSingleTimer.Update(s.AccountReads / time.Duration(s.AccountLoaded))
}
if s.StorageLoaded != 0 {
storageReadTimer.Update(s.StorageReads)
storageReadSingleTimer.Update(s.StorageReads / time.Duration(s.StorageLoaded))
}
if s.CodeLoaded != 0 {
codeReadTimer.Update(s.CodeReads)
codeReadSingleTimer.Update(s.CodeReads / time.Duration(s.CodeLoaded))
codeReadBytesTimer.Update(time.Duration(s.CodeLoadBytes))
}
// TODO: implement these ^
*/
//accountUpdateTimer.Update(s.AccountUpdates) // Account updates are complete(in validation)
//storageUpdateTimer.Update(s.StorageUpdates) // Storage updates are complete(in validation)
//accountHashTimer.Update(s.AccountHashes) // Account hashes are complete(in validation)
accountCommitTimer.Update(s.AccountCommits) // Account commits are complete, we can mark them
storageCommitTimer.Update(s.StorageCommits) // Storage commits are complete, we can mark them
stateTriePrefetchTimer.Update(s.balTransitionStats.StatePrefetch)
accountTriesUpdateTimer.Update(s.balTransitionStats.AccountUpdate)
stateTrieUpdateTimer.Update(s.balTransitionStats.StateUpdate)
stateTrieHashTimer.Update(s.balTransitionStats.StateHash)
stateRootComputeTimer.Update(s.balTransitionStats.AccountUpdate + s.balTransitionStats.StateUpdate + s.balTransitionStats.StateHash)
//blockExecutionTimer.Update(s.Execution) // The time spent on EVM processing
// ^basically impossible to get this metric with parallel execution
//blockValidationTimer.Update(s.Validation) // The time spent on block validation
//blockCrossValidationTimer.Update(s.CrossValidation) // The time spent on stateless cross validation
snapshotCommitTimer.Update(s.SnapshotCommit) // Snapshot commits are complete, we can mark them
triedbCommitTimer.Update(s.TrieDBCommit) // Trie database commits are complete, we can mark them
blockWriteTimer.Update(s.BlockWrite) // The time spent on block write
blockInsertTimer.Update(s.TotalTime) // The total time spent on block execution
chainMgaspsMeter.Update(time.Duration(s.MgasPerSecond)) // TODO(rjl493456442) generalize the ResettingTimer
// Cache hit rates
accountCacheHitMeter.Mark(s.StateReadCacheStats.StateStats.AccountCacheHit)
accountCacheMissMeter.Mark(s.StateReadCacheStats.StateStats.AccountCacheMiss)
storageCacheHitMeter.Mark(s.StateReadCacheStats.StateStats.StorageCacheHit)
storageCacheMissMeter.Mark(s.StateReadCacheStats.StateStats.StorageCacheMiss)
}

View file

@ -166,7 +166,7 @@ func testBlockChainImport(chain types.Blocks, blockchain *BlockChain) error {
blockchain.reportBadBlock(block, res, err)
return err
}
err = blockchain.validator.ValidateState(block, statedb, res, false)
err = blockchain.validator.ValidateState(block, statedb, res, true, false)
if err != nil {
blockchain.reportBadBlock(block, res, err)
return err

View file

@ -117,7 +117,7 @@ func (b *BlockGen) addTx(bc *BlockChain, vmConfig vm.Config, tx *types.Transacti
evm = vm.NewEVM(blockContext, b.statedb, b.cm.config, vmConfig)
)
b.statedb.SetTxContext(tx.Hash(), len(b.txs))
receipt, err := ApplyTransaction(evm, b.gasPool, b.statedb, b.header, tx)
_, receipt, err := ApplyTransaction(evm, b.gasPool, b.statedb, b.header, tx)
if err != nil {
panic(err)
}
@ -329,11 +329,11 @@ func (b *BlockGen) collectRequests(readonly bool) (requests [][]byte) {
blockContext := NewEVMBlockContext(b.header, b.cm, &b.header.Coinbase)
evm := vm.NewEVM(blockContext, statedb, b.cm.config, vm.Config{})
// EIP-7002
if err := ProcessWithdrawalQueue(&requests, evm); err != nil {
if _, err := ProcessWithdrawalQueue(&requests, evm); err != nil {
panic(fmt.Sprintf("could not process withdrawal requests: %v", err))
}
// EIP-7251
if err := ProcessConsolidationQueue(&requests, evm); err != nil {
if _, err := ProcessConsolidationQueue(&requests, evm); err != nil {
panic(fmt.Sprintf("could not process consolidation requests: %v", err))
}
}
@ -411,7 +411,7 @@ func GenerateChain(config *params.ChainConfig, parent *types.Block, engine conse
}
body := types.Body{Transactions: b.txs, Uncles: b.uncles, Withdrawals: b.withdrawals}
block, err := b.engine.FinalizeAndAssemble(cm, b.header, statedb, &body, b.receipts)
block, err := b.engine.FinalizeAndAssemble(cm, b.header, statedb, &body, b.receipts, nil)
if err != nil {
panic(err)
}

View file

@ -19,22 +19,23 @@ var _ = (*genesisSpecMarshaling)(nil)
// MarshalJSON marshals as JSON.
func (g Genesis) MarshalJSON() ([]byte, error) {
type Genesis struct {
Config *params.ChainConfig `json:"config"`
Nonce math.HexOrDecimal64 `json:"nonce"`
Timestamp math.HexOrDecimal64 `json:"timestamp"`
ExtraData hexutil.Bytes `json:"extraData"`
GasLimit math.HexOrDecimal64 `json:"gasLimit" gencodec:"required"`
Difficulty *math.HexOrDecimal256 `json:"difficulty" gencodec:"required"`
Mixhash common.Hash `json:"mixHash"`
Coinbase common.Address `json:"coinbase"`
Alloc map[common.UnprefixedAddress]types.Account `json:"alloc" gencodec:"required"`
Number math.HexOrDecimal64 `json:"number"`
GasUsed math.HexOrDecimal64 `json:"gasUsed"`
ParentHash common.Hash `json:"parentHash"`
BaseFee *math.HexOrDecimal256 `json:"baseFeePerGas"`
ExcessBlobGas *math.HexOrDecimal64 `json:"excessBlobGas"`
BlobGasUsed *math.HexOrDecimal64 `json:"blobGasUsed"`
SlotNumber *uint64 `json:"slotNumber"`
Config *params.ChainConfig `json:"config"`
Nonce math.HexOrDecimal64 `json:"nonce"`
Timestamp math.HexOrDecimal64 `json:"timestamp"`
ExtraData hexutil.Bytes `json:"extraData"`
GasLimit math.HexOrDecimal64 `json:"gasLimit" gencodec:"required"`
Difficulty *math.HexOrDecimal256 `json:"difficulty" gencodec:"required"`
Mixhash common.Hash `json:"mixHash"`
Coinbase common.Address `json:"coinbase"`
Alloc map[common.UnprefixedAddress]types.Account `json:"alloc" gencodec:"required"`
Number math.HexOrDecimal64 `json:"number"`
GasUsed math.HexOrDecimal64 `json:"gasUsed"`
ParentHash common.Hash `json:"parentHash"`
BaseFee *math.HexOrDecimal256 `json:"baseFeePerGas"`
ExcessBlobGas *math.HexOrDecimal64 `json:"excessBlobGas"`
BlobGasUsed *math.HexOrDecimal64 `json:"blobGasUsed"`
BlockAccessListHash *common.Hash `json:"blockAccessListHash,omitempty"`
SlotNumber *uint64 `json:"slotNumber"`
}
var enc Genesis
enc.Config = g.Config
@ -57,6 +58,7 @@ func (g Genesis) MarshalJSON() ([]byte, error) {
enc.BaseFee = (*math.HexOrDecimal256)(g.BaseFee)
enc.ExcessBlobGas = (*math.HexOrDecimal64)(g.ExcessBlobGas)
enc.BlobGasUsed = (*math.HexOrDecimal64)(g.BlobGasUsed)
enc.BlockAccessListHash = g.BlockAccessListHash
enc.SlotNumber = g.SlotNumber
return json.Marshal(&enc)
}
@ -64,22 +66,23 @@ func (g Genesis) MarshalJSON() ([]byte, error) {
// UnmarshalJSON unmarshals from JSON.
func (g *Genesis) UnmarshalJSON(input []byte) error {
type Genesis struct {
Config *params.ChainConfig `json:"config"`
Nonce *math.HexOrDecimal64 `json:"nonce"`
Timestamp *math.HexOrDecimal64 `json:"timestamp"`
ExtraData *hexutil.Bytes `json:"extraData"`
GasLimit *math.HexOrDecimal64 `json:"gasLimit" gencodec:"required"`
Difficulty *math.HexOrDecimal256 `json:"difficulty" gencodec:"required"`
Mixhash *common.Hash `json:"mixHash"`
Coinbase *common.Address `json:"coinbase"`
Alloc map[common.UnprefixedAddress]types.Account `json:"alloc" gencodec:"required"`
Number *math.HexOrDecimal64 `json:"number"`
GasUsed *math.HexOrDecimal64 `json:"gasUsed"`
ParentHash *common.Hash `json:"parentHash"`
BaseFee *math.HexOrDecimal256 `json:"baseFeePerGas"`
ExcessBlobGas *math.HexOrDecimal64 `json:"excessBlobGas"`
BlobGasUsed *math.HexOrDecimal64 `json:"blobGasUsed"`
SlotNumber *uint64 `json:"slotNumber"`
Config *params.ChainConfig `json:"config"`
Nonce *math.HexOrDecimal64 `json:"nonce"`
Timestamp *math.HexOrDecimal64 `json:"timestamp"`
ExtraData *hexutil.Bytes `json:"extraData"`
GasLimit *math.HexOrDecimal64 `json:"gasLimit" gencodec:"required"`
Difficulty *math.HexOrDecimal256 `json:"difficulty" gencodec:"required"`
Mixhash *common.Hash `json:"mixHash"`
Coinbase *common.Address `json:"coinbase"`
Alloc map[common.UnprefixedAddress]types.Account `json:"alloc" gencodec:"required"`
Number *math.HexOrDecimal64 `json:"number"`
GasUsed *math.HexOrDecimal64 `json:"gasUsed"`
ParentHash *common.Hash `json:"parentHash"`
BaseFee *math.HexOrDecimal256 `json:"baseFeePerGas"`
ExcessBlobGas *math.HexOrDecimal64 `json:"excessBlobGas"`
BlobGasUsed *math.HexOrDecimal64 `json:"blobGasUsed"`
BlockAccessListHash *common.Hash `json:"blockAccessListHash,omitempty"`
SlotNumber *uint64 `json:"slotNumber"`
}
var dec Genesis
if err := json.Unmarshal(input, &dec); err != nil {
@ -136,6 +139,9 @@ func (g *Genesis) UnmarshalJSON(input []byte) error {
if dec.BlobGasUsed != nil {
g.BlobGasUsed = (*uint64)(dec.BlobGasUsed)
}
if dec.BlockAccessListHash != nil {
g.BlockAccessListHash = dec.BlockAccessListHash
}
if dec.SlotNumber != nil {
g.SlotNumber = dec.SlotNumber
}

View file

@ -67,13 +67,14 @@ type Genesis struct {
// These fields are used for consensus tests. Please don't use them
// in actual genesis blocks.
Number uint64 `json:"number"`
GasUsed uint64 `json:"gasUsed"`
ParentHash common.Hash `json:"parentHash"`
BaseFee *big.Int `json:"baseFeePerGas"` // EIP-1559
ExcessBlobGas *uint64 `json:"excessBlobGas"` // EIP-4844
BlobGasUsed *uint64 `json:"blobGasUsed"` // EIP-4844
SlotNumber *uint64 `json:"slotNumber"` // EIP-7843
Number uint64 `json:"number"`
GasUsed uint64 `json:"gasUsed"`
ParentHash common.Hash `json:"parentHash"`
BaseFee *big.Int `json:"baseFeePerGas"` // EIP-1559
ExcessBlobGas *uint64 `json:"excessBlobGas"` // EIP-4844
BlobGasUsed *uint64 `json:"blobGasUsed"` // EIP-4844
BlockAccessListHash *common.Hash `json:"blockAccessListHash,omitempty"` // EIP-7928
SlotNumber *uint64 `json:"slotNumber"` // EIP-7843
}
// copy copies the genesis.
@ -123,6 +124,7 @@ func ReadGenesis(db ethdb.Database) (*Genesis, error) {
genesis.BaseFee = genesisHeader.BaseFee
genesis.ExcessBlobGas = genesisHeader.ExcessBlobGas
genesis.BlobGasUsed = genesisHeader.BlobGasUsed
genesis.BlockAccessListHash = genesisHeader.BlockAccessListHash
genesis.SlotNumber = genesisHeader.SlotNumber
return &genesis, nil
@ -487,18 +489,19 @@ func (g *Genesis) ToBlock() *types.Block {
// toBlockWithRoot constructs the genesis block with the given genesis state root.
func (g *Genesis) toBlockWithRoot(root common.Hash) *types.Block {
head := &types.Header{
Number: new(big.Int).SetUint64(g.Number),
Nonce: types.EncodeNonce(g.Nonce),
Time: g.Timestamp,
ParentHash: g.ParentHash,
Extra: g.ExtraData,
GasLimit: g.GasLimit,
GasUsed: g.GasUsed,
BaseFee: g.BaseFee,
Difficulty: g.Difficulty,
MixDigest: g.Mixhash,
Coinbase: g.Coinbase,
Root: root,
Number: new(big.Int).SetUint64(g.Number),
Nonce: types.EncodeNonce(g.Nonce),
Time: g.Timestamp,
ParentHash: g.ParentHash,
Extra: g.ExtraData,
GasLimit: g.GasLimit,
GasUsed: g.GasUsed,
BaseFee: g.BaseFee,
Difficulty: g.Difficulty,
MixDigest: g.Mixhash,
Coinbase: g.Coinbase,
BlockAccessListHash: g.BlockAccessListHash,
Root: root,
}
if g.GasLimit == 0 {
head.GasLimit = params.GenesisGasLimit

View file

@ -0,0 +1,397 @@
package core
import (
"cmp"
"fmt"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/types/bal"
"github.com/ethereum/go-ethereum/core/vm"
"golang.org/x/sync/errgroup"
"runtime"
"slices"
"time"
)
// ProcessResultWithMetrics wraps ProcessResult with some metrics that are
// emitted when executing blocks containing access lists.
type ProcessResultWithMetrics struct {
ProcessResult *ProcessResult
PreProcessTime time.Duration
StateTransitionMetrics *state.BALStateTransitionMetrics
// the time it took to execute all txs in the block
ExecTime time.Duration
PostProcessTime time.Duration
}
// ParallelStateProcessor is used to execute and verify blocks containing
// access lists.
type ParallelStateProcessor struct {
*StateProcessor
vmCfg *vm.Config
}
// NewParallelStateProcessor returns a new ParallelStateProcessor instance.
func NewParallelStateProcessor(chain *HeaderChain, vmConfig *vm.Config) ParallelStateProcessor {
res := NewStateProcessor(chain)
return ParallelStateProcessor{
res,
vmConfig,
}
}
func validateStateAccesses(lastIdx int, accessList bal.AccessListReader, localAccesses bal.StateAccesses) bool {
// 1. strip out any state in the localAccesses that was modified
muts := accessList.Mutations(lastIdx + 1)
for acct, mut := range *muts {
if _, exist := localAccesses[acct]; !exist {
continue
}
// delete any storage slots that were mutated from the read set
if len(localAccesses[acct]) > 0 {
for key, _ := range mut.StorageWrites {
if _, ok := localAccesses[acct][key]; ok {
delete(localAccesses[acct], key)
}
}
}
if len(localAccesses[acct]) == 0 {
delete(localAccesses, acct)
}
}
if !accessList.Accesses().Eq(localAccesses) {
return false
}
return true
}
// called by resultHandler when all transactions have successfully executed.
// performs post-tx state transition (system contracts and withdrawals)
// and calculates the ProcessResult, returning it to be sent on resCh
// by resultHandler
func (p *ParallelStateProcessor) prepareExecResult(block *types.Block, tExecStart time.Time, accesses bal.StateAccesses, statedb *state.StateDB, prefetchReader state.Reader, results []txExecResult) *ProcessResultWithMetrics {
tExec := time.Since(tExecStart)
var requests [][]byte
tPostprocessStart := time.Now()
header := block.Header()
context := NewEVMBlockContext(header, p.chain, nil)
lastBALIdx := len(block.Transactions()) + 1
postTxState := statedb.WithReader(state.NewReaderWithTracker(state.NewReaderWithBlockLevelAccessList(prefetchReader, *block.AccessList(), lastBALIdx)))
cfg := vm.Config{
NoBaseFee: p.vmCfg.NoBaseFee,
EnablePreimageRecording: p.vmCfg.EnablePreimageRecording,
ExtraEips: slices.Clone(p.vmCfg.ExtraEips),
StatelessSelfValidation: p.vmCfg.StatelessSelfValidation,
EnableWitnessStats: p.vmCfg.EnableWitnessStats,
}
evm := vm.NewEVM(context, postTxState, p.chainConfig(), cfg)
// 1. order the receipts by tx index
// 2. correctly calculate the cumulative gas used per receipt, returning bad block error if it goes over the allowed
slices.SortFunc(results, func(a, b txExecResult) int {
return cmp.Compare(a.receipt.TransactionIndex, b.receipt.TransactionIndex)
})
var (
// total gas used not applying refunds
blockGas = uint64(0)
// total gas used applying refunds
execGas = uint64(0)
)
var allLogs []*types.Log
var allReceipts []*types.Receipt
for _, result := range results {
blockGas += result.blockGas
execGas += result.execGas
result.receipt.CumulativeGasUsed = blockGas
if blockGas > header.GasLimit {
return &ProcessResultWithMetrics{
ProcessResult: &ProcessResult{Error: fmt.Errorf("gas limit exceeded")},
}
}
allLogs = append(allLogs, result.receipt.Logs...)
allReceipts = append(allReceipts, result.receipt)
}
// Block gas limit is enforced against usedGas (pre-refund after Amsterdam, post-refund before).
if blockGas > header.GasLimit {
return &ProcessResultWithMetrics{
ProcessResult: &ProcessResult{Error: fmt.Errorf("gas limit exceeded")},
}
}
var postMut bal.StateMutations
// Read requests if Prague is enabled.
if p.chainConfig().IsPrague(block.Number(), block.Time()) {
requests = [][]byte{}
var err error
// EIP-6110
if err = ParseDepositLogs(&requests, allLogs, p.chainConfig()); err != nil {
return &ProcessResultWithMetrics{
ProcessResult: &ProcessResult{Error: err},
}
}
// EIP-7002
postMut, err = ProcessWithdrawalQueue(&requests, evm)
if err != nil {
return &ProcessResultWithMetrics{
ProcessResult: &ProcessResult{Error: err},
}
}
// EIP-7251
consolidationMut, err := ProcessConsolidationQueue(&requests, evm)
if err != nil {
return &ProcessResultWithMetrics{
ProcessResult: &ProcessResult{Error: err},
}
}
postMut.Merge(consolidationMut)
}
// Finalize the block, applying any consensus engine specific extras (e.g. block rewards)
postMut.Merge(p.chain.Engine().Finalize(p.chain, header, postTxState, block.Body()))
postTxAccesses := postTxState.Reader().(state.StateReaderTracker).GetStateAccessList()
accessList := bal.NewAccessListReader(*block.AccessList())
if !postMut.Eq(*accessList.MutationsAt(lastBALIdx)) {
return &ProcessResultWithMetrics{
ProcessResult: &ProcessResult{Error: fmt.Errorf("mismatch between local/remote access list mutations for final idx")},
}
}
accesses.Merge(postTxAccesses)
if !validateStateAccesses(lastBALIdx, accessList, accesses) {
return &ProcessResultWithMetrics{
ProcessResult: &ProcessResult{Error: fmt.Errorf("mismatch between local/remote access list for state accesses")},
}
}
tPostprocess := time.Since(tPostprocessStart)
return &ProcessResultWithMetrics{
ProcessResult: &ProcessResult{
Receipts: allReceipts,
Requests: requests,
Logs: allLogs,
GasUsed: execGas,
},
PostProcessTime: tPostprocess,
ExecTime: tExec,
}
}
type txExecResult struct {
idx int // transaction index
receipt *types.Receipt
err error // non-EVM error which would render the block invalid
blockGas uint64
execGas uint64
stateReads bal.StateAccesses
}
// resultHandler polls until all transactions have finished executing and the
// state root calculation is complete. The result is emitted on resCh.
func (p *ParallelStateProcessor) resultHandler(block *types.Block, preTxReads bal.StateAccesses, statedb *state.StateDB, prefetchReader state.Reader, tExecStart time.Time, txResCh <-chan txExecResult, stateRootCalcResCh <-chan stateRootCalculationResult, resCh chan *ProcessResultWithMetrics) {
// 1. if the block has transactions, receive the execution results from all of them and return an error on resCh if any txs err'd
// 2. once all txs are executed, compute the post-tx state transition and produce the ProcessResult sending it on resCh (or an error if the post-tx state didn't match what is reported in the BAL)
var results []txExecResult
gp := NewGasPool(block.GasLimit())
var execErr error
var numTxComplete int
accesses := preTxReads
if len(block.Transactions()) > 0 {
loop:
for {
select {
case res := <-txResCh:
if execErr == nil {
// short-circuit if invalid block was detected
if res.err != nil {
execErr = res.err
continue
}
if err := gp.SubGas(res.receipt.CumulativeGasUsed); err != nil {
execErr = err
} else {
results = append(results, res)
accesses.Merge(res.stateReads)
}
}
numTxComplete++
if numTxComplete == len(block.Transactions()) {
break loop
}
}
}
if execErr != nil {
resCh <- &ProcessResultWithMetrics{ProcessResult: &ProcessResult{Error: execErr}}
return
}
}
execResults := p.prepareExecResult(block, tExecStart, accesses, statedb, prefetchReader, results)
rootCalcRes := <-stateRootCalcResCh
if execResults.ProcessResult.Error != nil {
resCh <- execResults
} else if rootCalcRes.err != nil {
resCh <- &ProcessResultWithMetrics{ProcessResult: &ProcessResult{Error: rootCalcRes.err}}
} else {
execResults.StateTransitionMetrics = rootCalcRes.metrics
resCh <- execResults
}
}
type stateRootCalculationResult struct {
err error
metrics *state.BALStateTransitionMetrics
root common.Hash
}
// calcAndVerifyRoot performs the post-state root hash calculation, verifying
// it against what is reported by the block and returning a result on resCh.
func (p *ParallelStateProcessor) calcAndVerifyRoot(block *types.Block, stateTransition *state.BALStateTransition, resCh chan stateRootCalculationResult) {
root := stateTransition.IntermediateRoot(false)
res := stateRootCalculationResult{
metrics: stateTransition.Metrics(),
}
if root != block.Root() {
res.err = fmt.Errorf("state root mismatch. local: %x. remote: %x", root, block.Root())
}
resCh <- res
}
// execTx executes single transaction returning a result which includes state accessed/modified
func (p *ParallelStateProcessor) execTx(block *types.Block, tx *types.Transaction, balIdx int, db *state.StateDB, signer types.Signer) *txExecResult {
header := block.Header()
context := NewEVMBlockContext(header, p.chain, nil)
cfg := vm.Config{
NoBaseFee: p.vmCfg.NoBaseFee,
EnablePreimageRecording: p.vmCfg.EnablePreimageRecording,
ExtraEips: slices.Clone(p.vmCfg.ExtraEips),
StatelessSelfValidation: p.vmCfg.StatelessSelfValidation,
EnableWitnessStats: p.vmCfg.EnableWitnessStats,
}
evm := vm.NewEVM(context, db, p.chainConfig(), cfg)
msg, err := TransactionToMessage(tx, signer, header.BaseFee)
if err != nil {
err = fmt.Errorf("could not apply tx %d [%v]: %w", balIdx, tx.Hash().Hex(), err)
return &txExecResult{err: err}
}
gp := NewGasPool(block.GasLimit())
db.SetTxContext(tx.Hash(), balIdx-1)
mut, receipt, err := ApplyTransactionWithEVM(msg, gp, db, block.Number(), block.Hash(), context.Time, tx, evm)
if err != nil {
err := fmt.Errorf("could not apply tx %d [%v]: %w", balIdx, tx.Hash().Hex(), err)
return &txExecResult{err: err}
}
accessList := bal.NewAccessListReader(*block.AccessList())
if !accessList.MutationsAt(balIdx).Eq(mut) {
err := fmt.Errorf("mismatch between local/remote computed state mutations at bal idx %d. got:\n%s\nexpected:\n%s\n", balIdx, mut.String(), accessList.MutationsAt(balIdx).String())
return &txExecResult{err: err}
}
return &txExecResult{
idx: balIdx,
receipt: receipt,
execGas: receipt.GasUsed,
blockGas: gp.CumulativeUsed(),
stateReads: db.Reader().(state.StateReaderTracker).GetStateAccessList(),
}
}
func (p *ParallelStateProcessor) processBlockPreTx(block *types.Block, statedb *state.StateDB, prefetchReader state.Reader, cfg vm.Config) (bal.StateAccesses, error) {
var (
header = block.Header()
)
alReader := state.NewReaderWithBlockLevelAccessList(prefetchReader, *block.AccessList(), 0)
readerWithTracker := state.NewReaderWithTracker(alReader)
sdb := statedb.WithReader(readerWithTracker)
accessList := bal.NewAccessListReader(*block.AccessList())
context := NewEVMBlockContext(header, p.chain, nil)
evm := vm.NewEVM(context, sdb, p.chainConfig(), cfg)
var mutations bal.StateMutations
if beaconRoot := block.BeaconRoot(); beaconRoot != nil {
mutations = ProcessBeaconBlockRoot(*beaconRoot, evm)
}
pbhMutations := ProcessParentBlockHash(block.ParentHash(), evm)
mutations.Merge(pbhMutations)
reads := readerWithTracker.(state.StateReaderTracker).GetStateAccessList()
if !accessList.MutationsAt(0).Eq(mutations) {
return nil, fmt.Errorf("mismatch between local/remote access list mutations at idx 0")
}
return reads, nil
}
// Process performs EVM execution and state root computation for a block which is known
// to contain an access list.
func (p *ParallelStateProcessor) Process(block *types.Block, stateTransition *state.BALStateTransition, statedb *state.StateDB, cfg vm.Config) (*ProcessResultWithMetrics, error) {
var (
header = block.Header()
resCh = make(chan *ProcessResultWithMetrics)
signer = types.MakeSigner(p.chainConfig(), header.Number, header.Time)
rootCalcResultCh = make(chan stateRootCalculationResult)
txResCh = make(chan txExecResult)
pStart = time.Now()
tExecStart time.Time
tPreprocess time.Duration // time to create a set of prestates for parallel transaction execution
balReader = statedb.Reader()
)
startingState := statedb.Copy()
preReads, err := p.processBlockPreTx(block, statedb, balReader, cfg)
if err != nil {
return nil, err
}
// compute the reads/mutations at the last bal index
tPreprocess = time.Since(pStart)
// execute transactions and state root calculation in parallel
tExecStart = time.Now()
go p.resultHandler(block, preReads, statedb, balReader, tExecStart, txResCh, rootCalcResultCh, resCh)
var workers errgroup.Group
workers.SetLimit(runtime.NumCPU())
for i, t := range block.Transactions() {
tx := t
idx := i
sdb := startingState.Copy()
workers.Go(func() error {
startingStateWithReadTracker := sdb.WithReader(state.NewReaderWithTracker(state.NewReaderWithBlockLevelAccessList(balReader, *block.AccessList(), idx+1)))
res := p.execTx(block, tx, idx+1, startingStateWithReadTracker, signer)
txResCh <- *res
return nil
})
}
go p.calcAndVerifyRoot(block, stateTransition, rootCalcResultCh)
res := <-resCh
if res.ProcessResult.Error != nil {
return nil, res.ProcessResult.Error
}
// TODO: remove preprocess metric ?
res.PreProcessTime = tPreprocess
return res, nil
}

View file

@ -20,6 +20,7 @@ import (
"bytes"
"encoding/binary"
"fmt"
"github.com/ethereum/go-ethereum/core/types/bal"
"math/big"
"slices"
@ -421,6 +422,17 @@ func WriteBodyRLP(db ethdb.KeyValueWriter, hash common.Hash, number uint64, rlp
}
}
func WriteAccessListRLP(db ethdb.KeyValueWriter, hash common.Hash, number uint64, rlp rlp.RawValue) {
if err := db.Put(accessListKey(number, hash), rlp); err != nil {
log.Crit("failed to store block access list", "err", err)
}
}
func ReadAccessListRLP(db ethdb.Reader, hash common.Hash, number uint64) rlp.RawValue {
data, _ := db.Get(accessListKey(number, hash))
return data
}
// HasBody verifies the existence of a block body corresponding to the hash.
func HasBody(db ethdb.Reader, hash common.Hash, number uint64) bool {
if isCanon(db, number, hash) {
@ -455,6 +467,26 @@ func WriteBody(db ethdb.KeyValueWriter, hash common.Hash, number uint64, body *t
WriteBodyRLP(db, hash, number, data)
}
func ReadAccessList(db ethdb.Reader, hash common.Hash, number uint64) *bal.BlockAccessList {
var al bal.BlockAccessList
data := ReadAccessListRLP(db, hash, number)
if data != nil {
err := rlp.DecodeBytes(data, &al)
if err != nil {
log.Crit("failed to RLP decode access list", "err", err)
}
}
return &al
}
func WriteAccessList(db ethdb.KeyValueWriter, hash common.Hash, number uint64, al *bal.BlockAccessList) {
data, err := rlp.EncodeToBytes(al)
if err != nil {
log.Crit("failed to RLP encode block access list", "err", err)
}
WriteAccessListRLP(db, hash, number, data)
}
// DeleteBody removes all block body data associated with a hash.
func DeleteBody(db ethdb.KeyValueWriter, hash common.Hash, number uint64) {
if err := db.Delete(blockBodyKey(number, hash)); err != nil {
@ -659,13 +691,25 @@ func ReadBlock(db ethdb.Reader, hash common.Hash, number uint64) *types.Block {
if body == nil {
return nil
}
return types.NewBlockWithHeader(header).WithBody(*body)
block := types.NewBlockWithHeader(header).WithBody(*body)
if header.BlockAccessListHash != nil {
accessList := ReadAccessList(db, hash, number)
if accessList != nil {
block = block.WithAccessList(accessList)
}
}
return block
}
// WriteBlock serializes a block into the database, header and body separately.
func WriteBlock(db ethdb.KeyValueWriter, block *types.Block) {
WriteBody(db, block.Hash(), block.NumberU64(), block.Body())
WriteHeader(db, block.Header())
if block.AccessList() != nil {
WriteAccessList(db, block.Hash(), block.NumberU64(), block.AccessList())
}
}
// WriteAncientBlocks writes entire block data into ancient store and returns the total written size.

View file

@ -111,6 +111,7 @@ var (
headerNumberPrefix = []byte("H") // headerNumberPrefix + hash -> num (uint64 big endian)
blockBodyPrefix = []byte("b") // blockBodyPrefix + num (uint64 big endian) + hash -> block body
accessListPrefix = []byte("z")
blockReceiptsPrefix = []byte("r") // blockReceiptsPrefix + num (uint64 big endian) + hash -> block receipts
txLookupPrefix = []byte("l") // txLookupPrefix + hash -> transaction/receipt lookup metadata
@ -209,6 +210,11 @@ func blockBodyKey(number uint64, hash common.Hash) []byte {
return append(append(blockBodyPrefix, encodeBlockNumber(number)...), hash.Bytes()...)
}
// accessListKey = accessListPrefix + num (uint64 big endian) + hash
func accessListKey(number uint64, hash common.Hash) []byte {
return append(append(accessListPrefix, encodeBlockNumber(number)...), hash.Bytes()...)
}
// blockReceiptsKey = blockReceiptsPrefix + num (uint64 big endian) + hash
func blockReceiptsKey(number uint64, hash common.Hash) []byte {
return append(append(blockReceiptsPrefix, encodeBlockNumber(number)...), hash.Bytes()...)

View file

@ -0,0 +1,514 @@
package state
import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/types/bal"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/trie/trienode"
"github.com/holiman/uint256"
"golang.org/x/sync/errgroup"
"maps"
"sync"
"sync/atomic"
"time"
)
// BALStateTransition is responsible for performing the state root update
// and commit for EIP 7928 access-list-containing blocks. An instance of
// this object is only used for a single block.
type BALStateTransition struct {
accessList bal.AccessListReader
db Database
reader Reader
stateTrie Trie
parentRoot common.Hash
// the computed state root of the block
rootHash common.Hash
// the state modifications performed by the block
diffs bal.StateMutations
// a map of common.Address -> *types.StateAccount containing the block
// prestate of all accounts that will be modified
prestates sync.Map
postStates map[common.Address]*types.StateAccount
// a map of common.Address -> Trie containing the account tries for all
// accounts with mutated storage
tries sync.Map //map[common.Address]Trie
deletions map[common.Address]struct{}
accountDeleted int64
accountUpdated int64
storageDeleted atomic.Int64
storageUpdated atomic.Int64
stateUpdate *stateUpdate
metrics BALStateTransitionMetrics
maxBALIdx int
err error
}
func (s *BALStateTransition) Metrics() *BALStateTransitionMetrics {
return &s.metrics
}
type BALStateTransitionMetrics struct {
// trie hashing metrics
AccountUpdate time.Duration
StatePrefetch time.Duration
StateUpdate time.Duration
StateHash time.Duration
OriginStorageLoadTime time.Duration
// commit metrics
AccountCommits time.Duration
StorageCommits time.Duration
SnapshotCommits time.Duration
TrieDBCommits time.Duration
TotalCommitTime time.Duration
}
func NewBALStateTransition(block *types.Block, prefetchReader Reader, db Database, parentRoot common.Hash) (*BALStateTransition, error) {
stateTrie, err := db.OpenTrie(parentRoot)
if err != nil {
return nil, err
}
return &BALStateTransition{
accessList: bal.NewAccessListReader(*block.AccessList()),
db: db,
reader: prefetchReader,
stateTrie: stateTrie,
parentRoot: parentRoot,
rootHash: common.Hash{},
diffs: make(bal.StateMutations),
prestates: sync.Map{},
postStates: make(map[common.Address]*types.StateAccount),
tries: sync.Map{},
deletions: make(map[common.Address]struct{}),
stateUpdate: nil,
maxBALIdx: len(block.Transactions()) + 1,
}, nil
}
func (s *BALStateTransition) Error() error {
return s.err
}
func (s *BALStateTransition) setError(err error) {
if s.err != nil {
s.err = err
}
}
// TODO: refresh my knowledge of the storage-clearing EIP and ensure that my assumptions around
// an empty account which contains storage are valid here.
//
// isAccountDeleted checks whether the state account was deleted in this block. Post selfdestruct-removal,
// deletions can only occur if an account which has a balance becomes the target of a CREATE2 initcode
// which calls SENDALL, clearing the account and marking it for deletion.
func isAccountDeleted(prestate *types.StateAccount, mutations bal.AccountMutations) bool {
// TODO: figure out how to simplify this method
if mutations.Code != nil && len(mutations.Code) != 0 {
return false
}
if mutations.Nonce != nil && *mutations.Nonce != 0 {
return false
}
if mutations.StorageWrites != nil && len(mutations.StorageWrites) > 0 {
return false
}
if mutations.Balance != nil {
if mutations.Balance.IsZero() {
if prestate.Nonce != 0 || prestate.Balance.IsZero() || common.BytesToHash(prestate.CodeHash) != types.EmptyCodeHash {
return false
}
// consider an empty account with storage to be deleted, so we don't check root here
return true
}
}
return false
}
// updateAccount applies the block state mutations to a given account returning
// the updated state account and new code (if the account code changed)
func (s *BALStateTransition) updateAccount(addr common.Address) (*types.StateAccount, []byte) {
a, _ := s.prestates.Load(addr)
acct := a.(*types.StateAccount)
acct, diff := acct.Copy(), s.diffs[addr]
code := diff.Code
if diff.Nonce != nil {
acct.Nonce = *diff.Nonce
}
if diff.Balance != nil {
acct.Balance = new(uint256.Int).Set(diff.Balance)
}
if tr, ok := s.tries.Load(addr); ok {
acct.Root = tr.(Trie).Hash()
}
return acct, code
}
func (s *BALStateTransition) commitAccount(addr common.Address) (*accountUpdate, *trienode.NodeSet, error) {
var (
encode = func(val common.Hash) []byte {
if val == (common.Hash{}) {
return nil
}
blob, _ := rlp.EncodeToBytes(common.TrimLeftZeroes(val[:]))
return blob
}
)
op := &accountUpdate{
address: addr,
data: types.SlimAccountRLP(*s.postStates[addr]), // TODO: cache the updated state acocunt somewhere
}
if prestate, exist := s.prestates.Load(addr); exist {
prestate := prestate.(*types.StateAccount)
op.origin = types.SlimAccountRLP(*prestate)
}
if s.diffs[addr].Code != nil {
code := contractCode{
hash: crypto.Keccak256Hash(s.diffs[addr].Code),
blob: s.diffs[addr].Code,
}
if op.origin == nil {
code.originHash = types.EmptyCodeHash
} else {
code.originHash = crypto.Keccak256Hash(op.origin)
}
op.code = &code
}
if len(s.diffs[addr].StorageWrites) == 0 {
return op, nil, nil
}
op.storages = make(map[common.Hash][]byte)
op.storagesOriginByHash = make(map[common.Hash][]byte)
op.storagesOriginByKey = make(map[common.Hash][]byte)
for key, value := range s.diffs[addr].StorageWrites {
hash := crypto.Keccak256Hash(key[:])
op.storages[hash] = encode(value)
storage, err := s.reader.Storage(addr, key)
if err != nil {
return nil, nil, err
}
origin := encode(storage)
op.storagesOriginByHash[hash] = origin
op.storagesOriginByKey[key] = origin
}
tr, _ := s.tries.Load(addr)
root, nodes := tr.(Trie).Commit(false)
s.postStates[addr].Root = root
return op, nodes, nil
}
// CommitWithUpdate flushes mutated trie nodes and state accounts to disk.
func (s *BALStateTransition) CommitWithUpdate(block uint64, deleteEmptyObjects bool, noStorageWiping bool) (common.Hash, *stateUpdate, error) {
// 1) create a stateUpdate object
// Commit objects to the trie, measuring the elapsed time
var (
commitStart = time.Now()
accountTrieNodesUpdated int
accountTrieNodesDeleted int
storageTrieNodesUpdated int
storageTrieNodesDeleted int
lock sync.Mutex // protect two maps below
nodes = trienode.NewMergedNodeSet() // aggregated trie nodes
updates = make(map[common.Hash]*accountUpdate, len(s.diffs)) // aggregated account updates
// merge aggregates the dirty trie nodes into the global set.
//
// Given that some accounts may be destroyed and then recreated within
// the same block, it's possible that a node set with the same owner
// may already exist. In such cases, these two sets are combined, with
// the later one overwriting the previous one if any nodes are modified
// or deleted in both sets.
//
// merge run concurrently across all the state objects and account trie.
merge = func(set *trienode.NodeSet) error {
if set == nil {
return nil
}
lock.Lock()
defer lock.Unlock()
updates, deletes := set.Size()
if set.Owner == (common.Hash{}) {
accountTrieNodesUpdated += updates
accountTrieNodesDeleted += deletes
} else {
storageTrieNodesUpdated += updates
storageTrieNodesDeleted += deletes
}
return nodes.Merge(set)
}
)
destructedPrestates := make(map[common.Address]*types.StateAccount)
s.prestates.Range(func(key, value any) bool {
addr := key.(common.Address)
acct := value.(*types.StateAccount)
destructedPrestates[addr] = acct
return true
})
deletes, delNodes, err := handleDestruction(s.db, s.stateTrie, noStorageWiping, maps.Keys(s.deletions), destructedPrestates)
if err != nil {
return common.Hash{}, nil, err
}
for _, set := range delNodes {
if err := merge(set); err != nil {
return common.Hash{}, nil, err
}
}
// Handle all state updates afterwards, concurrently to one another to shave
// off some milliseconds from the commit operation. Also accumulate the code
// writes to run in parallel with the computations.
var (
start = time.Now()
root common.Hash
workers errgroup.Group
)
// Schedule the account trie first since that will be the biggest, so give
// it the most time to crunch.
//
// TODO(karalabe): This account trie commit is *very* heavy. 5-6ms at chain
// heads, which seems excessive given that it doesn't do hashing, it just
// shuffles some data. For comparison, the *hashing* at chain head is 2-3ms.
// We need to investigate what's happening as it seems something's wonky.
// Obviously it's not an end of the world issue, just something the original
// code didn't anticipate for.
workers.Go(func() error {
// Write the account trie changes, measuring the amount of wasted time
newroot, set := s.stateTrie.Commit(true)
root = newroot
if err := merge(set); err != nil {
return err
}
s.metrics.AccountCommits = time.Since(start)
return nil
})
// Schedule each of the storage tries that need to be updated, so they can
// run concurrently to one another.
//
// TODO(karalabe): Experimentally, the account commit takes approximately the
// same time as all the storage commits combined, so we could maybe only have
// 2 threads in total. But that kind of depends on the account commit being
// more expensive than it should be, so let's fix that and revisit this todo.
for addr, _ := range s.diffs {
if _, isDeleted := s.deletions[addr]; isDeleted {
continue
}
address := addr
// Run the storage updates concurrently to one another
workers.Go(func() error {
// Write any storage changes in the state object to its storage trie
update, set, err := s.commitAccount(address)
if err != nil {
return err
}
if err := merge(set); err != nil {
return err
}
lock.Lock()
updates[crypto.Keccak256Hash(address[:])] = update
s.metrics.StorageCommits = time.Since(start) // overwrite with the longest storage commit runtime
lock.Unlock()
return nil
})
}
// Wait for everything to finish and update the metrics
if err := workers.Wait(); err != nil {
return common.Hash{}, nil, err
}
accountUpdatedMeter.Mark(s.accountUpdated)
storageUpdatedMeter.Mark(s.storageUpdated.Load())
accountDeletedMeter.Mark(s.accountDeleted)
storageDeletedMeter.Mark(s.storageDeleted.Load())
accountTrieUpdatedMeter.Mark(int64(accountTrieNodesUpdated))
accountTrieDeletedMeter.Mark(int64(accountTrieNodesDeleted))
storageTriesUpdatedMeter.Mark(int64(storageTrieNodesUpdated))
storageTriesDeletedMeter.Mark(int64(storageTrieNodesDeleted))
ret := newStateUpdate(noStorageWiping, s.parentRoot, root, block, deletes, updates, nodes)
snapshotCommits, trieDBCommits, err := flushStateUpdate(s.db, block, ret)
if err != nil {
return common.Hash{}, nil, err
}
s.metrics.SnapshotCommits, s.metrics.TrieDBCommits = snapshotCommits, trieDBCommits
s.metrics.TotalCommitTime = time.Since(commitStart)
return root, ret, nil
}
func (s *BALStateTransition) Commit(block uint64, deleteEmptyObjects bool, noStorageWiping bool) (common.Hash, error) {
root, _, err := s.CommitWithUpdate(block, deleteEmptyObjects, noStorageWiping)
return root, err
}
// IntermediateRoot applies block state mutations and computes the updated state
// trie root.
func (s *BALStateTransition) IntermediateRoot(_ bool) common.Hash {
if s.rootHash != (common.Hash{}) {
return s.rootHash
}
// State root calculation proceeds as follows:
// 1 (b): load the origin storage values for all slots which were modified during the block (this is needed for computing the stateUpdate)
// 1 (c): update each mutated account, producing the post-block state object by applying the state mutations to the prestate (retrieved in 1a).
// 1 (d): prefetch the intermediate trie nodes of the mutated state set from the account trie.
//
// 2: compute the post-state root of the account trie
//
// Steps 1/2 are performed sequentially, with steps 1a-d performed in parallel
start := time.Now()
var wg sync.WaitGroup
s.diffs = *s.accessList.Mutations(s.maxBALIdx + 1)
for addr, d := range s.diffs {
wg.Add(1)
address := addr
diff := d
go func() {
defer wg.Done()
// 1 (c): update each mutated account, producing the post-block state object by applying the state mutations to the prestate (retrieved in 1a).
acct, err := s.reader.Account(address)
if err != nil {
s.setError(err)
return
}
if acct == nil {
acct = types.NewEmptyStateAccount()
}
s.prestates.Store(address, acct)
if len(diff.StorageWrites) > 0 {
tr, err := s.db.OpenStorageTrie(s.parentRoot, address, acct.Root, s.stateTrie)
if err != nil {
s.setError(err)
return
}
s.tries.Store(address, tr)
var (
updateKeys, updateValues [][]byte
deleteKeys [][]byte
)
for key, val := range diff.StorageWrites {
if val != (common.Hash{}) {
updateKeys = append(updateKeys, key[:])
updateValues = append(updateValues, common.TrimLeftZeroes(val[:]))
s.storageUpdated.Add(1)
} else {
deleteKeys = append(deleteKeys, key[:])
s.storageDeleted.Add(1)
}
}
if err := tr.UpdateStorageBatch(address, updateKeys, updateValues); err != nil {
s.setError(err)
return
}
for _, key := range deleteKeys {
if err := tr.DeleteStorage(address, key); err != nil {
s.setError(err)
return
}
}
hashStart := time.Now()
tr.Hash()
s.metrics.StateHash = time.Since(hashStart)
}
}()
}
wg.Add(1)
// 1 (d): prefetch the intermediate trie nodes of the mutated state set from the account trie.
go func() {
defer wg.Done()
prefetchStart := time.Now()
var prefetchAddrs []common.Address
for addr, _ := range s.diffs {
prefetchAddrs = append(prefetchAddrs, addr)
}
if err := s.stateTrie.PrefetchAccount(prefetchAddrs); err != nil {
s.setError(err)
return
}
s.metrics.StatePrefetch = time.Since(prefetchStart)
}()
wg.Wait()
s.metrics.AccountUpdate = time.Since(start)
// 2: compute the post-state root of the account trie
stateUpdateStart := time.Now()
for mutatedAddr, _ := range s.diffs {
p, _ := s.prestates.Load(mutatedAddr)
prestate := p.(*types.StateAccount)
isDeleted := isAccountDeleted(prestate, s.diffs[mutatedAddr])
if isDeleted {
if err := s.stateTrie.DeleteAccount(mutatedAddr); err != nil {
s.setError(err)
return common.Hash{}
}
s.deletions[mutatedAddr] = struct{}{}
} else {
acct, code := s.updateAccount(mutatedAddr)
if code != nil {
codeHash := crypto.Keccak256Hash(code)
acct.CodeHash = codeHash.Bytes()
if err := s.stateTrie.UpdateContractCode(mutatedAddr, codeHash, code); err != nil {
s.setError(err)
return common.Hash{}
}
}
if err := s.stateTrie.UpdateAccount(mutatedAddr, acct, len(code)); err != nil {
s.setError(err)
return common.Hash{}
}
s.postStates[mutatedAddr] = acct
}
}
s.metrics.StateUpdate = time.Since(stateUpdateStart)
stateTrieHashStart := time.Now()
s.rootHash = s.stateTrie.Hash()
s.metrics.StateHash = time.Since(stateTrieHashStart)
return s.rootHash
}
func (s *BALStateTransition) Preimages() map[common.Hash][]byte {
// TODO: implement this
return make(map[common.Hash][]byte)
}

View file

@ -76,7 +76,7 @@ type Trie interface {
// be returned.
GetAccount(address common.Address) (*types.StateAccount, error)
// PrefetchAccount attempts to resolve specific accounts from the database
// PrefetchAccount attempts to schedule specific accounts from the database
// to accelerate subsequent trie operations.
PrefetchAccount([]common.Address) error
@ -85,7 +85,7 @@ type Trie interface {
// a trie.MissingNodeError is returned.
GetStorage(addr common.Address, key []byte) ([]byte, error)
// PrefetchStorage attempts to resolve specific storage slots from the database
// PrefetchStorage attempts to schedule specific storage slots from the database
// to accelerate subsequent trie operations.
PrefetchStorage(addr common.Address, keys [][]byte) error
@ -94,12 +94,18 @@ type Trie interface {
// in the trie with provided address.
UpdateAccount(address common.Address, account *types.StateAccount, codeLen int) error
// UpdateAccountBatch attempts to update a list accounts in the batch manner.
UpdateAccountBatch(addresses []common.Address, accounts []*types.StateAccount, _ []int) error
// UpdateStorage associates key with value in the trie. If value has length zero,
// any existing value is deleted from the trie. The value bytes must not be modified
// by the caller while they are stored in the trie. If a node was not found in the
// database, a trie.MissingNodeError is returned.
UpdateStorage(addr common.Address, key, value []byte) error
// UpdateStorageBatch attempts to update a list storages in the batch manner.
UpdateStorageBatch(_ common.Address, keys [][]byte, values [][]byte) error
// DeleteAccount abstracts an account deletion from the trie.
DeleteAccount(address common.Address) error
@ -221,21 +227,35 @@ func (db *CachingDB) Reader(stateRoot common.Hash) (Reader, error) {
return newReader(newCachingCodeReader(db.disk, db.codeCache, db.codeSizeCache), sr), nil
}
// ReadersWithCacheStats creates a pair of state readers that share the same
// underlying state reader and internal state cache, while maintaining separate
// statistics respectively.
func (db *CachingDB) ReadersWithCacheStats(stateRoot common.Hash) (ReaderWithStats, ReaderWithStats, error) {
// ReadersWithCache creates a pair of state readers that share the same
// underlying state reader and internal state cache, while maintaining
// separate statistics respectively.
func (db *CachingDB) ReadersWithCache(stateRoot common.Hash) (Reader, Reader, error) {
r, err := db.StateReader(stateRoot)
if err != nil {
return nil, nil, err
}
sr := newStateReaderWithCache(r)
ra := newReaderWithStats(sr, newCachingCodeReader(db.disk, db.codeCache, db.codeSizeCache))
rb := newReaderWithStats(sr, newCachingCodeReader(db.disk, db.codeCache, db.codeSizeCache))
ra := newReader(newCachingCodeReader(db.disk, db.codeCache, db.codeSizeCache), newStateReaderWithStats(sr))
rb := newReader(newCachingCodeReader(db.disk, db.codeCache, db.codeSizeCache), newStateReaderWithStats(sr))
return ra, rb, nil
}
// ReaderEIP7928 creates a state reader with the manner of Block-level accessList.
func (db *CachingDB) ReaderEIP7928(stateRoot common.Hash, accessList map[common.Address][]common.Hash, threads int) (Reader, error) {
base, err := db.StateReader(stateRoot)
if err != nil {
return nil, err
}
// Construct the state reader with native cache and associated statistics
r := newStateReaderWithStats(newStateReaderWithCache(base))
// Construct the state reader with background prefetching
pr := newPrefetchStateReader(r, accessList, threads)
return newReader(newCachingCodeReader(db.disk, db.codeCache, db.codeSizeCache), pr), nil
}
// OpenTrie opens the main account trie at a specific root hash.
func (db *CachingDB) OpenTrie(root common.Hash) (Trie, error) {
if db.triedb.IsVerkle() {

View file

@ -29,7 +29,7 @@ import (
// nodeIterator is an iterator to traverse the entire state trie post-order,
// including all of the contract code and contract state tries. Preimage is
// required in order to resolve the contract address.
// required in order to schedule the contract address.
type nodeIterator struct {
state *StateDB // State being iterated
tr Trie // Primary account trie for traversal

View file

@ -382,7 +382,7 @@ func (ch nonceChange) copy() journalEntry {
}
func (ch codeChange) revert(s *StateDB) {
s.getStateObject(ch.account).setCode(crypto.Keccak256Hash(ch.prevCode), ch.prevCode)
s.getStateObject(ch.account).SetCode(crypto.Keccak256Hash(ch.prevCode), ch.prevCode)
}
func (ch codeChange) dirtied() (common.Address, bool) {

View file

@ -18,7 +18,6 @@ package state
import (
"errors"
"fmt"
"sync"
"sync/atomic"
@ -38,6 +37,8 @@ import (
)
// ContractCodeReader defines the interface for accessing contract code.
//
// ContractCodeReader is supposed to be thread-safe.
type ContractCodeReader interface {
// Has returns the flag indicating whether the contract code with
// specified address and hash exists or not.
@ -58,35 +59,10 @@ type ContractCodeReader interface {
CodeSize(addr common.Address, codeHash common.Hash) (int, error)
}
// ContractCodeReaderStats aggregates statistics for the contract code reader.
type ContractCodeReaderStats struct {
CacheHit int64 // Number of cache hits
CacheMiss int64 // Number of cache misses
CacheHitBytes int64 // Total bytes served from cache
CacheMissBytes int64 // Total bytes read on cache misses
}
// HitRate returns the cache hit rate.
func (s ContractCodeReaderStats) HitRate() float64 {
if s.CacheHit == 0 {
return 0
}
return float64(s.CacheHit) / float64(s.CacheHit+s.CacheMiss)
}
// ContractCodeReaderWithStats extends ContractCodeReader by adding GetStats to
// expose statistics of code reader.
type ContractCodeReaderWithStats interface {
ContractCodeReader
GetStats() ContractCodeReaderStats
}
// StateReader defines the interface for accessing accounts and storage slots
// associated with a specific state.
//
// StateReader is assumed to be thread-safe and implementation must take care
// of the concurrency issue by themselves.
// StateReader is supposed to be thread-safe.
type StateReader interface {
// Account retrieves the account associated with a particular address.
//
@ -114,40 +90,6 @@ type Reader interface {
StateReader
}
// ReaderStats wraps the statistics of reader.
type ReaderStats struct {
AccountCacheHit int64
AccountCacheMiss int64
StorageCacheHit int64
StorageCacheMiss int64
CodeStats ContractCodeReaderStats
}
// String implements fmt.Stringer, returning string format statistics.
func (s ReaderStats) String() string {
var (
accountCacheHitRate float64
storageCacheHitRate float64
)
if s.AccountCacheHit > 0 {
accountCacheHitRate = float64(s.AccountCacheHit) / float64(s.AccountCacheHit+s.AccountCacheMiss) * 100
}
if s.StorageCacheHit > 0 {
storageCacheHitRate = float64(s.StorageCacheHit) / float64(s.StorageCacheHit+s.StorageCacheMiss) * 100
}
msg := fmt.Sprintf("Reader statistics\n")
msg += fmt.Sprintf("account: hit: %d, miss: %d, rate: %.2f\n", s.AccountCacheHit, s.AccountCacheMiss, accountCacheHitRate)
msg += fmt.Sprintf("storage: hit: %d, miss: %d, rate: %.2f\n", s.StorageCacheHit, s.StorageCacheMiss, storageCacheHitRate)
msg += fmt.Sprintf("code: hit: %d(%v), miss: %d(%v), rate: %.2f\n", s.CodeStats.CacheHit, common.StorageSize(s.CodeStats.CacheHitBytes), s.CodeStats.CacheMiss, common.StorageSize(s.CodeStats.CacheMissBytes), s.CodeStats.HitRate())
return msg
}
// ReaderWithStats wraps the additional method to retrieve the reader statistics from.
type ReaderWithStats interface {
Reader
GetStats() ReaderStats
}
// cachingCodeReader implements ContractCodeReader, accessing contract code either in
// local key-value store or the shared code cache.
//
@ -210,15 +152,16 @@ func (r *cachingCodeReader) CodeSize(addr common.Address, codeHash common.Hash)
return len(code), nil
}
// Has returns the flag indicating whether the contract code with
// specified address and hash exists or not.
// Has implements ContractCodeReader, returning the flag indicating whether
// the contract code with specified address and hash exists or not.
func (r *cachingCodeReader) Has(addr common.Address, codeHash common.Hash) bool {
code, _ := r.Code(addr, codeHash)
return len(code) > 0
}
// GetStats returns the statistics of the code reader.
func (r *cachingCodeReader) GetStats() ContractCodeReaderStats {
// GetCodeStats implements ContractCodeReaderStater, returning the statistics
// of the code reader.
func (r *cachingCodeReader) GetCodeStats() ContractCodeReaderStats {
return ContractCodeReaderStats{
CacheHit: r.hit.Load(),
CacheMiss: r.miss.Load(),
@ -495,20 +438,6 @@ func (r *multiStateReader) Storage(addr common.Address, slot common.Hash) (commo
return common.Hash{}, errors.Join(errs...)
}
// reader is the wrapper of ContractCodeReader and StateReader interface.
type reader struct {
ContractCodeReader
StateReader
}
// newReader constructs a reader with the supplied code reader and state reader.
func newReader(codeReader ContractCodeReader, stateReader StateReader) *reader {
return &reader{
ContractCodeReader: codeReader,
StateReader: stateReader,
}
}
// stateReaderWithCache is a wrapper around StateReader that maintains additional
// state caches to support concurrent state access.
type stateReaderWithCache struct {
@ -619,9 +548,10 @@ func (r *stateReaderWithCache) Storage(addr common.Address, slot common.Hash) (c
return value, err
}
type readerWithStats struct {
// stateReaderWithStats is a wrapper over the stateReaderWithCache, tracking
// the cache hit statistics of the reader.
type stateReaderWithStats struct {
*stateReaderWithCache
ContractCodeReaderWithStats
accountCacheHit atomic.Int64
accountCacheMiss atomic.Int64
@ -629,11 +559,10 @@ type readerWithStats struct {
storageCacheMiss atomic.Int64
}
// newReaderWithStats constructs the reader with additional statistics tracked.
func newReaderWithStats(sr *stateReaderWithCache, cr ContractCodeReaderWithStats) *readerWithStats {
return &readerWithStats{
stateReaderWithCache: sr,
ContractCodeReaderWithStats: cr,
// newReaderWithStats constructs the state reader with additional statistics tracked.
func newStateReaderWithStats(sr *stateReaderWithCache) *stateReaderWithStats {
return &stateReaderWithStats{
stateReaderWithCache: sr,
}
}
@ -641,7 +570,7 @@ func newReaderWithStats(sr *stateReaderWithCache, cr ContractCodeReaderWithStats
// The returned account might be nil if it's not existent.
//
// An error will be returned if the state is corrupted in the underlying reader.
func (r *readerWithStats) Account(addr common.Address) (*types.StateAccount, error) {
func (r *stateReaderWithStats) Account(addr common.Address) (*types.StateAccount, error) {
account, incache, err := r.stateReaderWithCache.account(addr)
if err != nil {
return nil, err
@ -659,7 +588,7 @@ func (r *readerWithStats) Account(addr common.Address) (*types.StateAccount, err
// existent.
//
// An error will be returned if the state is corrupted in the underlying reader.
func (r *readerWithStats) Storage(addr common.Address, slot common.Hash) (common.Hash, error) {
func (r *stateReaderWithStats) Storage(addr common.Address, slot common.Hash) (common.Hash, error) {
value, incache, err := r.stateReaderWithCache.storage(addr, slot)
if err != nil {
return common.Hash{}, err
@ -672,13 +601,51 @@ func (r *readerWithStats) Storage(addr common.Address, slot common.Hash) (common
return value, nil
}
// GetStats implements ReaderWithStats, returning the statistics of state reader.
func (r *readerWithStats) GetStats() ReaderStats {
return ReaderStats{
// GetStateStats implements StateReaderStater, returning the statistics of the
// state reader.
func (r *stateReaderWithStats) GetStateStats() StateReaderStats {
return StateReaderStats{
AccountCacheHit: r.accountCacheHit.Load(),
AccountCacheMiss: r.accountCacheMiss.Load(),
StorageCacheHit: r.storageCacheHit.Load(),
StorageCacheMiss: r.storageCacheMiss.Load(),
CodeStats: r.ContractCodeReaderWithStats.GetStats(),
}
}
// reader aggregates a code reader and a state reader into a single object.
type reader struct {
ContractCodeReader
StateReader
}
// newReader constructs a reader with the supplied code reader and state reader.
func newReader(codeReader ContractCodeReader, stateReader StateReader) *reader {
return &reader{
ContractCodeReader: codeReader,
StateReader: stateReader,
}
}
// GetCodeStats returns the statistics of code access.
func (r *reader) GetCodeStats() ContractCodeReaderStats {
if stater, ok := r.ContractCodeReader.(ContractCodeReaderStater); ok {
return stater.GetCodeStats()
}
return ContractCodeReaderStats{}
}
// GetStateStats returns the statistics of state access.
func (r *reader) GetStateStats() StateReaderStats {
if stater, ok := r.StateReader.(StateReaderStater); ok {
return stater.GetStateStats()
}
return StateReaderStats{}
}
// GetStats returns the aggregated statistics for both state and code access.
func (r *reader) GetStats() ReaderStats {
return ReaderStats{
CodeStats: r.GetCodeStats(),
StateStats: r.GetStateStats(),
}
}

View file

@ -0,0 +1,334 @@
// Copyright 2026 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package state
// The EIP27928 reader utilizes a hierarchical architecture to optimize state
// access during block execution:
//
// - Base layer: The reader is initialized with the pre-transition state root,
// providing the access of the state.
//
// - Prefetching Layer: This base reader is wrapped by newPrefetchStateReader.
// Using an Access List hint, it asynchronously fetches required state data
// in the background, minimizing I/O blocking during transaction processing.
//
// - Execution Layer: To support parallel transaction execution within the EIP
// 7928 context, readers are wrapped in ReaderWithBlockLevelAccessList.
// This layer provides a "unified view" by merging the pre-transition state
// with mutated states from preceding transactions in the block.
//
// - Tracking Layer: Finally, the readerTracker wraps the execution reader to
// capture all state accesses made during a specific transaction. These individual
// access are subsequently merged to construct a comprehensive access list
// for the entire block.
//
// The architecture can be illustrated by the diagram below:
// [ Block Level Access List ] <────────────────┐
// ▲ │ (Merge)
// │ │
// ┌───────┴───────┐ ┌───────┴───────┐
// │ readerTracker │ │ readerTracker │ (Access Tracking)
// └───────┬───────┘ └───────┬───────┘
// │ │
// ┌──────────────┴──────────────┐ ┌──────────────┴──────────────┐
// │ ReaderWithBlockLevelAL │ │ ReaderWithBlockLevelAL │ (Unified View)
// │ (Pre-state + Mutations) │ │ (Pre-state + Mutations) │
// └──────────────┬──────────────┘ └──────────────┬──────────────┘
// │ │
// └────────────────┬─────────────────┘
// │
// ┌──────────────┴──────────────┐
// │ newPrefetchStateReader │ (Async I/O)
// │ (Access List Hint driven) │
// └──────────────┬──────────────┘
// │
// ┌──────────────┴──────────────┐
// │ Base Reader │ (State Root)
// │ (State & Contract Code) │
// └─────────────────────────────┘
import (
"github.com/ethereum/go-ethereum/crypto"
"sync"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/types/bal"
)
type fetchTask struct {
addr common.Address
slots []common.Hash
}
func (t *fetchTask) weight() int { return 1 + len(t.slots) }
type prefetchStateReader struct {
StateReader
tasks []*fetchTask
nThreads int
done chan struct{}
term chan struct{}
closeOnce sync.Once
}
func newPrefetchStateReader(reader StateReader, accessList bal.StorageKeys, nThreads int) *prefetchStateReader {
tasks := make([]*fetchTask, 0, len(accessList))
for addr, slots := range accessList {
tasks = append(tasks, &fetchTask{
addr: addr,
slots: slots,
})
}
return newPrefetchStateReaderInternal(reader, tasks, nThreads)
}
func newPrefetchStateReaderInternal(reader StateReader, tasks []*fetchTask, nThreads int) *prefetchStateReader {
r := &prefetchStateReader{
StateReader: reader,
tasks: tasks,
nThreads: nThreads,
done: make(chan struct{}),
term: make(chan struct{}),
}
go r.prefetch()
return r
}
func (r *prefetchStateReader) Close() {
r.closeOnce.Do(func() {
close(r.term)
<-r.done
})
}
func (r *prefetchStateReader) Wait() error {
select {
case <-r.term:
return nil
case <-r.done:
return nil
}
}
func (r *prefetchStateReader) prefetch() {
defer close(r.done)
if len(r.tasks) == 0 {
return
}
var total int
for _, t := range r.tasks {
total += t.weight()
}
var (
wg sync.WaitGroup
unit = (total + r.nThreads - 1) / r.nThreads // round-up the per worker unit
)
for i := 0; i < r.nThreads; i++ {
start := i * unit
if start >= total {
break
}
limit := (i + 1) * unit
if i == r.nThreads-1 {
limit = total
}
// Schedule the worker for prefetching, the items on the range [start, limit)
// is exclusively assigned for this worker.
wg.Add(1)
go func(workerID, startW, endW int) {
r.process(startW, endW)
wg.Done()
}(i, start, limit)
}
wg.Wait()
}
func (r *prefetchStateReader) process(start, limit int) {
var total = 0
for _, t := range r.tasks {
tw := t.weight()
if total+tw > start {
s := 0
if start > total {
s = start - total
}
l := tw
if limit < total+tw {
l = limit - total
}
for j := s; j < l; j++ {
select {
case <-r.term:
return
default:
if j == 0 {
r.StateReader.Account(t.addr)
} else {
r.StateReader.Storage(t.addr, t.slots[j-1])
}
}
}
}
total += tw
if total >= limit {
return
}
}
}
// ReaderWithBlockLevelAccessList provides state access that reflects the
// pre-transition state combined with the mutations made by transactions
// prior to TxIndex.
type ReaderWithBlockLevelAccessList struct {
Reader
AccessList bal.AccessListReader
TxIndex int
}
func NewReaderWithBlockLevelAccessList(base Reader, accessList bal.BlockAccessList, txIndex int) *ReaderWithBlockLevelAccessList {
return &ReaderWithBlockLevelAccessList{
Reader: base,
AccessList: bal.NewAccessListReader(accessList),
TxIndex: txIndex,
}
}
// Account implements Reader, returning the account with the specific address.
func (r *ReaderWithBlockLevelAccessList) Account(addr common.Address) (acct *types.StateAccount, err error) {
acct, err = r.Reader.Account(addr)
if err != nil {
return nil, err
}
mut := r.AccessList.AccountMutations(addr, r.TxIndex)
if mut == nil {
return
}
if acct == nil {
acct = types.NewEmptyStateAccount()
} else {
// the account returned by the underlying reader is a reference
// copy it to avoid mutating the reader's instance
acct = acct.Copy()
}
if mut.Balance != nil {
acct.Balance = mut.Balance
}
if mut.Code != nil {
codeHash := crypto.Keccak256Hash(mut.Code)
acct.CodeHash = codeHash[:]
}
if mut.Nonce != nil {
acct.Nonce = *mut.Nonce
}
return
}
// Storage implements Reader, returning the storage slot with the specific
// address and slot key.
func (r *ReaderWithBlockLevelAccessList) Storage(addr common.Address, slot common.Hash) (common.Hash, error) {
val := r.AccessList.Storage(addr, slot, r.TxIndex)
if val != nil {
return *val, nil
}
return r.Reader.Storage(addr, slot)
}
// Has implements Reader, returning the flag indicating whether the contract
// code with specified address and hash exists or not.
func (r *ReaderWithBlockLevelAccessList) Has(addr common.Address, codeHash common.Hash) bool {
mut := r.AccessList.AccountMutations(addr, r.TxIndex)
if mut != nil && mut.Code != nil {
return crypto.Keccak256Hash(mut.Code) == codeHash
}
return r.Reader.Has(addr, codeHash)
}
// Code implements Reader, returning the contract code with specified address
// and hash.
func (r *ReaderWithBlockLevelAccessList) Code(addr common.Address, codeHash common.Hash) ([]byte, error) {
mut := r.AccessList.AccountMutations(addr, r.TxIndex)
if mut != nil && mut.Code != nil && crypto.Keccak256Hash(mut.Code) == codeHash {
// TODO: need to copy here?
return mut.Code, nil
}
return r.Reader.Code(addr, codeHash)
}
// CodeSize implements Reader, returning the contract code size with specified
// address and hash.
func (r *ReaderWithBlockLevelAccessList) CodeSize(addr common.Address, codeHash common.Hash) (int, error) {
mut := r.AccessList.AccountMutations(addr, r.TxIndex)
if mut != nil && mut.Code != nil && crypto.Keccak256Hash(mut.Code) == codeHash {
return len(mut.Code), nil
}
return r.Reader.CodeSize(addr, codeHash)
}
// StateReaderTracker defines the capability to retrieve the access footprint
// recorded during state reading operations.
type StateReaderTracker interface {
GetStateAccessList() bal.StateAccesses
}
func NewReaderWithTracker(r Reader) Reader {
return newReaderTracker(r)
}
type readerTracker struct {
Reader
access bal.StateAccesses
}
func newReaderTracker(reader Reader) *readerTracker {
return &readerTracker{
Reader: reader,
access: make(bal.StateAccesses),
}
}
// Account implements StateReader, tracking the accessed address locally.
func (r *readerTracker) Account(addr common.Address) (*types.StateAccount, error) {
_, exists := r.access[addr]
if !exists {
r.access[addr] = make(bal.StorageAccessList)
}
return r.Reader.Account(addr)
}
// Storage implements StateReader, tracking the accessed slot identifier locally.
func (r *readerTracker) Storage(addr common.Address, slot common.Hash) (common.Hash, error) {
list, exists := r.access[addr]
if !exists {
list = make(bal.StorageAccessList)
r.access[addr] = list
}
list[slot] = struct{}{}
return r.Reader.Storage(addr, slot)
}
// GetStateAccessList implements StateReaderTracker, returning the access footprint.
func (r *readerTracker) GetStateAccessList() bal.StateAccesses {
return r.access
}

View file

@ -0,0 +1,201 @@
// Copyright 2026 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package state
import (
"fmt"
"maps"
"math/rand"
"sync"
"testing"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/internal/testrand"
)
type countingStateReader struct {
accounts map[common.Address]int
storages map[common.Address]map[common.Hash]int
lock sync.Mutex
}
func newRefStateReader() *countingStateReader {
return &countingStateReader{
accounts: make(map[common.Address]int),
storages: make(map[common.Address]map[common.Hash]int),
}
}
func (r *countingStateReader) validate(total int) error {
var sum int
for addr, n := range r.accounts {
if n != 1 {
return fmt.Errorf("duplicated account access: %x-%d", addr, n)
}
sum += 1
slots, exists := r.storages[addr]
if !exists {
continue
}
for key, n := range slots {
if n != 1 {
return fmt.Errorf("duplicated storage access: %x-%x-%d", addr, key, n)
}
sum += 1
}
}
for addr := range r.storages {
_, exists := r.accounts[addr]
if !exists {
return fmt.Errorf("dangling storage access: %x", addr)
}
}
if sum != total {
return fmt.Errorf("unexpected number of access, want: %d, got: %d", total, sum)
}
return nil
}
func (r *countingStateReader) Account(addr common.Address) (*types.StateAccount, error) {
r.lock.Lock()
defer r.lock.Unlock()
r.accounts[addr] += 1
return nil, nil
}
func (r *countingStateReader) Storage(addr common.Address, slot common.Hash) (common.Hash, error) {
r.lock.Lock()
defer r.lock.Unlock()
slots, exists := r.storages[addr]
if !exists {
slots = make(map[common.Hash]int)
r.storages[addr] = slots
}
slots[slot] += 1
return common.Hash{}, nil
}
func makeFetchTasks(n int) ([]*fetchTask, int) {
var (
total int
tasks []*fetchTask
)
for i := 0; i < n; i++ {
var slots []common.Hash
if rand.Intn(3) != 0 {
for j := 0; j < rand.Intn(100); j++ {
slots = append(slots, testrand.Hash())
}
}
tasks = append(tasks, &fetchTask{
addr: testrand.Address(),
slots: slots,
})
total += len(slots) + 1
}
return tasks, total
}
func TestPrefetchReader(t *testing.T) {
type suite struct {
tasks []*fetchTask
threads int
total int
}
var suites []suite
for i := 0; i < 100; i++ {
tasks, total := makeFetchTasks(100)
suites = append(suites, suite{
tasks: tasks,
threads: rand.Intn(30) + 1,
total: total,
})
}
// num(tasks) < num(threads)
tasks, total := makeFetchTasks(1)
suites = append(suites, suite{
tasks: tasks,
threads: 100,
total: total,
})
for _, s := range suites {
r := newRefStateReader()
pr := newPrefetchStateReaderInternal(r, s.tasks, s.threads)
pr.Wait()
if err := r.validate(s.total); err != nil {
t.Fatal(err)
}
}
}
func makeFakeSlots(n int) map[common.Hash]struct{} {
slots := make(map[common.Hash]struct{})
for i := 0; i < n; i++ {
slots[testrand.Hash()] = struct{}{}
}
return slots
}
type noopStateReader struct{}
func (r *noopStateReader) Account(addr common.Address) (*types.StateAccount, error) { return nil, nil }
func (r *noopStateReader) Storage(addr common.Address, slot common.Hash) (common.Hash, error) {
return common.Hash{}, nil
}
type noopCodeReader struct{}
func (r *noopCodeReader) Has(addr common.Address, codeHash common.Hash) bool { return false }
func (r *noopCodeReader) Code(addr common.Address, codeHash common.Hash) ([]byte, error) {
return nil, nil
}
func (r *noopCodeReader) CodeSize(addr common.Address, codeHash common.Hash) (int, error) {
return 0, nil
}
func TestReaderWithTracker(t *testing.T) {
var r Reader = newReaderTracker(newReader(&noopCodeReader{}, &noopStateReader{}))
accesses := map[common.Address]map[common.Hash]struct{}{
testrand.Address(): makeFakeSlots(10),
testrand.Address(): makeFakeSlots(0),
}
for addr, slots := range accesses {
r.Account(addr)
for slot := range slots {
r.Storage(addr, slot)
}
}
got := r.(StateReaderTracker).GetStateAccessList()
if len(got) != len(accesses) {
t.Fatalf("Unexpected access list, want: %d, got: %d", len(accesses), len(got))
}
for addr, slots := range got {
entry, ok := accesses[addr]
if !ok {
t.Fatal("Unexpected access list")
}
if !maps.Equal(slots, entry) {
t.Fatal("Unexpected slots")
}
}
}

View file

@ -0,0 +1,82 @@
// Copyright 2026 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package state
// ContractCodeReaderStats aggregates statistics for the contract code reader.
type ContractCodeReaderStats struct {
CacheHit int64 // Number of cache hits
CacheMiss int64 // Number of cache misses
CacheHitBytes int64 // Total bytes served from cache
CacheMissBytes int64 // Total bytes read on cache misses
}
// HitRate returns the cache hit rate in percentage.
func (s ContractCodeReaderStats) HitRate() float64 {
total := s.CacheHit + s.CacheMiss
if total == 0 {
return 0
}
return float64(s.CacheHit) / float64(total) * 100
}
// ContractCodeReaderStater wraps the method to retrieve the statistics of
// contract code reader.
type ContractCodeReaderStater interface {
GetCodeStats() ContractCodeReaderStats
}
// StateReaderStats aggregates statistics for the state reader.
type StateReaderStats struct {
AccountCacheHit int64 // Number of account cache hits
AccountCacheMiss int64 // Number of account cache misses
StorageCacheHit int64 // Number of storage cache hits
StorageCacheMiss int64 // Number of storage cache misses
}
// AccountCacheHitRate returns the cache hit rate of account requests in percentage.
func (s StateReaderStats) AccountCacheHitRate() float64 {
total := s.AccountCacheHit + s.AccountCacheMiss
if total == 0 {
return 0
}
return float64(s.AccountCacheHit) / float64(total) * 100
}
// StorageCacheHitRate returns the cache hit rate of storage requests in percentage.
func (s StateReaderStats) StorageCacheHitRate() float64 {
total := s.StorageCacheHit + s.StorageCacheMiss
if total == 0 {
return 0
}
return float64(s.StorageCacheHit) / float64(total) * 100
}
// StateReaderStater wraps the method to retrieve the statistics of state reader.
type StateReaderStater interface {
GetStateStats() StateReaderStats
}
// ReaderStats wraps the statistics of reader.
type ReaderStats struct {
CodeStats ContractCodeReaderStats
StateStats StateReaderStats
}
// ReaderStater defines the capability to retrieve aggregated statistics.
type ReaderStater interface {
GetStats() ReaderStats
}

View file

@ -19,6 +19,7 @@ package state
import (
"bytes"
"fmt"
"github.com/ethereum/go-ethereum/core/types/bal"
"maps"
"slices"
"time"
@ -54,6 +55,9 @@ type stateObject struct {
origin *types.StateAccount // Account original data without any change applied, nil means it was not existent
data types.StateAccount // Account data with all mutations applied in the scope of block
txPreBalance *uint256.Int // the account balance after the last call to finalise
txPreNonce uint64 // the account nonce after the last call to finalise
// Write caches.
trie Trie // storage trie, which becomes non-nil on first access
code []byte // contract bytecode, which gets set when code is loaded
@ -76,6 +80,9 @@ type stateObject struct {
// Cache flags.
dirtyCode bool // true if the code was updated
nonFinalizedCode bool // true if the code has been changed in the current transaction
txPrestateCode []byte // set to the value of the code at the beginning of the transaction if it changed in the current transaction
// Flag whether the account was marked as self-destructed. The self-destructed
// account is still accessible in the scope of same transaction.
selfDestructed bool
@ -85,6 +92,8 @@ type stateObject struct {
// the contract is just created within the current transaction, or when the
// object was previously existent and is being deployed as a contract within
// the current transaction.
//
// the flag is set upon beginning of contract initcode execution, not when the code is actually deployed to the address.
newContract bool
}
@ -104,6 +113,8 @@ func newObject(db *StateDB, address common.Address, acct *types.StateAccount) *s
address: address,
origin: origin,
data: *acct,
txPreBalance: acct.Balance.Clone(),
txPreNonce: acct.Nonce,
originStorage: make(Storage),
dirtyStorage: make(Storage),
pendingStorage: make(Storage),
@ -185,6 +196,7 @@ func (s *stateObject) GetCommittedState(key common.Hash) common.Hash {
if value, pending := s.pendingStorage[key]; pending {
return value
}
if value, cached := s.originStorage[key]; cached {
return value
}
@ -240,6 +252,7 @@ func (s *stateObject) SetState(key, value common.Hash) common.Hash {
if prev == value {
return prev
}
// New value is different, update and journal the change
s.db.journal.storageChange(s.address, key, prev, origin)
s.setState(key, value, origin)
@ -259,22 +272,64 @@ func (s *stateObject) setState(key common.Hash, value common.Hash, origin common
// finalise moves all dirty storage slots into the pending area to be hashed or
// committed later. It is invoked at the end of every transaction.
func (s *stateObject) finalise() {
func (s *stateObject) finalise() (mut *bal.AccountMutations) {
mut = &bal.AccountMutations{}
if s.Balance().Cmp(s.txPreBalance) != 0 {
mut.Balance = s.Balance()
}
if s.Nonce() != s.txPreNonce {
mut.Nonce = new(uint64)
*mut.Nonce = s.Nonce()
}
// include account code changes: created contracts and 7702 delegation authority code changes
if s.nonFinalizedCode {
if s.code == nil {
// code cleared (7702). code must be non-nil in the post to signal that it's part of the diff vs being unchanged.
mut.Code = []byte{}
} else {
mut.Code = s.code
}
}
mut.StorageWrites = make(map[common.Hash]common.Hash)
slotsToPrefetch := make([]common.Hash, 0, len(s.dirtyStorage))
for key, value := range s.dirtyStorage {
if origin, exist := s.uncommittedStorage[key]; exist && origin == value {
// non-parallel-execution:
// The slot is reverted to its original value, delete the entry
// to avoid thrashing the data structures.
//
// parallel-exec-with-BAL:
// each statedb instance only executes a single transaction so the previous value
// of the slot won't be in uncommittedStorage
txPrestateVal := s.GetCommittedState(key)
if txPrestateVal != value {
mut.StorageWrites[key] = value
}
delete(s.uncommittedStorage, key)
} else if exist {
// non-parallel-execution:
// The slot is modified to another value and the slot has been
// tracked for commit, do nothing here.
// tracked for commit in uncommittedStorage.
//
// parallel-exec-with-BAL:
// each statedb instance only executes a single transaction so the previous value
// of the slot won't be in uncommittedStorage
mut.StorageWrites[key] = value
} else {
// The slot is different from its original value and hasn't been
// tracked for commit yet.
s.uncommittedStorage[key] = s.GetCommittedState(key)
// Whether executing parallel with BAL or not, the value of the slot before the execution
// of the current transaction is in originStorage
origin := s.GetCommittedState(key)
if value != origin {
mut.StorageWrites[key] = value
}
s.uncommittedStorage[key] = origin
slotsToPrefetch = append(slotsToPrefetch, key) // Copy needed for closure
}
// Aggregate the dirty storage slots into the pending area. It might
// be possible that the value of tracked slot here is same with the
// one in originStorage (e.g. the slot was modified in tx_a and then
@ -283,6 +338,7 @@ func (s *stateObject) finalise() {
// byzantium fork) and entry is necessary to modify the value back.
s.pendingStorage[key] = value
}
if s.db.prefetcher != nil && len(slotsToPrefetch) > 0 && s.data.Root != types.EmptyRootHash {
if err := s.db.prefetcher.prefetch(s.addrHash(), s.data.Root, s.address, nil, slotsToPrefetch, false); err != nil {
log.Error("Failed to prefetch slots", "addr", s.address, "slots", len(slotsToPrefetch), "err", err)
@ -295,6 +351,18 @@ func (s *stateObject) finalise() {
// of the newly-created object as it's no longer eligible for self-destruct
// by EIP-6780. For non-newly-created objects, it's a no-op.
s.newContract = false
s.nonFinalizedCode = false
s.txPrestateCode = nil
// TODO: I had a bug here where i would set both of these to the value of s.data.* and there were no test failures. need to figure out why.
s.txPreBalance = s.Balance().Clone()
s.txPreNonce = s.Nonce()
if mut.Nonce == nil && mut.Code == nil && mut.Balance == nil && len(mut.StorageWrites) == 0 {
return nil
}
return mut
}
// updateTrie is responsible for persisting cached storage changes into the
@ -314,6 +382,7 @@ func (s *stateObject) updateTrie() (Trie, error) {
return s.trie, nil
}
}
// Retrieve a pretecher populated trie, or fall back to the database. This will
// block until all prefetch tasks are done, which are needed for witnesses even
// for unmodified state objects.
@ -345,8 +414,10 @@ func (s *stateObject) updateTrie() (Trie, error) {
// into a shortnode. This requires `B` to be resolved from disk.
// Whereas if the created node is handled first, then the collapse is avoided, and `B` is not resolved.
var (
deletions []common.Hash
used = make([]common.Hash, 0, len(s.uncommittedStorage))
deletions []common.Hash
used = make([]common.Hash, 0, len(s.uncommittedStorage))
updateKeys [][]byte
updateValues [][]byte
)
for key, origin := range s.uncommittedStorage {
// Skip noop changes, persist actual changes
@ -360,10 +431,8 @@ func (s *stateObject) updateTrie() (Trie, error) {
continue
}
if (value != common.Hash{}) {
if err := tr.UpdateStorage(s.address, key[:], common.TrimLeftZeroes(value[:])); err != nil {
s.db.setError(err)
return nil, err
}
updateKeys = append(updateKeys, key[:])
updateValues = append(updateValues, common.TrimLeftZeroes(value[:]))
s.db.StorageUpdated.Add(1)
} else {
deletions = append(deletions, key)
@ -371,6 +440,12 @@ func (s *stateObject) updateTrie() (Trie, error) {
// Cache the items for preloading
used = append(used, key) // Copy needed for closure
}
if len(updateKeys) > 0 {
if err := tr.UpdateStorageBatch(common.Address{}, updateKeys, updateValues); err != nil {
s.db.setError(err)
return nil, err
}
}
for _, key := range deletions {
if err := tr.DeleteStorage(s.address, key[:]); err != nil {
s.db.setError(err)
@ -522,6 +597,8 @@ func (s *stateObject) deepCopy(db *StateDB) *stateObject {
dirtyCode: s.dirtyCode,
selfDestructed: s.selfDestructed,
newContract: s.newContract,
txPreBalance: s.txPreBalance.Clone(),
txPreNonce: s.txPreNonce,
}
switch s.trie.(type) {
@ -604,13 +681,25 @@ func (s *stateObject) SetCode(codeHash common.Hash, code []byte) (prev []byte) {
prev = slices.Clone(s.code)
s.db.journal.setCode(s.address, prev)
s.setCode(codeHash, code)
if s.txPrestateCode == nil {
if prev == nil {
prev = []byte{}
}
s.txPrestateCode = prev
}
if !bytes.Equal(code, s.txPrestateCode) {
s.dirtyCode = true
s.nonFinalizedCode = true
} else {
s.nonFinalizedCode = false
}
return prev
}
func (s *stateObject) setCode(codeHash common.Hash, code []byte) {
s.code = code
s.data.CodeHash = codeHash[:]
s.dirtyCode = true
}
func (s *stateObject) SetNonce(nonce uint64) {

View file

@ -20,6 +20,8 @@ package state
import (
"errors"
"fmt"
"github.com/ethereum/go-ethereum/core/types/bal"
"iter"
"maps"
"slices"
"sort"
@ -65,6 +67,14 @@ func (m *mutation) isDelete() bool {
return m.typ == deletion
}
type BlockStateTransition interface {
CommitWithUpdate(block uint64, deleteEmptyObjects bool, noStorageWiping bool) (common.Hash, *stateUpdate, error)
Commit(block uint64, deleteEmptyObjects bool, noStorageWiping bool) (common.Hash, error)
IntermediateRoot(deleteEmpty bool) common.Hash
Error() error
Preimages() map[common.Hash][]byte
}
// StateDB structs within the ethereum protocol are used to store anything
// within the merkle trie. StateDBs take care of caching and storing
// nested states. It's the general query interface to retrieve:
@ -118,6 +128,13 @@ type StateDB struct {
// The tx context and all occurred logs in the scope of transaction.
thash common.Hash
txIndex int
// block access list modifications will be recorded with this index.
// 0 - state access before transaction execution
// 1 -> len(block txs) - state access of each transaction
// len(block txs) + 1 - state access after transaction execution.
balIndex int
logs map[common.Hash][]*types.Log
logSize uint
@ -200,6 +217,13 @@ func NewWithReader(root common.Hash, db Database, reader Reader) (*StateDB, erro
return sdb, nil
}
// WithReader returns a copy of the statedb instance with the specified reader.
func (s *StateDB) WithReader(reader Reader) *StateDB {
cpy := s.Copy()
cpy.reader = reader
return cpy
}
// StartPrefetcher initializes a new trie prefetcher to pull in nodes from the
// state trie concurrently while the state is mutated so that when we reach the
// commit phase, most of the needed data is already hot.
@ -315,6 +339,11 @@ func (s *StateDB) Exist(addr common.Address) bool {
return s.getStateObject(addr) != nil
}
func (s *StateDB) ExistBeforeCurTx(addr common.Address) bool {
obj := s.getStateObject(addr)
return obj != nil && !obj.newContract
}
// Empty returns whether the state object is either non-existent
// or empty according to the EIP161 specification (balance = nonce = code = 0)
func (s *StateDB) Empty(addr common.Address) bool {
@ -570,6 +599,25 @@ func (s *StateDB) updateStateObject(obj *stateObject) {
s.trie.UpdateContractCode(obj.Address(), common.BytesToHash(obj.CodeHash()), obj.code)
}
}
func (s *StateDB) updateStateObjects(objs []*stateObject) {
var addrs []common.Address
var accts []*types.StateAccount
for _, obj := range objs {
addrs = append(addrs, obj.Address())
accts = append(accts, &obj.data)
}
if err := s.trie.UpdateAccountBatch(addrs, accts, nil); err != nil {
s.setError(fmt.Errorf("updateStateObjects error: %v", err))
}
for _, obj := range objs {
if obj.dirtyCode {
s.trie.UpdateContractCode(obj.Address(), common.BytesToHash(obj.CodeHash()), obj.code)
}
}
}
// deleteStateObject removes the given object from the state trie.
func (s *StateDB) deleteStateObject(addr common.Address) {
@ -589,6 +637,7 @@ func (s *StateDB) getStateObject(addr common.Address) *stateObject {
if _, ok := s.stateObjectsDestruct[addr]; ok {
return nil
}
s.AccountLoaded++
start := time.Now()
@ -625,6 +674,7 @@ func (s *StateDB) getOrNewStateObject(addr common.Address) *stateObject {
if obj == nil {
obj = s.createObject(addr)
}
return obj
}
@ -683,6 +733,7 @@ func (s *StateDB) Copy() *StateDB {
refund: s.refund,
thash: s.thash,
txIndex: s.txIndex,
balIndex: s.txIndex,
logs: make(map[common.Hash][]*types.Log, len(s.logs)),
logSize: s.logSize,
preimages: maps.Clone(s.preimages),
@ -770,8 +821,10 @@ func (s *StateDB) GetRemovedAccountsWithBalance() (list []RemovedAccountWithBala
// Finalise finalises the state by removing the destructed objects and clears
// the journal as well as the refunds. Finalise, however, will not push any updates
// into the tries just yet. Only IntermediateRoot or Commit will do that.
func (s *StateDB) Finalise(deleteEmptyObjects bool) {
func (s *StateDB) Finalise(deleteEmptyObjects bool) (mutations bal.StateMutations) {
addressesToPrefetch := make([]common.Address, 0, len(s.journal.dirties))
mutations = make(bal.StateMutations)
for addr := range s.journal.dirties {
obj, exist := s.stateObjects[addr]
if !exist {
@ -792,8 +845,19 @@ func (s *StateDB) Finalise(deleteEmptyObjects bool) {
if _, ok := s.stateObjectsDestruct[obj.address]; !ok {
s.stateObjectsDestruct[obj.address] = obj
}
// a pre-existing account can only be removed from the state under the following circumstance:
// it had a balance and was the target of a create2 which selfdestructed in the initcode
if !obj.txPreBalance.IsZero() {
mutations[addr] = bal.AccountMutations{
Balance: uint256.NewInt(0),
}
}
} else {
obj.finalise()
mut := obj.finalise()
if mut != nil {
mutations[addr] = *mut
}
s.markUpdate(addr)
}
// At this point, also ship the address off to the precacher. The precacher
@ -808,6 +872,7 @@ func (s *StateDB) Finalise(deleteEmptyObjects bool) {
}
// Invalidate journal because reverting across transactions is not allowed.
s.clearJournalAndRefund()
return mutations
}
// IntermediateRoot computes the current root hash of the state trie.
@ -855,12 +920,18 @@ func (s *StateDB) IntermediateRoot(deleteEmptyObjects bool) common.Hash {
// later time.
workers.SetLimit(1)
}
var updatedAddrs []common.Address
for addr, op := range s.mutations {
if op.applied || op.isDelete() {
continue
}
obj := s.stateObjects[addr] // closure for the task runner below
updatedAddrs = append(updatedAddrs, addr)
}
for _, addr := range updatedAddrs {
workers.Go(func() error {
obj := s.stateObjects[addr] // closure for the task runner below
if s.db.TrieDB().IsVerkle() {
obj.updateTrie()
} else {
@ -955,6 +1026,7 @@ func (s *StateDB) IntermediateRoot(deleteEmptyObjects bool) common.Hash {
var (
usedAddrs []common.Address
deletedAddrs []common.Address
updatedObjs []*stateObject
)
for addr, op := range s.mutations {
if op.applied {
@ -966,7 +1038,7 @@ func (s *StateDB) IntermediateRoot(deleteEmptyObjects bool) common.Hash {
deletedAddrs = append(deletedAddrs, addr)
} else {
obj := s.stateObjects[addr]
s.updateStateObject(obj)
updatedObjs = append(updatedObjs, obj)
s.AccountUpdated += 1
// Count code writes post-Finalise so reverted CREATEs are excluded.
@ -977,6 +1049,9 @@ func (s *StateDB) IntermediateRoot(deleteEmptyObjects bool) common.Hash {
}
usedAddrs = append(usedAddrs, addr) // Copy needed for closure
}
if len(updatedObjs) > 0 {
s.updateStateObjects(updatedObjs)
}
for _, deletedAddr := range deletedAddrs {
s.deleteStateObject(deletedAddr)
s.AccountDeleted += 1
@ -988,9 +1063,7 @@ func (s *StateDB) IntermediateRoot(deleteEmptyObjects bool) common.Hash {
}
// Track the amount of time wasted on hashing the account trie
defer func(start time.Time) { s.AccountHashes += time.Since(start) }(time.Now())
hash := s.trie.Hash()
// If witness building is enabled, gather the account trie witness
if s.witness != nil {
witness := s.trie.Witness()
@ -999,6 +1072,7 @@ func (s *StateDB) IntermediateRoot(deleteEmptyObjects bool) common.Hash {
s.witnessStats.Add(witness, common.Hash{})
}
}
return hash
}
@ -1008,6 +1082,14 @@ func (s *StateDB) IntermediateRoot(deleteEmptyObjects bool) common.Hash {
func (s *StateDB) SetTxContext(thash common.Hash, ti int) {
s.thash = thash
s.txIndex = ti
s.balIndex = ti + 1
}
// SetAccessListIndex sets the current index that state mutations will
// be reported as in the BAL. It is only relevant if this StateDB instance
// is being used in the BAL construction path.
func (s *StateDB) SetAccessListIndex(idx int) {
s.balIndex = idx
}
func (s *StateDB) clearJournalAndRefund() {
@ -1019,8 +1101,8 @@ func (s *StateDB) clearJournalAndRefund() {
// of a specific account. It leverages the associated state snapshot for fast
// storage iteration and constructs trie node deletion markers by creating
// stack trie with iterated slots.
func (s *StateDB) fastDeleteStorage(snaps *snapshot.Tree, addrHash common.Hash, root common.Hash) (map[common.Hash][]byte, map[common.Hash][]byte, *trienode.NodeSet, error) {
iter, err := snaps.StorageIterator(s.originalRoot, addrHash, common.Hash{})
func fastDeleteStorage(originalRoot common.Hash, snaps *snapshot.Tree, addrHash common.Hash, root common.Hash) (map[common.Hash][]byte, map[common.Hash][]byte, *trienode.NodeSet, error) {
iter, err := snaps.StorageIterator(originalRoot, addrHash, common.Hash{})
if err != nil {
return nil, nil, nil, err
}
@ -1059,8 +1141,8 @@ func (s *StateDB) fastDeleteStorage(snaps *snapshot.Tree, addrHash common.Hash,
// slowDeleteStorage serves as a less-efficient alternative to "fastDeleteStorage,"
// employed when the associated state snapshot is not available. It iterates the
// storage slots along with all internal trie nodes via trie directly.
func (s *StateDB) slowDeleteStorage(addr common.Address, addrHash common.Hash, root common.Hash) (map[common.Hash][]byte, map[common.Hash][]byte, *trienode.NodeSet, error) {
tr, err := s.db.OpenStorageTrie(s.originalRoot, addr, root, s.trie)
func slowDeleteStorage(db Database, trie Trie, originalRoot common.Hash, addr common.Address, addrHash common.Hash, root common.Hash) (map[common.Hash][]byte, map[common.Hash][]byte, *trienode.NodeSet, error) {
tr, err := db.OpenStorageTrie(originalRoot, addr, root, trie)
if err != nil {
return nil, nil, nil, fmt.Errorf("failed to open storage trie, err: %w", err)
}
@ -1095,7 +1177,7 @@ func (s *StateDB) slowDeleteStorage(addr common.Address, addrHash common.Hash, r
// The function will make an attempt to utilize an efficient strategy if the
// associated state snapshot is reachable; otherwise, it will resort to a less
// efficient approach.
func (s *StateDB) deleteStorage(addr common.Address, addrHash common.Hash, root common.Hash) (map[common.Hash][]byte, map[common.Hash][]byte, *trienode.NodeSet, error) {
func deleteStorage(db Database, trie Trie, addr common.Address, addrHash common.Hash, root, originalRoot common.Hash) (map[common.Hash][]byte, map[common.Hash][]byte, *trienode.NodeSet, error) {
var (
err error
nodes *trienode.NodeSet // the set for trie node mutations (value is nil)
@ -1105,12 +1187,12 @@ func (s *StateDB) deleteStorage(addr common.Address, addrHash common.Hash, root
// The fast approach can be failed if the snapshot is not fully
// generated, or it's internally corrupted. Fallback to the slow
// one just in case.
snaps := s.db.Snapshot()
snaps := db.Snapshot()
if snaps != nil {
storages, storageOrigins, nodes, err = s.fastDeleteStorage(snaps, addrHash, root)
storages, storageOrigins, nodes, err = fastDeleteStorage(originalRoot, snaps, addrHash, root)
}
if snaps == nil || err != nil {
storages, storageOrigins, nodes, err = s.slowDeleteStorage(addr, addrHash, root)
storages, storageOrigins, nodes, err = slowDeleteStorage(db, trie, originalRoot, addr, addrHash, root)
}
if err != nil {
return nil, nil, nil, err
@ -1136,39 +1218,38 @@ func (s *StateDB) deleteStorage(addr common.Address, addrHash common.Hash, root
// with their values be tracked as original value.
// In case (d), **original** account along with its storages should be deleted,
// with their values be tracked as original value.
func (s *StateDB) handleDestruction(noStorageWiping bool) (map[common.Hash]*accountDelete, []*trienode.NodeSet, error) {
func handleDestruction(db Database, trie Trie, noStorageWiping bool, destructions iter.Seq[common.Address], prestates map[common.Address]*types.StateAccount) (map[common.Hash]*accountDelete, []*trienode.NodeSet, error) {
var (
nodes []*trienode.NodeSet
deletes = make(map[common.Hash]*accountDelete)
)
for addr, prevObj := range s.stateObjectsDestruct {
prev := prevObj.origin
for addr := range destructions {
prestate := prestates[addr]
// The account was non-existent, and it's marked as destructed in the scope
// of block. It can be either case (a) or (b) and will be interpreted as
// null->null state transition.
// - for (a), skip it without doing anything
// - for (b), the resurrected account with nil as original will be handled afterwards
if prev == nil {
if prestate == nil {
continue
}
// The account was existent, it can be either case (c) or (d).
addrHash := crypto.Keccak256Hash(addr.Bytes())
op := &accountDelete{
address: addr,
origin: types.SlimAccountRLP(*prev),
origin: types.SlimAccountRLP(*prestate),
}
deletes[addrHash] = op
// Short circuit if the origin storage was empty.
if prev.Root == types.EmptyRootHash || s.db.TrieDB().IsVerkle() {
if prestate.Root == types.EmptyRootHash || db.TrieDB().IsVerkle() {
continue
}
if noStorageWiping {
return nil, nil, fmt.Errorf("unexpected storage wiping, %x", addr)
}
// Remove storage slots belonging to the account.
storages, storagesOrigin, set, err := s.deleteStorage(addr, addrHash, prev.Root)
storages, storagesOrigin, set, err := deleteStorage(db, trie, addr, addrHash, prestate.Root, prestate.Root)
if err != nil {
return nil, nil, fmt.Errorf("failed to delete storage, err: %w", err)
}
@ -1193,6 +1274,7 @@ func (s *StateDB) commit(deleteEmptyObjects bool, noStorageWiping bool, blockNum
if s.dbErr != nil {
return nil, fmt.Errorf("commit aborted due to earlier error: %v", s.dbErr)
}
// Finalize any pending changes and merge everything into the tries
s.IntermediateRoot(deleteEmptyObjects)
@ -1242,7 +1324,12 @@ func (s *StateDB) commit(deleteEmptyObjects bool, noStorageWiping bool, blockNum
// the same block, account deletions must be processed first. This ensures
// that the storage trie nodes deleted during destruction and recreated
// during subsequent resurrection can be combined correctly.
deletes, delNodes, err := s.handleDestruction(noStorageWiping)
var stateAccountsDestruct, destructAccountsOrigins = make(map[common.Address]*types.StateAccount), make(map[common.Address]*types.StateAccount)
for addr, obj := range s.stateObjectsDestruct {
stateAccountsDestruct[addr] = &obj.data
destructAccountsOrigins[addr] = obj.origin
}
deletes, delNodes, err := handleDestruction(s.db, s.trie, noStorageWiping, maps.Keys(stateAccountsDestruct), destructAccountsOrigins)
if err != nil {
return nil, err
}
@ -1343,6 +1430,44 @@ func (s *StateDB) commit(deleteEmptyObjects bool, noStorageWiping bool, blockNum
return newStateUpdate(noStorageWiping, origin, root, blockNumber, deletes, updates, nodes), nil
}
func flushStateUpdate(d Database, block uint64, update *stateUpdate) (snapshotCommits, trieDBCommits time.Duration, err error) {
if db := d.TrieDB().Disk(); db != nil && len(update.codes) > 0 {
batch := db.NewBatch()
for _, code := range update.codes {
rawdb.WriteCode(batch, code.hash, code.blob)
}
if err := batch.Write(); err != nil {
return 0, 0, err
}
}
if !update.empty() {
// If snapshotting is enabled, update the snapshot tree with this new version
if snap := d.Snapshot(); snap != nil && snap.Snapshot(update.originRoot) != nil {
start := time.Now()
if err := snap.Update(update.root, update.originRoot, update.accounts, update.storages); err != nil {
log.Warn("Failed to update snapshot tree", "from", update.originRoot, "to", update.root, "err", err)
}
// Keep 128 diff layers in the memory, persistent layer is 129th.
// - head layer is paired with HEAD state
// - head-1 layer is paired with HEAD-1 state
// - head-127 layer(bottom-most diff layer) is paired with HEAD-127 state
if err := snap.Cap(update.root, TriesInMemory); err != nil {
log.Warn("Failed to cap snapshot tree", "root", update.root, "layers", TriesInMemory, "err", err)
}
snapshotCommits += time.Since(start)
}
// If trie database is enabled, commit the state update as a new layer
if db := d.TrieDB(); db != nil {
start := time.Now()
if err := db.Update(update.root, update.originRoot, block, update.nodes, update.stateSet()); err != nil {
return 0, 0, err
}
trieDBCommits += time.Since(start)
}
}
return snapshotCommits, trieDBCommits, nil
}
// commitAndFlush is a wrapper of commit which also commits the state mutations
// to the configured data stores.
func (s *StateDB) commitAndFlush(block uint64, deleteEmptyObjects bool, noStorageWiping bool, deriveCodeFields bool) (*stateUpdate, error) {
@ -1350,46 +1475,18 @@ func (s *StateDB) commitAndFlush(block uint64, deleteEmptyObjects bool, noStorag
if err != nil {
return nil, err
}
// TODO: move this check inside flushStateUpdate?
if deriveCodeFields {
if err := ret.deriveCodeFields(s.reader); err != nil {
return nil, err
}
}
// Commit dirty contract code if any exists
if db := s.db.TrieDB().Disk(); db != nil && len(ret.codes) > 0 {
batch := db.NewBatch()
for _, code := range ret.codes {
rawdb.WriteCode(batch, code.hash, code.blob)
}
if err := batch.Write(); err != nil {
return nil, err
}
}
if !ret.empty() {
// If snapshotting is enabled, update the snapshot tree with this new version
if snap := s.db.Snapshot(); snap != nil && snap.Snapshot(ret.originRoot) != nil {
start := time.Now()
if err := snap.Update(ret.root, ret.originRoot, ret.accounts, ret.storages); err != nil {
log.Warn("Failed to update snapshot tree", "from", ret.originRoot, "to", ret.root, "err", err)
}
// Keep 128 diff layers in the memory, persistent layer is 129th.
// - head layer is paired with HEAD state
// - head-1 layer is paired with HEAD-1 state
// - head-127 layer(bottom-most diff layer) is paired with HEAD-127 state
if err := snap.Cap(ret.root, TriesInMemory); err != nil {
log.Warn("Failed to cap snapshot tree", "root", ret.root, "layers", TriesInMemory, "err", err)
}
s.SnapshotCommits += time.Since(start)
}
// If trie database is enabled, commit the state update as a new layer
if db := s.db.TrieDB(); db != nil {
start := time.Now()
if err := db.Update(ret.root, ret.originRoot, block, ret.nodes, ret.stateSet()); err != nil {
return nil, err
}
s.TrieDBCommits += time.Since(start)
}
snapshotCommits, trieDBCommits, err := flushStateUpdate(s.db, block, ret)
if err != nil {
return nil, err
}
s.SnapshotCommits = snapshotCommits
s.TrieDBCommits = trieDBCommits
s.reader, _ = s.db.Reader(s.originalRoot)
return ret, err
}

View file

@ -18,6 +18,7 @@ package state
import (
"bytes"
"github.com/ethereum/go-ethereum/core/types/bal"
"math/big"
"sort"
@ -59,22 +60,37 @@ func (s *hookedStateDB) IsNewContract(addr common.Address) bool {
}
func (s *hookedStateDB) GetBalance(addr common.Address) *uint256.Int {
if s.hooks.OnAccountRead != nil {
s.hooks.OnAccountRead(addr)
}
return s.inner.GetBalance(addr)
}
func (s *hookedStateDB) GetNonce(addr common.Address) uint64 {
if s.hooks.OnAccountRead != nil {
s.hooks.OnAccountRead(addr)
}
return s.inner.GetNonce(addr)
}
func (s *hookedStateDB) GetCodeHash(addr common.Address) common.Hash {
if s.hooks.OnAccountRead != nil {
s.hooks.OnAccountRead(addr)
}
return s.inner.GetCodeHash(addr)
}
func (s *hookedStateDB) GetCode(addr common.Address) []byte {
if s.hooks.OnAccountRead != nil {
s.hooks.OnAccountRead(addr)
}
return s.inner.GetCode(addr)
}
func (s *hookedStateDB) GetCodeSize(addr common.Address) int {
if s.hooks.OnAccountRead != nil {
s.hooks.OnAccountRead(addr)
}
return s.inner.GetCodeSize(addr)
}
@ -91,14 +107,23 @@ func (s *hookedStateDB) GetRefund() uint64 {
}
func (s *hookedStateDB) GetStateAndCommittedState(addr common.Address, hash common.Hash) (common.Hash, common.Hash) {
if s.hooks.OnStorageRead != nil {
s.hooks.OnStorageRead(addr, hash)
}
return s.inner.GetStateAndCommittedState(addr, hash)
}
func (s *hookedStateDB) GetState(addr common.Address, hash common.Hash) common.Hash {
if s.hooks.OnStorageRead != nil {
s.hooks.OnStorageRead(addr, hash)
}
return s.inner.GetState(addr, hash)
}
func (s *hookedStateDB) GetStorageRoot(addr common.Address) common.Hash {
if s.hooks.OnAccountRead != nil {
s.hooks.OnAccountRead(addr)
}
return s.inner.GetStorageRoot(addr)
}
@ -111,14 +136,23 @@ func (s *hookedStateDB) SetTransientState(addr common.Address, key, value common
}
func (s *hookedStateDB) HasSelfDestructed(addr common.Address) bool {
if s.hooks.OnAccountRead != nil {
s.hooks.OnAccountRead(addr)
}
return s.inner.HasSelfDestructed(addr)
}
func (s *hookedStateDB) Exist(addr common.Address) bool {
if s.hooks.OnAccountRead != nil {
s.hooks.OnAccountRead(addr)
}
return s.inner.Exist(addr)
}
func (s *hookedStateDB) Empty(addr common.Address) bool {
if s.hooks.OnAccountRead != nil {
s.hooks.OnAccountRead(addr)
}
return s.inner.Empty(addr)
}
@ -221,6 +255,10 @@ func (s *hookedStateDB) SelfDestruct(address common.Address) {
s.inner.SelfDestruct(address)
}
func (s *hookedStateDB) ExistBeforeCurTx(addr common.Address) bool {
return s.inner.ExistBeforeCurTx(addr)
}
func (s *hookedStateDB) AddLog(log *types.Log) {
// The inner will modify the log (add fields), so invoke that first
s.inner.AddLog(log)
@ -229,11 +267,10 @@ func (s *hookedStateDB) AddLog(log *types.Log) {
}
}
func (s *hookedStateDB) Finalise(deleteEmptyObjects bool) {
func (s *hookedStateDB) Finalise(deleteEmptyObjects bool) bal.StateMutations {
if s.hooks.OnBalanceChange == nil && s.hooks.OnNonceChangeV2 == nil && s.hooks.OnNonceChange == nil && s.hooks.OnCodeChangeV2 == nil && s.hooks.OnCodeChange == nil {
// Short circuit if no relevant hooks are set.
s.inner.Finalise(deleteEmptyObjects)
return
return s.inner.Finalise(deleteEmptyObjects)
}
// Collect all self-destructed addresses first, then sort them to ensure
@ -272,16 +309,23 @@ func (s *hookedStateDB) Finalise(deleteEmptyObjects bool) {
// If an initcode invokes selfdestruct, do not emit a code change.
prevCodeHash := s.inner.GetCodeHash(addr)
if prevCodeHash == types.EmptyCodeHash {
continue
if prevCodeHash != types.EmptyCodeHash {
// Otherwise, trace the change.
if s.hooks.OnCodeChangeV2 != nil {
s.hooks.OnCodeChangeV2(addr, prevCodeHash, s.inner.GetCode(addr), types.EmptyCodeHash, nil, tracing.CodeChangeSelfDestruct)
} else if s.hooks.OnCodeChange != nil {
s.hooks.OnCodeChange(addr, prevCodeHash, s.inner.GetCode(addr), types.EmptyCodeHash, nil)
}
}
// Otherwise, trace the change.
if s.hooks.OnCodeChangeV2 != nil {
s.hooks.OnCodeChangeV2(addr, prevCodeHash, s.inner.GetCode(addr), types.EmptyCodeHash, nil, tracing.CodeChangeSelfDestruct)
} else if s.hooks.OnCodeChange != nil {
s.hooks.OnCodeChange(addr, prevCodeHash, s.inner.GetCode(addr), types.EmptyCodeHash, nil)
if s.hooks.OnSelfDestructChange != nil {
s.hooks.OnSelfDestructChange(addr)
}
}
s.inner.Finalise(deleteEmptyObjects)
return s.inner.Finalise(deleteEmptyObjects)
}
func (s *hookedStateDB) TxIndex() int {
return s.inner.TxIndex()
}

View file

@ -19,6 +19,7 @@ package core
import (
"context"
"fmt"
"github.com/ethereum/go-ethereum/core/types/bal"
"math/big"
"sort"
@ -62,13 +63,15 @@ func (p *StateProcessor) chainConfig() *params.ChainConfig {
// transactions failed to execute due to insufficient gas it will return an error.
func (p *StateProcessor) Process(ctx context.Context, block *types.Block, statedb *state.StateDB, cfg vm.Config) (*ProcessResult, error) {
var (
config = p.chainConfig()
receipts types.Receipts
header = block.Header()
blockHash = block.Hash()
blockNumber = block.Number()
allLogs []*types.Log
gp = NewGasPool(block.GasLimit())
config = p.chainConfig()
receipts types.Receipts
header = block.Header()
blockHash = block.Hash()
blockNumber = block.Number()
allLogs []*types.Log
gp = NewGasPool(block.GasLimit())
computedAccessList = make(bal.ConstructionBlockAccessList)
isAmsterdam = p.chainConfig().IsAmsterdam(block.Number(), block.Time())
)
var tracingStateDB = vm.StateDB(statedb)
if hooks := cfg.Tracer; hooks != nil {
@ -89,10 +92,16 @@ func (p *StateProcessor) Process(ctx context.Context, block *types.Block, stated
evm := vm.NewEVM(context, tracingStateDB, config, cfg)
if beaconRoot := block.BeaconRoot(); beaconRoot != nil {
ProcessBeaconBlockRoot(*beaconRoot, evm)
mutations := ProcessBeaconBlockRoot(*beaconRoot, evm)
if isAmsterdam {
computedAccessList.AccumulateMutations(mutations, 0)
}
}
if config.IsPrague(block.Number(), block.Time()) || config.IsVerkle(block.Number(), block.Time()) {
ProcessParentBlockHash(block.ParentHash(), evm)
mutations := ProcessParentBlockHash(block.ParentHash(), evm)
if isAmsterdam {
computedAccessList.AccumulateMutations(mutations, 0)
}
}
// Iterate over and process the individual transactions
@ -106,8 +115,11 @@ func (p *StateProcessor) Process(ctx context.Context, block *types.Block, stated
telemetry.StringAttribute("tx.hash", tx.Hash().Hex()),
telemetry.Int64Attribute("tx.index", int64(i)),
)
receipt, err := ApplyTransactionWithEVM(msg, gp, statedb, blockNumber, blockHash, context.Time, tx, evm)
var (
receipt *types.Receipt
mutations bal.StateMutations
)
mutations, receipt, err = ApplyTransactionWithEVM(msg, gp, statedb, blockNumber, blockHash, context.Time, tx, evm)
if err != nil {
return nil, fmt.Errorf("could not apply tx %d [%v]: %w", i, tx.Hash().Hex(), err)
}
@ -115,52 +127,66 @@ func (p *StateProcessor) Process(ctx context.Context, block *types.Block, stated
allLogs = append(allLogs, receipt.Logs...)
spanEnd(&err)
if isAmsterdam {
computedAccessList.AccumulateMutations(mutations, uint16(i)+1)
}
}
requests, err := postExecution(ctx, config, block, allLogs, evm)
postMut, requests, err := postExecution(ctx, config, block, allLogs, evm)
if err != nil {
return nil, err
}
// Finalize the block, applying any consensus engine specific extras (e.g. block rewards)
p.chain.Engine().Finalize(p.chain, header, tracingStateDB, block.Body())
eip4985WithdrawalMuts := p.chain.Engine().Finalize(p.chain, header, tracingStateDB, block.Body())
postMut.Merge(eip4985WithdrawalMuts)
if isAmsterdam {
computedAccessList.AccumulateMutations(postMut, uint16(len(block.Transactions()))+1)
accesses := statedb.Reader().(state.StateReaderTracker).GetStateAccessList()
computedAccessList.AccumulateReads(accesses)
}
return &ProcessResult{
Receipts: receipts,
Requests: requests,
Logs: allLogs,
GasUsed: gp.Used(),
Receipts: receipts,
Requests: requests,
Logs: allLogs,
GasUsed: gp.Used(),
AccessList: computedAccessList,
}, nil
}
// postExecution processes the post-execution system calls if Prague is enabled.
func postExecution(ctx context.Context, config *params.ChainConfig, block *types.Block, allLogs []*types.Log, evm *vm.EVM) (requests [][]byte, err error) {
func postExecution(ctx context.Context, config *params.ChainConfig, block *types.Block, allLogs []*types.Log, evm *vm.EVM) (mut bal.StateMutations, requests [][]byte, err error) {
_, _, spanEnd := telemetry.StartSpan(ctx, "core.postExecution")
defer spanEnd(&err)
mut = make(bal.StateMutations)
// Read requests if Prague is enabled.
if config.IsPrague(block.Number(), block.Time()) {
requests = [][]byte{}
// EIP-6110
if err := ParseDepositLogs(&requests, allLogs, config); err != nil {
return requests, fmt.Errorf("failed to parse deposit logs: %w", err)
return mut, requests, fmt.Errorf("failed to parse deposit logs: %w", err)
}
// EIP-7002
if err := ProcessWithdrawalQueue(&requests, evm); err != nil {
return requests, fmt.Errorf("failed to process withdrawal queue: %w", err)
if mut, err = ProcessWithdrawalQueue(&requests, evm); err != nil {
return mut, requests, fmt.Errorf("failed to process withdrawal queue: %w", err)
}
// EIP-7251
if err := ProcessConsolidationQueue(&requests, evm); err != nil {
return requests, fmt.Errorf("failed to process consolidation queue: %w", err)
consolidationMut, err := ProcessConsolidationQueue(&requests, evm)
if err != nil {
return mut, requests, fmt.Errorf("failed to process consolidation queue: %w", err)
}
mut.Merge(consolidationMut)
}
return requests, nil
return mut, requests, nil
}
// ApplyTransactionWithEVM attempts to apply a transaction to the given state database
// and uses the input parameters for its environment similar to ApplyTransaction. However,
// this method takes an already created EVM instance as input.
func ApplyTransactionWithEVM(msg *Message, gp *GasPool, statedb *state.StateDB, blockNumber *big.Int, blockHash common.Hash, blockTime uint64, tx *types.Transaction, evm *vm.EVM) (receipt *types.Receipt, err error) {
func ApplyTransactionWithEVM(msg *Message, gp *GasPool, statedb *state.StateDB, blockNumber *big.Int, blockHash common.Hash, blockTime uint64, tx *types.Transaction, evm *vm.EVM) (mutations bal.StateMutations, receipt *types.Receipt, err error) {
if hooks := evm.Config.Tracer; hooks != nil {
if hooks.OnTxStart != nil {
hooks.OnTxStart(evm.GetVMContext(), tx, msg.From)
@ -172,7 +198,7 @@ func ApplyTransactionWithEVM(msg *Message, gp *GasPool, statedb *state.StateDB,
// Apply the transaction to the current state (included in the env).
result, err := ApplyMessage(evm, msg, gp)
if err != nil {
return nil, err
return nil, nil, err
}
if evm.ChainConfig().IsAmsterdam(blockNumber, blockTime) {
// Emit burn logs where accounts with non-empty balances have been deleted
@ -189,7 +215,7 @@ func ApplyTransactionWithEVM(msg *Message, gp *GasPool, statedb *state.StateDB,
// Update the state with pending changes.
var root []byte
if evm.ChainConfig().IsByzantium(blockNumber) {
evm.StateDB.Finalise(true)
mutations = evm.StateDB.Finalise(true)
} else {
root = statedb.IntermediateRoot(evm.ChainConfig().IsEIP158(blockNumber)).Bytes()
}
@ -198,7 +224,7 @@ func ApplyTransactionWithEVM(msg *Message, gp *GasPool, statedb *state.StateDB,
if statedb.Database().TrieDB().IsVerkle() {
statedb.AccessEvents().Merge(evm.AccessEvents)
}
return MakeReceipt(evm, result, statedb, blockNumber, blockHash, blockTime, tx, gp.CumulativeUsed(), root), nil
return mutations, MakeReceipt(evm, result, statedb, blockNumber, blockHash, blockTime, tx, gp.CumulativeUsed(), root), nil
}
// MakeReceipt generates the receipt object for a transaction given its execution result.
@ -243,10 +269,10 @@ func MakeReceipt(evm *vm.EVM, result *ExecutionResult, statedb *state.StateDB, b
// and uses the input parameters for its environment. It returns the receipt
// for the transaction and an error if the transaction failed,
// indicating the block was invalid.
func ApplyTransaction(evm *vm.EVM, gp *GasPool, statedb *state.StateDB, header *types.Header, tx *types.Transaction) (*types.Receipt, error) {
func ApplyTransaction(evm *vm.EVM, gp *GasPool, statedb *state.StateDB, header *types.Header, tx *types.Transaction) (bal.StateMutations, *types.Receipt, error) {
msg, err := TransactionToMessage(tx, types.MakeSigner(evm.ChainConfig(), header.Number, header.Time), header.BaseFee)
if err != nil {
return nil, err
return nil, nil, err
}
// Create a new context to be used in the EVM environment
return ApplyTransactionWithEVM(msg, gp, statedb, header.Number, header.Hash(), header.Time, tx, evm)
@ -254,7 +280,7 @@ func ApplyTransaction(evm *vm.EVM, gp *GasPool, statedb *state.StateDB, header *
// ProcessBeaconBlockRoot applies the EIP-4788 system call to the beacon block root
// contract. This method is exported to be used in tests.
func ProcessBeaconBlockRoot(beaconRoot common.Hash, evm *vm.EVM) {
func ProcessBeaconBlockRoot(beaconRoot common.Hash, evm *vm.EVM) bal.StateMutations {
if tracer := evm.Config.Tracer; tracer != nil {
onSystemCallStart(tracer, evm.GetVMContext())
if tracer.OnSystemCallEnd != nil {
@ -273,12 +299,12 @@ func ProcessBeaconBlockRoot(beaconRoot common.Hash, evm *vm.EVM) {
evm.SetTxContext(NewEVMTxContext(msg))
evm.StateDB.AddAddressToAccessList(params.BeaconRootsAddress)
_, _, _ = evm.Call(msg.From, *msg.To, msg.Data, 30_000_000, common.U2560)
evm.StateDB.Finalise(true)
return evm.StateDB.Finalise(true)
}
// ProcessParentBlockHash stores the parent block hash in the history storage contract
// as per EIP-2935/7709.
func ProcessParentBlockHash(prevHash common.Hash, evm *vm.EVM) {
func ProcessParentBlockHash(prevHash common.Hash, evm *vm.EVM) bal.StateMutations {
if tracer := evm.Config.Tracer; tracer != nil {
onSystemCallStart(tracer, evm.GetVMContext())
if tracer.OnSystemCallEnd != nil {
@ -303,22 +329,23 @@ func ProcessParentBlockHash(prevHash common.Hash, evm *vm.EVM) {
if evm.StateDB.AccessEvents() != nil {
evm.StateDB.AccessEvents().Merge(evm.AccessEvents)
}
evm.StateDB.Finalise(true)
return evm.StateDB.Finalise(true)
}
// ProcessWithdrawalQueue calls the EIP-7002 withdrawal queue contract.
// It returns the opaque request data returned by the contract.
func ProcessWithdrawalQueue(requests *[][]byte, evm *vm.EVM) error {
func ProcessWithdrawalQueue(requests *[][]byte, evm *vm.EVM) (bal.StateMutations, error) {
return processRequestsSystemCall(requests, evm, 0x01, params.WithdrawalQueueAddress)
}
// ProcessConsolidationQueue calls the EIP-7251 consolidation queue contract.
// It returns the opaque request data returned by the contract.
func ProcessConsolidationQueue(requests *[][]byte, evm *vm.EVM) error {
func ProcessConsolidationQueue(requests *[][]byte, evm *vm.EVM) (bal.StateMutations, error) {
return processRequestsSystemCall(requests, evm, 0x02, params.ConsolidationQueueAddress)
}
func processRequestsSystemCall(requests *[][]byte, evm *vm.EVM, requestType byte, addr common.Address) error {
// TODO: does the requests contract produce mutations? I think it just parses the logs into requests but idk
func processRequestsSystemCall(requests *[][]byte, evm *vm.EVM, requestType byte, addr common.Address) (bal.StateMutations, error) {
if tracer := evm.Config.Tracer; tracer != nil {
onSystemCallStart(tracer, evm.GetVMContext())
if tracer.OnSystemCallEnd != nil {
@ -336,19 +363,19 @@ func processRequestsSystemCall(requests *[][]byte, evm *vm.EVM, requestType byte
evm.SetTxContext(NewEVMTxContext(msg))
evm.StateDB.AddAddressToAccessList(addr)
ret, _, err := evm.Call(msg.From, *msg.To, msg.Data, 30_000_000, common.U2560)
evm.StateDB.Finalise(true)
mut := evm.StateDB.Finalise(true)
if err != nil {
return fmt.Errorf("system call failed to execute: %v", err)
return nil, fmt.Errorf("system call failed to execute: %v", err)
}
if len(ret) == 0 {
return nil // skip empty output
return mut, nil // skip empty output
}
// Append prefixed requestsData to the requests list.
requestsData := make([]byte, len(ret)+1)
requestsData[0] = requestType
copy(requestsData[1:], ret)
*requests = append(*requests, requestsData)
return nil
return mut, nil
}
var depositTopic = common.HexToHash("0x649bbc62d0e31342afea4e5cd82d4049e7e1ee912fc0889aa790803be39038c5")

View file

@ -19,9 +19,6 @@ package core
import (
"bytes"
"fmt"
"math"
"math/big"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/tracing"
"github.com/ethereum/go-ethereum/core/types"
@ -29,6 +26,8 @@ import (
"github.com/ethereum/go-ethereum/crypto/kzg4844"
"github.com/ethereum/go-ethereum/params"
"github.com/holiman/uint256"
"math"
"math/big"
)
// ExecutionResult includes all output after executing given evm
@ -574,6 +573,10 @@ func (st *stateTransition) execute() (*ExecutionResult, error) {
} else {
fee := new(uint256.Int).SetUint64(st.gasUsed())
fee.Mul(fee, effectiveTipU256)
// always read the coinbase account to include it in the BAL (TODO check this is actually part of the spec)
st.state.GetBalance(st.evm.Context.Coinbase)
st.state.AddBalance(st.evm.Context.Coinbase, fee, tracing.BalanceIncreaseRewardTransactionFee)
// add the coinbase to the witness iff the fee is greater than 0
@ -582,7 +585,7 @@ func (st *stateTransition) execute() (*ExecutionResult, error) {
}
}
return &ExecutionResult{
UsedGas: st.gasUsed(),
UsedGas: peakGasUsed,
MaxUsedGas: peakGasUsed,
Err: vmerr,
ReturnData: ret,
@ -633,16 +636,22 @@ func (st *stateTransition) applyAuthorization(auth *types.SetCodeAuthorization)
st.state.AddRefund(params.CallNewAccountGas - params.TxAuthTupleGas)
}
prevDelegation, isDelegated := types.ParseDelegation(st.state.GetCode(authority))
// Update nonce and account code.
st.state.SetNonce(authority, auth.Nonce+1, tracing.NonceChangeAuthorization)
if auth.Address == (common.Address{}) {
// Delegation to zero address means clear.
st.state.SetCode(authority, nil, tracing.CodeChangeAuthorizationClear)
if isDelegated {
st.state.SetCode(authority, nil, tracing.CodeChangeAuthorizationClear)
}
return nil
}
// Otherwise install delegation to auth.Address.
st.state.SetCode(authority, types.AddressToDelegation(auth.Address), tracing.CodeChangeAuthorization)
// install delegation to auth.Address if the delegation changed
if !isDelegated || auth.Address != prevDelegation {
st.state.SetCode(authority, types.AddressToDelegation(auth.Address), tracing.CodeChangeAuthorization)
}
return nil
}

View file

@ -67,6 +67,10 @@ func ExecuteStateless(ctx context.Context, config *params.ChainConfig, vmconfig
processor := NewStateProcessor(chain)
validator := NewBlockValidator(config, nil) // No chain, we only validate the state, not the block
if config.IsAmsterdam(block.Number(), block.Time()) {
db = db.WithReader(state.NewReaderWithTracker(db.Reader()))
}
// Run the stateless blocks processing and self-validate certain fields
res, err := processor.Process(ctx, block, db, vmconfig)
if err != nil {

View file

@ -178,7 +178,6 @@ type (
CloseHook = func()
// BlockStartHook is called before executing `block`.
// `td` is the total difficulty prior to `block`.
BlockStartHook = func(event BlockEvent)
// BlockEndHook is called after executing a block.
@ -192,24 +191,25 @@ type (
// GenesisBlockHook is called when the genesis block is being processed.
GenesisBlockHook = func(genesis *types.Block, alloc types.GenesisAlloc)
// OnSystemCallStartHook is called when a system call is about to be executed. Today,
// this hook is invoked when the EIP-4788 system call is about to be executed to set the
// beacon block root.
// OnSystemCallStartHook is called when a system call is about to be executed.
// Today, this hook is invoked when the EIP-4788 system call is about to be
// executed to set the beacon block root.
//
// After this hook, the EVM call tracing will happened as usual so you will receive a `OnEnter/OnExit`
// as well as state hooks between this hook and the `OnSystemCallEndHook`.
// After this hook, the EVM call tracing will happened as usual so you will
// receive a `OnEnter/OnExit` as well as state hooks between this hook and
// the `OnSystemCallEndHook`.
//
// Note that system call happens outside normal transaction execution, so the `OnTxStart/OnTxEnd` hooks
// will not be invoked.
// Note that system call happens outside normal transaction execution, so
// the `OnTxStart/OnTxEnd` hooks will not be invoked.
OnSystemCallStartHook = func()
// OnSystemCallStartHookV2 is called when a system call is about to be executed. Refer
// to `OnSystemCallStartHook` for more information.
// OnSystemCallStartHookV2 is called when a system call is about to be executed.
// Refer to `OnSystemCallStartHook` for more information.
OnSystemCallStartHookV2 = func(vm *VMContext)
// OnSystemCallEndHook is called when a system call has finished executing. Today,
// this hook is invoked when the EIP-4788 system call is about to be executed to set the
// beacon block root.
// OnSystemCallEndHook is called when a system call has finished executing.
// Today, this hook is invoked when the EIP-4788 system call is about to be
// executed to set the beacon block root.
OnSystemCallEndHook = func()
// StateUpdateHook is called after state is committed for a block.
@ -239,9 +239,17 @@ type (
// StorageChangeHook is called when the storage of an account changes.
StorageChangeHook = func(addr common.Address, slot common.Hash, prev, new common.Hash)
SelfDestructHook = func(address common.Address)
// LogHook is called when a log is emitted.
LogHook = func(log *types.Log)
// AccountReadHook is called when the account is accessed.
AccountReadHook = func(addr common.Address)
// StorageReadHook is called when the storage slot is accessed.
StorageReadHook = func(addr common.Address, slot common.Hash)
// BlockHashReadHook is called when EVM reads the blockhash of a block.
BlockHashReadHook = func(blockNumber uint64, hash common.Hash)
)
@ -255,6 +263,7 @@ type Hooks struct {
OnOpcode OpcodeHook
OnFault FaultHook
OnGasChange GasChangeHook
// Chain events
OnBlockchainInit BlockchainInitHook
OnClose CloseHook
@ -266,14 +275,23 @@ type Hooks struct {
OnSystemCallStartV2 OnSystemCallStartHookV2
OnSystemCallEnd OnSystemCallEndHook
OnStateUpdate StateUpdateHook
// State events
OnBalanceChange BalanceChangeHook
OnNonceChange NonceChangeHook
OnNonceChangeV2 NonceChangeHookV2
OnCodeChange CodeChangeHook
OnCodeChangeV2 CodeChangeHookV2
OnStorageChange StorageChangeHook
OnLog LogHook
OnBlockFinalization func() // called after post-tx system contracts and consensus finalization are invoked
// State mutation events
OnBalanceChange BalanceChangeHook
OnNonceChange NonceChangeHook
OnNonceChangeV2 NonceChangeHookV2
OnCodeChange CodeChangeHook
OnCodeChangeV2 CodeChangeHookV2
OnStorageChange StorageChangeHook
OnLog LogHook
OnSelfDestructChange SelfDestructHook
// State access events
OnAccountRead AccountReadHook
OnStorageRead StorageReadHook
// Block hash read
OnBlockHashRead BlockHashReadHook
}
@ -290,57 +308,74 @@ const (
// Issuance
// BalanceIncreaseRewardMineUncle is a reward for mining an uncle block.
BalanceIncreaseRewardMineUncle BalanceChangeReason = 1
// BalanceIncreaseRewardMineBlock is a reward for mining a block.
BalanceIncreaseRewardMineBlock BalanceChangeReason = 2
// BalanceIncreaseWithdrawal is ether withdrawn from the beacon chain.
BalanceIncreaseWithdrawal BalanceChangeReason = 3
// BalanceIncreaseGenesisBalance is ether allocated at the genesis block.
BalanceIncreaseGenesisBalance BalanceChangeReason = 4
// Transaction fees
// BalanceIncreaseRewardTransactionFee is the transaction tip increasing block builder's balance.
// BalanceIncreaseRewardTransactionFee is the transaction tip increasing
// block builder's balance.
BalanceIncreaseRewardTransactionFee BalanceChangeReason = 5
// BalanceDecreaseGasBuy is spent to purchase gas for execution a transaction.
// Part of this gas will be burnt as per EIP-1559 rules.
BalanceDecreaseGasBuy BalanceChangeReason = 6
// BalanceIncreaseGasReturn is ether returned for unused gas at the end of execution.
BalanceIncreaseGasReturn BalanceChangeReason = 7
// DAO fork
// BalanceIncreaseDaoContract is ether sent to the DAO refund contract.
BalanceIncreaseDaoContract BalanceChangeReason = 8
// BalanceDecreaseDaoAccount is ether taken from a DAO account to be moved to the refund contract.
// BalanceDecreaseDaoAccount is ether taken from a DAO account to be moved
// to the refund contract.
BalanceDecreaseDaoAccount BalanceChangeReason = 9
// BalanceChangeTransfer is ether transferred via a call.
// it is a decrease for the sender and an increase for the recipient.
BalanceChangeTransfer BalanceChangeReason = 10
// BalanceChangeTouchAccount is a transfer of zero value. It is only there to
// touch-create an account.
BalanceChangeTouchAccount BalanceChangeReason = 11
// BalanceIncreaseSelfdestruct is added to the recipient as indicated by a selfdestructing account.
// BalanceIncreaseSelfdestruct is added to the recipient as indicated by a
// selfdestructing account.
BalanceIncreaseSelfdestruct BalanceChangeReason = 12
// BalanceDecreaseSelfdestruct is deducted from a contract due to self-destruct.
BalanceDecreaseSelfdestruct BalanceChangeReason = 13
// BalanceDecreaseSelfdestructBurn is ether that is sent to an already self-destructed
// account within the same tx (captured at end of tx).
// Note it doesn't account for a self-destruct which appoints itself as recipient.
BalanceDecreaseSelfdestructBurn BalanceChangeReason = 14
// BalanceChangeRevert is emitted when the balance is reverted back to a previous value due to call failure.
// It is only emitted when the tracer has opted in to use the journaling wrapper (WrapWithJournal).
// BalanceChangeRevert is emitted when the balance is reverted back to a
// previous value due to call failure.
//
// It is only emitted when the tracer has opted in to use the journaling
// wrapper (WrapWithJournal).
BalanceChangeRevert BalanceChangeReason = 15
)
// GasChangeReason is used to indicate the reason for a gas change, useful
// for tracing and reporting.
//
// There is essentially two types of gas changes, those that can be emitted once per transaction
// and those that can be emitted on a call basis, so possibly multiple times per transaction.
// There is essentially two types of gas changes, those that can be emitted
// once per transaction and those that can be emitted on a call basis, so possibly
// multiple times per transaction.
//
// They can be recognized easily by their name, those that start with `GasChangeTx` are emitted
// once per transaction, while those that start with `GasChangeCall` are emitted on a call basis.
// They can be recognized easily by their name, those that start with `GasChangeTx`
// are emitted once per transaction, while those that start with `GasChangeCall`
// are emitted on a call basis.
type GasChangeReason byte
//go:generate go run golang.org/x/tools/cmd/stringer -type=GasChangeReason -trimprefix=GasChange -output gen_gas_change_reason_stringer.go
@ -348,61 +383,99 @@ type GasChangeReason byte
const (
GasChangeUnspecified GasChangeReason = 0
// GasChangeTxInitialBalance is the initial balance for the call which will be equal to the gasLimit of the call. There is only
// one such gas change per transaction.
// GasChangeTxInitialBalance is the initial balance for the call which will
// be equal to the gasLimit of the call. There is only one such gas change
// per transaction.
GasChangeTxInitialBalance GasChangeReason = 1
// GasChangeTxIntrinsicGas is the amount of gas that will be charged for the intrinsic cost of the transaction, there is
// always exactly one of those per transaction.
// GasChangeTxIntrinsicGas is the amount of gas that will be charged for the
// intrinsic cost of the transaction, there is always exactly one of those
// per transaction.
GasChangeTxIntrinsicGas GasChangeReason = 2
// GasChangeTxRefunds is the sum of all refunds which happened during the tx execution (e.g. storage slot being cleared)
// this generates an increase in gas. There is at most one of such gas change per transaction.
// GasChangeTxRefunds is the sum of all refunds which happened during the tx
// execution (e.g. storage slot being cleared). this generates an increase in
// gas. There is at most one of such gas change per transaction.
GasChangeTxRefunds GasChangeReason = 3
// GasChangeTxLeftOverReturned is the amount of gas left over at the end of transaction's execution that will be returned
// to the account. This change will always be a negative change as we "drain" left over gas towards 0. If there was no gas
// left at the end of execution, no such even will be emitted. The returned gas's value in Wei is returned to caller.
// There is at most one of such gas change per transaction.
// GasChangeTxLeftOverReturned is the amount of gas left over at the end of
// transaction's execution that will be returned to the chain. This change
// will always be a negative change as we "drain" left over gas towards 0.
// If there was no gas left at the end of execution, no such even will be
// emitted. The returned gas's value in Wei is returned to caller. There is
// at most one of such gas change per transaction.
GasChangeTxLeftOverReturned GasChangeReason = 4
// GasChangeCallInitialBalance is the initial balance for the call which will be equal to the gasLimit of the call. There is only
// one such gas change per call.
// GasChangeCallInitialBalance is the initial balance for the call which
// will be equal to the gasLimit of the call. There is only one such gas
// change per call.
GasChangeCallInitialBalance GasChangeReason = 5
// GasChangeCallLeftOverReturned is the amount of gas left over that will be returned to the caller, this change will always
// be a negative change as we "drain" left over gas towards 0. If there was no gas left at the end of execution, no such even
// will be emitted.
// GasChangeCallLeftOverReturned is the amount of gas left over that will
// be returned to the caller, this change will always be a negative change
// as we "drain" left over gas towards 0. If there was no gas left at the
// end of execution, no such even will be emitted.
GasChangeCallLeftOverReturned GasChangeReason = 6
// GasChangeCallLeftOverRefunded is the amount of gas that will be refunded to the call after the child call execution it
// executed completed. This value is always positive as we are giving gas back to the you, the left over gas of the child.
// If there was no gas left to be refunded, no such even will be emitted.
// GasChangeCallLeftOverRefunded is the amount of gas that will be refunded
// to the call after the child call execution it executed completed. This
// value is always positive as we are giving gas back to the you, the left over
// gas of the child. If there was no gas left to be refunded, no such event
// will be emitted.
GasChangeCallLeftOverRefunded GasChangeReason = 7
// GasChangeCallContractCreation is the amount of gas that will be burned for a CREATE.
// GasChangeCallContractCreation is the amount of gas that will be burned
// for a CREATE.
GasChangeCallContractCreation GasChangeReason = 8
// GasChangeCallContractCreation2 is the amount of gas that will be burned for a CREATE2.
// GasChangeCallContractCreation2 is the amount of gas that will be burned
// for a CREATE2.
GasChangeCallContractCreation2 GasChangeReason = 9
// GasChangeCallCodeStorage is the amount of gas that will be charged for code storage.
// GasChangeCallCodeStorage is the amount of gas that will be charged for
// code storage.
GasChangeCallCodeStorage GasChangeReason = 10
// GasChangeCallOpCode is the amount of gas that will be charged for an opcode executed by the EVM, exact opcode that was
// performed can be check by `OnOpcode` handling.
// GasChangeCallOpCode is the amount of gas that will be charged for an opcode
// executed by the EVM, exact opcode that was performed can be check by
// `OnOpcode` handling.
GasChangeCallOpCode GasChangeReason = 11
// GasChangeCallPrecompiledContract is the amount of gas that will be charged for a precompiled contract execution.
// GasChangeCallPrecompiledContract is the amount of gas that will be charged
// for a precompiled contract execution.
GasChangeCallPrecompiledContract GasChangeReason = 12
// GasChangeCallStorageColdAccess is the amount of gas that will be charged for a cold storage access as controlled by EIP2929 rules.
// GasChangeCallStorageColdAccess is the amount of gas that will be charged
// for a cold storage access as controlled by EIP2929 rules.
GasChangeCallStorageColdAccess GasChangeReason = 13
// GasChangeCallFailedExecution is the burning of the remaining gas when the execution failed without a revert.
// GasChangeCallFailedExecution is the burning of the remaining gas when the
// execution failed without a revert.
GasChangeCallFailedExecution GasChangeReason = 14
// GasChangeWitnessContractInit flags the event of adding to the witness during the contract creation initialization step.
// GasChangeWitnessContractInit flags the event of adding to the witness
// during the contract creation initialization step.
GasChangeWitnessContractInit GasChangeReason = 15
// GasChangeWitnessContractCreation flags the event of adding to the witness during the contract creation finalization step.
// GasChangeWitnessContractCreation flags the event of adding to the witness
// during the contract creation finalization step.
GasChangeWitnessContractCreation GasChangeReason = 16
// GasChangeWitnessCodeChunk flags the event of adding one or more contract code chunks to the witness.
// GasChangeWitnessCodeChunk flags the event of adding one or more contract
// code chunks to the witness.
GasChangeWitnessCodeChunk GasChangeReason = 17
// GasChangeWitnessContractCollisionCheck flags the event of adding to the witness when checking for contract address collision.
// GasChangeWitnessContractCollisionCheck flags the event of adding to the
// witness when checking for contract address collision.
GasChangeWitnessContractCollisionCheck GasChangeReason = 18
// GasChangeTxDataFloor is the amount of extra gas the transaction has to pay to reach the minimum gas requirement for the
// transaction data. This change will always be a negative change.
// GasChangeTxDataFloor is the amount of extra gas the transaction has to
// pay to reach the minimum gas requirement for the transaction data.
// This change will always be a negative change.
GasChangeTxDataFloor GasChangeReason = 19
// GasChangeIgnored is a special value that can be used to indicate that the gas change should be ignored as
// it will be "manually" tracked by a direct emit of the gas change event.
// GasChangeIgnored is a special value that can be used to indicate that
// the gas change should be ignored as it will be "manually" tracked by
// a direct emit of the gas change event.
GasChangeIgnored GasChangeReason = 0xFF
)
@ -426,11 +499,12 @@ const (
// NonceChangeNewContract is the nonce change of a newly created contract.
NonceChangeNewContract NonceChangeReason = 4
// NonceChangeTransaction is the nonce change due to a EIP-7702 authorization.
// NonceChangeAuthorization is the nonce change due to a EIP-7702 authorization.
NonceChangeAuthorization NonceChangeReason = 5
// NonceChangeRevert is emitted when the nonce is reverted back to a previous value due to call failure.
// It is only emitted when the tracer has opted in to use the journaling wrapper (WrapWithJournal).
// NonceChangeRevert is emitted when the nonce is reverted back to a previous
// value due to call failure. It is only emitted when the tracer has opted in
// to use the journaling wrapper (WrapWithJournal).
NonceChangeRevert NonceChangeReason = 6
// NonceChangeSelfdestruct is emitted when the nonce is reset to zero due to a self-destruct
@ -445,22 +519,26 @@ type CodeChangeReason byte
const (
CodeChangeUnspecified CodeChangeReason = 0
// CodeChangeContractCreation is when a new contract is deployed via CREATE/CREATE2 operations.
// CodeChangeContractCreation is when a new contract is deployed via
// CREATE/CREATE2 operations.
CodeChangeContractCreation CodeChangeReason = 1
// CodeChangeGenesis is when contract code is set during blockchain genesis or initial setup.
// CodeChangeGenesis is when contract code is set during blockchain genesis
// or initial setup.
CodeChangeGenesis CodeChangeReason = 2
// CodeChangeAuthorization is when code is set via EIP-7702 Set Code Authorization.
CodeChangeAuthorization CodeChangeReason = 3
// CodeChangeAuthorizationClear is when EIP-7702 delegation is cleared by setting to zero address.
// CodeChangeAuthorizationClear is when EIP-7702 delegation is cleared by
// setting to zero address.
CodeChangeAuthorizationClear CodeChangeReason = 4
// CodeChangeSelfDestruct is when contract code is cleared due to self-destruct.
CodeChangeSelfDestruct CodeChangeReason = 5
// CodeChangeRevert is emitted when the code is reverted back to a previous value due to call failure.
// It is only emitted when the tracer has opted in to use the journaling wrapper (WrapWithJournal).
// CodeChangeRevert is emitted when the code is reverted back to a previous
// value due to call failure. It is only emitted when the tracer has opted
// in to use the journaling wrapper (WrapWithJournal).
CodeChangeRevert CodeChangeReason = 6
)

View file

@ -42,7 +42,9 @@ func WrapWithJournal(hooks *Hooks) (*Hooks, error) {
return nil, errors.New("wrapping nil tracer")
}
// No state change to journal, return the wrapped hooks as is
if hooks.OnBalanceChange == nil && hooks.OnNonceChange == nil && hooks.OnNonceChangeV2 == nil && hooks.OnCodeChange == nil && hooks.OnCodeChangeV2 == nil && hooks.OnStorageChange == nil {
if hooks.OnBalanceChange == nil && hooks.OnNonceChange == nil && hooks.OnNonceChangeV2 == nil &&
hooks.OnCodeChange == nil && hooks.OnCodeChangeV2 == nil && hooks.OnStorageChange == nil {
// TODO(sina) hooks.OnLog should also be handled here
return hooks, nil
}
if hooks.OnNonceChange != nil && hooks.OnNonceChangeV2 != nil {
@ -56,11 +58,14 @@ func WrapWithJournal(hooks *Hooks) (*Hooks, error) {
wrapped := *hooks
// Create journal
j := &journal{hooks: hooks}
j := &journal{
hooks: hooks,
}
// Scope hooks need to be re-implemented.
wrapped.OnTxEnd = j.OnTxEnd
wrapped.OnEnter = j.OnEnter
wrapped.OnExit = j.OnExit
// Wrap state change hooks.
if hooks.OnBalanceChange != nil {
wrapped.OnBalanceChange = j.OnBalanceChange
@ -69,6 +74,7 @@ func WrapWithJournal(hooks *Hooks) (*Hooks, error) {
// Regardless of which hook version is used in the tracer,
// the journal will want to capture the nonce change reason.
wrapped.OnNonceChangeV2 = j.OnNonceChangeV2
// A precaution to ensure EVM doesn't call both hooks.
wrapped.OnNonceChange = nil
}
@ -81,7 +87,6 @@ func WrapWithJournal(hooks *Hooks) (*Hooks, error) {
if hooks.OnStorageChange != nil {
wrapped.OnStorageChange = j.OnStorageChange
}
return &wrapped, nil
}
@ -148,7 +153,11 @@ func (j *journal) OnExit(depth int, output []byte, gasUsed uint64, err error, re
}
func (j *journal) OnBalanceChange(addr common.Address, prev, new *big.Int, reason BalanceChangeReason) {
j.entries = append(j.entries, balanceChange{addr: addr, prev: prev, new: new})
j.entries = append(j.entries, balanceChange{
addr: addr,
prev: prev,
new: new,
})
if j.hooks.OnBalanceChange != nil {
j.hooks.OnBalanceChange(addr, prev, new, reason)
}
@ -158,7 +167,11 @@ func (j *journal) OnNonceChangeV2(addr common.Address, prev, new uint64, reason
// When a contract is created, the nonce of the creator is incremented.
// This change is not reverted when the creation fails.
if reason != NonceChangeContractCreator {
j.entries = append(j.entries, nonceChange{addr: addr, prev: prev, new: new})
j.entries = append(j.entries, nonceChange{
addr: addr,
prev: prev,
new: new,
})
}
if j.hooks.OnNonceChangeV2 != nil {
j.hooks.OnNonceChangeV2(addr, prev, new, reason)
@ -194,7 +207,12 @@ func (j *journal) OnCodeChangeV2(addr common.Address, prevCodeHash common.Hash,
}
func (j *journal) OnStorageChange(addr common.Address, slot common.Hash, prev, new common.Hash) {
j.entries = append(j.entries, storageChange{addr: addr, slot: slot, prev: prev, new: new})
j.entries = append(j.entries, storageChange{
addr: addr,
slot: slot,
prev: prev,
new: new,
})
if j.hooks.OnStorageChange != nil {
j.hooks.OnStorageChange(addr, slot, prev, new)
}

View file

@ -63,7 +63,7 @@ func (t *testTracer) OnCodeChangeV2(addr common.Address, prevCodeHash common.Has
}
func (t *testTracer) OnStorageChange(addr common.Address, slot common.Hash, prev common.Hash, new common.Hash) {
t.t.Logf("OnStorageCodeChange(%v, %v, %v -> %v)", addr, slot, prev, new)
t.t.Logf("OnStorageChange(%v, %v, %v -> %v)", addr, slot, prev, new)
if t.storage == nil {
t.storage = make(map[common.Hash]common.Hash)
}
@ -76,7 +76,12 @@ func (t *testTracer) OnStorageChange(addr common.Address, slot common.Hash, prev
func TestJournalIntegration(t *testing.T) {
tr := &testTracer{t: t}
wr, err := WrapWithJournal(&Hooks{OnBalanceChange: tr.OnBalanceChange, OnNonceChange: tr.OnNonceChange, OnCodeChange: tr.OnCodeChange, OnStorageChange: tr.OnStorageChange})
wr, err := WrapWithJournal(&Hooks{
OnBalanceChange: tr.OnBalanceChange,
OnNonceChange: tr.OnNonceChange,
OnCodeChange: tr.OnCodeChange,
OnStorageChange: tr.OnStorageChange,
})
if err != nil {
t.Fatalf("failed to wrap test tracer: %v", err)
}

View file

@ -18,6 +18,7 @@ package core
import (
"context"
"github.com/ethereum/go-ethereum/core/types/bal"
"sync/atomic"
"github.com/ethereum/go-ethereum/core/state"
@ -33,7 +34,7 @@ type Validator interface {
ValidateBody(block *types.Block) error
// ValidateState validates the given statedb and optionally the process result.
ValidateState(block *types.Block, state *state.StateDB, res *ProcessResult, stateless bool) error
ValidateState(block *types.Block, state state.BlockStateTransition, res *ProcessResult, stateless bool) error
}
// Prefetcher is an interface for pre-caching transaction signatures and state.
@ -54,8 +55,10 @@ type Processor interface {
// ProcessResult contains the values computed by Process.
type ProcessResult struct {
Receipts types.Receipts
Requests [][]byte
Logs []*types.Log
GasUsed uint64
AccessList bal.ConstructionBlockAccessList
Receipts types.Receipts
Requests [][]byte
Logs []*types.Log
GasUsed uint64
Error error
}

View file

@ -18,156 +18,516 @@ package bal
import (
"bytes"
"maps"
"encoding/json"
"github.com/ethereum/go-ethereum/common"
"github.com/holiman/uint256"
"maps"
)
// ConstructionAccountAccess contains post-block account state for mutations as well as
// ConstructionAccountAccesses contains post-block account state for mutations as well as
// all storage keys that were read during execution. It is used when building block
// access list during execution.
type ConstructionAccountAccess struct {
type ConstructionAccountAccesses struct {
// StorageWrites is the post-state values of an account's storage slots
// that were modified in a block, keyed by the slot key and the tx index
// where the modification occurred.
StorageWrites map[common.Hash]map[uint16]common.Hash `json:"storageWrites,omitempty"`
StorageWrites map[common.Hash]map[uint16]common.Hash
// StorageReads is the set of slot keys that were accessed during block
// execution.
//
// Storage slots which are both read and written (with changed values)
// storage slots which are both read and written (with changed values)
// appear only in StorageWrites.
StorageReads map[common.Hash]struct{} `json:"storageReads,omitempty"`
StorageReads map[common.Hash]struct{}
// BalanceChanges contains the post-transaction balances of an account,
// keyed by transaction indices where it was changed.
BalanceChanges map[uint16]*uint256.Int `json:"balanceChanges,omitempty"`
BalanceChanges map[uint16]*uint256.Int
// NonceChanges contains the post-state nonce values of an account keyed
// by tx index.
NonceChanges map[uint16]uint64 `json:"nonceChanges,omitempty"`
NonceChanges map[uint16]uint64
// CodeChange contains the post-state contract code of an account keyed
// by tx index.
CodeChange map[uint16][]byte `json:"codeChange,omitempty"`
CodeChanges map[uint16][]byte
}
// NewConstructionAccountAccess initializes the account access object.
func NewConstructionAccountAccess() *ConstructionAccountAccess {
return &ConstructionAccountAccess{
func (c *ConstructionAccountAccesses) Copy() (res ConstructionAccountAccesses) {
if c.StorageWrites != nil {
res.StorageWrites = make(map[common.Hash]map[uint16]common.Hash)
for slot, writes := range c.StorageWrites {
res.StorageWrites[slot] = maps.Clone(writes)
}
}
if c.StorageReads != nil {
res.StorageReads = maps.Clone(c.StorageReads)
}
if c.BalanceChanges != nil {
res.BalanceChanges = maps.Clone(c.BalanceChanges)
}
if c.NonceChanges != nil {
res.NonceChanges = maps.Clone(c.NonceChanges)
}
if c.CodeChanges != nil {
res.CodeChanges = maps.Clone(c.CodeChanges)
}
return res
}
type StateMutations map[common.Address]AccountMutations
func (s StateMutations) String() string {
b, _ := json.MarshalIndent(s, "", " ")
return string(b)
}
// Merge merges the state changes present in next into the caller. After,
// the state of the caller is the aggregate diff through next.
func (s StateMutations) Merge(next StateMutations) {
for account, diff := range next {
if mut, ok := s[account]; ok {
if diff.Balance != nil {
mut.Balance = diff.Balance
}
if diff.Code != nil {
mut.Code = diff.Code
}
if diff.Nonce != nil {
mut.Nonce = diff.Nonce
}
if len(diff.StorageWrites) > 0 {
if mut.StorageWrites == nil {
mut.StorageWrites = maps.Clone(diff.StorageWrites)
} else {
for key, val := range diff.StorageWrites {
mut.StorageWrites[key] = val
}
}
}
} else {
s[account] = *diff.Copy()
}
}
}
func (s StateMutations) Eq(other StateMutations) bool {
if len(s) != len(other) {
return false
}
for addr, mut := range s {
otherMut, ok := other[addr]
if !ok {
return false
}
if !mut.Eq(&otherMut) {
return false
}
}
return true
}
type ConstructionBlockAccessList map[common.Address]*ConstructionAccountAccesses
func (c ConstructionBlockAccessList) Copy() ConstructionBlockAccessList {
res := make(ConstructionBlockAccessList)
for addr, accountAccess := range c {
aaCopy := accountAccess.Copy()
res[addr] = &aaCopy
}
return res
}
func (c ConstructionBlockAccessList) AccumulateMutations(muts StateMutations, idx uint16) {
for addr, mut := range muts {
if _, exist := c[addr]; !exist {
c[addr] = newConstructionAccountAccesses()
}
if mut.Nonce != nil {
if c[addr].NonceChanges == nil {
c[addr].NonceChanges = make(map[uint16]uint64)
}
c[addr].NonceChanges[idx] = *mut.Nonce
}
if mut.Balance != nil {
if c[addr].BalanceChanges == nil {
c[addr].BalanceChanges = make(map[uint16]*uint256.Int)
}
c[addr].BalanceChanges[idx] = mut.Balance.Clone()
}
if mut.Code != nil {
if c[addr].CodeChanges == nil {
c[addr].CodeChanges = make(map[uint16][]byte)
}
c[addr].CodeChanges[idx] = bytes.Clone(mut.Code)
}
if len(mut.StorageWrites) > 0 {
for key, val := range mut.StorageWrites {
if c[addr].StorageWrites[key] == nil {
c[addr].StorageWrites[key] = make(map[uint16]common.Hash)
}
c[addr].StorageWrites[key][idx] = val
}
}
}
}
func (c ConstructionBlockAccessList) AccumulateReads(reads StateAccesses) {
for addr, addrReads := range reads {
if _, ok := c[addr]; !ok {
c[addr] = newConstructionAccountAccesses()
}
for storageKey, _ := range addrReads {
if c[addr].StorageWrites != nil {
if _, ok := c[addr].StorageWrites[storageKey]; ok {
continue
}
}
if c[addr].StorageReads == nil {
c[addr].StorageReads = make(map[common.Hash]struct{})
}
c[addr].StorageReads[storageKey] = struct{}{}
}
}
}
func newConstructionAccountAccesses() *ConstructionAccountAccesses {
return &ConstructionAccountAccesses{
StorageWrites: make(map[common.Hash]map[uint16]common.Hash),
StorageReads: make(map[common.Hash]struct{}),
BalanceChanges: make(map[uint16]*uint256.Int),
NonceChanges: make(map[uint16]uint64),
CodeChange: make(map[uint16][]byte),
CodeChanges: make(map[uint16][]byte),
}
}
// ConstructionBlockAccessList contains post-block modified state and some state accessed
// in execution (account addresses and storage keys).
type ConstructionBlockAccessList struct {
Accounts map[common.Address]*ConstructionAccountAccess
// StateDiff contains state mutations occuring over one or more access list
// index.
type StateDiff struct {
Mutations map[common.Address]*AccountMutations `json:"Mutations,omitempty"`
}
// NewConstructionBlockAccessList instantiates an empty access list.
func NewConstructionBlockAccessList() ConstructionBlockAccessList {
return ConstructionBlockAccessList{
Accounts: make(map[common.Address]*ConstructionAccountAccess),
}
}
// StateAccesses contains a set of accounts/storage that were accessed during the
// execution of one or more access list indices.
type StateAccesses map[common.Address]StorageAccessList
type StorageAccessList map[common.Hash]struct{}
// AccountRead records the address of an account that has been read during execution.
func (b *ConstructionBlockAccessList) AccountRead(addr common.Address) {
if _, ok := b.Accounts[addr]; !ok {
b.Accounts[addr] = NewConstructionAccountAccess()
}
}
// StorageRead records a storage key read during execution.
func (b *ConstructionBlockAccessList) StorageRead(address common.Address, key common.Hash) {
if _, ok := b.Accounts[address]; !ok {
b.Accounts[address] = NewConstructionAccountAccess()
}
if _, ok := b.Accounts[address].StorageWrites[key]; ok {
return
}
b.Accounts[address].StorageReads[key] = struct{}{}
}
// StorageWrite records the post-transaction value of a mutated storage slot.
// The storage slot is removed from the list of read slots.
func (b *ConstructionBlockAccessList) StorageWrite(txIdx uint16, address common.Address, key, value common.Hash) {
if _, ok := b.Accounts[address]; !ok {
b.Accounts[address] = NewConstructionAccountAccess()
}
if _, ok := b.Accounts[address].StorageWrites[key]; !ok {
b.Accounts[address].StorageWrites[key] = make(map[uint16]common.Hash)
}
b.Accounts[address].StorageWrites[key][txIdx] = value
delete(b.Accounts[address].StorageReads, key)
}
// CodeChange records the code of a newly-created contract.
func (b *ConstructionBlockAccessList) CodeChange(address common.Address, txIndex uint16, code []byte) {
if _, ok := b.Accounts[address]; !ok {
b.Accounts[address] = NewConstructionAccountAccess()
}
// TODO(rjl493456442) is it essential to deep-copy the code?
b.Accounts[address].CodeChange[txIndex] = bytes.Clone(code)
}
// NonceChange records tx post-state nonce of any contract-like accounts whose
// nonce was incremented.
func (b *ConstructionBlockAccessList) NonceChange(address common.Address, txIdx uint16, postNonce uint64) {
if _, ok := b.Accounts[address]; !ok {
b.Accounts[address] = NewConstructionAccountAccess()
}
b.Accounts[address].NonceChanges[txIdx] = postNonce
}
// BalanceChange records the post-transaction balance of an account whose
// balance changed.
func (b *ConstructionBlockAccessList) BalanceChange(txIdx uint16, address common.Address, balance *uint256.Int) {
if _, ok := b.Accounts[address]; !ok {
b.Accounts[address] = NewConstructionAccountAccess()
}
b.Accounts[address].BalanceChanges[txIdx] = balance.Clone()
}
// PrettyPrint returns a human-readable representation of the access list
func (b *ConstructionBlockAccessList) PrettyPrint() string {
enc := b.toEncodingObj()
return enc.PrettyPrint()
}
// Copy returns a deep copy of the access list.
func (b *ConstructionBlockAccessList) Copy() *ConstructionBlockAccessList {
res := NewConstructionBlockAccessList()
for addr, aa := range b.Accounts {
var aaCopy ConstructionAccountAccess
slotWrites := make(map[common.Hash]map[uint16]common.Hash, len(aa.StorageWrites))
for key, m := range aa.StorageWrites {
slotWrites[key] = maps.Clone(m)
// Merge combines adds the accesses from other into s.
func (s StateAccesses) Merge(other StateAccesses) {
for addr, accesses := range other {
if _, ok := s[addr]; !ok {
s[addr] = make(map[common.Hash]struct{})
}
aaCopy.StorageWrites = slotWrites
aaCopy.StorageReads = maps.Clone(aa.StorageReads)
balances := make(map[uint16]*uint256.Int, len(aa.BalanceChanges))
for index, balance := range aa.BalanceChanges {
balances[index] = balance.Clone()
for slot := range accesses {
s[addr][slot] = struct{}{}
}
aaCopy.BalanceChanges = balances
aaCopy.NonceChanges = maps.Clone(aa.NonceChanges)
}
}
codes := make(map[uint16][]byte, len(aa.CodeChange))
for index, code := range aa.CodeChange {
codes[index] = bytes.Clone(code)
func (s StateAccesses) Eq(other StateAccesses) bool {
if len(s) != len(other) {
return false
}
for addr, accesses := range s {
if _, ok := other[addr]; !ok {
return false
}
if !maps.Equal(accesses, other[addr]) {
return false
}
}
return true
}
type StorageMutations map[common.Hash]common.Hash
// AccountMutations contains mutations that were made to an account across
// one or more access list indices.
type AccountMutations struct {
Balance *uint256.Int `json:"Balance,omitempty"`
Nonce *uint64 `json:"Nonce,omitempty"`
Code ContractCode `json:"Code,omitempty"`
StorageWrites StorageMutations `json:"StorageWrites,omitempty"`
}
// String returns a human-readable JSON representation of the account mutations.
func (a *AccountMutations) String() string {
var res bytes.Buffer
enc := json.NewEncoder(&res)
enc.SetIndent("", " ")
enc.Encode(a)
return res.String()
}
// Copy returns a deep-copy of the instance.
func (a *AccountMutations) Copy() *AccountMutations {
res := &AccountMutations{
nil,
nil,
nil,
nil,
}
if a.Nonce != nil {
res.Nonce = new(uint64)
*res.Nonce = *a.Nonce
}
if a.Code != nil {
res.Code = bytes.Clone(a.Code)
}
if a.Balance != nil {
res.Balance = new(uint256.Int).Set(a.Balance)
}
if a.StorageWrites != nil {
res.StorageWrites = maps.Clone(a.StorageWrites)
}
return res
}
// String returns the state diff as a formatted JSON string.
func (s *StateDiff) String() string {
var res bytes.Buffer
enc := json.NewEncoder(&res)
enc.SetIndent("", " ")
enc.Encode(s)
return res.String()
}
// Copy returns a deep copy of the StateDiff
func (s *StateDiff) Copy() *StateDiff {
res := &StateDiff{make(map[common.Address]*AccountMutations)}
for addr, accountDiff := range s.Mutations {
cpy := accountDiff.Copy()
res.Mutations[addr] = cpy
}
return res
}
// AccessListReader exposes utilities to read state mutations and accesses from an access list
// TODO: expose this an an interface?
type AccessListReader map[common.Address]*AccountAccess
func NewAccessListReader(bal BlockAccessList) (reader AccessListReader) {
reader = make(AccessListReader)
for _, accountAccess := range bal {
reader[accountAccess.Address] = &accountAccess
}
return
}
func (a AccessListReader) Accesses() (accesses StateAccesses) {
accesses = make(StateAccesses)
for addr, acctAccess := range a {
if len(acctAccess.StorageReads) > 0 {
accesses[addr] = make(StorageAccessList)
for _, key := range acctAccess.StorageReads {
accesses[addr][key.ToHash()] = struct{}{}
}
} else if len(acctAccess.CodeChanges) == 0 && len(acctAccess.StorageChanges) == 0 && len(acctAccess.BalanceChanges) == 0 && len(acctAccess.NonceChanges) == 0 {
accesses[addr] = make(StorageAccessList)
}
}
return
}
// TODO: these methods should return the mutations accrued before the execution of the given index
// TODO: strip the storage mutations from the returned result
// the returned object should be able to be modified
func (a AccessListReader) accountMutationsAt(addr common.Address, idx int) (res *AccountMutations) {
acct, exist := a[addr]
if !exist {
return nil
}
res = &AccountMutations{}
// TODO: remove the reverse iteration here to clean the code up
for i := len(acct.BalanceChanges) - 1; i >= 0; i-- {
if acct.BalanceChanges[i].TxIdx == uint16(idx) {
res.Balance = acct.BalanceChanges[i].Balance
}
if acct.BalanceChanges[i].TxIdx < uint16(idx) {
break
}
}
for i := len(acct.CodeChanges) - 1; i >= 0; i-- {
if acct.CodeChanges[i].TxIndex == uint16(idx) {
res.Code = bytes.Clone(acct.CodeChanges[i].Code)
break
}
if acct.CodeChanges[i].TxIndex < uint16(idx) {
break
}
}
for i := len(acct.NonceChanges) - 1; i >= 0; i-- {
if acct.NonceChanges[i].TxIdx == uint16(idx) {
res.Nonce = new(uint64)
*res.Nonce = acct.NonceChanges[i].Nonce
break
}
if acct.NonceChanges[i].TxIdx < uint16(idx) {
break
}
}
for i := len(acct.StorageChanges) - 1; i >= 0; i-- {
if res.StorageWrites == nil {
res.StorageWrites = make(map[common.Hash]common.Hash)
}
slotWrites := acct.StorageChanges[i]
for j := len(slotWrites.Accesses) - 1; j >= 0; j-- {
if slotWrites.Accesses[j].TxIdx == uint16(idx) {
res.StorageWrites[slotWrites.Slot.ToHash()] = slotWrites.Accesses[j].ValueAfter.ToHash()
break
}
if slotWrites.Accesses[j].TxIdx < uint16(idx) {
break
}
}
if len(res.StorageWrites) == 0 {
res.StorageWrites = nil
}
}
if res.Code == nil && res.Nonce == nil && len(res.StorageWrites) == 0 && res.Balance == nil {
return nil
}
return res
}
func (a AccessListReader) AccountMutations(addr common.Address, idx int) (res *AccountMutations) {
diff, exist := a[addr]
if !exist {
return nil
}
res = &AccountMutations{}
for i := 0; i < len(diff.BalanceChanges) && diff.BalanceChanges[i].TxIdx < uint16(idx); i++ {
res.Balance = diff.BalanceChanges[i].Balance.Clone()
}
for i := 0; i < len(diff.CodeChanges) && diff.CodeChanges[i].TxIndex < uint16(idx); i++ {
res.Code = bytes.Clone(diff.CodeChanges[i].Code)
}
for i := 0; i < len(diff.NonceChanges) && diff.NonceChanges[i].TxIdx < uint16(idx); i++ {
res.Nonce = new(uint64)
*res.Nonce = diff.NonceChanges[i].Nonce
}
if len(diff.StorageChanges) > 0 {
res.StorageWrites = make(map[common.Hash]common.Hash)
for _, slotWrites := range diff.StorageChanges {
for i := 0; i < len(slotWrites.Accesses) && slotWrites.Accesses[i].TxIdx < uint16(idx); i++ {
res.StorageWrites[slotWrites.Slot.ToHash()] = slotWrites.Accesses[i].ValueAfter.ToHash()
}
}
}
if res.Code == nil && res.Nonce == nil && len(res.StorageWrites) == 0 && res.Balance == nil {
return nil
}
return res
}
// Mutations returns the aggregate state mutations from [0, idx)
func (a AccessListReader) Mutations(idx int) *StateMutations {
res := make(StateMutations)
for addr := range a {
if mut := a.AccountMutations(addr, idx); mut != nil {
res[addr] = *mut
}
aaCopy.CodeChange = codes
res.Accounts[addr] = &aaCopy
}
return &res
}
// MutationsAt returns the state mutations from an index
func (a AccessListReader) MutationsAt(idx int) *StateMutations {
res := make(StateMutations)
for addr := range a {
if mut := a.accountMutationsAt(addr, idx); mut != nil {
res[addr] = *mut
}
}
return &res
}
type StorageKeys map[common.Address][]common.Hash
// StorageKeys returns the set of accounts and storage keys mutated in the access list.
// If reads is set, the un-mutated accounts/keys are included in the result.
func (a AccessListReader) StorageKeys(reads bool) (keys StorageKeys) {
keys = make(StorageKeys)
for addr, acct := range a {
for _, storageChange := range acct.StorageChanges {
keys[addr] = append(keys[addr], storageChange.Slot.ToHash())
}
if !(reads && len(acct.StorageReads) > 0) {
continue
}
for _, storageRead := range acct.StorageReads {
keys[addr] = append(keys[addr], storageRead.ToHash())
}
}
return
}
// Storage returns the value of a storage key at the start of executing an index.
// If the slot has no mutations in the access list, it returns nil.
func (a AccessListReader) Storage(addr common.Address, key common.Hash, idx int) (val *common.Hash) {
storageMuts := a.AccountMutations(addr, idx)
if storageMuts != nil {
res, ok := storageMuts.StorageWrites[key]
if ok {
return &res
}
}
return nil
}
// Copy returns a deep copy of the access list
func (e BlockAccessList) Copy() (res BlockAccessList) {
for _, accountAccess := range e {
res = append(res, accountAccess.Copy())
}
return
}
// Eq returns whether the calling instance is equal to the provided one.
func (a *AccountMutations) Eq(other *AccountMutations) bool {
if a.Balance != nil || other.Balance != nil {
if a.Balance == nil || other.Balance == nil {
return false
}
if !a.Balance.Eq(other.Balance) {
return false
}
}
if (len(a.Code) != 0 || len(other.Code) != 0) && !bytes.Equal(a.Code, other.Code) {
return false
}
if a.Nonce != nil || other.Nonce != nil {
if a.Nonce == nil || other.Nonce == nil {
return false
}
if *a.Nonce != *other.Nonce {
return false
}
}
if a.StorageWrites != nil || other.StorageWrites != nil {
if !maps.Equal(a.StorageWrites, other.StorageWrites) {
return false
}
}
return true
}

File diff suppressed because one or more lines are too long

View file

@ -19,8 +19,11 @@ package bal
import (
"bytes"
"cmp"
"encoding/hex"
"encoding/json"
"errors"
"fmt"
"github.com/ethereum/go-ethereum/log"
"io"
"maps"
"slices"
@ -33,27 +36,95 @@ import (
"github.com/holiman/uint256"
)
//go:generate go run github.com/ethereum/go-ethereum/rlp/rlpgen -out bal_encoding_rlp_generated.go -type BlockAccessList -decoder
//go:generate go run github.com/ethereum/go-ethereum/rlp/rlpgen -out bal_encoding_rlp_generated.go -type AccountAccess -decoder
// These are objects used as input for the access list encoding. They mirror
// the spec format.
// BlockAccessList is the encoding format of ConstructionBlockAccessList.
type BlockAccessList struct {
Accesses []AccountAccess `ssz-max:"300000"`
// BlockAccessList is the encoding format of AccessListBuilder.
type BlockAccessList []AccountAccess
func (e BlockAccessList) EncodeRLP(_w io.Writer) error {
w := rlp.NewEncoderBuffer(_w)
l := w.List()
for _, access := range e {
access.EncodeRLP(w)
}
w.ListEnd(l)
return w.Flush()
}
func (e *BlockAccessList) DecodeRLP(dec *rlp.Stream) error {
if _, err := dec.List(); err != nil {
return err
}
*e = (*e)[:0]
for dec.MoreDataInList() {
var access AccountAccess
if err := access.DecodeRLP(dec); err != nil {
return err
}
*e = append(*e, access)
}
dec.ListEnd()
return nil
}
func (e *BlockAccessList) EncodedSize() int {
b, err := rlp.EncodeToBytes(e)
if err != nil {
// TODO: proper to crit here?
log.Crit("failed to rlp encode access list", "err", err)
}
return len(b)
}
func (e *BlockAccessList) JSONString() string {
res, _ := json.MarshalIndent(e.StringableRepresentation(), "", " ")
return string(res)
}
// StringableRepresentation returns an instance of the block access list
// which can be converted to a human-readable JSON representation.
func (e *BlockAccessList) StringableRepresentation() interface{} {
res := []AccountAccess{}
for _, aa := range *e {
res = append(res, aa)
}
return &res
}
func (e *BlockAccessList) String() string {
var res bytes.Buffer
enc := json.NewEncoder(&res)
enc.SetIndent("", " ")
// TODO: check error
enc.Encode(e)
return res.String()
}
// TODO: check that no fields are nil in Validate (unless it's valid for them to be nil)
// Validate returns an error if the contents of the access list are not ordered
// according to the spec or any code changes are contained which exceed protocol
// max code size.
func (e *BlockAccessList) Validate() error {
if !slices.IsSortedFunc(e.Accesses, func(a, b AccountAccess) int {
func (e BlockAccessList) Validate(blockTxCount int) error {
if !slices.IsSortedFunc(e, func(a, b AccountAccess) int {
return bytes.Compare(a.Address[:], b.Address[:])
}) {
return errors.New("block access list accounts not in lexicographic order")
}
for _, entry := range e.Accesses {
if err := entry.validate(); err != nil {
// check that the accounts are unique
addrs := make(map[common.Address]struct{})
for _, acct := range e {
addr := acct.Address
if _, ok := addrs[addr]; ok {
return fmt.Errorf("duplicate account in block access list: %x", addr)
}
addrs[addr] = struct{}{}
}
for _, entry := range e {
if err := entry.validate(blockTxCount); err != nil {
return err
}
}
@ -70,53 +141,135 @@ func (e *BlockAccessList) Hash() common.Hash {
// under reasonable conditions.
panic(err)
}
/*
bal, err := json.MarshalIndent(e.StringableRepresentation(), "", " ")
if err != nil {
panic(err)
}
*/
return crypto.Keccak256Hash(enc.Bytes())
}
// encodeBalance encodes the provided balance into 16-bytes.
func encodeBalance(val *uint256.Int) [16]byte {
valBytes := val.Bytes()
if len(valBytes) > 16 {
panic("can't encode value that is greater than 16 bytes in size")
}
var enc [16]byte
copy(enc[16-len(valBytes):], valBytes[:])
return enc
}
// encodingBalanceChange is the encoding format of BalanceChange.
type encodingBalanceChange struct {
TxIdx uint16 `ssz-size:"2"`
Balance [16]byte `ssz-size:"16"`
TxIdx uint16 `json:"txIndex"`
Balance *uint256.Int `json:"balance"`
}
// encodingAccountNonce is the encoding format of NonceChange.
type encodingAccountNonce struct {
TxIdx uint16 `ssz-size:"2"`
Nonce uint64 `ssz-size:"8"`
TxIdx uint16 `json:"txIndex"`
Nonce uint64 `json:"nonce"`
}
// encodingStorageWrite is the encoding format of StorageWrites.
type encodingStorageWrite struct {
TxIdx uint16
ValueAfter [32]byte `ssz-size:"32"`
TxIdx uint16 `json:"txIndex"`
ValueAfter *EncodedStorage `json:"valueAfter"`
}
// EncodedStorage can represent either a storage key or value
type EncodedStorage struct {
inner *uint256.Int
}
var _ rlp.Encoder = &EncodedStorage{}
var _ rlp.Decoder = &EncodedStorage{}
func (e *EncodedStorage) ToHash() common.Hash {
if e == nil {
return common.Hash{}
}
return e.inner.Bytes32()
}
func newEncodedStorageFromHash(hash common.Hash) *EncodedStorage {
return &EncodedStorage{
new(uint256.Int).SetBytes(hash[:]),
}
}
func (s *EncodedStorage) UnmarshalJSON(b []byte) error {
var str string
if err := json.Unmarshal(b, &str); err != nil {
return err
}
str = strings.TrimLeft(str, "0x")
if len(str) == 0 {
return nil
}
if len(str)%2 == 1 {
str = "0" + str
}
val, err := hex.DecodeString(str)
if err != nil {
return err
}
if len(val) > 32 {
return fmt.Errorf("storage key/value cannot be greater than 32 bytes")
}
// TODO: check is s == nil ?? should be programmer error
*s = EncodedStorage{
inner: new(uint256.Int).SetBytes(val),
}
return nil
}
func (s EncodedStorage) MarshalJSON() ([]byte, error) {
return json.Marshal(s.inner.Hex())
}
func (s *EncodedStorage) EncodeRLP(_w io.Writer) error {
return s.inner.EncodeRLP(_w)
}
func (s *EncodedStorage) DecodeRLP(dec *rlp.Stream) error {
if s == nil {
*s = EncodedStorage{}
}
s.inner = uint256.NewInt(0)
return dec.ReadUint256(s.inner)
}
// encodingStorageWrite is the encoding format of SlotWrites.
type encodingSlotWrites struct {
Slot [32]byte `ssz-size:"32"`
Accesses []encodingStorageWrite `ssz-max:"300000"`
Slot *EncodedStorage `json:"slot"`
Accesses []encodingStorageWrite `json:"accesses"`
}
// validate returns an instance of the encoding-representation slot writes in
// working representation.
func (e *encodingSlotWrites) validate() error {
if slices.IsSortedFunc(e.Accesses, func(a, b encodingStorageWrite) int {
func (e *encodingSlotWrites) validate(blockTxCount int) error {
if e.Slot == nil {
return errors.New("nil slot key")
}
if !slices.IsSortedFunc(e.Accesses, func(a, b encodingStorageWrite) int {
return cmp.Compare[uint16](a.TxIdx, b.TxIdx)
}) {
return nil
return errors.New("storage write tx indices not in order")
}
return errors.New("storage write tx indices not in order")
for i, access := range e.Accesses {
if access.ValueAfter == nil {
return errors.New("nil storage write post")
}
if i > 0 && e.Accesses[i-1].TxIdx == access.TxIdx {
return errors.New("duplicate storage write index")
}
}
// TODO: add test that covers there are actually storage modifications here
// if there aren't, it should be a bad block
if len(e.Accesses) == 0 {
return fmt.Errorf("empty storage writes")
} else if int(e.Accesses[len(e.Accesses)-1].TxIdx) >= blockTxCount+2 {
return fmt.Errorf("storage access reported index higher than allowed")
}
return nil
}
// encodingCodeChange contains the runtime bytecode deployed at an address
@ -126,64 +279,120 @@ type encodingCodeChange struct {
Code []byte `ssz-max:"300000"` // TODO(rjl493456442) shall we put the limit here? The limit will be increased gradually
}
// AccountAccess is the encoding format of ConstructionAccountAccess.
// AccountAccess is the encoding format of ConstructionAccountAccesses.
type AccountAccess struct {
Address [20]byte `ssz-size:"20"` // 20-byte Ethereum address
StorageWrites []encodingSlotWrites `ssz-max:"300000"` // Storage changes (slot -> [tx_index -> new_value])
StorageReads [][32]byte `ssz-max:"300000"` // Read-only storage keys
BalanceChanges []encodingBalanceChange `ssz-max:"300000"` // Balance changes ([tx_index -> post_balance])
NonceChanges []encodingAccountNonce `ssz-max:"300000"` // Nonce changes ([tx_index -> new_nonce])
CodeChanges []encodingCodeChange `ssz-max:"300000"` // Code changes ([tx_index -> new_code])
Address common.Address `json:"address,omitempty"` // 20-byte Ethereum address
StorageChanges []encodingSlotWrites `json:"storageChanges,omitempty"` // EncodedStorage changes (slot -> [tx_index -> new_value])
StorageReads []*EncodedStorage `json:"storageReads,omitempty"` // Read-only storage keys
BalanceChanges []encodingBalanceChange `json:"balanceChanges,omitempty"` // Balance changes ([tx_index -> post_balance])
NonceChanges []encodingAccountNonce `json:"nonceChanges,omitempty"` // Nonce changes ([tx_index -> new_nonce])
CodeChanges []encodingCodeChange `json:"code,omitempty"` // CodeChanges changes ([tx_index -> new_code])
}
// validate converts the account accesses out of encoding format.
// If any of the keys in the encoding object are not ordered according to the
// spec, an error is returned.
func (e *AccountAccess) validate() error {
func (e *AccountAccess) validate(blockTxCount int) error {
// Check the storage write slots are sorted in order
if !slices.IsSortedFunc(e.StorageWrites, func(a, b encodingSlotWrites) int {
return bytes.Compare(a.Slot[:], b.Slot[:])
if !slices.IsSortedFunc(e.StorageChanges, func(a, b encodingSlotWrites) int {
aHash, bHash := a.Slot.ToHash(), b.Slot.ToHash()
return bytes.Compare(aHash[:], bHash[:])
}) {
return errors.New("storage writes slots not in lexicographic order")
}
for _, write := range e.StorageWrites {
if err := write.validate(); err != nil {
for _, write := range e.StorageChanges {
if err := write.validate(blockTxCount); err != nil {
return err
}
}
readKeys := make(map[common.Hash]struct{})
writeKeys := make(map[common.Hash]struct{})
for _, readKey := range e.StorageReads {
if _, ok := readKeys[readKey.ToHash()]; ok {
return errors.New("duplicate read key")
}
readKeys[readKey.ToHash()] = struct{}{}
}
for _, write := range e.StorageChanges {
writeKey := write.Slot
if _, ok := writeKeys[writeKey.ToHash()]; ok {
return errors.New("duplicate write key")
}
writeKeys[writeKey.ToHash()] = struct{}{}
}
for readKey := range readKeys {
if _, ok := writeKeys[readKey]; ok {
return errors.New("storage key reported in both read/write sets")
}
}
// Check the storage read slots are sorted in order
if !slices.IsSortedFunc(e.StorageReads, func(a, b [32]byte) int {
return bytes.Compare(a[:], b[:])
if !slices.IsSortedFunc(e.StorageReads, func(a, b *EncodedStorage) int {
aHash, bHash := a.ToHash(), b.ToHash()
return bytes.Compare(aHash[:], bHash[:])
}) {
return errors.New("storage read slots not in lexicographic order")
}
// Check the balance changes are sorted in order
// and that none of them report an index above what is allowed
if !slices.IsSortedFunc(e.BalanceChanges, func(a, b encodingBalanceChange) int {
return cmp.Compare[uint16](a.TxIdx, b.TxIdx)
}) {
return errors.New("balance changes not in ascending order by tx index")
}
if len(e.BalanceChanges) > 0 && int(e.BalanceChanges[len(e.BalanceChanges)-1].TxIdx) > blockTxCount+2 {
return errors.New("highest balance change index beyond what is allowed")
}
// check that the balance values are set and there are no duplicate index entries
for i, balanceChange := range e.BalanceChanges {
if balanceChange.Balance == nil {
return errors.New("nil balance change value")
}
if i > 0 && e.BalanceChanges[i-1].TxIdx == balanceChange.TxIdx {
return errors.New("duplicate index for balance change")
}
}
// Check the nonce changes are sorted in order
// and that none of them report an index above what is allowed
if !slices.IsSortedFunc(e.NonceChanges, func(a, b encodingAccountNonce) int {
return cmp.Compare[uint16](a.TxIdx, b.TxIdx)
}) {
return errors.New("nonce changes not in ascending order by tx index")
}
if len(e.NonceChanges) > 0 && int(e.NonceChanges[len(e.NonceChanges)-1].TxIdx) >= blockTxCount+2 {
return errors.New("highest nonce change index beyond what is allowed")
}
for i, nonceChange := range e.NonceChanges {
if i > 0 && nonceChange.TxIdx == e.NonceChanges[i-1].TxIdx {
return errors.New("duplicate index reported in nonce changes")
}
}
// Check the code changes are sorted in order
// TODO: contact testing team to add a test case which has the code changes out of order,
// as it wasn't checked here previously
if !slices.IsSortedFunc(e.CodeChanges, func(a, b encodingCodeChange) int {
return cmp.Compare[uint16](a.TxIndex, b.TxIndex)
}) {
return errors.New("code changes not in ascending order by tx index")
return errors.New("code changes not in ascending order")
}
for _, change := range e.CodeChanges {
// TODO(rjl493456442): This check should be fork-aware, since the limit may
// differ across forks.
if len(change.Code) > params.MaxCodeSize {
return errors.New("code change contained oversized code")
if len(e.CodeChanges) > 0 && int(e.CodeChanges[len(e.CodeChanges)-1].TxIndex) >= blockTxCount+2 {
return errors.New("highest code change index beyond what is allowed")
}
for i, codeChange := range e.CodeChanges {
if i > 0 && codeChange.TxIndex == e.CodeChanges[i-1].TxIndex {
return errors.New("duplicate index reported in code changes")
}
}
// validate that code changes could plausibly be correct (none exceed
// max code size of a contract)
for _, codeChange := range e.CodeChanges {
if len(codeChange.Code) > params.MaxCodeSize {
return fmt.Errorf("code change contained oversized code")
}
}
return nil
@ -196,41 +405,40 @@ func (e *AccountAccess) Copy() AccountAccess {
StorageReads: slices.Clone(e.StorageReads),
BalanceChanges: slices.Clone(e.BalanceChanges),
NonceChanges: slices.Clone(e.NonceChanges),
StorageWrites: make([]encodingSlotWrites, 0, len(e.StorageWrites)),
CodeChanges: make([]encodingCodeChange, 0, len(e.CodeChanges)),
}
for _, storageWrite := range e.StorageWrites {
res.StorageWrites = append(res.StorageWrites, encodingSlotWrites{
for _, storageWrite := range e.StorageChanges {
res.StorageChanges = append(res.StorageChanges, encodingSlotWrites{
Slot: storageWrite.Slot,
Accesses: slices.Clone(storageWrite.Accesses),
})
}
for _, codeChange := range e.CodeChanges {
res.CodeChanges = append(res.CodeChanges, encodingCodeChange{
TxIndex: codeChange.TxIndex,
Code: bytes.Clone(codeChange.Code),
})
res.CodeChanges = append(res.CodeChanges,
encodingCodeChange{
codeChange.TxIndex,
bytes.Clone(codeChange.Code),
})
}
return res
}
// EncodeRLP returns the RLP-encoded access list
func (b *ConstructionBlockAccessList) EncodeRLP(wr io.Writer) error {
return b.toEncodingObj().EncodeRLP(wr)
func (c ConstructionBlockAccessList) EncodeRLP(wr io.Writer) error {
return c.ToEncodingObj().EncodeRLP(wr)
}
var _ rlp.Encoder = &ConstructionBlockAccessList{}
// toEncodingObj creates an instance of the ConstructionAccountAccess of the type that is
// toEncodingObj creates an instance of the ConstructionAccountAccesses of the type that is
// used as input for the encoding.
func (a *ConstructionAccountAccess) toEncodingObj(addr common.Address) AccountAccess {
func (a *ConstructionAccountAccesses) toEncodingObj(addr common.Address) AccountAccess {
res := AccountAccess{
Address: addr,
StorageWrites: make([]encodingSlotWrites, 0, len(a.StorageWrites)),
StorageReads: make([][32]byte, 0, len(a.StorageReads)),
BalanceChanges: make([]encodingBalanceChange, 0, len(a.BalanceChanges)),
NonceChanges: make([]encodingAccountNonce, 0, len(a.NonceChanges)),
CodeChanges: make([]encodingCodeChange, 0, len(a.CodeChange)),
StorageChanges: make([]encodingSlotWrites, 0),
StorageReads: make([]*EncodedStorage, 0),
BalanceChanges: make([]encodingBalanceChange, 0),
NonceChanges: make([]encodingAccountNonce, 0),
CodeChanges: make([]encodingCodeChange, 0),
}
// Convert write slots
@ -238,7 +446,7 @@ func (a *ConstructionAccountAccess) toEncodingObj(addr common.Address) AccountAc
slices.SortFunc(writeSlots, common.Hash.Cmp)
for _, slot := range writeSlots {
var obj encodingSlotWrites
obj.Slot = slot
obj.Slot = newEncodedStorageFromHash(slot)
slotWrites := a.StorageWrites[slot]
obj.Accesses = make([]encodingStorageWrite, 0, len(slotWrites))
@ -248,17 +456,17 @@ func (a *ConstructionAccountAccess) toEncodingObj(addr common.Address) AccountAc
for _, index := range indices {
obj.Accesses = append(obj.Accesses, encodingStorageWrite{
TxIdx: index,
ValueAfter: slotWrites[index],
ValueAfter: newEncodedStorageFromHash(slotWrites[index]),
})
}
res.StorageWrites = append(res.StorageWrites, obj)
res.StorageChanges = append(res.StorageChanges, obj)
}
// Convert read slots
readSlots := slices.Collect(maps.Keys(a.StorageReads))
slices.SortFunc(readSlots, common.Hash.Cmp)
for _, slot := range readSlots {
res.StorageReads = append(res.StorageReads, slot)
res.StorageReads = append(res.StorageReads, newEncodedStorageFromHash(slot))
}
// Convert balance changes
@ -267,7 +475,7 @@ func (a *ConstructionAccountAccess) toEncodingObj(addr common.Address) AccountAc
for _, idx := range balanceIndices {
res.BalanceChanges = append(res.BalanceChanges, encodingBalanceChange{
TxIdx: idx,
Balance: encodeBalance(a.BalanceChanges[idx]),
Balance: new(uint256.Int).Set(a.BalanceChanges[idx]),
})
}
@ -282,77 +490,31 @@ func (a *ConstructionAccountAccess) toEncodingObj(addr common.Address) AccountAc
}
// Convert code change
codeIndices := slices.Collect(maps.Keys(a.CodeChange))
slices.SortFunc(codeIndices, cmp.Compare[uint16])
for _, idx := range codeIndices {
codeChangeIdxs := slices.Collect(maps.Keys(a.CodeChanges))
slices.SortFunc(codeChangeIdxs, cmp.Compare[uint16])
for _, idx := range codeChangeIdxs {
res.CodeChanges = append(res.CodeChanges, encodingCodeChange{
TxIndex: idx,
Code: a.CodeChange[idx],
idx,
bytes.Clone(a.CodeChanges[idx]),
})
}
return res
}
// toEncodingObj returns an instance of the access list expressed as the type
// ToEncodingObj returns an instance of the access list expressed as the type
// which is used as input for the encoding/decoding.
func (b *ConstructionBlockAccessList) toEncodingObj() *BlockAccessList {
func (c ConstructionBlockAccessList) ToEncodingObj() *BlockAccessList {
var addresses []common.Address
for addr := range b.Accounts {
for addr := range c {
addresses = append(addresses, addr)
}
slices.SortFunc(addresses, common.Address.Cmp)
var res BlockAccessList
for _, addr := range addresses {
res.Accesses = append(res.Accesses, b.Accounts[addr].toEncodingObj(addr))
res = append(res, c[addr].toEncodingObj(addr))
}
return &res
}
func (e *BlockAccessList) PrettyPrint() string {
var res bytes.Buffer
printWithIndent := func(indent int, text string) {
fmt.Fprintf(&res, "%s%s\n", strings.Repeat(" ", indent), text)
}
for _, accountDiff := range e.Accesses {
printWithIndent(0, fmt.Sprintf("%x:", accountDiff.Address))
printWithIndent(1, "storage writes:")
for _, sWrite := range accountDiff.StorageWrites {
printWithIndent(2, fmt.Sprintf("%x:", sWrite.Slot))
for _, access := range sWrite.Accesses {
printWithIndent(3, fmt.Sprintf("%d: %x", access.TxIdx, access.ValueAfter))
}
}
printWithIndent(1, "storage reads:")
for _, slot := range accountDiff.StorageReads {
printWithIndent(2, fmt.Sprintf("%x", slot))
}
printWithIndent(1, "balance changes:")
for _, change := range accountDiff.BalanceChanges {
balance := new(uint256.Int).SetBytes(change.Balance[:]).String()
printWithIndent(2, fmt.Sprintf("%d: %s", change.TxIdx, balance))
}
printWithIndent(1, "nonce changes:")
for _, change := range accountDiff.NonceChanges {
printWithIndent(2, fmt.Sprintf("%d: %d", change.TxIdx, change.Nonce))
}
printWithIndent(1, "code changes:")
for _, change := range accountDiff.CodeChanges {
printWithIndent(2, fmt.Sprintf("%d: %x", change.TxIndex, change.Code))
}
}
return res.String()
}
// Copy returns a deep copy of the access list
func (e *BlockAccessList) Copy() (res BlockAccessList) {
for _, accountAccess := range e.Accesses {
res.Accesses = append(res.Accesses, accountAccess.Copy())
}
return
}
type ContractCode []byte

View file

@ -0,0 +1,107 @@
package bal
import (
"encoding/json"
"fmt"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/rlp"
)
func (c *ContractCode) MarshalJSON() ([]byte, error) {
hexStr := fmt.Sprintf("%x", *c)
return json.Marshal(hexStr)
}
func (e encodingBalanceChange) MarshalJSON() ([]byte, error) {
type Alias encodingBalanceChange
return json.Marshal(&struct {
TxIdx string `json:"txIndex"`
*Alias
}{
TxIdx: fmt.Sprintf("0x%x", e.TxIdx),
Alias: (*Alias)(&e),
})
}
func (e *encodingBalanceChange) UnmarshalJSON(data []byte) error {
type Alias encodingBalanceChange
aux := &struct {
TxIdx string `json:"txIndex"`
*Alias
}{
Alias: (*Alias)(e),
}
if err := json.Unmarshal(data, &aux); err != nil {
return err
}
if len(aux.TxIdx) >= 2 && aux.TxIdx[:2] == "0x" {
if _, err := fmt.Sscanf(aux.TxIdx, "0x%x", &e.TxIdx); err != nil {
return err
}
}
return nil
}
func (e encodingAccountNonce) MarshalJSON() ([]byte, error) {
type Alias encodingAccountNonce
return json.Marshal(&struct {
TxIdx string `json:"txIndex"`
Nonce string `json:"nonce"`
*Alias
}{
TxIdx: fmt.Sprintf("0x%x", e.TxIdx),
Nonce: fmt.Sprintf("0x%x", e.Nonce),
Alias: (*Alias)(&e),
})
}
func (e *encodingAccountNonce) UnmarshalJSON(data []byte) error {
type Alias encodingAccountNonce
aux := &struct {
TxIdx string `json:"txIndex"`
Nonce string `json:"nonce"`
*Alias
}{
Alias: (*Alias)(e),
}
if err := json.Unmarshal(data, &aux); err != nil {
return err
}
if len(aux.TxIdx) >= 2 && aux.TxIdx[:2] == "0x" {
if _, err := fmt.Sscanf(aux.TxIdx, "0x%x", &e.TxIdx); err != nil {
return err
}
}
if len(aux.Nonce) >= 2 && aux.Nonce[:2] == "0x" {
if _, err := fmt.Sscanf(aux.Nonce, "0x%x", &e.Nonce); err != nil {
return err
}
}
return nil
}
// UnmarshalJSON implements json.Unmarshaler to decode from RLP hex bytes
func (b *BlockAccessList) UnmarshalJSON(input []byte) error {
// Handle both hex string and object formats
var hexBytes hexutil.Bytes
if err := json.Unmarshal(input, &hexBytes); err == nil {
// It's a hex string, decode from RLP
return rlp.DecodeBytes(hexBytes, b)
}
// Otherwise try to unmarshal as structured JSON
var tmp []AccountAccess
if err := json.Unmarshal(input, &tmp); err != nil {
return err
}
*b = BlockAccessList(tmp)
return nil
}
// MarshalJSON implements json.Marshaler to encode as RLP hex bytes
func (b BlockAccessList) MarshalJSON() ([]byte, error) {
// Encode to RLP then to hex
rlpBytes, err := rlp.EncodeToBytes(b)
if err != nil {
return nil, err
}
return json.Marshal(hexutil.Bytes(rlpBytes))
}

View file

@ -2,275 +2,260 @@
package bal
import "github.com/ethereum/go-ethereum/common"
import "github.com/ethereum/go-ethereum/rlp"
import "github.com/holiman/uint256"
import "io"
func (obj *BlockAccessList) EncodeRLP(_w io.Writer) error {
func (obj *AccountAccess) EncodeRLP(_w io.Writer) error {
w := rlp.NewEncoderBuffer(_w)
_tmp0 := w.List()
w.WriteBytes(obj.Address[:])
_tmp1 := w.List()
for _, _tmp2 := range obj.Accesses {
for _, _tmp2 := range obj.StorageChanges {
_tmp3 := w.List()
w.WriteBytes(_tmp2.Address[:])
if err := _tmp2.Slot.EncodeRLP(w); err != nil {
return err
}
_tmp4 := w.List()
for _, _tmp5 := range _tmp2.StorageWrites {
for _, _tmp5 := range _tmp2.Accesses {
_tmp6 := w.List()
w.WriteBytes(_tmp5.Slot[:])
_tmp7 := w.List()
for _, _tmp8 := range _tmp5.Accesses {
_tmp9 := w.List()
w.WriteUint64(uint64(_tmp8.TxIdx))
w.WriteBytes(_tmp8.ValueAfter[:])
w.ListEnd(_tmp9)
w.WriteUint64(uint64(_tmp5.TxIdx))
if err := _tmp5.ValueAfter.EncodeRLP(w); err != nil {
return err
}
w.ListEnd(_tmp7)
w.ListEnd(_tmp6)
}
w.ListEnd(_tmp4)
_tmp10 := w.List()
for _, _tmp11 := range _tmp2.StorageReads {
w.WriteBytes(_tmp11[:])
}
w.ListEnd(_tmp10)
_tmp12 := w.List()
for _, _tmp13 := range _tmp2.BalanceChanges {
_tmp14 := w.List()
w.WriteUint64(uint64(_tmp13.TxIdx))
w.WriteBytes(_tmp13.Balance[:])
w.ListEnd(_tmp14)
}
w.ListEnd(_tmp12)
_tmp15 := w.List()
for _, _tmp16 := range _tmp2.NonceChanges {
_tmp17 := w.List()
w.WriteUint64(uint64(_tmp16.TxIdx))
w.WriteUint64(_tmp16.Nonce)
w.ListEnd(_tmp17)
}
w.ListEnd(_tmp15)
_tmp18 := w.List()
for _, _tmp19 := range _tmp2.CodeChanges {
_tmp20 := w.List()
w.WriteUint64(uint64(_tmp19.TxIndex))
w.WriteBytes(_tmp19.Code)
w.ListEnd(_tmp20)
}
w.ListEnd(_tmp18)
w.ListEnd(_tmp3)
}
w.ListEnd(_tmp1)
_tmp7 := w.List()
for _, _tmp8 := range obj.StorageReads {
if err := _tmp8.EncodeRLP(w); err != nil {
return err
}
}
w.ListEnd(_tmp7)
_tmp9 := w.List()
for _, _tmp10 := range obj.BalanceChanges {
_tmp11 := w.List()
w.WriteUint64(uint64(_tmp10.TxIdx))
if _tmp10.Balance == nil {
w.Write(rlp.EmptyString)
} else {
w.WriteUint256(_tmp10.Balance)
}
w.ListEnd(_tmp11)
}
w.ListEnd(_tmp9)
_tmp12 := w.List()
for _, _tmp13 := range obj.NonceChanges {
_tmp14 := w.List()
w.WriteUint64(uint64(_tmp13.TxIdx))
w.WriteUint64(_tmp13.Nonce)
w.ListEnd(_tmp14)
}
w.ListEnd(_tmp12)
_tmp15 := w.List()
for _, _tmp16 := range obj.CodeChanges {
_tmp17 := w.List()
w.WriteUint64(uint64(_tmp16.TxIndex))
w.WriteBytes(_tmp16.Code)
w.ListEnd(_tmp17)
}
w.ListEnd(_tmp15)
w.ListEnd(_tmp0)
return w.Flush()
}
func (obj *BlockAccessList) DecodeRLP(dec *rlp.Stream) error {
var _tmp0 BlockAccessList
func (obj *AccountAccess) DecodeRLP(dec *rlp.Stream) error {
var _tmp0 AccountAccess
{
if _, err := dec.List(); err != nil {
return err
}
// Accesses:
var _tmp1 []AccountAccess
// Address:
var _tmp1 common.Address
if err := dec.ReadBytes(_tmp1[:]); err != nil {
return err
}
_tmp0.Address = _tmp1
// StorageChanges:
var _tmp2 []encodingSlotWrites
if _, err := dec.List(); err != nil {
return err
}
for dec.MoreDataInList() {
var _tmp2 AccountAccess
var _tmp3 encodingSlotWrites
{
if _, err := dec.List(); err != nil {
return err
}
// Address:
var _tmp3 [20]byte
if err := dec.ReadBytes(_tmp3[:]); err != nil {
// Slot:
_tmp4 := new(EncodedStorage)
if err := _tmp4.DecodeRLP(dec); err != nil {
return err
}
_tmp2.Address = _tmp3
// StorageWrites:
var _tmp4 []encodingSlotWrites
_tmp3.Slot = _tmp4
// Accesses:
var _tmp5 []encodingStorageWrite
if _, err := dec.List(); err != nil {
return err
}
for dec.MoreDataInList() {
var _tmp5 encodingSlotWrites
{
if _, err := dec.List(); err != nil {
return err
}
// Slot:
var _tmp6 [32]byte
if err := dec.ReadBytes(_tmp6[:]); err != nil {
return err
}
_tmp5.Slot = _tmp6
// Accesses:
var _tmp7 []encodingStorageWrite
if _, err := dec.List(); err != nil {
return err
}
for dec.MoreDataInList() {
var _tmp8 encodingStorageWrite
{
if _, err := dec.List(); err != nil {
return err
}
// TxIdx:
_tmp9, err := dec.Uint16()
if err != nil {
return err
}
_tmp8.TxIdx = _tmp9
// ValueAfter:
var _tmp10 [32]byte
if err := dec.ReadBytes(_tmp10[:]); err != nil {
return err
}
_tmp8.ValueAfter = _tmp10
if err := dec.ListEnd(); err != nil {
return err
}
}
_tmp7 = append(_tmp7, _tmp8)
}
if err := dec.ListEnd(); err != nil {
return err
}
_tmp5.Accesses = _tmp7
if err := dec.ListEnd(); err != nil {
return err
}
}
_tmp4 = append(_tmp4, _tmp5)
}
if err := dec.ListEnd(); err != nil {
return err
}
_tmp2.StorageWrites = _tmp4
// StorageReads:
var _tmp11 [][32]byte
if _, err := dec.List(); err != nil {
return err
}
for dec.MoreDataInList() {
var _tmp12 [32]byte
if err := dec.ReadBytes(_tmp12[:]); err != nil {
return err
}
_tmp11 = append(_tmp11, _tmp12)
}
if err := dec.ListEnd(); err != nil {
return err
}
_tmp2.StorageReads = _tmp11
// BalanceChanges:
var _tmp13 []encodingBalanceChange
if _, err := dec.List(); err != nil {
return err
}
for dec.MoreDataInList() {
var _tmp14 encodingBalanceChange
var _tmp6 encodingStorageWrite
{
if _, err := dec.List(); err != nil {
return err
}
// TxIdx:
_tmp15, err := dec.Uint16()
_tmp7, err := dec.Uint16()
if err != nil {
return err
}
_tmp14.TxIdx = _tmp15
// Balance:
var _tmp16 [16]byte
if err := dec.ReadBytes(_tmp16[:]); err != nil {
_tmp6.TxIdx = _tmp7
// ValueAfter:
_tmp8 := new(EncodedStorage)
if err := _tmp8.DecodeRLP(dec); err != nil {
return err
}
_tmp14.Balance = _tmp16
_tmp6.ValueAfter = _tmp8
if err := dec.ListEnd(); err != nil {
return err
}
}
_tmp13 = append(_tmp13, _tmp14)
_tmp5 = append(_tmp5, _tmp6)
}
if err := dec.ListEnd(); err != nil {
return err
}
_tmp2.BalanceChanges = _tmp13
// NonceChanges:
var _tmp17 []encodingAccountNonce
if _, err := dec.List(); err != nil {
return err
}
for dec.MoreDataInList() {
var _tmp18 encodingAccountNonce
{
if _, err := dec.List(); err != nil {
return err
}
// TxIdx:
_tmp19, err := dec.Uint16()
if err != nil {
return err
}
_tmp18.TxIdx = _tmp19
// Nonce:
_tmp20, err := dec.Uint64()
if err != nil {
return err
}
_tmp18.Nonce = _tmp20
if err := dec.ListEnd(); err != nil {
return err
}
}
_tmp17 = append(_tmp17, _tmp18)
}
if err := dec.ListEnd(); err != nil {
return err
}
_tmp2.NonceChanges = _tmp17
// CodeChanges:
var _tmp21 []encodingCodeChange
if _, err := dec.List(); err != nil {
return err
}
for dec.MoreDataInList() {
var _tmp22 encodingCodeChange
{
if _, err := dec.List(); err != nil {
return err
}
// TxIndex:
_tmp23, err := dec.Uint16()
if err != nil {
return err
}
_tmp22.TxIndex = _tmp23
// Code:
_tmp24, err := dec.Bytes()
if err != nil {
return err
}
_tmp22.Code = _tmp24
if err := dec.ListEnd(); err != nil {
return err
}
}
_tmp21 = append(_tmp21, _tmp22)
}
if err := dec.ListEnd(); err != nil {
return err
}
_tmp2.CodeChanges = _tmp21
_tmp3.Accesses = _tmp5
if err := dec.ListEnd(); err != nil {
return err
}
}
_tmp1 = append(_tmp1, _tmp2)
_tmp2 = append(_tmp2, _tmp3)
}
if err := dec.ListEnd(); err != nil {
return err
}
_tmp0.Accesses = _tmp1
_tmp0.StorageChanges = _tmp2
// StorageReads:
var _tmp9 []*EncodedStorage
if _, err := dec.List(); err != nil {
return err
}
for dec.MoreDataInList() {
_tmp10 := new(EncodedStorage)
if err := _tmp10.DecodeRLP(dec); err != nil {
return err
}
_tmp9 = append(_tmp9, _tmp10)
}
if err := dec.ListEnd(); err != nil {
return err
}
_tmp0.StorageReads = _tmp9
// BalanceChanges:
var _tmp11 []encodingBalanceChange
if _, err := dec.List(); err != nil {
return err
}
for dec.MoreDataInList() {
var _tmp12 encodingBalanceChange
{
if _, err := dec.List(); err != nil {
return err
}
// TxIdx:
_tmp13, err := dec.Uint16()
if err != nil {
return err
}
_tmp12.TxIdx = _tmp13
// Balance:
var _tmp14 uint256.Int
if err := dec.ReadUint256(&_tmp14); err != nil {
return err
}
_tmp12.Balance = &_tmp14
if err := dec.ListEnd(); err != nil {
return err
}
}
_tmp11 = append(_tmp11, _tmp12)
}
if err := dec.ListEnd(); err != nil {
return err
}
_tmp0.BalanceChanges = _tmp11
// NonceChanges:
var _tmp15 []encodingAccountNonce
if _, err := dec.List(); err != nil {
return err
}
for dec.MoreDataInList() {
var _tmp16 encodingAccountNonce
{
if _, err := dec.List(); err != nil {
return err
}
// TxIdx:
_tmp17, err := dec.Uint16()
if err != nil {
return err
}
_tmp16.TxIdx = _tmp17
// Nonce:
_tmp18, err := dec.Uint64()
if err != nil {
return err
}
_tmp16.Nonce = _tmp18
if err := dec.ListEnd(); err != nil {
return err
}
}
_tmp15 = append(_tmp15, _tmp16)
}
if err := dec.ListEnd(); err != nil {
return err
}
_tmp0.NonceChanges = _tmp15
// CodeChanges:
var _tmp19 []encodingCodeChange
if _, err := dec.List(); err != nil {
return err
}
for dec.MoreDataInList() {
var _tmp20 encodingCodeChange
{
if _, err := dec.List(); err != nil {
return err
}
// TxIndex:
_tmp21, err := dec.Uint16()
if err != nil {
return err
}
_tmp20.TxIndex = _tmp21
// Code:
_tmp22, err := dec.Bytes()
if err != nil {
return err
}
_tmp20.Code = _tmp22
if err := dec.ListEnd(); err != nil {
return err
}
}
_tmp19 = append(_tmp19, _tmp20)
}
if err := dec.ListEnd(); err != nil {
return err
}
_tmp0.CodeChanges = _tmp19
if err := dec.ListEnd(); err != nil {
return err
}

View file

@ -36,9 +36,9 @@ func equalBALs(a *BlockAccessList, b *BlockAccessList) bool {
return true
}
func makeTestConstructionBAL() *ConstructionBlockAccessList {
return &ConstructionBlockAccessList{
map[common.Address]*ConstructionAccountAccess{
func makeTestConstructionBAL() *AccessListBuilder {
return &AccessListBuilder{
FinalizedAccesses: map[common.Address]*ConstructionAccountAccesses{
common.BytesToAddress([]byte{0xff, 0xff}): {
StorageWrites: map[common.Hash]map[uint16]common.Hash{
common.BytesToHash([]byte{0x01}): {
@ -60,9 +60,10 @@ func makeTestConstructionBAL() *ConstructionBlockAccessList {
1: 2,
2: 6,
},
CodeChange: map[uint16][]byte{
0: common.Hex2Bytes("deadbeef"),
},
CodeChanges: map[uint16]CodeChange{0: {
TxIdx: 0,
Code: common.Hex2Bytes("deadbeef"),
}},
},
common.BytesToAddress([]byte{0xff, 0xff, 0xff}): {
StorageWrites: map[common.Hash]map[uint16]common.Hash{
@ -84,9 +85,6 @@ func makeTestConstructionBAL() *ConstructionBlockAccessList {
NonceChanges: map[uint16]uint64{
1: 2,
},
CodeChange: map[uint16][]byte{
0: common.Hex2Bytes("deadbeef"),
},
},
},
}
@ -95,7 +93,8 @@ func makeTestConstructionBAL() *ConstructionBlockAccessList {
// TestBALEncoding tests that a populated access list can be encoded/decoded correctly.
func TestBALEncoding(t *testing.T) {
var buf bytes.Buffer
bal := makeTestConstructionBAL()
balBuilder := makeTestConstructionBAL()
bal := balBuilder.FinalizedAccesses
err := bal.EncodeRLP(&buf)
if err != nil {
t.Fatalf("encoding failed: %v\n", err)
@ -104,10 +103,10 @@ func TestBALEncoding(t *testing.T) {
if err := dec.DecodeRLP(rlp.NewStream(bytes.NewReader(buf.Bytes()), 10000000)); err != nil {
t.Fatalf("decoding failed: %v\n", err)
}
if dec.Hash() != bal.toEncodingObj().Hash() {
if dec.Hash() != bal.ToEncodingObj().Hash() {
t.Fatalf("encoded block hash doesn't match decoded")
}
if !equalBALs(bal.toEncodingObj(), &dec) {
if !equalBALs(bal.ToEncodingObj(), &dec) {
t.Fatal("decoded BAL doesn't match")
}
}
@ -115,18 +114,18 @@ func TestBALEncoding(t *testing.T) {
func makeTestAccountAccess(sort bool) AccountAccess {
var (
storageWrites []encodingSlotWrites
storageReads [][32]byte
storageReads []common.Hash
balances []encodingBalanceChange
nonces []encodingAccountNonce
)
for i := 0; i < 5; i++ {
slot := encodingSlotWrites{
Slot: testrand.Hash(),
Slot: newEncodedStorageFromHash(testrand.Hash()),
}
for j := 0; j < 3; j++ {
slot.Accesses = append(slot.Accesses, encodingStorageWrite{
TxIdx: uint16(2 * j),
ValueAfter: testrand.Hash(),
ValueAfter: newEncodedStorageFromHash(testrand.Hash()),
})
}
if sort {
@ -138,7 +137,7 @@ func makeTestAccountAccess(sort bool) AccountAccess {
}
if sort {
slices.SortFunc(storageWrites, func(a, b encodingSlotWrites) int {
return bytes.Compare(a.Slot[:], b.Slot[:])
return bytes.Compare(a.Slot.inner.Bytes(), b.Slot.inner.Bytes())
})
}
@ -146,7 +145,7 @@ func makeTestAccountAccess(sort bool) AccountAccess {
storageReads = append(storageReads, testrand.Hash())
}
if sort {
slices.SortFunc(storageReads, func(a, b [32]byte) int {
slices.SortFunc(storageReads, func(a, b common.Hash) int {
return bytes.Compare(a[:], b[:])
})
}
@ -154,7 +153,7 @@ func makeTestAccountAccess(sort bool) AccountAccess {
for i := 0; i < 5; i++ {
balances = append(balances, encodingBalanceChange{
TxIdx: uint16(2 * i),
Balance: [16]byte(testrand.Bytes(16)),
Balance: new(uint256.Int).SetBytes(testrand.Bytes(32)),
})
}
if sort {
@ -175,16 +174,20 @@ func makeTestAccountAccess(sort bool) AccountAccess {
})
}
var encodedStorageReads []*EncodedStorage
for _, slot := range storageReads {
encodedStorageReads = append(encodedStorageReads, newEncodedStorageFromHash(slot))
}
return AccountAccess{
Address: [20]byte(testrand.Bytes(20)),
StorageWrites: storageWrites,
StorageReads: storageReads,
StorageChanges: storageWrites,
StorageReads: encodedStorageReads,
BalanceChanges: balances,
NonceChanges: nonces,
CodeChanges: []encodingCodeChange{
CodeChanges: []CodeChange{
{
TxIndex: 100,
Code: testrand.Bytes(256),
TxIdx: 100,
Code: testrand.Bytes(256),
},
},
}
@ -193,10 +196,10 @@ func makeTestAccountAccess(sort bool) AccountAccess {
func makeTestBAL(sort bool) BlockAccessList {
list := BlockAccessList{}
for i := 0; i < 5; i++ {
list.Accesses = append(list.Accesses, makeTestAccountAccess(sort))
list = append(list, makeTestAccountAccess(sort))
}
if sort {
slices.SortFunc(list.Accesses, func(a, b AccountAccess) int {
slices.SortFunc(list, func(a, b AccountAccess) int {
return bytes.Compare(a.Address[:], b.Address[:])
})
}
@ -216,9 +219,9 @@ func TestBlockAccessListCopy(t *testing.T) {
}
// Make sure the mutations on copy won't affect the origin
for _, aa := range cpyCpy.Accesses {
for _, aa := range cpyCpy {
for i := 0; i < len(aa.StorageReads); i++ {
aa.StorageReads[i] = [32]byte(testrand.Bytes(32))
aa.StorageReads[i] = &EncodedStorage{new(uint256.Int).SetBytes(testrand.Bytes(32))}
}
}
if !reflect.DeepEqual(list, cpy) {
@ -228,8 +231,9 @@ func TestBlockAccessListCopy(t *testing.T) {
func TestBlockAccessListValidation(t *testing.T) {
// Validate the block access list after RLP decoding
testBALMaxIndex := 8
enc := makeTestBAL(true)
if err := enc.Validate(); err != nil {
if err := enc.Validate(testBALMaxIndex); err != nil {
t.Fatalf("Unexpected validation error: %v", err)
}
var buf bytes.Buffer
@ -241,14 +245,17 @@ func TestBlockAccessListValidation(t *testing.T) {
if err := dec.DecodeRLP(rlp.NewStream(bytes.NewReader(buf.Bytes()), 0)); err != nil {
t.Fatalf("Unexpected RLP-decode error: %v", err)
}
if err := dec.Validate(); err != nil {
if err := dec.Validate(testBALMaxIndex); err != nil {
t.Fatalf("Unexpected validation error: %v", err)
}
// Validate the derived block access list
cBAL := makeTestConstructionBAL()
listB := cBAL.toEncodingObj()
if err := listB.Validate(); err != nil {
cBAL := makeTestConstructionBAL().FinalizedAccesses
listB := cBAL.ToEncodingObj()
if err := listB.Validate(testBALMaxIndex); err != nil {
t.Fatalf("Unexpected validation error: %v", err)
}
}
// BALReader test ideas
// * BAL which doesn't have any pre-tx system contracts should return an empty state diff at idx 0

View file

@ -0,0 +1,32 @@
package types
import (
"bytes"
"fmt"
"github.com/ethereum/go-ethereum/rlp"
"io"
"os"
"testing"
)
func TestBALDecoding(t *testing.T) {
var (
err error
data []byte
)
data, err = os.ReadFile("blocks_bal_one.rlp")
if err != nil {
t.Fatalf("error opening file: %v", err)
}
reader := bytes.NewReader(data)
stream := rlp.NewStream(reader, 0)
var blocks Block
for i := 0; err == nil; i++ {
fmt.Printf("decode %d\n", i)
err = stream.Decode(&blocks)
if err != nil && err != io.EOF {
t.Fatalf("error decoding blocks: %v", err)
}
fmt.Printf("block number is %d\n", blocks.NumberU64())
}
}

View file

@ -28,6 +28,8 @@ import (
"sync/atomic"
"time"
"github.com/ethereum/go-ethereum/core/types/bal"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/rlp"
@ -99,6 +101,9 @@ type Header struct {
// RequestsHash was added by EIP-7685 and is ignored in legacy headers.
RequestsHash *common.Hash `json:"requestsHash" rlp:"optional"`
// BlockAccessListHash was added by EIP-7928 and is ignored in legacy headers.
BlockAccessListHash *common.Hash `json:"balHash" rlp:"optional"`
// SlotNumber was added by EIP-7843 and is ignored in legacy headers.
SlotNumber *uint64 `json:"slotNumber" rlp:"optional"`
}
@ -163,10 +168,8 @@ func (h *Header) SanityCheck() error {
// EmptyBody returns true if there is no additional 'body' to complete the header
// that is: no transactions, no uncles and no withdrawals.
func (h *Header) EmptyBody() bool {
var (
emptyWithdrawals = h.WithdrawalsHash == nil || *h.WithdrawalsHash == EmptyWithdrawalsHash
)
return h.TxHash == EmptyTxsHash && h.UncleHash == EmptyUncleHash && emptyWithdrawals
// quick hack to ensure that we download bodies for empty blocks so that we receive the BALs
return false
}
// EmptyReceipts returns true if there are no receipts for this header/block.
@ -204,6 +207,7 @@ type Block struct {
uncles []*Header
transactions Transactions
withdrawals Withdrawals
accessList *bal.BlockAccessList
// caches
hash atomic.Pointer[common.Hash]
@ -220,7 +224,8 @@ type extblock struct {
Header *Header
Txs []*Transaction
Uncles []*Header
Withdrawals []*Withdrawal `rlp:"optional"`
Withdrawals []*Withdrawal `rlp:"optional"`
AccessList *bal.BlockAccessList `rlp:"optional"`
}
// NewBlock creates a new block. The input data is copied, changes to header and to the
@ -284,6 +289,14 @@ func NewBlock(header *Header, body *Body, receipts []*Receipt, hasher ListHasher
return b
}
func NewBlockWithAccessList(header *Header, body *Body, receipts []*Receipt, accessList *bal.BlockAccessList, hasher ListHasher) *Block {
block := NewBlock(header, body, receipts, hasher)
block.accessList = accessList
balHash := accessList.Hash()
block.header.BlockAccessListHash = &balHash
return block
}
// CopyHeader creates a deep copy of a block header.
func CopyHeader(h *Header) *Header {
cpy := *h
@ -329,12 +342,14 @@ func CopyHeader(h *Header) *Header {
// DecodeRLP decodes a block from RLP.
func (b *Block) DecodeRLP(s *rlp.Stream) error {
var eb extblock
var (
eb extblock
)
_, size, _ := s.Kind()
if err := s.Decode(&eb); err != nil {
return err
}
b.header, b.uncles, b.transactions, b.withdrawals = eb.Header, eb.Uncles, eb.Txs, eb.Withdrawals
b.header, b.uncles, b.transactions, b.withdrawals, b.accessList = eb.Header, eb.Uncles, eb.Txs, eb.Withdrawals, eb.AccessList
b.size.Store(rlp.ListSize(size))
return nil
}
@ -346,6 +361,7 @@ func (b *Block) EncodeRLP(w io.Writer) error {
Txs: b.transactions,
Uncles: b.uncles,
Withdrawals: b.withdrawals,
AccessList: b.accessList,
})
}
@ -358,9 +374,10 @@ func (b *Block) Body() *Body {
// Accessors for body data. These do not return a copy because the content
// of the body slices does not affect the cached hash/size in block.
func (b *Block) Uncles() []*Header { return b.uncles }
func (b *Block) Transactions() Transactions { return b.transactions }
func (b *Block) Withdrawals() Withdrawals { return b.withdrawals }
func (b *Block) Uncles() []*Header { return b.uncles }
func (b *Block) Transactions() Transactions { return b.transactions }
func (b *Block) Withdrawals() Withdrawals { return b.withdrawals }
func (b *Block) AccessList() *bal.BlockAccessList { return b.accessList }
func (b *Block) Transaction(hash common.Hash) *Transaction {
for _, transaction := range b.transactions {
@ -513,6 +530,24 @@ func (b *Block) WithBody(body Body) *Block {
return block
}
// WithAccessList returns a copy of the block with the access list embedded.
// It does not set the access list hash in the header of the returned block.
// TODO: ^ when support for --experimental.bal is removed, this function should set the access list hash in the header
func (b *Block) WithAccessList(accessList *bal.BlockAccessList) *Block {
alCopy := accessList.Copy()
block := &Block{
header: b.header,
transactions: slices.Clone(b.transactions),
uncles: make([]*Header, len(b.uncles)),
withdrawals: slices.Clone(b.withdrawals),
accessList: &alCopy,
}
for i := range b.uncles {
block.uncles[i] = CopyHeader(b.uncles[i])
}
return block
}
// Hash returns the keccak256 hash of b's header.
// The hash is computed on the first call and cached thereafter.
func (b *Block) Hash() common.Hash {

View file

@ -16,29 +16,30 @@ var _ = (*headerMarshaling)(nil)
// MarshalJSON marshals as JSON.
func (h Header) MarshalJSON() ([]byte, error) {
type Header struct {
ParentHash common.Hash `json:"parentHash" gencodec:"required"`
UncleHash common.Hash `json:"sha3Uncles" gencodec:"required"`
Coinbase common.Address `json:"miner"`
Root common.Hash `json:"stateRoot" gencodec:"required"`
TxHash common.Hash `json:"transactionsRoot" gencodec:"required"`
ReceiptHash common.Hash `json:"receiptsRoot" gencodec:"required"`
Bloom Bloom `json:"logsBloom" gencodec:"required"`
Difficulty *hexutil.Big `json:"difficulty" gencodec:"required"`
Number *hexutil.Big `json:"number" gencodec:"required"`
GasLimit hexutil.Uint64 `json:"gasLimit" gencodec:"required"`
GasUsed hexutil.Uint64 `json:"gasUsed" gencodec:"required"`
Time hexutil.Uint64 `json:"timestamp" gencodec:"required"`
Extra hexutil.Bytes `json:"extraData" gencodec:"required"`
MixDigest common.Hash `json:"mixHash"`
Nonce BlockNonce `json:"nonce"`
BaseFee *hexutil.Big `json:"baseFeePerGas" rlp:"optional"`
WithdrawalsHash *common.Hash `json:"withdrawalsRoot" rlp:"optional"`
BlobGasUsed *hexutil.Uint64 `json:"blobGasUsed" rlp:"optional"`
ExcessBlobGas *hexutil.Uint64 `json:"excessBlobGas" rlp:"optional"`
ParentBeaconRoot *common.Hash `json:"parentBeaconBlockRoot" rlp:"optional"`
RequestsHash *common.Hash `json:"requestsHash" rlp:"optional"`
SlotNumber *hexutil.Uint64 `json:"slotNumber" rlp:"optional"`
Hash common.Hash `json:"hash"`
ParentHash common.Hash `json:"parentHash" gencodec:"required"`
UncleHash common.Hash `json:"sha3Uncles" gencodec:"required"`
Coinbase common.Address `json:"miner"`
Root common.Hash `json:"stateRoot" gencodec:"required"`
TxHash common.Hash `json:"transactionsRoot" gencodec:"required"`
ReceiptHash common.Hash `json:"receiptsRoot" gencodec:"required"`
Bloom Bloom `json:"logsBloom" gencodec:"required"`
Difficulty *hexutil.Big `json:"difficulty" gencodec:"required"`
Number *hexutil.Big `json:"number" gencodec:"required"`
GasLimit hexutil.Uint64 `json:"gasLimit" gencodec:"required"`
GasUsed hexutil.Uint64 `json:"gasUsed" gencodec:"required"`
Time hexutil.Uint64 `json:"timestamp" gencodec:"required"`
Extra hexutil.Bytes `json:"extraData" gencodec:"required"`
MixDigest common.Hash `json:"mixHash"`
Nonce BlockNonce `json:"nonce"`
BaseFee *hexutil.Big `json:"baseFeePerGas" rlp:"optional"`
WithdrawalsHash *common.Hash `json:"withdrawalsRoot" rlp:"optional"`
BlobGasUsed *hexutil.Uint64 `json:"blobGasUsed" rlp:"optional"`
ExcessBlobGas *hexutil.Uint64 `json:"excessBlobGas" rlp:"optional"`
ParentBeaconRoot *common.Hash `json:"parentBeaconBlockRoot" rlp:"optional"`
RequestsHash *common.Hash `json:"requestsHash" rlp:"optional"`
BlockAccessListHash *common.Hash `json:"balHash" rlp:"optional"`
SlotNumber *hexutil.Uint64 `json:"slotNumber" rlp:"optional"`
Hash common.Hash `json:"hash"`
}
var enc Header
enc.ParentHash = h.ParentHash
@ -62,6 +63,7 @@ func (h Header) MarshalJSON() ([]byte, error) {
enc.ExcessBlobGas = (*hexutil.Uint64)(h.ExcessBlobGas)
enc.ParentBeaconRoot = h.ParentBeaconRoot
enc.RequestsHash = h.RequestsHash
enc.BlockAccessListHash = h.BlockAccessListHash
enc.SlotNumber = (*hexutil.Uint64)(h.SlotNumber)
enc.Hash = h.Hash()
return json.Marshal(&enc)
@ -70,28 +72,29 @@ func (h Header) MarshalJSON() ([]byte, error) {
// UnmarshalJSON unmarshals from JSON.
func (h *Header) UnmarshalJSON(input []byte) error {
type Header struct {
ParentHash *common.Hash `json:"parentHash" gencodec:"required"`
UncleHash *common.Hash `json:"sha3Uncles" gencodec:"required"`
Coinbase *common.Address `json:"miner"`
Root *common.Hash `json:"stateRoot" gencodec:"required"`
TxHash *common.Hash `json:"transactionsRoot" gencodec:"required"`
ReceiptHash *common.Hash `json:"receiptsRoot" gencodec:"required"`
Bloom *Bloom `json:"logsBloom" gencodec:"required"`
Difficulty *hexutil.Big `json:"difficulty" gencodec:"required"`
Number *hexutil.Big `json:"number" gencodec:"required"`
GasLimit *hexutil.Uint64 `json:"gasLimit" gencodec:"required"`
GasUsed *hexutil.Uint64 `json:"gasUsed" gencodec:"required"`
Time *hexutil.Uint64 `json:"timestamp" gencodec:"required"`
Extra *hexutil.Bytes `json:"extraData" gencodec:"required"`
MixDigest *common.Hash `json:"mixHash"`
Nonce *BlockNonce `json:"nonce"`
BaseFee *hexutil.Big `json:"baseFeePerGas" rlp:"optional"`
WithdrawalsHash *common.Hash `json:"withdrawalsRoot" rlp:"optional"`
BlobGasUsed *hexutil.Uint64 `json:"blobGasUsed" rlp:"optional"`
ExcessBlobGas *hexutil.Uint64 `json:"excessBlobGas" rlp:"optional"`
ParentBeaconRoot *common.Hash `json:"parentBeaconBlockRoot" rlp:"optional"`
RequestsHash *common.Hash `json:"requestsHash" rlp:"optional"`
SlotNumber *hexutil.Uint64 `json:"slotNumber" rlp:"optional"`
ParentHash *common.Hash `json:"parentHash" gencodec:"required"`
UncleHash *common.Hash `json:"sha3Uncles" gencodec:"required"`
Coinbase *common.Address `json:"miner"`
Root *common.Hash `json:"stateRoot" gencodec:"required"`
TxHash *common.Hash `json:"transactionsRoot" gencodec:"required"`
ReceiptHash *common.Hash `json:"receiptsRoot" gencodec:"required"`
Bloom *Bloom `json:"logsBloom" gencodec:"required"`
Difficulty *hexutil.Big `json:"difficulty" gencodec:"required"`
Number *hexutil.Big `json:"number" gencodec:"required"`
GasLimit *hexutil.Uint64 `json:"gasLimit" gencodec:"required"`
GasUsed *hexutil.Uint64 `json:"gasUsed" gencodec:"required"`
Time *hexutil.Uint64 `json:"timestamp" gencodec:"required"`
Extra *hexutil.Bytes `json:"extraData" gencodec:"required"`
MixDigest *common.Hash `json:"mixHash"`
Nonce *BlockNonce `json:"nonce"`
BaseFee *hexutil.Big `json:"baseFeePerGas" rlp:"optional"`
WithdrawalsHash *common.Hash `json:"withdrawalsRoot" rlp:"optional"`
BlobGasUsed *hexutil.Uint64 `json:"blobGasUsed" rlp:"optional"`
ExcessBlobGas *hexutil.Uint64 `json:"excessBlobGas" rlp:"optional"`
ParentBeaconRoot *common.Hash `json:"parentBeaconBlockRoot" rlp:"optional"`
RequestsHash *common.Hash `json:"requestsHash" rlp:"optional"`
BlockAccessListHash *common.Hash `json:"balHash" rlp:"optional"`
SlotNumber *hexutil.Uint64 `json:"slotNumber" rlp:"optional"`
}
var dec Header
if err := json.Unmarshal(input, &dec); err != nil {
@ -172,6 +175,9 @@ func (h *Header) UnmarshalJSON(input []byte) error {
if dec.RequestsHash != nil {
h.RequestsHash = dec.RequestsHash
}
if dec.BlockAccessListHash != nil {
h.BlockAccessListHash = dec.BlockAccessListHash
}
if dec.SlotNumber != nil {
h.SlotNumber = (*uint64)(dec.SlotNumber)
}

View file

@ -43,8 +43,9 @@ func (obj *Header) EncodeRLP(_w io.Writer) error {
_tmp4 := obj.ExcessBlobGas != nil
_tmp5 := obj.ParentBeaconRoot != nil
_tmp6 := obj.RequestsHash != nil
_tmp7 := obj.SlotNumber != nil
if _tmp1 || _tmp2 || _tmp3 || _tmp4 || _tmp5 || _tmp6 || _tmp7 {
_tmp7 := obj.BlockAccessListHash != nil
_tmp8 := obj.SlotNumber != nil
if _tmp1 || _tmp2 || _tmp3 || _tmp4 || _tmp5 || _tmp6 || _tmp7 || _tmp8 {
if obj.BaseFee == nil {
w.Write(rlp.EmptyString)
} else {
@ -54,42 +55,49 @@ func (obj *Header) EncodeRLP(_w io.Writer) error {
w.WriteBigInt(obj.BaseFee)
}
}
if _tmp2 || _tmp3 || _tmp4 || _tmp5 || _tmp6 || _tmp7 {
if _tmp2 || _tmp3 || _tmp4 || _tmp5 || _tmp6 || _tmp7 || _tmp8 {
if obj.WithdrawalsHash == nil {
w.Write([]byte{0x80})
} else {
w.WriteBytes(obj.WithdrawalsHash[:])
}
}
if _tmp3 || _tmp4 || _tmp5 || _tmp6 || _tmp7 {
if _tmp3 || _tmp4 || _tmp5 || _tmp6 || _tmp7 || _tmp8 {
if obj.BlobGasUsed == nil {
w.Write([]byte{0x80})
} else {
w.WriteUint64((*obj.BlobGasUsed))
}
}
if _tmp4 || _tmp5 || _tmp6 || _tmp7 {
if _tmp4 || _tmp5 || _tmp6 || _tmp7 || _tmp8 {
if obj.ExcessBlobGas == nil {
w.Write([]byte{0x80})
} else {
w.WriteUint64((*obj.ExcessBlobGas))
}
}
if _tmp5 || _tmp6 || _tmp7 {
if _tmp5 || _tmp6 || _tmp7 || _tmp8 {
if obj.ParentBeaconRoot == nil {
w.Write([]byte{0x80})
} else {
w.WriteBytes(obj.ParentBeaconRoot[:])
}
}
if _tmp6 || _tmp7 {
if _tmp6 || _tmp7 || _tmp8 {
if obj.RequestsHash == nil {
w.Write([]byte{0x80})
} else {
w.WriteBytes(obj.RequestsHash[:])
}
}
if _tmp7 {
if _tmp7 || _tmp8 {
if obj.BlockAccessListHash == nil {
w.Write([]byte{0x80})
} else {
w.WriteBytes(obj.BlockAccessListHash[:])
}
}
if _tmp8 {
if obj.SlotNumber == nil {
w.Write([]byte{0x80})
} else {

View file

@ -489,25 +489,32 @@ func (evm *EVM) StaticCall(caller common.Address, addr common.Address, input []b
// create creates a new contract using code as deployment code.
func (evm *EVM) create(caller common.Address, code []byte, gas uint64, value *uint256.Int, address common.Address, typ OpCode) (ret []byte, createAddress common.Address, leftOverGas uint64, err error) {
// Depth check execution. Fail if we're trying to execute above the
// limit.
var nonce uint64
if evm.depth > int(params.CallCreateDepth) {
err = ErrDepth
} else if !evm.Context.CanTransfer(evm.StateDB, caller, value) {
err = ErrInsufficientBalance
} else {
nonce = evm.StateDB.GetNonce(caller)
if nonce+1 < nonce {
err = ErrNonceUintOverflow
}
}
if err == nil {
evm.StateDB.SetNonce(caller, nonce+1, tracing.NonceChangeContractCreator)
}
if evm.Config.Tracer != nil {
evm.captureBegin(evm.depth, typ, caller, address, code, gas, value.ToBig())
defer func(startGas uint64) {
evm.captureEnd(evm.depth, startGas, leftOverGas, ret, err)
}(gas)
}
// Depth check execution. Fail if we're trying to execute above the
// limit.
if evm.depth > int(params.CallCreateDepth) {
return nil, common.Address{}, gas, ErrDepth
if err != nil {
return nil, common.Address{}, gas, err
}
if !evm.Context.CanTransfer(evm.StateDB, caller, value) {
return nil, common.Address{}, gas, ErrInsufficientBalance
}
nonce := evm.StateDB.GetNonce(caller)
if nonce+1 < nonce {
return nil, common.Address{}, gas, ErrNonceUintOverflow
}
evm.StateDB.SetNonce(caller, nonce+1, tracing.NonceChangeContractCreator)
// Charge the contract creation init gas in verkle mode
if evm.chainRules.IsEIP4762 {
@ -533,6 +540,7 @@ func (evm *EVM) create(caller common.Address, code []byte, gas uint64, value *ui
// - the storage is non-empty
contractHash := evm.StateDB.GetCodeHash(address)
storageRoot := evm.StateDB.GetStorageRoot(address)
if evm.StateDB.GetNonce(address) != 0 ||
(contractHash != (common.Hash{}) && contractHash != types.EmptyCodeHash) || // non-empty code
(storageRoot != (common.Hash{}) && storageRoot != types.EmptyRootHash) { // non-empty storage

View file

@ -374,7 +374,33 @@ func gasExpEIP158(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memor
return gas, nil
}
func gasCall(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize uint64) (uint64, error) {
func gasCallStateless(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize uint64) (uint64, error) {
var (
gas uint64
transfersValue = !stack.Back(2).IsZero()
)
if transfersValue {
if evm.readOnly {
return 0, ErrWriteProtection
} else if !evm.chainRules.IsEIP4762 {
gas += params.CallValueTransferGas
}
}
memoryGas, err := memoryGasCost(mem, memorySize)
if err != nil {
return 0, err
}
var overflow bool
if gas, overflow = math.SafeAdd(gas, memoryGas); overflow {
return 0, ErrGasUintOverflow
}
return gas, nil
}
func gasCallStateful(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize uint64) (uint64, error) {
var (
gas uint64
transfersValue = !stack.Back(2).IsZero()
@ -391,15 +417,22 @@ func gasCall(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize
} else if !evm.StateDB.Exist(address) {
gas += params.CallNewAccountGas
}
if transfersValue && !evm.chainRules.IsEIP4762 {
gas += params.CallValueTransferGas
}
memoryGas, err := memoryGasCost(mem, memorySize)
return gas, nil
}
func gasCall(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize uint64) (uint64, error) {
stateless, err := gasCallStateless(evm, contract, stack, mem, memorySize)
if err != nil {
return 0, err
}
var overflow bool
if gas, overflow = math.SafeAdd(gas, memoryGas); overflow {
stateful, err := gasCallStateful(evm, contract, stack, mem, memorySize)
if err != nil {
return 0, err
}
gas, overflow := math.SafeAdd(stateless, stateful)
if overflow {
return 0, ErrGasUintOverflow
}
@ -410,25 +443,41 @@ func gasCall(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize
if gas, overflow = math.SafeAdd(gas, evm.callGasTemp); overflow {
return 0, ErrGasUintOverflow
}
return gas, nil
}
func gasCallCode(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize uint64) (uint64, error) {
func gasCallCodeStateful(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize uint64) (uint64, error) {
return 0, nil
}
func gasCallCodeStateless(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize uint64) (uint64, error) {
memoryGas, err := memoryGasCost(mem, memorySize)
if err != nil {
return 0, err
}
var (
gas uint64
overflow bool
gas uint64
overflow bool
transfersValue = !stack.Back(2).IsZero()
)
if stack.Back(2).Sign() != 0 && !evm.chainRules.IsEIP4762 {
gas += params.CallValueTransferGas
if transfersValue {
if !evm.chainRules.IsEIP4762 {
gas += params.CallValueTransferGas
}
}
if gas, overflow = math.SafeAdd(gas, memoryGas); overflow {
return 0, ErrGasUintOverflow
}
return gas, nil
}
func gasCallCode(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize uint64) (uint64, error) {
var overflow bool
gas, err := gasCallCodeStateless(evm, contract, stack, mem, memorySize)
if err != nil {
return 0, err
}
evm.callGasTemp, err = callGas(evm.chainRules.IsEIP150, contract.Gas, gas, stack.Back(0))
if err != nil {
return 0, err
@ -440,10 +489,16 @@ func gasCallCode(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memory
}
func gasDelegateCall(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize uint64) (uint64, error) {
gas, err := memoryGasCost(mem, memorySize)
var (
err error
gas uint64
)
gas, err = gasDelegateCallStateless(evm, contract, stack, mem, memorySize)
if err != nil {
return 0, err
}
evm.callGasTemp, err = callGas(evm.chainRules.IsEIP150, contract.Gas, gas, stack.Back(0))
if err != nil {
return 0, err
@ -455,11 +510,36 @@ func gasDelegateCall(evm *EVM, contract *Contract, stack *Stack, mem *Memory, me
return gas, nil
}
func gasStaticCall(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize uint64) (uint64, error) {
func gasDelegateCallStateful(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize uint64) (uint64, error) {
return 0, nil
}
func gasDelegateCallStateless(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize uint64) (uint64, error) {
gas, err := memoryGasCost(mem, memorySize)
if err != nil {
return 0, err
}
return gas, nil
}
func gasStaticCallStateless(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize uint64) (uint64, error) {
gas, err := memoryGasCost(mem, memorySize)
if err != nil {
return 0, err
}
return gas, nil
}
func gasStaticCallStateful(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize uint64) (uint64, error) {
return 0, nil
}
func gasStaticCall(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize uint64) (uint64, error) {
gas, err := gasStaticCallStateless(evm, contract, stack, mem, memorySize)
if err != nil {
return 0, err
}
evm.callGasTemp, err = callGas(evm.chainRules.IsEIP150, contract.Gas, gas, stack.Back(0))
if err != nil {
return 0, err
@ -477,11 +557,16 @@ func gasSelfdestruct(evm *EVM, contract *Contract, stack *Stack, mem *Memory, me
}
var gas uint64
// EIP150 homestead gas reprice fork:
if evm.chainRules.IsEIP150 {
gas = params.SelfdestructGasEIP150
var address = common.Address(stack.Back(0).Bytes20())
if gas > contract.Gas {
return gas, nil
}
if evm.chainRules.IsEIP158 {
// if empty and transfers value
if evm.StateDB.Empty(address) && evm.StateDB.GetBalance(contract.Address()).Sign() != 0 {

View file

@ -516,9 +516,6 @@ func opSload(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
}
func opSstore(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
if evm.readOnly {
return nil, ErrWriteProtection
}
loc := scope.Stack.pop()
val := scope.Stack.pop()
evm.StateDB.SetState(scope.Contract.Address(), loc.Bytes32(), val.Bytes32())

View file

@ -22,6 +22,7 @@ import (
"github.com/ethereum/go-ethereum/core/stateless"
"github.com/ethereum/go-ethereum/core/tracing"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/types/bal"
"github.com/ethereum/go-ethereum/params"
"github.com/holiman/uint256"
)
@ -94,5 +95,5 @@ type StateDB interface {
AccessEvents() *state.AccessEvents
// Finalise must be invoked at the end of a transaction
Finalise(bool)
Finalise(bool) bal.StateMutations
}

View file

@ -28,6 +28,8 @@ func LookupInstructionSet(rules params.Rules) (JumpTable, error) {
switch {
case rules.IsVerkle:
return newCancunInstructionSet(), errors.New("verkle-fork not defined yet")
case rules.IsAmsterdam:
return newPragueInstructionSet(), nil
case rules.IsOsaka:
return newOsakaInstructionSet(), nil
case rules.IsPrague:

View file

@ -155,50 +155,12 @@ func gasEip2929AccountCheck(evm *EVM, contract *Contract, stack *Stack, mem *Mem
return 0, nil
}
func makeCallVariantGasCallEIP2929(oldCalculator gasFunc, addressPosition int) gasFunc {
return func(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize uint64) (uint64, error) {
addr := common.Address(stack.Back(addressPosition).Bytes20())
// Check slot presence in the access list
warmAccess := evm.StateDB.AddressInAccessList(addr)
// The WarmStorageReadCostEIP2929 (100) is already deducted in the form of a constant cost, so
// the cost to charge for cold access, if any, is Cold - Warm
coldCost := params.ColdAccountAccessCostEIP2929 - params.WarmStorageReadCostEIP2929
if !warmAccess {
evm.StateDB.AddAddressToAccessList(addr)
// Charge the remaining difference here already, to correctly calculate available
// gas for call
if !contract.UseGas(coldCost, evm.Config.Tracer, tracing.GasChangeCallStorageColdAccess) {
return 0, ErrOutOfGas
}
}
// Now call the old calculator, which takes into account
// - create new account
// - transfer value
// - memory expansion
// - 63/64ths rule
gas, err := oldCalculator(evm, contract, stack, mem, memorySize)
if warmAccess || err != nil {
return gas, err
}
// In case of a cold access, we temporarily add the cold charge back, and also
// add it to the returned gas. By adding it to the return, it will be charged
// outside of this function, as part of the dynamic gas, and that will make it
// also become correctly reported to tracers.
contract.Gas += coldCost
var overflow bool
if gas, overflow = math.SafeAdd(gas, coldCost); overflow {
return 0, ErrGasUintOverflow
}
return gas, nil
}
}
var (
gasCallEIP2929 = makeCallVariantGasCallEIP2929(gasCall, 1)
gasDelegateCallEIP2929 = makeCallVariantGasCallEIP2929(gasDelegateCall, 1)
gasStaticCallEIP2929 = makeCallVariantGasCallEIP2929(gasStaticCall, 1)
gasCallCodeEIP2929 = makeCallVariantGasCallEIP2929(gasCallCode, 1)
// TODO: we can use the same functions already defined above for the 7702 gas handlers
gasCallEIP2929 = makeCallVariantGasCall(gasCallStateless, gasCallStateful)
gasDelegateCallEIP2929 = makeCallVariantGasCall(gasDelegateCallStateless, gasDelegateCallStateful)
gasStaticCallEIP2929 = makeCallVariantGasCall(gasStaticCallStateless, gasStaticCallStateful)
gasCallCodeEIP2929 = makeCallVariantGasCall(gasCallCodeStateless, gasCallCodeStateful)
gasSelfdestructEIP2929 = makeSelfdestructGasFn(true)
// gasSelfdestructEIP3529 implements the changes in EIP-3529 (no refunds)
gasSelfdestructEIP3529 = makeSelfdestructGasFn(false)
@ -243,6 +205,10 @@ func makeSelfdestructGasFn(refundsEnabled bool) gasFunc {
return 0, ErrOutOfGas
}
}
if contract.Gas < gas {
return gas, nil
}
// if empty and transfers value
if evm.StateDB.Empty(address) && evm.StateDB.GetBalance(contract.Address()).Sign() != 0 {
gas += params.CreateBySelfdestructGas
@ -256,33 +222,25 @@ func makeSelfdestructGasFn(refundsEnabled bool) gasFunc {
}
var (
innerGasCallEIP7702 = makeCallVariantGasCallEIP7702(gasCall)
gasDelegateCallEIP7702 = makeCallVariantGasCallEIP7702(gasDelegateCall)
gasStaticCallEIP7702 = makeCallVariantGasCallEIP7702(gasStaticCall)
gasCallCodeEIP7702 = makeCallVariantGasCallEIP7702(gasCallCode)
gasCallEIP7702 = makeCallVariantGasCall(gasCallStateful, gasCallStateless)
gasDelegateCallEIP7702 = makeCallVariantGasCall(gasDelegateCallStateful, gasDelegateCallStateless)
gasStaticCallEIP7702 = makeCallVariantGasCall(gasStaticCallStateful, gasStaticCallStateless)
gasCallCodeEIP7702 = makeCallVariantGasCall(gasCallCodeStateful, gasCallCodeStateless)
)
func gasCallEIP7702(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize uint64) (uint64, error) {
// Return early if this call attempts to transfer value in a static context.
// Although it's checked in `gasCall`, EIP-7702 loads the target's code before
// to determine if it is resolving a delegation. This could incorrectly record
// the target in the block access list (BAL) if the call later fails.
transfersValue := !stack.Back(2).IsZero()
if evm.readOnly && transfersValue {
return 0, ErrWriteProtection
}
return innerGasCallEIP7702(evm, contract, stack, mem, memorySize)
}
func makeCallVariantGasCallEIP7702(oldCalculator gasFunc) gasFunc {
func makeCallVariantGasCall(oldCalculatorStateful, oldCalculatorStateless gasFunc) gasFunc {
return func(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize uint64) (uint64, error) {
var (
total uint64 // total dynamic gas used
addr = common.Address(stack.Back(1).Bytes20())
eip150BaseGas uint64 // gas used for memory expansion, transfer costs -> input to the 63/64 bounding
eip7702Gas uint64
eip2929Gas uint64
addr = common.Address(stack.Back(1).Bytes20())
overflow bool
err error
)
// Check slot presence in the access list
if !evm.StateDB.AddressInAccessList(addr) {
if evm.chainRules.IsEIP2929 && !evm.StateDB.AddressInAccessList(addr) {
evm.StateDB.AddAddressToAccessList(addr)
// The WarmStorageReadCostEIP2929 (100) is already deducted in the form of a constant cost, so
// the cost to charge for cold access, if any, is Cold - Warm
@ -292,44 +250,87 @@ func makeCallVariantGasCallEIP7702(oldCalculator gasFunc) gasFunc {
if !contract.UseGas(coldCost, evm.Config.Tracer, tracing.GasChangeCallStorageColdAccess) {
return 0, ErrOutOfGas
}
total += coldCost
eip2929Gas = coldCost
}
// Check if code is a delegation and if so, charge for resolution.
if target, ok := types.ParseDelegation(evm.StateDB.GetCode(addr)); ok {
var cost uint64
if evm.StateDB.AddressInAccessList(target) {
cost = params.WarmStorageReadCostEIP2929
} else {
evm.StateDB.AddAddressToAccessList(target)
cost = params.ColdAccountAccessCostEIP2929
}
if !contract.UseGas(cost, evm.Config.Tracer, tracing.GasChangeCallStorageColdAccess) {
return 0, ErrOutOfGas
}
total += cost
}
// Now call the old calculator, which takes into account
// - create new account
// - transfer value
// - memory expansion
// - 63/64ths rule
old, err := oldCalculator(evm, contract, stack, mem, memorySize)
eip150BaseGas, err = oldCalculatorStateless(evm, contract, stack, mem, memorySize)
if err != nil {
return old, err
return 0, err
}
// ensure the portion of the call cost which doesn't depend on state lookups
// is covered by the provided gas
if contract.Gas < eip150BaseGas {
return 0, ErrOutOfGas
}
oldStateful, err := oldCalculatorStateful(evm, contract, stack, mem, memorySize)
if err != nil {
return oldStateful, err
}
// this should cause BAL test failures if uncommented
baseCost, overflow := math.SafeAdd(eip150BaseGas, oldStateful)
if overflow {
return 0, ErrGasUintOverflow
} else if contract.Gas < baseCost {
return 0, ErrOutOfGas
}
if eip150BaseGas, overflow = math.SafeAdd(eip150BaseGas, oldStateful); overflow {
return 0, ErrOutOfGas
}
if evm.chainRules.IsPrague {
// Check if code is a delegation and if so, charge for resolution.
if target, ok := types.ParseDelegation(evm.StateDB.GetCode(addr)); ok {
if evm.StateDB.AddressInAccessList(target) {
eip7702Gas = params.WarmStorageReadCostEIP2929
} else {
evm.StateDB.AddAddressToAccessList(target)
eip7702Gas = params.ColdAccountAccessCostEIP2929
}
if !contract.UseGas(eip7702Gas, evm.Config.Tracer, tracing.GasChangeCallStorageColdAccess) {
return 0, ErrOutOfGas
}
}
}
evm.callGasTemp, err = callGas(evm.chainRules.IsEIP150, contract.Gas, eip150BaseGas, stack.Back(0))
if err != nil {
return 0, err
}
// TODO: it's not clear what happens if there is enough gas to cover the stateless component
// but not enough to cover the whole call: do all the state reads happen in this case, and
// we fail at the very end?
// Temporarily add the gas charge back to the contract and return value. By
// adding it to the return, it will be charged outside of this function, as
// part of the dynamic gas. This will ensure it is correctly reported to
// tracers.
contract.Gas += total
var overflow bool
if total, overflow = math.SafeAdd(old, total); overflow {
contract.Gas, overflow = math.SafeAdd(contract.Gas, eip2929Gas)
if overflow {
return 0, ErrGasUintOverflow
}
return total, nil
contract.Gas, overflow = math.SafeAdd(contract.Gas, eip7702Gas)
if overflow {
return 0, ErrGasUintOverflow
}
var totalCost uint64
totalCost, overflow = math.SafeAdd(eip2929Gas, eip7702Gas)
if overflow {
return 0, ErrGasUintOverflow
}
totalCost, overflow = math.SafeAdd(totalCost, evm.callGasTemp)
if overflow {
return 0, ErrGasUintOverflow
}
totalCost, overflow = math.SafeAdd(totalCost, eip150BaseGas)
if overflow {
return 0, ErrGasUintOverflow
}
return totalCost, nil
}
}

View file

@ -19,6 +19,7 @@ package eth
import (
"context"
"errors"
"fmt"
"math/big"
"time"
@ -499,3 +500,22 @@ func (b *EthAPIBackend) RPCTxSyncDefaultTimeout() time.Duration {
func (b *EthAPIBackend) RPCTxSyncMaxTimeout() time.Duration {
return b.eth.config.TxSyncMaxTimeout
}
// GetBlockAccessList returns a block access list for the given number/hash
// or nil if one does not exist.
func (b *EthAPIBackend) BlockAccessListByNumberOrHash(number rpc.BlockNumberOrHash) (interface{}, error) {
var block *types.Block
if num := number.BlockNumber; num != nil {
block = b.eth.blockchain.GetBlockByNumber(uint64(num.Int64()))
} else if hash := number.BlockHash; hash != nil {
block = b.eth.blockchain.GetBlockByHash(*hash)
}
if block == nil {
return nil, fmt.Errorf("block not found")
}
if block.AccessList() == nil {
return nil, nil
}
return block.AccessList().StringableRepresentation(), nil
}

View file

@ -275,6 +275,7 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) {
overrides.OverrideVerkle = config.OverrideVerkle
}
options.Overrides = &overrides
options.BALExecutionMode = config.BALExecutionMode
eth.blockchain, err = core.NewBlockChain(chainDb, config.Genesis, eth.engine, options)
if err != nil {

View file

@ -201,9 +201,13 @@ func (api *ConsensusAPI) ForkchoiceUpdatedV3(update engine.ForkchoiceStateV1, pa
return engine.STATUS_INVALID, attributesErr("missing withdrawals")
case params.BeaconRoot == nil:
return engine.STATUS_INVALID, attributesErr("missing beacon root")
case !api.checkFork(params.Timestamp, forks.Cancun, forks.Prague, forks.Osaka, forks.BPO1, forks.BPO2, forks.BPO3, forks.BPO4, forks.BPO5):
case !api.checkFork(params.Timestamp, forks.Cancun, forks.Prague, forks.Osaka, forks.BPO1, forks.BPO2, forks.BPO3, forks.BPO4, forks.BPO5, forks.Amsterdam):
return engine.STATUS_INVALID, unsupportedForkErr("fcuV3 must only be called for cancun/prague/osaka payloads")
}
if api.checkFork(params.Timestamp, forks.Amsterdam) {
return api.forkchoiceUpdated(update, params, engine.PayloadV4, false)
}
}
// TODO(matt): the spec requires that fcu is applied when called on a valid
// hash, even if params are wrong. To do this we need to split up
@ -498,6 +502,7 @@ func (api *ConsensusAPI) GetPayloadV6(payloadID engine.PayloadID) (*engine.Execu
//
// Note passing nil `forks`, `versions` disables the respective check.
func (api *ConsensusAPI) getPayload(payloadID engine.PayloadID, full bool, versions []engine.PayloadVersion, forks []forks.Fork) (*engine.ExecutionPayloadEnvelope, error) {
log.Trace("Engine API request received", "method", "GetPayload", "id", payloadID)
if versions != nil && !payloadID.Is(versions...) {
return nil, engine.UnsupportedFork
@ -750,6 +755,8 @@ func (api *ConsensusAPI) NewPayloadV5(ctx context.Context, params engine.Executa
return invalidStatus, paramsErr("nil beaconRoot post-cancun")
case executionRequests == nil:
return invalidStatus, paramsErr("nil executionRequests post-prague")
case params.BlockAccessList == nil:
return invalidStatus, paramsErr("nil block access list post-amsterdam")
case params.SlotNumber == nil:
return invalidStatus, paramsErr("nil slotnumber post-amsterdam")
case !api.checkFork(params.Timestamp, forks.Amsterdam):
@ -759,7 +766,7 @@ func (api *ConsensusAPI) NewPayloadV5(ctx context.Context, params engine.Executa
if err := validateRequests(requests); err != nil {
return engine.PayloadStatusV1{Status: engine.INVALID}, engine.InvalidParams.With(err)
}
return api.newPayload(ctx, params, versionedHashes, beaconRoot, requests, false)
return api.newPayload(context.Background(), params, versionedHashes, beaconRoot, requests, false)
}
func (api *ConsensusAPI) newPayload(ctx context.Context, params engine.ExecutableData, versionedHashes []common.Hash, beaconRoot *common.Hash, requests [][]byte, witness bool) (result engine.PayloadStatusV1, err error) {
@ -1180,6 +1187,10 @@ func getBody(block *types.Block) *engine.ExecutionPayloadBody {
result.Withdrawals = []*types.Withdrawal{}
}
if block.AccessList() != nil {
result.AccessList = block.AccessList()
}
return &result
}

View file

@ -209,6 +209,8 @@ type Config struct {
// RangeLimit restricts the maximum range (end - start) for range queries.
RangeLimit uint64 `toml:",omitempty"`
BALExecutionMode int
}
// CreateConsensusEngine creates a consensus engine for the given chain config.

View file

@ -68,6 +68,7 @@ func (c Config) MarshalTOML() (interface{}, error) {
TxSyncDefaultTimeout time.Duration `toml:",omitempty"`
TxSyncMaxTimeout time.Duration `toml:",omitempty"`
RangeLimit uint64 `toml:",omitempty"`
BALExecutionMode int
}
var enc Config
enc.Genesis = c.Genesis
@ -121,6 +122,7 @@ func (c Config) MarshalTOML() (interface{}, error) {
enc.TxSyncDefaultTimeout = c.TxSyncDefaultTimeout
enc.TxSyncMaxTimeout = c.TxSyncMaxTimeout
enc.RangeLimit = c.RangeLimit
enc.BALExecutionMode = c.BALExecutionMode
return &enc, nil
}
@ -178,6 +180,7 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error {
TxSyncDefaultTimeout *time.Duration `toml:",omitempty"`
TxSyncMaxTimeout *time.Duration `toml:",omitempty"`
RangeLimit *uint64 `toml:",omitempty"`
BALExecutionMode *int
}
var dec Config
if err := unmarshal(&dec); err != nil {
@ -336,5 +339,8 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error {
if dec.RangeLimit != nil {
c.RangeLimit = *dec.RangeLimit
}
if dec.BALExecutionMode != nil {
c.BALExecutionMode = *dec.BALExecutionMode
}
return nil
}

View file

@ -1055,7 +1055,7 @@ func (api *API) traceTx(ctx context.Context, tx *types.Transaction, message *cor
// Call Prepare to clear out the statedb access list
statedb.SetTxContext(txctx.TxHash, txctx.TxIndex)
_, err = core.ApplyTransactionWithEVM(message, core.NewGasPool(message.GasLimit), statedb, vmctx.BlockNumber, txctx.BlockHash, vmctx.Time, tx, evm)
_, _, err = core.ApplyTransactionWithEVM(message, core.NewGasPool(message.GasLimit), statedb, vmctx.BlockNumber, txctx.BlockHash, vmctx.Time, tx, evm)
if err != nil {
return nil, fmt.Errorf("tracing failed: %w", err)
}

View file

@ -1007,6 +1007,9 @@ func RPCMarshalBlock(block *types.Block, inclTx bool, fullTx bool, config *param
if block.Withdrawals() != nil {
fields["withdrawals"] = block.Withdrawals()
}
if block.AccessList() != nil {
fields["accessList"] = block.AccessList()
}
return fields
}
@ -1378,6 +1381,18 @@ func AccessList(ctx context.Context, b Backend, blockNrOrHash rpc.BlockNumberOrH
}
}
// GetBlockAccessListByBlockNumber returns a block access list for the given block number
// or nil if one does not exist.
func (api *BlockChainAPI) GetBlockAccessListByBlockNumber(number rpc.BlockNumber) (interface{}, error) {
return api.b.BlockAccessListByNumberOrHash(rpc.BlockNumberOrHash{BlockNumber: &number})
}
// GetBlockAccessListByBlockHash returns a block access list for the given block hash
// or nil if one does not exist.
func (api *BlockChainAPI) GetBlockAccessListByBlockHash(hash common.Hash) (interface{}, error) {
return api.b.BlockAccessListByNumberOrHash(rpc.BlockNumberOrHash{BlockHash: &hash})
}
// TransactionAPI exposes methods for reading and creating transaction data.
type TransactionAPI struct {
b Backend

View file

@ -74,6 +74,7 @@ type Backend interface {
GetEVM(ctx context.Context, state *state.StateDB, header *types.Header, vmConfig *vm.Config, blockCtx *vm.BlockContext) *vm.EVM
SubscribeChainEvent(ch chan<- core.ChainEvent) event.Subscription
SubscribeChainHeadEvent(ch chan<- core.ChainHeadEvent) event.Subscription
BlockAccessListByNumberOrHash(number rpc.BlockNumberOrHash) (interface{}, error)
// Transaction pool API
SendTx(ctx context.Context, signedTx *types.Transaction) error

View file

@ -354,11 +354,11 @@ func (sim *simulator) processBlock(ctx context.Context, block *simBlock, header,
return nil, nil, nil, err
}
// EIP-7002
if err := core.ProcessWithdrawalQueue(&requests, evm); err != nil {
if _, err := core.ProcessWithdrawalQueue(&requests, evm); err != nil {
return nil, nil, nil, err
}
// EIP-7251
if err := core.ProcessConsolidationQueue(&requests, evm); err != nil {
if _, err := core.ProcessConsolidationQueue(&requests, evm); err != nil {
return nil, nil, nil, err
}
}
@ -372,7 +372,7 @@ func (sim *simulator) processBlock(ctx context.Context, block *simBlock, header,
Withdrawals: *block.BlockOverrides.Withdrawals,
}
chainHeadReader := &simChainHeadReader{ctx, sim.b}
b, err := sim.b.Engine().FinalizeAndAssemble(chainHeadReader, header, sim.state, blockBody, receipts)
b, err := sim.b.Engine().FinalizeAndAssemble(chainHeadReader, header, sim.state, blockBody, receipts, nil)
if err != nil {
return nil, nil, nil, err
}

View file

@ -474,6 +474,11 @@ web3._extend({
params: 1,
inputFormatter: [null],
}),
new web3._extend.Method({
name: 'getEncodedBlockAccessList',
call: 'debug_getEncodedBlockAccessList',
params: 1
}),
],
properties: []
});
@ -611,7 +616,17 @@ web3._extend({
name: 'config',
call: 'eth_config',
params: 0,
})
}),
new web3._extend.Method({
name: 'getBlockAccessListByBlockNumber',
call: 'eth_getBlockAccessListByBlockNumber',
params: 1,
}),
new web3._extend.Method({
name: 'getBlockAccessListByBlockHash',
call: 'eth_getBlockAccessListByBlockHash',
params: 1,
}),
],
properties: [
new web3._extend.Property({

View file

@ -31,6 +31,7 @@ import (
"github.com/ethereum/go-ethereum/core/stateless"
"github.com/ethereum/go-ethereum/core/txpool"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/types/bal"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/params"
@ -70,7 +71,8 @@ type environment struct {
sidecars []*types.BlobTxSidecar
blobs int
witness *stateless.Witness
witness *stateless.Witness
accessList bal.ConstructionBlockAccessList
}
// txFits reports whether the transaction fits into the block size limit.
@ -168,7 +170,10 @@ func (miner *Miner) generateWork(genParam *generateParams, witness bool) *newPay
}
// Collect consensus-layer requests if Prague is enabled.
var requests [][]byte
var (
requests [][]byte
postMut = make(bal.StateMutations)
)
if miner.chainConfig.IsPrague(work.header.Number, work.header.Time) {
requests = [][]byte{}
// EIP-6110 deposits
@ -176,23 +181,47 @@ func (miner *Miner) generateWork(genParam *generateParams, witness bool) *newPay
return &newPayloadResult{err: err}
}
// EIP-7002
if err := core.ProcessWithdrawalQueue(&requests, work.evm); err != nil {
mut, err := core.ProcessWithdrawalQueue(&requests, work.evm)
if err != nil {
return &newPayloadResult{err: err}
}
postMut.Merge(mut)
// EIP-7251 consolidations
if err := core.ProcessConsolidationQueue(&requests, work.evm); err != nil {
mut, err = core.ProcessConsolidationQueue(&requests, work.evm)
if err != nil {
return &newPayloadResult{err: err}
}
postMut.Merge(mut)
work.accessList.AccumulateMutations(postMut, uint16(work.tcount)+1)
work.accessList.AccumulateReads(work.state.Reader().(state.StateReaderTracker).GetStateAccessList())
}
if requests != nil {
reqHash := types.CalcRequestsHash(requests)
work.header.RequestsHash = &reqHash
}
block, err := miner.engine.FinalizeAndAssemble(miner.chain, work.header, work.state, &body, work.receipts)
// set the block access list on the body after the block has finished executing
// but before the header hash is computed (in FinalizeAndAssemble).
//
// I considered trying to instantiate the beacon consensus engine with a tracer.
// however, the BAL tracer instance is used once per block, while the engine object
// lives for the entire time the client is running.
var onBlockFinalization func(mutations bal.StateMutations) *bal.BlockAccessList
if miner.chainConfig.IsAmsterdam(work.header.Number, work.header.Time) {
onBlockFinalization = func(withdrawalMut bal.StateMutations) *bal.BlockAccessList {
work.accessList.AccumulateMutations(withdrawalMut, uint16(work.tcount)+1)
work.accessList.AccumulateReads(work.state.Reader().(state.StateReaderTracker).GetStateAccessList())
return work.accessList.ToEncodingObj()
}
}
block, err := miner.engine.FinalizeAndAssemble(miner.chain, work.header, work.state, &body, work.receipts, onBlockFinalization)
if err != nil {
return &newPayloadResult{err: err}
}
return &newPayloadResult{
block: block,
fees: totalFees(block, work.receipts),
@ -287,50 +316,68 @@ func (miner *Miner) prepareWork(genParams *generateParams, witness bool) (*envir
log.Error("Failed to create sealing context", "err", err)
return nil, err
}
mut := make(bal.StateMutations)
if header.ParentBeaconRoot != nil {
core.ProcessBeaconBlockRoot(*header.ParentBeaconRoot, env.evm)
mut.Merge(core.ProcessBeaconBlockRoot(*header.ParentBeaconRoot, env.evm))
}
if miner.chainConfig.IsPrague(header.Number, header.Time) {
core.ProcessParentBlockHash(header.ParentHash, env.evm)
mut.Merge(core.ProcessParentBlockHash(header.ParentHash, env.evm))
}
env.accessList.AccumulateMutations(mut, 0)
return env, nil
}
// makeEnv creates a new environment for the sealing block.
func (miner *Miner) makeEnv(parent *types.Header, header *types.Header, coinbase common.Address, witness bool) (*environment, error) {
// Retrieve the parent state to execute on top.
state, err := miner.chain.StateAt(parent.Root)
sdb, err := miner.chain.StateAt(parent.Root)
if err != nil {
return nil, err
}
var accessListBuilder bal.ConstructionBlockAccessList
if miner.chainConfig.IsAmsterdam(header.Number, header.Time) {
accessListBuilder = make(bal.ConstructionBlockAccessList)
sdb = sdb.WithReader(state.NewReaderWithTracker(sdb.Reader()))
}
if witness {
if miner.chainConfig.IsAmsterdam(header.Number, header.Time) {
panic("fix this edge case so that the prefetcher invocation below doesn't populate the readset for constructing the BAL")
}
bundle, err := stateless.NewWitness(header, miner.chain)
if err != nil {
return nil, err
}
state.StartPrefetcher("miner", bundle, nil)
sdb.StartPrefetcher("miner", bundle, nil)
}
// Note the passed coinbase may be different with header.Coinbase.
return &environment{
signer: types.MakeSigner(miner.chainConfig, header.Number, header.Time),
state: state,
size: uint64(header.Size()),
coinbase: coinbase,
gasPool: core.NewGasPool(header.GasLimit),
header: header,
witness: state.Witness(),
evm: vm.NewEVM(core.NewEVMBlockContext(header, miner.chain, &coinbase), state, miner.chainConfig, vm.Config{}),
signer: types.MakeSigner(miner.chainConfig, header.Number, header.Time),
state: sdb,
size: uint64(header.Size()),
coinbase: coinbase,
gasPool: core.NewGasPool(header.GasLimit),
header: header,
witness: sdb.Witness(),
evm: vm.NewEVM(core.NewEVMBlockContext(header, miner.chain, &coinbase), sdb, miner.chainConfig, vm.Config{}),
accessList: accessListBuilder,
}, nil
}
func (miner *Miner) commitTransaction(env *environment, tx *types.Transaction) error {
var (
errAccessListOversized = errors.New("access list oversized")
)
func (miner *Miner) commitTransaction(env *environment, tx *types.Transaction) (err error) {
if tx.Type() == types.BlobTxType {
return miner.commitBlobTransaction(env, tx)
}
receipt, err := miner.applyTransaction(env, tx)
if err != nil {
return err
}
env.txs = append(env.txs, tx)
env.receipts = append(env.receipts, receipt)
env.size += tx.Size()
@ -338,7 +385,7 @@ func (miner *Miner) commitTransaction(env *environment, tx *types.Transaction) e
return nil
}
func (miner *Miner) commitBlobTransaction(env *environment, tx *types.Transaction) error {
func (miner *Miner) commitBlobTransaction(env *environment, tx *types.Transaction) (err error) {
sc := tx.BlobTxSidecar()
if sc == nil {
panic("blob transaction without blobs in miner")
@ -372,11 +419,41 @@ func (miner *Miner) applyTransaction(env *environment, tx *types.Transaction) (*
snap = env.state.Snapshot()
gp = env.gasPool.Snapshot()
)
receipt, err := core.ApplyTransaction(env.evm, env.gasPool, env.state, env.header, tx)
var stateCopy *state.StateDB
var prevReader state.Reader
if env.accessList != nil {
prevReader = env.state.Reader()
stateCopy = env.state.WithReader(state.NewReaderWithTracker(env.state.Reader()))
env.evm.StateDB = stateCopy
}
mutations, receipt, err := core.ApplyTransaction(env.evm, env.gasPool, stateCopy, env.header, tx)
if err != nil {
env.state.RevertToSnapshot(snap)
if env.accessList != nil {
// transaction couldn't be applied. reset env state to what it was before
env.state = env.state.WithReader(prevReader)
env.evm.StateDB = env.state
} else {
env.state.RevertToSnapshot(snap)
}
env.gasPool.Set(gp)
return nil, err
}
if env.accessList != nil {
al := env.accessList.Copy()
al.AccumulateMutations(mutations, uint16(env.tcount)+1)
al.AccumulateReads(stateCopy.Reader().(state.StateReaderTracker).GetStateAccessList())
if env.size+tx.Size()+uint64(al.ToEncodingObj().EncodedSize()) >= params.MaxBlockSize-maxBlockSizeBufferZone {
env.gasPool.Set(gp)
// transaction couldn't be applied. reset env state to what it was before
env.state = env.state.WithReader(prevReader)
env.evm.StateDB = env.state
return nil, errAccessListOversized
}
env.state = stateCopy.WithReader(prevReader)
env.evm.StateDB = env.state
env.accessList = al
}
env.header.GasUsed = env.gasPool.Used()
return receipt, nil
@ -384,6 +461,7 @@ func (miner *Miner) applyTransaction(env *environment, tx *types.Transaction) (*
func (miner *Miner) commitTransactions(env *environment, plainTxs, blobTxs *transactionsByPriceAndNonce, interrupt *atomic.Int32) error {
isCancun := miner.chainConfig.IsCancun(env.header.Number, env.header.Time)
loop:
for {
// Check interruption signal and abort building if it's fired.
if interrupt != nil {
@ -482,7 +560,12 @@ func (miner *Miner) commitTransactions(env *environment, plainTxs, blobTxs *tran
case errors.Is(err, nil):
// Everything ok, collect the logs and shift in the next transaction from the same account
txs.Shift()
case errors.Is(err, errAccessListOversized):
// Transaction can't be applied because it would cause the block to be oversized due to the
// contribution of the state accesses/modifications it makes.
// terminate the payload construction as it's not guaranteed we will be able to find a transaction
// that can fit in a short amount of time.
break loop
default:
// Transaction is regarded as invalid, drop all consecutive transactions from
// the same sender because of `nonce-too-high` clause.

View file

@ -236,6 +236,8 @@ var (
Cancun: DefaultCancunBlobConfig,
Prague: DefaultPragueBlobConfig,
Osaka: DefaultOsakaBlobConfig,
BPO1: DefaultBPO1BlobConfig,
BPO2: DefaultBPO2BlobConfig,
},
}
@ -1015,9 +1017,11 @@ func (c *ChainConfig) CheckConfigForkOrder() error {
}
if cur.timestamp != nil {
// If the fork is configured, a blob schedule must be defined for it.
if cur.config == nil {
return fmt.Errorf("invalid chain configuration: missing entry for fork %q in blobSchedule", cur.name)
}
/*
if cur.config == nil {
return fmt.Errorf("invalid chain configuration: missing entry for fork %q in blobSchedule", cur.name)
}
*/
}
}
return nil
@ -1172,6 +1176,9 @@ func (c *ChainConfig) LatestFork(time uint64) forks.Fork {
// BlobConfig returns the blob config associated with the provided fork.
func (c *ChainConfig) BlobConfig(fork forks.Fork) *BlobConfig {
switch fork {
case forks.Amsterdam:
// TODO: (????)
return c.BlobScheduleConfig.BPO2
case forks.BPO5:
return c.BlobScheduleConfig.BPO5
case forks.BPO4:
@ -1217,6 +1224,8 @@ func (c *ChainConfig) ActiveSystemContracts(time uint64) map[string]common.Addre
// the fork isn't defined or isn't a time-based fork.
func (c *ChainConfig) Timestamp(fork forks.Fork) *uint64 {
switch {
case fork == forks.Amsterdam:
return c.AmsterdamTime
case fork == forks.BPO5:
return c.BPO5Time
case fork == forks.BPO4:

View file

@ -25,6 +25,7 @@ import (
"io"
"math/big"
"reflect"
"runtime/debug"
"strings"
"sync"
@ -672,6 +673,7 @@ func (s *Stream) ReadBytes(b []byte) error {
return nil
case String:
if uint64(len(b)) != size {
debug.PrintStack()
return fmt.Errorf("input value has wrong size %d, want %d", size, len(b))
}
if err = s.readFull(b); err != nil {

View file

@ -24,6 +24,65 @@ import (
"github.com/ethereum/go-ethereum/core/rawdb"
)
func TestBlockchainBAL(t *testing.T) {
bt := new(testMatcher)
// We are running most of GeneralStatetests to tests witness support, even
// though they are ran as state tests too. Still, the performance tests are
// less about state andmore about EVM number crunching, so skip those.
bt.skipLoad(`^GeneralStateTests/VMTests/vmPerformance`)
// Skip random failures due to selfish mining test
bt.skipLoad(`.*bcForgedTest/bcForkUncle\.json`)
// Slow tests
bt.slow(`.*bcExploitTest/DelegateCallSpam.json`)
bt.slow(`.*bcExploitTest/ShanghaiLove.json`)
bt.slow(`.*bcExploitTest/SuicideIssue.json`)
bt.slow(`.*/bcForkStressTest/`)
bt.slow(`.*/bcGasPricerTest/RPC_API_Test.json`)
bt.slow(`.*/bcWalletTest/`)
// Very slow test
bt.skipLoad(`.*/stTimeConsuming/.*`)
// test takes a lot for time and goes easily OOM because of sha3 calculation on a huge range,
// using 4.6 TGas
bt.skipLoad(`.*randomStatetest94.json.*`)
// After the merge we would accept side chains as canonical even if they have lower td
bt.skipLoad(`.*bcMultiChainTest/ChainAtoChainB_difficultyB.json`)
bt.skipLoad(`.*bcMultiChainTest/CallContractFromNotBestBlock.json`)
bt.skipLoad(`.*bcTotalDifficultyTest/uncleBlockAtBlock3afterBlock4.json`)
bt.skipLoad(`.*bcTotalDifficultyTest/lotsOfBranchesOverrideAtTheMiddle.json`)
bt.skipLoad(`.*bcTotalDifficultyTest/sideChainWithMoreTransactions.json`)
bt.skipLoad(`.*bcForkStressTest/ForkStressTest.json`)
bt.skipLoad(`.*bcMultiChainTest/lotsOfLeafs.json`)
bt.skipLoad(`.*bcFrontierToHomestead/blockChainFrontierWithLargerTDvsHomesteadBlockchain.json`)
bt.skipLoad(`.*bcFrontierToHomestead/blockChainFrontierWithLargerTDvsHomesteadBlockchain2.json`)
// With chain history removal, TDs become unavailable, this transition tests based on TTD are unrunnable
bt.skipLoad(`.*bcArrowGlacierToParis/powToPosBlockRejection.json`)
// This directory contains no test.
bt.skipLoad(`.*\.meta/.*`)
bt.walk(t, blockTestDir, func(t *testing.T, name string, test *BlockTest) {
config, ok := Forks[test.json.Network]
if !ok {
t.Fatalf("unsupported fork: %s\n", test.json.Network)
}
gspec := test.genesis(config)
// skip any tests which are not past the cancun fork (selfdestruct removal)
if gspec.Config.CancunTime == nil || *gspec.Config.CancunTime != 0 {
return
}
execBlockTest(t, bt, test, true)
})
// There is also a LegacyTests folder, containing blockchain tests generated
// prior to Istanbul. However, they are all derived from GeneralStateTests,
// which run natively, so there's no reason to run them here.
}
func TestBlockchain(t *testing.T) {
bt := new(testMatcher)
@ -67,17 +126,16 @@ func TestBlockchain(t *testing.T) {
bt.skipLoad(`.*\.meta/.*`)
bt.walk(t, blockTestDir, func(t *testing.T, name string, test *BlockTest) {
execBlockTest(t, bt, test)
execBlockTest(t, bt, test, false)
})
// There is also a LegacyTests folder, containing blockchain tests generated
// prior to Istanbul. However, they are all derived from GeneralStateTests,
// which run natively, so there's no reason to run them here.
}
// TestExecutionSpecBlocktests runs the test fixtures from execution-spec-tests.
func TestExecutionSpecBlocktests(t *testing.T) {
if !common.FileExist(executionSpecBlockchainTestDir) {
t.Skipf("directory %s does not exist", executionSpecBlockchainTestDir)
func testExecutionSpecBlocktests(t *testing.T, testDir string) {
if !common.FileExist(testDir) {
t.Skipf("directory %s does not exist", testDir)
}
bt := new(testMatcher)
@ -85,12 +143,24 @@ func TestExecutionSpecBlocktests(t *testing.T) {
bt.skipLoad(".*prague/eip7251_consolidations/test_system_contract_deployment.json")
bt.skipLoad(".*prague/eip7002_el_triggerable_withdrawals/test_system_contract_deployment.json")
bt.walk(t, executionSpecBlockchainTestDir, func(t *testing.T, name string, test *BlockTest) {
execBlockTest(t, bt, test)
bt.walk(t, testDir, func(t *testing.T, name string, test *BlockTest) {
execBlockTest(t, bt, test, true)
})
}
func execBlockTest(t *testing.T, bt *testMatcher, test *BlockTest) {
// TestExecutionSpecBlocktests runs the test fixtures from execution-spec-tests.
func TestExecutionSpecBlocktests(t *testing.T) {
testExecutionSpecBlocktests(t, executionSpecBlockchainTestDir)
}
// TestExecutionSpecBlocktestsBAL runs the BAL release test fixtures from execution-spec-tests.
func TestExecutionSpecBlocktestsBAL(t *testing.T) {
testExecutionSpecBlocktests(t, executionSpecBALBlockchainTestDir)
}
var failures = 0
func execBlockTest(t *testing.T, bt *testMatcher, test *BlockTest, buildAndVerifyBAL bool) {
// Define all the different flag combinations we should run the tests with,
// picking only one for short tests.
//
@ -106,7 +176,13 @@ func execBlockTest(t *testing.T, bt *testMatcher, test *BlockTest) {
}
for _, snapshot := range snapshotConf {
for _, dbscheme := range dbschemeConf {
if err := bt.checkFailure(t, test.Run(snapshot, dbscheme, true, nil, nil)); err != nil {
if err := bt.checkFailure(t, test.Run(snapshot, dbscheme, true, buildAndVerifyBAL, nil, nil)); err != nil {
failures++
/*
if failures > 10 {
panic("adsf")
}
*/
t.Errorf("test with config {snapshotter:%v, scheme:%v} failed: %v", snapshot, dbscheme, err)
return
}

View file

@ -22,11 +22,6 @@ import (
"encoding/hex"
"encoding/json"
"fmt"
stdmath "math"
"math/big"
"os"
"reflect"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/common/math"
@ -37,6 +32,7 @@ import (
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/tracing"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/types/bal"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/params"
@ -44,6 +40,11 @@ import (
"github.com/ethereum/go-ethereum/triedb"
"github.com/ethereum/go-ethereum/triedb/hashdb"
"github.com/ethereum/go-ethereum/triedb/pathdb"
stdmath "math"
"math/big"
"os"
"reflect"
"strings"
)
// A BlockTest checks handling of entire blocks.
@ -71,6 +72,7 @@ type btBlock struct {
ExpectException string
Rlp string
UncleHeaders []*btHeader
AccessList *bal.BlockAccessList `json:"blockAccessList,omitempty"`
}
//go:generate go run github.com/fjl/gencodec -type btHeader -field-override btHeaderMarshaling -out gen_btheader.go
@ -97,6 +99,7 @@ type btHeader struct {
BlobGasUsed *uint64
ExcessBlobGas *uint64
ParentBeaconBlockRoot *common.Hash
BlockAccessListHash *common.Hash
SlotNumber *uint64
}
@ -113,27 +116,20 @@ type btHeaderMarshaling struct {
SlotNumber *math.HexOrDecimal64
}
func (t *BlockTest) Run(snapshotter bool, scheme string, witness bool, tracer *tracing.Hooks, postCheck func(error, *core.BlockChain)) (result error) {
config, ok := Forks[t.json.Network]
if !ok {
return UnsupportedForkError{t.json.Network}
}
func (t *BlockTest) createTestBlockChain(config *params.ChainConfig, snapshotter bool, scheme string, witness, createAndVerifyBAL bool, tracer *tracing.Hooks) (*core.BlockChain, error) {
// import pre accounts & construct test genesis block & state root
// Commit genesis state
var (
gspec = t.genesis(config)
db = rawdb.NewMemoryDatabase()
tconf = &triedb.Config{
Preimages: true,
IsVerkle: gspec.Config.VerkleTime != nil && *gspec.Config.VerkleTime <= gspec.Timestamp,
}
)
if scheme == rawdb.PathScheme || tconf.IsVerkle {
if scheme == rawdb.PathScheme {
tconf.PathDB = pathdb.Defaults
} else {
tconf.HashDB = hashdb.Defaults
}
gspec := t.genesis(config)
// if ttd is not specified, set an arbitrary huge value
if gspec.Config.TerminalTotalDifficulty == nil {
@ -142,15 +138,15 @@ func (t *BlockTest) Run(snapshotter bool, scheme string, witness bool, tracer *t
triedb := triedb.NewDatabase(db, tconf)
gblock, err := gspec.Commit(db, triedb, nil)
if err != nil {
return err
return nil, err
}
triedb.Close() // close the db to prevent memory leak
if gblock.Hash() != t.json.Genesis.Hash {
return fmt.Errorf("genesis block hash doesn't match test: computed=%x, test=%x", gblock.Hash().Bytes()[:6], t.json.Genesis.Hash[:6])
return nil, fmt.Errorf("genesis block hash doesn't match test: computed=%x, test=%x", gblock.Hash().Bytes()[:6], t.json.Genesis.Hash[:6])
}
if gblock.Root() != t.json.Genesis.StateRoot {
return fmt.Errorf("genesis block state root does not match test: computed=%x, test=%x", gblock.Root().Bytes()[:6], t.json.Genesis.StateRoot[:6])
return nil, fmt.Errorf("genesis block state root does not match test: computed=%x, test=%x", gblock.Root().Bytes()[:6], t.json.Genesis.StateRoot[:6])
}
// Wrap the original engine within the beacon-engine
engine := beacon.New(ethash.NewFaker())
@ -164,12 +160,27 @@ func (t *BlockTest) Run(snapshotter bool, scheme string, witness bool, tracer *t
Tracer: tracer,
StatelessSelfValidation: witness,
},
NoPrefetch: true,
}
if snapshotter {
options.SnapshotLimit = 1
options.SnapshotWait = true
}
chain, err := core.NewBlockChain(db, gspec, engine, options)
if err != nil {
return nil, err
}
return chain, nil
}
func (t *BlockTest) Run(snapshotter bool, scheme string, witness bool, createAndVerifyBAL bool, tracer *tracing.Hooks, postCheck func(error, *core.BlockChain)) (result error) {
config, ok := Forks[t.json.Network]
if !ok {
return UnsupportedForkError{t.json.Network}
}
// import pre accounts & construct test genesis block & state root
chain, err := t.createTestBlockChain(config, snapshotter, scheme, witness, createAndVerifyBAL, tracer)
if err != nil {
return err
}
@ -203,7 +214,50 @@ func (t *BlockTest) Run(snapshotter bool, scheme string, witness bool, tracer *t
}
}
}
return t.validateImportedHeaders(chain, validBlocks)
err = t.validateImportedHeaders(chain, validBlocks)
if err != nil {
return err
}
if createAndVerifyBAL {
newChain, _ := t.createTestBlockChain(config, snapshotter, scheme, witness, createAndVerifyBAL, tracer)
defer newChain.Stop()
var blocksWithBAL types.Blocks
for i := uint64(1); i <= chain.CurrentBlock().Number.Uint64(); i++ {
block := chain.GetBlockByNumber(i)
if chain.Config().IsAmsterdam(block.Number(), block.Time()) && block.AccessList() == nil {
return fmt.Errorf("block %d missing BAL", block.NumberU64())
}
blocksWithBAL = append(blocksWithBAL, block)
}
amt, err := newChain.InsertChain(blocksWithBAL)
if err != nil {
return err
}
_ = amt
newDB, err := newChain.State()
if err != nil {
return err
}
if err = t.validatePostState(newDB); err != nil {
return fmt.Errorf("post state validation failed: %v", err)
}
// Cross-check the snapshot-to-hash against the trie hash
if snapshotter {
if newChain.Snapshots() != nil {
if err := chain.Snapshots().Verify(chain.CurrentBlock().Root); err != nil {
return err
}
}
}
err = t.validateImportedHeaders(newChain, validBlocks)
if err != nil {
return err
}
}
return nil
}
// Network returns the network/fork name for this test.
@ -213,20 +267,21 @@ func (t *BlockTest) Network() string {
func (t *BlockTest) genesis(config *params.ChainConfig) *core.Genesis {
return &core.Genesis{
Config: config,
Nonce: t.json.Genesis.Nonce.Uint64(),
Timestamp: t.json.Genesis.Timestamp,
ParentHash: t.json.Genesis.ParentHash,
ExtraData: t.json.Genesis.ExtraData,
GasLimit: t.json.Genesis.GasLimit,
GasUsed: t.json.Genesis.GasUsed,
Difficulty: t.json.Genesis.Difficulty,
Mixhash: t.json.Genesis.MixHash,
Coinbase: t.json.Genesis.Coinbase,
Alloc: t.json.Pre,
BaseFee: t.json.Genesis.BaseFeePerGas,
BlobGasUsed: t.json.Genesis.BlobGasUsed,
ExcessBlobGas: t.json.Genesis.ExcessBlobGas,
Config: config,
Nonce: t.json.Genesis.Nonce.Uint64(),
Timestamp: t.json.Genesis.Timestamp,
ParentHash: t.json.Genesis.ParentHash,
ExtraData: t.json.Genesis.ExtraData,
GasLimit: t.json.Genesis.GasLimit,
GasUsed: t.json.Genesis.GasUsed,
Difficulty: t.json.Genesis.Difficulty,
Mixhash: t.json.Genesis.MixHash,
Coinbase: t.json.Genesis.Coinbase,
Alloc: t.json.Pre,
BaseFee: t.json.Genesis.BaseFeePerGas,
BlobGasUsed: t.json.Genesis.BlobGasUsed,
ExcessBlobGas: t.json.Genesis.ExcessBlobGas,
BlockAccessListHash: t.json.Genesis.BlockAccessListHash,
}
}
@ -256,6 +311,16 @@ func (t *BlockTest) insertBlocks(blockchain *core.BlockChain) ([]btBlock, error)
return nil, fmt.Errorf("block RLP decoding failed when expected to succeed: %v", err)
}
}
// check that if we encode the same block, it will result in the same RLP
var enc bytes.Buffer
if err := rlp.Encode(&enc, cb); err != nil {
return nil, err
}
expected := common.Hex2Bytes(strings.TrimLeft(b.Rlp, "0x"))
if !bytes.Equal(enc.Bytes(), expected) {
return nil, fmt.Errorf("mismatch. expected\n%s\ngot\n%x\n", expected, enc.Bytes())
}
// RLP decoding worked, try to insert into chain:
blocks := types.Blocks{cb}
i, err := blockchain.InsertChain(blocks)
@ -268,7 +333,7 @@ func (t *BlockTest) insertBlocks(blockchain *core.BlockChain) ([]btBlock, error)
}
if b.BlockHeader == nil {
if data, err := json.MarshalIndent(cb.Header(), "", " "); err == nil {
fmt.Fprintf(os.Stdout, "block (index %d) insertion should have failed due to: %v:\n%v\n",
fmt.Fprintf(os.Stderr, "block (index %d) insertion should have failed due to: %v:\n%v\n",
bi, b.ExpectException, string(data))
}
return nil, fmt.Errorf("block (index %d) insertion should have failed due to: %v",

View file

@ -38,6 +38,7 @@ func (b btHeader) MarshalJSON() ([]byte, error) {
BlobGasUsed *math.HexOrDecimal64
ExcessBlobGas *math.HexOrDecimal64
ParentBeaconBlockRoot *common.Hash
BlockAccessListHash *common.Hash
SlotNumber *math.HexOrDecimal64
}
var enc btHeader
@ -62,6 +63,7 @@ func (b btHeader) MarshalJSON() ([]byte, error) {
enc.BlobGasUsed = (*math.HexOrDecimal64)(b.BlobGasUsed)
enc.ExcessBlobGas = (*math.HexOrDecimal64)(b.ExcessBlobGas)
enc.ParentBeaconBlockRoot = b.ParentBeaconBlockRoot
enc.BlockAccessListHash = b.BlockAccessListHash
enc.SlotNumber = (*math.HexOrDecimal64)(b.SlotNumber)
return json.Marshal(&enc)
}
@ -90,6 +92,7 @@ func (b *btHeader) UnmarshalJSON(input []byte) error {
BlobGasUsed *math.HexOrDecimal64
ExcessBlobGas *math.HexOrDecimal64
ParentBeaconBlockRoot *common.Hash
BlockAccessListHash *common.Hash
SlotNumber *math.HexOrDecimal64
}
var dec btHeader
@ -159,6 +162,9 @@ func (b *btHeader) UnmarshalJSON(input []byte) error {
if dec.ParentBeaconBlockRoot != nil {
b.ParentBeaconBlockRoot = dec.ParentBeaconBlockRoot
}
if dec.BlockAccessListHash != nil {
b.BlockAccessListHash = dec.BlockAccessListHash
}
if dec.SlotNumber != nil {
b.SlotNumber = (*uint64)(dec.SlotNumber)
}

View file

@ -493,6 +493,70 @@ var Forks = map[string]*params.ChainConfig{
BPO1: bpo1BlobConfig,
},
},
"Amsterdam": {
ChainID: big.NewInt(1),
HomesteadBlock: big.NewInt(0),
EIP150Block: big.NewInt(0),
EIP155Block: big.NewInt(0),
EIP158Block: big.NewInt(0),
ByzantiumBlock: big.NewInt(0),
ConstantinopleBlock: big.NewInt(0),
PetersburgBlock: big.NewInt(0),
IstanbulBlock: big.NewInt(0),
MuirGlacierBlock: big.NewInt(0),
BerlinBlock: big.NewInt(0),
LondonBlock: big.NewInt(0),
ArrowGlacierBlock: big.NewInt(0),
MergeNetsplitBlock: big.NewInt(0),
TerminalTotalDifficulty: big.NewInt(0),
ShanghaiTime: u64(0),
CancunTime: u64(0),
PragueTime: u64(0),
OsakaTime: u64(0),
BPO1Time: u64(0),
BPO2Time: u64(0),
AmsterdamTime: u64(0),
DepositContractAddress: params.MainnetChainConfig.DepositContractAddress,
BlobScheduleConfig: &params.BlobScheduleConfig{
Cancun: params.DefaultCancunBlobConfig,
Prague: params.DefaultPragueBlobConfig,
Osaka: params.DefaultOsakaBlobConfig,
BPO1: bpo1BlobConfig,
BPO2: bpo2BlobConfig,
},
},
"BPO2ToAmsterdamAtTime15k": {
ChainID: big.NewInt(1),
HomesteadBlock: big.NewInt(0),
EIP150Block: big.NewInt(0),
EIP155Block: big.NewInt(0),
EIP158Block: big.NewInt(0),
ByzantiumBlock: big.NewInt(0),
ConstantinopleBlock: big.NewInt(0),
PetersburgBlock: big.NewInt(0),
IstanbulBlock: big.NewInt(0),
MuirGlacierBlock: big.NewInt(0),
BerlinBlock: big.NewInt(0),
LondonBlock: big.NewInt(0),
ArrowGlacierBlock: big.NewInt(0),
MergeNetsplitBlock: big.NewInt(0),
TerminalTotalDifficulty: big.NewInt(0),
ShanghaiTime: u64(0),
CancunTime: u64(0),
PragueTime: u64(0),
OsakaTime: u64(0),
BPO1Time: u64(0),
BPO2Time: u64(0),
AmsterdamTime: u64(15_000),
DepositContractAddress: params.MainnetChainConfig.DepositContractAddress,
BlobScheduleConfig: &params.BlobScheduleConfig{
Cancun: params.DefaultCancunBlobConfig,
Prague: params.DefaultPragueBlobConfig,
Osaka: params.DefaultOsakaBlobConfig,
BPO1: bpo1BlobConfig,
BPO2: bpo2BlobConfig,
},
},
"OsakaToBPO1AtTime15k": {
ChainID: big.NewInt(1),
HomesteadBlock: big.NewInt(0),

View file

@ -34,17 +34,18 @@ import (
)
var (
baseDir = filepath.Join(".", "testdata")
blockTestDir = filepath.Join(baseDir, "BlockchainTests")
stateTestDir = filepath.Join(baseDir, "GeneralStateTests")
legacyStateTestDir = filepath.Join(baseDir, "LegacyTests", "Constantinople", "GeneralStateTests")
transactionTestDir = filepath.Join(baseDir, "TransactionTests")
rlpTestDir = filepath.Join(baseDir, "RLPTests")
difficultyTestDir = filepath.Join(baseDir, "BasicTests")
executionSpecBlockchainTestDir = filepath.Join(".", "spec-tests", "fixtures", "blockchain_tests")
executionSpecStateTestDir = filepath.Join(".", "spec-tests", "fixtures", "state_tests")
executionSpecTransactionTestDir = filepath.Join(".", "spec-tests", "fixtures", "transaction_tests")
benchmarksDir = filepath.Join(".", "evm-benchmarks", "benchmarks")
baseDir = filepath.Join(".", "testdata")
blockTestDir = filepath.Join(baseDir, "BlockchainTests")
stateTestDir = filepath.Join(baseDir, "GeneralStateTests")
legacyStateTestDir = filepath.Join(baseDir, "LegacyTests", "Constantinople", "GeneralStateTests")
transactionTestDir = filepath.Join(baseDir, "TransactionTests")
rlpTestDir = filepath.Join(baseDir, "RLPTests")
difficultyTestDir = filepath.Join(baseDir, "BasicTests")
executionSpecBlockchainTestDir = filepath.Join(".", "spec-tests", "fixtures", "blockchain_tests")
executionSpecStateTestDir = filepath.Join(".", "spec-tests", "fixtures", "state_tests")
executionSpecTransactionTestDir = filepath.Join(".", "spec-tests", "fixtures", "transaction_tests")
benchmarksDir = filepath.Join(".", "evm-benchmarks", "benchmarks")
executionSpecBALBlockchainTestDir = filepath.Join(".", "spec-tests-bal", "fixtures", "blockchain_tests")
)
func readJSON(reader io.Reader, value interface{}) error {

View file

@ -239,6 +239,14 @@ func (t *BinaryTrie) GetStorage(addr common.Address, key []byte) ([]byte, error)
return t.root.Get(GetBinaryTreeKeyStorageSlot(addr, key), t.nodeResolver)
}
func (t *BinaryTrie) UpdateAccountBatch(addresses []common.Address, accounts []*types.StateAccount, _ []int) error {
panic("not implemented")
}
func (t *BinaryTrie) UpdateStorageBatch(_ common.Address, keys [][]byte, values [][]byte) error {
panic("not implemented")
}
// UpdateAccount updates the account information for the given address.
func (t *BinaryTrie) UpdateAccount(addr common.Address, acc *types.StateAccount, codeLen int) error {
var (

View file

@ -210,6 +210,29 @@ func (t *StateTrie) UpdateStorage(_ common.Address, key, value []byte) error {
return nil
}
// UpdateStorageBatch attempts to update a list storages in the batch manner.
func (t *StateTrie) UpdateStorageBatch(_ common.Address, keys [][]byte, values [][]byte) error {
var (
hkeys = make([][]byte, 0, len(keys))
evals = make([][]byte, 0, len(values))
)
for _, key := range keys {
hk := crypto.Keccak256(key)
if t.preimages != nil {
t.secKeyCache[common.Hash(hk)] = key
}
hkeys = append(hkeys, hk)
}
for _, val := range values {
data, err := rlp.EncodeToBytes(val)
if err != nil {
return err
}
evals = append(evals, data)
}
return t.trie.UpdateBatch(hkeys, evals)
}
// UpdateAccount will abstract the write of an account to the secure trie.
func (t *StateTrie) UpdateAccount(address common.Address, acc *types.StateAccount, _ int) error {
hk := crypto.Keccak256(address.Bytes())
@ -226,6 +249,29 @@ func (t *StateTrie) UpdateAccount(address common.Address, acc *types.StateAccoun
return nil
}
// UpdateAccountBatch attempts to update a list accounts in the batch manner.
func (t *StateTrie) UpdateAccountBatch(addresses []common.Address, accounts []*types.StateAccount, _ []int) error {
var (
hkeys = make([][]byte, 0, len(addresses))
values = make([][]byte, 0, len(accounts))
)
for _, addr := range addresses {
hk := crypto.Keccak256(addr.Bytes())
if t.preimages != nil {
t.secKeyCache[common.Hash(hk)] = addr.Bytes()
}
hkeys = append(hkeys, hk)
}
for _, acc := range accounts {
data, err := rlp.EncodeToBytes(acc)
if err != nil {
return err
}
values = append(values, data)
}
return t.trie.UpdateBatch(hkeys, values)
}
func (t *StateTrie) UpdateContractCode(_ common.Address, _ common.Hash, _ []byte) error {
return nil
}

View file

@ -33,12 +33,10 @@ import (
// while the latter is inserted/deleted in order to follow the rule of trie.
// This tool can track all of them no matter the node is embedded in its
// parent or not, but valueNode is never tracked.
//
// Note opTracer is not thread-safe, callers should be responsible for handling
// the concurrency issues by themselves.
type opTracer struct {
inserts map[string]struct{}
deletes map[string]struct{}
lock sync.RWMutex
}
// newOpTracer initializes the tracer for capturing trie changes.
@ -53,6 +51,9 @@ func newOpTracer() *opTracer {
// in the deletion set (resurrected node), then just wipe it from
// the deletion set as it's "untouched".
func (t *opTracer) onInsert(path []byte) {
t.lock.Lock()
defer t.lock.Unlock()
if _, present := t.deletes[string(path)]; present {
delete(t.deletes, string(path))
return
@ -64,6 +65,9 @@ func (t *opTracer) onInsert(path []byte) {
// in the addition set, then just wipe it from the addition set
// as it's untouched.
func (t *opTracer) onDelete(path []byte) {
t.lock.Lock()
defer t.lock.Unlock()
if _, present := t.inserts[string(path)]; present {
delete(t.inserts, string(path))
return
@ -73,12 +77,18 @@ func (t *opTracer) onDelete(path []byte) {
// reset clears the content tracked by tracer.
func (t *opTracer) reset() {
t.lock.Lock()
defer t.lock.Unlock()
clear(t.inserts)
clear(t.deletes)
}
// copy returns a deep copied tracer instance.
func (t *opTracer) copy() *opTracer {
t.lock.RLock()
defer t.lock.RUnlock()
return &opTracer{
inserts: maps.Clone(t.inserts),
deletes: maps.Clone(t.deletes),
@ -87,6 +97,9 @@ func (t *opTracer) copy() *opTracer {
// deletedList returns a list of node paths which are deleted from the trie.
func (t *opTracer) deletedList() [][]byte {
t.lock.RLock()
defer t.lock.RUnlock()
paths := make([][]byte, 0, len(t.deletes))
for path := range t.deletes {
paths = append(paths, []byte(path))

View file

@ -49,6 +49,14 @@ func NewTransitionTrie(base *trie.SecureTrie, overlay *bintrie.BinaryTrie, st bo
}
}
func (t *TransitionTrie) UpdateAccountBatch(addresses []common.Address, accounts []*types.StateAccount, _ []int) error {
panic("not implemented")
}
func (t *TransitionTrie) UpdateStorageBatch(_ common.Address, keys [][]byte, values [][]byte) error {
panic("not implemented")
}
// Base returns the base trie.
func (t *TransitionTrie) Base() *trie.SecureTrie {
return t.base

View file

@ -480,6 +480,72 @@ func (t *Trie) insert(n node, prefix, key []byte, value node) (bool, node, error
}
}
// UpdateBatch updates a batch of entries concurrently.
func (t *Trie) UpdateBatch(keys [][]byte, values [][]byte) error {
// Short circuit if the trie is already committed and unusable.
if t.committed {
return ErrCommitted
}
if len(keys) != len(values) {
return fmt.Errorf("keys and values length mismatch: %d != %d", len(keys), len(values))
}
// Insert the entries sequentially if there are not too many
// trie nodes in the trie.
fn, ok := t.root.(*fullNode)
if !ok || len(keys) < 4 { // TODO(rjl493456442) the parallelism threshold should be twisted
for i, key := range keys {
err := t.Update(key, values[i])
if err != nil {
return err
}
}
return nil
}
var (
ikeys = make(map[byte][][]byte)
ivals = make(map[byte][][]byte)
eg errgroup.Group
)
for i, key := range keys {
hkey := keybytesToHex(key)
ikeys[hkey[0]] = append(ikeys[hkey[0]], hkey)
ivals[hkey[0]] = append(ivals[hkey[0]], values[i])
}
if len(keys) > 0 {
fn.flags = t.newFlag()
}
for p, k := range ikeys {
pos := p
ks := k
eg.Go(func() error {
vs := ivals[pos]
for i, k := range ks {
if len(vs[i]) != 0 {
_, n, err := t.insert(fn.Children[pos], []byte{pos}, k[1:], valueNode(vs[i]))
if err != nil {
return err
}
fn.Children[pos] = n
} else {
_, n, err := t.delete(fn.Children[pos], []byte{pos}, k[1:])
if err != nil {
return err
}
fn.Children[pos] = n
}
}
return nil
})
}
if err := eg.Wait(); err != nil {
return err
}
t.unhashed += len(keys)
t.uncommitted += len(keys)
return nil
}
// MustDelete is a wrapper of Delete and will omit any encountered error but
// just print out an error message.
func (t *Trie) MustDelete(key []byte) {

View file

@ -1500,82 +1500,56 @@ func testTrieCopyNewTrie(t *testing.T, entries []kv) {
}
}
// goos: darwin
// goarch: arm64
// pkg: github.com/ethereum/go-ethereum/trie
// cpu: Apple M1 Pro
// BenchmarkTriePrefetch
// BenchmarkTriePrefetch-8 9961 100706 ns/op
func BenchmarkTriePrefetch(b *testing.B) {
db := newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme)
tr := NewEmpty(db)
vals := make(map[string]*kv)
for i := 0; i < 3000; i++ {
value := &kv{
k: randBytes(32),
v: randBytes(20),
t: false,
}
tr.MustUpdate(value.k, value.v)
vals[string(value.k)] = value
}
root, nodes := tr.Commit(false)
db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes))
b.ResetTimer()
func TestUpdateBatch(t *testing.T) {
testUpdateBatch(t, []kv{
{k: []byte("do"), v: []byte("verb")},
{k: []byte("ether"), v: []byte("wookiedoo")},
{k: []byte("horse"), v: []byte("stallion")},
{k: []byte("shaman"), v: []byte("horse")},
{k: []byte("doge"), v: []byte("coin")},
{k: []byte("dog"), v: []byte("puppy")},
})
for i := 0; i < b.N; i++ {
tr, err := New(TrieID(root), db)
if err != nil {
b.Fatalf("Failed to open the trie")
}
var keys [][]byte
for k := range vals {
keys = append(keys, []byte(k))
if len(keys) > 64 {
break
}
}
tr.Prefetch(keys)
var entries []kv
for i := 0; i < 256; i++ {
entries = append(entries, kv{k: testrand.Bytes(32), v: testrand.Bytes(32)})
}
testUpdateBatch(t, entries)
}
// goos: darwin
// goarch: arm64
// pkg: github.com/ethereum/go-ethereum/trie
// cpu: Apple M1 Pro
// BenchmarkTrieSeqPrefetch
// BenchmarkTrieSeqPrefetch-8 12879 96710 ns/op
func BenchmarkTrieSeqPrefetch(b *testing.B) {
db := newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme)
tr := NewEmpty(db)
vals := make(map[string]*kv)
for i := 0; i < 3000; i++ {
value := &kv{
k: randBytes(32),
v: randBytes(20),
t: false,
}
tr.MustUpdate(value.k, value.v)
vals[string(value.k)] = value
func testUpdateBatch(t *testing.T, entries []kv) {
var (
base = NewEmpty(nil)
keys [][]byte
vals [][]byte
)
for _, entry := range entries {
base.Update(entry.k, entry.v)
keys = append(keys, entry.k)
vals = append(vals, entry.v)
}
for i := 0; i < 10; i++ {
k, v := testrand.Bytes(32), testrand.Bytes(32)
base.Update(k, v)
keys = append(keys, k)
vals = append(vals, v)
}
root, nodes := tr.Commit(false)
db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes))
b.ResetTimer()
for i := 0; i < b.N; i++ {
tr, err := New(TrieID(root), db)
if err != nil {
b.Fatalf("Failed to open the trie")
}
var keys [][]byte
for k := range vals {
keys = append(keys, []byte(k))
if len(keys) > 64 {
break
}
}
for _, k := range keys {
tr.Get(k)
cmp := NewEmpty(nil)
if err := cmp.UpdateBatch(keys, vals); err != nil {
t.Fatalf("Failed to update batch, %v", err)
}
// Traverse the original tree, the changes made on the copy one shouldn't
// affect the old one
for _, key := range keys {
v1, _ := base.Get(key)
v2, _ := cmp.Get(key)
if !bytes.Equal(v1, v2) {
t.Errorf("Unexpected data, key: %v, want: %v, got: %v", key, v1, v2)
}
}
if base.Hash() != cmp.Hash() {
t.Errorf("Hash mismatch: want %x, got %x", base.Hash(), cmp.Hash())
}
}

View file

@ -18,7 +18,6 @@ package triedb
import (
"errors"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/ethdb"