all: implement eip 7928 block access lists

This commit is contained in:
Jared Wasinger 2026-03-02 23:13:16 -05:00 committed by Marius van der Wijden
parent da0d7b575a
commit 5808d212bb
78 changed files with 4599 additions and 1077 deletions

View file

@ -10,6 +10,7 @@ import (
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/types/bal"
) )
var _ = (*executableDataMarshaling)(nil) var _ = (*executableDataMarshaling)(nil)
@ -34,6 +35,7 @@ func (e ExecutableData) MarshalJSON() ([]byte, error) {
Withdrawals []*types.Withdrawal `json:"withdrawals"` Withdrawals []*types.Withdrawal `json:"withdrawals"`
BlobGasUsed *hexutil.Uint64 `json:"blobGasUsed"` BlobGasUsed *hexutil.Uint64 `json:"blobGasUsed"`
ExcessBlobGas *hexutil.Uint64 `json:"excessBlobGas"` ExcessBlobGas *hexutil.Uint64 `json:"excessBlobGas"`
BlockAccessList *bal.BlockAccessList `json:"blockAccessList"`
SlotNumber *hexutil.Uint64 `json:"slotNumber"` SlotNumber *hexutil.Uint64 `json:"slotNumber"`
} }
var enc ExecutableData var enc ExecutableData
@ -59,6 +61,7 @@ func (e ExecutableData) MarshalJSON() ([]byte, error) {
enc.Withdrawals = e.Withdrawals enc.Withdrawals = e.Withdrawals
enc.BlobGasUsed = (*hexutil.Uint64)(e.BlobGasUsed) enc.BlobGasUsed = (*hexutil.Uint64)(e.BlobGasUsed)
enc.ExcessBlobGas = (*hexutil.Uint64)(e.ExcessBlobGas) enc.ExcessBlobGas = (*hexutil.Uint64)(e.ExcessBlobGas)
enc.BlockAccessList = e.BlockAccessList
enc.SlotNumber = (*hexutil.Uint64)(e.SlotNumber) enc.SlotNumber = (*hexutil.Uint64)(e.SlotNumber)
return json.Marshal(&enc) return json.Marshal(&enc)
} }
@ -83,6 +86,7 @@ func (e *ExecutableData) UnmarshalJSON(input []byte) error {
Withdrawals []*types.Withdrawal `json:"withdrawals"` Withdrawals []*types.Withdrawal `json:"withdrawals"`
BlobGasUsed *hexutil.Uint64 `json:"blobGasUsed"` BlobGasUsed *hexutil.Uint64 `json:"blobGasUsed"`
ExcessBlobGas *hexutil.Uint64 `json:"excessBlobGas"` ExcessBlobGas *hexutil.Uint64 `json:"excessBlobGas"`
BlockAccessList *bal.BlockAccessList `json:"blockAccessList"`
SlotNumber *hexutil.Uint64 `json:"slotNumber"` SlotNumber *hexutil.Uint64 `json:"slotNumber"`
} }
var dec ExecutableData var dec ExecutableData
@ -157,6 +161,9 @@ func (e *ExecutableData) UnmarshalJSON(input []byte) error {
if dec.ExcessBlobGas != nil { if dec.ExcessBlobGas != nil {
e.ExcessBlobGas = (*uint64)(dec.ExcessBlobGas) e.ExcessBlobGas = (*uint64)(dec.ExcessBlobGas)
} }
if dec.BlockAccessList != nil {
e.BlockAccessList = dec.BlockAccessList
}
if dec.SlotNumber != nil { if dec.SlotNumber != nil {
e.SlotNumber = (*uint64)(dec.SlotNumber) e.SlotNumber = (*uint64)(dec.SlotNumber)
} }

View file

@ -18,6 +18,7 @@ package engine
import ( import (
"fmt" "fmt"
"github.com/ethereum/go-ethereum/core/types/bal"
"math/big" "math/big"
"slices" "slices"
@ -99,6 +100,7 @@ type ExecutableData struct {
Withdrawals []*types.Withdrawal `json:"withdrawals"` Withdrawals []*types.Withdrawal `json:"withdrawals"`
BlobGasUsed *uint64 `json:"blobGasUsed"` BlobGasUsed *uint64 `json:"blobGasUsed"`
ExcessBlobGas *uint64 `json:"excessBlobGas"` ExcessBlobGas *uint64 `json:"excessBlobGas"`
BlockAccessList *bal.BlockAccessList `json:"blockAccessList"`
SlotNumber *uint64 `json:"slotNumber"` SlotNumber *uint64 `json:"slotNumber"`
} }
@ -303,6 +305,8 @@ func ExecutableDataToBlockNoHash(data ExecutableData, versionedHashes []common.H
requestsHash = &h requestsHash = &h
} }
body := types.Body{Transactions: txs, Uncles: nil, Withdrawals: data.Withdrawals}
header := &types.Header{ header := &types.Header{
ParentHash: data.ParentHash, ParentHash: data.ParentHash,
UncleHash: types.EmptyUncleHash, UncleHash: types.EmptyUncleHash,
@ -326,9 +330,16 @@ func ExecutableDataToBlockNoHash(data ExecutableData, versionedHashes []common.H
RequestsHash: requestsHash, RequestsHash: requestsHash,
SlotNumber: data.SlotNumber, SlotNumber: data.SlotNumber,
} }
return types.NewBlockWithHeader(header).
WithBody(types.Body{Transactions: txs, Uncles: nil, Withdrawals: data.Withdrawals}), if data.BlockAccessList != nil {
nil balHash := data.BlockAccessList.Hash()
header.BlockAccessListHash = &balHash
block := types.NewBlockWithHeader(header).WithBody(body).WithAccessList(data.BlockAccessList)
return block, nil
}
return types.NewBlockWithHeader(header).WithBody(body), nil
} }
// BlockToExecutableData constructs the ExecutableData structure by filling the // BlockToExecutableData constructs the ExecutableData structure by filling the
@ -352,6 +363,7 @@ func BlockToExecutableData(block *types.Block, fees *big.Int, sidecars []*types.
Withdrawals: block.Withdrawals(), Withdrawals: block.Withdrawals(),
BlobGasUsed: block.BlobGasUsed(), BlobGasUsed: block.BlobGasUsed(),
ExcessBlobGas: block.ExcessBlobGas(), ExcessBlobGas: block.ExcessBlobGas(),
BlockAccessList: block.AccessList(),
SlotNumber: block.SlotNumber(), SlotNumber: block.SlotNumber(),
} }
@ -393,6 +405,7 @@ func BlockToExecutableData(block *types.Block, fees *big.Int, sidecars []*types.
type ExecutionPayloadBody struct { type ExecutionPayloadBody struct {
TransactionData []hexutil.Bytes `json:"transactions"` TransactionData []hexutil.Bytes `json:"transactions"`
Withdrawals []*types.Withdrawal `json:"withdrawals"` Withdrawals []*types.Withdrawal `json:"withdrawals"`
AccessList *bal.BlockAccessList `json:"blockAccessList"`
} }
// Client identifiers to support ClientVersionV1. // Client identifiers to support ClientVersionV1.

View file

@ -5,6 +5,11 @@
# https://github.com/ethereum/execution-spec-tests/releases/download/v5.1.0 # https://github.com/ethereum/execution-spec-tests/releases/download/v5.1.0
a3192784375acec7eaec492799d5c5d0c47a2909a3cc40178898e4ecd20cc416 fixtures_develop.tar.gz a3192784375acec7eaec492799d5c5d0c47a2909a3cc40178898e4ecd20cc416 fixtures_develop.tar.gz
# version:spec-tests-bal v5.0.0
# https://github.com/ethereum/execution-spec-tests/releases
# https://github.com/ethereum/execution-spec-tests/releases/download/bal%40v5.1.0
c8a7406e6337c1dfd2540f0477afb8abe965c5ed2a63382d7a483eb818f79939 fixtures_bal.tar.gz
# version:golang 1.25.7 # version:golang 1.25.7
# https://go.dev/dl/ # https://go.dev/dl/
178f2832820274b43e177d32f06a3ebb0129e427dd20a5e4c88df2c1763cf10a go1.25.7.src.tar.gz 178f2832820274b43e177d32f06a3ebb0129e427dd20a5e4c88df2c1763cf10a go1.25.7.src.tar.gz

View file

@ -172,6 +172,9 @@ var (
// This is where the tests should be unpacked. // This is where the tests should be unpacked.
executionSpecTestsDir = "tests/spec-tests" executionSpecTestsDir = "tests/spec-tests"
// This is where the bal-specific release of the tests should be unpacked.
executionSpecTestsBALDir = "tests/spec-tests-bal"
) )
var GOBIN, _ = filepath.Abs(filepath.Join("build", "bin")) var GOBIN, _ = filepath.Abs(filepath.Join("build", "bin"))
@ -380,6 +383,7 @@ func doTest(cmdline []string) {
// Get test fixtures. // Get test fixtures.
if !*short { if !*short {
downloadSpecTestFixtures(csdb, *cachedir) downloadSpecTestFixtures(csdb, *cachedir)
downloadBALSpecTestFixtures(csdb, *cachedir)
} }
// Configure the toolchain. // Configure the toolchain.
@ -445,6 +449,19 @@ func downloadSpecTestFixtures(csdb *download.ChecksumDB, cachedir string) string
return filepath.Join(cachedir, base) return filepath.Join(cachedir, base)
} }
func downloadBALSpecTestFixtures(csdb *download.ChecksumDB, cachedir string) string {
ext := ".tar.gz"
base := "fixtures_bal"
archivePath := filepath.Join(cachedir, base+ext)
if err := csdb.DownloadFileFromKnownURL(archivePath); err != nil {
log.Fatal(err)
}
if err := build.ExtractArchive(archivePath, executionSpecTestsBALDir); err != nil {
log.Fatal(err)
}
return filepath.Join(cachedir, base)
}
// doCheckGenerate ensures that re-generating generated files does not cause // doCheckGenerate ensures that re-generating generated files does not cause
// any mutations in the source file tree. // any mutations in the source file tree.
func doCheckGenerate() { func doCheckGenerate() {

View file

@ -117,7 +117,7 @@ func runBlockTest(ctx *cli.Context, fname string) ([]testResult, error) {
test := tests[name] test := tests[name]
result := &testResult{Name: name, Pass: true} result := &testResult{Name: name, Pass: true}
var finalRoot *common.Hash var finalRoot *common.Hash
if err := test.Run(false, rawdb.PathScheme, ctx.Bool(WitnessCrossCheckFlag.Name), tracer, func(res error, chain *core.BlockChain) { if err := test.Run(false, rawdb.PathScheme, ctx.Bool(WitnessCrossCheckFlag.Name), false, tracer, func(res error, chain *core.BlockChain) {
if ctx.Bool(DumpFlag.Name) { if ctx.Bool(DumpFlag.Name) {
if s, _ := chain.State(); s != nil { if s, _ := chain.State(); s != nil {
result.State = dump(s) result.State = dump(s)

View file

@ -240,7 +240,21 @@ func makeFullNode(ctx *cli.Context) *node.Node {
cfg.Eth.OverrideVerkle = &v cfg.Eth.OverrideVerkle = &v
} }
// Start metrics export if enabled. if ctx.IsSet(utils.BlockAccessListExecutionModeFlag.Name) {
val := ctx.String(utils.BlockAccessListExecutionModeFlag.Name)
switch val {
case utils.BalExecutionModeFull:
cfg.Eth.BALExecutionMode = 0
case utils.BalExecutionModeNoBatchIO:
cfg.Eth.BALExecutionMode = 1
case utils.BalExecutionModeSequential:
cfg.Eth.BALExecutionMode = 2
default:
utils.Fatalf("invalid option for --bal.executionmode: %s. acceptable values are full|nobatchio|sequential", val)
}
}
// Start metrics export if enabled
utils.SetupMetrics(&cfg.Metrics) utils.SetupMetrics(&cfg.Metrics)
// Setup OpenTelemetry reporting if enabled. // Setup OpenTelemetry reporting if enabled.

View file

@ -272,6 +272,7 @@ func init() {
consoleFlags, consoleFlags,
debug.Flags, debug.Flags,
metricsFlags, metricsFlags,
[]cli.Flag{utils.BlockAccessListExecutionModeFlag},
) )
flags.AutoEnvVars(app.Flags, "GETH") flags.AutoEnvVars(app.Flags, "GETH")

View file

@ -1106,6 +1106,31 @@ Please note that --` + MetricsHTTPFlag.Name + ` must be set to start the server.
Name: "era.format", Name: "era.format",
Usage: "Archive format: 'era1' or 'erae'", Usage: "Archive format: 'era1' or 'erae'",
} }
// Block Access List flags
ExperimentalBALFlag = &cli.BoolFlag{
Name: "experimental.bal",
Usage: "Enable generation of EIP-7928 block access lists when importing post-Cancun blocks which lack them. When this flag is specified, importing blocks containing access lists triggers validation of their correctness and execution based off them. The header block access list field is not set with blocks created when this flag is specified, nor is it validated when importing blocks that contain access lists. This is used for development purposes only. Do not enable it otherwise.",
Category: flags.MiscCategory,
}
// block access list flags
BlockAccessListExecutionModeFlag = &cli.StringFlag{
Name: "bal.executionmode",
Usage: `
block access list execution type. possible inputs are:
- sequential: no performance acceleration
- full: parallel transaction execution, state root calculation, async warming of access list reads
- nobatchio: same as 'full', but without async warming of access list reads`,
Value: BalExecutionModeFull,
Category: flags.MiscCategory,
}
)
const (
BalExecutionModeFull = "full"
BalExecutionModeNoBatchIO = "nobatchio"
BalExecutionModeSequential = "sequential"
) )
var ( var (

View file

@ -20,6 +20,7 @@ import (
"context" "context"
"errors" "errors"
"fmt" "fmt"
"github.com/ethereum/go-ethereum/core/types/bal"
"math/big" "math/big"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
@ -282,6 +283,12 @@ func (beacon *Beacon) verifyHeader(chain consensus.ChainHeaderReader, header, pa
if !amsterdam && header.SlotNumber != nil { if !amsterdam && header.SlotNumber != nil {
return fmt.Errorf("invalid slotNumber: have %d, expected nil", *header.SlotNumber) return fmt.Errorf("invalid slotNumber: have %d, expected nil", *header.SlotNumber)
} }
if !amsterdam && header.BlockAccessListHash != nil {
return fmt.Errorf("invalid block access list hash: have %x, expected nil", header.BlockAccessListHash)
}
if amsterdam && header.BlockAccessListHash == nil {
return fmt.Errorf("header is missing block access list hash")
}
return nil return nil
} }
@ -336,24 +343,28 @@ func (beacon *Beacon) Prepare(chain consensus.ChainHeaderReader, header *types.H
} }
// Finalize implements consensus.Engine and processes withdrawals on top. // Finalize implements consensus.Engine and processes withdrawals on top.
func (beacon *Beacon) Finalize(chain consensus.ChainHeaderReader, header *types.Header, state vm.StateDB, body *types.Body) { func (beacon *Beacon) Finalize(chain consensus.ChainHeaderReader, header *types.Header, state vm.StateDB, body *types.Body) bal.StateMutations {
if !beacon.IsPoSHeader(header) { if !beacon.IsPoSHeader(header) {
beacon.ethone.Finalize(chain, header, state, body) return beacon.ethone.Finalize(chain, header, state, body)
return
} }
// Withdrawals processing. // Withdrawals processing.
for _, w := range body.Withdrawals { for _, w := range body.Withdrawals {
// always read the target account regardless of withdrawal amt to include it in the BAL
state.GetBalance(w.Address)
// Convert amount from gwei to wei. // Convert amount from gwei to wei.
amount := new(uint256.Int).SetUint64(w.Amount) amount := new(uint256.Int).SetUint64(w.Amount)
amount = amount.Mul(amount, uint256.NewInt(params.GWei)) amount = amount.Mul(amount, uint256.NewInt(params.GWei))
state.AddBalance(w.Address, amount, tracing.BalanceIncreaseWithdrawal) state.AddBalance(w.Address, amount, tracing.BalanceIncreaseWithdrawal)
} }
return state.Finalise(true)
// No block reward which is issued by consensus layer instead. // No block reward which is issued by consensus layer instead.
} }
// FinalizeAndAssemble implements consensus.Engine, setting the final state and // FinalizeAndAssemble implements consensus.Engine, setting the final state and
// assembling the block. // assembling the block.
func (beacon *Beacon) FinalizeAndAssemble(ctx context.Context, chain consensus.ChainHeaderReader, header *types.Header, state *state.StateDB, body *types.Body, receipts []*types.Receipt) (result *types.Block, err error) { func (beacon *Beacon) FinalizeAndAssemble(ctx context.Context, chain consensus.ChainHeaderReader, header *types.Header, state *state.StateDB, body *types.Body, receipts []*types.Receipt, onFinalizeAccessList func(postMut bal.StateMutations) *bal.BlockAccessList) (*types.Block, error) {
if !beacon.IsPoSHeader(header) {
ctx, _, spanEnd := telemetry.StartSpan(ctx, "consensus.beacon.FinalizeAndAssemble", ctx, _, spanEnd := telemetry.StartSpan(ctx, "consensus.beacon.FinalizeAndAssemble",
telemetry.Int64Attribute("block.number", int64(header.Number.Uint64())), telemetry.Int64Attribute("block.number", int64(header.Number.Uint64())),
telemetry.Int64Attribute("txs.count", int64(len(body.Transactions))), telemetry.Int64Attribute("txs.count", int64(len(body.Transactions))),
@ -365,6 +376,7 @@ func (beacon *Beacon) FinalizeAndAssemble(ctx context.Context, chain consensus.C
block, delegateErr := beacon.ethone.FinalizeAndAssemble(ctx, chain, header, state, body, receipts) block, delegateErr := beacon.ethone.FinalizeAndAssemble(ctx, chain, header, state, body, receipts)
return block, delegateErr return block, delegateErr
} }
shanghai := chain.Config().IsShanghai(header.Number, header.Time) shanghai := chain.Config().IsShanghai(header.Number, header.Time)
if shanghai { if shanghai {
// All blocks after Shanghai must include a withdrawals root. // All blocks after Shanghai must include a withdrawals root.
@ -376,11 +388,13 @@ func (beacon *Beacon) FinalizeAndAssemble(ctx context.Context, chain consensus.C
return nil, errors.New("withdrawals set before Shanghai activation") return nil, errors.New("withdrawals set before Shanghai activation")
} }
} }
// Finalize and assemble the block. // Finalize and assemble the block.
_, _, finalizeSpanEnd := telemetry.StartSpan(ctx, "consensus.beacon.Finalize") _, _, finalizeSpanEnd := telemetry.StartSpan(ctx, "consensus.beacon.Finalize")
beacon.Finalize(chain, header, state, body) postMut := beacon.Finalize(chain, header, state, body)
finalizeSpanEnd(nil) finalizeSpanEnd(nil)
// Assign the final state root to header. // Assign the final state root to header.
_, _, rootSpanEnd := telemetry.StartSpan(ctx, "consensus.beacon.IntermediateRoot") _, _, rootSpanEnd := telemetry.StartSpan(ctx, "consensus.beacon.IntermediateRoot")
header.Root = state.IntermediateRoot(true) header.Root = state.IntermediateRoot(true)
@ -388,9 +402,19 @@ func (beacon *Beacon) FinalizeAndAssemble(ctx context.Context, chain consensus.C
// Assemble the final block. // Assemble the final block.
_, _, blockSpanEnd := telemetry.StartSpan(ctx, "consensus.beacon.NewBlock") _, _, blockSpanEnd := telemetry.StartSpan(ctx, "consensus.beacon.NewBlock")
var block *types.Block
if onFinalizeAccessList != nil {
al := onFinalizeAccessList(postMut)
alHash := al.Hash()
header.BlockAccessListHash = &alHash
block := types.NewBlock(header, body, receipts, trie.NewStackTrie(nil)).WithAccessList(al)
} else {
block := types.NewBlock(header, body, receipts, trie.NewStackTrie(nil)) block := types.NewBlock(header, body, receipts, trie.NewStackTrie(nil))
}
blockSpanEnd(nil) blockSpanEnd(nil)
return block, nil return block, nil
} }
// Seal generates a new sealing request for the given input block and pushes // Seal generates a new sealing request for the given input block and pushes

View file

@ -28,6 +28,8 @@ import (
"sync" "sync"
"time" "time"
"github.com/ethereum/go-ethereum/core/types/bal"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/common/lru" "github.com/ethereum/go-ethereum/common/lru"
@ -576,13 +578,14 @@ func (c *Clique) Prepare(chain consensus.ChainHeaderReader, header *types.Header
// Finalize implements consensus.Engine. There is no post-transaction // Finalize implements consensus.Engine. There is no post-transaction
// consensus rules in clique, do nothing here. // consensus rules in clique, do nothing here.
func (c *Clique) Finalize(chain consensus.ChainHeaderReader, header *types.Header, state vm.StateDB, body *types.Body) { func (c *Clique) Finalize(chain consensus.ChainHeaderReader, header *types.Header, state vm.StateDB, body *types.Body) (mut bal.StateMutations) {
// No block rewards in PoA, so the state remains as is // No block rewards in PoA, so the state remains as is
return
} }
// FinalizeAndAssemble implements consensus.Engine, ensuring no uncles are set, // FinalizeAndAssemble implements consensus.Engine, ensuring no uncles are set,
// nor block rewards given, and returns the final block. // nor block rewards given, and returns the final block.
func (c *Clique) FinalizeAndAssemble(ctx context.Context, chain consensus.ChainHeaderReader, header *types.Header, state *state.StateDB, body *types.Body, receipts []*types.Receipt) (*types.Block, error) { func (c *Clique) FinalizeAndAssemble(ctx context.Context, chain consensus.ChainHeaderReader, header *types.Header, state *state.StateDB, body *types.Body, receipts []*types.Receipt, onFinalizeAccessList func(withdrawalMut bal.StateMutations) *bal.BlockAccessList) (*types.Block, error) {
if len(body.Withdrawals) > 0 { if len(body.Withdrawals) > 0 {
return nil, errors.New("clique does not support withdrawals") return nil, errors.New("clique does not support withdrawals")
} }
@ -592,6 +595,9 @@ func (c *Clique) FinalizeAndAssemble(ctx context.Context, chain consensus.ChainH
// Assign the final state root to header. // Assign the final state root to header.
header.Root = state.IntermediateRoot(chain.Config().IsEIP158(header.Number)) header.Root = state.IntermediateRoot(chain.Config().IsEIP158(header.Number))
if onFinalizeAccessList != nil {
panic("access list embedding not enabled for clique consensus")
}
// Assemble and return the final block for sealing. // Assemble and return the final block for sealing.
return types.NewBlock(header, &types.Body{Transactions: body.Transactions}, receipts, trie.NewStackTrie(nil)), nil return types.NewBlock(header, &types.Body{Transactions: body.Transactions}, receipts, trie.NewStackTrie(nil)), nil
} }

View file

@ -21,6 +21,8 @@ import (
"context" "context"
"math/big" "math/big"
"github.com/ethereum/go-ethereum/core/types/bal"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
@ -86,14 +88,14 @@ type Engine interface {
// //
// Note: The state database might be updated to reflect any consensus rules // Note: The state database might be updated to reflect any consensus rules
// that happen at finalization (e.g. block rewards). // that happen at finalization (e.g. block rewards).
Finalize(chain ChainHeaderReader, header *types.Header, state vm.StateDB, body *types.Body) Finalize(chain ChainHeaderReader, header *types.Header, state vm.StateDB, body *types.Body) bal.StateMutations
// FinalizeAndAssemble runs any post-transaction state modifications (e.g. block // FinalizeAndAssemble runs any post-transaction state modifications (e.g. block
// rewards or process withdrawals) and assembles the final block. // rewards or process withdrawals) and assembles the final block.
// //
// Note: The block header and state database might be updated to reflect any // Note: The block header and state database might be updated to reflect any
// consensus rules that happen at finalization (e.g. block rewards). // consensus rules that happen at finalization (e.g. block rewards).
FinalizeAndAssemble(ctx context.Context, chain ChainHeaderReader, header *types.Header, state *state.StateDB, body *types.Body, receipts []*types.Receipt) (*types.Block, error) FinalizeAndAssemble(ctx context.Context, chain ChainHeaderReader, header *types.Header, state *state.StateDB, body *types.Body, receipts []*types.Receipt, onFinalizeAccessList func(mutations bal.StateMutations) *bal.BlockAccessList) (*types.Block, error)
// Seal generates a new sealing request for the given input block and pushes // Seal generates a new sealing request for the given input block and pushes
// the result into the given channel. // the result into the given channel.

View file

@ -23,6 +23,8 @@ import (
"math/big" "math/big"
"time" "time"
"github.com/ethereum/go-ethereum/core/types/bal"
mapset "github.com/deckarep/golang-set/v2" mapset "github.com/deckarep/golang-set/v2"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/consensus" "github.com/ethereum/go-ethereum/consensus"
@ -507,14 +509,15 @@ func (ethash *Ethash) Prepare(chain consensus.ChainHeaderReader, header *types.H
} }
// Finalize implements consensus.Engine, accumulating the block and uncle rewards. // Finalize implements consensus.Engine, accumulating the block and uncle rewards.
func (ethash *Ethash) Finalize(chain consensus.ChainHeaderReader, header *types.Header, state vm.StateDB, body *types.Body) { func (ethash *Ethash) Finalize(chain consensus.ChainHeaderReader, header *types.Header, state vm.StateDB, body *types.Body) (mut bal.StateMutations) {
// Accumulate any block and uncle rewards // Accumulate any block and uncle rewards
accumulateRewards(chain.Config(), state, header, body.Uncles) accumulateRewards(chain.Config(), state, header, body.Uncles)
return
} }
// FinalizeAndAssemble implements consensus.Engine, accumulating the block and // FinalizeAndAssemble implements consensus.Engine, accumulating the block and
// uncle rewards, setting the final state and assembling the block. // uncle rewards, setting the final state and assembling the block.
func (ethash *Ethash) FinalizeAndAssemble(ctx context.Context, chain consensus.ChainHeaderReader, header *types.Header, state *state.StateDB, body *types.Body, receipts []*types.Receipt) (*types.Block, error) { func (ethash *Ethash) FinalizeAndAssemble(ctx context.Context, chain consensus.ChainHeaderReader, header *types.Header, state *state.StateDB, body *types.Body, receipts []*types.Receipt, onFinalizeAccessList func(withdrawalMut bal.StateMutations) *bal.BlockAccessList) (*types.Block, error) {
if len(body.Withdrawals) > 0 { if len(body.Withdrawals) > 0 {
return nil, errors.New("ethash does not support withdrawals") return nil, errors.New("ethash does not support withdrawals")
} }
@ -524,6 +527,9 @@ func (ethash *Ethash) FinalizeAndAssemble(ctx context.Context, chain consensus.C
// Assign the final state root to header. // Assign the final state root to header.
header.Root = state.IntermediateRoot(chain.Config().IsEIP158(header.Number)) header.Root = state.IntermediateRoot(chain.Config().IsEIP158(header.Number))
if onFinalizeAccessList != nil {
panic("access list embedding not supported for ethash consenus")
}
// Header seems complete, assemble into a block and return // Header seems complete, assemble into a block and return
return types.NewBlock(header, &types.Body{Transactions: body.Transactions, Uncles: body.Uncles}, receipts, trie.NewStackTrie(nil)), nil return types.NewBlock(header, &types.Body{Transactions: body.Transactions, Uncles: body.Uncles}, receipts, trie.NewStackTrie(nil)), nil
} }

View file

@ -69,6 +69,8 @@ func latestBlobConfig(cfg *params.ChainConfig, time uint64) (BlobConfig, error)
bc = s.BPO4 bc = s.BPO4
case cfg.IsBPO3(london, time) && s.BPO3 != nil: case cfg.IsBPO3(london, time) && s.BPO3 != nil:
bc = s.BPO3 bc = s.BPO3
case cfg.IsAmsterdam(london, time) && s.Amsterdam != nil:
bc = s.Amsterdam
case cfg.IsBPO2(london, time) && s.BPO2 != nil: case cfg.IsBPO2(london, time) && s.BPO2 != nil:
bc = s.BPO2 bc = s.BPO2
case cfg.IsBPO1(london, time) && s.BPO1 != nil: case cfg.IsBPO1(london, time) && s.BPO1 != nil:

View file

@ -19,7 +19,6 @@ package core
import ( import (
"errors" "errors"
"fmt" "fmt"
"github.com/ethereum/go-ethereum/consensus" "github.com/ethereum/go-ethereum/consensus"
"github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
@ -111,6 +110,30 @@ func (v *BlockValidator) ValidateBody(block *types.Block) error {
} }
} }
// block access lists must be present after the Amsterdam hard fork
if v.config.IsAmsterdam(block.Number(), block.Time()) {
if block.Header().BlockAccessListHash == nil {
// TODO: verify that this check isn't also done elsewhere
return fmt.Errorf("block access list hash not set in header")
}
if block.AccessList() != nil {
if *block.Header().BlockAccessListHash != block.AccessList().Hash() {
return fmt.Errorf("access list hash mismatch. local: %x. remote: %x\n", block.AccessList().Hash(), *block.Header().BlockAccessListHash)
} else if err := block.AccessList().Validate(len(block.Transactions())); err != nil {
return fmt.Errorf("invalid block access list: %v", err)
}
} else {
//panic("TODO: implement local access list construction path if importing a block without an access list")
}
} else {
// if experimental.bal is not enabled, block headers cannot have access list hash and bodies cannot have access lists.
if block.AccessList() != nil {
return fmt.Errorf("access list not allowed in block body if not in amsterdam or experimental.bal is set")
} else if block.Header().BlockAccessListHash != nil {
return fmt.Errorf("access list hash in block header not allowed when experimental.bal is set")
}
}
// Ancestor block must be known. // Ancestor block must be known.
if !v.bc.HasBlockAndState(block.ParentHash(), block.NumberU64()-1) { if !v.bc.HasBlockAndState(block.ParentHash(), block.NumberU64()-1) {
if !v.bc.HasBlock(block.ParentHash(), block.NumberU64()-1) { if !v.bc.HasBlock(block.ParentHash(), block.NumberU64()-1) {
@ -123,7 +146,7 @@ func (v *BlockValidator) ValidateBody(block *types.Block) error {
// ValidateState validates the various changes that happen after a state transition, // ValidateState validates the various changes that happen after a state transition,
// such as amount of used gas, the receipt roots and the state root itself. // such as amount of used gas, the receipt roots and the state root itself.
func (v *BlockValidator) ValidateState(block *types.Block, statedb *state.StateDB, res *ProcessResult, stateless bool) error { func (v *BlockValidator) ValidateState(block *types.Block, stateTransition state.BlockStateTransition, res *ProcessResult, stateless bool) error {
if res == nil { if res == nil {
return errors.New("nil ProcessResult value") return errors.New("nil ProcessResult value")
} }
@ -160,10 +183,11 @@ func (v *BlockValidator) ValidateState(block *types.Block, statedb *state.StateD
} else if res.Requests != nil { } else if res.Requests != nil {
return errors.New("block has requests before prague fork") return errors.New("block has requests before prague fork")
} }
// Validate the state root against the received state root and throw // Validate the state root against the received state root and throw
// an error if they don't match. // an error if they don't match.
if root := statedb.IntermediateRoot(v.config.IsEIP158(header.Number)); header.Root != root { if root := stateTransition.IntermediateRoot(v.config.IsEIP158(header.Number)); header.Root != root {
return fmt.Errorf("invalid merkle root (remote: %x local: %x) dberr: %w", header.Root, root, statedb.Error()) return fmt.Errorf("invalid merkle root (remote: %x local: %x) dberr: %w", header.Root, root, stateTransition.Error())
} }
return nil return nil
} }

View file

@ -31,6 +31,8 @@ import (
"sync/atomic" "sync/atomic"
"time" "time"
"github.com/ethereum/go-ethereum/core/types/bal"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/lru" "github.com/ethereum/go-ethereum/common/lru"
"github.com/ethereum/go-ethereum/common/mclock" "github.com/ethereum/go-ethereum/common/mclock"
@ -101,6 +103,21 @@ var (
blockExecutionTimer = metrics.NewRegisteredResettingTimer("chain/execution", nil) blockExecutionTimer = metrics.NewRegisteredResettingTimer("chain/execution", nil)
blockWriteTimer = metrics.NewRegisteredResettingTimer("chain/write", nil) blockWriteTimer = metrics.NewRegisteredResettingTimer("chain/write", nil)
// BALspecific timers
blockPreprocessingTimer = metrics.NewRegisteredResettingTimer("chain/preprocess", nil)
txExecutionTimer = metrics.NewRegisteredResettingTimer("chain/txexecution", nil)
stateTrieHashTimer = metrics.NewRegisteredResettingTimer("chain/statetriehash", nil)
accountTriesUpdateTimer = metrics.NewRegisteredResettingTimer("chain/accounttriesupdate", nil)
stateTriePrefetchTimer = metrics.NewRegisteredResettingTimer("chain/statetrieprefetch", nil)
stateTrieUpdateTimer = metrics.NewRegisteredResettingTimer("chain/statetrieupdate", nil)
originStorageLoadTimer = metrics.NewRegisteredResettingTimer("chain/originstorageload", nil)
stateRootComputeTimer = metrics.NewRegisteredResettingTimer("chain/staterootcompute", nil)
stateCommitTimer = metrics.NewRegisteredResettingTimer("chain/statetriecommit", nil)
blockPostprocessingTimer = metrics.NewRegisteredResettingTimer("chain/postprocess", nil)
blockReorgMeter = metrics.NewRegisteredMeter("chain/reorg/executes", nil) blockReorgMeter = metrics.NewRegisteredMeter("chain/reorg/executes", nil)
blockReorgAddMeter = metrics.NewRegisteredMeter("chain/reorg/add", nil) blockReorgAddMeter = metrics.NewRegisteredMeter("chain/reorg/add", nil)
blockReorgDropMeter = metrics.NewRegisteredMeter("chain/reorg/drop", nil) blockReorgDropMeter = metrics.NewRegisteredMeter("chain/reorg/drop", nil)
@ -161,6 +178,12 @@ const (
BlockChainVersion uint64 = 9 BlockChainVersion uint64 = 9
) )
// BAL execution modes selecting how blocks that carry an EIP-7928 block
// access list are executed (see BlockChainConfig.BALExecutionMode).
const (
	// BALExecutionModeFull runs the optimized BAL path with batched state IO.
	BALExecutionModeFull = iota
	// BALExecutionModeNoBatchIO runs the optimized BAL path but disables
	// batched (async) state reads.
	BALExecutionModeNoBatchIO
	// BALExecutionModeSequential disables the BAL-optimized path entirely,
	// falling back to sequential processing.
	BALExecutionModeSequential
)
// BlockChainConfig contains the configuration of the BlockChain object. // BlockChainConfig contains the configuration of the BlockChain object.
type BlockChainConfig struct { type BlockChainConfig struct {
// Trie database related options // Trie database related options
@ -220,6 +243,9 @@ type BlockChainConfig struct {
// Execution configs // Execution configs
StatelessSelfValidation bool // Generate execution witnesses and self-check against them (testing purpose) StatelessSelfValidation bool // Generate execution witnesses and self-check against them (testing purpose)
EnableWitnessStats bool // Whether trie access statistics collection is enabled EnableWitnessStats bool // Whether trie access statistics collection is enabled
// TODO clean this config up to use defined constants...
BALExecutionMode int
} }
// DefaultConfig returns the default config. // DefaultConfig returns the default config.
@ -362,6 +388,7 @@ type BlockChain struct {
validator Validator // Block and state validator interface validator Validator // Block and state validator interface
prefetcher Prefetcher prefetcher Prefetcher
processor Processor // Block transaction processor interface processor Processor // Block transaction processor interface
parallelProcessor ParallelStateProcessor
logger *tracing.Hooks logger *tracing.Hooks
stateSizer *state.SizeTracker // State size tracking stateSizer *state.SizeTracker // State size tracking
@ -425,6 +452,7 @@ func NewBlockChain(db ethdb.Database, genesis *Genesis, engine consensus.Engine,
bc.validator = NewBlockValidator(chainConfig, bc) bc.validator = NewBlockValidator(chainConfig, bc)
bc.prefetcher = newStatePrefetcher(chainConfig, bc.hc) bc.prefetcher = newStatePrefetcher(chainConfig, bc.hc)
bc.processor = NewStateProcessor(bc.hc) bc.processor = NewStateProcessor(bc.hc)
bc.parallelProcessor = NewParallelStateProcessor(bc.hc, &cfg.VmConfig)
genesisHeader := bc.GetHeaderByNumber(0) genesisHeader := bc.GetHeaderByNumber(0)
if genesisHeader == nil { if genesisHeader == nil {
@ -572,6 +600,113 @@ func NewBlockChain(db ethdb.Database, genesis *Genesis, engine consensus.Engine,
} }
return bc, nil return bc, nil
} }
// processBlockWithAccessList executes and validates a block that carries an
// EIP-7928 block access list. State declared in the access list is prefetched
// (batched across runtime.NumCPU() workers unless batch IO is disabled), the
// block is executed with the parallel processor, validated, and written to
// the database. When setHead is true the block also becomes the new chain
// head. Execution statistics are returned alongside the processing result.
func (bc *BlockChain) processBlockWithAccessList(parentRoot common.Hash, block *types.Block, setHead bool) (procRes *blockProcessingResult, blockEndErr error) {
	var (
		startTime = time.Now()
		procTime  time.Duration
	)
	// Batched state IO is the default; only the NoBatchIO mode disables it.
	useAsyncReads := bc.cfg.BALExecutionMode != BALExecutionModeNoBatchIO

	al := block.AccessList() // TODO: make the return of this method not be a pointer
	accessListReader := bal.NewAccessListReader(*al)
	prefetchReader, err := bc.statedb.ReaderEIP7928(parentRoot, accessListReader.StorageKeys(useAsyncReads), runtime.NumCPU())
	if err != nil {
		return nil, err
	}
	stateTransition, err := state.NewBALStateTransition(block, prefetchReader, bc.statedb, parentRoot)
	if err != nil {
		return nil, err
	}
	statedb, err := state.NewWithReader(parentRoot, bc.statedb, prefetchReader)
	if err != nil {
		// This error was previously dropped; a failure here would have
		// surfaced later as a nil statedb.
		return nil, err
	}
	if bc.logger != nil && bc.logger.OnBlockStart != nil {
		bc.logger.OnBlockStart(tracing.BlockEvent{
			Block:     block,
			Finalized: bc.CurrentFinalBlock(),
			Safe:      bc.CurrentSafeBlock(),
		})
	}
	if bc.logger != nil && bc.logger.OnBlockEnd != nil {
		defer func() {
			bc.logger.OnBlockEnd(blockEndErr)
		}()
	}
	res, err := bc.parallelProcessor.Process(block, stateTransition, statedb, bc.cfg.VmConfig)
	if err != nil {
		return nil, err
	}
	if err := bc.validator.ValidateState(block, stateTransition, res.ProcessResult, false); err != nil {
		return nil, err
	}
	procTime = time.Since(startTime)

	// Write the block to the chain and get the status.
	writeStart := time.Now()
	var status WriteStatus
	if !setHead {
		// Don't set the head, only insert the block.
		err = bc.writeBlockWithState(block, res.ProcessResult.Receipts, stateTransition)
	} else {
		status, err = bc.writeBlockAndSetHead(block, res.ProcessResult.Receipts, res.ProcessResult.Logs, stateTransition, false)
	}
	if err != nil {
		return nil, err
	}
	writeTime := time.Since(writeStart)

	var stats ExecuteStats
	/*
		// TODO: implement the gathering of this data
		stats.AccountReads = statedb.AccountReads // Account reads are complete(in processing)
		stats.StorageReads = statedb.StorageReads // Storage reads are complete(in processing)
		stats.AccountUpdates = statedb.AccountUpdates // Account updates are complete(in validation)
		stats.StorageUpdates = statedb.StorageUpdates // Storage updates are complete(in validation)
		stats.AccountHashes = statedb.AccountHashes // Account hashes are complete(in validation)
		stats.CodeReads = statedb.CodeReads
		stats.AccountLoaded = statedb.AccountLoaded
		stats.AccountUpdated = statedb.AccountUpdated
		stats.AccountDeleted = statedb.AccountDeleted
		stats.StorageLoaded = statedb.StorageLoaded
		stats.StorageUpdated = int(statedb.StorageUpdated.Load())
		stats.StorageDeleted = int(statedb.StorageDeleted.Load())
		stats.CodeLoaded = statedb.CodeLoaded
		stats.CodeLoadBytes = statedb.CodeLoadBytes
		stats.Execution = ptime - (statedb.AccountReads + statedb.StorageReads + statedb.CodeReads) // The time spent on EVM processing
		stats.Validation = vtime - (statedb.AccountHashes + statedb.AccountUpdates + statedb.StorageUpdates) // The time spent on block validation
	*/
	// Update the metrics touched during block commit.
	stats.AccountCommits = stateTransition.Metrics().AccountCommits
	stats.StorageCommits = stateTransition.Metrics().StorageCommits
	stats.SnapshotCommit = stateTransition.Metrics().SnapshotCommits
	stats.TrieDBCommit = stateTransition.Metrics().TrieDBCommits
	// stats.StateReadCacheStats = whichReader.GetStats()
	// ^ TODO fix this

	elapsed := time.Since(startTime) + 1 // prevent zero division
	stats.TotalTime = elapsed
	stats.MgasPerSecond = float64(res.ProcessResult.GasUsed) * 1000 / float64(elapsed)
	stats.BlockWrite = writeTime
	stats.balTransitionStats = res.StateTransitionMetrics

	return &blockProcessingResult{
		usedGas:  res.ProcessResult.GasUsed,
		procTime: procTime,
		status:   status,
		witness:  nil,
		stats:    &stats,
	}, nil
}
func (bc *BlockChain) setupSnapshot() { func (bc *BlockChain) setupSnapshot() {
// Short circuit if the chain is established with path scheme, as the // Short circuit if the chain is established with path scheme, as the
@ -1639,7 +1774,7 @@ func (bc *BlockChain) writeKnownBlock(block *types.Block) error {
// writeBlockWithState writes block, metadata and corresponding state data to the // writeBlockWithState writes block, metadata and corresponding state data to the
// database. // database.
func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types.Receipt, statedb *state.StateDB) error { func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types.Receipt, transition state.BlockStateTransition) error {
if !bc.HasHeader(block.ParentHash(), block.NumberU64()-1) { if !bc.HasHeader(block.ParentHash(), block.NumberU64()-1) {
return consensus.ErrUnknownAncestor return consensus.ErrUnknownAncestor
} }
@ -1655,7 +1790,7 @@ func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types.
rawdb.WriteBlock(batch, block) rawdb.WriteBlock(batch, block)
rawdb.WriteReceipts(batch, block.Hash(), block.NumberU64(), receipts) rawdb.WriteReceipts(batch, block.Hash(), block.NumberU64(), receipts)
rawdb.WritePreimages(batch, statedb.Preimages()) rawdb.WritePreimages(batch, transition.Preimages())
if err := batch.Write(); err != nil { if err := batch.Write(); err != nil {
log.Crit("Failed to write block into disk", "err", err) log.Crit("Failed to write block into disk", "err", err)
} }
@ -1670,7 +1805,7 @@ func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types.
hasStateSizer = bc.stateSizer != nil hasStateSizer = bc.stateSizer != nil
) )
if hasStateHook || hasStateSizer { if hasStateHook || hasStateSizer {
r, update, err := statedb.CommitWithUpdate(block.NumberU64(), isEIP158, isCancun) r, update, err := transition.CommitWithUpdate(block.NumberU64(), isEIP158, isCancun)
if err != nil { if err != nil {
return err return err
} }
@ -1686,7 +1821,7 @@ func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types.
} }
root = r root = r
} else { } else {
root, err = statedb.Commit(block.NumberU64(), isEIP158, isCancun) root, err = transition.Commit(block.NumberU64(), isEIP158, isCancun)
if err != nil { if err != nil {
return err return err
} }
@ -1753,7 +1888,7 @@ func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types.
// writeBlockAndSetHead is the internal implementation of WriteBlockAndSetHead. // writeBlockAndSetHead is the internal implementation of WriteBlockAndSetHead.
// This function expects the chain mutex to be held. // This function expects the chain mutex to be held.
func (bc *BlockChain) writeBlockAndSetHead(block *types.Block, receipts []*types.Receipt, logs []*types.Log, state *state.StateDB, emitHeadEvent bool) (status WriteStatus, err error) { func (bc *BlockChain) writeBlockAndSetHead(block *types.Block, receipts []*types.Receipt, logs []*types.Log, state state.BlockStateTransition, emitHeadEvent bool) (status WriteStatus, err error) {
if err := bc.writeBlockWithState(block, receipts, state); err != nil { if err := bc.writeBlockWithState(block, receipts, state); err != nil {
return NonStatTy, err return NonStatTy, err
} }
@ -2002,7 +2137,12 @@ func (bc *BlockChain) insertChain(ctx context.Context, chain types.Blocks, setHe
if err != nil { if err != nil {
return nil, it.index, err return nil, it.index, err
} }
blockHasAccessList := block.AccessList() != nil
if blockHasAccessList && bc.cfg.BALExecutionMode != BALExecutionModeSequential {
res.stats.reportBALMetrics()
} else {
res.stats.reportMetrics() res.stats.reportMetrics()
}
// Log slow block only if a single block is inserted (usually after the // Log slow block only if a single block is inserted (usually after the
// initial sync) to not overwhelm the users. // initial sync) to not overwhelm the users.
@ -2111,6 +2251,16 @@ type ExecuteConfig struct {
// ProcessBlock executes and validates the given block. If there was no error // ProcessBlock executes and validates the given block. If there was no error
// it writes the block and associated state to database. // it writes the block and associated state to database.
func (bc *BlockChain) ProcessBlock(ctx context.Context, parentRoot common.Hash, block *types.Block, config ExecuteConfig) (result *blockProcessingResult, blockEndErr error) { func (bc *BlockChain) ProcessBlock(ctx context.Context, parentRoot common.Hash, block *types.Block, config ExecuteConfig) (result *blockProcessingResult, blockEndErr error) {
isAmsterdam := bc.chainConfig.IsAmsterdam(block.Number(), block.Time())
// TODO: need to check that the block is also postcancun if it contained an access list?
// this should be checked during decoding (?)
blockHasAccessList := block.AccessList() != nil
// optimized execution path for blocks which contain BALs
if blockHasAccessList && bc.cfg.BALExecutionMode != BALExecutionModeSequential {
return bc.processBlockWithAccessList(parentRoot, block, config.WriteHead)
}
var ( var (
err error err error
startTime = time.Now() startTime = time.Now()
@ -2121,17 +2271,30 @@ func (bc *BlockChain) ProcessBlock(ctx context.Context, parentRoot common.Hash,
defer interrupt.Store(true) // terminate the prefetch at the end defer interrupt.Store(true) // terminate the prefetch at the end
if bc.cfg.NoPrefetch { if bc.cfg.NoPrefetch {
if isAmsterdam {
reader, err := sdb.Reader(parentRoot)
if err != nil {
return nil, err
}
readerTracker := state.NewReaderWithTracker(reader)
statedb, err = state.NewWithReader(parentRoot, sdb, readerTracker)
if err != nil {
return nil, err
}
} else {
statedb, err = state.New(parentRoot, sdb) statedb, err = state.New(parentRoot, sdb)
if err != nil { if err != nil {
return nil, err return nil, err
} }
}
} else { } else {
// If prefetching is enabled, run that against the current state to pre-cache // If prefetching is enabled, run that against the current state to pre-cache
// transactions and probabilistically some of the account/storage trie nodes. // transactions and probabilistically some of the account/storage trie nodes.
// //
// Note: the main processor and prefetcher share the same reader with a local // Note: the main processor and prefetcher share the same reader with a local
// cache for mitigating the overhead of state access. // cache for mitigating the overhead of state access.
prefetch, process, err := sdb.ReadersWithCacheStats(parentRoot) prefetch, process, err := sdb.ReadersWithCache(parentRoot)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -2139,6 +2302,9 @@ func (bc *BlockChain) ProcessBlock(ctx context.Context, parentRoot common.Hash,
if err != nil { if err != nil {
return nil, err return nil, err
} }
if isAmsterdam {
process = state.NewReaderWithTracker(process)
}
statedb, err = state.NewWithReader(parentRoot, sdb, process) statedb, err = state.NewWithReader(parentRoot, sdb, process)
if err != nil { if err != nil {
return nil, err return nil, err
@ -2158,7 +2324,10 @@ func (bc *BlockChain) ProcessBlock(ctx context.Context, parentRoot common.Hash,
// Disable tracing for prefetcher executions. // Disable tracing for prefetcher executions.
vmCfg := bc.cfg.VmConfig vmCfg := bc.cfg.VmConfig
vmCfg.Tracer = nil vmCfg.Tracer = nil
if block.AccessList() == nil {
// only use the state prefetcher for non-BAL blocks.
bc.prefetcher.Prefetch(block, throwaway, vmCfg, &interrupt) bc.prefetcher.Prefetch(block, throwaway, vmCfg, &interrupt)
}
blockPrefetchExecuteTimer.Update(time.Since(start)) blockPrefetchExecuteTimer.Update(time.Since(start))
if interrupt.Load() { if interrupt.Load() {
@ -2187,6 +2356,7 @@ func (bc *BlockChain) ProcessBlock(ctx context.Context, parentRoot common.Hash,
witnessStats = stateless.NewWitnessStats() witnessStats = stateless.NewWitnessStats()
} }
} }
statedb.StartPrefetcher("chain", witness, witnessStats) statedb.StartPrefetcher("chain", witness, witnessStats)
defer statedb.StopPrefetcher() defer statedb.StopPrefetcher()
} }
@ -2207,16 +2377,19 @@ func (bc *BlockChain) ProcessBlock(ctx context.Context, parentRoot common.Hash,
} }
} }
var res *ProcessResult
var ptime, vtime time.Duration
// Process block using the parent state as reference point // Process block using the parent state as reference point
pstart := time.Now() pstart := time.Now()
pctx, _, spanEnd := telemetry.StartSpan(ctx, "bc.processor.Process") pctx, _, spanEnd := telemetry.StartSpan(ctx, "bc.processor.Process")
res, err := bc.processor.Process(pctx, block, statedb, bc.cfg.VmConfig) res, err = bc.processor.Process(pctx, block, statedb, bc.cfg.VmConfig)
spanEnd(&err) spanEnd(&err)
if err != nil { if err != nil {
bc.reportBadBlock(block, res, err) bc.reportBadBlock(block, res, err)
return nil, err return nil, err
} }
ptime := time.Since(pstart) ptime = time.Since(pstart)
vstart := time.Now() vstart := time.Now()
_, _, spanEnd = telemetry.StartSpan(ctx, "bc.validator.ValidateState") _, _, spanEnd = telemetry.StartSpan(ctx, "bc.validator.ValidateState")
@ -2226,7 +2399,28 @@ func (bc *BlockChain) ProcessBlock(ctx context.Context, parentRoot common.Hash,
bc.reportBadBlock(block, res, err) bc.reportBadBlock(block, res, err)
return nil, err return nil, err
} }
vtime := time.Since(vstart) vtime = time.Since(vstart)
if isAmsterdam {
computedAccessList := res.AccessList.ToEncodingObj()
computedAccessListHash := computedAccessList.Hash()
if *block.Header().BlockAccessListHash != computedAccessListHash {
//fmt.Printf("remote:\n%s\nlocal:\n%s\n", block.Body().AccessList.JSONString(), computedAccessList.JSONString())
err := fmt.Errorf("block header access list hash mismatch with computed (header=%x computed=%x)", *block.Header().BlockAccessListHash, computedAccessListHash)
bc.reportBadBlock(block, res, err)
return nil, err
}
if block.AccessList() == nil {
// attach the computed access list to the block so it gets persisted
// when the block is written to disk
block = block.WithAccessList(computedAccessList)
} else if block.AccessList().Hash() != computedAccessListHash {
err := fmt.Errorf("block access list hash mismatch (remote=%x computed=%x)", block.AccessList().Hash(), computedAccessListHash)
bc.reportBadBlock(block, res, err)
return nil, err
}
}
// If witnesses was generated and stateless self-validation requested, do // If witnesses was generated and stateless self-validation requested, do
// that now. Self validation should *never* run in production, it's more of // that now. Self validation should *never* run in production, it's more of
@ -2815,6 +3009,10 @@ func (bc *BlockChain) reportBadBlock(block *types.Block, res *ProcessResult, err
log.Error(summarizeBadBlock(block, receipts, bc.Config(), err)) log.Error(summarizeBadBlock(block, receipts, bc.Config(), err))
} }
// reportBALBlock is a placeholder for reporting invalid blocks encountered on
// the BAL (EIP-7928) execution path. It is currently an empty stub: all
// parameters are accepted but ignored.
// NOTE(review): presumably intended to mirror reportBadBlock — confirm and
// implement or remove.
func (bc *BlockChain) reportBALBlock(block *types.Block, res *ProcessResult, err error) {
}
// logForkReadiness will write a log when a future fork is scheduled, but not // logForkReadiness will write a log when a future fork is scheduled, but not
// active. This is useful so operators know their client is ready for the fork. // active. This is useful so operators know their client is ready for the fork.
func (bc *BlockChain) logForkReadiness(block *types.Block) { func (bc *BlockChain) logForkReadiness(block *types.Block) {

View file

@ -60,6 +60,9 @@ type ExecuteStats struct {
// Cache hit rates // Cache hit rates
StateReadCacheStats state.ReaderStats StateReadCacheStats state.ReaderStats
StatePrefetchCacheStats state.ReaderStats StatePrefetchCacheStats state.ReaderStats
// Stats specific to BAL state update
balTransitionStats *state.BALStateTransitionMetrics
} }
// reportMetrics uploads execution statistics to the metrics system. // reportMetrics uploads execution statistics to the metrics system.
@ -253,3 +256,53 @@ func (s *ExecuteStats) logSlow(block *types.Block, slowBlockThreshold time.Durat
} }
log.Warn(string(jsonBytes)) log.Warn(string(jsonBytes))
} }
// reportBALMetrics uploads execution statistics gathered on the BAL
// (EIP-7928) parallel execution path to the metrics system. It mirrors
// reportMetrics, but reports only the subset of timers that can be
// meaningfully measured under parallel transaction execution.
func (s *ExecuteStats) reportBALMetrics() {
	/*
		if s.AccountLoaded != 0 {
			accountReadTimer.Update(s.AccountReads)
			accountReadSingleTimer.Update(s.AccountReads / time.Duration(s.AccountLoaded))
		}
		if s.StorageLoaded != 0 {
			storageReadTimer.Update(s.StorageReads)
			storageReadSingleTimer.Update(s.StorageReads / time.Duration(s.StorageLoaded))
		}
		if s.CodeLoaded != 0 {
			codeReadTimer.Update(s.CodeReads)
			codeReadSingleTimer.Update(s.CodeReads / time.Duration(s.CodeLoaded))
			codeReadBytesTimer.Update(time.Duration(s.CodeLoadBytes))
		}
		// TODO: implement these ^
	*/
	//accountUpdateTimer.Update(s.AccountUpdates) // Account updates are complete(in validation)
	//storageUpdateTimer.Update(s.StorageUpdates) // Storage updates are complete(in validation)
	//accountHashTimer.Update(s.AccountHashes) // Account hashes are complete(in validation)
	accountCommitTimer.Update(s.AccountCommits) // Account commits are complete, we can mark them
	storageCommitTimer.Update(s.StorageCommits) // Storage commits are complete, we can mark them
	// Not every producer of ExecuteStats populates balTransitionStats (e.g.
	// prepareExecResult omits StateTransitionMetrics), so guard against nil
	// to avoid a panic when reporting.
	if s.balTransitionStats != nil {
		stateTriePrefetchTimer.Update(s.balTransitionStats.StatePrefetch)
		accountTriesUpdateTimer.Update(s.balTransitionStats.AccountUpdate)
		stateTrieUpdateTimer.Update(s.balTransitionStats.StateUpdate)
		stateTrieHashTimer.Update(s.balTransitionStats.StateHash)
		stateRootComputeTimer.Update(s.balTransitionStats.AccountUpdate + s.balTransitionStats.StateUpdate + s.balTransitionStats.StateHash)
	}
	//blockExecutionTimer.Update(s.Execution) // The time spent on EVM processing
	// ^basically impossible to get this metric with parallel execution
	//blockValidationTimer.Update(s.Validation) // The time spent on block validation
	//blockCrossValidationTimer.Update(s.CrossValidation) // The time spent on stateless cross validation
	snapshotCommitTimer.Update(s.SnapshotCommit) // Snapshot commits are complete, we can mark them
	triedbCommitTimer.Update(s.TrieDBCommit)     // Trie database commits are complete, we can mark them
	blockWriteTimer.Update(s.BlockWrite)         // The time spent on block write
	blockInsertTimer.Update(s.TotalTime)         // The total time spent on block execution

	chainMgaspsMeter.Update(time.Duration(s.MgasPerSecond)) // TODO(rjl493456442) generalize the ResettingTimer

	// Cache hit rates
	accountCacheHitMeter.Mark(s.StateReadCacheStats.StateStats.AccountCacheHit)
	accountCacheMissMeter.Mark(s.StateReadCacheStats.StateStats.AccountCacheMiss)
	storageCacheHitMeter.Mark(s.StateReadCacheStats.StateStats.StorageCacheHit)
	storageCacheMissMeter.Mark(s.StateReadCacheStats.StateStats.StorageCacheMiss)
}

View file

@ -165,7 +165,7 @@ func testBlockChainImport(chain types.Blocks, blockchain *BlockChain) error {
blockchain.reportBadBlock(block, res, err) blockchain.reportBadBlock(block, res, err)
return err return err
} }
err = blockchain.validator.ValidateState(block, statedb, res, false) err = blockchain.validator.ValidateState(block, statedb, res, true, false)
if err != nil { if err != nil {
blockchain.reportBadBlock(block, res, err) blockchain.reportBadBlock(block, res, err)
return err return err

View file

@ -118,7 +118,8 @@ func (b *BlockGen) addTx(bc *BlockChain, vmConfig vm.Config, tx *types.Transacti
evm = vm.NewEVM(blockContext, b.statedb, b.cm.config, vmConfig) evm = vm.NewEVM(blockContext, b.statedb, b.cm.config, vmConfig)
) )
b.statedb.SetTxContext(tx.Hash(), len(b.txs)) b.statedb.SetTxContext(tx.Hash(), len(b.txs))
receipt, err := ApplyTransaction(evm, b.gasPool, b.statedb, b.header, tx) // TODO: integrate BAL with chain makers...
_, receipt, err := ApplyTransaction(evm, b.gasPool, b.statedb, b.header, tx)
if err != nil { if err != nil {
panic(err) panic(err)
} }
@ -330,11 +331,11 @@ func (b *BlockGen) collectRequests(readonly bool) (requests [][]byte) {
blockContext := NewEVMBlockContext(b.header, b.cm, &b.header.Coinbase) blockContext := NewEVMBlockContext(b.header, b.cm, &b.header.Coinbase)
evm := vm.NewEVM(blockContext, statedb, b.cm.config, vm.Config{}) evm := vm.NewEVM(blockContext, statedb, b.cm.config, vm.Config{})
// EIP-7002 // EIP-7002
if err := ProcessWithdrawalQueue(&requests, evm); err != nil { if _, err := ProcessWithdrawalQueue(&requests, evm); err != nil {
panic(fmt.Sprintf("could not process withdrawal requests: %v", err)) panic(fmt.Sprintf("could not process withdrawal requests: %v", err))
} }
// EIP-7251 // EIP-7251
if err := ProcessConsolidationQueue(&requests, evm); err != nil { if _, err := ProcessConsolidationQueue(&requests, evm); err != nil {
panic(fmt.Sprintf("could not process consolidation requests: %v", err)) panic(fmt.Sprintf("could not process consolidation requests: %v", err))
} }
} }
@ -412,7 +413,7 @@ func GenerateChain(config *params.ChainConfig, parent *types.Block, engine conse
} }
body := types.Body{Transactions: b.txs, Uncles: b.uncles, Withdrawals: b.withdrawals} body := types.Body{Transactions: b.txs, Uncles: b.uncles, Withdrawals: b.withdrawals}
block, err := b.engine.FinalizeAndAssemble(context.Background(), cm, b.header, statedb, &body, b.receipts) block, err := b.engine.FinalizeAndAssemble(context.Background(), cm, b.header, statedb, &body, b.receipts, nil)
if err != nil { if err != nil {
panic(err) panic(err)
} }

View file

@ -34,6 +34,7 @@ func (g Genesis) MarshalJSON() ([]byte, error) {
BaseFee *math.HexOrDecimal256 `json:"baseFeePerGas"` BaseFee *math.HexOrDecimal256 `json:"baseFeePerGas"`
ExcessBlobGas *math.HexOrDecimal64 `json:"excessBlobGas"` ExcessBlobGas *math.HexOrDecimal64 `json:"excessBlobGas"`
BlobGasUsed *math.HexOrDecimal64 `json:"blobGasUsed"` BlobGasUsed *math.HexOrDecimal64 `json:"blobGasUsed"`
BlockAccessListHash *common.Hash `json:"blockAccessListHash,omitempty"`
SlotNumber *uint64 `json:"slotNumber"` SlotNumber *uint64 `json:"slotNumber"`
} }
var enc Genesis var enc Genesis
@ -57,6 +58,7 @@ func (g Genesis) MarshalJSON() ([]byte, error) {
enc.BaseFee = (*math.HexOrDecimal256)(g.BaseFee) enc.BaseFee = (*math.HexOrDecimal256)(g.BaseFee)
enc.ExcessBlobGas = (*math.HexOrDecimal64)(g.ExcessBlobGas) enc.ExcessBlobGas = (*math.HexOrDecimal64)(g.ExcessBlobGas)
enc.BlobGasUsed = (*math.HexOrDecimal64)(g.BlobGasUsed) enc.BlobGasUsed = (*math.HexOrDecimal64)(g.BlobGasUsed)
enc.BlockAccessListHash = g.BlockAccessListHash
enc.SlotNumber = g.SlotNumber enc.SlotNumber = g.SlotNumber
return json.Marshal(&enc) return json.Marshal(&enc)
} }
@ -79,6 +81,7 @@ func (g *Genesis) UnmarshalJSON(input []byte) error {
BaseFee *math.HexOrDecimal256 `json:"baseFeePerGas"` BaseFee *math.HexOrDecimal256 `json:"baseFeePerGas"`
ExcessBlobGas *math.HexOrDecimal64 `json:"excessBlobGas"` ExcessBlobGas *math.HexOrDecimal64 `json:"excessBlobGas"`
BlobGasUsed *math.HexOrDecimal64 `json:"blobGasUsed"` BlobGasUsed *math.HexOrDecimal64 `json:"blobGasUsed"`
BlockAccessListHash *common.Hash `json:"blockAccessListHash,omitempty"`
SlotNumber *uint64 `json:"slotNumber"` SlotNumber *uint64 `json:"slotNumber"`
} }
var dec Genesis var dec Genesis
@ -136,6 +139,9 @@ func (g *Genesis) UnmarshalJSON(input []byte) error {
if dec.BlobGasUsed != nil { if dec.BlobGasUsed != nil {
g.BlobGasUsed = (*uint64)(dec.BlobGasUsed) g.BlobGasUsed = (*uint64)(dec.BlobGasUsed)
} }
if dec.BlockAccessListHash != nil {
g.BlockAccessListHash = dec.BlockAccessListHash
}
if dec.SlotNumber != nil { if dec.SlotNumber != nil {
g.SlotNumber = dec.SlotNumber g.SlotNumber = dec.SlotNumber
} }

View file

@ -73,6 +73,7 @@ type Genesis struct {
BaseFee *big.Int `json:"baseFeePerGas"` // EIP-1559 BaseFee *big.Int `json:"baseFeePerGas"` // EIP-1559
ExcessBlobGas *uint64 `json:"excessBlobGas"` // EIP-4844 ExcessBlobGas *uint64 `json:"excessBlobGas"` // EIP-4844
BlobGasUsed *uint64 `json:"blobGasUsed"` // EIP-4844 BlobGasUsed *uint64 `json:"blobGasUsed"` // EIP-4844
BlockAccessListHash *common.Hash `json:"blockAccessListHash,omitempty"` // EIP-7928
SlotNumber *uint64 `json:"slotNumber"` // EIP-7843 SlotNumber *uint64 `json:"slotNumber"` // EIP-7843
} }
@ -123,6 +124,7 @@ func ReadGenesis(db ethdb.Database) (*Genesis, error) {
genesis.BaseFee = genesisHeader.BaseFee genesis.BaseFee = genesisHeader.BaseFee
genesis.ExcessBlobGas = genesisHeader.ExcessBlobGas genesis.ExcessBlobGas = genesisHeader.ExcessBlobGas
genesis.BlobGasUsed = genesisHeader.BlobGasUsed genesis.BlobGasUsed = genesisHeader.BlobGasUsed
genesis.BlockAccessListHash = genesisHeader.BlockAccessListHash
genesis.SlotNumber = genesisHeader.SlotNumber genesis.SlotNumber = genesisHeader.SlotNumber
return &genesis, nil return &genesis, nil
@ -498,6 +500,7 @@ func (g *Genesis) toBlockWithRoot(root common.Hash) *types.Block {
Difficulty: g.Difficulty, Difficulty: g.Difficulty,
MixDigest: g.Mixhash, MixDigest: g.Mixhash,
Coinbase: g.Coinbase, Coinbase: g.Coinbase,
BlockAccessListHash: g.BlockAccessListHash,
Root: root, Root: root,
} }
if g.GasLimit == 0 { if g.GasLimit == 0 {

View file

@ -0,0 +1,395 @@
package core
import (
"cmp"
"fmt"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/types/bal"
"github.com/ethereum/go-ethereum/core/vm"
"golang.org/x/sync/errgroup"
"runtime"
"slices"
"time"
)
// ProcessResultWithMetrics wraps ProcessResult with some metrics that are
// emitted when executing blocks containing access lists.
type ProcessResultWithMetrics struct {
	// ProcessResult holds the receipts, requests, logs and gas usage of the
	// executed block, or a non-nil Error when processing failed.
	ProcessResult *ProcessResult
	// PreProcessTime is presumably the time spent preparing the block before
	// transaction execution — not populated in this file; TODO confirm.
	PreProcessTime time.Duration
	// StateTransitionMetrics carries timing data from the BAL state
	// transition. May be nil: prepareExecResult does not set it.
	StateTransitionMetrics *state.BALStateTransitionMetrics
	// the time it took to execute all txs in the block
	ExecTime time.Duration
	// PostProcessTime is the time spent after tx execution (system-contract
	// calls, withdrawals, finalization and access-list validation).
	PostProcessTime time.Duration
}
// ParallelStateProcessor is used to execute and verify blocks containing
// access lists.
type ParallelStateProcessor struct {
	// Embedded sequential processor providing chain access and shared
	// processing helpers.
	*StateProcessor
	// vmCfg is the EVM configuration used for transaction execution and
	// post-transaction system calls.
	vmCfg *vm.Config
}
// NewParallelStateProcessor returns a new ParallelStateProcessor instance,
// built on top of a sequential StateProcessor for the given chain and using
// the supplied EVM configuration.
func NewParallelStateProcessor(chain *HeaderChain, vmConfig *vm.Config) ParallelStateProcessor {
	return ParallelStateProcessor{
		StateProcessor: NewStateProcessor(chain),
		vmCfg:          vmConfig,
	}
}
// validateStateAccesses reports whether the state reads observed during local
// execution (localAccesses) match the accesses recorded in the block access
// list. Before comparing, any state that the access list records as mutated
// by indices up to and including lastIdx is stripped from localAccesses,
// since mutated slots are tracked as writes rather than reads.
// Note: localAccesses is modified in place by the stripping step.
func validateStateAccesses(lastIdx int, accessList bal.AccessListReader, localAccesses bal.StateAccesses) bool {
	// 1. strip out any state in the localAccesses that was modified
	muts := accessList.Mutations(lastIdx + 1)
	for acct, mut := range *muts {
		access, exist := localAccesses[acct]
		if !exist {
			continue
		}
		// Delete any storage slots that were mutated from the read set.
		// (delete is a no-op for absent keys, so no lookup is needed.)
		for key := range mut.StorageWrites {
			delete(access, key)
		}
		// Drop the account entirely once it has no remaining read slots.
		if len(access) == 0 {
			delete(localAccesses, acct)
		}
	}
	return accessList.Accesses().Eq(localAccesses)
}
// prepareExecResult is called by resultHandler when all transactions have
// successfully executed. It performs the post-tx state transition (system
// contracts and withdrawals), validates the locally-observed accesses and
// mutations against the block access list, and calculates the ProcessResult,
// returning it to be sent on resCh by resultHandler.
func (p *ParallelStateProcessor) prepareExecResult(block *types.Block, tExecStart time.Time, accesses bal.StateAccesses, statedb *state.StateDB, prefetchReader state.Reader, results []txExecResult) *ProcessResultWithMetrics {
	tExec := time.Since(tExecStart)
	var requests [][]byte
	tPostprocessStart := time.Now()
	header := block.Header()
	context := NewEVMBlockContext(header, p.chain, nil)
	// Access-list index used for the post-transaction (system) phase.
	// NOTE(review): verify the +1 offset against the BAL index encoding.
	lastBALIdx := len(block.Transactions()) + 1
	// Post-tx reads go through a tracker layered over a reader constrained by
	// the block-level access list at lastBALIdx, so they can be compared
	// against the block's declared accesses below.
	postTxState := statedb.WithReader(state.NewReaderWithTracker(state.NewReaderWithBlockLevelAccessList(prefetchReader, *block.AccessList(), lastBALIdx)))
	// Build a VM config for the system calls from selected fields of the
	// processor config (notably, no tracer is carried over).
	cfg := vm.Config{
		NoBaseFee:               p.vmCfg.NoBaseFee,
		EnablePreimageRecording: p.vmCfg.EnablePreimageRecording,
		ExtraEips:               slices.Clone(p.vmCfg.ExtraEips),
	}
	evm := vm.NewEVM(context, postTxState, p.chainConfig(), cfg)

	// 1. order the receipts by tx index
	// 2. correctly calculate the cumulative gas used per receipt, returning
	//    a bad block error if it goes over the allowed limit
	slices.SortFunc(results, func(a, b txExecResult) int {
		return cmp.Compare(a.receipt.TransactionIndex, b.receipt.TransactionIndex)
	})
	var (
		// total gas used not applying refunds
		blockGas = uint64(0)
		// total gas used applying refunds
		execGas = uint64(0)
	)
	var allLogs []*types.Log
	var allReceipts []*types.Receipt
	for _, result := range results {
		blockGas += result.blockGas
		execGas += result.execGas
		result.receipt.CumulativeGasUsed = blockGas
		if blockGas > header.GasLimit {
			return &ProcessResultWithMetrics{
				ProcessResult: &ProcessResult{Error: fmt.Errorf("gas limit exceeded")},
			}
		}
		allLogs = append(allLogs, result.receipt.Logs...)
		allReceipts = append(allReceipts, result.receipt)
	}
	// Block gas limit is enforced against usedGas (pre-refund after Amsterdam, post-refund before).
	// NOTE(review): this check is redundant with the in-loop check above —
	// confirm whether one of the two was meant to compare execGas instead.
	if blockGas > header.GasLimit {
		return &ProcessResultWithMetrics{
			ProcessResult: &ProcessResult{Error: fmt.Errorf("gas limit exceeded")},
		}
	}
	var postMut bal.StateMutations
	// Read requests if Prague is enabled.
	if p.chainConfig().IsPrague(block.Number(), block.Time()) {
		requests = [][]byte{}
		var err error
		// EIP-6110
		if err = ParseDepositLogs(&requests, allLogs, p.chainConfig()); err != nil {
			return &ProcessResultWithMetrics{
				ProcessResult: &ProcessResult{Error: err},
			}
		}
		// EIP-7002
		postMut, err = ProcessWithdrawalQueue(&requests, evm)
		if err != nil {
			return &ProcessResultWithMetrics{
				ProcessResult: &ProcessResult{Error: err},
			}
		}
		// EIP-7251
		consolidationMut, err := ProcessConsolidationQueue(&requests, evm)
		if err != nil {
			return &ProcessResultWithMetrics{
				ProcessResult: &ProcessResult{Error: err},
			}
		}
		postMut.Merge(consolidationMut)
	}
	// Finalize the block, applying any consensus engine specific extras (e.g. block rewards)
	postMut.Merge(p.chain.Engine().Finalize(p.chain, header, postTxState, block.Body()))

	// Compare the locally-observed post-tx mutations and the merged set of
	// state accesses against what the block access list reports; any
	// mismatch renders the block invalid.
	postTxAccesses := postTxState.Reader().(state.StateReaderTracker).GetStateAccessList()
	accessList := bal.NewAccessListReader(*block.AccessList())
	if !postMut.Eq(*accessList.MutationsAt(lastBALIdx)) {
		return &ProcessResultWithMetrics{
			ProcessResult: &ProcessResult{Error: fmt.Errorf("mismatch between local/remote access list mutations for final idx")},
		}
	}
	accesses.Merge(postTxAccesses)
	if !validateStateAccesses(lastBALIdx, accessList, accesses) {
		return &ProcessResultWithMetrics{
			ProcessResult: &ProcessResult{Error: fmt.Errorf("mismatch between local/remote access list for state accesses")},
		}
	}
	tPostprocess := time.Since(tPostprocessStart)
	return &ProcessResultWithMetrics{
		ProcessResult: &ProcessResult{
			Receipts: allReceipts,
			Requests: requests,
			Logs:     allLogs,
			GasUsed:  execGas,
		},
		PostProcessTime: tPostprocess,
		ExecTime:        tExec,
	}
}
// txExecResult is the outcome of executing a single transaction against its
// BAL-derived prestate. One result per transaction is delivered to
// resultHandler over a channel, in arbitrary completion order.
type txExecResult struct {
	idx     int // transaction index
	receipt *types.Receipt
	err     error // non-EVM error which would render the block invalid
	// gas charged against the block gas limit (summed by prepareExecResult)
	blockGas uint64
	// gas used by execution after refunds (taken from receipt.GasUsed)
	execGas    uint64
	stateReads bal.StateAccesses
}
// resultHandler polls until all transactions have finished executing and the
// state root calculation is complete. The result is emitted on resCh.
//
// It proceeds in two phases:
//  1. if the block has transactions, receive the execution results from all of
//     them, recording the first non-EVM error (which renders the block invalid);
//  2. once all txs are executed, compute the post-tx state transition and join
//     it with the state root calculation result, producing the ProcessResult
//     sent on resCh (or an error if the post-tx state didn't match what is
//     reported in the BAL).
func (p *ParallelStateProcessor) resultHandler(block *types.Block, preTxReads bal.StateAccesses, statedb *state.StateDB, prefetchReader state.Reader, tExecStart time.Time, txResCh <-chan txExecResult, stateRootCalcResCh <-chan stateRootCalculationResult, resCh chan *ProcessResultWithMetrics) {
	var results []txExecResult
	gp := NewGasPool(block.GasLimit())
	var execErr error
	var numTxComplete int
	// accesses accumulates the state reads of the whole block, seeded with
	// the reads performed by the pre-transaction system calls.
	accesses := preTxReads
	if len(block.Transactions()) > 0 {
	loop:
		for {
			select {
			case res := <-txResCh:
				if execErr == nil {
					// short-circuit if invalid block was detected
					if res.err != nil {
						execErr = res.err
						continue
					}
					// NOTE(review): this charges receipt.CumulativeGasUsed against the
					// block gas pool, but results arrive here straight from parallel
					// execution before cumulative gas is recomputed — confirm the field
					// holds per-tx gas at this point.
					if err := gp.SubGas(res.receipt.CumulativeGasUsed); err != nil {
						execErr = err
					} else {
						results = append(results, res)
						accesses.Merge(res.stateReads)
					}
				}
				// Keep draining even after an error so every worker sending on
				// txResCh can complete without blocking.
				numTxComplete++
				if numTxComplete == len(block.Transactions()) {
					break loop
				}
			}
		}
		if execErr != nil {
			resCh <- &ProcessResultWithMetrics{ProcessResult: &ProcessResult{Error: execErr}}
			return
		}
	}
	execResults := p.prepareExecResult(block, tExecStart, accesses, statedb, prefetchReader, results)
	// Always receive the root calculation result so its goroutine can exit.
	rootCalcRes := <-stateRootCalcResCh
	if execResults.ProcessResult.Error != nil {
		resCh <- execResults
	} else if rootCalcRes.err != nil {
		resCh <- &ProcessResultWithMetrics{ProcessResult: &ProcessResult{Error: rootCalcRes.err}}
	} else {
		execResults.StateTransitionMetrics = rootCalcRes.metrics
		resCh <- execResults
	}
}
// stateRootCalculationResult is the outcome of the concurrent post-state root
// computation, delivered by calcAndVerifyRoot.
type stateRootCalculationResult struct {
	err     error // set if the computed root does not match the block header
	metrics *state.BALStateTransitionMetrics
	root    common.Hash
}
// calcAndVerifyRoot performs the post-state root hash calculation, verifying
// it against what is reported by the block and returning a result on resCh.
func (p *ParallelStateProcessor) calcAndVerifyRoot(block *types.Block, stateTransition *state.BALStateTransition, resCh chan stateRootCalculationResult) {
	computed := stateTransition.IntermediateRoot(false)
	result := stateRootCalculationResult{metrics: stateTransition.Metrics()}
	if expected := block.Root(); computed != expected {
		result.err = fmt.Errorf("state root mismatch. local: %x. remote: %x", computed, expected)
	}
	resCh <- result
}
// execTx executes single transaction returning a result which includes state accessed/modified.
//
// balIdx is the 1-based index of the transaction within the block access list
// (index 0 is reserved for the pre-transaction system calls), so the 0-based
// transaction index passed to SetTxContext is balIdx-1. db must wrap a reader
// that tracks state accesses (asserted to state.StateReaderTracker below).
func (p *ParallelStateProcessor) execTx(block *types.Block, tx *types.Transaction, balIdx int, db *state.StateDB, signer types.Signer) *txExecResult {
	header := block.Header()
	context := NewEVMBlockContext(header, p.chain, nil)
	cfg := vm.Config{
		NoBaseFee:               p.vmCfg.NoBaseFee,
		EnablePreimageRecording: p.vmCfg.EnablePreimageRecording,
		ExtraEips:               slices.Clone(p.vmCfg.ExtraEips),
	}
	evm := vm.NewEVM(context, db, p.chainConfig(), cfg)
	msg, err := TransactionToMessage(tx, signer, header.BaseFee)
	if err != nil {
		err = fmt.Errorf("could not apply tx %d [%v]: %w", balIdx, tx.Hash().Hex(), err)
		return &txExecResult{err: err}
	}
	// Each transaction executes against its own pool holding the full block
	// gas limit; the aggregate limit is enforced later when results are joined.
	gp := NewGasPool(block.GasLimit())
	db.SetTxContext(tx.Hash(), balIdx-1)
	// NOTE(review): gasUsed is never assigned, so the blockGas reported in the
	// result below is always zero — confirm where the pre-refund gas figure
	// should come from (ApplyTransactionWithEVM does not return it here).
	var gasUsed uint64
	mut, receipt, err := ApplyTransactionWithEVM(msg, gp, db, block.Number(), block.Hash(), context.Time, tx, evm)
	if err != nil {
		err := fmt.Errorf("could not apply tx %d [%v]: %w", balIdx, tx.Hash().Hex(), err)
		return &txExecResult{err: err}
	}
	// Cross-check the locally computed state mutations against the block's
	// access list entry for this transaction.
	accessList := bal.NewAccessListReader(*block.AccessList())
	if !accessList.MutationsAt(balIdx).Eq(mut) {
		err := fmt.Errorf("mismatch between local/remote computed state mutations at bal idx %d. got:\n%s\nexpected:\n%s\n", balIdx, mut.String(), accessList.MutationsAt(balIdx).String())
		return &txExecResult{err: err}
	}
	return &txExecResult{
		idx:        balIdx,
		receipt:    receipt,
		execGas:    receipt.GasUsed,
		blockGas:   gasUsed,
		stateReads: db.Reader().(state.StateReaderTracker).GetStateAccessList(),
	}
}
// processBlockPreTx runs the pre-transaction system calls (EIP-4788 beacon
// root and EIP-2935 parent block hash) against a read-tracking state view,
// verifying the resulting mutations against access list index 0 and returning
// the state reads they performed.
func (p *ParallelStateProcessor) processBlockPreTx(block *types.Block, statedb *state.StateDB, prefetchReader state.Reader, cfg vm.Config) (bal.StateAccesses, error) {
	header := block.Header()
	tracker := state.NewReaderWithTracker(state.NewReaderWithBlockLevelAccessList(prefetchReader, *block.AccessList(), 0))
	sdb := statedb.WithReader(tracker)
	evm := vm.NewEVM(NewEVMBlockContext(header, p.chain, nil), sdb, p.chainConfig(), cfg)

	var mutations bal.StateMutations
	if root := block.BeaconRoot(); root != nil {
		mutations = ProcessBeaconBlockRoot(*root, evm)
	}
	mutations.Merge(ProcessParentBlockHash(block.ParentHash(), evm))

	reads := tracker.(state.StateReaderTracker).GetStateAccessList()
	if !bal.NewAccessListReader(*block.AccessList()).MutationsAt(0).Eq(mutations) {
		return nil, fmt.Errorf("mismatch between local/remote access list mutations at idx 0")
	}
	return reads, nil
}
// Process performs EVM execution and state root computation for a block which is known
// to contain an access list.
//
// Three activities overlap in time:
//  1. pre-transaction system calls are executed and checked against BAL idx 0;
//  2. every transaction is executed in parallel (bounded by NumCPU), each on
//     its own copy of the pre-block state with reads served through the block
//     access list at BAL index i+1;
//  3. the post-state root is computed and verified by calcAndVerifyRoot.
//
// resultHandler joins the transaction results with the root calculation and
// delivers the final outcome on resCh.
func (p *ParallelStateProcessor) Process(block *types.Block, stateTransition *state.BALStateTransition, statedb *state.StateDB, cfg vm.Config) (*ProcessResultWithMetrics, error) {
	var (
		header           = block.Header()
		resCh            = make(chan *ProcessResultWithMetrics)
		signer           = types.MakeSigner(p.chainConfig(), header.Number, header.Time)
		rootCalcResultCh = make(chan stateRootCalculationResult)
		txResCh          = make(chan txExecResult)
		pStart           = time.Now()
		tExecStart       time.Time
		tPreprocess      time.Duration // time to create a set of prestates for parallel transaction execution
		balReader        = statedb.Reader()
	)
	// NOTE(review): the copy is taken before the pre-tx system calls run —
	// presumably the per-tx readers serve all values out of the access list so
	// the system-call mutations need not be in the prestate; confirm.
	startingState := statedb.Copy()
	preReads, err := p.processBlockPreTx(block, statedb, balReader, cfg)
	if err != nil {
		return nil, err
	}
	// compute the reads/mutations at the last bal index
	tPreprocess = time.Since(pStart)
	// execute transactions and state root calculation in parallel
	tExecStart = time.Now()
	go p.resultHandler(block, preReads, statedb, balReader, tExecStart, txResCh, rootCalcResultCh, resCh)
	var workers errgroup.Group
	workers.SetLimit(runtime.NumCPU())
	for i, t := range block.Transactions() {
		tx := t
		idx := i
		// Each transaction gets its own copy of the pre-block state; reads are
		// tracked and served via the access list at BAL index idx+1 (index 0
		// belongs to the pre-tx system calls).
		sdb := startingState.Copy()
		workers.Go(func() error {
			startingStateWithReadTracker := sdb.WithReader(state.NewReaderWithTracker(state.NewReaderWithBlockLevelAccessList(balReader, *block.AccessList(), idx+1)))
			res := p.execTx(block, tx, idx+1, startingStateWithReadTracker, signer)
			txResCh <- *res
			return nil
		})
	}
	go p.calcAndVerifyRoot(block, stateTransition, rootCalcResultCh)
	// resultHandler drains one result per transaction, so receiving on resCh
	// implies every worker has sent; the errgroup's closures never return
	// errors, hence no explicit Wait is needed.
	res := <-resCh
	if res.ProcessResult.Error != nil {
		return nil, res.ProcessResult.Error
	}
	// TODO: remove preprocess metric ?
	res.PreProcessTime = tPreprocess
	return res, nil
}

View file

@ -20,6 +20,7 @@ import (
"bytes" "bytes"
"encoding/binary" "encoding/binary"
"fmt" "fmt"
"github.com/ethereum/go-ethereum/core/types/bal"
"math/big" "math/big"
"slices" "slices"
@ -421,6 +422,17 @@ func WriteBodyRLP(db ethdb.KeyValueWriter, hash common.Hash, number uint64, rlp
} }
} }
// WriteAccessListRLP stores the RLP-encoded EIP-7928 block access list
// belonging to a block, keyed by block number and hash.
func WriteAccessListRLP(db ethdb.KeyValueWriter, hash common.Hash, number uint64, rlp rlp.RawValue) {
	if err := db.Put(accessListKey(number, hash), rlp); err != nil {
		log.Crit("failed to store block access list", "err", err)
	}
}
// ReadAccessListRLP retrieves the RLP-encoded block access list for the given
// block, or nil if none is stored.
func ReadAccessListRLP(db ethdb.Reader, hash common.Hash, number uint64) rlp.RawValue {
	data, _ := db.Get(accessListKey(number, hash))
	return data
}
// HasBody verifies the existence of a block body corresponding to the hash. // HasBody verifies the existence of a block body corresponding to the hash.
func HasBody(db ethdb.Reader, hash common.Hash, number uint64) bool { func HasBody(db ethdb.Reader, hash common.Hash, number uint64) bool {
if isCanon(db, number, hash) { if isCanon(db, number, hash) {
@ -455,6 +467,26 @@ func WriteBody(db ethdb.KeyValueWriter, hash common.Hash, number uint64, body *t
WriteBodyRLP(db, hash, number, data) WriteBodyRLP(db, hash, number, data)
} }
// ReadAccessList retrieves the EIP-7928 block access list corresponding to the
// hash, or nil if none is stored or the stored entry cannot be decoded.
//
// The previous implementation always returned a non-nil pointer (to a
// zero-value access list when nothing was stored), which defeated the nil
// check performed by callers such as ReadBlock, and log.Crit'ed on a corrupt
// entry, killing the node — inconsistent with the sibling read accessors,
// which log and return nil.
func ReadAccessList(db ethdb.Reader, hash common.Hash, number uint64) *bal.BlockAccessList {
	data := ReadAccessListRLP(db, hash, number)
	if len(data) == 0 {
		// No access list stored for this block.
		return nil
	}
	al := new(bal.BlockAccessList)
	if err := rlp.DecodeBytes(data, al); err != nil {
		log.Error("Invalid block access list RLP", "hash", hash, "number", number, "err", err)
		return nil
	}
	return al
}
// WriteAccessList RLP-encodes the given block access list and stores it,
// keyed by block number and hash.
func WriteAccessList(db ethdb.KeyValueWriter, hash common.Hash, number uint64, al *bal.BlockAccessList) {
	data, err := rlp.EncodeToBytes(al)
	if err != nil {
		log.Crit("failed to RLP encode block access list", "err", err)
	}
	WriteAccessListRLP(db, hash, number, data)
}
// DeleteBody removes all block body data associated with a hash. // DeleteBody removes all block body data associated with a hash.
func DeleteBody(db ethdb.KeyValueWriter, hash common.Hash, number uint64) { func DeleteBody(db ethdb.KeyValueWriter, hash common.Hash, number uint64) {
if err := db.Delete(blockBodyKey(number, hash)); err != nil { if err := db.Delete(blockBodyKey(number, hash)); err != nil {
@ -659,13 +691,25 @@ func ReadBlock(db ethdb.Reader, hash common.Hash, number uint64) *types.Block {
if body == nil { if body == nil {
return nil return nil
} }
return types.NewBlockWithHeader(header).WithBody(*body)
block := types.NewBlockWithHeader(header).WithBody(*body)
if header.BlockAccessListHash != nil {
accessList := ReadAccessList(db, hash, number)
if accessList != nil {
block = block.WithAccessList(accessList)
}
}
return block
} }
// WriteBlock serializes a block into the database, header and body separately. // WriteBlock serializes a block into the database, header and body separately.
func WriteBlock(db ethdb.KeyValueWriter, block *types.Block) { func WriteBlock(db ethdb.KeyValueWriter, block *types.Block) {
WriteBody(db, block.Hash(), block.NumberU64(), block.Body()) WriteBody(db, block.Hash(), block.NumberU64(), block.Body())
WriteHeader(db, block.Header()) WriteHeader(db, block.Header())
if block.AccessList() != nil {
WriteAccessList(db, block.Hash(), block.NumberU64(), block.AccessList())
}
} }
// WriteAncientBlocks writes entire block data into ancient store and returns the total written size. // WriteAncientBlocks writes entire block data into ancient store and returns the total written size.

View file

@ -111,6 +111,7 @@ var (
headerNumberPrefix = []byte("H") // headerNumberPrefix + hash -> num (uint64 big endian) headerNumberPrefix = []byte("H") // headerNumberPrefix + hash -> num (uint64 big endian)
blockBodyPrefix = []byte("b") // blockBodyPrefix + num (uint64 big endian) + hash -> block body blockBodyPrefix = []byte("b") // blockBodyPrefix + num (uint64 big endian) + hash -> block body
accessListPrefix = []byte("z")
blockReceiptsPrefix = []byte("r") // blockReceiptsPrefix + num (uint64 big endian) + hash -> block receipts blockReceiptsPrefix = []byte("r") // blockReceiptsPrefix + num (uint64 big endian) + hash -> block receipts
txLookupPrefix = []byte("l") // txLookupPrefix + hash -> transaction/receipt lookup metadata txLookupPrefix = []byte("l") // txLookupPrefix + hash -> transaction/receipt lookup metadata
@ -209,6 +210,11 @@ func blockBodyKey(number uint64, hash common.Hash) []byte {
return append(append(blockBodyPrefix, encodeBlockNumber(number)...), hash.Bytes()...) return append(append(blockBodyPrefix, encodeBlockNumber(number)...), hash.Bytes()...)
} }
// accessListKey = accessListPrefix + num (uint64 big endian) + hash
func accessListKey(number uint64, hash common.Hash) []byte {
	return append(append(accessListPrefix, encodeBlockNumber(number)...), hash.Bytes()...)
}
// blockReceiptsKey = blockReceiptsPrefix + num (uint64 big endian) + hash // blockReceiptsKey = blockReceiptsPrefix + num (uint64 big endian) + hash
func blockReceiptsKey(number uint64, hash common.Hash) []byte { func blockReceiptsKey(number uint64, hash common.Hash) []byte {
return append(append(blockReceiptsPrefix, encodeBlockNumber(number)...), hash.Bytes()...) return append(append(blockReceiptsPrefix, encodeBlockNumber(number)...), hash.Bytes()...)

View file

@ -0,0 +1,514 @@
package state
import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/types/bal"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/trie/trienode"
"github.com/holiman/uint256"
"golang.org/x/sync/errgroup"
"maps"
"sync"
"sync/atomic"
"time"
)
// BALStateTransition is responsible for performing the state root update
// and commit for EIP 7928 access-list-containing blocks. An instance of
// this object is only used for a single block.
type BALStateTransition struct {
	accessList bal.AccessListReader
	db         Database
	reader     Reader
	stateTrie  Trie
	parentRoot common.Hash
	// the computed state root of the block; remains the zero hash until
	// IntermediateRoot has run (also serves as its memoization sentinel)
	rootHash common.Hash
	// the state modifications performed by the block
	diffs bal.StateMutations
	// a map of common.Address -> *types.StateAccount containing the block
	// prestate of all accounts that will be modified
	prestates sync.Map
	// post-block state accounts keyed by address, populated by IntermediateRoot
	postStates map[common.Address]*types.StateAccount
	// a map of common.Address -> Trie containing the account tries for all
	// accounts with mutated storage
	tries sync.Map //map[common.Address]Trie
	// addresses whose accounts were removed from the trie in this block
	deletions map[common.Address]struct{}
	// meter inputs; the atomic counters are bumped from concurrent goroutines
	accountDeleted int64
	accountUpdated int64
	storageDeleted atomic.Int64
	storageUpdated atomic.Int64
	stateUpdate    *stateUpdate
	metrics        BALStateTransitionMetrics
	// highest BAL index of the block: len(txs)+1 (the post-tx system calls)
	maxBALIdx int
	// first error encountered during the transition; retrieved via Error
	err error
}
// Metrics returns the timing metrics gathered during root calculation and
// commit. The returned pointer aliases the live metrics struct.
func (s *BALStateTransition) Metrics() *BALStateTransitionMetrics {
	return &s.metrics
}
// BALStateTransitionMetrics collects wall-clock timings for the two phases of
// a BAL state transition: trie hashing (IntermediateRoot) and the commit.
type BALStateTransitionMetrics struct {
	// trie hashing metrics
	AccountUpdate         time.Duration
	StatePrefetch         time.Duration
	StateUpdate           time.Duration
	StateHash             time.Duration
	OriginStorageLoadTime time.Duration

	// commit metrics
	AccountCommits  time.Duration
	StorageCommits  time.Duration
	SnapshotCommits time.Duration
	TrieDBCommits   time.Duration
	TotalCommitTime time.Duration
}
// NewBALStateTransition constructs a state transition helper for a single
// access-list-carrying block, opening the account trie at parentRoot.
func NewBALStateTransition(block *types.Block, prefetchReader Reader, db Database, parentRoot common.Hash) (*BALStateTransition, error) {
	tr, err := db.OpenTrie(parentRoot)
	if err != nil {
		return nil, err
	}
	// Fields not listed below start at their useful zero values (empty
	// sync.Maps, zero root hash, nil stateUpdate).
	st := &BALStateTransition{
		accessList: bal.NewAccessListReader(*block.AccessList()),
		db:         db,
		reader:     prefetchReader,
		stateTrie:  tr,
		parentRoot: parentRoot,
		diffs:      make(bal.StateMutations),
		postStates: make(map[common.Address]*types.StateAccount),
		deletions:  make(map[common.Address]struct{}),
		maxBALIdx:  len(block.Transactions()) + 1,
	}
	return st, nil
}
// Error returns the first error encountered while applying the block's state
// mutations, or nil if none occurred.
func (s *BALStateTransition) Error() error {
	return s.err
}
// setError records err as the first error encountered during the state
// transition; subsequent calls are no-ops so the original failure is
// preserved for retrieval via Error.
//
// The previous guard was inverted (`if s.err != nil`): since s.err starts out
// nil, the assignment could never execute and every error reported by the
// concurrent trie-update goroutines was silently dropped, leaving Error()
// permanently nil.
func (s *BALStateTransition) setError(err error) {
	if s.err == nil {
		s.err = err
	}
}
// isAccountDeleted checks whether the state account was deleted in this block.
// Post selfdestruct-removal, deletions can only occur if an account which has
// a balance becomes the target of a CREATE2 initcode which calls SENDALL,
// clearing the account and marking it for deletion.
//
// TODO: refresh my knowledge of the storage-clearing EIP and ensure that my
// assumptions around an empty account which contains storage are valid here.
func isAccountDeleted(prestate *types.StateAccount, mutations bal.AccountMutations) bool {
	// Any surviving code, nonce or storage write means the account lives on.
	// (len of a nil slice/map is 0, so no explicit nil checks are needed.)
	if len(mutations.Code) != 0 {
		return false
	}
	if mutations.Nonce != nil && *mutations.Nonce != 0 {
		return false
	}
	if len(mutations.StorageWrites) > 0 {
		return false
	}
	// Deletion requires the balance to have been explicitly zeroed this block.
	if mutations.Balance == nil || !mutations.Balance.IsZero() {
		return false
	}
	// Only a previously nonce-less, code-less account that actually held a
	// balance can be cleared this way. An empty account with storage is
	// considered deleted, so the storage root is deliberately not checked.
	return prestate.Nonce == 0 &&
		!prestate.Balance.IsZero() &&
		common.BytesToHash(prestate.CodeHash) == types.EmptyCodeHash
}
// updateAccount applies the block state mutations to a given account returning
// the updated state account and new code (if the account code changed).
func (s *BALStateTransition) updateAccount(addr common.Address) (*types.StateAccount, []byte) {
	pre, _ := s.prestates.Load(addr)
	updated := pre.(*types.StateAccount).Copy()
	diff := s.diffs[addr]
	if diff.Nonce != nil {
		updated.Nonce = *diff.Nonce
	}
	if diff.Balance != nil {
		updated.Balance = new(uint256.Int).Set(diff.Balance)
	}
	// Refresh the storage root if this account's storage trie was mutated.
	if tr, ok := s.tries.Load(addr); ok {
		updated.Root = tr.(Trie).Hash()
	}
	return updated, diff.Code
}
// commitAccount builds the accountUpdate describing how addr changed during
// the block and, if the account's storage was mutated, commits its storage
// trie, returning the resulting dirty node set.
//
// The update captures the slim-RLP encoded pre/post account data, any new
// contract code (together with the hash of the code it replaced), and the
// origin value of every written storage slot.
func (s *BALStateTransition) commitAccount(addr common.Address) (*accountUpdate, *trienode.NodeSet, error) {
	// encode RLP-encodes a storage value with leading zeroes trimmed; the
	// zero value (a deleted slot) is represented as nil.
	encode := func(val common.Hash) []byte {
		if val == (common.Hash{}) {
			return nil
		}
		blob, _ := rlp.EncodeToBytes(common.TrimLeftZeroes(val[:]))
		return blob
	}
	op := &accountUpdate{
		address: addr,
		data:    types.SlimAccountRLP(*s.postStates[addr]), // TODO: cache the updated state account somewhere
	}
	var prestate *types.StateAccount
	if pre, exist := s.prestates.Load(addr); exist {
		prestate = pre.(*types.StateAccount)
		op.origin = types.SlimAccountRLP(*prestate)
	}
	if s.diffs[addr].Code != nil {
		code := contractCode{
			hash: crypto.Keccak256Hash(s.diffs[addr].Code),
			blob: s.diffs[addr].Code,
		}
		// originHash is the hash of the code the account carried before this
		// block. The previous implementation hashed the slim account RLP
		// (crypto.Keccak256Hash(op.origin)), which is not a code hash; use
		// the prestate's recorded code hash instead, mirroring the
		// types.EmptyCodeHash fallback for fresh accounts.
		if prestate == nil {
			code.originHash = types.EmptyCodeHash
		} else {
			code.originHash = common.BytesToHash(prestate.CodeHash)
		}
		op.code = &code
	}
	if len(s.diffs[addr].StorageWrites) == 0 {
		// No storage changes, so there is no storage trie to commit.
		return op, nil, nil
	}
	op.storages = make(map[common.Hash][]byte)
	op.storagesOriginByHash = make(map[common.Hash][]byte)
	op.storagesOriginByKey = make(map[common.Hash][]byte)
	for key, value := range s.diffs[addr].StorageWrites {
		hash := crypto.Keccak256Hash(key[:])
		op.storages[hash] = encode(value)
		// Load the pre-block value of the slot for the state update diff.
		storage, err := s.reader.Storage(addr, key)
		if err != nil {
			return nil, nil, err
		}
		origin := encode(storage)
		op.storagesOriginByHash[hash] = origin
		op.storagesOriginByKey[key] = origin
	}
	// The storage trie was populated during IntermediateRoot; commit it and
	// record the committed root on the post-state account.
	tr, _ := s.tries.Load(addr)
	root, nodes := tr.(Trie).Commit(false)
	s.postStates[addr].Root = root
	return op, nodes, nil
}
// CommitWithUpdate flushes mutated trie nodes and state accounts to disk.
//
// It commits the account trie and every mutated storage trie concurrently,
// aggregates the dirty nodes into one merged set, assembles the stateUpdate
// describing the block's net state change and flushes it to the snapshot tree
// and trie database, returning the new root and the update. IntermediateRoot
// must have run first so postStates, tries and deletions are populated.
// deleteEmptyObjects is currently unused here.
func (s *BALStateTransition) CommitWithUpdate(block uint64, deleteEmptyObjects bool, noStorageWiping bool) (common.Hash, *stateUpdate, error) {
	// 1) create a stateUpdate object
	// Commit objects to the trie, measuring the elapsed time
	var (
		commitStart             = time.Now()
		accountTrieNodesUpdated int
		accountTrieNodesDeleted int
		storageTrieNodesUpdated int
		storageTrieNodesDeleted int

		lock    sync.Mutex                                           // protect two maps below
		nodes   = trienode.NewMergedNodeSet()                        // aggregated trie nodes
		updates = make(map[common.Hash]*accountUpdate, len(s.diffs)) // aggregated account updates

		// merge aggregates the dirty trie nodes into the global set.
		//
		// Given that some accounts may be destroyed and then recreated within
		// the same block, it's possible that a node set with the same owner
		// may already exist. In such cases, these two sets are combined, with
		// the later one overwriting the previous one if any nodes are modified
		// or deleted in both sets.
		//
		// merge run concurrently across all the state objects and account trie.
		merge = func(set *trienode.NodeSet) error {
			if set == nil {
				return nil
			}
			lock.Lock()
			defer lock.Unlock()

			updates, deletes := set.Size()
			if set.Owner == (common.Hash{}) {
				accountTrieNodesUpdated += updates
				accountTrieNodesDeleted += deletes
			} else {
				storageTrieNodesUpdated += updates
				storageTrieNodesDeleted += deletes
			}
			return nodes.Merge(set)
		}
	)
	// Snapshot the prestates of all mutated accounts for destruction handling.
	destructedPrestates := make(map[common.Address]*types.StateAccount)
	s.prestates.Range(func(key, value any) bool {
		addr := key.(common.Address)
		acct := value.(*types.StateAccount)
		destructedPrestates[addr] = acct
		return true
	})
	deletes, delNodes, err := handleDestruction(s.db, s.stateTrie, noStorageWiping, maps.Keys(s.deletions), destructedPrestates)
	if err != nil {
		return common.Hash{}, nil, err
	}
	for _, set := range delNodes {
		if err := merge(set); err != nil {
			return common.Hash{}, nil, err
		}
	}
	// Handle all state updates afterwards, concurrently to one another to shave
	// off some milliseconds from the commit operation. Also accumulate the code
	// writes to run in parallel with the computations.
	var (
		start   = time.Now()
		root    common.Hash
		workers errgroup.Group
	)
	// Schedule the account trie first since that will be the biggest, so give
	// it the most time to crunch.
	//
	// TODO(karalabe): This account trie commit is *very* heavy. 5-6ms at chain
	// heads, which seems excessive given that it doesn't do hashing, it just
	// shuffles some data. For comparison, the *hashing* at chain head is 2-3ms.
	// We need to investigate what's happening as it seems something's wonky.
	// Obviously it's not an end of the world issue, just something the original
	// code didn't anticipate for.
	workers.Go(func() error {
		// Write the account trie changes, measuring the amount of wasted time
		newroot, set := s.stateTrie.Commit(true)
		root = newroot
		if err := merge(set); err != nil {
			return err
		}
		s.metrics.AccountCommits = time.Since(start)
		return nil
	})
	// Schedule each of the storage tries that need to be updated, so they can
	// run concurrently to one another.
	//
	// TODO(karalabe): Experimentally, the account commit takes approximately the
	// same time as all the storage commits combined, so we could maybe only have
	// 2 threads in total. But that kind of depends on the account commit being
	// more expensive than it should be, so let's fix that and revisit this todo.
	for addr, _ := range s.diffs {
		// Deleted accounts were already handled by handleDestruction above.
		if _, isDeleted := s.deletions[addr]; isDeleted {
			continue
		}
		address := addr
		// Run the storage updates concurrently to one another
		workers.Go(func() error {
			// Write any storage changes in the state object to its storage trie
			update, set, err := s.commitAccount(address)
			if err != nil {
				return err
			}
			if err := merge(set); err != nil {
				return err
			}
			lock.Lock()
			updates[crypto.Keccak256Hash(address[:])] = update
			s.metrics.StorageCommits = time.Since(start) // overwrite with the longest storage commit runtime
			lock.Unlock()
			return nil
		})
	}
	// Wait for everything to finish and update the metrics
	if err := workers.Wait(); err != nil {
		return common.Hash{}, nil, err
	}
	accountUpdatedMeter.Mark(s.accountUpdated)
	storageUpdatedMeter.Mark(s.storageUpdated.Load())
	accountDeletedMeter.Mark(s.accountDeleted)
	storageDeletedMeter.Mark(s.storageDeleted.Load())
	accountTrieUpdatedMeter.Mark(int64(accountTrieNodesUpdated))
	accountTrieDeletedMeter.Mark(int64(accountTrieNodesDeleted))
	storageTriesUpdatedMeter.Mark(int64(storageTrieNodesUpdated))
	storageTriesDeletedMeter.Mark(int64(storageTrieNodesDeleted))
	// Assemble the block's net state change and flush it to the snapshot
	// layer tree and the trie database.
	ret := newStateUpdate(noStorageWiping, s.parentRoot, root, block, deletes, updates, nodes)
	snapshotCommits, trieDBCommits, err := flushStateUpdate(s.db, block, ret)
	if err != nil {
		return common.Hash{}, nil, err
	}
	s.metrics.SnapshotCommits, s.metrics.TrieDBCommits = snapshotCommits, trieDBCommits
	s.metrics.TotalCommitTime = time.Since(commitStart)
	return root, ret, nil
}
// Commit flushes the block's state changes to disk, returning the new state
// root. It is a thin wrapper over CommitWithUpdate that drops the update.
func (s *BALStateTransition) Commit(block uint64, deleteEmptyObjects bool, noStorageWiping bool) (common.Hash, error) {
	root, _, err := s.CommitWithUpdate(block, deleteEmptyObjects, noStorageWiping)
	return root, err
}
// IntermediateRoot applies block state mutations and computes the updated state
// trie root.
//
// The result is memoized: a second call returns the cached root. The boolean
// parameter (deleteEmptyObjects in the StateDB equivalent) is ignored.
// Errors are recorded via setError and surfaced through Error; the zero hash
// is returned on failure.
func (s *BALStateTransition) IntermediateRoot(_ bool) common.Hash {
	if s.rootHash != (common.Hash{}) {
		return s.rootHash
	}
	// State root calculation proceeds as follows:
	// 1 (b): load the origin storage values for all slots which were modified during the block (this is needed for computing the stateUpdate)
	// 1 (c): update each mutated account, producing the post-block state object by applying the state mutations to the prestate (retrieved in 1a).
	// 1 (d): prefetch the intermediate trie nodes of the mutated state set from the account trie.
	//
	// 2: compute the post-state root of the account trie
	//
	// Steps 1/2 are performed sequentially, with steps 1a-d performed in parallel
	start := time.Now()
	var wg sync.WaitGroup
	// NOTE(review): maxBALIdx is already len(txs)+1 (the post-tx system-call
	// index); confirm Mutations takes an exclusive upper bound so maxBALIdx+1
	// includes the final index rather than overshooting.
	s.diffs = *s.accessList.Mutations(s.maxBALIdx + 1)
	for addr, d := range s.diffs {
		wg.Add(1)
		address := addr
		diff := d
		go func() {
			defer wg.Done()
			// 1 (c): update each mutated account, producing the post-block state object by applying the state mutations to the prestate (retrieved in 1a).
			acct, err := s.reader.Account(address)
			if err != nil {
				s.setError(err)
				return
			}
			if acct == nil {
				// Account does not exist yet; start from the empty account.
				acct = types.NewEmptyStateAccount()
			}
			s.prestates.Store(address, acct)
			if len(diff.StorageWrites) > 0 {
				tr, err := s.db.OpenStorageTrie(s.parentRoot, address, acct.Root, s.stateTrie)
				if err != nil {
					s.setError(err)
					return
				}
				s.tries.Store(address, tr)
				// Partition the writes: zero values are deletions, anything
				// else is an update (stored with leading zeroes trimmed).
				var (
					updateKeys, updateValues [][]byte
					deleteKeys               [][]byte
				)
				for key, val := range diff.StorageWrites {
					if val != (common.Hash{}) {
						updateKeys = append(updateKeys, key[:])
						updateValues = append(updateValues, common.TrimLeftZeroes(val[:]))
						s.storageUpdated.Add(1)
					} else {
						deleteKeys = append(deleteKeys, key[:])
						s.storageDeleted.Add(1)
					}
				}
				if err := tr.UpdateStorageBatch(address, updateKeys, updateValues); err != nil {
					s.setError(err)
					return
				}
				for _, key := range deleteKeys {
					if err := tr.DeleteStorage(address, key); err != nil {
						s.setError(err)
						return
					}
				}
				hashStart := time.Now()
				tr.Hash()
				// NOTE(review): written concurrently by every storage goroutine
				// without synchronization (and overwritten again below by the
				// account-trie hash timing) — racy metric; confirm intent.
				s.metrics.StateHash = time.Since(hashStart)
			}
		}()
	}
	wg.Add(1)
	// 1 (d): prefetch the intermediate trie nodes of the mutated state set from the account trie.
	go func() {
		defer wg.Done()
		prefetchStart := time.Now()
		var prefetchAddrs []common.Address
		for addr, _ := range s.diffs {
			prefetchAddrs = append(prefetchAddrs, addr)
		}
		if err := s.stateTrie.PrefetchAccount(prefetchAddrs); err != nil {
			s.setError(err)
			return
		}
		s.metrics.StatePrefetch = time.Since(prefetchStart)
	}()
	wg.Wait()
	s.metrics.AccountUpdate = time.Since(start)
	// 2: compute the post-state root of the account trie
	stateUpdateStart := time.Now()
	for mutatedAddr, _ := range s.diffs {
		p, _ := s.prestates.Load(mutatedAddr)
		prestate := p.(*types.StateAccount)
		isDeleted := isAccountDeleted(prestate, s.diffs[mutatedAddr])
		if isDeleted {
			if err := s.stateTrie.DeleteAccount(mutatedAddr); err != nil {
				s.setError(err)
				return common.Hash{}
			}
			s.deletions[mutatedAddr] = struct{}{}
		} else {
			acct, code := s.updateAccount(mutatedAddr)
			if code != nil {
				// New code: persist it and record its hash on the account.
				codeHash := crypto.Keccak256Hash(code)
				acct.CodeHash = codeHash.Bytes()
				if err := s.stateTrie.UpdateContractCode(mutatedAddr, codeHash, code); err != nil {
					s.setError(err)
					return common.Hash{}
				}
			}
			if err := s.stateTrie.UpdateAccount(mutatedAddr, acct, len(code)); err != nil {
				s.setError(err)
				return common.Hash{}
			}
			s.postStates[mutatedAddr] = acct
		}
	}
	s.metrics.StateUpdate = time.Since(stateUpdateStart)
	stateTrieHashStart := time.Now()
	s.rootHash = s.stateTrie.Hash()
	s.metrics.StateHash = time.Since(stateTrieHashStart)
	return s.rootHash
}
// Preimages returns the preimages of the hashed trie keys touched by this
// transition. Currently a stub that always returns an empty map.
func (s *BALStateTransition) Preimages() map[common.Hash][]byte {
	// TODO: implement this
	return make(map[common.Hash][]byte)
}

View file

@ -73,7 +73,7 @@ type Trie interface {
// be returned. // be returned.
GetAccount(address common.Address) (*types.StateAccount, error) GetAccount(address common.Address) (*types.StateAccount, error)
// PrefetchAccount attempts to resolve specific accounts from the database // PrefetchAccount attempts to schedule specific accounts from the database
// to accelerate subsequent trie operations. // to accelerate subsequent trie operations.
PrefetchAccount([]common.Address) error PrefetchAccount([]common.Address) error
@ -82,7 +82,7 @@ type Trie interface {
// a trie.MissingNodeError is returned. // a trie.MissingNodeError is returned.
GetStorage(addr common.Address, key []byte) ([]byte, error) GetStorage(addr common.Address, key []byte) ([]byte, error)
// PrefetchStorage attempts to resolve specific storage slots from the database // PrefetchStorage attempts to schedule specific storage slots from the database
// to accelerate subsequent trie operations. // to accelerate subsequent trie operations.
PrefetchStorage(addr common.Address, keys [][]byte) error PrefetchStorage(addr common.Address, keys [][]byte) error
@ -91,12 +91,18 @@ type Trie interface {
// in the trie with provided address. // in the trie with provided address.
UpdateAccount(address common.Address, account *types.StateAccount, codeLen int) error UpdateAccount(address common.Address, account *types.StateAccount, codeLen int) error
// UpdateAccountBatch attempts to update a list accounts in the batch manner.
UpdateAccountBatch(addresses []common.Address, accounts []*types.StateAccount, _ []int) error
// UpdateStorage associates key with value in the trie. If value has length zero, // UpdateStorage associates key with value in the trie. If value has length zero,
// any existing value is deleted from the trie. The value bytes must not be modified // any existing value is deleted from the trie. The value bytes must not be modified
// by the caller while they are stored in the trie. If a node was not found in the // by the caller while they are stored in the trie. If a node was not found in the
// database, a trie.MissingNodeError is returned. // database, a trie.MissingNodeError is returned.
UpdateStorage(addr common.Address, key, value []byte) error UpdateStorage(addr common.Address, key, value []byte) error
// UpdateStorageBatch attempts to update a list storages in the batch manner.
UpdateStorageBatch(_ common.Address, keys [][]byte, values [][]byte) error
// DeleteAccount abstracts an account deletion from the trie. // DeleteAccount abstracts an account deletion from the trie.
DeleteAccount(address common.Address) error DeleteAccount(address common.Address) error
@ -234,6 +240,21 @@ func (db *CachingDB) ReadersWithCacheStats(stateRoot common.Hash) (Reader, Reade
return ra, rb, nil return ra, rb, nil
} }
// ReaderEIP7928 creates a state reader with the manner of Block-level accessList.
func (db *CachingDB) ReaderEIP7928(stateRoot common.Hash, accessList map[common.Address][]common.Hash, threads int) (Reader, error) {
base, err := db.StateReader(stateRoot)
if err != nil {
return nil, err
}
// Construct the state reader with native cache and associated statistics
r := newStateReaderWithStats(newStateReaderWithCache(base))
// Construct the state reader with background prefetching
pr := newPrefetchStateReader(r, accessList, threads)
return newReader(newCachingCodeReader(db.disk, db.codeCache, db.codeSizeCache), pr), nil
}
// OpenTrie opens the main account trie at a specific root hash. // OpenTrie opens the main account trie at a specific root hash.
func (db *CachingDB) OpenTrie(root common.Hash) (Trie, error) { func (db *CachingDB) OpenTrie(root common.Hash) (Trie, error) {
if db.triedb.IsVerkle() { if db.triedb.IsVerkle() {

View file

@ -29,7 +29,7 @@ import (
// nodeIterator is an iterator to traverse the entire state trie post-order, // nodeIterator is an iterator to traverse the entire state trie post-order,
// including all of the contract code and contract state tries. Preimage is // including all of the contract code and contract state tries. Preimage is
// required in order to resolve the contract address. // required in order to schedule the contract address.
type nodeIterator struct { type nodeIterator struct {
state *StateDB // State being iterated state *StateDB // State being iterated
tr Trie // Primary account trie for traversal tr Trie // Primary account trie for traversal

View file

@ -382,7 +382,7 @@ func (ch nonceChange) copy() journalEntry {
} }
func (ch codeChange) revert(s *StateDB) { func (ch codeChange) revert(s *StateDB) {
s.getStateObject(ch.account).setCode(crypto.Keccak256Hash(ch.prevCode), ch.prevCode) s.getStateObject(ch.account).SetCode(crypto.Keccak256Hash(ch.prevCode), ch.prevCode)
} }
func (ch codeChange) dirtied() (common.Address, bool) { func (ch codeChange) dirtied() (common.Address, bool) {

View file

@ -45,9 +45,12 @@ type ContractCodeReader interface {
// requested contract code doesn't exist. // requested contract code doesn't exist.
Code(addr common.Address, codeHash common.Hash) []byte Code(addr common.Address, codeHash common.Hash) []byte
// CodeSize retrieves a particular contracts code's size. Returns zero code // CodeSize retrieves a particular contracts code's size.
// size if the requested contract code doesn't exist. //
CodeSize(addr common.Address, codeHash common.Hash) int // - Returns zero code size along with nil error if the requested contract code
// doesn't exist
// - Returns an error only if an unexpected issue occurs
CodeSize(addr common.Address, codeHash common.Hash) (int, error)
} }
// StateReader defines the interface for accessing accounts and storage slots // StateReader defines the interface for accessing accounts and storage slots

View file

@ -0,0 +1,334 @@
// Copyright 2026 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package state
// The EIP-7928 reader utilizes a hierarchical architecture to optimize state
// access during block execution:
//
// - Base layer: The reader is initialized with the pre-transition state root,
// providing the access of the state.
//
// - Prefetching Layer: This base reader is wrapped by newPrefetchStateReader.
// Using an Access List hint, it asynchronously fetches required state data
// in the background, minimizing I/O blocking during transaction processing.
//
// - Execution Layer: To support parallel transaction execution within the EIP
// 7928 context, readers are wrapped in ReaderWithBlockLevelAccessList.
// This layer provides a "unified view" by merging the pre-transition state
// with mutated states from preceding transactions in the block.
//
// - Tracking Layer: Finally, the readerTracker wraps the execution reader to
// capture all state accesses made during a specific transaction. These individual
// accesses are subsequently merged to construct a comprehensive access list
// for the entire block.
//
// The architecture can be illustrated by the diagram below:
// [ Block Level Access List ] <────────────────┐
// ▲ │ (Merge)
// │ │
// ┌───────┴───────┐ ┌───────┴───────┐
// │ readerTracker │ │ readerTracker │ (Access Tracking)
// └───────┬───────┘ └───────┬───────┘
// │ │
// ┌──────────────┴──────────────┐ ┌──────────────┴──────────────┐
// │ ReaderWithBlockLevelAL │ │ ReaderWithBlockLevelAL │ (Unified View)
// │ (Pre-state + Mutations) │ │ (Pre-state + Mutations) │
// └──────────────┬──────────────┘ └──────────────┬──────────────┘
// │ │
// └────────────────┬─────────────────┘
// │
// ┌──────────────┴──────────────┐
// │ newPrefetchStateReader │ (Async I/O)
// │ (Access List Hint driven) │
// └──────────────┬──────────────┘
// │
// ┌──────────────┴──────────────┐
// │ Base Reader │ (State Root)
// │ (State & Contract Code) │
// └─────────────────────────────┘
import (
	"sync"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/core/types/bal"
	"github.com/ethereum/go-ethereum/crypto"
)
// fetchTask describes the prefetch work for a single account: the account
// record itself plus the storage slots expected to be touched during execution.
type fetchTask struct {
	addr  common.Address
	slots []common.Hash
}

// weight returns the number of state items the task covers: one for the
// account itself plus one per storage slot.
func (t *fetchTask) weight() int { return 1 + len(t.slots) }

// prefetchStateReader wraps a StateReader and warms it in the background by
// resolving the accounts and slots of the supplied fetch tasks concurrently.
type prefetchStateReader struct {
	StateReader
	tasks     []*fetchTask
	nThreads  int           // number of concurrent prefetch workers
	done      chan struct{} // closed once the prefetch loop has fully finished
	term      chan struct{} // closed by Close to abort outstanding work
	closeOnce sync.Once     // guards the close of term
}
// newPrefetchStateReader constructs a prefetching reader from an access-list
// hint, converting each (account, slots) entry into a fetch task and starting
// the background workers immediately.
func newPrefetchStateReader(reader StateReader, accessList bal.StorageKeys, nThreads int) *prefetchStateReader {
	tasks := make([]*fetchTask, 0, len(accessList))
	for addr, slots := range accessList {
		tasks = append(tasks, &fetchTask{addr: addr, slots: slots})
	}
	return newPrefetchStateReaderInternal(reader, tasks, nThreads)
}
// newPrefetchStateReaderInternal assembles the reader around an explicit task
// list and launches the prefetch loop in a background goroutine. The reader is
// usable immediately; prefetching merely races ahead of demand.
func newPrefetchStateReaderInternal(reader StateReader, tasks []*fetchTask, nThreads int) *prefetchStateReader {
	r := &prefetchStateReader{
		StateReader: reader,
		tasks:       tasks,
		nThreads:    nThreads,
		done:        make(chan struct{}),
		term:        make(chan struct{}),
	}
	go r.prefetch()
	return r
}
// Close aborts any in-flight prefetching and blocks until all background
// workers have exited. It is safe to call multiple times; only the first call
// has any effect.
func (r *prefetchStateReader) Close() {
	r.closeOnce.Do(func() {
		close(r.term)
		<-r.done
	})
}

// Wait blocks until the prefetching either completes naturally or is aborted
// via Close. It currently never returns a non-nil error; the error return
// exists for interface symmetry.
func (r *prefetchStateReader) Wait() error {
	select {
	case <-r.term:
		return nil
	case <-r.done:
		return nil
	}
}
// prefetch partitions the total task weight (accounts + slots) evenly across
// nThreads workers and runs them concurrently. It closes r.done once all
// workers have returned, or immediately if there is nothing to do.
//
// Fixes over the previous revision: the unused workerID closure parameter is
// dropped, and wg.Done is deferred so a panicking worker cannot wedge Wait.
func (r *prefetchStateReader) prefetch() {
	defer close(r.done)
	if len(r.tasks) == 0 {
		return
	}
	// Compute the total number of items to be distributed across workers.
	var total int
	for _, t := range r.tasks {
		total += t.weight()
	}
	var (
		wg   sync.WaitGroup
		unit = (total + r.nThreads - 1) / r.nThreads // round-up the per worker unit
	)
	for i := 0; i < r.nThreads; i++ {
		start := i * unit
		if start >= total {
			break // more workers than work; the remainder are unnecessary
		}
		limit := (i + 1) * unit
		if i == r.nThreads-1 {
			limit = total // the last worker absorbs the rounding remainder
		}
		// Schedule the worker for prefetching; the items in the range
		// [start, limit) are exclusively assigned to this worker.
		wg.Add(1)
		go func(start, limit int) {
			defer wg.Done()
			r.process(start, limit)
		}(start, limit)
	}
	wg.Wait()
}
// process prefetches the weighted item range [start, limit) assigned to one
// worker. Items are numbered consecutively across all tasks: within a task,
// item 0 is the account record itself and item j (j > 0) is storage slot j-1.
// Read errors are deliberately ignored — prefetching is best effort and the
// demand read path will surface any real failure. The worker bails out as
// soon as r.term is closed.
func (r *prefetchStateReader) process(start, limit int) {
	var total = 0
	for _, t := range r.tasks {
		tw := t.weight()
		if total+tw > start {
			// This task overlaps the assigned range; clamp the task-local
			// item window [s, l) against the worker's global [start, limit).
			s := 0
			if start > total {
				s = start - total
			}
			l := tw
			if limit < total+tw {
				l = limit - total
			}
			for j := s; j < l; j++ {
				select {
				case <-r.term:
					return
				default:
					if j == 0 {
						r.StateReader.Account(t.addr)
					} else {
						r.StateReader.Storage(t.addr, t.slots[j-1])
					}
				}
			}
		}
		total += tw
		if total >= limit {
			// Everything at or past this point belongs to another worker.
			return
		}
	}
}
// ReaderWithBlockLevelAccessList provides state access that reflects the
// pre-transition state combined with the mutations made by transactions
// prior to TxIndex.
type ReaderWithBlockLevelAccessList struct {
	Reader                          // base reader over the pre-transition state
	AccessList bal.AccessListReader // per-transaction mutations recorded for the block
	TxIndex    int                  // mutations from transactions before this index are visible
}

// NewReaderWithBlockLevelAccessList wraps base so that reads observe the
// block access list's mutations from all transactions preceding txIndex.
func NewReaderWithBlockLevelAccessList(base Reader, accessList bal.BlockAccessList, txIndex int) *ReaderWithBlockLevelAccessList {
	return &ReaderWithBlockLevelAccessList{
		Reader:     base,
		AccessList: bal.NewAccessListReader(accessList),
		TxIndex:    txIndex,
	}
}
// Account implements Reader, returning the account with the specific address.
// The result is the base state's account overlaid with any mutations recorded
// for transactions preceding TxIndex in the block access list.
func (r *ReaderWithBlockLevelAccessList) Account(addr common.Address) (*types.StateAccount, error) {
	acct, err := r.Reader.Account(addr)
	if err != nil {
		return nil, err
	}
	mut := r.AccessList.AccountMutations(addr, r.TxIndex)
	if mut == nil {
		return acct, nil
	}
	if acct == nil {
		acct = types.NewEmptyStateAccount()
	} else {
		// The account returned by the underlying reader is a shared reference;
		// copy it to avoid mutating the reader's cached instance.
		acct = acct.Copy()
	}
	if mut.Balance != nil {
		acct.Balance = mut.Balance
	}
	if mut.Code != nil {
		hash := crypto.Keccak256Hash(mut.Code)
		acct.CodeHash = hash[:]
	}
	if mut.Nonce != nil {
		acct.Nonce = *mut.Nonce
	}
	return acct, nil
}
// Storage implements Reader, returning the storage slot with the specific
// address and slot key. A value recorded in the block access list by a
// preceding transaction takes priority over the base state.
func (r *ReaderWithBlockLevelAccessList) Storage(addr common.Address, slot common.Hash) (common.Hash, error) {
	if val := r.AccessList.Storage(addr, slot, r.TxIndex); val != nil {
		return *val, nil
	}
	return r.Reader.Storage(addr, slot)
}
// Has implements Reader, returning the flag indicating whether the contract
// code with specified address and hash exists or not. A code mutation
// recorded in the access list for the account is authoritative.
func (r *ReaderWithBlockLevelAccessList) Has(addr common.Address, codeHash common.Hash) bool {
	if mut := r.AccessList.AccountMutations(addr, r.TxIndex); mut != nil && mut.Code != nil {
		return crypto.Keccak256Hash(mut.Code) == codeHash
	}
	return r.Reader.Has(addr, codeHash)
}
// Code implements Reader, returning the contract code with specified address
// and hash. Code deployed by a preceding transaction in the block is served
// straight from the access list when its hash matches.
func (r *ReaderWithBlockLevelAccessList) Code(addr common.Address, codeHash common.Hash) ([]byte, error) {
	mut := r.AccessList.AccountMutations(addr, r.TxIndex)
	if mut != nil && mut.Code != nil && crypto.Keccak256Hash(mut.Code) == codeHash {
		// TODO: need to copy here? The returned slice aliases the access
		// list's buffer, so a caller mutating it would corrupt the list.
		return mut.Code, nil
	}
	return r.Reader.Code(addr, codeHash)
}
// CodeSize implements Reader, returning the contract code size with specified
// address and hash, consulting the access list's recorded code first.
func (r *ReaderWithBlockLevelAccessList) CodeSize(addr common.Address, codeHash common.Hash) (int, error) {
	if mut := r.AccessList.AccountMutations(addr, r.TxIndex); mut != nil && mut.Code != nil && crypto.Keccak256Hash(mut.Code) == codeHash {
		return len(mut.Code), nil
	}
	return r.Reader.CodeSize(addr, codeHash)
}
// StateReaderTracker defines the capability to retrieve the access footprint
// recorded during state reading operations.
type StateReaderTracker interface {
	// GetStateAccessList returns every account and storage slot read through
	// the tracked reader so far.
	GetStateAccessList() bal.StateAccesses
}

// NewReaderWithTracker wraps r so that every account and storage read is
// recorded; the returned reader also implements StateReaderTracker.
func NewReaderWithTracker(r Reader) Reader {
	return newReaderTracker(r)
}

// readerTracker records the addresses and slots accessed through the wrapped
// reader. The footprint map is updated without synchronization, so a tracker
// must not be shared across goroutines.
type readerTracker struct {
	Reader
	access bal.StateAccesses // accessed addresses mapped to their touched slots
}

// newReaderTracker constructs a tracker around reader with an empty footprint.
func newReaderTracker(reader Reader) *readerTracker {
	return &readerTracker{
		Reader: reader,
		access: make(bal.StateAccesses),
	}
}
// Account implements StateReader, recording the account access locally before
// delegating to the wrapped reader.
func (r *readerTracker) Account(addr common.Address) (*types.StateAccount, error) {
	if _, ok := r.access[addr]; !ok {
		r.access[addr] = make(bal.StorageAccessList)
	}
	return r.Reader.Account(addr)
}
// Storage implements StateReader, recording the slot access under its owner
// account before delegating to the wrapped reader.
func (r *readerTracker) Storage(addr common.Address, slot common.Hash) (common.Hash, error) {
	list := r.access[addr]
	if list == nil {
		list = make(bal.StorageAccessList)
		r.access[addr] = list
	}
	list[slot] = struct{}{}
	return r.Reader.Storage(addr, slot)
}
// GetStateAccessList implements StateReaderTracker, returning the access
// footprint accumulated so far. The returned map is the tracker's internal
// state, not a copy.
func (r *readerTracker) GetStateAccessList() bal.StateAccesses {
	return r.access
}

View file

@ -0,0 +1,201 @@
// Copyright 2026 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package state
import (
"fmt"
"maps"
"math/rand"
"sync"
"testing"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/internal/testrand"
)
// countingStateReader is a thread-safe StateReader stub that tallies how many
// times each account and storage slot is resolved, so tests can assert that
// the prefetcher touches every item exactly once.
type countingStateReader struct {
	accounts map[common.Address]int
	storages map[common.Address]map[common.Hash]int
	lock     sync.Mutex
}

// newRefStateReader creates an empty counting reader.
func newRefStateReader() *countingStateReader {
	return &countingStateReader{
		accounts: make(map[common.Address]int),
		storages: make(map[common.Address]map[common.Hash]int),
	}
}
// validate checks that every recorded account and slot was accessed exactly
// once, that no storage access lacks a matching account access, and that the
// overall number of accesses equals total.
func (r *countingStateReader) validate(total int) error {
	sum := 0
	for addr, n := range r.accounts {
		if n != 1 {
			return fmt.Errorf("duplicated account access: %x-%d", addr, n)
		}
		sum++
		if slots, ok := r.storages[addr]; ok {
			for key, count := range slots {
				if count != 1 {
					return fmt.Errorf("duplicated storage access: %x-%x-%d", addr, key, count)
				}
				sum++
			}
		}
	}
	for addr := range r.storages {
		if _, ok := r.accounts[addr]; !ok {
			return fmt.Errorf("dangling storage access: %x", addr)
		}
	}
	if sum != total {
		return fmt.Errorf("unexpected number of access, want: %d, got: %d", total, sum)
	}
	return nil
}
// Account implements StateReader, counting how many times each address is
// resolved. The returned account is always nil.
func (r *countingStateReader) Account(addr common.Address) (*types.StateAccount, error) {
	r.lock.Lock()
	defer r.lock.Unlock()
	r.accounts[addr] += 1
	return nil, nil
}
// Storage implements StateReader, counting each (address, slot) resolution.
// The returned value is always the zero hash.
func (r *countingStateReader) Storage(addr common.Address, slot common.Hash) (common.Hash, error) {
	r.lock.Lock()
	defer r.lock.Unlock()
	slots := r.storages[addr]
	if slots == nil {
		slots = make(map[common.Hash]int)
		r.storages[addr] = slots
	}
	slots[slot]++
	return common.Hash{}, nil
}
// makeFetchTasks generates n random fetch tasks. Roughly two thirds of the
// tasks carry a random set of up to 99 storage slots; the rest are
// account-only. It returns the tasks together with their combined weight
// (one per account plus one per slot).
//
// Fix: the slot count is now drawn once per task. Previously rand.Intn(100)
// sat in the loop condition and was re-evaluated on every iteration, skewing
// the slot-count distribution and burning an RNG draw per slot.
func makeFetchTasks(n int) ([]*fetchTask, int) {
	var (
		total int
		tasks = make([]*fetchTask, 0, n)
	)
	for i := 0; i < n; i++ {
		var slots []common.Hash
		if rand.Intn(3) != 0 {
			count := rand.Intn(100)
			for j := 0; j < count; j++ {
				slots = append(slots, testrand.Hash())
			}
		}
		tasks = append(tasks, &fetchTask{
			addr:  testrand.Address(),
			slots: slots,
		})
		total += len(slots) + 1
	}
	return tasks, total
}
// TestPrefetchReader checks that the prefetcher resolves every account and
// storage slot exactly once across a variety of random task/thread
// combinations, including the degenerate case of more threads than tasks.
//
// Fix: the error returned by pr.Wait() was silently discarded; it is now
// checked like every other failure path in the test.
func TestPrefetchReader(t *testing.T) {
	type suite struct {
		tasks   []*fetchTask
		threads int
		total   int
	}
	var suites []suite
	for i := 0; i < 100; i++ {
		tasks, total := makeFetchTasks(100)
		suites = append(suites, suite{
			tasks:   tasks,
			threads: rand.Intn(30) + 1,
			total:   total,
		})
	}
	// num(tasks) < num(threads)
	tasks, total := makeFetchTasks(1)
	suites = append(suites, suite{
		tasks:   tasks,
		threads: 100,
		total:   total,
	})
	for _, s := range suites {
		r := newRefStateReader()
		pr := newPrefetchStateReaderInternal(r, s.tasks, s.threads)
		if err := pr.Wait(); err != nil {
			t.Fatal(err)
		}
		if err := r.validate(s.total); err != nil {
			t.Fatal(err)
		}
	}
}
// makeFakeSlots returns a set containing n random slot identifiers.
func makeFakeSlots(n int) map[common.Hash]struct{} {
	slots := make(map[common.Hash]struct{}, n)
	for i := 0; i < n; i++ {
		slots[testrand.Hash()] = struct{}{}
	}
	return slots
}
// noopStateReader is a StateReader stub whose reads always succeed with
// empty results.
type noopStateReader struct{}

func (r *noopStateReader) Account(addr common.Address) (*types.StateAccount, error) { return nil, nil }

func (r *noopStateReader) Storage(addr common.Address, slot common.Hash) (common.Hash, error) {
	return common.Hash{}, nil
}

// noopCodeReader is a code-reader stub that reports no code for any address;
// used only to satisfy newReader in tests.
type noopCodeReader struct{}

func (r *noopCodeReader) Has(addr common.Address, codeHash common.Hash) bool { return false }

func (r *noopCodeReader) Code(addr common.Address, codeHash common.Hash) ([]byte, error) {
	return nil, nil
}

func (r *noopCodeReader) CodeSize(addr common.Address, codeHash common.Hash) (int, error) {
	return 0, nil
}
// TestReaderWithTracker verifies that a tracked reader records exactly the
// accounts and storage slots that were read through it.
func TestReaderWithTracker(t *testing.T) {
	var r Reader = newReaderTracker(newReader(&noopCodeReader{}, &noopStateReader{}))
	// Two random accounts: one with storage reads, one account-only access.
	accesses := map[common.Address]map[common.Hash]struct{}{
		testrand.Address(): makeFakeSlots(10),
		testrand.Address(): makeFakeSlots(0),
	}
	for addr, slots := range accesses {
		r.Account(addr)
		for slot := range slots {
			r.Storage(addr, slot)
		}
	}
	// The recorded footprint must mirror the performed accesses exactly.
	got := r.(StateReaderTracker).GetStateAccessList()
	if len(got) != len(accesses) {
		t.Fatalf("Unexpected access list, want: %d, got: %d", len(accesses), len(got))
	}
	for addr, slots := range got {
		entry, ok := accesses[addr]
		if !ok {
			t.Fatal("Unexpected access list")
		}
		if !maps.Equal(slots, entry) {
			t.Fatal("Unexpected slots")
		}
	}
}

View file

@ -19,6 +19,7 @@ package state
import ( import (
"bytes" "bytes"
"fmt" "fmt"
"github.com/ethereum/go-ethereum/core/types/bal"
"maps" "maps"
"slices" "slices"
"time" "time"
@ -54,6 +55,9 @@ type stateObject struct {
origin *types.StateAccount // Account original data without any change applied, nil means it was not existent origin *types.StateAccount // Account original data without any change applied, nil means it was not existent
data types.StateAccount // Account data with all mutations applied in the scope of block data types.StateAccount // Account data with all mutations applied in the scope of block
txPreBalance *uint256.Int // the account balance after the last call to finalise
txPreNonce uint64 // the account nonce after the last call to finalise
// Write caches. // Write caches.
trie Trie // storage trie, which becomes non-nil on first access trie Trie // storage trie, which becomes non-nil on first access
code []byte // contract bytecode, which gets set when code is loaded code []byte // contract bytecode, which gets set when code is loaded
@ -76,6 +80,9 @@ type stateObject struct {
// Cache flags. // Cache flags.
dirtyCode bool // true if the code was updated dirtyCode bool // true if the code was updated
nonFinalizedCode bool // true if the code has been changed in the current transaction
txPrestateCode []byte // set to the value of the code at the beginning of the transaction if it changed in the current transaction
// Flag whether the account was marked as self-destructed. The self-destructed // Flag whether the account was marked as self-destructed. The self-destructed
// account is still accessible in the scope of same transaction. // account is still accessible in the scope of same transaction.
selfDestructed bool selfDestructed bool
@ -85,6 +92,8 @@ type stateObject struct {
// the contract is just created within the current transaction, or when the // the contract is just created within the current transaction, or when the
// object was previously existent and is being deployed as a contract within // object was previously existent and is being deployed as a contract within
// the current transaction. // the current transaction.
//
// the flag is set upon beginning of contract initcode execution, not when the code is actually deployed to the address.
newContract bool newContract bool
} }
@ -104,6 +113,8 @@ func newObject(db *StateDB, address common.Address, acct *types.StateAccount) *s
address: address, address: address,
origin: origin, origin: origin,
data: *acct, data: *acct,
txPreBalance: acct.Balance.Clone(),
txPreNonce: acct.Nonce,
originStorage: make(Storage), originStorage: make(Storage),
dirtyStorage: make(Storage), dirtyStorage: make(Storage),
pendingStorage: make(Storage), pendingStorage: make(Storage),
@ -185,6 +196,7 @@ func (s *stateObject) GetCommittedState(key common.Hash) common.Hash {
if value, pending := s.pendingStorage[key]; pending { if value, pending := s.pendingStorage[key]; pending {
return value return value
} }
if value, cached := s.originStorage[key]; cached { if value, cached := s.originStorage[key]; cached {
return value return value
} }
@ -240,6 +252,7 @@ func (s *stateObject) SetState(key, value common.Hash) common.Hash {
if prev == value { if prev == value {
return prev return prev
} }
// New value is different, update and journal the change // New value is different, update and journal the change
s.db.journal.storageChange(s.address, key, prev, origin) s.db.journal.storageChange(s.address, key, prev, origin)
s.setState(key, value, origin) s.setState(key, value, origin)
@ -259,22 +272,64 @@ func (s *stateObject) setState(key common.Hash, value common.Hash, origin common
// finalise moves all dirty storage slots into the pending area to be hashed or // finalise moves all dirty storage slots into the pending area to be hashed or
// committed later. It is invoked at the end of every transaction. // committed later. It is invoked at the end of every transaction.
func (s *stateObject) finalise() { func (s *stateObject) finalise() (mut *bal.AccountMutations) {
mut = &bal.AccountMutations{}
if s.Balance().Cmp(s.txPreBalance) != 0 {
mut.Balance = s.Balance()
}
if s.Nonce() != s.txPreNonce {
mut.Nonce = new(uint64)
*mut.Nonce = s.Nonce()
}
// include account code changes: created contracts and 7702 delegation authority code changes
if s.nonFinalizedCode {
if s.code == nil {
// code cleared (7702). code must be non-nil in the post to signal that it's part of the diff vs being unchanged.
mut.Code = []byte{}
} else {
mut.Code = s.code
}
}
mut.StorageWrites = make(map[common.Hash]common.Hash)
slotsToPrefetch := make([]common.Hash, 0, len(s.dirtyStorage)) slotsToPrefetch := make([]common.Hash, 0, len(s.dirtyStorage))
for key, value := range s.dirtyStorage { for key, value := range s.dirtyStorage {
if origin, exist := s.uncommittedStorage[key]; exist && origin == value { if origin, exist := s.uncommittedStorage[key]; exist && origin == value {
// non-parallel-execution:
// The slot is reverted to its original value, delete the entry // The slot is reverted to its original value, delete the entry
// to avoid thrashing the data structures. // to avoid thrashing the data structures.
//
// parallel-exec-with-BAL:
// each statedb instance only executes a single transaction so the previous value
// of the slot won't be in uncommittedStorage
txPrestateVal := s.GetCommittedState(key)
if txPrestateVal != value {
mut.StorageWrites[key] = value
}
delete(s.uncommittedStorage, key) delete(s.uncommittedStorage, key)
} else if exist { } else if exist {
// non-parallel-execution:
// The slot is modified to another value and the slot has been // The slot is modified to another value and the slot has been
// tracked for commit, do nothing here. // tracked for commit in uncommittedStorage.
//
// parallel-exec-with-BAL:
// each statedb instance only executes a single transaction so the previous value
// of the slot won't be in uncommittedStorage
mut.StorageWrites[key] = value
} else { } else {
// The slot is different from its original value and hasn't been // The slot is different from its original value and hasn't been
// tracked for commit yet. // tracked for commit yet.
s.uncommittedStorage[key] = s.GetCommittedState(key) // Whether executing parallel with BAL or not, the value of the slot before the execution
// of the current transaction is in originStorage
origin := s.GetCommittedState(key)
if value != origin {
mut.StorageWrites[key] = value
}
s.uncommittedStorage[key] = origin
slotsToPrefetch = append(slotsToPrefetch, key) // Copy needed for closure slotsToPrefetch = append(slotsToPrefetch, key) // Copy needed for closure
} }
// Aggregate the dirty storage slots into the pending area. It might // Aggregate the dirty storage slots into the pending area. It might
// be possible that the value of tracked slot here is same with the // be possible that the value of tracked slot here is same with the
// one in originStorage (e.g. the slot was modified in tx_a and then // one in originStorage (e.g. the slot was modified in tx_a and then
@ -283,6 +338,7 @@ func (s *stateObject) finalise() {
// byzantium fork) and entry is necessary to modify the value back. // byzantium fork) and entry is necessary to modify the value back.
s.pendingStorage[key] = value s.pendingStorage[key] = value
} }
if s.db.prefetcher != nil && len(slotsToPrefetch) > 0 && s.data.Root != types.EmptyRootHash { if s.db.prefetcher != nil && len(slotsToPrefetch) > 0 && s.data.Root != types.EmptyRootHash {
if err := s.db.prefetcher.prefetch(s.addrHash(), s.data.Root, s.address, nil, slotsToPrefetch, false); err != nil { if err := s.db.prefetcher.prefetch(s.addrHash(), s.data.Root, s.address, nil, slotsToPrefetch, false); err != nil {
log.Error("Failed to prefetch slots", "addr", s.address, "slots", len(slotsToPrefetch), "err", err) log.Error("Failed to prefetch slots", "addr", s.address, "slots", len(slotsToPrefetch), "err", err)
@ -295,6 +351,18 @@ func (s *stateObject) finalise() {
// of the newly-created object as it's no longer eligible for self-destruct // of the newly-created object as it's no longer eligible for self-destruct
// by EIP-6780. For non-newly-created objects, it's a no-op. // by EIP-6780. For non-newly-created objects, it's a no-op.
s.newContract = false s.newContract = false
s.nonFinalizedCode = false
s.txPrestateCode = nil
// TODO: I had a bug here where i would set both of these to the value of s.data.* and there were no test failures. need to figure out why.
s.txPreBalance = s.Balance().Clone()
s.txPreNonce = s.Nonce()
if mut.Nonce == nil && mut.Code == nil && mut.Balance == nil && len(mut.StorageWrites) == 0 {
return nil
}
return mut
} }
// updateTrie is responsible for persisting cached storage changes into the // updateTrie is responsible for persisting cached storage changes into the
@ -314,6 +382,7 @@ func (s *stateObject) updateTrie() (Trie, error) {
return s.trie, nil return s.trie, nil
} }
} }
// Retrieve a pretecher populated trie, or fall back to the database. This will // Retrieve a pretecher populated trie, or fall back to the database. This will
// block until all prefetch tasks are done, which are needed for witnesses even // block until all prefetch tasks are done, which are needed for witnesses even
// for unmodified state objects. // for unmodified state objects.
@ -347,6 +416,8 @@ func (s *stateObject) updateTrie() (Trie, error) {
var ( var (
deletions []common.Hash deletions []common.Hash
used = make([]common.Hash, 0, len(s.uncommittedStorage)) used = make([]common.Hash, 0, len(s.uncommittedStorage))
updateKeys [][]byte
updateValues [][]byte
) )
for key, origin := range s.uncommittedStorage { for key, origin := range s.uncommittedStorage {
// Skip noop changes, persist actual changes // Skip noop changes, persist actual changes
@ -360,10 +431,8 @@ func (s *stateObject) updateTrie() (Trie, error) {
continue continue
} }
if (value != common.Hash{}) { if (value != common.Hash{}) {
if err := tr.UpdateStorage(s.address, key[:], common.TrimLeftZeroes(value[:])); err != nil { updateKeys = append(updateKeys, key[:])
s.db.setError(err) updateValues = append(updateValues, common.TrimLeftZeroes(value[:]))
return nil, err
}
s.db.StorageUpdated.Add(1) s.db.StorageUpdated.Add(1)
} else { } else {
deletions = append(deletions, key) deletions = append(deletions, key)
@ -371,6 +440,12 @@ func (s *stateObject) updateTrie() (Trie, error) {
// Cache the items for preloading // Cache the items for preloading
used = append(used, key) // Copy needed for closure used = append(used, key) // Copy needed for closure
} }
if len(updateKeys) > 0 {
if err := tr.UpdateStorageBatch(common.Address{}, updateKeys, updateValues); err != nil {
s.db.setError(err)
return nil, err
}
}
for _, key := range deletions { for _, key := range deletions {
if err := tr.DeleteStorage(s.address, key[:]); err != nil { if err := tr.DeleteStorage(s.address, key[:]); err != nil {
s.db.setError(err) s.db.setError(err)
@ -530,6 +605,8 @@ func (s *stateObject) deepCopy(db *StateDB) *stateObject {
dirtyCode: s.dirtyCode, dirtyCode: s.dirtyCode,
selfDestructed: s.selfDestructed, selfDestructed: s.selfDestructed,
newContract: s.newContract, newContract: s.newContract,
txPreBalance: s.txPreBalance.Clone(),
txPreNonce: s.txPreNonce,
} }
switch s.trie.(type) { switch s.trie.(type) {
@ -606,13 +683,25 @@ func (s *stateObject) SetCode(codeHash common.Hash, code []byte) (prev []byte) {
prev = slices.Clone(s.code) prev = slices.Clone(s.code)
s.db.journal.setCode(s.address, prev) s.db.journal.setCode(s.address, prev)
s.setCode(codeHash, code) s.setCode(codeHash, code)
if s.txPrestateCode == nil {
if prev == nil {
prev = []byte{}
}
s.txPrestateCode = prev
}
if !bytes.Equal(code, s.txPrestateCode) {
s.dirtyCode = true
s.nonFinalizedCode = true
} else {
s.nonFinalizedCode = false
}
return prev return prev
} }
func (s *stateObject) setCode(codeHash common.Hash, code []byte) { func (s *stateObject) setCode(codeHash common.Hash, code []byte) {
s.code = code s.code = code
s.data.CodeHash = codeHash[:] s.data.CodeHash = codeHash[:]
s.dirtyCode = true
} }
func (s *stateObject) SetNonce(nonce uint64) { func (s *stateObject) SetNonce(nonce uint64) {

View file

@ -20,6 +20,7 @@ package state
import ( import (
"errors" "errors"
"fmt" "fmt"
"iter"
"maps" "maps"
"slices" "slices"
"sort" "sort"
@ -27,6 +28,9 @@ import (
"sync/atomic" "sync/atomic"
"time" "time"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/types/bal"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/state/snapshot" "github.com/ethereum/go-ethereum/core/state/snapshot"
"github.com/ethereum/go-ethereum/core/stateless" "github.com/ethereum/go-ethereum/core/stateless"
@ -64,6 +68,14 @@ func (m *mutation) isDelete() bool {
return m.typ == deletion return m.typ == deletion
} }
type BlockStateTransition interface {
CommitWithUpdate(block uint64, deleteEmptyObjects bool, noStorageWiping bool) (common.Hash, *stateUpdate, error)
Commit(block uint64, deleteEmptyObjects bool, noStorageWiping bool) (common.Hash, error)
IntermediateRoot(deleteEmpty bool) common.Hash
Error() error
Preimages() map[common.Hash][]byte
}
// StateDB structs within the ethereum protocol are used to store anything // StateDB structs within the ethereum protocol are used to store anything
// within the merkle trie. StateDBs take care of caching and storing // within the merkle trie. StateDBs take care of caching and storing
// nested states. It's the general query interface to retrieve: // nested states. It's the general query interface to retrieve:
@ -117,6 +129,13 @@ type StateDB struct {
// The tx context and all occurred logs in the scope of transaction. // The tx context and all occurred logs in the scope of transaction.
thash common.Hash thash common.Hash
txIndex int txIndex int
// block access list modifications will be recorded with this index.
// 0 - state access before transaction execution
// 1 -> len(block txs) - state access of each transaction
// len(block txs) + 1 - state access after transaction execution.
balIndex int
logs map[common.Hash][]*types.Log logs map[common.Hash][]*types.Log
logSize uint logSize uint
@ -198,6 +217,13 @@ func NewWithReader(root common.Hash, db Database, reader Reader) (*StateDB, erro
return sdb, nil return sdb, nil
} }
// WithReader returns a copy of the statedb instance with the specified reader.
func (s *StateDB) WithReader(reader Reader) *StateDB {
cpy := s.Copy()
cpy.reader = reader
return cpy
}
// StartPrefetcher initializes a new trie prefetcher to pull in nodes from the // StartPrefetcher initializes a new trie prefetcher to pull in nodes from the
// state trie concurrently while the state is mutated so that when we reach the // state trie concurrently while the state is mutated so that when we reach the
// commit phase, most of the needed data is already hot. // commit phase, most of the needed data is already hot.
@ -313,6 +339,11 @@ func (s *StateDB) Exist(addr common.Address) bool {
return s.getStateObject(addr) != nil return s.getStateObject(addr) != nil
} }
func (s *StateDB) ExistBeforeCurTx(addr common.Address) bool {
obj := s.getStateObject(addr)
return obj != nil && !obj.newContract
}
// Empty returns whether the state object is either non-existent // Empty returns whether the state object is either non-existent
// or empty according to the EIP161 specification (balance = nonce = code = 0) // or empty according to the EIP161 specification (balance = nonce = code = 0)
func (s *StateDB) Empty(addr common.Address) bool { func (s *StateDB) Empty(addr common.Address) bool {
@ -568,6 +599,25 @@ func (s *StateDB) updateStateObject(obj *stateObject) {
s.trie.UpdateContractCode(obj.Address(), common.BytesToHash(obj.CodeHash()), obj.code) s.trie.UpdateContractCode(obj.Address(), common.BytesToHash(obj.CodeHash()), obj.code)
} }
} }
func (s *StateDB) updateStateObjects(objs []*stateObject) {
var addrs []common.Address
var accts []*types.StateAccount
for _, obj := range objs {
addrs = append(addrs, obj.Address())
accts = append(accts, &obj.data)
}
if err := s.trie.UpdateAccountBatch(addrs, accts, nil); err != nil {
s.setError(fmt.Errorf("updateStateObjects error: %v", err))
}
for _, obj := range objs {
if obj.dirtyCode {
s.trie.UpdateContractCode(obj.Address(), common.BytesToHash(obj.CodeHash()), obj.code)
}
}
}
// deleteStateObject removes the given object from the state trie. // deleteStateObject removes the given object from the state trie.
func (s *StateDB) deleteStateObject(addr common.Address) { func (s *StateDB) deleteStateObject(addr common.Address) {
@ -587,6 +637,7 @@ func (s *StateDB) getStateObject(addr common.Address) *stateObject {
if _, ok := s.stateObjectsDestruct[addr]; ok { if _, ok := s.stateObjectsDestruct[addr]; ok {
return nil return nil
} }
s.AccountLoaded++ s.AccountLoaded++
start := time.Now() start := time.Now()
@ -623,6 +674,7 @@ func (s *StateDB) getOrNewStateObject(addr common.Address) *stateObject {
if obj == nil { if obj == nil {
obj = s.createObject(addr) obj = s.createObject(addr)
} }
return obj return obj
} }
@ -681,6 +733,7 @@ func (s *StateDB) Copy() *StateDB {
refund: s.refund, refund: s.refund,
thash: s.thash, thash: s.thash,
txIndex: s.txIndex, txIndex: s.txIndex,
balIndex: s.txIndex,
logs: make(map[common.Hash][]*types.Log, len(s.logs)), logs: make(map[common.Hash][]*types.Log, len(s.logs)),
logSize: s.logSize, logSize: s.logSize,
preimages: maps.Clone(s.preimages), preimages: maps.Clone(s.preimages),
@ -781,8 +834,10 @@ func (s *StateDB) EmitLogsForBurnAccounts() {
// Finalise finalises the state by removing the destructed objects and clears // Finalise finalises the state by removing the destructed objects and clears
// the journal as well as the refunds. Finalise, however, will not push any updates // the journal as well as the refunds. Finalise, however, will not push any updates
// into the tries just yet. Only IntermediateRoot or Commit will do that. // into the tries just yet. Only IntermediateRoot or Commit will do that.
func (s *StateDB) Finalise(deleteEmptyObjects bool) { func (s *StateDB) Finalise(deleteEmptyObjects bool) (mutations bal.StateMutations) {
addressesToPrefetch := make([]common.Address, 0, len(s.journal.dirties)) addressesToPrefetch := make([]common.Address, 0, len(s.journal.dirties))
mutations = make(bal.StateMutations)
for addr := range s.journal.dirties { for addr := range s.journal.dirties {
obj, exist := s.stateObjects[addr] obj, exist := s.stateObjects[addr]
if !exist { if !exist {
@ -803,8 +858,19 @@ func (s *StateDB) Finalise(deleteEmptyObjects bool) {
if _, ok := s.stateObjectsDestruct[obj.address]; !ok { if _, ok := s.stateObjectsDestruct[obj.address]; !ok {
s.stateObjectsDestruct[obj.address] = obj s.stateObjectsDestruct[obj.address] = obj
} }
// a pre-existing account can only be removed from the state under the following circumstance:
// it had a balance and was the target of a create2 which selfdestructed in the initcode
if !obj.txPreBalance.IsZero() {
mutations[addr] = bal.AccountMutations{
Balance: uint256.NewInt(0),
}
}
} else { } else {
obj.finalise() mut := obj.finalise()
if mut != nil {
mutations[addr] = *mut
}
s.markUpdate(addr) s.markUpdate(addr)
} }
// At this point, also ship the address off to the precacher. The precacher // At this point, also ship the address off to the precacher. The precacher
@ -819,6 +885,7 @@ func (s *StateDB) Finalise(deleteEmptyObjects bool) {
} }
// Invalidate journal because reverting across transactions is not allowed. // Invalidate journal because reverting across transactions is not allowed.
s.clearJournalAndRefund() s.clearJournalAndRefund()
return mutations
} }
// IntermediateRoot computes the current root hash of the state trie. // IntermediateRoot computes the current root hash of the state trie.
@ -999,6 +1066,7 @@ func (s *StateDB) IntermediateRoot(deleteEmptyObjects bool) common.Hash {
var ( var (
usedAddrs []common.Address usedAddrs []common.Address
deletedAddrs []common.Address deletedAddrs []common.Address
updatedObjs []*stateObject
) )
for addr, op := range s.mutations { for addr, op := range s.mutations {
if op.applied { if op.applied {
@ -1010,7 +1078,7 @@ func (s *StateDB) IntermediateRoot(deleteEmptyObjects bool) common.Hash {
deletedAddrs = append(deletedAddrs, addr) deletedAddrs = append(deletedAddrs, addr)
} else { } else {
obj := s.stateObjects[addr] obj := s.stateObjects[addr]
s.updateStateObject(obj) updatedObjs = append(updatedObjs, obj)
s.AccountUpdated += 1 s.AccountUpdated += 1
// Count code writes post-Finalise so reverted CREATEs are excluded. // Count code writes post-Finalise so reverted CREATEs are excluded.
@ -1021,6 +1089,9 @@ func (s *StateDB) IntermediateRoot(deleteEmptyObjects bool) common.Hash {
} }
usedAddrs = append(usedAddrs, addr) // Copy needed for closure usedAddrs = append(usedAddrs, addr) // Copy needed for closure
} }
if len(updatedObjs) > 0 {
s.updateStateObjects(updatedObjs)
}
for _, deletedAddr := range deletedAddrs { for _, deletedAddr := range deletedAddrs {
s.deleteStateObject(deletedAddr) s.deleteStateObject(deletedAddr)
s.AccountDeleted += 1 s.AccountDeleted += 1
@ -1032,9 +1103,7 @@ func (s *StateDB) IntermediateRoot(deleteEmptyObjects bool) common.Hash {
} }
// Track the amount of time wasted on hashing the account trie // Track the amount of time wasted on hashing the account trie
defer func(start time.Time) { s.AccountHashes += time.Since(start) }(time.Now()) defer func(start time.Time) { s.AccountHashes += time.Since(start) }(time.Now())
hash := s.trie.Hash() hash := s.trie.Hash()
// If witness building is enabled, gather the account trie witness // If witness building is enabled, gather the account trie witness
if s.witness != nil { if s.witness != nil {
witness := s.trie.Witness() witness := s.trie.Witness()
@ -1043,6 +1112,7 @@ func (s *StateDB) IntermediateRoot(deleteEmptyObjects bool) common.Hash {
s.witnessStats.Add(witness, common.Hash{}) s.witnessStats.Add(witness, common.Hash{})
} }
} }
return hash return hash
} }
@ -1052,6 +1122,14 @@ func (s *StateDB) IntermediateRoot(deleteEmptyObjects bool) common.Hash {
func (s *StateDB) SetTxContext(thash common.Hash, ti int) { func (s *StateDB) SetTxContext(thash common.Hash, ti int) {
s.thash = thash s.thash = thash
s.txIndex = ti s.txIndex = ti
s.balIndex = ti + 1
}
// SetAccessListIndex sets the current index that state mutations will
// be reported as in the BAL. It is only relevant if this StateDB instance
// is being used in the BAL construction path.
func (s *StateDB) SetAccessListIndex(idx int) {
s.balIndex = idx
} }
func (s *StateDB) clearJournalAndRefund() { func (s *StateDB) clearJournalAndRefund() {
@ -1063,8 +1141,8 @@ func (s *StateDB) clearJournalAndRefund() {
// of a specific account. It leverages the associated state snapshot for fast // of a specific account. It leverages the associated state snapshot for fast
// storage iteration and constructs trie node deletion markers by creating // storage iteration and constructs trie node deletion markers by creating
// stack trie with iterated slots. // stack trie with iterated slots.
func (s *StateDB) fastDeleteStorage(snaps *snapshot.Tree, addrHash common.Hash, root common.Hash) (map[common.Hash][]byte, map[common.Hash][]byte, *trienode.NodeSet, error) { func fastDeleteStorage(originalRoot common.Hash, snaps *snapshot.Tree, addrHash common.Hash, root common.Hash) (map[common.Hash][]byte, map[common.Hash][]byte, *trienode.NodeSet, error) {
iter, err := snaps.StorageIterator(s.originalRoot, addrHash, common.Hash{}) iter, err := snaps.StorageIterator(originalRoot, addrHash, common.Hash{})
if err != nil { if err != nil {
return nil, nil, nil, err return nil, nil, nil, err
} }
@ -1103,8 +1181,8 @@ func (s *StateDB) fastDeleteStorage(snaps *snapshot.Tree, addrHash common.Hash,
// slowDeleteStorage serves as a less-efficient alternative to "fastDeleteStorage," // slowDeleteStorage serves as a less-efficient alternative to "fastDeleteStorage,"
// employed when the associated state snapshot is not available. It iterates the // employed when the associated state snapshot is not available. It iterates the
// storage slots along with all internal trie nodes via trie directly. // storage slots along with all internal trie nodes via trie directly.
func (s *StateDB) slowDeleteStorage(addr common.Address, addrHash common.Hash, root common.Hash) (map[common.Hash][]byte, map[common.Hash][]byte, *trienode.NodeSet, error) { func slowDeleteStorage(db Database, trie Trie, originalRoot common.Hash, addr common.Address, addrHash common.Hash, root common.Hash) (map[common.Hash][]byte, map[common.Hash][]byte, *trienode.NodeSet, error) {
tr, err := s.db.OpenStorageTrie(s.originalRoot, addr, root, s.trie) tr, err := db.OpenStorageTrie(originalRoot, addr, root, trie)
if err != nil { if err != nil {
return nil, nil, nil, fmt.Errorf("failed to open storage trie, err: %w", err) return nil, nil, nil, fmt.Errorf("failed to open storage trie, err: %w", err)
} }
@ -1139,7 +1217,7 @@ func (s *StateDB) slowDeleteStorage(addr common.Address, addrHash common.Hash, r
// The function will make an attempt to utilize an efficient strategy if the // The function will make an attempt to utilize an efficient strategy if the
// associated state snapshot is reachable; otherwise, it will resort to a less // associated state snapshot is reachable; otherwise, it will resort to a less
// efficient approach. // efficient approach.
func (s *StateDB) deleteStorage(addr common.Address, addrHash common.Hash, root common.Hash) (map[common.Hash][]byte, map[common.Hash][]byte, *trienode.NodeSet, error) { func deleteStorage(db Database, trie Trie, addr common.Address, addrHash common.Hash, root, originalRoot common.Hash) (map[common.Hash][]byte, map[common.Hash][]byte, *trienode.NodeSet, error) {
var ( var (
err error err error
nodes *trienode.NodeSet // the set for trie node mutations (value is nil) nodes *trienode.NodeSet // the set for trie node mutations (value is nil)
@ -1149,12 +1227,12 @@ func (s *StateDB) deleteStorage(addr common.Address, addrHash common.Hash, root
// The fast approach can be failed if the snapshot is not fully // The fast approach can be failed if the snapshot is not fully
// generated, or it's internally corrupted. Fallback to the slow // generated, or it's internally corrupted. Fallback to the slow
// one just in case. // one just in case.
snaps := s.db.Snapshot() snaps := db.Snapshot()
if snaps != nil { if snaps != nil {
storages, storageOrigins, nodes, err = s.fastDeleteStorage(snaps, addrHash, root) storages, storageOrigins, nodes, err = fastDeleteStorage(originalRoot, snaps, addrHash, root)
} }
if snaps == nil || err != nil { if snaps == nil || err != nil {
storages, storageOrigins, nodes, err = s.slowDeleteStorage(addr, addrHash, root) storages, storageOrigins, nodes, err = slowDeleteStorage(db, trie, originalRoot, addr, addrHash, root)
} }
if err != nil { if err != nil {
return nil, nil, nil, err return nil, nil, nil, err
@ -1180,39 +1258,38 @@ func (s *StateDB) deleteStorage(addr common.Address, addrHash common.Hash, root
// with their values be tracked as original value. // with their values be tracked as original value.
// In case (d), **original** account along with its storages should be deleted, // In case (d), **original** account along with its storages should be deleted,
// with their values be tracked as original value. // with their values be tracked as original value.
func (s *StateDB) handleDestruction(noStorageWiping bool) (map[common.Hash]*accountDelete, []*trienode.NodeSet, error) { func handleDestruction(db Database, trie Trie, noStorageWiping bool, destructions iter.Seq[common.Address], prestates map[common.Address]*types.StateAccount) (map[common.Hash]*accountDelete, []*trienode.NodeSet, error) {
var ( var (
nodes []*trienode.NodeSet nodes []*trienode.NodeSet
deletes = make(map[common.Hash]*accountDelete) deletes = make(map[common.Hash]*accountDelete)
) )
for addr, prevObj := range s.stateObjectsDestruct { for addr := range destructions {
prev := prevObj.origin prestate := prestates[addr]
// The account was non-existent, and it's marked as destructed in the scope // The account was non-existent, and it's marked as destructed in the scope
// of block. It can be either case (a) or (b) and will be interpreted as // of block. It can be either case (a) or (b) and will be interpreted as
// null->null state transition. // null->null state transition.
// - for (a), skip it without doing anything // - for (a), skip it without doing anything
// - for (b), the resurrected account with nil as original will be handled afterwards // - for (b), the resurrected account with nil as original will be handled afterwards
if prev == nil { if prestate == nil {
continue continue
} }
// The account was existent, it can be either case (c) or (d). // The account was existent, it can be either case (c) or (d).
addrHash := crypto.Keccak256Hash(addr.Bytes()) addrHash := crypto.Keccak256Hash(addr.Bytes())
op := &accountDelete{ op := &accountDelete{
address: addr, address: addr,
origin: types.SlimAccountRLP(*prev), origin: types.SlimAccountRLP(*prestate),
} }
deletes[addrHash] = op deletes[addrHash] = op
// Short circuit if the origin storage was empty. // Short circuit if the origin storage was empty.
if prev.Root == types.EmptyRootHash || s.db.TrieDB().IsVerkle() { if prestate.Root == types.EmptyRootHash || db.TrieDB().IsVerkle() {
continue continue
} }
if noStorageWiping { if noStorageWiping {
return nil, nil, fmt.Errorf("unexpected storage wiping, %x", addr) return nil, nil, fmt.Errorf("unexpected storage wiping, %x", addr)
} }
// Remove storage slots belonging to the account. // Remove storage slots belonging to the account.
storages, storagesOrigin, set, err := s.deleteStorage(addr, addrHash, prev.Root) storages, storagesOrigin, set, err := deleteStorage(db, trie, addr, addrHash, prestate.Root, prestate.Root)
if err != nil { if err != nil {
return nil, nil, fmt.Errorf("failed to delete storage, err: %w", err) return nil, nil, fmt.Errorf("failed to delete storage, err: %w", err)
} }
@ -1237,6 +1314,7 @@ func (s *StateDB) commit(deleteEmptyObjects bool, noStorageWiping bool, blockNum
if s.dbErr != nil { if s.dbErr != nil {
return nil, fmt.Errorf("commit aborted due to earlier error: %v", s.dbErr) return nil, fmt.Errorf("commit aborted due to earlier error: %v", s.dbErr)
} }
// Finalize any pending changes and merge everything into the tries // Finalize any pending changes and merge everything into the tries
s.IntermediateRoot(deleteEmptyObjects) s.IntermediateRoot(deleteEmptyObjects)
@ -1286,7 +1364,12 @@ func (s *StateDB) commit(deleteEmptyObjects bool, noStorageWiping bool, blockNum
// the same block, account deletions must be processed first. This ensures // the same block, account deletions must be processed first. This ensures
// that the storage trie nodes deleted during destruction and recreated // that the storage trie nodes deleted during destruction and recreated
// during subsequent resurrection can be combined correctly. // during subsequent resurrection can be combined correctly.
deletes, delNodes, err := s.handleDestruction(noStorageWiping) var stateAccountsDestruct, destructAccountsOrigins = make(map[common.Address]*types.StateAccount), make(map[common.Address]*types.StateAccount)
for addr, obj := range s.stateObjectsDestruct {
stateAccountsDestruct[addr] = &obj.data
destructAccountsOrigins[addr] = obj.origin
}
deletes, delNodes, err := handleDestruction(s.db, s.trie, noStorageWiping, maps.Keys(stateAccountsDestruct), destructAccountsOrigins)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -1387,6 +1470,44 @@ func (s *StateDB) commit(deleteEmptyObjects bool, noStorageWiping bool, blockNum
return newStateUpdate(noStorageWiping, origin, root, blockNumber, deletes, updates, nodes), nil return newStateUpdate(noStorageWiping, origin, root, blockNumber, deletes, updates, nodes), nil
} }
func flushStateUpdate(d Database, block uint64, update *stateUpdate) (snapshotCommits, trieDBCommits time.Duration, err error) {
if db := d.TrieDB().Disk(); db != nil && len(update.codes) > 0 {
batch := db.NewBatch()
for _, code := range update.codes {
rawdb.WriteCode(batch, code.hash, code.blob)
}
if err := batch.Write(); err != nil {
return 0, 0, err
}
}
if !update.empty() {
// If snapshotting is enabled, update the snapshot tree with this new version
if snap := d.Snapshot(); snap != nil && snap.Snapshot(update.originRoot) != nil {
start := time.Now()
if err := snap.Update(update.root, update.originRoot, update.accounts, update.storages); err != nil {
log.Warn("Failed to update snapshot tree", "from", update.originRoot, "to", update.root, "err", err)
}
// Keep 128 diff layers in the memory, persistent layer is 129th.
// - head layer is paired with HEAD state
// - head-1 layer is paired with HEAD-1 state
// - head-127 layer(bottom-most diff layer) is paired with HEAD-127 state
if err := snap.Cap(update.root, TriesInMemory); err != nil {
log.Warn("Failed to cap snapshot tree", "root", update.root, "layers", TriesInMemory, "err", err)
}
snapshotCommits += time.Since(start)
}
// If trie database is enabled, commit the state update as a new layer
if db := d.TrieDB(); db != nil {
start := time.Now()
if err := db.Update(update.root, update.originRoot, block, update.nodes, update.stateSet()); err != nil {
return 0, 0, err
}
trieDBCommits += time.Since(start)
}
}
return snapshotCommits, trieDBCommits, nil
}
// commitAndFlush is a wrapper of commit which also commits the state mutations // commitAndFlush is a wrapper of commit which also commits the state mutations
// to the configured data stores. // to the configured data stores.
func (s *StateDB) commitAndFlush(block uint64, deleteEmptyObjects bool, noStorageWiping bool, deriveCodeFields bool) (*stateUpdate, error) { func (s *StateDB) commitAndFlush(block uint64, deleteEmptyObjects bool, noStorageWiping bool, deriveCodeFields bool) (*stateUpdate, error) {
@ -1394,6 +1515,7 @@ func (s *StateDB) commitAndFlush(block uint64, deleteEmptyObjects bool, noStorag
if err != nil { if err != nil {
return nil, err return nil, err
} }
// TODO: move this check inside flushStateUpdate?
if deriveCodeFields { if deriveCodeFields {
if err := ret.deriveCodeFields(s.reader); err != nil { if err := ret.deriveCodeFields(s.reader); err != nil {
return nil, err return nil, err

View file

@ -21,6 +21,8 @@ import (
"math/big" "math/big"
"sort" "sort"
"github.com/ethereum/go-ethereum/core/types/bal"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/stateless" "github.com/ethereum/go-ethereum/core/stateless"
"github.com/ethereum/go-ethereum/core/tracing" "github.com/ethereum/go-ethereum/core/tracing"
@ -59,22 +61,37 @@ func (s *hookedStateDB) IsNewContract(addr common.Address) bool {
} }
func (s *hookedStateDB) GetBalance(addr common.Address) *uint256.Int { func (s *hookedStateDB) GetBalance(addr common.Address) *uint256.Int {
if s.hooks.OnAccountRead != nil {
s.hooks.OnAccountRead(addr)
}
return s.inner.GetBalance(addr) return s.inner.GetBalance(addr)
} }
func (s *hookedStateDB) GetNonce(addr common.Address) uint64 { func (s *hookedStateDB) GetNonce(addr common.Address) uint64 {
if s.hooks.OnAccountRead != nil {
s.hooks.OnAccountRead(addr)
}
return s.inner.GetNonce(addr) return s.inner.GetNonce(addr)
} }
func (s *hookedStateDB) GetCodeHash(addr common.Address) common.Hash { func (s *hookedStateDB) GetCodeHash(addr common.Address) common.Hash {
if s.hooks.OnAccountRead != nil {
s.hooks.OnAccountRead(addr)
}
return s.inner.GetCodeHash(addr) return s.inner.GetCodeHash(addr)
} }
func (s *hookedStateDB) GetCode(addr common.Address) []byte { func (s *hookedStateDB) GetCode(addr common.Address) []byte {
if s.hooks.OnAccountRead != nil {
s.hooks.OnAccountRead(addr)
}
return s.inner.GetCode(addr) return s.inner.GetCode(addr)
} }
func (s *hookedStateDB) GetCodeSize(addr common.Address) int { func (s *hookedStateDB) GetCodeSize(addr common.Address) int {
if s.hooks.OnAccountRead != nil {
s.hooks.OnAccountRead(addr)
}
return s.inner.GetCodeSize(addr) return s.inner.GetCodeSize(addr)
} }
@ -91,14 +108,23 @@ func (s *hookedStateDB) GetRefund() uint64 {
} }
func (s *hookedStateDB) GetStateAndCommittedState(addr common.Address, hash common.Hash) (common.Hash, common.Hash) { func (s *hookedStateDB) GetStateAndCommittedState(addr common.Address, hash common.Hash) (common.Hash, common.Hash) {
if s.hooks.OnStorageRead != nil {
s.hooks.OnStorageRead(addr, hash)
}
return s.inner.GetStateAndCommittedState(addr, hash) return s.inner.GetStateAndCommittedState(addr, hash)
} }
func (s *hookedStateDB) GetState(addr common.Address, hash common.Hash) common.Hash { func (s *hookedStateDB) GetState(addr common.Address, hash common.Hash) common.Hash {
if s.hooks.OnStorageRead != nil {
s.hooks.OnStorageRead(addr, hash)
}
return s.inner.GetState(addr, hash) return s.inner.GetState(addr, hash)
} }
func (s *hookedStateDB) GetStorageRoot(addr common.Address) common.Hash { func (s *hookedStateDB) GetStorageRoot(addr common.Address) common.Hash {
if s.hooks.OnAccountRead != nil {
s.hooks.OnAccountRead(addr)
}
return s.inner.GetStorageRoot(addr) return s.inner.GetStorageRoot(addr)
} }
@ -111,14 +137,23 @@ func (s *hookedStateDB) SetTransientState(addr common.Address, key, value common
} }
func (s *hookedStateDB) HasSelfDestructed(addr common.Address) bool { func (s *hookedStateDB) HasSelfDestructed(addr common.Address) bool {
if s.hooks.OnAccountRead != nil {
s.hooks.OnAccountRead(addr)
}
return s.inner.HasSelfDestructed(addr) return s.inner.HasSelfDestructed(addr)
} }
func (s *hookedStateDB) Exist(addr common.Address) bool { func (s *hookedStateDB) Exist(addr common.Address) bool {
if s.hooks.OnAccountRead != nil {
s.hooks.OnAccountRead(addr)
}
return s.inner.Exist(addr) return s.inner.Exist(addr)
} }
func (s *hookedStateDB) Empty(addr common.Address) bool { func (s *hookedStateDB) Empty(addr common.Address) bool {
if s.hooks.OnAccountRead != nil {
s.hooks.OnAccountRead(addr)
}
return s.inner.Empty(addr) return s.inner.Empty(addr)
} }
@ -221,6 +256,10 @@ func (s *hookedStateDB) SelfDestruct(address common.Address) {
s.inner.SelfDestruct(address) s.inner.SelfDestruct(address)
} }
func (s *hookedStateDB) ExistBeforeCurTx(addr common.Address) bool {
return s.inner.ExistBeforeCurTx(addr)
}
func (s *hookedStateDB) AddLog(log *types.Log) { func (s *hookedStateDB) AddLog(log *types.Log) {
// The inner will modify the log (add fields), so invoke that first // The inner will modify the log (add fields), so invoke that first
s.inner.AddLog(log) s.inner.AddLog(log)
@ -233,11 +272,10 @@ func (s *hookedStateDB) EmitLogsForBurnAccounts() {
s.inner.EmitLogsForBurnAccounts() s.inner.EmitLogsForBurnAccounts()
} }
func (s *hookedStateDB) Finalise(deleteEmptyObjects bool) { func (s *hookedStateDB) Finalise(deleteEmptyObjects bool) bal.StateMutations {
if s.hooks.OnBalanceChange == nil && s.hooks.OnNonceChangeV2 == nil && s.hooks.OnNonceChange == nil && s.hooks.OnCodeChangeV2 == nil && s.hooks.OnCodeChange == nil { if s.hooks.OnBalanceChange == nil && s.hooks.OnNonceChangeV2 == nil && s.hooks.OnNonceChange == nil && s.hooks.OnCodeChangeV2 == nil && s.hooks.OnCodeChange == nil {
// Short circuit if no relevant hooks are set. // Short circuit if no relevant hooks are set.
s.inner.Finalise(deleteEmptyObjects) return s.inner.Finalise(deleteEmptyObjects)
return
} }
// Collect all self-destructed addresses first, then sort them to ensure // Collect all self-destructed addresses first, then sort them to ensure
@ -276,9 +314,7 @@ func (s *hookedStateDB) Finalise(deleteEmptyObjects bool) {
// If an initcode invokes selfdestruct, do not emit a code change. // If an initcode invokes selfdestruct, do not emit a code change.
prevCodeHash := s.inner.GetCodeHash(addr) prevCodeHash := s.inner.GetCodeHash(addr)
if prevCodeHash == types.EmptyCodeHash { if prevCodeHash != types.EmptyCodeHash {
continue
}
// Otherwise, trace the change. // Otherwise, trace the change.
if s.hooks.OnCodeChangeV2 != nil { if s.hooks.OnCodeChangeV2 != nil {
s.hooks.OnCodeChangeV2(addr, prevCodeHash, s.inner.GetCode(addr), types.EmptyCodeHash, nil, tracing.CodeChangeSelfDestruct) s.hooks.OnCodeChangeV2(addr, prevCodeHash, s.inner.GetCode(addr), types.EmptyCodeHash, nil, tracing.CodeChangeSelfDestruct)
@ -287,5 +323,14 @@ func (s *hookedStateDB) Finalise(deleteEmptyObjects bool) {
} }
} }
s.inner.Finalise(deleteEmptyObjects) if s.hooks.OnSelfDestructChange != nil {
s.hooks.OnSelfDestructChange(addr)
}
}
return s.inner.Finalise(deleteEmptyObjects)
}
func (s *hookedStateDB) TxIndex() int {
return s.inner.TxIndex()
} }

View file

@ -21,6 +21,8 @@ import (
"fmt" "fmt"
"math/big" "math/big"
"github.com/ethereum/go-ethereum/core/types/bal"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/consensus/misc" "github.com/ethereum/go-ethereum/consensus/misc"
"github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/state"
@ -68,6 +70,8 @@ func (p *StateProcessor) Process(ctx context.Context, block *types.Block, stated
blockNumber = block.Number() blockNumber = block.Number()
allLogs []*types.Log allLogs []*types.Log
gp = NewGasPool(block.GasLimit()) gp = NewGasPool(block.GasLimit())
computedAccessList = make(bal.ConstructionBlockAccessList)
isAmsterdam = p.chainConfig().IsAmsterdam(block.Number(), block.Time())
) )
var tracingStateDB = vm.StateDB(statedb) var tracingStateDB = vm.StateDB(statedb)
if hooks := cfg.Tracer; hooks != nil { if hooks := cfg.Tracer; hooks != nil {
@ -88,10 +92,16 @@ func (p *StateProcessor) Process(ctx context.Context, block *types.Block, stated
evm := vm.NewEVM(context, tracingStateDB, config, cfg) evm := vm.NewEVM(context, tracingStateDB, config, cfg)
if beaconRoot := block.BeaconRoot(); beaconRoot != nil { if beaconRoot := block.BeaconRoot(); beaconRoot != nil {
ProcessBeaconBlockRoot(*beaconRoot, evm) mutations := ProcessBeaconBlockRoot(*beaconRoot, evm)
if isAmsterdam {
computedAccessList.AccumulateMutations(mutations, 0)
}
} }
if config.IsPrague(block.Number(), block.Time()) || config.IsVerkle(block.Number(), block.Time()) { if config.IsPrague(block.Number(), block.Time()) || config.IsVerkle(block.Number(), block.Time()) {
ProcessParentBlockHash(block.ParentHash(), evm) mutations := ProcessParentBlockHash(block.ParentHash(), evm)
if isAmsterdam {
computedAccessList.AccumulateMutations(mutations, 0)
}
} }
// Iterate over and process the individual transactions // Iterate over and process the individual transactions
@ -105,61 +115,78 @@ func (p *StateProcessor) Process(ctx context.Context, block *types.Block, stated
telemetry.StringAttribute("tx.hash", tx.Hash().Hex()), telemetry.StringAttribute("tx.hash", tx.Hash().Hex()),
telemetry.Int64Attribute("tx.index", int64(i)), telemetry.Int64Attribute("tx.index", int64(i)),
) )
var (
receipt, err := ApplyTransactionWithEVM(msg, gp, statedb, blockNumber, blockHash, context.Time, tx, evm) receipt *types.Receipt
mutations bal.StateMutations
)
mutations, receipt, err = ApplyTransactionWithEVM(msg, gp, statedb, blockNumber, blockHash, context.Time, tx, evm)
spanEnd(&err) // TODO: I changed order of this so that it would be called when there was an error, check if that's a bug in upstream
if err != nil { if err != nil {
spanEnd(&err) spanEnd(&err)
return nil, fmt.Errorf("could not apply tx %d [%v]: %w", i, tx.Hash().Hex(), err) return nil, fmt.Errorf("could not apply tx %d [%v]: %w", i, tx.Hash().Hex(), err)
} }
receipts = append(receipts, receipt) receipts = append(receipts, receipt)
allLogs = append(allLogs, receipt.Logs...) allLogs = append(allLogs, receipt.Logs...)
spanEnd(nil)
if isAmsterdam {
computedAccessList.AccumulateMutations(mutations, uint16(i)+1)
} }
requests, err := postExecution(ctx, config, block, allLogs, evm) }
postMut, requests, err := postExecution(ctx, config, block, allLogs, evm)
if err != nil { if err != nil {
return nil, err return nil, err
} }
// Finalize the block, applying any consensus engine specific extras (e.g. block rewards) // Finalize the block, applying any consensus engine specific extras (e.g. block rewards)
p.chain.Engine().Finalize(p.chain, header, tracingStateDB, block.Body()) eip4985WithdrawalMuts := p.chain.Engine().Finalize(p.chain, header, tracingStateDB, block.Body())
postMut.Merge(eip4985WithdrawalMuts)
if isAmsterdam {
computedAccessList.AccumulateMutations(postMut, uint16(len(block.Transactions()))+1)
accesses := statedb.Reader().(state.StateReaderTracker).GetStateAccessList()
computedAccessList.AccumulateReads(accesses)
}
return &ProcessResult{ return &ProcessResult{
Receipts: receipts, Receipts: receipts,
Requests: requests, Requests: requests,
Logs: allLogs, Logs: allLogs,
GasUsed: gp.Used(), GasUsed: gp.Used(),
AccessList: computedAccessList,
}, nil }, nil
} }
// postExecution processes the post-execution system calls if Prague is enabled. // postExecution processes the post-execution system calls if Prague is enabled.
func postExecution(ctx context.Context, config *params.ChainConfig, block *types.Block, allLogs []*types.Log, evm *vm.EVM) (requests [][]byte, err error) { func postExecution(ctx context.Context, config *params.ChainConfig, block *types.Block, allLogs []*types.Log, evm *vm.EVM) (mut bal.StateMutations, requests [][]byte, err error) {
_, _, spanEnd := telemetry.StartSpan(ctx, "core.postExecution") _, _, spanEnd := telemetry.StartSpan(ctx, "core.postExecution")
defer spanEnd(&err) defer spanEnd(&err)
mut = make(bal.StateMutations)
// Read requests if Prague is enabled. // Read requests if Prague is enabled.
if config.IsPrague(block.Number(), block.Time()) { if config.IsPrague(block.Number(), block.Time()) {
requests = [][]byte{} requests = [][]byte{}
// EIP-6110 // EIP-6110
if err := ParseDepositLogs(&requests, allLogs, config); err != nil { if err := ParseDepositLogs(&requests, allLogs, config); err != nil {
return requests, fmt.Errorf("failed to parse deposit logs: %w", err) return mut, requests, fmt.Errorf("failed to parse deposit logs: %w", err)
} }
// EIP-7002 // EIP-7002
if err := ProcessWithdrawalQueue(&requests, evm); err != nil { if mut, err = ProcessWithdrawalQueue(&requests, evm); err != nil {
return requests, fmt.Errorf("failed to process withdrawal queue: %w", err) return mut, requests, fmt.Errorf("failed to process withdrawal queue: %w", err)
} }
// EIP-7251 // EIP-7251
if err := ProcessConsolidationQueue(&requests, evm); err != nil { consolidationMut, err := ProcessConsolidationQueue(&requests, evm)
return requests, fmt.Errorf("failed to process consolidation queue: %w", err) if err != nil {
return mut, requests, fmt.Errorf("failed to process consolidation queue: %w", err)
} }
mut.Merge(consolidationMut)
} }
return requests, nil return mut, requests, nil
} }
// ApplyTransactionWithEVM attempts to apply a transaction to the given state database // ApplyTransactionWithEVM attempts to apply a transaction to the given state database
// and uses the input parameters for its environment similar to ApplyTransaction. However, // and uses the input parameters for its environment similar to ApplyTransaction. However,
// this method takes an already created EVM instance as input. // this method takes an already created EVM instance as input.
func ApplyTransactionWithEVM(msg *Message, gp *GasPool, statedb *state.StateDB, blockNumber *big.Int, blockHash common.Hash, blockTime uint64, tx *types.Transaction, evm *vm.EVM) (receipt *types.Receipt, err error) { func ApplyTransactionWithEVM(msg *Message, gp *GasPool, statedb *state.StateDB, blockNumber *big.Int, blockHash common.Hash, blockTime uint64, tx *types.Transaction, evm *vm.EVM) (mutations bal.StateMutations, receipt *types.Receipt, err error) {
if hooks := evm.Config.Tracer; hooks != nil { if hooks := evm.Config.Tracer; hooks != nil {
if hooks.OnTxStart != nil { if hooks.OnTxStart != nil {
hooks.OnTxStart(evm.GetVMContext(), tx, msg.From) hooks.OnTxStart(evm.GetVMContext(), tx, msg.From)
@ -171,12 +198,12 @@ func ApplyTransactionWithEVM(msg *Message, gp *GasPool, statedb *state.StateDB,
// Apply the transaction to the current state (included in the env). // Apply the transaction to the current state (included in the env).
result, err := ApplyMessage(evm, msg, gp) result, err := ApplyMessage(evm, msg, gp)
if err != nil { if err != nil {
return nil, err return nil, nil, err
} }
// Update the state with pending changes. // Update the state with pending changes.
var root []byte var root []byte
if evm.ChainConfig().IsByzantium(blockNumber) { if evm.ChainConfig().IsByzantium(blockNumber) {
evm.StateDB.Finalise(true) mutations = evm.StateDB.Finalise(true)
} else { } else {
root = statedb.IntermediateRoot(evm.ChainConfig().IsEIP158(blockNumber)).Bytes() root = statedb.IntermediateRoot(evm.ChainConfig().IsEIP158(blockNumber)).Bytes()
} }
@ -185,7 +212,7 @@ func ApplyTransactionWithEVM(msg *Message, gp *GasPool, statedb *state.StateDB,
if statedb.Database().TrieDB().IsVerkle() { if statedb.Database().TrieDB().IsVerkle() {
statedb.AccessEvents().Merge(evm.AccessEvents) statedb.AccessEvents().Merge(evm.AccessEvents)
} }
return MakeReceipt(evm, result, statedb, blockNumber, blockHash, blockTime, tx, gp.CumulativeUsed(), root), nil return mutations, MakeReceipt(evm, result, statedb, blockNumber, blockHash, blockTime, tx, gp.CumulativeUsed(), root), nil
} }
// MakeReceipt generates the receipt object for a transaction given its execution result. // MakeReceipt generates the receipt object for a transaction given its execution result.
@ -230,10 +257,10 @@ func MakeReceipt(evm *vm.EVM, result *ExecutionResult, statedb *state.StateDB, b
// and uses the input parameters for its environment. It returns the receipt // and uses the input parameters for its environment. It returns the receipt
// for the transaction and an error if the transaction failed, // for the transaction and an error if the transaction failed,
// indicating the block was invalid. // indicating the block was invalid.
func ApplyTransaction(evm *vm.EVM, gp *GasPool, statedb *state.StateDB, header *types.Header, tx *types.Transaction) (*types.Receipt, error) { func ApplyTransaction(evm *vm.EVM, gp *GasPool, statedb *state.StateDB, header *types.Header, tx *types.Transaction) (bal.StateMutations, *types.Receipt, error) {
msg, err := TransactionToMessage(tx, types.MakeSigner(evm.ChainConfig(), header.Number, header.Time), header.BaseFee) msg, err := TransactionToMessage(tx, types.MakeSigner(evm.ChainConfig(), header.Number, header.Time), header.BaseFee)
if err != nil { if err != nil {
return nil, err return nil, nil, err
} }
// Create a new context to be used in the EVM environment // Create a new context to be used in the EVM environment
return ApplyTransactionWithEVM(msg, gp, statedb, header.Number, header.Hash(), header.Time, tx, evm) return ApplyTransactionWithEVM(msg, gp, statedb, header.Number, header.Hash(), header.Time, tx, evm)
@ -241,7 +268,7 @@ func ApplyTransaction(evm *vm.EVM, gp *GasPool, statedb *state.StateDB, header *
// ProcessBeaconBlockRoot applies the EIP-4788 system call to the beacon block root // ProcessBeaconBlockRoot applies the EIP-4788 system call to the beacon block root
// contract. This method is exported to be used in tests. // contract. This method is exported to be used in tests.
func ProcessBeaconBlockRoot(beaconRoot common.Hash, evm *vm.EVM) { func ProcessBeaconBlockRoot(beaconRoot common.Hash, evm *vm.EVM) bal.StateMutations {
if tracer := evm.Config.Tracer; tracer != nil { if tracer := evm.Config.Tracer; tracer != nil {
onSystemCallStart(tracer, evm.GetVMContext()) onSystemCallStart(tracer, evm.GetVMContext())
if tracer.OnSystemCallEnd != nil { if tracer.OnSystemCallEnd != nil {
@ -260,12 +287,12 @@ func ProcessBeaconBlockRoot(beaconRoot common.Hash, evm *vm.EVM) {
evm.SetTxContext(NewEVMTxContext(msg)) evm.SetTxContext(NewEVMTxContext(msg))
evm.StateDB.AddAddressToAccessList(params.BeaconRootsAddress) evm.StateDB.AddAddressToAccessList(params.BeaconRootsAddress)
_, _, _, _ = evm.Call(msg.From, *msg.To, msg.Data, vm.GasCosts{RegularGas: 30_000_000}, common.U2560) _, _, _, _ = evm.Call(msg.From, *msg.To, msg.Data, vm.GasCosts{RegularGas: 30_000_000}, common.U2560)
evm.StateDB.Finalise(true) return evm.StateDB.Finalise(true)
} }
// ProcessParentBlockHash stores the parent block hash in the history storage contract // ProcessParentBlockHash stores the parent block hash in the history storage contract
// as per EIP-2935/7709. // as per EIP-2935/7709.
func ProcessParentBlockHash(prevHash common.Hash, evm *vm.EVM) { func ProcessParentBlockHash(prevHash common.Hash, evm *vm.EVM) bal.StateMutations {
if tracer := evm.Config.Tracer; tracer != nil { if tracer := evm.Config.Tracer; tracer != nil {
onSystemCallStart(tracer, evm.GetVMContext()) onSystemCallStart(tracer, evm.GetVMContext())
if tracer.OnSystemCallEnd != nil { if tracer.OnSystemCallEnd != nil {
@ -290,22 +317,23 @@ func ProcessParentBlockHash(prevHash common.Hash, evm *vm.EVM) {
if evm.StateDB.AccessEvents() != nil { if evm.StateDB.AccessEvents() != nil {
evm.StateDB.AccessEvents().Merge(evm.AccessEvents) evm.StateDB.AccessEvents().Merge(evm.AccessEvents)
} }
evm.StateDB.Finalise(true) return evm.StateDB.Finalise(true)
} }
// ProcessWithdrawalQueue calls the EIP-7002 withdrawal queue contract. // ProcessWithdrawalQueue calls the EIP-7002 withdrawal queue contract.
// It returns the opaque request data returned by the contract. // It returns the opaque request data returned by the contract.
func ProcessWithdrawalQueue(requests *[][]byte, evm *vm.EVM) error { func ProcessWithdrawalQueue(requests *[][]byte, evm *vm.EVM) (bal.StateMutations, error) {
return processRequestsSystemCall(requests, evm, 0x01, params.WithdrawalQueueAddress) return processRequestsSystemCall(requests, evm, 0x01, params.WithdrawalQueueAddress)
} }
// ProcessConsolidationQueue calls the EIP-7251 consolidation queue contract. // ProcessConsolidationQueue calls the EIP-7251 consolidation queue contract.
// It returns the opaque request data returned by the contract. // It returns the opaque request data returned by the contract.
func ProcessConsolidationQueue(requests *[][]byte, evm *vm.EVM) error { func ProcessConsolidationQueue(requests *[][]byte, evm *vm.EVM) (bal.StateMutations, error) {
return processRequestsSystemCall(requests, evm, 0x02, params.ConsolidationQueueAddress) return processRequestsSystemCall(requests, evm, 0x02, params.ConsolidationQueueAddress)
} }
func processRequestsSystemCall(requests *[][]byte, evm *vm.EVM, requestType byte, addr common.Address) error { // TODO: does the requests contract produce mutations? I think it just parses the logs into requests but idk
func processRequestsSystemCall(requests *[][]byte, evm *vm.EVM, requestType byte, addr common.Address) (bal.StateMutations, error) {
if tracer := evm.Config.Tracer; tracer != nil { if tracer := evm.Config.Tracer; tracer != nil {
onSystemCallStart(tracer, evm.GetVMContext()) onSystemCallStart(tracer, evm.GetVMContext())
if tracer.OnSystemCallEnd != nil { if tracer.OnSystemCallEnd != nil {
@ -323,19 +351,19 @@ func processRequestsSystemCall(requests *[][]byte, evm *vm.EVM, requestType byte
evm.SetTxContext(NewEVMTxContext(msg)) evm.SetTxContext(NewEVMTxContext(msg))
evm.StateDB.AddAddressToAccessList(addr) evm.StateDB.AddAddressToAccessList(addr)
ret, _, _, err := evm.Call(msg.From, *msg.To, msg.Data, vm.GasCosts{RegularGas: 30_000_000}, common.U2560) ret, _, _, err := evm.Call(msg.From, *msg.To, msg.Data, vm.GasCosts{RegularGas: 30_000_000}, common.U2560)
evm.StateDB.Finalise(true) mut := evm.StateDB.Finalise(true)
if err != nil { if err != nil {
return fmt.Errorf("system call failed to execute: %v", err) return nil, fmt.Errorf("system call failed to execute: %v", err)
} }
if len(ret) == 0 { if len(ret) == 0 {
return nil // skip empty output return mut, nil // skip empty output
} }
// Append prefixed requestsData to the requests list. // Append prefixed requestsData to the requests list.
requestsData := make([]byte, len(ret)+1) requestsData := make([]byte, len(ret)+1)
requestsData[0] = requestType requestsData[0] = requestType
copy(requestsData[1:], ret) copy(requestsData[1:], ret)
*requests = append(*requests, requestsData) *requests = append(*requests, requestsData)
return nil return mut, nil
} }
var depositTopic = common.HexToHash("0x649bbc62d0e31342afea4e5cd82d4049e7e1ee912fc0889aa790803be39038c5") var depositTopic = common.HexToHash("0x649bbc62d0e31342afea4e5cd82d4049e7e1ee912fc0889aa790803be39038c5")

View file

@ -67,6 +67,10 @@ func ExecuteStateless(ctx context.Context, config *params.ChainConfig, vmconfig
processor := NewStateProcessor(chain) processor := NewStateProcessor(chain)
validator := NewBlockValidator(config, nil) // No chain, we only validate the state, not the block validator := NewBlockValidator(config, nil) // No chain, we only validate the state, not the block
if config.IsAmsterdam(block.Number(), block.Time()) {
db = db.WithReader(state.NewReaderWithTracker(db.Reader()))
}
// Run the stateless blocks processing and self-validate certain fields // Run the stateless blocks processing and self-validate certain fields
res, err := processor.Process(ctx, block, db, vmconfig) res, err := processor.Process(ctx, block, db, vmconfig)
if err != nil { if err != nil {

View file

@ -178,7 +178,6 @@ type (
CloseHook = func() CloseHook = func()
// BlockStartHook is called before executing `block`. // BlockStartHook is called before executing `block`.
// `td` is the total difficulty prior to `block`.
BlockStartHook = func(event BlockEvent) BlockStartHook = func(event BlockEvent)
// BlockEndHook is called after executing a block. // BlockEndHook is called after executing a block.
@ -192,24 +191,25 @@ type (
// GenesisBlockHook is called when the genesis block is being processed. // GenesisBlockHook is called when the genesis block is being processed.
GenesisBlockHook = func(genesis *types.Block, alloc types.GenesisAlloc) GenesisBlockHook = func(genesis *types.Block, alloc types.GenesisAlloc)
// OnSystemCallStartHook is called when a system call is about to be executed. Today, // OnSystemCallStartHook is called when a system call is about to be executed.
// this hook is invoked when the EIP-4788 system call is about to be executed to set the // Today, this hook is invoked when the EIP-4788 system call is about to be
// beacon block root. // executed to set the beacon block root.
// //
// After this hook, the EVM call tracing will happened as usual so you will receive a `OnEnter/OnExit` // After this hook, the EVM call tracing will happened as usual so you will
// as well as state hooks between this hook and the `OnSystemCallEndHook`. // receive a `OnEnter/OnExit` as well as state hooks between this hook and
// the `OnSystemCallEndHook`.
// //
// Note that system call happens outside normal transaction execution, so the `OnTxStart/OnTxEnd` hooks // Note that system call happens outside normal transaction execution, so
// will not be invoked. // the `OnTxStart/OnTxEnd` hooks will not be invoked.
OnSystemCallStartHook = func() OnSystemCallStartHook = func()
// OnSystemCallStartHookV2 is called when a system call is about to be executed. Refer // OnSystemCallStartHookV2 is called when a system call is about to be executed.
// to `OnSystemCallStartHook` for more information. // Refer to `OnSystemCallStartHook` for more information.
OnSystemCallStartHookV2 = func(vm *VMContext) OnSystemCallStartHookV2 = func(vm *VMContext)
// OnSystemCallEndHook is called when a system call has finished executing. Today, // OnSystemCallEndHook is called when a system call has finished executing.
// this hook is invoked when the EIP-4788 system call is about to be executed to set the // Today, this hook is invoked when the EIP-4788 system call is about to be
// beacon block root. // executed to set the beacon block root.
OnSystemCallEndHook = func() OnSystemCallEndHook = func()
// StateUpdateHook is called after state is committed for a block. // StateUpdateHook is called after state is committed for a block.
@ -239,9 +239,17 @@ type (
// StorageChangeHook is called when the storage of an account changes. // StorageChangeHook is called when the storage of an account changes.
StorageChangeHook = func(addr common.Address, slot common.Hash, prev, new common.Hash) StorageChangeHook = func(addr common.Address, slot common.Hash, prev, new common.Hash)
SelfDestructHook = func(address common.Address)
// LogHook is called when a log is emitted. // LogHook is called when a log is emitted.
LogHook = func(log *types.Log) LogHook = func(log *types.Log)
// AccountReadHook is called when the account is accessed.
AccountReadHook = func(addr common.Address)
// StorageReadHook is called when the storage slot is accessed.
StorageReadHook = func(addr common.Address, slot common.Hash)
// BlockHashReadHook is called when EVM reads the blockhash of a block. // BlockHashReadHook is called when EVM reads the blockhash of a block.
BlockHashReadHook = func(blockNumber uint64, hash common.Hash) BlockHashReadHook = func(blockNumber uint64, hash common.Hash)
) )
@ -255,6 +263,7 @@ type Hooks struct {
OnOpcode OpcodeHook OnOpcode OpcodeHook
OnFault FaultHook OnFault FaultHook
OnGasChange GasChangeHook OnGasChange GasChangeHook
// Chain events // Chain events
OnBlockchainInit BlockchainInitHook OnBlockchainInit BlockchainInitHook
OnClose CloseHook OnClose CloseHook
@ -266,7 +275,10 @@ type Hooks struct {
OnSystemCallStartV2 OnSystemCallStartHookV2 OnSystemCallStartV2 OnSystemCallStartHookV2
OnSystemCallEnd OnSystemCallEndHook OnSystemCallEnd OnSystemCallEndHook
OnStateUpdate StateUpdateHook OnStateUpdate StateUpdateHook
// State events
OnBlockFinalization func() // called after post-tx system contracts and consensus finalization are invoked
// State mutation events
OnBalanceChange BalanceChangeHook OnBalanceChange BalanceChangeHook
OnNonceChange NonceChangeHook OnNonceChange NonceChangeHook
OnNonceChangeV2 NonceChangeHookV2 OnNonceChangeV2 NonceChangeHookV2
@ -274,6 +286,12 @@ type Hooks struct {
OnCodeChangeV2 CodeChangeHookV2 OnCodeChangeV2 CodeChangeHookV2
OnStorageChange StorageChangeHook OnStorageChange StorageChangeHook
OnLog LogHook OnLog LogHook
OnSelfDestructChange SelfDestructHook
// State access events
OnAccountRead AccountReadHook
OnStorageRead StorageReadHook
// Block hash read // Block hash read
OnBlockHashRead BlockHashReadHook OnBlockHashRead BlockHashReadHook
} }
@ -290,57 +308,74 @@ const (
// Issuance // Issuance
// BalanceIncreaseRewardMineUncle is a reward for mining an uncle block. // BalanceIncreaseRewardMineUncle is a reward for mining an uncle block.
BalanceIncreaseRewardMineUncle BalanceChangeReason = 1 BalanceIncreaseRewardMineUncle BalanceChangeReason = 1
// BalanceIncreaseRewardMineBlock is a reward for mining a block. // BalanceIncreaseRewardMineBlock is a reward for mining a block.
BalanceIncreaseRewardMineBlock BalanceChangeReason = 2 BalanceIncreaseRewardMineBlock BalanceChangeReason = 2
// BalanceIncreaseWithdrawal is ether withdrawn from the beacon chain. // BalanceIncreaseWithdrawal is ether withdrawn from the beacon chain.
BalanceIncreaseWithdrawal BalanceChangeReason = 3 BalanceIncreaseWithdrawal BalanceChangeReason = 3
// BalanceIncreaseGenesisBalance is ether allocated at the genesis block. // BalanceIncreaseGenesisBalance is ether allocated at the genesis block.
BalanceIncreaseGenesisBalance BalanceChangeReason = 4 BalanceIncreaseGenesisBalance BalanceChangeReason = 4
// Transaction fees // Transaction fees
// BalanceIncreaseRewardTransactionFee is the transaction tip increasing block builder's balance. // BalanceIncreaseRewardTransactionFee is the transaction tip increasing
// block builder's balance.
BalanceIncreaseRewardTransactionFee BalanceChangeReason = 5 BalanceIncreaseRewardTransactionFee BalanceChangeReason = 5
// BalanceDecreaseGasBuy is spent to purchase gas for execution a transaction. // BalanceDecreaseGasBuy is spent to purchase gas for execution a transaction.
// Part of this gas will be burnt as per EIP-1559 rules. // Part of this gas will be burnt as per EIP-1559 rules.
BalanceDecreaseGasBuy BalanceChangeReason = 6 BalanceDecreaseGasBuy BalanceChangeReason = 6
// BalanceIncreaseGasReturn is ether returned for unused gas at the end of execution. // BalanceIncreaseGasReturn is ether returned for unused gas at the end of execution.
BalanceIncreaseGasReturn BalanceChangeReason = 7 BalanceIncreaseGasReturn BalanceChangeReason = 7
// DAO fork // DAO fork
// BalanceIncreaseDaoContract is ether sent to the DAO refund contract. // BalanceIncreaseDaoContract is ether sent to the DAO refund contract.
BalanceIncreaseDaoContract BalanceChangeReason = 8 BalanceIncreaseDaoContract BalanceChangeReason = 8
// BalanceDecreaseDaoAccount is ether taken from a DAO account to be moved to the refund contract.
// BalanceDecreaseDaoAccount is ether taken from a DAO account to be moved
// to the refund contract.
BalanceDecreaseDaoAccount BalanceChangeReason = 9 BalanceDecreaseDaoAccount BalanceChangeReason = 9
// BalanceChangeTransfer is ether transferred via a call. // BalanceChangeTransfer is ether transferred via a call.
// it is a decrease for the sender and an increase for the recipient. // it is a decrease for the sender and an increase for the recipient.
BalanceChangeTransfer BalanceChangeReason = 10 BalanceChangeTransfer BalanceChangeReason = 10
// BalanceChangeTouchAccount is a transfer of zero value. It is only there to // BalanceChangeTouchAccount is a transfer of zero value. It is only there to
// touch-create an account. // touch-create an account.
BalanceChangeTouchAccount BalanceChangeReason = 11 BalanceChangeTouchAccount BalanceChangeReason = 11
// BalanceIncreaseSelfdestruct is added to the recipient as indicated by a selfdestructing account. // BalanceIncreaseSelfdestruct is added to the recipient as indicated by a
// selfdestructing account.
BalanceIncreaseSelfdestruct BalanceChangeReason = 12 BalanceIncreaseSelfdestruct BalanceChangeReason = 12
// BalanceDecreaseSelfdestruct is deducted from a contract due to self-destruct. // BalanceDecreaseSelfdestruct is deducted from a contract due to self-destruct.
BalanceDecreaseSelfdestruct BalanceChangeReason = 13 BalanceDecreaseSelfdestruct BalanceChangeReason = 13
// BalanceDecreaseSelfdestructBurn is ether that is sent to an already self-destructed // BalanceDecreaseSelfdestructBurn is ether that is sent to an already self-destructed
// account within the same tx (captured at end of tx). // account within the same tx (captured at end of tx).
// Note it doesn't account for a self-destruct which appoints itself as recipient. // Note it doesn't account for a self-destruct which appoints itself as recipient.
BalanceDecreaseSelfdestructBurn BalanceChangeReason = 14 BalanceDecreaseSelfdestructBurn BalanceChangeReason = 14
// BalanceChangeRevert is emitted when the balance is reverted back to a previous value due to call failure. // BalanceChangeRevert is emitted when the balance is reverted back to a
// It is only emitted when the tracer has opted in to use the journaling wrapper (WrapWithJournal). // previous value due to call failure.
//
// It is only emitted when the tracer has opted in to use the journaling
// wrapper (WrapWithJournal).
BalanceChangeRevert BalanceChangeReason = 15 BalanceChangeRevert BalanceChangeReason = 15
) )
// GasChangeReason is used to indicate the reason for a gas change, useful // GasChangeReason is used to indicate the reason for a gas change, useful
// for tracing and reporting. // for tracing and reporting.
// //
// There is essentially two types of gas changes, those that can be emitted once per transaction // There is essentially two types of gas changes, those that can be emitted
// and those that can be emitted on a call basis, so possibly multiple times per transaction. // once per transaction and those that can be emitted on a call basis, so possibly
// multiple times per transaction.
// //
// They can be recognized easily by their name, those that start with `GasChangeTx` are emitted // They can be recognized easily by their name, those that start with `GasChangeTx`
// once per transaction, while those that start with `GasChangeCall` are emitted on a call basis. // are emitted once per transaction, while those that start with `GasChangeCall`
// are emitted on a call basis.
type GasChangeReason byte type GasChangeReason byte
//go:generate go run golang.org/x/tools/cmd/stringer -type=GasChangeReason -trimprefix=GasChange -output gen_gas_change_reason_stringer.go //go:generate go run golang.org/x/tools/cmd/stringer -type=GasChangeReason -trimprefix=GasChange -output gen_gas_change_reason_stringer.go
@ -348,14 +383,19 @@ type GasChangeReason byte
const ( const (
GasChangeUnspecified GasChangeReason = 0 GasChangeUnspecified GasChangeReason = 0
// GasChangeTxInitialBalance is the initial balance for the call which will be equal to the gasLimit of the call. There is only // GasChangeTxInitialBalance is the initial balance for the call which will
// one such gas change per transaction. // be equal to the gasLimit of the call. There is only one such gas change
// per transaction.
GasChangeTxInitialBalance GasChangeReason = 1 GasChangeTxInitialBalance GasChangeReason = 1
// GasChangeTxIntrinsicGas is the amount of gas that will be charged for the intrinsic cost of the transaction, there is
// always exactly one of those per transaction. // GasChangeTxIntrinsicGas is the amount of gas that will be charged for the
// intrinsic cost of the transaction, there is always exactly one of those
// per transaction.
GasChangeTxIntrinsicGas GasChangeReason = 2 GasChangeTxIntrinsicGas GasChangeReason = 2
// GasChangeTxRefunds is the sum of all refunds which happened during the tx execution (e.g. storage slot being cleared)
// this generates an increase in gas. There is at most one of such gas change per transaction. // GasChangeTxRefunds is the sum of all refunds which happened during the tx
// execution (e.g. storage slot being cleared). this generates an increase in
// gas. There is at most one of such gas change per transaction.
GasChangeTxRefunds GasChangeReason = 3 GasChangeTxRefunds GasChangeReason = 3
// GasChangeTxLeftOverReturned is the amount of gas left over at the end of transaction's execution that will be returned // GasChangeTxLeftOverReturned is the amount of gas left over at the end of transaction's execution that will be returned
// to the account. This change will always be a negative change as we "drain" left over gas towards 0. If there was no gas // to the account. This change will always be a negative change as we "drain" left over gas towards 0. If there was no gas
@ -363,46 +403,77 @@ const (
// There is at most one of such gas change per transaction. // There is at most one of such gas change per transaction.
GasChangeTxLeftOverReturned GasChangeReason = 4 GasChangeTxLeftOverReturned GasChangeReason = 4
// GasChangeCallInitialBalance is the initial balance for the call which will be equal to the gasLimit of the call. There is only // GasChangeCallInitialBalance is the initial balance for the call which
// one such gas change per call. // will be equal to the gasLimit of the call. There is only one such gas
// change per call.
GasChangeCallInitialBalance GasChangeReason = 5 GasChangeCallInitialBalance GasChangeReason = 5
// GasChangeCallLeftOverReturned is the amount of gas left over that will be returned to the caller, this change will always
// be a negative change as we "drain" left over gas towards 0. If there was no gas left at the end of execution, no such even // GasChangeCallLeftOverReturned is the amount of gas left over that will
// will be emitted. // be returned to the caller, this change will always be a negative change
// as we "drain" left over gas towards 0. If there was no gas left at the
// end of execution, no such even will be emitted.
GasChangeCallLeftOverReturned GasChangeReason = 6 GasChangeCallLeftOverReturned GasChangeReason = 6
// GasChangeCallLeftOverRefunded is the amount of gas that will be refunded to the call after the child call execution it
// executed completed. This value is always positive as we are giving gas back to the you, the left over gas of the child. // GasChangeCallLeftOverRefunded is the amount of gas that will be refunded
// If there was no gas left to be refunded, no such even will be emitted. // to the call after the child call execution it executed completed. This
// value is always positive as we are giving gas back to the you, the left over
// gas of the child. If there was no gas left to be refunded, no such event
// will be emitted.
GasChangeCallLeftOverRefunded GasChangeReason = 7 GasChangeCallLeftOverRefunded GasChangeReason = 7
// GasChangeCallContractCreation is the amount of gas that will be burned for a CREATE.
// GasChangeCallContractCreation is the amount of gas that will be burned
// for a CREATE.
GasChangeCallContractCreation GasChangeReason = 8 GasChangeCallContractCreation GasChangeReason = 8
// GasChangeCallContractCreation2 is the amount of gas that will be burned for a CREATE2.
// GasChangeCallContractCreation2 is the amount of gas that will be burned
// for a CREATE2.
GasChangeCallContractCreation2 GasChangeReason = 9 GasChangeCallContractCreation2 GasChangeReason = 9
// GasChangeCallCodeStorage is the amount of gas that will be charged for code storage.
// GasChangeCallCodeStorage is the amount of gas that will be charged for
// code storage.
GasChangeCallCodeStorage GasChangeReason = 10 GasChangeCallCodeStorage GasChangeReason = 10
// GasChangeCallOpCode is the amount of gas that will be charged for an opcode executed by the EVM, exact opcode that was
// performed can be check by `OnOpcode` handling. // GasChangeCallOpCode is the amount of gas that will be charged for an opcode
// executed by the EVM, exact opcode that was performed can be check by
// `OnOpcode` handling.
GasChangeCallOpCode GasChangeReason = 11 GasChangeCallOpCode GasChangeReason = 11
// GasChangeCallPrecompiledContract is the amount of gas that will be charged for a precompiled contract execution.
// GasChangeCallPrecompiledContract is the amount of gas that will be charged
// for a precompiled contract execution.
GasChangeCallPrecompiledContract GasChangeReason = 12 GasChangeCallPrecompiledContract GasChangeReason = 12
// GasChangeCallStorageColdAccess is the amount of gas that will be charged for a cold storage access as controlled by EIP2929 rules.
// GasChangeCallStorageColdAccess is the amount of gas that will be charged
// for a cold storage access as controlled by EIP2929 rules.
GasChangeCallStorageColdAccess GasChangeReason = 13 GasChangeCallStorageColdAccess GasChangeReason = 13
// GasChangeCallFailedExecution is the burning of the remaining gas when the execution failed without a revert.
// GasChangeCallFailedExecution is the burning of the remaining gas when the
// execution failed without a revert.
GasChangeCallFailedExecution GasChangeReason = 14 GasChangeCallFailedExecution GasChangeReason = 14
// GasChangeWitnessContractInit flags the event of adding to the witness during the contract creation initialization step.
// GasChangeWitnessContractInit flags the event of adding to the witness
// during the contract creation initialization step.
GasChangeWitnessContractInit GasChangeReason = 15 GasChangeWitnessContractInit GasChangeReason = 15
// GasChangeWitnessContractCreation flags the event of adding to the witness during the contract creation finalization step.
// GasChangeWitnessContractCreation flags the event of adding to the witness
// during the contract creation finalization step.
GasChangeWitnessContractCreation GasChangeReason = 16 GasChangeWitnessContractCreation GasChangeReason = 16
// GasChangeWitnessCodeChunk flags the event of adding one or more contract code chunks to the witness.
// GasChangeWitnessCodeChunk flags the event of adding one or more contract
// code chunks to the witness.
GasChangeWitnessCodeChunk GasChangeReason = 17 GasChangeWitnessCodeChunk GasChangeReason = 17
// GasChangeWitnessContractCollisionCheck flags the event of adding to the witness when checking for contract address collision.
// GasChangeWitnessContractCollisionCheck flags the event of adding to the
// witness when checking for contract address collision.
GasChangeWitnessContractCollisionCheck GasChangeReason = 18 GasChangeWitnessContractCollisionCheck GasChangeReason = 18
// GasChangeTxDataFloor is the amount of extra gas the transaction has to pay to reach the minimum gas requirement for the
// transaction data. This change will always be a negative change. // GasChangeTxDataFloor is the amount of extra gas the transaction has to
// pay to reach the minimum gas requirement for the transaction data.
// This change will always be a negative change.
GasChangeTxDataFloor GasChangeReason = 19 GasChangeTxDataFloor GasChangeReason = 19
// GasChangeIgnored is a special value that can be used to indicate that the gas change should be ignored as // GasChangeIgnored is a special value that can be used to indicate that
// it will be "manually" tracked by a direct emit of the gas change event. // the gas change should be ignored as it will be "manually" tracked by
// a direct emit of the gas change event.
GasChangeIgnored GasChangeReason = 0xFF GasChangeIgnored GasChangeReason = 0xFF
) )
@ -426,11 +497,12 @@ const (
// NonceChangeNewContract is the nonce change of a newly created contract. // NonceChangeNewContract is the nonce change of a newly created contract.
NonceChangeNewContract NonceChangeReason = 4 NonceChangeNewContract NonceChangeReason = 4
// NonceChangeTransaction is the nonce change due to a EIP-7702 authorization. // NonceChangeAuthorization is the nonce change due to a EIP-7702 authorization.
NonceChangeAuthorization NonceChangeReason = 5 NonceChangeAuthorization NonceChangeReason = 5
// NonceChangeRevert is emitted when the nonce is reverted back to a previous value due to call failure. // NonceChangeRevert is emitted when the nonce is reverted back to a previous
// It is only emitted when the tracer has opted in to use the journaling wrapper (WrapWithJournal). // value due to call failure. It is only emitted when the tracer has opted in
// to use the journaling wrapper (WrapWithJournal).
NonceChangeRevert NonceChangeReason = 6 NonceChangeRevert NonceChangeReason = 6
// NonceChangeSelfdestruct is emitted when the nonce is reset to zero due to a self-destruct // NonceChangeSelfdestruct is emitted when the nonce is reset to zero due to a self-destruct
@ -445,22 +517,26 @@ type CodeChangeReason byte
const ( const (
CodeChangeUnspecified CodeChangeReason = 0 CodeChangeUnspecified CodeChangeReason = 0
// CodeChangeContractCreation is when a new contract is deployed via CREATE/CREATE2 operations. // CodeChangeContractCreation is when a new contract is deployed via
// CREATE/CREATE2 operations.
CodeChangeContractCreation CodeChangeReason = 1 CodeChangeContractCreation CodeChangeReason = 1
// CodeChangeGenesis is when contract code is set during blockchain genesis or initial setup. // CodeChangeGenesis is when contract code is set during blockchain genesis
// or initial setup.
CodeChangeGenesis CodeChangeReason = 2 CodeChangeGenesis CodeChangeReason = 2
// CodeChangeAuthorization is when code is set via EIP-7702 Set Code Authorization. // CodeChangeAuthorization is when code is set via EIP-7702 Set Code Authorization.
CodeChangeAuthorization CodeChangeReason = 3 CodeChangeAuthorization CodeChangeReason = 3
// CodeChangeAuthorizationClear is when EIP-7702 delegation is cleared by setting to zero address. // CodeChangeAuthorizationClear is when EIP-7702 delegation is cleared by
// setting to zero address.
CodeChangeAuthorizationClear CodeChangeReason = 4 CodeChangeAuthorizationClear CodeChangeReason = 4
// CodeChangeSelfDestruct is when contract code is cleared due to self-destruct. // CodeChangeSelfDestruct is when contract code is cleared due to self-destruct.
CodeChangeSelfDestruct CodeChangeReason = 5 CodeChangeSelfDestruct CodeChangeReason = 5
// CodeChangeRevert is emitted when the code is reverted back to a previous value due to call failure. // CodeChangeRevert is emitted when the code is reverted back to a previous
// It is only emitted when the tracer has opted in to use the journaling wrapper (WrapWithJournal). // value due to call failure. It is only emitted when the tracer has opted
// in to use the journaling wrapper (WrapWithJournal).
CodeChangeRevert CodeChangeReason = 6 CodeChangeRevert CodeChangeReason = 6
) )

View file

@ -42,7 +42,9 @@ func WrapWithJournal(hooks *Hooks) (*Hooks, error) {
return nil, errors.New("wrapping nil tracer") return nil, errors.New("wrapping nil tracer")
} }
// No state change to journal, return the wrapped hooks as is // No state change to journal, return the wrapped hooks as is
if hooks.OnBalanceChange == nil && hooks.OnNonceChange == nil && hooks.OnNonceChangeV2 == nil && hooks.OnCodeChange == nil && hooks.OnCodeChangeV2 == nil && hooks.OnStorageChange == nil { if hooks.OnBalanceChange == nil && hooks.OnNonceChange == nil && hooks.OnNonceChangeV2 == nil &&
hooks.OnCodeChange == nil && hooks.OnCodeChangeV2 == nil && hooks.OnStorageChange == nil {
// TODO(sina) hooks.OnLog should also be handled here
return hooks, nil return hooks, nil
} }
if hooks.OnNonceChange != nil && hooks.OnNonceChangeV2 != nil { if hooks.OnNonceChange != nil && hooks.OnNonceChangeV2 != nil {
@ -56,11 +58,14 @@ func WrapWithJournal(hooks *Hooks) (*Hooks, error) {
wrapped := *hooks wrapped := *hooks
// Create journal // Create journal
j := &journal{hooks: hooks} j := &journal{
hooks: hooks,
}
// Scope hooks need to be re-implemented. // Scope hooks need to be re-implemented.
wrapped.OnTxEnd = j.OnTxEnd wrapped.OnTxEnd = j.OnTxEnd
wrapped.OnEnter = j.OnEnter wrapped.OnEnter = j.OnEnter
wrapped.OnExit = j.OnExit wrapped.OnExit = j.OnExit
// Wrap state change hooks. // Wrap state change hooks.
if hooks.OnBalanceChange != nil { if hooks.OnBalanceChange != nil {
wrapped.OnBalanceChange = j.OnBalanceChange wrapped.OnBalanceChange = j.OnBalanceChange
@ -69,6 +74,7 @@ func WrapWithJournal(hooks *Hooks) (*Hooks, error) {
// Regardless of which hook version is used in the tracer, // Regardless of which hook version is used in the tracer,
// the journal will want to capture the nonce change reason. // the journal will want to capture the nonce change reason.
wrapped.OnNonceChangeV2 = j.OnNonceChangeV2 wrapped.OnNonceChangeV2 = j.OnNonceChangeV2
// A precaution to ensure EVM doesn't call both hooks. // A precaution to ensure EVM doesn't call both hooks.
wrapped.OnNonceChange = nil wrapped.OnNonceChange = nil
} }
@ -81,7 +87,6 @@ func WrapWithJournal(hooks *Hooks) (*Hooks, error) {
if hooks.OnStorageChange != nil { if hooks.OnStorageChange != nil {
wrapped.OnStorageChange = j.OnStorageChange wrapped.OnStorageChange = j.OnStorageChange
} }
return &wrapped, nil return &wrapped, nil
} }
@ -148,7 +153,11 @@ func (j *journal) OnExit(depth int, output []byte, gasUsed uint64, err error, re
} }
func (j *journal) OnBalanceChange(addr common.Address, prev, new *big.Int, reason BalanceChangeReason) { func (j *journal) OnBalanceChange(addr common.Address, prev, new *big.Int, reason BalanceChangeReason) {
j.entries = append(j.entries, balanceChange{addr: addr, prev: prev, new: new}) j.entries = append(j.entries, balanceChange{
addr: addr,
prev: prev,
new: new,
})
if j.hooks.OnBalanceChange != nil { if j.hooks.OnBalanceChange != nil {
j.hooks.OnBalanceChange(addr, prev, new, reason) j.hooks.OnBalanceChange(addr, prev, new, reason)
} }
@ -202,7 +211,12 @@ func (j *journal) OnCodeChangeV2(addr common.Address, prevCodeHash common.Hash,
} }
func (j *journal) OnStorageChange(addr common.Address, slot common.Hash, prev, new common.Hash) { func (j *journal) OnStorageChange(addr common.Address, slot common.Hash, prev, new common.Hash) {
j.entries = append(j.entries, storageChange{addr: addr, slot: slot, prev: prev, new: new}) j.entries = append(j.entries, storageChange{
addr: addr,
slot: slot,
prev: prev,
new: new,
})
if j.hooks.OnStorageChange != nil { if j.hooks.OnStorageChange != nil {
j.hooks.OnStorageChange(addr, slot, prev, new) j.hooks.OnStorageChange(addr, slot, prev, new)
} }

View file

@ -63,7 +63,7 @@ func (t *testTracer) OnCodeChangeV2(addr common.Address, prevCodeHash common.Has
} }
func (t *testTracer) OnStorageChange(addr common.Address, slot common.Hash, prev common.Hash, new common.Hash) { func (t *testTracer) OnStorageChange(addr common.Address, slot common.Hash, prev common.Hash, new common.Hash) {
t.t.Logf("OnStorageCodeChange(%v, %v, %v -> %v)", addr, slot, prev, new) t.t.Logf("OnStorageChange(%v, %v, %v -> %v)", addr, slot, prev, new)
if t.storage == nil { if t.storage == nil {
t.storage = make(map[common.Hash]common.Hash) t.storage = make(map[common.Hash]common.Hash)
} }
@ -76,7 +76,12 @@ func (t *testTracer) OnStorageChange(addr common.Address, slot common.Hash, prev
func TestJournalIntegration(t *testing.T) { func TestJournalIntegration(t *testing.T) {
tr := &testTracer{t: t} tr := &testTracer{t: t}
wr, err := WrapWithJournal(&Hooks{OnBalanceChange: tr.OnBalanceChange, OnNonceChange: tr.OnNonceChange, OnCodeChange: tr.OnCodeChange, OnStorageChange: tr.OnStorageChange}) wr, err := WrapWithJournal(&Hooks{
OnBalanceChange: tr.OnBalanceChange,
OnNonceChange: tr.OnNonceChange,
OnCodeChange: tr.OnCodeChange,
OnStorageChange: tr.OnStorageChange,
})
if err != nil { if err != nil {
t.Fatalf("failed to wrap test tracer: %v", err) t.Fatalf("failed to wrap test tracer: %v", err)
} }

View file

@ -18,6 +18,7 @@ package core
import ( import (
"context" "context"
"github.com/ethereum/go-ethereum/core/types/bal"
"sync/atomic" "sync/atomic"
"github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/state"
@ -33,7 +34,7 @@ type Validator interface {
ValidateBody(block *types.Block) error ValidateBody(block *types.Block) error
// ValidateState validates the given statedb and optionally the process result. // ValidateState validates the given statedb and optionally the process result.
ValidateState(block *types.Block, state *state.StateDB, res *ProcessResult, stateless bool) error ValidateState(block *types.Block, state state.BlockStateTransition, res *ProcessResult, stateless bool) error
} }
// Prefetcher is an interface for pre-caching transaction signatures and state. // Prefetcher is an interface for pre-caching transaction signatures and state.
@ -54,8 +55,10 @@ type Processor interface {
// ProcessResult contains the values computed by Process. // ProcessResult contains the values computed by Process.
type ProcessResult struct { type ProcessResult struct {
AccessList bal.ConstructionBlockAccessList
Receipts types.Receipts Receipts types.Receipts
Requests [][]byte Requests [][]byte
Logs []*types.Log Logs []*types.Log
GasUsed uint64 GasUsed uint64
Error error
} }

View file

@ -18,156 +18,516 @@ package bal
import ( import (
"bytes" "bytes"
"maps" "encoding/json"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/holiman/uint256" "github.com/holiman/uint256"
"maps"
) )
// ConstructionAccountAccess contains post-block account state for mutations as well as // ConstructionAccountAccesses contains post-block account state for mutations as well as
// all storage keys that were read during execution. It is used when building block // all storage keys that were read during execution. It is used when building block
// access list during execution. // access list during execution.
type ConstructionAccountAccess struct { type ConstructionAccountAccesses struct {
// StorageWrites is the post-state values of an account's storage slots // StorageWrites is the post-state values of an account's storage slots
// that were modified in a block, keyed by the slot key and the tx index // that were modified in a block, keyed by the slot key and the tx index
// where the modification occurred. // where the modification occurred.
StorageWrites map[common.Hash]map[uint16]common.Hash `json:"storageWrites,omitempty"` StorageWrites map[common.Hash]map[uint16]common.Hash
// StorageReads is the set of slot keys that were accessed during block // StorageReads is the set of slot keys that were accessed during block
// execution. // execution.
// //
// Storage slots which are both read and written (with changed values) // storage slots which are both read and written (with changed values)
// appear only in StorageWrites. // appear only in StorageWrites.
StorageReads map[common.Hash]struct{} `json:"storageReads,omitempty"` StorageReads map[common.Hash]struct{}
// BalanceChanges contains the post-transaction balances of an account, // BalanceChanges contains the post-transaction balances of an account,
// keyed by transaction indices where it was changed. // keyed by transaction indices where it was changed.
BalanceChanges map[uint16]*uint256.Int `json:"balanceChanges,omitempty"` BalanceChanges map[uint16]*uint256.Int
// NonceChanges contains the post-state nonce values of an account keyed // NonceChanges contains the post-state nonce values of an account keyed
// by tx index. // by tx index.
NonceChanges map[uint16]uint64 `json:"nonceChanges,omitempty"` NonceChanges map[uint16]uint64
// CodeChange contains the post-state contract code of an account keyed CodeChanges map[uint16][]byte
// by tx index.
CodeChange map[uint16][]byte `json:"codeChange,omitempty"`
} }
// NewConstructionAccountAccess initializes the account access object. func (c *ConstructionAccountAccesses) Copy() (res ConstructionAccountAccesses) {
func NewConstructionAccountAccess() *ConstructionAccountAccess { if c.StorageWrites != nil {
return &ConstructionAccountAccess{ res.StorageWrites = make(map[common.Hash]map[uint16]common.Hash)
for slot, writes := range c.StorageWrites {
res.StorageWrites[slot] = maps.Clone(writes)
}
}
if c.StorageReads != nil {
res.StorageReads = maps.Clone(c.StorageReads)
}
if c.BalanceChanges != nil {
res.BalanceChanges = maps.Clone(c.BalanceChanges)
}
if c.NonceChanges != nil {
res.NonceChanges = maps.Clone(c.NonceChanges)
}
if c.CodeChanges != nil {
res.CodeChanges = maps.Clone(c.CodeChanges)
}
return res
}
type StateMutations map[common.Address]AccountMutations
func (s StateMutations) String() string {
b, _ := json.MarshalIndent(s, "", " ")
return string(b)
}
// Merge merges the state changes present in next into the caller. After,
// the state of the caller is the aggregate diff through next.
func (s StateMutations) Merge(next StateMutations) {
for account, diff := range next {
if mut, ok := s[account]; ok {
if diff.Balance != nil {
mut.Balance = diff.Balance
}
if diff.Code != nil {
mut.Code = diff.Code
}
if diff.Nonce != nil {
mut.Nonce = diff.Nonce
}
if len(diff.StorageWrites) > 0 {
if mut.StorageWrites == nil {
mut.StorageWrites = maps.Clone(diff.StorageWrites)
} else {
for key, val := range diff.StorageWrites {
mut.StorageWrites[key] = val
}
}
}
} else {
s[account] = *diff.Copy()
}
}
}
func (s StateMutations) Eq(other StateMutations) bool {
if len(s) != len(other) {
return false
}
for addr, mut := range s {
otherMut, ok := other[addr]
if !ok {
return false
}
if !mut.Eq(&otherMut) {
return false
}
}
return true
}
type ConstructionBlockAccessList map[common.Address]*ConstructionAccountAccesses
func (c ConstructionBlockAccessList) Copy() ConstructionBlockAccessList {
res := make(ConstructionBlockAccessList)
for addr, accountAccess := range c {
aaCopy := accountAccess.Copy()
res[addr] = &aaCopy
}
return res
}
func (c ConstructionBlockAccessList) AccumulateMutations(muts StateMutations, idx uint16) {
for addr, mut := range muts {
if _, exist := c[addr]; !exist {
c[addr] = newConstructionAccountAccesses()
}
if mut.Nonce != nil {
if c[addr].NonceChanges == nil {
c[addr].NonceChanges = make(map[uint16]uint64)
}
c[addr].NonceChanges[idx] = *mut.Nonce
}
if mut.Balance != nil {
if c[addr].BalanceChanges == nil {
c[addr].BalanceChanges = make(map[uint16]*uint256.Int)
}
c[addr].BalanceChanges[idx] = mut.Balance.Clone()
}
if mut.Code != nil {
if c[addr].CodeChanges == nil {
c[addr].CodeChanges = make(map[uint16][]byte)
}
c[addr].CodeChanges[idx] = bytes.Clone(mut.Code)
}
if len(mut.StorageWrites) > 0 {
for key, val := range mut.StorageWrites {
if c[addr].StorageWrites[key] == nil {
c[addr].StorageWrites[key] = make(map[uint16]common.Hash)
}
c[addr].StorageWrites[key][idx] = val
}
}
}
}
func (c ConstructionBlockAccessList) AccumulateReads(reads StateAccesses) {
for addr, addrReads := range reads {
if _, ok := c[addr]; !ok {
c[addr] = newConstructionAccountAccesses()
}
for storageKey, _ := range addrReads {
if c[addr].StorageWrites != nil {
if _, ok := c[addr].StorageWrites[storageKey]; ok {
continue
}
}
if c[addr].StorageReads == nil {
c[addr].StorageReads = make(map[common.Hash]struct{})
}
c[addr].StorageReads[storageKey] = struct{}{}
}
}
}
func newConstructionAccountAccesses() *ConstructionAccountAccesses {
return &ConstructionAccountAccesses{
StorageWrites: make(map[common.Hash]map[uint16]common.Hash), StorageWrites: make(map[common.Hash]map[uint16]common.Hash),
StorageReads: make(map[common.Hash]struct{}), StorageReads: make(map[common.Hash]struct{}),
BalanceChanges: make(map[uint16]*uint256.Int), BalanceChanges: make(map[uint16]*uint256.Int),
NonceChanges: make(map[uint16]uint64), NonceChanges: make(map[uint16]uint64),
CodeChange: make(map[uint16][]byte), CodeChanges: make(map[uint16][]byte),
} }
} }
// ConstructionBlockAccessList contains post-block modified state and some state accessed // StateDiff contains state mutations occuring over one or more access list
// in execution (account addresses and storage keys). // index.
type ConstructionBlockAccessList struct { type StateDiff struct {
Accounts map[common.Address]*ConstructionAccountAccess Mutations map[common.Address]*AccountMutations `json:"Mutations,omitempty"`
} }
// NewConstructionBlockAccessList instantiates an empty access list. // StateAccesses contains a set of accounts/storage that were accessed during the
func NewConstructionBlockAccessList() ConstructionBlockAccessList { // execution of one or more access list indices.
return ConstructionBlockAccessList{ type StateAccesses map[common.Address]StorageAccessList
Accounts: make(map[common.Address]*ConstructionAccountAccess), type StorageAccessList map[common.Hash]struct{}
// Merge combines adds the accesses from other into s.
func (s StateAccesses) Merge(other StateAccesses) {
for addr, accesses := range other {
if _, ok := s[addr]; !ok {
s[addr] = make(map[common.Hash]struct{})
}
for slot := range accesses {
s[addr][slot] = struct{}{}
}
} }
} }
// AccountRead records the address of an account that has been read during execution. func (s StateAccesses) Eq(other StateAccesses) bool {
func (b *ConstructionBlockAccessList) AccountRead(addr common.Address) { if len(s) != len(other) {
if _, ok := b.Accounts[addr]; !ok { return false
b.Accounts[addr] = NewConstructionAccountAccess()
} }
for addr, accesses := range s {
if _, ok := other[addr]; !ok {
return false
}
if !maps.Equal(accesses, other[addr]) {
return false
}
}
return true
} }
// StorageRead records a storage key read during execution. type StorageMutations map[common.Hash]common.Hash
func (b *ConstructionBlockAccessList) StorageRead(address common.Address, key common.Hash) {
if _, ok := b.Accounts[address]; !ok { // AccountMutations contains mutations that were made to an account across
b.Accounts[address] = NewConstructionAccountAccess() // one or more access list indices.
type AccountMutations struct {
Balance *uint256.Int `json:"Balance,omitempty"`
Nonce *uint64 `json:"Nonce,omitempty"`
Code ContractCode `json:"Code,omitempty"`
StorageWrites StorageMutations `json:"StorageWrites,omitempty"`
}
// String returns a human-readable JSON representation of the account mutations.
func (a *AccountMutations) String() string {
var res bytes.Buffer
enc := json.NewEncoder(&res)
enc.SetIndent("", " ")
enc.Encode(a)
return res.String()
}
// Copy returns a deep-copy of the instance.
func (a *AccountMutations) Copy() *AccountMutations {
res := &AccountMutations{
nil,
nil,
nil,
nil,
}
if a.Nonce != nil {
res.Nonce = new(uint64)
*res.Nonce = *a.Nonce
}
if a.Code != nil {
res.Code = bytes.Clone(a.Code)
}
if a.Balance != nil {
res.Balance = new(uint256.Int).Set(a.Balance)
}
if a.StorageWrites != nil {
res.StorageWrites = maps.Clone(a.StorageWrites)
}
return res
}
// String returns the state diff as a formatted JSON string.
func (s *StateDiff) String() string {
var res bytes.Buffer
enc := json.NewEncoder(&res)
enc.SetIndent("", " ")
enc.Encode(s)
return res.String()
}
// Copy returns a deep copy of the StateDiff
func (s *StateDiff) Copy() *StateDiff {
res := &StateDiff{make(map[common.Address]*AccountMutations)}
for addr, accountDiff := range s.Mutations {
cpy := accountDiff.Copy()
res.Mutations[addr] = cpy
}
return res
}
// AccessListReader exposes utilities to read state mutations and accesses from an access list
// TODO: expose this an an interface?
type AccessListReader map[common.Address]*AccountAccess
func NewAccessListReader(bal BlockAccessList) (reader AccessListReader) {
reader = make(AccessListReader)
for _, accountAccess := range bal {
reader[accountAccess.Address] = &accountAccess
} }
if _, ok := b.Accounts[address].StorageWrites[key]; ok {
return return
} }
b.Accounts[address].StorageReads[key] = struct{}{}
func (a AccessListReader) Accesses() (accesses StateAccesses) {
accesses = make(StateAccesses)
for addr, acctAccess := range a {
if len(acctAccess.StorageReads) > 0 {
accesses[addr] = make(StorageAccessList)
for _, key := range acctAccess.StorageReads {
accesses[addr][key.ToHash()] = struct{}{}
}
} else if len(acctAccess.CodeChanges) == 0 && len(acctAccess.StorageChanges) == 0 && len(acctAccess.BalanceChanges) == 0 && len(acctAccess.NonceChanges) == 0 {
accesses[addr] = make(StorageAccessList)
}
}
return
} }
// StorageWrite records the post-transaction value of a mutated storage slot. // TODO: these methods should return the mutations accrued before the execution of the given index
// The storage slot is removed from the list of read slots.
func (b *ConstructionBlockAccessList) StorageWrite(txIdx uint16, address common.Address, key, value common.Hash) {
if _, ok := b.Accounts[address]; !ok {
b.Accounts[address] = NewConstructionAccountAccess()
}
if _, ok := b.Accounts[address].StorageWrites[key]; !ok {
b.Accounts[address].StorageWrites[key] = make(map[uint16]common.Hash)
}
b.Accounts[address].StorageWrites[key][txIdx] = value
delete(b.Accounts[address].StorageReads, key) // TODO: strip the storage mutations from the returned result
// the returned object should be able to be modified
func (a AccessListReader) accountMutationsAt(addr common.Address, idx int) (res *AccountMutations) {
acct, exist := a[addr]
if !exist {
return nil
} }
// CodeChange records the code of a newly-created contract. res = &AccountMutations{}
func (b *ConstructionBlockAccessList) CodeChange(address common.Address, txIndex uint16, code []byte) { // TODO: remove the reverse iteration here to clean the code up
if _, ok := b.Accounts[address]; !ok {
b.Accounts[address] = NewConstructionAccountAccess() for i := len(acct.BalanceChanges) - 1; i >= 0; i-- {
if acct.BalanceChanges[i].TxIdx == uint16(idx) {
res.Balance = acct.BalanceChanges[i].Balance
}
if acct.BalanceChanges[i].TxIdx < uint16(idx) {
break
} }
// TODO(rjl493456442) is it essential to deep-copy the code?
b.Accounts[address].CodeChange[txIndex] = bytes.Clone(code)
} }
// NonceChange records tx post-state nonce of any contract-like accounts whose for i := len(acct.CodeChanges) - 1; i >= 0; i-- {
// nonce was incremented. if acct.CodeChanges[i].TxIndex == uint16(idx) {
func (b *ConstructionBlockAccessList) NonceChange(address common.Address, txIdx uint16, postNonce uint64) { res.Code = bytes.Clone(acct.CodeChanges[i].Code)
if _, ok := b.Accounts[address]; !ok { break
b.Accounts[address] = NewConstructionAccountAccess() }
if acct.CodeChanges[i].TxIndex < uint16(idx) {
break
} }
b.Accounts[address].NonceChanges[txIdx] = postNonce
} }
// BalanceChange records the post-transaction balance of an account whose for i := len(acct.NonceChanges) - 1; i >= 0; i-- {
// balance changed. if acct.NonceChanges[i].TxIdx == uint16(idx) {
func (b *ConstructionBlockAccessList) BalanceChange(txIdx uint16, address common.Address, balance *uint256.Int) { res.Nonce = new(uint64)
if _, ok := b.Accounts[address]; !ok { *res.Nonce = acct.NonceChanges[i].Nonce
b.Accounts[address] = NewConstructionAccountAccess() break
}
if acct.NonceChanges[i].TxIdx < uint16(idx) {
break
} }
b.Accounts[address].BalanceChanges[txIdx] = balance.Clone()
} }
// PrettyPrint returns a human-readable representation of the access list for i := len(acct.StorageChanges) - 1; i >= 0; i-- {
func (b *ConstructionBlockAccessList) PrettyPrint() string { if res.StorageWrites == nil {
enc := b.toEncodingObj() res.StorageWrites = make(map[common.Hash]common.Hash)
return enc.PrettyPrint() }
slotWrites := acct.StorageChanges[i]
for j := len(slotWrites.Accesses) - 1; j >= 0; j-- {
if slotWrites.Accesses[j].TxIdx == uint16(idx) {
res.StorageWrites[slotWrites.Slot.ToHash()] = slotWrites.Accesses[j].ValueAfter.ToHash()
break
}
if slotWrites.Accesses[j].TxIdx < uint16(idx) {
break
}
}
if len(res.StorageWrites) == 0 {
res.StorageWrites = nil
}
} }
// Copy returns a deep copy of the access list. if res.Code == nil && res.Nonce == nil && len(res.StorageWrites) == 0 && res.Balance == nil {
func (b *ConstructionBlockAccessList) Copy() *ConstructionBlockAccessList { return nil
res := NewConstructionBlockAccessList()
for addr, aa := range b.Accounts {
var aaCopy ConstructionAccountAccess
slotWrites := make(map[common.Hash]map[uint16]common.Hash, len(aa.StorageWrites))
for key, m := range aa.StorageWrites {
slotWrites[key] = maps.Clone(m)
} }
aaCopy.StorageWrites = slotWrites return res
aaCopy.StorageReads = maps.Clone(aa.StorageReads)
balances := make(map[uint16]*uint256.Int, len(aa.BalanceChanges))
for index, balance := range aa.BalanceChanges {
balances[index] = balance.Clone()
} }
aaCopy.BalanceChanges = balances
aaCopy.NonceChanges = maps.Clone(aa.NonceChanges)
codes := make(map[uint16][]byte, len(aa.CodeChange)) func (a AccessListReader) AccountMutations(addr common.Address, idx int) (res *AccountMutations) {
for index, code := range aa.CodeChange { diff, exist := a[addr]
codes[index] = bytes.Clone(code) if !exist {
return nil
}
res = &AccountMutations{}
for i := 0; i < len(diff.BalanceChanges) && diff.BalanceChanges[i].TxIdx < uint16(idx); i++ {
res.Balance = diff.BalanceChanges[i].Balance.Clone()
}
for i := 0; i < len(diff.CodeChanges) && diff.CodeChanges[i].TxIndex < uint16(idx); i++ {
res.Code = bytes.Clone(diff.CodeChanges[i].Code)
}
for i := 0; i < len(diff.NonceChanges) && diff.NonceChanges[i].TxIdx < uint16(idx); i++ {
res.Nonce = new(uint64)
*res.Nonce = diff.NonceChanges[i].Nonce
}
if len(diff.StorageChanges) > 0 {
res.StorageWrites = make(map[common.Hash]common.Hash)
for _, slotWrites := range diff.StorageChanges {
for i := 0; i < len(slotWrites.Accesses) && slotWrites.Accesses[i].TxIdx < uint16(idx); i++ {
res.StorageWrites[slotWrites.Slot.ToHash()] = slotWrites.Accesses[i].ValueAfter.ToHash()
}
}
}
if res.Code == nil && res.Nonce == nil && len(res.StorageWrites) == 0 && res.Balance == nil {
return nil
}
return res
}
// Mutations returns the aggregate state mutations from [0, idx)
func (a AccessListReader) Mutations(idx int) *StateMutations {
res := make(StateMutations)
for addr := range a {
if mut := a.AccountMutations(addr, idx); mut != nil {
res[addr] = *mut
} }
aaCopy.CodeChange = codes
res.Accounts[addr] = &aaCopy
} }
return &res return &res
} }
// MutationsAt returns the state mutations from an index
func (a AccessListReader) MutationsAt(idx int) *StateMutations {
res := make(StateMutations)
for addr := range a {
if mut := a.accountMutationsAt(addr, idx); mut != nil {
res[addr] = *mut
}
}
return &res
}
type StorageKeys map[common.Address][]common.Hash
// StorageKeys returns the set of accounts and storage keys mutated in the access list.
// If reads is set, the un-mutated accounts/keys are included in the result.
func (a AccessListReader) StorageKeys(reads bool) (keys StorageKeys) {
keys = make(StorageKeys)
for addr, acct := range a {
for _, storageChange := range acct.StorageChanges {
keys[addr] = append(keys[addr], storageChange.Slot.ToHash())
}
if !(reads && len(acct.StorageReads) > 0) {
continue
}
for _, storageRead := range acct.StorageReads {
keys[addr] = append(keys[addr], storageRead.ToHash())
}
}
return
}
// Storage returns the value of a storage key at the start of executing an index.
// If the slot has no mutations in the access list, it returns nil.
func (a AccessListReader) Storage(addr common.Address, key common.Hash, idx int) (val *common.Hash) {
storageMuts := a.AccountMutations(addr, idx)
if storageMuts != nil {
res, ok := storageMuts.StorageWrites[key]
if ok {
return &res
}
}
return nil
}
// Copy returns a deep copy of the access list
func (e BlockAccessList) Copy() (res BlockAccessList) {
for _, accountAccess := range e {
res = append(res, accountAccess.Copy())
}
return
}
// Eq returns whether the calling instance is equal to the provided one.
func (a *AccountMutations) Eq(other *AccountMutations) bool {
if a.Balance != nil || other.Balance != nil {
if a.Balance == nil || other.Balance == nil {
return false
}
if !a.Balance.Eq(other.Balance) {
return false
}
}
if (len(a.Code) != 0 || len(other.Code) != 0) && !bytes.Equal(a.Code, other.Code) {
return false
}
if a.Nonce != nil || other.Nonce != nil {
if a.Nonce == nil || other.Nonce == nil {
return false
}
if *a.Nonce != *other.Nonce {
return false
}
}
if a.StorageWrites != nil || other.StorageWrites != nil {
if !maps.Equal(a.StorageWrites, other.StorageWrites) {
return false
}
}
return true
}

File diff suppressed because one or more lines are too long

View file

@ -19,8 +19,11 @@ package bal
import ( import (
"bytes" "bytes"
"cmp" "cmp"
"encoding/hex"
"encoding/json"
"errors" "errors"
"fmt" "fmt"
"github.com/ethereum/go-ethereum/log"
"io" "io"
"maps" "maps"
"slices" "slices"
@ -33,27 +36,95 @@ import (
"github.com/holiman/uint256" "github.com/holiman/uint256"
) )
//go:generate go run github.com/ethereum/go-ethereum/rlp/rlpgen -out bal_encoding_rlp_generated.go -type BlockAccessList -decoder //go:generate go run github.com/ethereum/go-ethereum/rlp/rlpgen -out bal_encoding_rlp_generated.go -type AccountAccess -decoder
// These are objects used as input for the access list encoding. They mirror // These are objects used as input for the access list encoding. They mirror
// the spec format. // the spec format.
// BlockAccessList is the encoding format of ConstructionBlockAccessList. // BlockAccessList is the encoding format of AccessListBuilder.
type BlockAccessList struct { type BlockAccessList []AccountAccess
Accesses []AccountAccess `ssz-max:"300000"`
func (e BlockAccessList) EncodeRLP(_w io.Writer) error {
w := rlp.NewEncoderBuffer(_w)
l := w.List()
for _, access := range e {
access.EncodeRLP(w)
}
w.ListEnd(l)
return w.Flush()
} }
func (e *BlockAccessList) DecodeRLP(dec *rlp.Stream) error {
if _, err := dec.List(); err != nil {
return err
}
*e = (*e)[:0]
for dec.MoreDataInList() {
var access AccountAccess
if err := access.DecodeRLP(dec); err != nil {
return err
}
*e = append(*e, access)
}
dec.ListEnd()
return nil
}
func (e *BlockAccessList) EncodedSize() int {
b, err := rlp.EncodeToBytes(e)
if err != nil {
// TODO: proper to crit here?
log.Crit("failed to rlp encode access list", "err", err)
}
return len(b)
}
func (e *BlockAccessList) JSONString() string {
res, _ := json.MarshalIndent(e.StringableRepresentation(), "", " ")
return string(res)
}
// StringableRepresentation returns an instance of the block access list
// which can be converted to a human-readable JSON representation.
func (e *BlockAccessList) StringableRepresentation() interface{} {
res := []AccountAccess{}
for _, aa := range *e {
res = append(res, aa)
}
return &res
}
func (e *BlockAccessList) String() string {
var res bytes.Buffer
enc := json.NewEncoder(&res)
enc.SetIndent("", " ")
// TODO: check error
enc.Encode(e)
return res.String()
}
// TODO: check that no fields are nil in Validate (unless it's valid for them to be nil)
// Validate returns an error if the contents of the access list are not ordered // Validate returns an error if the contents of the access list are not ordered
// according to the spec or any code changes are contained which exceed protocol // according to the spec or any code changes are contained which exceed protocol
// max code size. // max code size.
func (e *BlockAccessList) Validate() error { func (e BlockAccessList) Validate(blockTxCount int) error {
if !slices.IsSortedFunc(e.Accesses, func(a, b AccountAccess) int { if !slices.IsSortedFunc(e, func(a, b AccountAccess) int {
return bytes.Compare(a.Address[:], b.Address[:]) return bytes.Compare(a.Address[:], b.Address[:])
}) { }) {
return errors.New("block access list accounts not in lexicographic order") return errors.New("block access list accounts not in lexicographic order")
} }
for _, entry := range e.Accesses { // check that the accounts are unique
if err := entry.validate(); err != nil { addrs := make(map[common.Address]struct{})
for _, acct := range e {
addr := acct.Address
if _, ok := addrs[addr]; ok {
return fmt.Errorf("duplicate account in block access list: %x", addr)
}
addrs[addr] = struct{}{}
}
for _, entry := range e {
if err := entry.validate(blockTxCount); err != nil {
return err return err
} }
} }
@ -70,54 +141,136 @@ func (e *BlockAccessList) Hash() common.Hash {
// under reasonable conditions. // under reasonable conditions.
panic(err) panic(err)
} }
/*
bal, err := json.MarshalIndent(e.StringableRepresentation(), "", " ")
if err != nil {
panic(err)
}
*/
return crypto.Keccak256Hash(enc.Bytes()) return crypto.Keccak256Hash(enc.Bytes())
} }
// encodeBalance encodes the provided balance into 16-bytes.
func encodeBalance(val *uint256.Int) [16]byte {
valBytes := val.Bytes()
if len(valBytes) > 16 {
panic("can't encode value that is greater than 16 bytes in size")
}
var enc [16]byte
copy(enc[16-len(valBytes):], valBytes[:])
return enc
}
// encodingBalanceChange is the encoding format of BalanceChange. // encodingBalanceChange is the encoding format of BalanceChange.
type encodingBalanceChange struct { type encodingBalanceChange struct {
TxIdx uint16 `ssz-size:"2"` TxIdx uint16 `json:"txIndex"`
Balance [16]byte `ssz-size:"16"` Balance *uint256.Int `json:"balance"`
} }
// encodingAccountNonce is the encoding format of NonceChange. // encodingAccountNonce is the encoding format of NonceChange.
type encodingAccountNonce struct { type encodingAccountNonce struct {
TxIdx uint16 `ssz-size:"2"` TxIdx uint16 `json:"txIndex"`
Nonce uint64 `ssz-size:"8"` Nonce uint64 `json:"nonce"`
} }
// encodingStorageWrite is the encoding format of StorageWrites. // encodingStorageWrite is the encoding format of StorageWrites.
type encodingStorageWrite struct { type encodingStorageWrite struct {
TxIdx uint16 TxIdx uint16 `json:"txIndex"`
ValueAfter [32]byte `ssz-size:"32"` ValueAfter *EncodedStorage `json:"valueAfter"`
}
// EncodedStorage can represent either a storage key or value
type EncodedStorage struct {
inner *uint256.Int
}
var _ rlp.Encoder = &EncodedStorage{}
var _ rlp.Decoder = &EncodedStorage{}
func (e *EncodedStorage) ToHash() common.Hash {
if e == nil {
return common.Hash{}
}
return e.inner.Bytes32()
}
func newEncodedStorageFromHash(hash common.Hash) *EncodedStorage {
return &EncodedStorage{
new(uint256.Int).SetBytes(hash[:]),
}
}
func (s *EncodedStorage) UnmarshalJSON(b []byte) error {
var str string
if err := json.Unmarshal(b, &str); err != nil {
return err
}
str = strings.TrimLeft(str, "0x")
if len(str) == 0 {
return nil
}
if len(str)%2 == 1 {
str = "0" + str
}
val, err := hex.DecodeString(str)
if err != nil {
return err
}
if len(val) > 32 {
return fmt.Errorf("storage key/value cannot be greater than 32 bytes")
}
// TODO: check is s == nil ?? should be programmer error
*s = EncodedStorage{
inner: new(uint256.Int).SetBytes(val),
}
return nil
}
func (s EncodedStorage) MarshalJSON() ([]byte, error) {
return json.Marshal(s.inner.Hex())
}
func (s *EncodedStorage) EncodeRLP(_w io.Writer) error {
return s.inner.EncodeRLP(_w)
}
func (s *EncodedStorage) DecodeRLP(dec *rlp.Stream) error {
if s == nil {
*s = EncodedStorage{}
}
s.inner = uint256.NewInt(0)
return dec.ReadUint256(s.inner)
} }
// encodingStorageWrite is the encoding format of SlotWrites. // encodingStorageWrite is the encoding format of SlotWrites.
type encodingSlotWrites struct { type encodingSlotWrites struct {
Slot [32]byte `ssz-size:"32"` Slot *EncodedStorage `json:"slot"`
Accesses []encodingStorageWrite `ssz-max:"300000"` Accesses []encodingStorageWrite `json:"accesses"`
} }
// validate returns an instance of the encoding-representation slot writes in // validate returns an instance of the encoding-representation slot writes in
// working representation. // working representation.
func (e *encodingSlotWrites) validate() error { func (e *encodingSlotWrites) validate(blockTxCount int) error {
if slices.IsSortedFunc(e.Accesses, func(a, b encodingStorageWrite) int { if e.Slot == nil {
return errors.New("nil slot key")
}
if !slices.IsSortedFunc(e.Accesses, func(a, b encodingStorageWrite) int {
return cmp.Compare[uint16](a.TxIdx, b.TxIdx) return cmp.Compare[uint16](a.TxIdx, b.TxIdx)
}) { }) {
return nil
}
return errors.New("storage write tx indices not in order") return errors.New("storage write tx indices not in order")
} }
for i, access := range e.Accesses {
if access.ValueAfter == nil {
return errors.New("nil storage write post")
}
if i > 0 && e.Accesses[i-1].TxIdx == access.TxIdx {
return errors.New("duplicate storage write index")
}
}
// TODO: add test that covers there are actually storage modifications here
// if there aren't, it should be a bad block
if len(e.Accesses) == 0 {
return fmt.Errorf("empty storage writes")
} else if int(e.Accesses[len(e.Accesses)-1].TxIdx) >= blockTxCount+2 {
return fmt.Errorf("storage access reported index higher than allowed")
}
return nil
}
// encodingCodeChange contains the runtime bytecode deployed at an address // encodingCodeChange contains the runtime bytecode deployed at an address
// and the transaction index where the deployment took place. // and the transaction index where the deployment took place.
@ -126,64 +279,120 @@ type encodingCodeChange struct {
Code []byte `ssz-max:"300000"` // TODO(rjl493456442) shall we put the limit here? The limit will be increased gradually Code []byte `ssz-max:"300000"` // TODO(rjl493456442) shall we put the limit here? The limit will be increased gradually
} }
// AccountAccess is the encoding format of ConstructionAccountAccess. // AccountAccess is the encoding format of ConstructionAccountAccesses.
type AccountAccess struct { type AccountAccess struct {
Address [20]byte `ssz-size:"20"` // 20-byte Ethereum address Address common.Address `json:"address,omitempty"` // 20-byte Ethereum address
StorageWrites []encodingSlotWrites `ssz-max:"300000"` // Storage changes (slot -> [tx_index -> new_value]) StorageChanges []encodingSlotWrites `json:"storageChanges,omitempty"` // EncodedStorage changes (slot -> [tx_index -> new_value])
StorageReads [][32]byte `ssz-max:"300000"` // Read-only storage keys StorageReads []*EncodedStorage `json:"storageReads,omitempty"` // Read-only storage keys
BalanceChanges []encodingBalanceChange `ssz-max:"300000"` // Balance changes ([tx_index -> post_balance]) BalanceChanges []encodingBalanceChange `json:"balanceChanges,omitempty"` // Balance changes ([tx_index -> post_balance])
NonceChanges []encodingAccountNonce `ssz-max:"300000"` // Nonce changes ([tx_index -> new_nonce]) NonceChanges []encodingAccountNonce `json:"nonceChanges,omitempty"` // Nonce changes ([tx_index -> new_nonce])
CodeChanges []encodingCodeChange `ssz-max:"300000"` // Code changes ([tx_index -> new_code]) CodeChanges []encodingCodeChange `json:"code,omitempty"` // CodeChanges changes ([tx_index -> new_code])
} }
// validate converts the account accesses out of encoding format. // validate converts the account accesses out of encoding format.
// If any of the keys in the encoding object are not ordered according to the // If any of the keys in the encoding object are not ordered according to the
// spec, an error is returned. // spec, an error is returned.
func (e *AccountAccess) validate() error { func (e *AccountAccess) validate(blockTxCount int) error {
// Check the storage write slots are sorted in order // Check the storage write slots are sorted in order
if !slices.IsSortedFunc(e.StorageWrites, func(a, b encodingSlotWrites) int { if !slices.IsSortedFunc(e.StorageChanges, func(a, b encodingSlotWrites) int {
return bytes.Compare(a.Slot[:], b.Slot[:]) aHash, bHash := a.Slot.ToHash(), b.Slot.ToHash()
return bytes.Compare(aHash[:], bHash[:])
}) { }) {
return errors.New("storage writes slots not in lexicographic order") return errors.New("storage writes slots not in lexicographic order")
} }
for _, write := range e.StorageWrites { for _, write := range e.StorageChanges {
if err := write.validate(); err != nil { if err := write.validate(blockTxCount); err != nil {
return err return err
} }
} }
readKeys := make(map[common.Hash]struct{})
writeKeys := make(map[common.Hash]struct{})
for _, readKey := range e.StorageReads {
if _, ok := readKeys[readKey.ToHash()]; ok {
return errors.New("duplicate read key")
}
readKeys[readKey.ToHash()] = struct{}{}
}
for _, write := range e.StorageChanges {
writeKey := write.Slot
if _, ok := writeKeys[writeKey.ToHash()]; ok {
return errors.New("duplicate write key")
}
writeKeys[writeKey.ToHash()] = struct{}{}
}
for readKey := range readKeys {
if _, ok := writeKeys[readKey]; ok {
return errors.New("storage key reported in both read/write sets")
}
}
// Check the storage read slots are sorted in order // Check the storage read slots are sorted in order
if !slices.IsSortedFunc(e.StorageReads, func(a, b [32]byte) int { if !slices.IsSortedFunc(e.StorageReads, func(a, b *EncodedStorage) int {
return bytes.Compare(a[:], b[:]) aHash, bHash := a.ToHash(), b.ToHash()
return bytes.Compare(aHash[:], bHash[:])
}) { }) {
return errors.New("storage read slots not in lexicographic order") return errors.New("storage read slots not in lexicographic order")
} }
// Check the balance changes are sorted in order // Check the balance changes are sorted in order
// and that none of them report an index above what is allowed
if !slices.IsSortedFunc(e.BalanceChanges, func(a, b encodingBalanceChange) int { if !slices.IsSortedFunc(e.BalanceChanges, func(a, b encodingBalanceChange) int {
return cmp.Compare[uint16](a.TxIdx, b.TxIdx) return cmp.Compare[uint16](a.TxIdx, b.TxIdx)
}) { }) {
return errors.New("balance changes not in ascending order by tx index") return errors.New("balance changes not in ascending order by tx index")
} }
if len(e.BalanceChanges) > 0 && int(e.BalanceChanges[len(e.BalanceChanges)-1].TxIdx) > blockTxCount+2 {
return errors.New("highest balance change index beyond what is allowed")
}
// check that the balance values are set and there are no duplicate index entries
for i, balanceChange := range e.BalanceChanges {
if balanceChange.Balance == nil {
return errors.New("nil balance change value")
}
if i > 0 && e.BalanceChanges[i-1].TxIdx == balanceChange.TxIdx {
return errors.New("duplicate index for balance change")
}
}
// Check the nonce changes are sorted in order // Check the nonce changes are sorted in order
// and that none of them report an index above what is allowed
if !slices.IsSortedFunc(e.NonceChanges, func(a, b encodingAccountNonce) int { if !slices.IsSortedFunc(e.NonceChanges, func(a, b encodingAccountNonce) int {
return cmp.Compare[uint16](a.TxIdx, b.TxIdx) return cmp.Compare[uint16](a.TxIdx, b.TxIdx)
}) { }) {
return errors.New("nonce changes not in ascending order by tx index") return errors.New("nonce changes not in ascending order by tx index")
} }
if len(e.NonceChanges) > 0 && int(e.NonceChanges[len(e.NonceChanges)-1].TxIdx) >= blockTxCount+2 {
return errors.New("highest nonce change index beyond what is allowed")
}
for i, nonceChange := range e.NonceChanges {
if i > 0 && nonceChange.TxIdx == e.NonceChanges[i-1].TxIdx {
return errors.New("duplicate index reported in nonce changes")
}
}
// Check the code changes are sorted in order // TODO: contact testing team to add a test case which has the code changes out of order,
// as it wasn't checked here previously
if !slices.IsSortedFunc(e.CodeChanges, func(a, b encodingCodeChange) int { if !slices.IsSortedFunc(e.CodeChanges, func(a, b encodingCodeChange) int {
return cmp.Compare[uint16](a.TxIndex, b.TxIndex) return cmp.Compare[uint16](a.TxIndex, b.TxIndex)
}) { }) {
return errors.New("code changes not in ascending order by tx index") return errors.New("code changes not in ascending order")
} }
for _, change := range e.CodeChanges { if len(e.CodeChanges) > 0 && int(e.CodeChanges[len(e.CodeChanges)-1].TxIndex) >= blockTxCount+2 {
// TODO(rjl493456442): This check should be fork-aware, since the limit may return errors.New("highest code change index beyond what is allowed")
// differ across forks. }
if len(change.Code) > params.MaxCodeSize { for i, codeChange := range e.CodeChanges {
return errors.New("code change contained oversized code") if i > 0 && codeChange.TxIndex == e.CodeChanges[i-1].TxIndex {
return errors.New("duplicate index reported in code changes")
}
}
// validate that code changes could plausibly be correct (none exceed
// max code size of a contract)
for _, codeChange := range e.CodeChanges {
if len(codeChange.Code) > params.MaxCodeSize {
return fmt.Errorf("code change contained oversized code")
} }
} }
return nil return nil
@ -196,41 +405,40 @@ func (e *AccountAccess) Copy() AccountAccess {
StorageReads: slices.Clone(e.StorageReads), StorageReads: slices.Clone(e.StorageReads),
BalanceChanges: slices.Clone(e.BalanceChanges), BalanceChanges: slices.Clone(e.BalanceChanges),
NonceChanges: slices.Clone(e.NonceChanges), NonceChanges: slices.Clone(e.NonceChanges),
StorageWrites: make([]encodingSlotWrites, 0, len(e.StorageWrites)),
CodeChanges: make([]encodingCodeChange, 0, len(e.CodeChanges)),
} }
for _, storageWrite := range e.StorageWrites { for _, storageWrite := range e.StorageChanges {
res.StorageWrites = append(res.StorageWrites, encodingSlotWrites{ res.StorageChanges = append(res.StorageChanges, encodingSlotWrites{
Slot: storageWrite.Slot, Slot: storageWrite.Slot,
Accesses: slices.Clone(storageWrite.Accesses), Accesses: slices.Clone(storageWrite.Accesses),
}) })
} }
for _, codeChange := range e.CodeChanges { for _, codeChange := range e.CodeChanges {
res.CodeChanges = append(res.CodeChanges, encodingCodeChange{ res.CodeChanges = append(res.CodeChanges,
TxIndex: codeChange.TxIndex, encodingCodeChange{
Code: bytes.Clone(codeChange.Code), codeChange.TxIndex,
bytes.Clone(codeChange.Code),
}) })
} }
return res return res
} }
// EncodeRLP returns the RLP-encoded access list // EncodeRLP returns the RLP-encoded access list
func (b *ConstructionBlockAccessList) EncodeRLP(wr io.Writer) error { func (c ConstructionBlockAccessList) EncodeRLP(wr io.Writer) error {
return b.toEncodingObj().EncodeRLP(wr) return c.ToEncodingObj().EncodeRLP(wr)
} }
var _ rlp.Encoder = &ConstructionBlockAccessList{} var _ rlp.Encoder = &ConstructionBlockAccessList{}
// toEncodingObj creates an instance of the ConstructionAccountAccess of the type that is // toEncodingObj creates an instance of the ConstructionAccountAccesses of the type that is
// used as input for the encoding. // used as input for the encoding.
func (a *ConstructionAccountAccess) toEncodingObj(addr common.Address) AccountAccess { func (a *ConstructionAccountAccesses) toEncodingObj(addr common.Address) AccountAccess {
res := AccountAccess{ res := AccountAccess{
Address: addr, Address: addr,
StorageWrites: make([]encodingSlotWrites, 0, len(a.StorageWrites)), StorageChanges: make([]encodingSlotWrites, 0),
StorageReads: make([][32]byte, 0, len(a.StorageReads)), StorageReads: make([]*EncodedStorage, 0),
BalanceChanges: make([]encodingBalanceChange, 0, len(a.BalanceChanges)), BalanceChanges: make([]encodingBalanceChange, 0),
NonceChanges: make([]encodingAccountNonce, 0, len(a.NonceChanges)), NonceChanges: make([]encodingAccountNonce, 0),
CodeChanges: make([]encodingCodeChange, 0, len(a.CodeChange)), CodeChanges: make([]encodingCodeChange, 0),
} }
// Convert write slots // Convert write slots
@ -238,7 +446,7 @@ func (a *ConstructionAccountAccess) toEncodingObj(addr common.Address) AccountAc
slices.SortFunc(writeSlots, common.Hash.Cmp) slices.SortFunc(writeSlots, common.Hash.Cmp)
for _, slot := range writeSlots { for _, slot := range writeSlots {
var obj encodingSlotWrites var obj encodingSlotWrites
obj.Slot = slot obj.Slot = newEncodedStorageFromHash(slot)
slotWrites := a.StorageWrites[slot] slotWrites := a.StorageWrites[slot]
obj.Accesses = make([]encodingStorageWrite, 0, len(slotWrites)) obj.Accesses = make([]encodingStorageWrite, 0, len(slotWrites))
@ -248,17 +456,17 @@ func (a *ConstructionAccountAccess) toEncodingObj(addr common.Address) AccountAc
for _, index := range indices { for _, index := range indices {
obj.Accesses = append(obj.Accesses, encodingStorageWrite{ obj.Accesses = append(obj.Accesses, encodingStorageWrite{
TxIdx: index, TxIdx: index,
ValueAfter: slotWrites[index], ValueAfter: newEncodedStorageFromHash(slotWrites[index]),
}) })
} }
res.StorageWrites = append(res.StorageWrites, obj) res.StorageChanges = append(res.StorageChanges, obj)
} }
// Convert read slots // Convert read slots
readSlots := slices.Collect(maps.Keys(a.StorageReads)) readSlots := slices.Collect(maps.Keys(a.StorageReads))
slices.SortFunc(readSlots, common.Hash.Cmp) slices.SortFunc(readSlots, common.Hash.Cmp)
for _, slot := range readSlots { for _, slot := range readSlots {
res.StorageReads = append(res.StorageReads, slot) res.StorageReads = append(res.StorageReads, newEncodedStorageFromHash(slot))
} }
// Convert balance changes // Convert balance changes
@ -267,7 +475,7 @@ func (a *ConstructionAccountAccess) toEncodingObj(addr common.Address) AccountAc
for _, idx := range balanceIndices { for _, idx := range balanceIndices {
res.BalanceChanges = append(res.BalanceChanges, encodingBalanceChange{ res.BalanceChanges = append(res.BalanceChanges, encodingBalanceChange{
TxIdx: idx, TxIdx: idx,
Balance: encodeBalance(a.BalanceChanges[idx]), Balance: new(uint256.Int).Set(a.BalanceChanges[idx]),
}) })
} }
@ -282,77 +490,31 @@ func (a *ConstructionAccountAccess) toEncodingObj(addr common.Address) AccountAc
} }
// Convert code change // Convert code change
codeIndices := slices.Collect(maps.Keys(a.CodeChange)) codeChangeIdxs := slices.Collect(maps.Keys(a.CodeChanges))
slices.SortFunc(codeIndices, cmp.Compare[uint16]) slices.SortFunc(codeChangeIdxs, cmp.Compare[uint16])
for _, idx := range codeIndices { for _, idx := range codeChangeIdxs {
res.CodeChanges = append(res.CodeChanges, encodingCodeChange{ res.CodeChanges = append(res.CodeChanges, encodingCodeChange{
TxIndex: idx, idx,
Code: a.CodeChange[idx], bytes.Clone(a.CodeChanges[idx]),
}) })
} }
return res return res
} }
// toEncodingObj returns an instance of the access list expressed as the type // ToEncodingObj returns an instance of the access list expressed as the type
// which is used as input for the encoding/decoding. // which is used as input for the encoding/decoding.
func (b *ConstructionBlockAccessList) toEncodingObj() *BlockAccessList { func (c ConstructionBlockAccessList) ToEncodingObj() *BlockAccessList {
var addresses []common.Address var addresses []common.Address
for addr := range b.Accounts { for addr := range c {
addresses = append(addresses, addr) addresses = append(addresses, addr)
} }
slices.SortFunc(addresses, common.Address.Cmp) slices.SortFunc(addresses, common.Address.Cmp)
var res BlockAccessList var res BlockAccessList
for _, addr := range addresses { for _, addr := range addresses {
res.Accesses = append(res.Accesses, b.Accounts[addr].toEncodingObj(addr)) res = append(res, c[addr].toEncodingObj(addr))
} }
return &res return &res
} }
func (e *BlockAccessList) PrettyPrint() string { type ContractCode []byte
var res bytes.Buffer
printWithIndent := func(indent int, text string) {
fmt.Fprintf(&res, "%s%s\n", strings.Repeat(" ", indent), text)
}
for _, accountDiff := range e.Accesses {
printWithIndent(0, fmt.Sprintf("%x:", accountDiff.Address))
printWithIndent(1, "storage writes:")
for _, sWrite := range accountDiff.StorageWrites {
printWithIndent(2, fmt.Sprintf("%x:", sWrite.Slot))
for _, access := range sWrite.Accesses {
printWithIndent(3, fmt.Sprintf("%d: %x", access.TxIdx, access.ValueAfter))
}
}
printWithIndent(1, "storage reads:")
for _, slot := range accountDiff.StorageReads {
printWithIndent(2, fmt.Sprintf("%x", slot))
}
printWithIndent(1, "balance changes:")
for _, change := range accountDiff.BalanceChanges {
balance := new(uint256.Int).SetBytes(change.Balance[:]).String()
printWithIndent(2, fmt.Sprintf("%d: %s", change.TxIdx, balance))
}
printWithIndent(1, "nonce changes:")
for _, change := range accountDiff.NonceChanges {
printWithIndent(2, fmt.Sprintf("%d: %d", change.TxIdx, change.Nonce))
}
printWithIndent(1, "code changes:")
for _, change := range accountDiff.CodeChanges {
printWithIndent(2, fmt.Sprintf("%d: %x", change.TxIndex, change.Code))
}
}
return res.String()
}
// Copy returns a deep copy of the access list
func (e *BlockAccessList) Copy() (res BlockAccessList) {
for _, accountAccess := range e.Accesses {
res.Accesses = append(res.Accesses, accountAccess.Copy())
}
return
}

View file

@ -0,0 +1,107 @@
package bal
import (
"encoding/json"
"fmt"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/rlp"
)
func (c *ContractCode) MarshalJSON() ([]byte, error) {
hexStr := fmt.Sprintf("%x", *c)
return json.Marshal(hexStr)
}
func (e encodingBalanceChange) MarshalJSON() ([]byte, error) {
type Alias encodingBalanceChange
return json.Marshal(&struct {
TxIdx string `json:"txIndex"`
*Alias
}{
TxIdx: fmt.Sprintf("0x%x", e.TxIdx),
Alias: (*Alias)(&e),
})
}
func (e *encodingBalanceChange) UnmarshalJSON(data []byte) error {
type Alias encodingBalanceChange
aux := &struct {
TxIdx string `json:"txIndex"`
*Alias
}{
Alias: (*Alias)(e),
}
if err := json.Unmarshal(data, &aux); err != nil {
return err
}
if len(aux.TxIdx) >= 2 && aux.TxIdx[:2] == "0x" {
if _, err := fmt.Sscanf(aux.TxIdx, "0x%x", &e.TxIdx); err != nil {
return err
}
}
return nil
}
func (e encodingAccountNonce) MarshalJSON() ([]byte, error) {
type Alias encodingAccountNonce
return json.Marshal(&struct {
TxIdx string `json:"txIndex"`
Nonce string `json:"nonce"`
*Alias
}{
TxIdx: fmt.Sprintf("0x%x", e.TxIdx),
Nonce: fmt.Sprintf("0x%x", e.Nonce),
Alias: (*Alias)(&e),
})
}
func (e *encodingAccountNonce) UnmarshalJSON(data []byte) error {
type Alias encodingAccountNonce
aux := &struct {
TxIdx string `json:"txIndex"`
Nonce string `json:"nonce"`
*Alias
}{
Alias: (*Alias)(e),
}
if err := json.Unmarshal(data, &aux); err != nil {
return err
}
if len(aux.TxIdx) >= 2 && aux.TxIdx[:2] == "0x" {
if _, err := fmt.Sscanf(aux.TxIdx, "0x%x", &e.TxIdx); err != nil {
return err
}
}
if len(aux.Nonce) >= 2 && aux.Nonce[:2] == "0x" {
if _, err := fmt.Sscanf(aux.Nonce, "0x%x", &e.Nonce); err != nil {
return err
}
}
return nil
}
// UnmarshalJSON implements json.Unmarshaler to decode from RLP hex bytes
func (b *BlockAccessList) UnmarshalJSON(input []byte) error {
// Handle both hex string and object formats
var hexBytes hexutil.Bytes
if err := json.Unmarshal(input, &hexBytes); err == nil {
// It's a hex string, decode from RLP
return rlp.DecodeBytes(hexBytes, b)
}
// Otherwise try to unmarshal as structured JSON
var tmp []AccountAccess
if err := json.Unmarshal(input, &tmp); err != nil {
return err
}
*b = BlockAccessList(tmp)
return nil
}
// MarshalJSON implements json.Marshaler to encode as RLP hex bytes
func (b BlockAccessList) MarshalJSON() ([]byte, error) {
// Encode to RLP then to hex
rlpBytes, err := rlp.EncodeToBytes(b)
if err != nil {
return nil, err
}
return json.Marshal(hexutil.Bytes(rlpBytes))
}

View file

@ -2,275 +2,260 @@
package bal package bal
import "github.com/ethereum/go-ethereum/common"
import "github.com/ethereum/go-ethereum/rlp" import "github.com/ethereum/go-ethereum/rlp"
import "github.com/holiman/uint256"
import "io" import "io"
func (obj *BlockAccessList) EncodeRLP(_w io.Writer) error { func (obj *AccountAccess) EncodeRLP(_w io.Writer) error {
w := rlp.NewEncoderBuffer(_w) w := rlp.NewEncoderBuffer(_w)
_tmp0 := w.List() _tmp0 := w.List()
w.WriteBytes(obj.Address[:])
_tmp1 := w.List() _tmp1 := w.List()
for _, _tmp2 := range obj.Accesses { for _, _tmp2 := range obj.StorageChanges {
_tmp3 := w.List() _tmp3 := w.List()
w.WriteBytes(_tmp2.Address[:]) if err := _tmp2.Slot.EncodeRLP(w); err != nil {
_tmp4 := w.List() return err
for _, _tmp5 := range _tmp2.StorageWrites { }
_tmp6 := w.List() _tmp4 := w.List()
w.WriteBytes(_tmp5.Slot[:]) for _, _tmp5 := range _tmp2.Accesses {
_tmp7 := w.List() _tmp6 := w.List()
for _, _tmp8 := range _tmp5.Accesses { w.WriteUint64(uint64(_tmp5.TxIdx))
_tmp9 := w.List() if err := _tmp5.ValueAfter.EncodeRLP(w); err != nil {
w.WriteUint64(uint64(_tmp8.TxIdx)) return err
w.WriteBytes(_tmp8.ValueAfter[:])
w.ListEnd(_tmp9)
} }
w.ListEnd(_tmp7)
w.ListEnd(_tmp6) w.ListEnd(_tmp6)
} }
w.ListEnd(_tmp4) w.ListEnd(_tmp4)
_tmp10 := w.List() w.ListEnd(_tmp3)
for _, _tmp11 := range _tmp2.StorageReads {
w.WriteBytes(_tmp11[:])
} }
w.ListEnd(_tmp10) w.ListEnd(_tmp1)
_tmp7 := w.List()
for _, _tmp8 := range obj.StorageReads {
if err := _tmp8.EncodeRLP(w); err != nil {
return err
}
}
w.ListEnd(_tmp7)
_tmp9 := w.List()
for _, _tmp10 := range obj.BalanceChanges {
_tmp11 := w.List()
w.WriteUint64(uint64(_tmp10.TxIdx))
if _tmp10.Balance == nil {
w.Write(rlp.EmptyString)
} else {
w.WriteUint256(_tmp10.Balance)
}
w.ListEnd(_tmp11)
}
w.ListEnd(_tmp9)
_tmp12 := w.List() _tmp12 := w.List()
for _, _tmp13 := range _tmp2.BalanceChanges { for _, _tmp13 := range obj.NonceChanges {
_tmp14 := w.List() _tmp14 := w.List()
w.WriteUint64(uint64(_tmp13.TxIdx)) w.WriteUint64(uint64(_tmp13.TxIdx))
w.WriteBytes(_tmp13.Balance[:]) w.WriteUint64(_tmp13.Nonce)
w.ListEnd(_tmp14) w.ListEnd(_tmp14)
} }
w.ListEnd(_tmp12) w.ListEnd(_tmp12)
_tmp15 := w.List() _tmp15 := w.List()
for _, _tmp16 := range _tmp2.NonceChanges { for _, _tmp16 := range obj.CodeChanges {
_tmp17 := w.List() _tmp17 := w.List()
w.WriteUint64(uint64(_tmp16.TxIdx)) w.WriteUint64(uint64(_tmp16.TxIndex))
w.WriteUint64(_tmp16.Nonce) w.WriteBytes(_tmp16.Code)
w.ListEnd(_tmp17) w.ListEnd(_tmp17)
} }
w.ListEnd(_tmp15) w.ListEnd(_tmp15)
_tmp18 := w.List()
for _, _tmp19 := range _tmp2.CodeChanges {
_tmp20 := w.List()
w.WriteUint64(uint64(_tmp19.TxIndex))
w.WriteBytes(_tmp19.Code)
w.ListEnd(_tmp20)
}
w.ListEnd(_tmp18)
w.ListEnd(_tmp3)
}
w.ListEnd(_tmp1)
w.ListEnd(_tmp0) w.ListEnd(_tmp0)
return w.Flush() return w.Flush()
} }
func (obj *BlockAccessList) DecodeRLP(dec *rlp.Stream) error { func (obj *AccountAccess) DecodeRLP(dec *rlp.Stream) error {
var _tmp0 BlockAccessList var _tmp0 AccountAccess
{
if _, err := dec.List(); err != nil {
return err
}
// Accesses:
var _tmp1 []AccountAccess
if _, err := dec.List(); err != nil {
return err
}
for dec.MoreDataInList() {
var _tmp2 AccountAccess
{ {
if _, err := dec.List(); err != nil { if _, err := dec.List(); err != nil {
return err return err
} }
// Address: // Address:
var _tmp3 [20]byte var _tmp1 common.Address
if err := dec.ReadBytes(_tmp3[:]); err != nil { if err := dec.ReadBytes(_tmp1[:]); err != nil {
return err return err
} }
_tmp2.Address = _tmp3 _tmp0.Address = _tmp1
// StorageWrites: // StorageChanges:
var _tmp4 []encodingSlotWrites var _tmp2 []encodingSlotWrites
if _, err := dec.List(); err != nil { if _, err := dec.List(); err != nil {
return err return err
} }
for dec.MoreDataInList() { for dec.MoreDataInList() {
var _tmp5 encodingSlotWrites var _tmp3 encodingSlotWrites
{ {
if _, err := dec.List(); err != nil { if _, err := dec.List(); err != nil {
return err return err
} }
// Slot: // Slot:
var _tmp6 [32]byte _tmp4 := new(EncodedStorage)
if err := dec.ReadBytes(_tmp6[:]); err != nil { if err := _tmp4.DecodeRLP(dec); err != nil {
return err return err
} }
_tmp5.Slot = _tmp6 _tmp3.Slot = _tmp4
// Accesses: // Accesses:
var _tmp7 []encodingStorageWrite var _tmp5 []encodingStorageWrite
if _, err := dec.List(); err != nil { if _, err := dec.List(); err != nil {
return err return err
} }
for dec.MoreDataInList() { for dec.MoreDataInList() {
var _tmp8 encodingStorageWrite var _tmp6 encodingStorageWrite
{ {
if _, err := dec.List(); err != nil { if _, err := dec.List(); err != nil {
return err return err
} }
// TxIdx: // TxIdx:
_tmp9, err := dec.Uint16() _tmp7, err := dec.Uint16()
if err != nil { if err != nil {
return err return err
} }
_tmp8.TxIdx = _tmp9 _tmp6.TxIdx = _tmp7
// ValueAfter: // ValueAfter:
var _tmp10 [32]byte _tmp8 := new(EncodedStorage)
if err := dec.ReadBytes(_tmp10[:]); err != nil { if err := _tmp8.DecodeRLP(dec); err != nil {
return err return err
} }
_tmp8.ValueAfter = _tmp10 _tmp6.ValueAfter = _tmp8
if err := dec.ListEnd(); err != nil { if err := dec.ListEnd(); err != nil {
return err return err
} }
} }
_tmp7 = append(_tmp7, _tmp8) _tmp5 = append(_tmp5, _tmp6)
} }
if err := dec.ListEnd(); err != nil { if err := dec.ListEnd(); err != nil {
return err return err
} }
_tmp5.Accesses = _tmp7 _tmp3.Accesses = _tmp5
if err := dec.ListEnd(); err != nil { if err := dec.ListEnd(); err != nil {
return err return err
} }
} }
_tmp4 = append(_tmp4, _tmp5) _tmp2 = append(_tmp2, _tmp3)
} }
if err := dec.ListEnd(); err != nil { if err := dec.ListEnd(); err != nil {
return err return err
} }
_tmp2.StorageWrites = _tmp4 _tmp0.StorageChanges = _tmp2
// StorageReads: // StorageReads:
var _tmp11 [][32]byte var _tmp9 []*EncodedStorage
if _, err := dec.List(); err != nil { if _, err := dec.List(); err != nil {
return err return err
} }
for dec.MoreDataInList() { for dec.MoreDataInList() {
var _tmp12 [32]byte _tmp10 := new(EncodedStorage)
if err := dec.ReadBytes(_tmp12[:]); err != nil { if err := _tmp10.DecodeRLP(dec); err != nil {
return err return err
} }
_tmp9 = append(_tmp9, _tmp10)
}
if err := dec.ListEnd(); err != nil {
return err
}
_tmp0.StorageReads = _tmp9
// BalanceChanges:
var _tmp11 []encodingBalanceChange
if _, err := dec.List(); err != nil {
return err
}
for dec.MoreDataInList() {
var _tmp12 encodingBalanceChange
{
if _, err := dec.List(); err != nil {
return err
}
// TxIdx:
_tmp13, err := dec.Uint16()
if err != nil {
return err
}
_tmp12.TxIdx = _tmp13
// Balance:
var _tmp14 uint256.Int
if err := dec.ReadUint256(&_tmp14); err != nil {
return err
}
_tmp12.Balance = &_tmp14
if err := dec.ListEnd(); err != nil {
return err
}
}
_tmp11 = append(_tmp11, _tmp12) _tmp11 = append(_tmp11, _tmp12)
} }
if err := dec.ListEnd(); err != nil { if err := dec.ListEnd(); err != nil {
return err return err
} }
_tmp2.StorageReads = _tmp11 _tmp0.BalanceChanges = _tmp11
// BalanceChanges:
var _tmp13 []encodingBalanceChange
if _, err := dec.List(); err != nil {
return err
}
for dec.MoreDataInList() {
var _tmp14 encodingBalanceChange
{
if _, err := dec.List(); err != nil {
return err
}
// TxIdx:
_tmp15, err := dec.Uint16()
if err != nil {
return err
}
_tmp14.TxIdx = _tmp15
// Balance:
var _tmp16 [16]byte
if err := dec.ReadBytes(_tmp16[:]); err != nil {
return err
}
_tmp14.Balance = _tmp16
if err := dec.ListEnd(); err != nil {
return err
}
}
_tmp13 = append(_tmp13, _tmp14)
}
if err := dec.ListEnd(); err != nil {
return err
}
_tmp2.BalanceChanges = _tmp13
// NonceChanges: // NonceChanges:
var _tmp17 []encodingAccountNonce var _tmp15 []encodingAccountNonce
if _, err := dec.List(); err != nil { if _, err := dec.List(); err != nil {
return err return err
} }
for dec.MoreDataInList() { for dec.MoreDataInList() {
var _tmp18 encodingAccountNonce var _tmp16 encodingAccountNonce
{ {
if _, err := dec.List(); err != nil { if _, err := dec.List(); err != nil {
return err return err
} }
// TxIdx: // TxIdx:
_tmp19, err := dec.Uint16() _tmp17, err := dec.Uint16()
if err != nil { if err != nil {
return err return err
} }
_tmp18.TxIdx = _tmp19 _tmp16.TxIdx = _tmp17
// Nonce: // Nonce:
_tmp20, err := dec.Uint64() _tmp18, err := dec.Uint64()
if err != nil { if err != nil {
return err return err
} }
_tmp18.Nonce = _tmp20 _tmp16.Nonce = _tmp18
if err := dec.ListEnd(); err != nil { if err := dec.ListEnd(); err != nil {
return err return err
} }
} }
_tmp17 = append(_tmp17, _tmp18) _tmp15 = append(_tmp15, _tmp16)
} }
if err := dec.ListEnd(); err != nil { if err := dec.ListEnd(); err != nil {
return err return err
} }
_tmp2.NonceChanges = _tmp17 _tmp0.NonceChanges = _tmp15
// CodeChanges: // CodeChanges:
var _tmp21 []encodingCodeChange var _tmp19 []encodingCodeChange
if _, err := dec.List(); err != nil { if _, err := dec.List(); err != nil {
return err return err
} }
for dec.MoreDataInList() { for dec.MoreDataInList() {
var _tmp22 encodingCodeChange var _tmp20 encodingCodeChange
{ {
if _, err := dec.List(); err != nil { if _, err := dec.List(); err != nil {
return err return err
} }
// TxIndex: // TxIndex:
_tmp23, err := dec.Uint16() _tmp21, err := dec.Uint16()
if err != nil { if err != nil {
return err return err
} }
_tmp22.TxIndex = _tmp23 _tmp20.TxIndex = _tmp21
// Code: // Code:
_tmp24, err := dec.Bytes() _tmp22, err := dec.Bytes()
if err != nil { if err != nil {
return err return err
} }
_tmp22.Code = _tmp24 _tmp20.Code = _tmp22
if err := dec.ListEnd(); err != nil { if err := dec.ListEnd(); err != nil {
return err return err
} }
} }
_tmp21 = append(_tmp21, _tmp22) _tmp19 = append(_tmp19, _tmp20)
} }
if err := dec.ListEnd(); err != nil { if err := dec.ListEnd(); err != nil {
return err return err
} }
_tmp2.CodeChanges = _tmp21 _tmp0.CodeChanges = _tmp19
if err := dec.ListEnd(); err != nil {
return err
}
}
_tmp1 = append(_tmp1, _tmp2)
}
if err := dec.ListEnd(); err != nil {
return err
}
_tmp0.Accesses = _tmp1
if err := dec.ListEnd(); err != nil { if err := dec.ListEnd(); err != nil {
return err return err
} }

View file

@ -36,9 +36,9 @@ func equalBALs(a *BlockAccessList, b *BlockAccessList) bool {
return true return true
} }
func makeTestConstructionBAL() *ConstructionBlockAccessList { func makeTestConstructionBAL() *AccessListBuilder {
return &ConstructionBlockAccessList{ return &AccessListBuilder{
map[common.Address]*ConstructionAccountAccess{ FinalizedAccesses: map[common.Address]*ConstructionAccountAccesses{
common.BytesToAddress([]byte{0xff, 0xff}): { common.BytesToAddress([]byte{0xff, 0xff}): {
StorageWrites: map[common.Hash]map[uint16]common.Hash{ StorageWrites: map[common.Hash]map[uint16]common.Hash{
common.BytesToHash([]byte{0x01}): { common.BytesToHash([]byte{0x01}): {
@ -60,9 +60,10 @@ func makeTestConstructionBAL() *ConstructionBlockAccessList {
1: 2, 1: 2,
2: 6, 2: 6,
}, },
CodeChange: map[uint16][]byte{ CodeChanges: map[uint16]CodeChange{0: {
0: common.Hex2Bytes("deadbeef"), TxIdx: 0,
}, Code: common.Hex2Bytes("deadbeef"),
}},
}, },
common.BytesToAddress([]byte{0xff, 0xff, 0xff}): { common.BytesToAddress([]byte{0xff, 0xff, 0xff}): {
StorageWrites: map[common.Hash]map[uint16]common.Hash{ StorageWrites: map[common.Hash]map[uint16]common.Hash{
@ -84,9 +85,6 @@ func makeTestConstructionBAL() *ConstructionBlockAccessList {
NonceChanges: map[uint16]uint64{ NonceChanges: map[uint16]uint64{
1: 2, 1: 2,
}, },
CodeChange: map[uint16][]byte{
0: common.Hex2Bytes("deadbeef"),
},
}, },
}, },
} }
@ -95,7 +93,8 @@ func makeTestConstructionBAL() *ConstructionBlockAccessList {
// TestBALEncoding tests that a populated access list can be encoded/decoded correctly. // TestBALEncoding tests that a populated access list can be encoded/decoded correctly.
func TestBALEncoding(t *testing.T) { func TestBALEncoding(t *testing.T) {
var buf bytes.Buffer var buf bytes.Buffer
bal := makeTestConstructionBAL() balBuilder := makeTestConstructionBAL()
bal := balBuilder.FinalizedAccesses
err := bal.EncodeRLP(&buf) err := bal.EncodeRLP(&buf)
if err != nil { if err != nil {
t.Fatalf("encoding failed: %v\n", err) t.Fatalf("encoding failed: %v\n", err)
@ -104,10 +103,10 @@ func TestBALEncoding(t *testing.T) {
if err := dec.DecodeRLP(rlp.NewStream(bytes.NewReader(buf.Bytes()), 10000000)); err != nil { if err := dec.DecodeRLP(rlp.NewStream(bytes.NewReader(buf.Bytes()), 10000000)); err != nil {
t.Fatalf("decoding failed: %v\n", err) t.Fatalf("decoding failed: %v\n", err)
} }
if dec.Hash() != bal.toEncodingObj().Hash() { if dec.Hash() != bal.ToEncodingObj().Hash() {
t.Fatalf("encoded block hash doesn't match decoded") t.Fatalf("encoded block hash doesn't match decoded")
} }
if !equalBALs(bal.toEncodingObj(), &dec) { if !equalBALs(bal.ToEncodingObj(), &dec) {
t.Fatal("decoded BAL doesn't match") t.Fatal("decoded BAL doesn't match")
} }
} }
@ -115,18 +114,18 @@ func TestBALEncoding(t *testing.T) {
func makeTestAccountAccess(sort bool) AccountAccess { func makeTestAccountAccess(sort bool) AccountAccess {
var ( var (
storageWrites []encodingSlotWrites storageWrites []encodingSlotWrites
storageReads [][32]byte storageReads []common.Hash
balances []encodingBalanceChange balances []encodingBalanceChange
nonces []encodingAccountNonce nonces []encodingAccountNonce
) )
for i := 0; i < 5; i++ { for i := 0; i < 5; i++ {
slot := encodingSlotWrites{ slot := encodingSlotWrites{
Slot: testrand.Hash(), Slot: newEncodedStorageFromHash(testrand.Hash()),
} }
for j := 0; j < 3; j++ { for j := 0; j < 3; j++ {
slot.Accesses = append(slot.Accesses, encodingStorageWrite{ slot.Accesses = append(slot.Accesses, encodingStorageWrite{
TxIdx: uint16(2 * j), TxIdx: uint16(2 * j),
ValueAfter: testrand.Hash(), ValueAfter: newEncodedStorageFromHash(testrand.Hash()),
}) })
} }
if sort { if sort {
@ -138,7 +137,7 @@ func makeTestAccountAccess(sort bool) AccountAccess {
} }
if sort { if sort {
slices.SortFunc(storageWrites, func(a, b encodingSlotWrites) int { slices.SortFunc(storageWrites, func(a, b encodingSlotWrites) int {
return bytes.Compare(a.Slot[:], b.Slot[:]) return bytes.Compare(a.Slot.inner.Bytes(), b.Slot.inner.Bytes())
}) })
} }
@ -146,7 +145,7 @@ func makeTestAccountAccess(sort bool) AccountAccess {
storageReads = append(storageReads, testrand.Hash()) storageReads = append(storageReads, testrand.Hash())
} }
if sort { if sort {
slices.SortFunc(storageReads, func(a, b [32]byte) int { slices.SortFunc(storageReads, func(a, b common.Hash) int {
return bytes.Compare(a[:], b[:]) return bytes.Compare(a[:], b[:])
}) })
} }
@ -154,7 +153,7 @@ func makeTestAccountAccess(sort bool) AccountAccess {
for i := 0; i < 5; i++ { for i := 0; i < 5; i++ {
balances = append(balances, encodingBalanceChange{ balances = append(balances, encodingBalanceChange{
TxIdx: uint16(2 * i), TxIdx: uint16(2 * i),
Balance: [16]byte(testrand.Bytes(16)), Balance: new(uint256.Int).SetBytes(testrand.Bytes(32)),
}) })
} }
if sort { if sort {
@ -175,15 +174,19 @@ func makeTestAccountAccess(sort bool) AccountAccess {
}) })
} }
var encodedStorageReads []*EncodedStorage
for _, slot := range storageReads {
encodedStorageReads = append(encodedStorageReads, newEncodedStorageFromHash(slot))
}
return AccountAccess{ return AccountAccess{
Address: [20]byte(testrand.Bytes(20)), Address: [20]byte(testrand.Bytes(20)),
StorageWrites: storageWrites, StorageChanges: storageWrites,
StorageReads: storageReads, StorageReads: encodedStorageReads,
BalanceChanges: balances, BalanceChanges: balances,
NonceChanges: nonces, NonceChanges: nonces,
CodeChanges: []encodingCodeChange{ CodeChanges: []CodeChange{
{ {
TxIndex: 100, TxIdx: 100,
Code: testrand.Bytes(256), Code: testrand.Bytes(256),
}, },
}, },
@ -193,10 +196,10 @@ func makeTestAccountAccess(sort bool) AccountAccess {
func makeTestBAL(sort bool) BlockAccessList { func makeTestBAL(sort bool) BlockAccessList {
list := BlockAccessList{} list := BlockAccessList{}
for i := 0; i < 5; i++ { for i := 0; i < 5; i++ {
list.Accesses = append(list.Accesses, makeTestAccountAccess(sort)) list = append(list, makeTestAccountAccess(sort))
} }
if sort { if sort {
slices.SortFunc(list.Accesses, func(a, b AccountAccess) int { slices.SortFunc(list, func(a, b AccountAccess) int {
return bytes.Compare(a.Address[:], b.Address[:]) return bytes.Compare(a.Address[:], b.Address[:])
}) })
} }
@ -216,9 +219,9 @@ func TestBlockAccessListCopy(t *testing.T) {
} }
// Make sure the mutations on copy won't affect the origin // Make sure the mutations on copy won't affect the origin
for _, aa := range cpyCpy.Accesses { for _, aa := range cpyCpy {
for i := 0; i < len(aa.StorageReads); i++ { for i := 0; i < len(aa.StorageReads); i++ {
aa.StorageReads[i] = [32]byte(testrand.Bytes(32)) aa.StorageReads[i] = &EncodedStorage{new(uint256.Int).SetBytes(testrand.Bytes(32))}
} }
} }
if !reflect.DeepEqual(list, cpy) { if !reflect.DeepEqual(list, cpy) {
@ -228,8 +231,9 @@ func TestBlockAccessListCopy(t *testing.T) {
func TestBlockAccessListValidation(t *testing.T) { func TestBlockAccessListValidation(t *testing.T) {
// Validate the block access list after RLP decoding // Validate the block access list after RLP decoding
testBALMaxIndex := 8
enc := makeTestBAL(true) enc := makeTestBAL(true)
if err := enc.Validate(); err != nil { if err := enc.Validate(testBALMaxIndex); err != nil {
t.Fatalf("Unexpected validation error: %v", err) t.Fatalf("Unexpected validation error: %v", err)
} }
var buf bytes.Buffer var buf bytes.Buffer
@ -241,14 +245,17 @@ func TestBlockAccessListValidation(t *testing.T) {
if err := dec.DecodeRLP(rlp.NewStream(bytes.NewReader(buf.Bytes()), 0)); err != nil { if err := dec.DecodeRLP(rlp.NewStream(bytes.NewReader(buf.Bytes()), 0)); err != nil {
t.Fatalf("Unexpected RLP-decode error: %v", err) t.Fatalf("Unexpected RLP-decode error: %v", err)
} }
if err := dec.Validate(); err != nil { if err := dec.Validate(testBALMaxIndex); err != nil {
t.Fatalf("Unexpected validation error: %v", err) t.Fatalf("Unexpected validation error: %v", err)
} }
// Validate the derived block access list // Validate the derived block access list
cBAL := makeTestConstructionBAL() cBAL := makeTestConstructionBAL().FinalizedAccesses
listB := cBAL.toEncodingObj() listB := cBAL.ToEncodingObj()
if err := listB.Validate(); err != nil { if err := listB.Validate(testBALMaxIndex); err != nil {
t.Fatalf("Unexpected validation error: %v", err) t.Fatalf("Unexpected validation error: %v", err)
} }
} }
// BALReader test ideas
// * BAL which doesn't have any pre-tx system contracts should return an empty state diff at idx 0

View file

@ -0,0 +1,32 @@
package types
import (
"bytes"
"fmt"
"github.com/ethereum/go-ethereum/rlp"
"io"
"os"
"testing"
)
func TestBALDecoding(t *testing.T) {
var (
err error
data []byte
)
data, err = os.ReadFile("blocks_bal_one.rlp")
if err != nil {
t.Fatalf("error opening file: %v", err)
}
reader := bytes.NewReader(data)
stream := rlp.NewStream(reader, 0)
var blocks Block
for i := 0; err == nil; i++ {
fmt.Printf("decode %d\n", i)
err = stream.Decode(&blocks)
if err != nil && err != io.EOF {
t.Fatalf("error decoding blocks: %v", err)
}
fmt.Printf("block number is %d\n", blocks.NumberU64())
}
}

View file

@ -28,6 +28,8 @@ import (
"sync/atomic" "sync/atomic"
"time" "time"
"github.com/ethereum/go-ethereum/core/types/bal"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/rlp"
@ -99,6 +101,9 @@ type Header struct {
// RequestsHash was added by EIP-7685 and is ignored in legacy headers. // RequestsHash was added by EIP-7685 and is ignored in legacy headers.
RequestsHash *common.Hash `json:"requestsHash" rlp:"optional"` RequestsHash *common.Hash `json:"requestsHash" rlp:"optional"`
// BlockAccessListHash was added by EIP-7928 and is ignored in legacy headers.
BlockAccessListHash *common.Hash `json:"balHash" rlp:"optional"`
// SlotNumber was added by EIP-7843 and is ignored in legacy headers. // SlotNumber was added by EIP-7843 and is ignored in legacy headers.
SlotNumber *uint64 `json:"slotNumber" rlp:"optional"` SlotNumber *uint64 `json:"slotNumber" rlp:"optional"`
} }
@ -163,10 +168,8 @@ func (h *Header) SanityCheck() error {
// EmptyBody returns true if there is no additional 'body' to complete the header // EmptyBody returns true if there is no additional 'body' to complete the header
// that is: no transactions, no uncles and no withdrawals. // that is: no transactions, no uncles and no withdrawals.
func (h *Header) EmptyBody() bool { func (h *Header) EmptyBody() bool {
var ( // quick hack to ensure that we download bodies for empty blocks so that we receive the BALs
emptyWithdrawals = h.WithdrawalsHash == nil || *h.WithdrawalsHash == EmptyWithdrawalsHash return false
)
return h.TxHash == EmptyTxsHash && h.UncleHash == EmptyUncleHash && emptyWithdrawals
} }
// EmptyReceipts returns true if there are no receipts for this header/block. // EmptyReceipts returns true if there are no receipts for this header/block.
@ -204,6 +207,7 @@ type Block struct {
uncles []*Header uncles []*Header
transactions Transactions transactions Transactions
withdrawals Withdrawals withdrawals Withdrawals
accessList *bal.BlockAccessList
// caches // caches
hash atomic.Pointer[common.Hash] hash atomic.Pointer[common.Hash]
@ -221,6 +225,7 @@ type extblock struct {
Txs []*Transaction Txs []*Transaction
Uncles []*Header Uncles []*Header
Withdrawals []*Withdrawal `rlp:"optional"` Withdrawals []*Withdrawal `rlp:"optional"`
AccessList *bal.BlockAccessList `rlp:"optional"`
} }
// NewBlock creates a new block. The input data is copied, changes to header and to the // NewBlock creates a new block. The input data is copied, changes to header and to the
@ -284,6 +289,14 @@ func NewBlock(header *Header, body *Body, receipts []*Receipt, hasher ListHasher
return b return b
} }
func NewBlockWithAccessList(header *Header, body *Body, receipts []*Receipt, accessList *bal.BlockAccessList, hasher ListHasher) *Block {
block := NewBlock(header, body, receipts, hasher)
block.accessList = accessList
balHash := accessList.Hash()
block.header.BlockAccessListHash = &balHash
return block
}
// CopyHeader creates a deep copy of a block header. // CopyHeader creates a deep copy of a block header.
func CopyHeader(h *Header) *Header { func CopyHeader(h *Header) *Header {
cpy := *h cpy := *h
@ -329,12 +342,14 @@ func CopyHeader(h *Header) *Header {
// DecodeRLP decodes a block from RLP. // DecodeRLP decodes a block from RLP.
func (b *Block) DecodeRLP(s *rlp.Stream) error { func (b *Block) DecodeRLP(s *rlp.Stream) error {
var eb extblock var (
eb extblock
)
_, size, _ := s.Kind() _, size, _ := s.Kind()
if err := s.Decode(&eb); err != nil { if err := s.Decode(&eb); err != nil {
return err return err
} }
b.header, b.uncles, b.transactions, b.withdrawals = eb.Header, eb.Uncles, eb.Txs, eb.Withdrawals b.header, b.uncles, b.transactions, b.withdrawals, b.accessList = eb.Header, eb.Uncles, eb.Txs, eb.Withdrawals, eb.AccessList
b.size.Store(rlp.ListSize(size)) b.size.Store(rlp.ListSize(size))
return nil return nil
} }
@ -346,6 +361,7 @@ func (b *Block) EncodeRLP(w io.Writer) error {
Txs: b.transactions, Txs: b.transactions,
Uncles: b.uncles, Uncles: b.uncles,
Withdrawals: b.withdrawals, Withdrawals: b.withdrawals,
AccessList: b.accessList,
}) })
} }
@ -361,6 +377,7 @@ func (b *Block) Body() *Body {
func (b *Block) Uncles() []*Header { return b.uncles } func (b *Block) Uncles() []*Header { return b.uncles }
func (b *Block) Transactions() Transactions { return b.transactions } func (b *Block) Transactions() Transactions { return b.transactions }
func (b *Block) Withdrawals() Withdrawals { return b.withdrawals } func (b *Block) Withdrawals() Withdrawals { return b.withdrawals }
func (b *Block) AccessList() *bal.BlockAccessList { return b.accessList }
func (b *Block) Transaction(hash common.Hash) *Transaction { func (b *Block) Transaction(hash common.Hash) *Transaction {
for _, transaction := range b.transactions { for _, transaction := range b.transactions {
@ -513,6 +530,24 @@ func (b *Block) WithBody(body Body) *Block {
return block return block
} }
// WithAccessList returns a copy of the block with the access list embedded.
// It does not set the access list hash in the header of the returned block.
// TODO: ^ when support for --experimental.bal is removed, this function should set the access list hash in the header
func (b *Block) WithAccessList(accessList *bal.BlockAccessList) *Block {
alCopy := accessList.Copy()
block := &Block{
header: b.header,
transactions: slices.Clone(b.transactions),
uncles: make([]*Header, len(b.uncles)),
withdrawals: slices.Clone(b.withdrawals),
accessList: &alCopy,
}
for i := range b.uncles {
block.uncles[i] = CopyHeader(b.uncles[i])
}
return block
}
// Hash returns the keccak256 hash of b's header. // Hash returns the keccak256 hash of b's header.
// The hash is computed on the first call and cached thereafter. // The hash is computed on the first call and cached thereafter.
func (b *Block) Hash() common.Hash { func (b *Block) Hash() common.Hash {

View file

@ -37,6 +37,7 @@ func (h Header) MarshalJSON() ([]byte, error) {
ExcessBlobGas *hexutil.Uint64 `json:"excessBlobGas" rlp:"optional"` ExcessBlobGas *hexutil.Uint64 `json:"excessBlobGas" rlp:"optional"`
ParentBeaconRoot *common.Hash `json:"parentBeaconBlockRoot" rlp:"optional"` ParentBeaconRoot *common.Hash `json:"parentBeaconBlockRoot" rlp:"optional"`
RequestsHash *common.Hash `json:"requestsHash" rlp:"optional"` RequestsHash *common.Hash `json:"requestsHash" rlp:"optional"`
BlockAccessListHash *common.Hash `json:"balHash" rlp:"optional"`
SlotNumber *hexutil.Uint64 `json:"slotNumber" rlp:"optional"` SlotNumber *hexutil.Uint64 `json:"slotNumber" rlp:"optional"`
Hash common.Hash `json:"hash"` Hash common.Hash `json:"hash"`
} }
@ -62,6 +63,7 @@ func (h Header) MarshalJSON() ([]byte, error) {
enc.ExcessBlobGas = (*hexutil.Uint64)(h.ExcessBlobGas) enc.ExcessBlobGas = (*hexutil.Uint64)(h.ExcessBlobGas)
enc.ParentBeaconRoot = h.ParentBeaconRoot enc.ParentBeaconRoot = h.ParentBeaconRoot
enc.RequestsHash = h.RequestsHash enc.RequestsHash = h.RequestsHash
enc.BlockAccessListHash = h.BlockAccessListHash
enc.SlotNumber = (*hexutil.Uint64)(h.SlotNumber) enc.SlotNumber = (*hexutil.Uint64)(h.SlotNumber)
enc.Hash = h.Hash() enc.Hash = h.Hash()
return json.Marshal(&enc) return json.Marshal(&enc)
@ -91,6 +93,7 @@ func (h *Header) UnmarshalJSON(input []byte) error {
ExcessBlobGas *hexutil.Uint64 `json:"excessBlobGas" rlp:"optional"` ExcessBlobGas *hexutil.Uint64 `json:"excessBlobGas" rlp:"optional"`
ParentBeaconRoot *common.Hash `json:"parentBeaconBlockRoot" rlp:"optional"` ParentBeaconRoot *common.Hash `json:"parentBeaconBlockRoot" rlp:"optional"`
RequestsHash *common.Hash `json:"requestsHash" rlp:"optional"` RequestsHash *common.Hash `json:"requestsHash" rlp:"optional"`
BlockAccessListHash *common.Hash `json:"balHash" rlp:"optional"`
SlotNumber *hexutil.Uint64 `json:"slotNumber" rlp:"optional"` SlotNumber *hexutil.Uint64 `json:"slotNumber" rlp:"optional"`
} }
var dec Header var dec Header
@ -172,6 +175,9 @@ func (h *Header) UnmarshalJSON(input []byte) error {
if dec.RequestsHash != nil { if dec.RequestsHash != nil {
h.RequestsHash = dec.RequestsHash h.RequestsHash = dec.RequestsHash
} }
if dec.BlockAccessListHash != nil {
h.BlockAccessListHash = dec.BlockAccessListHash
}
if dec.SlotNumber != nil { if dec.SlotNumber != nil {
h.SlotNumber = (*uint64)(dec.SlotNumber) h.SlotNumber = (*uint64)(dec.SlotNumber)
} }

View file

@ -43,8 +43,9 @@ func (obj *Header) EncodeRLP(_w io.Writer) error {
_tmp4 := obj.ExcessBlobGas != nil _tmp4 := obj.ExcessBlobGas != nil
_tmp5 := obj.ParentBeaconRoot != nil _tmp5 := obj.ParentBeaconRoot != nil
_tmp6 := obj.RequestsHash != nil _tmp6 := obj.RequestsHash != nil
_tmp7 := obj.SlotNumber != nil _tmp7 := obj.BlockAccessListHash != nil
if _tmp1 || _tmp2 || _tmp3 || _tmp4 || _tmp5 || _tmp6 || _tmp7 { _tmp8 := obj.SlotNumber != nil
if _tmp1 || _tmp2 || _tmp3 || _tmp4 || _tmp5 || _tmp6 || _tmp7 || _tmp8 {
if obj.BaseFee == nil { if obj.BaseFee == nil {
w.Write(rlp.EmptyString) w.Write(rlp.EmptyString)
} else { } else {
@ -54,42 +55,49 @@ func (obj *Header) EncodeRLP(_w io.Writer) error {
w.WriteBigInt(obj.BaseFee) w.WriteBigInt(obj.BaseFee)
} }
} }
if _tmp2 || _tmp3 || _tmp4 || _tmp5 || _tmp6 || _tmp7 { if _tmp2 || _tmp3 || _tmp4 || _tmp5 || _tmp6 || _tmp7 || _tmp8 {
if obj.WithdrawalsHash == nil { if obj.WithdrawalsHash == nil {
w.Write([]byte{0x80}) w.Write([]byte{0x80})
} else { } else {
w.WriteBytes(obj.WithdrawalsHash[:]) w.WriteBytes(obj.WithdrawalsHash[:])
} }
} }
if _tmp3 || _tmp4 || _tmp5 || _tmp6 || _tmp7 { if _tmp3 || _tmp4 || _tmp5 || _tmp6 || _tmp7 || _tmp8 {
if obj.BlobGasUsed == nil { if obj.BlobGasUsed == nil {
w.Write([]byte{0x80}) w.Write([]byte{0x80})
} else { } else {
w.WriteUint64((*obj.BlobGasUsed)) w.WriteUint64((*obj.BlobGasUsed))
} }
} }
if _tmp4 || _tmp5 || _tmp6 || _tmp7 { if _tmp4 || _tmp5 || _tmp6 || _tmp7 || _tmp8 {
if obj.ExcessBlobGas == nil { if obj.ExcessBlobGas == nil {
w.Write([]byte{0x80}) w.Write([]byte{0x80})
} else { } else {
w.WriteUint64((*obj.ExcessBlobGas)) w.WriteUint64((*obj.ExcessBlobGas))
} }
} }
if _tmp5 || _tmp6 || _tmp7 { if _tmp5 || _tmp6 || _tmp7 || _tmp8 {
if obj.ParentBeaconRoot == nil { if obj.ParentBeaconRoot == nil {
w.Write([]byte{0x80}) w.Write([]byte{0x80})
} else { } else {
w.WriteBytes(obj.ParentBeaconRoot[:]) w.WriteBytes(obj.ParentBeaconRoot[:])
} }
} }
if _tmp6 || _tmp7 { if _tmp6 || _tmp7 || _tmp8 {
if obj.RequestsHash == nil { if obj.RequestsHash == nil {
w.Write([]byte{0x80}) w.Write([]byte{0x80})
} else { } else {
w.WriteBytes(obj.RequestsHash[:]) w.WriteBytes(obj.RequestsHash[:])
} }
} }
if _tmp7 { if _tmp7 || _tmp8 {
if obj.BlockAccessListHash == nil {
w.Write([]byte{0x80})
} else {
w.WriteBytes(obj.BlockAccessListHash[:])
}
}
if _tmp8 {
if obj.SlotNumber == nil { if obj.SlotNumber == nil {
w.Write([]byte{0x80}) w.Write([]byte{0x80})
} else { } else {

View file

@ -479,29 +479,32 @@ func (evm *EVM) StaticCall(caller common.Address, addr common.Address, input []b
} }
// create creates a new contract using code as deployment code. // create creates a new contract using code as deployment code.
func (evm *EVM) create(caller common.Address, code []byte, gas GasCosts, value *uint256.Int, address common.Address, typ OpCode) (ret []byte, createAddress common.Address, leftOverGas GasCosts, gasUsed GasUsed, err error) { func (evm *EVM) create(caller common.Address, code []byte, gas GasCosts, value *uint256.Int, address common.Address, typ OpCode) (ret []byte, createAddress common.Address, leftOverGas GasCosts, used GasUsed, err error) {
// Depth check execution. Fail if we're trying to execute above the
// limit.
var nonce uint64
if evm.depth > int(params.CallCreateDepth) {
err = ErrDepth
} else if !evm.Context.CanTransfer(evm.StateDB, caller, value) {
err = ErrInsufficientBalance
} else {
nonce = evm.StateDB.GetNonce(caller)
if nonce+1 < nonce {
err = ErrNonceUintOverflow
}
}
if err == nil {
evm.StateDB.SetNonce(caller, nonce+1, tracing.NonceChangeContractCreator)
}
if evm.Config.Tracer != nil { if evm.Config.Tracer != nil {
evm.captureBegin(evm.depth, typ, caller, address, code, gas, value.ToBig()) evm.captureBegin(evm.depth, typ, caller, address, code, gas, value.ToBig())
defer func(startGas GasCosts) { defer func(startGas GasCosts) {
evm.captureEnd(evm.depth, startGas, leftOverGas, ret, err) evm.captureEnd(evm.depth, startGas, leftOverGas, ret, err)
}(gas) }(gas)
} }
// Depth check execution. Fail if we're trying to execute above the
// limit.
if evm.depth > int(params.CallCreateDepth) {
err = ErrDepth
} else if !evm.Context.CanTransfer(evm.StateDB, caller, value) {
err = ErrInsufficientBalance
} else {
nonce := evm.StateDB.GetNonce(caller)
if nonce+1 < nonce {
err = ErrNonceUintOverflow
} else {
evm.StateDB.SetNonce(caller, nonce+1, tracing.NonceChangeContractCreator)
}
}
if err != nil { if err != nil {
return nil, common.Address{}, gas, GasUsed{}, err return nil, common.Address{}, GasCosts{}, GasUsed{}, err
} }
// Charge the contract creation init gas in verkle mode // Charge the contract creation init gas in verkle mode
@ -528,6 +531,7 @@ func (evm *EVM) create(caller common.Address, code []byte, gas GasCosts, value *
// - the storage is non-empty // - the storage is non-empty
contractHash := evm.StateDB.GetCodeHash(address) contractHash := evm.StateDB.GetCodeHash(address)
storageRoot := evm.StateDB.GetStorageRoot(address) storageRoot := evm.StateDB.GetStorageRoot(address)
if evm.StateDB.GetNonce(address) != 0 || if evm.StateDB.GetNonce(address) != 0 ||
(contractHash != (common.Hash{}) && contractHash != types.EmptyCodeHash) || // non-empty code (contractHash != (common.Hash{}) && contractHash != types.EmptyCodeHash) || // non-empty code
(storageRoot != (common.Hash{}) && storageRoot != types.EmptyRootHash) { // non-empty storage (storageRoot != (common.Hash{}) && storageRoot != types.EmptyRootHash) { // non-empty storage

View file

@ -479,10 +479,13 @@ func gasCallCodeIntrinsic(evm *EVM, contract *Contract, stack *Stack, mem *Memor
var ( var (
gas uint64 gas uint64
overflow bool overflow bool
transfersValue = !stack.Back(2).IsZero()
) )
if stack.Back(2).Sign() != 0 && !evm.chainRules.IsEIP4762 { if transfersValue {
if !evm.chainRules.IsEIP4762 {
gas += params.CallValueTransferGas gas += params.CallValueTransferGas
} }
}
if gas, overflow = math.SafeAdd(gas, memoryGas); overflow { if gas, overflow = math.SafeAdd(gas, memoryGas); overflow {
return 0, ErrGasUintOverflow return 0, ErrGasUintOverflow
} }
@ -502,11 +505,16 @@ func gasSelfdestruct(evm *EVM, contract *Contract, stack *Stack, mem *Memory, me
return GasCosts{}, ErrWriteProtection return GasCosts{}, ErrWriteProtection
} }
var gas uint64 var gas uint64
// EIP150 homestead gas reprice fork: // EIP150 homestead gas reprice fork:
if evm.chainRules.IsEIP150 { if evm.chainRules.IsEIP150 {
gas = params.SelfdestructGasEIP150 gas = params.SelfdestructGasEIP150
var address = common.Address(stack.Back(0).Bytes20()) var address = common.Address(stack.Back(0).Bytes20())
if gas > contract.Gas {
return gas, nil
}
if evm.chainRules.IsEIP158 { if evm.chainRules.IsEIP158 {
// if empty and transfers value // if empty and transfers value
if evm.StateDB.Empty(address) && evm.StateDB.GetBalance(contract.Address()).Sign() != 0 { if evm.StateDB.Empty(address) && evm.StateDB.GetBalance(contract.Address()).Sign() != 0 {

View file

@ -22,6 +22,7 @@ import (
"github.com/ethereum/go-ethereum/core/stateless" "github.com/ethereum/go-ethereum/core/stateless"
"github.com/ethereum/go-ethereum/core/tracing" "github.com/ethereum/go-ethereum/core/tracing"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/types/bal"
"github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/params"
"github.com/holiman/uint256" "github.com/holiman/uint256"
) )
@ -95,5 +96,5 @@ type StateDB interface {
AccessEvents() *state.AccessEvents AccessEvents() *state.AccessEvents
// Finalise must be invoked at the end of a transaction // Finalise must be invoked at the end of a transaction
Finalise(bool) Finalise(bool) bal.StateMutations
} }

View file

@ -195,10 +195,11 @@ func makeCallVariantGasCallEIP2929(oldCalculator gasFunc, addressPosition int) g
} }
var ( var (
gasCallEIP2929 = makeCallVariantGasCallEIP2929(gasCall, 1) // TODO: we can use the same functions already defined above for the 7702 gas handlers
gasDelegateCallEIP2929 = makeCallVariantGasCallEIP2929(gasDelegateCall, 1) gasCallEIP2929 = makeCallVariantGasCall(gasCallStateless, gasCallStateful)
gasStaticCallEIP2929 = makeCallVariantGasCallEIP2929(gasStaticCall, 1) gasDelegateCallEIP2929 = makeCallVariantGasCall(gasDelegateCallStateless, gasDelegateCallStateful)
gasCallCodeEIP2929 = makeCallVariantGasCallEIP2929(gasCallCode, 1) gasStaticCallEIP2929 = makeCallVariantGasCall(gasStaticCallStateless, gasStaticCallStateful)
gasCallCodeEIP2929 = makeCallVariantGasCall(gasCallCodeStateless, gasCallCodeStateful)
gasSelfdestructEIP2929 = makeSelfdestructGasFn(true) gasSelfdestructEIP2929 = makeSelfdestructGasFn(true)
// gasSelfdestructEIP3529 implements the changes in EIP-3529 (no refunds) // gasSelfdestructEIP3529 implements the changes in EIP-3529 (no refunds)
gasSelfdestructEIP3529 = makeSelfdestructGasFn(false) gasSelfdestructEIP3529 = makeSelfdestructGasFn(false)
@ -243,6 +244,10 @@ func makeSelfdestructGasFn(refundsEnabled bool) gasFunc {
return GasCosts{}, ErrOutOfGas return GasCosts{}, ErrOutOfGas
} }
} }
if contract.Gas < gas {
return gas, nil
}
// if empty and transfers value // if empty and transfers value
if evm.StateDB.Empty(address) && evm.StateDB.GetBalance(contract.Address()).Sign() != 0 { if evm.StateDB.Empty(address) && evm.StateDB.GetBalance(contract.Address()).Sign() != 0 {
gas += params.CreateBySelfdestructGas gas += params.CreateBySelfdestructGas

View file

@ -19,6 +19,7 @@ package eth
import ( import (
"context" "context"
"errors" "errors"
"fmt"
"math/big" "math/big"
"time" "time"
@ -499,3 +500,22 @@ func (b *EthAPIBackend) RPCTxSyncDefaultTimeout() time.Duration {
func (b *EthAPIBackend) RPCTxSyncMaxTimeout() time.Duration { func (b *EthAPIBackend) RPCTxSyncMaxTimeout() time.Duration {
return b.eth.config.TxSyncMaxTimeout return b.eth.config.TxSyncMaxTimeout
} }
// GetBlockAccessList returns a block access list for the given number/hash
// or nil if one does not exist.
func (b *EthAPIBackend) BlockAccessListByNumberOrHash(number rpc.BlockNumberOrHash) (interface{}, error) {
var block *types.Block
if num := number.BlockNumber; num != nil {
block = b.eth.blockchain.GetBlockByNumber(uint64(num.Int64()))
} else if hash := number.BlockHash; hash != nil {
block = b.eth.blockchain.GetBlockByHash(*hash)
}
if block == nil {
return nil, fmt.Errorf("block not found")
}
if block.AccessList() == nil {
return nil, nil
}
return block.AccessList().StringableRepresentation(), nil
}

View file

@ -281,6 +281,7 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) {
overrides.OverrideVerkle = config.OverrideVerkle overrides.OverrideVerkle = config.OverrideVerkle
} }
options.Overrides = &overrides options.Overrides = &overrides
options.BALExecutionMode = config.BALExecutionMode
eth.blockchain, err = core.NewBlockChain(chainDb, config.Genesis, eth.engine, options) eth.blockchain, err = core.NewBlockChain(chainDb, config.Genesis, eth.engine, options)
if err != nil { if err != nil {

View file

@ -201,9 +201,13 @@ func (api *ConsensusAPI) ForkchoiceUpdatedV3(ctx context.Context, update engine.
return engine.STATUS_INVALID, attributesErr("missing withdrawals") return engine.STATUS_INVALID, attributesErr("missing withdrawals")
case params.BeaconRoot == nil: case params.BeaconRoot == nil:
return engine.STATUS_INVALID, attributesErr("missing beacon root") return engine.STATUS_INVALID, attributesErr("missing beacon root")
case !api.checkFork(params.Timestamp, forks.Cancun, forks.Prague, forks.Osaka, forks.BPO1, forks.BPO2, forks.BPO3, forks.BPO4, forks.BPO5): case !api.checkFork(params.Timestamp, forks.Cancun, forks.Prague, forks.Osaka, forks.BPO1, forks.BPO2, forks.BPO3, forks.BPO4, forks.BPO5, forks.Amsterdam):
return engine.STATUS_INVALID, unsupportedForkErr("fcuV3 must only be called for cancun/prague/osaka payloads") return engine.STATUS_INVALID, unsupportedForkErr("fcuV3 must only be called for cancun/prague/osaka payloads")
} }
if api.checkFork(params.Timestamp, forks.Amsterdam) {
return api.forkchoiceUpdated(update, params, engine.PayloadV4, false)
}
} }
// TODO(matt): the spec requires that fcu is applied when called on a valid // TODO(matt): the spec requires that fcu is applied when called on a valid
// hash, even if params are wrong. To do this we need to split up // hash, even if params are wrong. To do this we need to split up
@ -499,6 +503,7 @@ func (api *ConsensusAPI) GetPayloadV6(payloadID engine.PayloadID) (*engine.Execu
// //
// Note passing nil `forks`, `versions` disables the respective check. // Note passing nil `forks`, `versions` disables the respective check.
func (api *ConsensusAPI) getPayload(payloadID engine.PayloadID, full bool, versions []engine.PayloadVersion, forks []forks.Fork) (*engine.ExecutionPayloadEnvelope, error) { func (api *ConsensusAPI) getPayload(payloadID engine.PayloadID, full bool, versions []engine.PayloadVersion, forks []forks.Fork) (*engine.ExecutionPayloadEnvelope, error) {
log.Trace("Engine API request received", "method", "GetPayload", "id", payloadID) log.Trace("Engine API request received", "method", "GetPayload", "id", payloadID)
if versions != nil && !payloadID.Is(versions...) { if versions != nil && !payloadID.Is(versions...) {
return nil, engine.UnsupportedFork return nil, engine.UnsupportedFork
@ -751,6 +756,8 @@ func (api *ConsensusAPI) NewPayloadV5(ctx context.Context, params engine.Executa
return invalidStatus, paramsErr("nil beaconRoot post-cancun") return invalidStatus, paramsErr("nil beaconRoot post-cancun")
case executionRequests == nil: case executionRequests == nil:
return invalidStatus, paramsErr("nil executionRequests post-prague") return invalidStatus, paramsErr("nil executionRequests post-prague")
case params.BlockAccessList == nil:
return invalidStatus, paramsErr("nil block access list post-amsterdam")
case params.SlotNumber == nil: case params.SlotNumber == nil:
return invalidStatus, paramsErr("nil slotnumber post-amsterdam") return invalidStatus, paramsErr("nil slotnumber post-amsterdam")
case !api.checkFork(params.Timestamp, forks.Amsterdam): case !api.checkFork(params.Timestamp, forks.Amsterdam):
@ -760,7 +767,7 @@ func (api *ConsensusAPI) NewPayloadV5(ctx context.Context, params engine.Executa
if err := validateRequests(requests); err != nil { if err := validateRequests(requests); err != nil {
return engine.PayloadStatusV1{Status: engine.INVALID}, engine.InvalidParams.With(err) return engine.PayloadStatusV1{Status: engine.INVALID}, engine.InvalidParams.With(err)
} }
return api.newPayload(ctx, params, versionedHashes, beaconRoot, requests, false) return api.newPayload(context.Background(), params, versionedHashes, beaconRoot, requests, false)
} }
func (api *ConsensusAPI) newPayload(ctx context.Context, params engine.ExecutableData, versionedHashes []common.Hash, beaconRoot *common.Hash, requests [][]byte, witness bool) (result engine.PayloadStatusV1, err error) { func (api *ConsensusAPI) newPayload(ctx context.Context, params engine.ExecutableData, versionedHashes []common.Hash, beaconRoot *common.Hash, requests [][]byte, witness bool) (result engine.PayloadStatusV1, err error) {
@ -1181,6 +1188,10 @@ func getBody(block *types.Block) *engine.ExecutionPayloadBody {
result.Withdrawals = []*types.Withdrawal{} result.Withdrawals = []*types.Withdrawal{}
} }
if block.AccessList() != nil {
result.AccessList = block.AccessList()
}
return &result return &result
} }

View file

@ -209,6 +209,8 @@ type Config struct {
// RangeLimit restricts the maximum range (end - start) for range queries. // RangeLimit restricts the maximum range (end - start) for range queries.
RangeLimit uint64 `toml:",omitempty"` RangeLimit uint64 `toml:",omitempty"`
BALExecutionMode int
} }
// CreateConsensusEngine creates a consensus engine for the given chain config. // CreateConsensusEngine creates a consensus engine for the given chain config.

View file

@ -68,6 +68,7 @@ func (c Config) MarshalTOML() (interface{}, error) {
TxSyncDefaultTimeout time.Duration `toml:",omitempty"` TxSyncDefaultTimeout time.Duration `toml:",omitempty"`
TxSyncMaxTimeout time.Duration `toml:",omitempty"` TxSyncMaxTimeout time.Duration `toml:",omitempty"`
RangeLimit uint64 `toml:",omitempty"` RangeLimit uint64 `toml:",omitempty"`
BALExecutionMode int
} }
var enc Config var enc Config
enc.Genesis = c.Genesis enc.Genesis = c.Genesis
@ -121,6 +122,7 @@ func (c Config) MarshalTOML() (interface{}, error) {
enc.TxSyncDefaultTimeout = c.TxSyncDefaultTimeout enc.TxSyncDefaultTimeout = c.TxSyncDefaultTimeout
enc.TxSyncMaxTimeout = c.TxSyncMaxTimeout enc.TxSyncMaxTimeout = c.TxSyncMaxTimeout
enc.RangeLimit = c.RangeLimit enc.RangeLimit = c.RangeLimit
enc.BALExecutionMode = c.BALExecutionMode
return &enc, nil return &enc, nil
} }
@ -178,6 +180,7 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error {
TxSyncDefaultTimeout *time.Duration `toml:",omitempty"` TxSyncDefaultTimeout *time.Duration `toml:",omitempty"`
TxSyncMaxTimeout *time.Duration `toml:",omitempty"` TxSyncMaxTimeout *time.Duration `toml:",omitempty"`
RangeLimit *uint64 `toml:",omitempty"` RangeLimit *uint64 `toml:",omitempty"`
BALExecutionMode *int
} }
var dec Config var dec Config
if err := unmarshal(&dec); err != nil { if err := unmarshal(&dec); err != nil {
@ -336,5 +339,8 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error {
if dec.RangeLimit != nil { if dec.RangeLimit != nil {
c.RangeLimit = *dec.RangeLimit c.RangeLimit = *dec.RangeLimit
} }
if dec.BALExecutionMode != nil {
c.BALExecutionMode = *dec.BALExecutionMode
}
return nil return nil
} }

View file

@ -1055,7 +1055,7 @@ func (api *API) traceTx(ctx context.Context, tx *types.Transaction, message *cor
// Call Prepare to clear out the statedb access list // Call Prepare to clear out the statedb access list
statedb.SetTxContext(txctx.TxHash, txctx.TxIndex) statedb.SetTxContext(txctx.TxHash, txctx.TxIndex)
_, err = core.ApplyTransactionWithEVM(message, core.NewGasPool(message.GasLimit), statedb, vmctx.BlockNumber, txctx.BlockHash, vmctx.Time, tx, evm) _, _, err = core.ApplyTransactionWithEVM(message, core.NewGasPool(message.GasLimit), statedb, vmctx.BlockNumber, txctx.BlockHash, vmctx.Time, tx, evm)
if err != nil { if err != nil {
return nil, fmt.Errorf("tracing failed: %w", err) return nil, fmt.Errorf("tracing failed: %w", err)
} }

View file

@ -1003,6 +1003,9 @@ func RPCMarshalBlock(block *types.Block, inclTx bool, fullTx bool, config *param
if block.Withdrawals() != nil { if block.Withdrawals() != nil {
fields["withdrawals"] = block.Withdrawals() fields["withdrawals"] = block.Withdrawals()
} }
if block.AccessList() != nil {
fields["accessList"] = block.AccessList()
}
return fields return fields
} }
@ -1375,6 +1378,18 @@ func AccessList(ctx context.Context, b Backend, blockNrOrHash rpc.BlockNumberOrH
} }
} }
// GetBlockAccessListByBlockNumber returns a block access list for the given block number
// or nil if one does not exist.
func (api *BlockChainAPI) GetBlockAccessListByBlockNumber(number rpc.BlockNumber) (interface{}, error) {
return api.b.BlockAccessListByNumberOrHash(rpc.BlockNumberOrHash{BlockNumber: &number})
}
// GetBlockAccessListByBlockHash returns a block access list for the given block hash
// or nil if one does not exist.
func (api *BlockChainAPI) GetBlockAccessListByBlockHash(hash common.Hash) (interface{}, error) {
return api.b.BlockAccessListByNumberOrHash(rpc.BlockNumberOrHash{BlockHash: &hash})
}
// TransactionAPI exposes methods for reading and creating transaction data. // TransactionAPI exposes methods for reading and creating transaction data.
type TransactionAPI struct { type TransactionAPI struct {
b Backend b Backend

View file

@ -74,6 +74,7 @@ type Backend interface {
GetEVM(ctx context.Context, state *state.StateDB, header *types.Header, vmConfig *vm.Config, blockCtx *vm.BlockContext) *vm.EVM GetEVM(ctx context.Context, state *state.StateDB, header *types.Header, vmConfig *vm.Config, blockCtx *vm.BlockContext) *vm.EVM
SubscribeChainEvent(ch chan<- core.ChainEvent) event.Subscription SubscribeChainEvent(ch chan<- core.ChainEvent) event.Subscription
SubscribeChainHeadEvent(ch chan<- core.ChainHeadEvent) event.Subscription SubscribeChainHeadEvent(ch chan<- core.ChainHeadEvent) event.Subscription
BlockAccessListByNumberOrHash(number rpc.BlockNumberOrHash) (interface{}, error)
// Transaction pool API // Transaction pool API
SendTx(ctx context.Context, signedTx *types.Transaction) error SendTx(ctx context.Context, signedTx *types.Transaction) error

View file

@ -393,11 +393,11 @@ func (sim *simulator) processBlock(ctx context.Context, block *simBlock, header,
return nil, nil, nil, err return nil, nil, nil, err
} }
// EIP-7002 // EIP-7002
if err := core.ProcessWithdrawalQueue(&requests, evm); err != nil { if _, err := core.ProcessWithdrawalQueue(&requests, evm); err != nil {
return nil, nil, nil, err return nil, nil, nil, err
} }
// EIP-7251 // EIP-7251
if err := core.ProcessConsolidationQueue(&requests, evm); err != nil { if _, err := core.ProcessConsolidationQueue(&requests, evm); err != nil {
return nil, nil, nil, err return nil, nil, nil, err
} }
} }
@ -411,7 +411,7 @@ func (sim *simulator) processBlock(ctx context.Context, block *simBlock, header,
Withdrawals: *block.BlockOverrides.Withdrawals, Withdrawals: *block.BlockOverrides.Withdrawals,
} }
chainHeadReader := &simChainHeadReader{ctx, sim.b} chainHeadReader := &simChainHeadReader{ctx, sim.b}
b, err := sim.b.Engine().FinalizeAndAssemble(ctx, chainHeadReader, header, sim.state, blockBody, receipts) b, err := sim.b.Engine().FinalizeAndAssemble(ctx, chainHeadReader, header, sim.state, blockBody, receipts, nil)
if err != nil { if err != nil {
return nil, nil, nil, err return nil, nil, nil, err
} }

View file

@ -612,7 +612,17 @@ web3._extend({
name: 'config', name: 'config',
call: 'eth_config', call: 'eth_config',
params: 0, params: 0,
}) }),
new web3._extend.Method({
name: 'getBlockAccessListByBlockNumber',
call: 'eth_getBlockAccessListByBlockNumber',
params: 1,
}),
new web3._extend.Method({
name: 'getBlockAccessListByBlockHash',
call: 'eth_getBlockAccessListByBlockHash',
params: 1,
}),
], ],
properties: [ properties: [
new web3._extend.Property({ new web3._extend.Property({

View file

@ -32,6 +32,7 @@ import (
"github.com/ethereum/go-ethereum/core/stateless" "github.com/ethereum/go-ethereum/core/stateless"
"github.com/ethereum/go-ethereum/core/txpool" "github.com/ethereum/go-ethereum/core/txpool"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/types/bal"
"github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/internal/telemetry" "github.com/ethereum/go-ethereum/internal/telemetry"
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
@ -73,6 +74,7 @@ type environment struct {
blobs int blobs int
witness *stateless.Witness witness *stateless.Witness
accessList bal.ConstructionBlockAccessList
} }
// txFits reports whether the transaction fits into the block size limit. // txFits reports whether the transaction fits into the block size limit.
@ -191,7 +193,10 @@ func (miner *Miner) generateWork(ctx context.Context, genParam *generateParams,
} }
// Collect consensus-layer requests if Prague is enabled. // Collect consensus-layer requests if Prague is enabled.
var requests [][]byte var (
requests [][]byte
postMut = make(bal.StateMutations)
)
if miner.chainConfig.IsPrague(work.header.Number, work.header.Time) { if miner.chainConfig.IsPrague(work.header.Number, work.header.Time) {
requests = [][]byte{} requests = [][]byte{}
// EIP-6110 deposits // EIP-6110 deposits
@ -199,23 +204,47 @@ func (miner *Miner) generateWork(ctx context.Context, genParam *generateParams,
return &newPayloadResult{err: err} return &newPayloadResult{err: err}
} }
// EIP-7002 // EIP-7002
if err := core.ProcessWithdrawalQueue(&requests, work.evm); err != nil { mut, err := core.ProcessWithdrawalQueue(&requests, work.evm)
if err != nil {
return &newPayloadResult{err: err} return &newPayloadResult{err: err}
} }
postMut.Merge(mut)
// EIP-7251 consolidations // EIP-7251 consolidations
if err := core.ProcessConsolidationQueue(&requests, work.evm); err != nil { mut, err = core.ProcessConsolidationQueue(&requests, work.evm)
if err != nil {
return &newPayloadResult{err: err} return &newPayloadResult{err: err}
} }
postMut.Merge(mut)
work.accessList.AccumulateMutations(postMut, uint16(work.tcount)+1)
work.accessList.AccumulateReads(work.state.Reader().(state.StateReaderTracker).GetStateAccessList())
} }
if requests != nil { if requests != nil {
reqHash := types.CalcRequestsHash(requests) reqHash := types.CalcRequestsHash(requests)
work.header.RequestsHash = &reqHash work.header.RequestsHash = &reqHash
} }
block, err := miner.engine.FinalizeAndAssemble(ctx, miner.chain, work.header, work.state, &body, work.receipts) // set the block access list on the body after the block has finished executing
// but before the header hash is computed (in FinalizeAndAssemble).
//
// I considered trying to instantiate the beacon consensus engine with a tracer.
// however, the BAL tracer instance is used once per block, while the engine object
// lives for the entire time the client is running.
var onBlockFinalization func(mutations bal.StateMutations) *bal.BlockAccessList
if miner.chainConfig.IsAmsterdam(work.header.Number, work.header.Time) {
onBlockFinalization = func(withdrawalMut bal.StateMutations) *bal.BlockAccessList {
work.accessList.AccumulateMutations(withdrawalMut, uint16(work.tcount)+1)
work.accessList.AccumulateReads(work.state.Reader().(state.StateReaderTracker).GetStateAccessList())
return work.accessList.ToEncodingObj()
}
}
block, err := miner.engine.FinalizeAndAssemble(ctx, miner.chain, work.header, work.state, &body, work.receipts, onBlockFinalization)
if err != nil { if err != nil {
return &newPayloadResult{err: err} return &newPayloadResult{err: err}
} }
return &newPayloadResult{ return &newPayloadResult{
block: block, block: block,
fees: totalFees(block, work.receipts), fees: totalFees(block, work.receipts),
@ -312,19 +341,21 @@ func (miner *Miner) prepareWork(ctx context.Context, genParams *generateParams,
log.Error("Failed to create sealing context", "err", err) log.Error("Failed to create sealing context", "err", err)
return nil, err return nil, err
} }
mut := make(bal.StateMutations)
if header.ParentBeaconRoot != nil { if header.ParentBeaconRoot != nil {
core.ProcessBeaconBlockRoot(*header.ParentBeaconRoot, env.evm) mut.Merge(core.ProcessBeaconBlockRoot(*header.ParentBeaconRoot, env.evm))
} }
if miner.chainConfig.IsPrague(header.Number, header.Time) { if miner.chainConfig.IsPrague(header.Number, header.Time) {
core.ProcessParentBlockHash(header.ParentHash, env.evm) mut.Merge(core.ProcessParentBlockHash(header.ParentHash, env.evm))
} }
env.accessList.AccumulateMutations(mut, 0)
return env, nil return env, nil
} }
// makeEnv creates a new environment for the sealing block. // makeEnv creates a new environment for the sealing block.
func (miner *Miner) makeEnv(parent *types.Header, header *types.Header, coinbase common.Address, witness bool) (*environment, error) { func (miner *Miner) makeEnv(parent *types.Header, header *types.Header, coinbase common.Address, witness bool) (*environment, error) {
// Retrieve the parent state to execute on top. // Retrieve the parent state to execute on top.
state, err := miner.chain.StateAt(parent.Root) sdb, err := miner.chain.StateAt(parent.Root)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -335,30 +366,45 @@ func (miner *Miner) makeEnv(parent *types.Header, header *types.Header, coinbase
return nil, err return nil, err
} }
} }
state.StartPrefetcher("miner", bundle, nil) sdb.StartPrefetcher("miner", bundle, nil)
var accessListBuilder bal.ConstructionBlockAccessList
if miner.chainConfig.IsAmsterdam(header.Number, header.Time) {
accessListBuilder = make(bal.ConstructionBlockAccessList)
sdb = sdb.WithReader(state.NewReaderWithTracker(sdb.Reader()))
}
// Note the passed coinbase may be different with header.Coinbase. // Note the passed coinbase may be different with header.Coinbase.
return &environment{ return &environment{
signer: types.MakeSigner(miner.chainConfig, header.Number, header.Time), signer: types.MakeSigner(miner.chainConfig, header.Number, header.Time),
state: state, state: sdb,
size: uint64(header.Size()), size: uint64(header.Size()),
coinbase: coinbase, coinbase: coinbase,
gasPool: core.NewGasPool(header.GasLimit), gasPool: core.NewGasPool(header.GasLimit),
header: header, header: header,
witness: state.Witness(), witness: sdb.Witness(),
evm: vm.NewEVM(core.NewEVMBlockContext(header, miner.chain, &coinbase), state, miner.chainConfig, vm.Config{}), evm: vm.NewEVM(core.NewEVMBlockContext(header, miner.chain, &coinbase), sdb, miner.chainConfig, vm.Config{}),
accessList: accessListBuilder,
}, nil }, nil
} }
var (
errAccessListOversized = errors.New("access list oversized")
)
func (miner *Miner) commitTransaction(ctx context.Context, env *environment, tx *types.Transaction) (err error) { func (miner *Miner) commitTransaction(ctx context.Context, env *environment, tx *types.Transaction) (err error) {
_, _, spanEnd := telemetry.StartSpan(ctx, "miner.commitTransaction") _, _, spanEnd := telemetry.StartSpan(ctx, "miner.commitTransaction")
defer spanEnd(&err) defer spanEnd(&err)
if tx.Type() == types.BlobTxType { if tx.Type() == types.BlobTxType {
return miner.commitBlobTransaction(env, tx) return miner.commitBlobTransaction(env, tx)
} }
receipt, err := miner.applyTransaction(env, tx) receipt, err := miner.applyTransaction(env, tx)
if err != nil { if err != nil {
return err return err
} }
env.txs = append(env.txs, tx) env.txs = append(env.txs, tx)
env.receipts = append(env.receipts, receipt) env.receipts = append(env.receipts, receipt)
env.size += tx.Size() env.size += tx.Size()
@ -366,7 +412,7 @@ func (miner *Miner) commitTransaction(ctx context.Context, env *environment, tx
return nil return nil
} }
func (miner *Miner) commitBlobTransaction(env *environment, tx *types.Transaction) error { func (miner *Miner) commitBlobTransaction(env *environment, tx *types.Transaction) (err error) {
sc := tx.BlobTxSidecar() sc := tx.BlobTxSidecar()
if sc == nil { if sc == nil {
panic("blob transaction without blobs in miner") panic("blob transaction without blobs in miner")
@ -400,20 +446,56 @@ func (miner *Miner) applyTransaction(env *environment, tx *types.Transaction) (*
snap = env.state.Snapshot() snap = env.state.Snapshot()
gp = env.gasPool.Snapshot() gp = env.gasPool.Snapshot()
) )
receipt, err := core.ApplyTransaction(env.evm, env.gasPool, env.state, env.header, tx) var stateCopy *state.StateDB
if err != nil { var prevReader state.Reader
env.state.RevertToSnapshot(snap) if env.accessList != nil {
env.gasPool.Set(gp) prevReader = env.state.Reader()
return nil, err stateCopy = env.state.WithReader(state.NewReaderWithTracker(env.state.Reader()))
env.evm.StateDB = stateCopy
} }
env.header.GasUsed = env.gasPool.Used()
return receipt, nil mutations, receipt, err := core.ApplyTransaction(env.evm, env.gasPool, stateCopy, env.header, tx)
if err != nil {
if env.accessList != nil {
// transaction couldn't be applied. reset env state to what it was before
env.state = env.state.WithReader(prevReader)
env.evm.StateDB = env.state
} else {
env.state.RevertToSnapshot(snap)
}
env.gasPool.Set(gp)
}
if env.accessList != nil {
al := env.accessList.Copy()
al.AccumulateMutations(mutations, uint16(env.tcount)+1)
al.AccumulateReads(stateCopy.Reader().(state.StateReaderTracker).GetStateAccessList())
if env.size+tx.Size()+uint64(al.ToEncodingObj().EncodedSize()) >= params.MaxBlockSize-maxBlockSizeBufferZone {
env.gasPool.Set(gp)
// transaction couldn't be applied. reset env state to what it was before
env.state = env.state.WithReader(prevReader)
env.evm.StateDB = env.state
return nil, errAccessListOversized
}
env.state = stateCopy.WithReader(prevReader)
env.evm.StateDB = env.state
env.accessList = al
}
return receipt, err
} }
func (miner *Miner) commitTransactions(ctx context.Context, env *environment, plainTxs, blobTxs *transactionsByPriceAndNonce, interrupt *atomic.Int32) error { func (miner *Miner) commitTransactions(ctx context.Context, env *environment, plainTxs, blobTxs *transactionsByPriceAndNonce, interrupt *atomic.Int32) error {
ctx, _, spanEnd := telemetry.StartSpan(ctx, "miner.commitTransactions") ctx, _, spanEnd := telemetry.StartSpan(ctx, "miner.commitTransactions")
defer spanEnd(nil) defer spanEnd(nil)
isCancun := miner.chainConfig.IsCancun(env.header.Number, env.header.Time) var (
isCancun = miner.chainConfig.IsCancun(env.header.Number, env.header.Time)
gasLimit = env.header.GasLimit
)
if env.gasPool == nil {
env.gasPool = core.NewGasPool(gasLimit)
}
loop:
for { for {
// Check interruption signal and abort building if it's fired. // Check interruption signal and abort building if it's fired.
if interrupt != nil { if interrupt != nil {
@ -512,7 +594,12 @@ func (miner *Miner) commitTransactions(ctx context.Context, env *environment, pl
case errors.Is(err, nil): case errors.Is(err, nil):
// Everything ok, collect the logs and shift in the next transaction from the same account // Everything ok, collect the logs and shift in the next transaction from the same account
txs.Shift() txs.Shift()
case errors.Is(err, errAccessListOversized):
// Transaction can't be applied because it would cause the block to be oversized due to the
// contribution of the state accesses/modifications it makes.
// terminate the payload construction as it's not guaranteed we will be able to find a transaction
// that can fit in a short amount of time.
break loop
default: default:
// Transaction is regarded as invalid, drop all consecutive transactions from // Transaction is regarded as invalid, drop all consecutive transactions from
// the same sender because of `nonce-too-high` clause. // the same sender because of `nonce-too-high` clause.

View file

@ -236,6 +236,8 @@ var (
Cancun: DefaultCancunBlobConfig, Cancun: DefaultCancunBlobConfig,
Prague: DefaultPragueBlobConfig, Prague: DefaultPragueBlobConfig,
Osaka: DefaultOsakaBlobConfig, Osaka: DefaultOsakaBlobConfig,
BPO1: DefaultBPO1BlobConfig,
BPO2: DefaultBPO2BlobConfig,
}, },
} }
@ -1015,9 +1017,11 @@ func (c *ChainConfig) CheckConfigForkOrder() error {
} }
if cur.timestamp != nil { if cur.timestamp != nil {
// If the fork is configured, a blob schedule must be defined for it. // If the fork is configured, a blob schedule must be defined for it.
/*
if cur.config == nil { if cur.config == nil {
return fmt.Errorf("invalid chain configuration: missing entry for fork %q in blobSchedule", cur.name) return fmt.Errorf("invalid chain configuration: missing entry for fork %q in blobSchedule", cur.name)
} }
*/
} }
} }
return nil return nil
@ -1172,6 +1176,9 @@ func (c *ChainConfig) LatestFork(time uint64) forks.Fork {
// BlobConfig returns the blob config associated with the provided fork. // BlobConfig returns the blob config associated with the provided fork.
func (c *ChainConfig) BlobConfig(fork forks.Fork) *BlobConfig { func (c *ChainConfig) BlobConfig(fork forks.Fork) *BlobConfig {
switch fork { switch fork {
case forks.Amsterdam:
// TODO: (????)
return c.BlobScheduleConfig.BPO2
case forks.BPO5: case forks.BPO5:
return c.BlobScheduleConfig.BPO5 return c.BlobScheduleConfig.BPO5
case forks.BPO4: case forks.BPO4:
@ -1217,6 +1224,8 @@ func (c *ChainConfig) ActiveSystemContracts(time uint64) map[string]common.Addre
// the fork isn't defined or isn't a time-based fork. // the fork isn't defined or isn't a time-based fork.
func (c *ChainConfig) Timestamp(fork forks.Fork) *uint64 { func (c *ChainConfig) Timestamp(fork forks.Fork) *uint64 {
switch { switch {
case fork == forks.Amsterdam:
return c.AmsterdamTime
case fork == forks.BPO5: case fork == forks.BPO5:
return c.BPO5Time return c.BPO5Time
case fork == forks.BPO4: case fork == forks.BPO4:

View file

@ -25,6 +25,7 @@ import (
"io" "io"
"math/big" "math/big"
"reflect" "reflect"
"runtime/debug"
"strings" "strings"
"sync" "sync"
@ -672,6 +673,7 @@ func (s *Stream) ReadBytes(b []byte) error {
return nil return nil
case String: case String:
if uint64(len(b)) != size { if uint64(len(b)) != size {
debug.PrintStack()
return fmt.Errorf("input value has wrong size %d, want %d", size, len(b)) return fmt.Errorf("input value has wrong size %d, want %d", size, len(b))
} }
if err = s.readFull(b); err != nil { if err = s.readFull(b); err != nil {

View file

@ -24,6 +24,65 @@ import (
"github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/rawdb"
) )
func TestBlockchainBAL(t *testing.T) {
bt := new(testMatcher)
// We are running most of GeneralStatetests to tests witness support, even
// though they are ran as state tests too. Still, the performance tests are
// less about state andmore about EVM number crunching, so skip those.
bt.skipLoad(`^GeneralStateTests/VMTests/vmPerformance`)
// Skip random failures due to selfish mining test
bt.skipLoad(`.*bcForgedTest/bcForkUncle\.json`)
// Slow tests
bt.slow(`.*bcExploitTest/DelegateCallSpam.json`)
bt.slow(`.*bcExploitTest/ShanghaiLove.json`)
bt.slow(`.*bcExploitTest/SuicideIssue.json`)
bt.slow(`.*/bcForkStressTest/`)
bt.slow(`.*/bcGasPricerTest/RPC_API_Test.json`)
bt.slow(`.*/bcWalletTest/`)
// Very slow test
bt.skipLoad(`.*/stTimeConsuming/.*`)
// test takes a lot for time and goes easily OOM because of sha3 calculation on a huge range,
// using 4.6 TGas
bt.skipLoad(`.*randomStatetest94.json.*`)
// After the merge we would accept side chains as canonical even if they have lower td
bt.skipLoad(`.*bcMultiChainTest/ChainAtoChainB_difficultyB.json`)
bt.skipLoad(`.*bcMultiChainTest/CallContractFromNotBestBlock.json`)
bt.skipLoad(`.*bcTotalDifficultyTest/uncleBlockAtBlock3afterBlock4.json`)
bt.skipLoad(`.*bcTotalDifficultyTest/lotsOfBranchesOverrideAtTheMiddle.json`)
bt.skipLoad(`.*bcTotalDifficultyTest/sideChainWithMoreTransactions.json`)
bt.skipLoad(`.*bcForkStressTest/ForkStressTest.json`)
bt.skipLoad(`.*bcMultiChainTest/lotsOfLeafs.json`)
bt.skipLoad(`.*bcFrontierToHomestead/blockChainFrontierWithLargerTDvsHomesteadBlockchain.json`)
bt.skipLoad(`.*bcFrontierToHomestead/blockChainFrontierWithLargerTDvsHomesteadBlockchain2.json`)
// With chain history removal, TDs become unavailable, this transition tests based on TTD are unrunnable
bt.skipLoad(`.*bcArrowGlacierToParis/powToPosBlockRejection.json`)
// This directory contains no test.
bt.skipLoad(`.*\.meta/.*`)
bt.walk(t, blockTestDir, func(t *testing.T, name string, test *BlockTest) {
config, ok := Forks[test.json.Network]
if !ok {
t.Fatalf("unsupported fork: %s\n", test.json.Network)
}
gspec := test.genesis(config)
// skip any tests which are not past the cancun fork (selfdestruct removal)
if gspec.Config.CancunTime == nil || *gspec.Config.CancunTime != 0 {
return
}
execBlockTest(t, bt, test, true)
})
// There is also a LegacyTests folder, containing blockchain tests generated
// prior to Istanbul. However, they are all derived from GeneralStateTests,
// which run natively, so there's no reason to run them here.
}
func TestBlockchain(t *testing.T) { func TestBlockchain(t *testing.T) {
bt := new(testMatcher) bt := new(testMatcher)
@ -67,17 +126,16 @@ func TestBlockchain(t *testing.T) {
bt.skipLoad(`.*\.meta/.*`) bt.skipLoad(`.*\.meta/.*`)
bt.walk(t, blockTestDir, func(t *testing.T, name string, test *BlockTest) { bt.walk(t, blockTestDir, func(t *testing.T, name string, test *BlockTest) {
execBlockTest(t, bt, test) execBlockTest(t, bt, test, false)
}) })
// There is also a LegacyTests folder, containing blockchain tests generated // There is also a LegacyTests folder, containing blockchain tests generated
// prior to Istanbul. However, they are all derived from GeneralStateTests, // prior to Istanbul. However, they are all derived from GeneralStateTests,
// which run natively, so there's no reason to run them here. // which run natively, so there's no reason to run them here.
} }
// TestExecutionSpecBlocktests runs the test fixtures from execution-spec-tests. func testExecutionSpecBlocktests(t *testing.T, testDir string) {
func TestExecutionSpecBlocktests(t *testing.T) { if !common.FileExist(testDir) {
if !common.FileExist(executionSpecBlockchainTestDir) { t.Skipf("directory %s does not exist", testDir)
t.Skipf("directory %s does not exist", executionSpecBlockchainTestDir)
} }
bt := new(testMatcher) bt := new(testMatcher)
@ -85,12 +143,24 @@ func TestExecutionSpecBlocktests(t *testing.T) {
bt.skipLoad(".*prague/eip7251_consolidations/test_system_contract_deployment.json") bt.skipLoad(".*prague/eip7251_consolidations/test_system_contract_deployment.json")
bt.skipLoad(".*prague/eip7002_el_triggerable_withdrawals/test_system_contract_deployment.json") bt.skipLoad(".*prague/eip7002_el_triggerable_withdrawals/test_system_contract_deployment.json")
bt.walk(t, executionSpecBlockchainTestDir, func(t *testing.T, name string, test *BlockTest) { bt.walk(t, testDir, func(t *testing.T, name string, test *BlockTest) {
execBlockTest(t, bt, test) execBlockTest(t, bt, test, true)
}) })
} }
func execBlockTest(t *testing.T, bt *testMatcher, test *BlockTest) { // TestExecutionSpecBlocktests runs the test fixtures from execution-spec-tests.
func TestExecutionSpecBlocktests(t *testing.T) {
testExecutionSpecBlocktests(t, executionSpecBlockchainTestDir)
}
// TestExecutionSpecBlocktestsBAL runs the BAL release test fixtures from execution-spec-tests.
func TestExecutionSpecBlocktestsBAL(t *testing.T) {
testExecutionSpecBlocktests(t, executionSpecBALBlockchainTestDir)
}
var failures = 0
func execBlockTest(t *testing.T, bt *testMatcher, test *BlockTest, buildAndVerifyBAL bool) {
// Define all the different flag combinations we should run the tests with, // Define all the different flag combinations we should run the tests with,
// picking only one for short tests. // picking only one for short tests.
// //
@ -106,7 +176,13 @@ func execBlockTest(t *testing.T, bt *testMatcher, test *BlockTest) {
} }
for _, snapshot := range snapshotConf { for _, snapshot := range snapshotConf {
for _, dbscheme := range dbschemeConf { for _, dbscheme := range dbschemeConf {
if err := bt.checkFailure(t, test.Run(snapshot, dbscheme, true, nil, nil)); err != nil { if err := bt.checkFailure(t, test.Run(snapshot, dbscheme, true, buildAndVerifyBAL, nil, nil)); err != nil {
failures++
/*
if failures > 10 {
panic("adsf")
}
*/
t.Errorf("test with config {snapshotter:%v, scheme:%v} failed: %v", snapshot, dbscheme, err) t.Errorf("test with config {snapshotter:%v, scheme:%v} failed: %v", snapshot, dbscheme, err)
return return
} }

View file

@ -22,11 +22,6 @@ import (
"encoding/hex" "encoding/hex"
"encoding/json" "encoding/json"
"fmt" "fmt"
stdmath "math"
"math/big"
"os"
"reflect"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/common/math" "github.com/ethereum/go-ethereum/common/math"
@ -37,6 +32,7 @@ import (
"github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/tracing" "github.com/ethereum/go-ethereum/core/tracing"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/types/bal"
"github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/params"
@ -44,6 +40,11 @@ import (
"github.com/ethereum/go-ethereum/triedb" "github.com/ethereum/go-ethereum/triedb"
"github.com/ethereum/go-ethereum/triedb/hashdb" "github.com/ethereum/go-ethereum/triedb/hashdb"
"github.com/ethereum/go-ethereum/triedb/pathdb" "github.com/ethereum/go-ethereum/triedb/pathdb"
stdmath "math"
"math/big"
"os"
"reflect"
"strings"
) )
// A BlockTest checks handling of entire blocks. // A BlockTest checks handling of entire blocks.
@ -71,6 +72,7 @@ type btBlock struct {
ExpectException string ExpectException string
Rlp string Rlp string
UncleHeaders []*btHeader UncleHeaders []*btHeader
AccessList *bal.BlockAccessList `json:"blockAccessList,omitempty"`
} }
//go:generate go run github.com/fjl/gencodec -type btHeader -field-override btHeaderMarshaling -out gen_btheader.go //go:generate go run github.com/fjl/gencodec -type btHeader -field-override btHeaderMarshaling -out gen_btheader.go
@ -97,6 +99,7 @@ type btHeader struct {
BlobGasUsed *uint64 BlobGasUsed *uint64
ExcessBlobGas *uint64 ExcessBlobGas *uint64
ParentBeaconBlockRoot *common.Hash ParentBeaconBlockRoot *common.Hash
BlockAccessListHash *common.Hash
SlotNumber *uint64 SlotNumber *uint64
} }
@ -113,27 +116,20 @@ type btHeaderMarshaling struct {
SlotNumber *math.HexOrDecimal64 SlotNumber *math.HexOrDecimal64
} }
func (t *BlockTest) Run(snapshotter bool, scheme string, witness bool, tracer *tracing.Hooks, postCheck func(error, *core.BlockChain)) (result error) { func (t *BlockTest) createTestBlockChain(config *params.ChainConfig, snapshotter bool, scheme string, witness, createAndVerifyBAL bool, tracer *tracing.Hooks) (*core.BlockChain, error) {
config, ok := Forks[t.json.Network]
if !ok {
return UnsupportedForkError{t.json.Network}
}
// import pre accounts & construct test genesis block & state root // import pre accounts & construct test genesis block & state root
// Commit genesis state
var ( var (
gspec = t.genesis(config)
db = rawdb.NewMemoryDatabase() db = rawdb.NewMemoryDatabase()
tconf = &triedb.Config{ tconf = &triedb.Config{
Preimages: true, Preimages: true,
IsVerkle: gspec.Config.VerkleTime != nil && *gspec.Config.VerkleTime <= gspec.Timestamp,
} }
) )
if scheme == rawdb.PathScheme || tconf.IsVerkle { if scheme == rawdb.PathScheme {
tconf.PathDB = pathdb.Defaults tconf.PathDB = pathdb.Defaults
} else { } else {
tconf.HashDB = hashdb.Defaults tconf.HashDB = hashdb.Defaults
} }
gspec := t.genesis(config)
// if ttd is not specified, set an arbitrary huge value // if ttd is not specified, set an arbitrary huge value
if gspec.Config.TerminalTotalDifficulty == nil { if gspec.Config.TerminalTotalDifficulty == nil {
@ -142,15 +138,15 @@ func (t *BlockTest) Run(snapshotter bool, scheme string, witness bool, tracer *t
triedb := triedb.NewDatabase(db, tconf) triedb := triedb.NewDatabase(db, tconf)
gblock, err := gspec.Commit(db, triedb, nil) gblock, err := gspec.Commit(db, triedb, nil)
if err != nil { if err != nil {
return err return nil, err
} }
triedb.Close() // close the db to prevent memory leak triedb.Close() // close the db to prevent memory leak
if gblock.Hash() != t.json.Genesis.Hash { if gblock.Hash() != t.json.Genesis.Hash {
return fmt.Errorf("genesis block hash doesn't match test: computed=%x, test=%x", gblock.Hash().Bytes()[:6], t.json.Genesis.Hash[:6]) return nil, fmt.Errorf("genesis block hash doesn't match test: computed=%x, test=%x", gblock.Hash().Bytes()[:6], t.json.Genesis.Hash[:6])
} }
if gblock.Root() != t.json.Genesis.StateRoot { if gblock.Root() != t.json.Genesis.StateRoot {
return fmt.Errorf("genesis block state root does not match test: computed=%x, test=%x", gblock.Root().Bytes()[:6], t.json.Genesis.StateRoot[:6]) return nil, fmt.Errorf("genesis block state root does not match test: computed=%x, test=%x", gblock.Root().Bytes()[:6], t.json.Genesis.StateRoot[:6])
} }
// Wrap the original engine within the beacon-engine // Wrap the original engine within the beacon-engine
engine := beacon.New(ethash.NewFaker()) engine := beacon.New(ethash.NewFaker())
@ -164,12 +160,27 @@ func (t *BlockTest) Run(snapshotter bool, scheme string, witness bool, tracer *t
Tracer: tracer, Tracer: tracer,
}, },
StatelessSelfValidation: witness, StatelessSelfValidation: witness,
NoPrefetch: true,
} }
if snapshotter { if snapshotter {
options.SnapshotLimit = 1 options.SnapshotLimit = 1
options.SnapshotWait = true options.SnapshotWait = true
} }
chain, err := core.NewBlockChain(db, gspec, engine, options) chain, err := core.NewBlockChain(db, gspec, engine, options)
if err != nil {
return nil, err
}
return chain, nil
}
func (t *BlockTest) Run(snapshotter bool, scheme string, witness bool, createAndVerifyBAL bool, tracer *tracing.Hooks, postCheck func(error, *core.BlockChain)) (result error) {
config, ok := Forks[t.json.Network]
if !ok {
return UnsupportedForkError{t.json.Network}
}
// import pre accounts & construct test genesis block & state root
chain, err := t.createTestBlockChain(config, snapshotter, scheme, witness, createAndVerifyBAL, tracer)
if err != nil { if err != nil {
return err return err
} }
@ -203,7 +214,50 @@ func (t *BlockTest) Run(snapshotter bool, scheme string, witness bool, tracer *t
} }
} }
} }
return t.validateImportedHeaders(chain, validBlocks) err = t.validateImportedHeaders(chain, validBlocks)
if err != nil {
return err
}
if createAndVerifyBAL {
newChain, _ := t.createTestBlockChain(config, snapshotter, scheme, witness, createAndVerifyBAL, tracer)
defer newChain.Stop()
var blocksWithBAL types.Blocks
for i := uint64(1); i <= chain.CurrentBlock().Number.Uint64(); i++ {
block := chain.GetBlockByNumber(i)
if chain.Config().IsAmsterdam(block.Number(), block.Time()) && block.AccessList() == nil {
return fmt.Errorf("block %d missing BAL", block.NumberU64())
}
blocksWithBAL = append(blocksWithBAL, block)
}
amt, err := newChain.InsertChain(blocksWithBAL)
if err != nil {
return err
}
_ = amt
newDB, err := newChain.State()
if err != nil {
return err
}
if err = t.validatePostState(newDB); err != nil {
return fmt.Errorf("post state validation failed: %v", err)
}
// Cross-check the snapshot-to-hash against the trie hash
if snapshotter {
if newChain.Snapshots() != nil {
if err := chain.Snapshots().Verify(chain.CurrentBlock().Root); err != nil {
return err
}
}
}
err = t.validateImportedHeaders(newChain, validBlocks)
if err != nil {
return err
}
}
return nil
} }
// Network returns the network/fork name for this test. // Network returns the network/fork name for this test.
@ -227,6 +281,7 @@ func (t *BlockTest) genesis(config *params.ChainConfig) *core.Genesis {
BaseFee: t.json.Genesis.BaseFeePerGas, BaseFee: t.json.Genesis.BaseFeePerGas,
BlobGasUsed: t.json.Genesis.BlobGasUsed, BlobGasUsed: t.json.Genesis.BlobGasUsed,
ExcessBlobGas: t.json.Genesis.ExcessBlobGas, ExcessBlobGas: t.json.Genesis.ExcessBlobGas,
BlockAccessListHash: t.json.Genesis.BlockAccessListHash,
} }
} }
@ -256,6 +311,16 @@ func (t *BlockTest) insertBlocks(blockchain *core.BlockChain) ([]btBlock, error)
return nil, fmt.Errorf("block RLP decoding failed when expected to succeed: %v", err) return nil, fmt.Errorf("block RLP decoding failed when expected to succeed: %v", err)
} }
} }
// check that if we encode the same block, it will result in the same RLP
var enc bytes.Buffer
if err := rlp.Encode(&enc, cb); err != nil {
return nil, err
}
expected := common.Hex2Bytes(strings.TrimLeft(b.Rlp, "0x"))
if !bytes.Equal(enc.Bytes(), expected) {
return nil, fmt.Errorf("mismatch. expected\n%s\ngot\n%x\n", expected, enc.Bytes())
}
// RLP decoding worked, try to insert into chain: // RLP decoding worked, try to insert into chain:
blocks := types.Blocks{cb} blocks := types.Blocks{cb}
i, err := blockchain.InsertChain(blocks) i, err := blockchain.InsertChain(blocks)
@ -268,7 +333,7 @@ func (t *BlockTest) insertBlocks(blockchain *core.BlockChain) ([]btBlock, error)
} }
if b.BlockHeader == nil { if b.BlockHeader == nil {
if data, err := json.MarshalIndent(cb.Header(), "", " "); err == nil { if data, err := json.MarshalIndent(cb.Header(), "", " "); err == nil {
fmt.Fprintf(os.Stdout, "block (index %d) insertion should have failed due to: %v:\n%v\n", fmt.Fprintf(os.Stderr, "block (index %d) insertion should have failed due to: %v:\n%v\n",
bi, b.ExpectException, string(data)) bi, b.ExpectException, string(data))
} }
return nil, fmt.Errorf("block (index %d) insertion should have failed due to: %v", return nil, fmt.Errorf("block (index %d) insertion should have failed due to: %v",

View file

@ -38,6 +38,7 @@ func (b btHeader) MarshalJSON() ([]byte, error) {
BlobGasUsed *math.HexOrDecimal64 BlobGasUsed *math.HexOrDecimal64
ExcessBlobGas *math.HexOrDecimal64 ExcessBlobGas *math.HexOrDecimal64
ParentBeaconBlockRoot *common.Hash ParentBeaconBlockRoot *common.Hash
BlockAccessListHash *common.Hash
SlotNumber *math.HexOrDecimal64 SlotNumber *math.HexOrDecimal64
} }
var enc btHeader var enc btHeader
@ -62,6 +63,7 @@ func (b btHeader) MarshalJSON() ([]byte, error) {
enc.BlobGasUsed = (*math.HexOrDecimal64)(b.BlobGasUsed) enc.BlobGasUsed = (*math.HexOrDecimal64)(b.BlobGasUsed)
enc.ExcessBlobGas = (*math.HexOrDecimal64)(b.ExcessBlobGas) enc.ExcessBlobGas = (*math.HexOrDecimal64)(b.ExcessBlobGas)
enc.ParentBeaconBlockRoot = b.ParentBeaconBlockRoot enc.ParentBeaconBlockRoot = b.ParentBeaconBlockRoot
enc.BlockAccessListHash = b.BlockAccessListHash
enc.SlotNumber = (*math.HexOrDecimal64)(b.SlotNumber) enc.SlotNumber = (*math.HexOrDecimal64)(b.SlotNumber)
return json.Marshal(&enc) return json.Marshal(&enc)
} }
@ -90,6 +92,7 @@ func (b *btHeader) UnmarshalJSON(input []byte) error {
BlobGasUsed *math.HexOrDecimal64 BlobGasUsed *math.HexOrDecimal64
ExcessBlobGas *math.HexOrDecimal64 ExcessBlobGas *math.HexOrDecimal64
ParentBeaconBlockRoot *common.Hash ParentBeaconBlockRoot *common.Hash
BlockAccessListHash *common.Hash
SlotNumber *math.HexOrDecimal64 SlotNumber *math.HexOrDecimal64
} }
var dec btHeader var dec btHeader
@ -159,6 +162,9 @@ func (b *btHeader) UnmarshalJSON(input []byte) error {
if dec.ParentBeaconBlockRoot != nil { if dec.ParentBeaconBlockRoot != nil {
b.ParentBeaconBlockRoot = dec.ParentBeaconBlockRoot b.ParentBeaconBlockRoot = dec.ParentBeaconBlockRoot
} }
if dec.BlockAccessListHash != nil {
b.BlockAccessListHash = dec.BlockAccessListHash
}
if dec.SlotNumber != nil { if dec.SlotNumber != nil {
b.SlotNumber = (*uint64)(dec.SlotNumber) b.SlotNumber = (*uint64)(dec.SlotNumber)
} }

View file

@ -493,6 +493,70 @@ var Forks = map[string]*params.ChainConfig{
BPO1: bpo1BlobConfig, BPO1: bpo1BlobConfig,
}, },
}, },
"Amsterdam": {
ChainID: big.NewInt(1),
HomesteadBlock: big.NewInt(0),
EIP150Block: big.NewInt(0),
EIP155Block: big.NewInt(0),
EIP158Block: big.NewInt(0),
ByzantiumBlock: big.NewInt(0),
ConstantinopleBlock: big.NewInt(0),
PetersburgBlock: big.NewInt(0),
IstanbulBlock: big.NewInt(0),
MuirGlacierBlock: big.NewInt(0),
BerlinBlock: big.NewInt(0),
LondonBlock: big.NewInt(0),
ArrowGlacierBlock: big.NewInt(0),
MergeNetsplitBlock: big.NewInt(0),
TerminalTotalDifficulty: big.NewInt(0),
ShanghaiTime: u64(0),
CancunTime: u64(0),
PragueTime: u64(0),
OsakaTime: u64(0),
BPO1Time: u64(0),
BPO2Time: u64(0),
AmsterdamTime: u64(0),
DepositContractAddress: params.MainnetChainConfig.DepositContractAddress,
BlobScheduleConfig: &params.BlobScheduleConfig{
Cancun: params.DefaultCancunBlobConfig,
Prague: params.DefaultPragueBlobConfig,
Osaka: params.DefaultOsakaBlobConfig,
BPO1: bpo1BlobConfig,
BPO2: bpo2BlobConfig,
},
},
"BPO2ToAmsterdamAtTime15k": {
ChainID: big.NewInt(1),
HomesteadBlock: big.NewInt(0),
EIP150Block: big.NewInt(0),
EIP155Block: big.NewInt(0),
EIP158Block: big.NewInt(0),
ByzantiumBlock: big.NewInt(0),
ConstantinopleBlock: big.NewInt(0),
PetersburgBlock: big.NewInt(0),
IstanbulBlock: big.NewInt(0),
MuirGlacierBlock: big.NewInt(0),
BerlinBlock: big.NewInt(0),
LondonBlock: big.NewInt(0),
ArrowGlacierBlock: big.NewInt(0),
MergeNetsplitBlock: big.NewInt(0),
TerminalTotalDifficulty: big.NewInt(0),
ShanghaiTime: u64(0),
CancunTime: u64(0),
PragueTime: u64(0),
OsakaTime: u64(0),
BPO1Time: u64(0),
BPO2Time: u64(0),
AmsterdamTime: u64(15_000),
DepositContractAddress: params.MainnetChainConfig.DepositContractAddress,
BlobScheduleConfig: &params.BlobScheduleConfig{
Cancun: params.DefaultCancunBlobConfig,
Prague: params.DefaultPragueBlobConfig,
Osaka: params.DefaultOsakaBlobConfig,
BPO1: bpo1BlobConfig,
BPO2: bpo2BlobConfig,
},
},
"OsakaToBPO1AtTime15k": { "OsakaToBPO1AtTime15k": {
ChainID: big.NewInt(1), ChainID: big.NewInt(1),
HomesteadBlock: big.NewInt(0), HomesteadBlock: big.NewInt(0),

View file

@ -45,6 +45,7 @@ var (
executionSpecStateTestDir = filepath.Join(".", "spec-tests", "fixtures", "state_tests") executionSpecStateTestDir = filepath.Join(".", "spec-tests", "fixtures", "state_tests")
executionSpecTransactionTestDir = filepath.Join(".", "spec-tests", "fixtures", "transaction_tests") executionSpecTransactionTestDir = filepath.Join(".", "spec-tests", "fixtures", "transaction_tests")
benchmarksDir = filepath.Join(".", "evm-benchmarks", "benchmarks") benchmarksDir = filepath.Join(".", "evm-benchmarks", "benchmarks")
executionSpecBALBlockchainTestDir = filepath.Join(".", "spec-tests-bal", "fixtures", "blockchain_tests")
) )
func readJSON(reader io.Reader, value interface{}) error { func readJSON(reader io.Reader, value interface{}) error {

View file

@ -239,6 +239,14 @@ func (t *BinaryTrie) GetStorage(addr common.Address, key []byte) ([]byte, error)
return t.root.Get(GetBinaryTreeKeyStorageSlot(addr, key), t.nodeResolver) return t.root.Get(GetBinaryTreeKeyStorageSlot(addr, key), t.nodeResolver)
} }
// UpdateAccountBatch implements the batched account-update hook of the trie
// interface. Batched updates are not implemented for the binary trie and
// this method always panics.
func (t *BinaryTrie) UpdateAccountBatch(addresses []common.Address, accounts []*types.StateAccount, _ []int) error {
	panic("not implemented")
}
// UpdateStorageBatch implements the batched storage-update hook of the trie
// interface. Batched updates are not implemented for the binary trie and
// this method always panics.
func (t *BinaryTrie) UpdateStorageBatch(_ common.Address, keys [][]byte, values [][]byte) error {
	panic("not implemented")
}
// UpdateAccount updates the account information for the given address. // UpdateAccount updates the account information for the given address.
func (t *BinaryTrie) UpdateAccount(addr common.Address, acc *types.StateAccount, codeLen int) error { func (t *BinaryTrie) UpdateAccount(addr common.Address, acc *types.StateAccount, codeLen int) error {
var ( var (

View file

@ -210,6 +210,29 @@ func (t *StateTrie) UpdateStorage(_ common.Address, key, value []byte) error {
return nil return nil
} }
// UpdateStorageBatch hashes the given storage keys, RLP-encodes the matching
// values and writes the whole set into the underlying trie in one batch.
func (t *StateTrie) UpdateStorageBatch(_ common.Address, keys [][]byte, values [][]byte) error {
	var (
		hashedKeys = make([][]byte, 0, len(keys))
		encoded    = make([][]byte, 0, len(values))
	)
	// Derive the secure (hashed) key for every slot, remembering the
	// preimage when preimage recording is enabled.
	for _, key := range keys {
		hashed := crypto.Keccak256(key)
		if t.preimages != nil {
			t.secKeyCache[common.Hash(hashed)] = key
		}
		hashedKeys = append(hashedKeys, hashed)
	}
	// Serialize each value into its canonical RLP representation.
	for _, value := range values {
		blob, err := rlp.EncodeToBytes(value)
		if err != nil {
			return err
		}
		encoded = append(encoded, blob)
	}
	return t.trie.UpdateBatch(hashedKeys, encoded)
}
// UpdateAccount will abstract the write of an account to the secure trie. // UpdateAccount will abstract the write of an account to the secure trie.
func (t *StateTrie) UpdateAccount(address common.Address, acc *types.StateAccount, _ int) error { func (t *StateTrie) UpdateAccount(address common.Address, acc *types.StateAccount, _ int) error {
hk := crypto.Keccak256(address.Bytes()) hk := crypto.Keccak256(address.Bytes())
@ -226,6 +249,29 @@ func (t *StateTrie) UpdateAccount(address common.Address, acc *types.StateAccoun
return nil return nil
} }
// UpdateAccountBatch hashes the given addresses, RLP-encodes the matching
// accounts and writes the whole set into the underlying trie in one batch.
func (t *StateTrie) UpdateAccountBatch(addresses []common.Address, accounts []*types.StateAccount, _ []int) error {
	var (
		hashedKeys = make([][]byte, 0, len(addresses))
		encoded    = make([][]byte, 0, len(accounts))
	)
	// Derive the secure (hashed) key for every address, remembering the
	// preimage when preimage recording is enabled.
	for _, address := range addresses {
		hashed := crypto.Keccak256(address.Bytes())
		if t.preimages != nil {
			t.secKeyCache[common.Hash(hashed)] = address.Bytes()
		}
		hashedKeys = append(hashedKeys, hashed)
	}
	// Serialize each account into its canonical RLP representation.
	for _, account := range accounts {
		blob, err := rlp.EncodeToBytes(account)
		if err != nil {
			return err
		}
		encoded = append(encoded, blob)
	}
	return t.trie.UpdateBatch(hashedKeys, encoded)
}
func (t *StateTrie) UpdateContractCode(_ common.Address, _ common.Hash, _ []byte) error { func (t *StateTrie) UpdateContractCode(_ common.Address, _ common.Hash, _ []byte) error {
return nil return nil
} }

View file

@ -33,12 +33,10 @@ import (
// while the latter is inserted/deleted in order to follow the rule of trie. // while the latter is inserted/deleted in order to follow the rule of trie.
// This tool can track all of them no matter the node is embedded in its // This tool can track all of them no matter the node is embedded in its
// parent or not, but valueNode is never tracked. // parent or not, but valueNode is never tracked.
//
// Note opTracer is not thread-safe, callers should be responsible for handling
// the concurrency issues by themselves.
type opTracer struct { type opTracer struct {
inserts map[string]struct{} inserts map[string]struct{}
deletes map[string]struct{} deletes map[string]struct{}
lock sync.RWMutex
} }
// newOpTracer initializes the tracer for capturing trie changes. // newOpTracer initializes the tracer for capturing trie changes.
@ -53,6 +51,9 @@ func newOpTracer() *opTracer {
// in the deletion set (resurrected node), then just wipe it from // in the deletion set (resurrected node), then just wipe it from
// the deletion set as it's "untouched". // the deletion set as it's "untouched".
func (t *opTracer) onInsert(path []byte) { func (t *opTracer) onInsert(path []byte) {
t.lock.Lock()
defer t.lock.Unlock()
if _, present := t.deletes[string(path)]; present { if _, present := t.deletes[string(path)]; present {
delete(t.deletes, string(path)) delete(t.deletes, string(path))
return return
@ -64,6 +65,9 @@ func (t *opTracer) onInsert(path []byte) {
// in the addition set, then just wipe it from the addition set // in the addition set, then just wipe it from the addition set
// as it's untouched. // as it's untouched.
func (t *opTracer) onDelete(path []byte) { func (t *opTracer) onDelete(path []byte) {
t.lock.Lock()
defer t.lock.Unlock()
if _, present := t.inserts[string(path)]; present { if _, present := t.inserts[string(path)]; present {
delete(t.inserts, string(path)) delete(t.inserts, string(path))
return return
@ -73,12 +77,18 @@ func (t *opTracer) onDelete(path []byte) {
// reset clears the content tracked by tracer. // reset clears the content tracked by tracer.
func (t *opTracer) reset() { func (t *opTracer) reset() {
t.lock.Lock()
defer t.lock.Unlock()
clear(t.inserts) clear(t.inserts)
clear(t.deletes) clear(t.deletes)
} }
// copy returns a deep copied tracer instance. // copy returns a deep copied tracer instance.
func (t *opTracer) copy() *opTracer { func (t *opTracer) copy() *opTracer {
t.lock.RLock()
defer t.lock.RUnlock()
return &opTracer{ return &opTracer{
inserts: maps.Clone(t.inserts), inserts: maps.Clone(t.inserts),
deletes: maps.Clone(t.deletes), deletes: maps.Clone(t.deletes),
@ -87,6 +97,9 @@ func (t *opTracer) copy() *opTracer {
// deletedList returns a list of node paths which are deleted from the trie. // deletedList returns a list of node paths which are deleted from the trie.
func (t *opTracer) deletedList() [][]byte { func (t *opTracer) deletedList() [][]byte {
t.lock.RLock()
defer t.lock.RUnlock()
paths := make([][]byte, 0, len(t.deletes)) paths := make([][]byte, 0, len(t.deletes))
for path := range t.deletes { for path := range t.deletes {
paths = append(paths, []byte(path)) paths = append(paths, []byte(path))

View file

@ -49,6 +49,14 @@ func NewTransitionTrie(base *trie.SecureTrie, overlay *bintrie.BinaryTrie, st bo
} }
} }
// UpdateAccountBatch implements the batched account-update hook of the trie
// interface. Batched updates are not implemented for the transition trie and
// this method always panics.
func (t *TransitionTrie) UpdateAccountBatch(addresses []common.Address, accounts []*types.StateAccount, _ []int) error {
	panic("not implemented")
}
// UpdateStorageBatch implements the batched storage-update hook of the trie
// interface. Batched updates are not implemented for the transition trie and
// this method always panics.
func (t *TransitionTrie) UpdateStorageBatch(_ common.Address, keys [][]byte, values [][]byte) error {
	panic("not implemented")
}
// Base returns the base trie. // Base returns the base trie.
func (t *TransitionTrie) Base() *trie.SecureTrie { func (t *TransitionTrie) Base() *trie.SecureTrie {
return t.base return t.base

View file

@ -480,6 +480,72 @@ func (t *Trie) insert(n node, prefix, key []byte, value node) (bool, node, error
} }
} }
// UpdateBatch updates a batch of entries concurrently.
//
// Entries with a non-empty value are inserted, entries with an empty value
// are deleted. The keys are partitioned by their first nibble, so each
// worker goroutine only ever reads and writes a single, disjoint child slot
// of the root full node.
func (t *Trie) UpdateBatch(keys [][]byte, values [][]byte) error {
	// Short circuit if the trie is already committed and unusable.
	if t.committed {
		return ErrCommitted
	}
	if len(keys) != len(values) {
		return fmt.Errorf("keys and values length mismatch: %d != %d", len(keys), len(values))
	}
	// Insert the entries sequentially if there are not too many
	// trie nodes in the trie. Parallelism is only possible when the
	// root is a full node, since that is what the key space splits on.
	fn, ok := t.root.(*fullNode)
	if !ok || len(keys) < 4 { // TODO(rjl493456442) the parallelism threshold should be twisted
		for i, key := range keys {
			err := t.Update(key, values[i])
			if err != nil {
				return err
			}
		}
		return nil
	}
	var (
		ikeys = make(map[byte][][]byte) // hex-encoded keys, grouped by leading nibble
		ivals = make(map[byte][][]byte) // matching values, same grouping and order
		eg    errgroup.Group
	)
	for i, key := range keys {
		hkey := keybytesToHex(key)
		ikeys[hkey[0]] = append(ikeys[hkey[0]], hkey)
		ivals[hkey[0]] = append(ivals[hkey[0]], values[i])
	}
	// Invalidate the cached hash of the root since its children are about
	// to change. (len(keys) > 0 always holds here: the < 4 case returned
	// above, so this check is defensive.)
	if len(keys) > 0 {
		fn.flags = t.newFlag()
	}
	for p, k := range ikeys {
		pos := p // capture loop variables for the goroutine below
		ks := k
		eg.Go(func() error {
			vs := ivals[pos]
			for i, k := range ks {
				if len(vs[i]) != 0 {
					// Non-empty value: insert/overwrite under this child.
					_, n, err := t.insert(fn.Children[pos], []byte{pos}, k[1:], valueNode(vs[i]))
					if err != nil {
						return err
					}
					fn.Children[pos] = n
				} else {
					// Empty value: treat as a deletion of the key.
					_, n, err := t.delete(fn.Children[pos], []byte{pos}, k[1:])
					if err != nil {
						return err
					}
					fn.Children[pos] = n
				}
			}
			return nil
		})
	}
	if err := eg.Wait(); err != nil {
		return err
	}
	// NOTE(review): if deletions leave the root full node with fewer than
	// two live children, it is not collapsed into a short node here —
	// confirm downstream hashing/commit tolerates that shape.
	t.unhashed += len(keys)
	t.uncommitted += len(keys)
	return nil
}
// MustDelete is a wrapper of Delete and will omit any encountered error but // MustDelete is a wrapper of Delete and will omit any encountered error but
// just print out an error message. // just print out an error message.
func (t *Trie) MustDelete(key []byte) { func (t *Trie) MustDelete(key []byte) {

View file

@ -1501,82 +1501,56 @@ func testTrieCopyNewTrie(t *testing.T, entries []kv) {
} }
} }
// goos: darwin func TestUpdateBatch(t *testing.T) {
// goarch: arm64 testUpdateBatch(t, []kv{
// pkg: github.com/ethereum/go-ethereum/trie {k: []byte("do"), v: []byte("verb")},
// cpu: Apple M1 Pro {k: []byte("ether"), v: []byte("wookiedoo")},
// BenchmarkTriePrefetch {k: []byte("horse"), v: []byte("stallion")},
// BenchmarkTriePrefetch-8 9961 100706 ns/op {k: []byte("shaman"), v: []byte("horse")},
func BenchmarkTriePrefetch(b *testing.B) { {k: []byte("doge"), v: []byte("coin")},
db := newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme) {k: []byte("dog"), v: []byte("puppy")},
tr := NewEmpty(db) })
vals := make(map[string]*kv)
for i := 0; i < 3000; i++ {
value := &kv{
k: randBytes(32),
v: randBytes(20),
t: false,
}
tr.MustUpdate(value.k, value.v)
vals[string(value.k)] = value
}
root, nodes := tr.Commit(false)
db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes))
b.ResetTimer()
for i := 0; i < b.N; i++ { var entries []kv
tr, err := New(TrieID(root), db) for i := 0; i < 256; i++ {
if err != nil { entries = append(entries, kv{k: testrand.Bytes(32), v: testrand.Bytes(32)})
b.Fatalf("Failed to open the trie")
}
var keys [][]byte
for k := range vals {
keys = append(keys, []byte(k))
if len(keys) > 64 {
break
}
}
tr.Prefetch(keys)
} }
testUpdateBatch(t, entries)
} }
// goos: darwin func testUpdateBatch(t *testing.T, entries []kv) {
// goarch: arm64 var (
// pkg: github.com/ethereum/go-ethereum/trie base = NewEmpty(nil)
// cpu: Apple M1 Pro keys [][]byte
// BenchmarkTrieSeqPrefetch vals [][]byte
// BenchmarkTrieSeqPrefetch-8 12879 96710 ns/op )
func BenchmarkTrieSeqPrefetch(b *testing.B) { for _, entry := range entries {
db := newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme) base.Update(entry.k, entry.v)
tr := NewEmpty(db) keys = append(keys, entry.k)
vals := make(map[string]*kv) vals = append(vals, entry.v)
for i := 0; i < 3000; i++ {
value := &kv{
k: randBytes(32),
v: randBytes(20),
t: false,
} }
tr.MustUpdate(value.k, value.v) for i := 0; i < 10; i++ {
vals[string(value.k)] = value k, v := testrand.Bytes(32), testrand.Bytes(32)
base.Update(k, v)
keys = append(keys, k)
vals = append(vals, v)
} }
root, nodes := tr.Commit(false)
db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes))
b.ResetTimer()
for i := 0; i < b.N; i++ { cmp := NewEmpty(nil)
tr, err := New(TrieID(root), db) if err := cmp.UpdateBatch(keys, vals); err != nil {
if err != nil { t.Fatalf("Failed to update batch, %v", err)
b.Fatalf("Failed to open the trie")
} }
var keys [][]byte
for k := range vals { // Traverse the original tree, the changes made on the copy one shouldn't
keys = append(keys, []byte(k)) // affect the old one
if len(keys) > 64 { for _, key := range keys {
break v1, _ := base.Get(key)
v2, _ := cmp.Get(key)
if !bytes.Equal(v1, v2) {
t.Errorf("Unexpected data, key: %v, want: %v, got: %v", key, v1, v2)
} }
} }
for _, k := range keys { if base.Hash() != cmp.Hash() {
tr.Get(k) t.Errorf("Hash mismatch: want %x, got %x", base.Hash(), cmp.Hash())
}
} }
} }

View file

@ -18,7 +18,6 @@ package triedb
import ( import (
"errors" "errors"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/ethdb"