Merge branch 'master' into simplify_limbo

This commit is contained in:
maskpp 2025-08-10 12:30:31 +08:00
commit 7ad2e1102f
109 changed files with 1786 additions and 940 deletions

View file

@ -33,6 +33,10 @@ import (
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
) )
var (
intRegex = regexp.MustCompile(`(u)?int([0-9]*)`)
)
func isKeyWord(arg string) bool { func isKeyWord(arg string) bool {
switch arg { switch arg {
case "break": case "break":
@ -299,7 +303,7 @@ func bindBasicType(kind abi.Type) string {
case abi.AddressTy: case abi.AddressTy:
return "common.Address" return "common.Address"
case abi.IntTy, abi.UintTy: case abi.IntTy, abi.UintTy:
parts := regexp.MustCompile(`(u)?int([0-9]*)`).FindStringSubmatch(kind.String()) parts := intRegex.FindStringSubmatch(kind.String())
switch parts[2] { switch parts[2] {
case "8", "16", "32", "64": case "8", "16", "32", "64":
return fmt.Sprintf("%sint%s", parts[1], parts[2]) return fmt.Sprintf("%sint%s", parts[1], parts[2])

View file

@ -32,7 +32,7 @@ type Value [32]byte
// Values represent a series of merkle tree leaves/nodes. // Values represent a series of merkle tree leaves/nodes.
type Values []Value type Values []Value
var valueT = reflect.TypeOf(Value{}) var valueT = reflect.TypeFor[Value]()
// UnmarshalJSON parses a merkle value in hex syntax. // UnmarshalJSON parses a merkle value in hex syntax.
func (m *Value) UnmarshalJSON(input []byte) error { func (m *Value) UnmarshalJSON(input []byte) error {

View file

@ -1 +1 @@
0xd60e5310c5d52ced44cfb13be4e9f22a1e6a6dc56964c3cccd429182d26d72d0 0x4bae4b97deda095724560012cab1f80a5221ce0a37a4b5236d8ab63f595f29d9

View file

@ -0,0 +1 @@
0x1bbf958008172591b6cbdb3d8d52e26998258e83d4bdb9eec10969d84519a6bd

View file

@ -1 +1 @@
0x02f0bb348b0d45f95a9b7e2bb5705768ad06548876cee03d880a2c9dabb1ff88 0x2fe39a39b6f7cbd549e0f74d259de6db486005a65bd3bd92840dd6ce21d6f4c8

View file

@ -1 +1 @@
0xa0dad451a230c01be6f2492980ec5bb412d8cf33351a75e8b172b5b84a5fd03a 0x86686b2b366e24134e0e3969a9c5f3759f92e5d2b04785b42e22cc7d468c2107

View file

@ -31,6 +31,9 @@ var checkpointSepolia string
//go:embed checkpoint_holesky.hex //go:embed checkpoint_holesky.hex
var checkpointHolesky string var checkpointHolesky string
//go:embed checkpoint_hoodi.hex
var checkpointHoodi string
var ( var (
MainnetLightConfig = (&ChainConfig{ MainnetLightConfig = (&ChainConfig{
GenesisValidatorsRoot: common.HexToHash("0x4b363db94e286120d76eb905340fdd4e54bfe9f06bf33ff6cf5ad27f511bfe95"), GenesisValidatorsRoot: common.HexToHash("0x4b363db94e286120d76eb905340fdd4e54bfe9f06bf33ff6cf5ad27f511bfe95"),
@ -71,7 +74,7 @@ var (
HoodiLightConfig = (&ChainConfig{ HoodiLightConfig = (&ChainConfig{
GenesisValidatorsRoot: common.HexToHash("0x212f13fc4df078b6cb7db228f1c8307566dcecf900867401a92023d7ba99cb5f"), GenesisValidatorsRoot: common.HexToHash("0x212f13fc4df078b6cb7db228f1c8307566dcecf900867401a92023d7ba99cb5f"),
GenesisTime: 1742212800, GenesisTime: 1742212800,
Checkpoint: common.HexToHash(""), Checkpoint: common.HexToHash(checkpointHoodi),
}). }).
AddFork("GENESIS", 0, common.FromHex("0x10000910")). AddFork("GENESIS", 0, common.FromHex("0x10000910")).
AddFork("ALTAIR", 0, common.FromHex("0x20000910")). AddFork("ALTAIR", 0, common.FromHex("0x20000910")).

View file

@ -129,7 +129,7 @@ func (c *Conn) Write(proto Proto, code uint64, msg any) error {
return err return err
} }
var errDisc error = fmt.Errorf("disconnect") var errDisc error = errors.New("disconnect")
// ReadEth reads an Eth sub-protocol wire message. // ReadEth reads an Eth sub-protocol wire message.
func (c *Conn) ReadEth() (any, error) { func (c *Conn) ReadEth() (any, error) {

View file

@ -19,6 +19,7 @@ package ethtest
import ( import (
"context" "context"
"crypto/rand" "crypto/rand"
"errors"
"fmt" "fmt"
"reflect" "reflect"
"sync" "sync"
@ -1092,7 +1093,7 @@ func (s *Suite) testBadBlobTx(t *utesting.T, tx *types.Transaction, badTx *types
return return
} }
if !readUntilDisconnect(conn) { if !readUntilDisconnect(conn) {
errc <- fmt.Errorf("expected bad peer to be disconnected") errc <- errors.New("expected bad peer to be disconnected")
return return
} }
stage3.Done() stage3.Done()
@ -1139,7 +1140,7 @@ func (s *Suite) testBadBlobTx(t *utesting.T, tx *types.Transaction, badTx *types
} }
if req.GetPooledTransactionsRequest[0] != tx.Hash() { if req.GetPooledTransactionsRequest[0] != tx.Hash() {
errc <- fmt.Errorf("requested unknown tx hash") errc <- errors.New("requested unknown tx hash")
return return
} }
@ -1149,7 +1150,7 @@ func (s *Suite) testBadBlobTx(t *utesting.T, tx *types.Transaction, badTx *types
return return
} }
if readUntilDisconnect(conn) { if readUntilDisconnect(conn) {
errc <- fmt.Errorf("unexpected disconnect") errc <- errors.New("unexpected disconnect")
return return
} }
close(errc) close(errc)

View file

@ -27,6 +27,7 @@ import (
"github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/internal/utesting" "github.com/ethereum/go-ethereum/internal/utesting"
"github.com/ethereum/go-ethereum/p2p/discover/v4wire" "github.com/ethereum/go-ethereum/p2p/discover/v4wire"
"github.com/ethereum/go-ethereum/p2p/enode"
) )
const ( const (
@ -501,6 +502,36 @@ func FindnodeAmplificationWrongIP(t *utesting.T) {
} }
} }
// ENRRequest verifies that the node answers an ENRRequest packet with an
// ENRResponse whose reply token matches the request hash and whose record
// identifies the expected remote node.
func ENRRequest(t *utesting.T) {
	t.Log(`This test sends an ENRRequest packet and expects a response containing a valid ENR.`)

	te := newTestEnv(Remote, Listen1, Listen2)
	defer te.close()
	bond(t, te)

	// Send the request and keep the packet hash for reply-token matching.
	reqHash := te.send(te.l1, &v4wire.ENRRequest{Expiration: futureExpiration()})
	packet, _, err := te.read(te.l1)
	if err != nil {
		t.Fatal("read error:", err)
	}
	resp, ok := packet.(*v4wire.ENRResponse)
	if !ok {
		t.Fatalf("expected ENRResponse packet, got %T", packet)
	}
	if !bytes.Equal(resp.ReplyTok, reqHash) {
		t.Errorf("wrong hash in response packet: got %x, want %x", resp.ReplyTok, reqHash)
	}
	// Decode the record and check it belongs to the node we queried.
	n, err := enode.New(enode.ValidSchemes, &resp.Record)
	if err != nil {
		t.Errorf("invalid record in response: %v", err)
	}
	if n.ID() != te.remote.ID() {
		t.Errorf("wrong node ID in response: got %v, want %v", n.ID(), te.remote.ID())
	}
}
var AllTests = []utesting.Test{ var AllTests = []utesting.Test{
{Name: "Ping/Basic", Fn: BasicPing}, {Name: "Ping/Basic", Fn: BasicPing},
{Name: "Ping/WrongTo", Fn: PingWrongTo}, {Name: "Ping/WrongTo", Fn: PingWrongTo},
@ -510,6 +541,7 @@ var AllTests = []utesting.Test{
{Name: "Ping/PastExpiration", Fn: PingPastExpiration}, {Name: "Ping/PastExpiration", Fn: PingPastExpiration},
{Name: "Ping/WrongPacketType", Fn: WrongPacketType}, {Name: "Ping/WrongPacketType", Fn: WrongPacketType},
{Name: "Ping/BondThenPingWithWrongFrom", Fn: BondThenPingWithWrongFrom}, {Name: "Ping/BondThenPingWithWrongFrom", Fn: BondThenPingWithWrongFrom},
{Name: "ENRRequest", Fn: ENRRequest},
{Name: "Findnode/WithoutEndpointProof", Fn: FindnodeWithoutEndpointProof}, {Name: "Findnode/WithoutEndpointProof", Fn: FindnodeWithoutEndpointProof},
{Name: "Findnode/BasicFindnode", Fn: BasicFindnode}, {Name: "Findnode/BasicFindnode", Fn: BasicFindnode},
{Name: "Findnode/UnsolicitedNeighbors", Fn: UnsolicitedNeighbors}, {Name: "Findnode/UnsolicitedNeighbors", Fn: UnsolicitedNeighbors},

View file

@ -716,7 +716,7 @@ func downloadEra(ctx *cli.Context) error {
case ctx.IsSet(utils.SepoliaFlag.Name): case ctx.IsSet(utils.SepoliaFlag.Name):
network = "sepolia" network = "sepolia"
default: default:
return fmt.Errorf("unsupported network, no known era1 checksums") return errors.New("unsupported network, no known era1 checksums")
} }
} }

View file

@ -262,14 +262,16 @@ func makeFullNode(ctx *cli.Context) *node.Node {
if cfg.Ethstats.URL != "" { if cfg.Ethstats.URL != "" {
utils.RegisterEthStatsService(stack, backend, cfg.Ethstats.URL) utils.RegisterEthStatsService(stack, backend, cfg.Ethstats.URL)
} }
// Configure full-sync tester service if requested // Configure synchronization override service
var synctarget common.Hash
if ctx.IsSet(utils.SyncTargetFlag.Name) { if ctx.IsSet(utils.SyncTargetFlag.Name) {
hex := hexutil.MustDecode(ctx.String(utils.SyncTargetFlag.Name)) hex := hexutil.MustDecode(ctx.String(utils.SyncTargetFlag.Name))
if len(hex) != common.HashLength { if len(hex) != common.HashLength {
utils.Fatalf("invalid sync target length: have %d, want %d", len(hex), common.HashLength) utils.Fatalf("invalid sync target length: have %d, want %d", len(hex), common.HashLength)
} }
utils.RegisterFullSyncTester(stack, eth, common.BytesToHash(hex), ctx.Bool(utils.ExitWhenSyncedFlag.Name)) synctarget = common.BytesToHash(hex)
} }
utils.RegisterSyncOverrideService(stack, eth, synctarget, ctx.Bool(utils.ExitWhenSyncedFlag.Name))
if ctx.IsSet(utils.DeveloperFlag.Name) { if ctx.IsSet(utils.DeveloperFlag.Name) {
// Start dev mode. // Start dev mode.

View file

@ -49,10 +49,10 @@ import (
"github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/crypto/kzg4844" "github.com/ethereum/go-ethereum/crypto/kzg4844"
"github.com/ethereum/go-ethereum/eth" "github.com/ethereum/go-ethereum/eth"
"github.com/ethereum/go-ethereum/eth/catalyst"
"github.com/ethereum/go-ethereum/eth/ethconfig" "github.com/ethereum/go-ethereum/eth/ethconfig"
"github.com/ethereum/go-ethereum/eth/filters" "github.com/ethereum/go-ethereum/eth/filters"
"github.com/ethereum/go-ethereum/eth/gasprice" "github.com/ethereum/go-ethereum/eth/gasprice"
"github.com/ethereum/go-ethereum/eth/syncer"
"github.com/ethereum/go-ethereum/eth/tracers" "github.com/ethereum/go-ethereum/eth/tracers"
"github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/ethdb/remotedb" "github.com/ethereum/go-ethereum/ethdb/remotedb"
@ -1997,10 +1997,14 @@ func RegisterFilterAPI(stack *node.Node, backend ethapi.Backend, ethcfg *ethconf
return filterSystem return filterSystem
} }
// RegisterFullSyncTester adds the full-sync tester service into node. // RegisterSyncOverrideService adds the synchronization override service into node.
func RegisterFullSyncTester(stack *node.Node, eth *eth.Ethereum, target common.Hash, exitWhenSynced bool) { func RegisterSyncOverrideService(stack *node.Node, eth *eth.Ethereum, target common.Hash, exitWhenSynced bool) {
catalyst.RegisterFullSyncTester(stack, eth, target, exitWhenSynced) if target != (common.Hash{}) {
log.Info("Registered full-sync tester", "hash", target, "exitWhenSynced", exitWhenSynced) log.Info("Registered sync override service", "hash", target, "exitWhenSynced", exitWhenSynced)
} else {
log.Info("Registered sync override service")
}
syncer.Register(stack, eth, target, exitWhenSynced)
} }
// SetupMetrics configures the metrics system. // SetupMetrics configures the metrics system.

View file

@ -18,7 +18,7 @@ package main
import ( import (
"embed" "embed"
"fmt" "errors"
"io/fs" "io/fs"
"os" "os"
@ -97,7 +97,7 @@ type testConfig struct {
traceTestFile string traceTestFile string
} }
var errPrunedHistory = fmt.Errorf("attempt to access pruned history") var errPrunedHistory = errors.New("attempt to access pruned history")
// validateHistoryPruneErr checks whether the given error is caused by access // validateHistoryPruneErr checks whether the given error is caused by access
// to history before the pruning threshold block (it is an rpc.Error with code 4444). // to history before the pruning threshold block (it is an rpc.Error with code 4444).
@ -109,7 +109,7 @@ func validateHistoryPruneErr(err error, blockNum uint64, historyPruneBlock *uint
if err != nil { if err != nil {
if rpcErr, ok := err.(rpc.Error); ok && rpcErr.ErrorCode() == 4444 { if rpcErr, ok := err.(rpc.Error); ok && rpcErr.ErrorCode() == 4444 {
if historyPruneBlock != nil && blockNum > *historyPruneBlock { if historyPruneBlock != nil && blockNum > *historyPruneBlock {
return fmt.Errorf("pruned history error returned after pruning threshold") return errors.New("pruned history error returned after pruning threshold")
} }
return errPrunedHistory return errPrunedHistory
} }

View file

@ -34,11 +34,10 @@ import (
"encoding/hex" "encoding/hex"
"fmt" "fmt"
"math/big" "math/big"
"math/bits"
"strconv" "strconv"
) )
const uintBits = 32 << (uint64(^uint(0)) >> 63)
// Errors // Errors
var ( var (
ErrEmptyString = &decError{"empty hex string"} ErrEmptyString = &decError{"empty hex string"}
@ -48,7 +47,7 @@ var (
ErrEmptyNumber = &decError{"hex string \"0x\""} ErrEmptyNumber = &decError{"hex string \"0x\""}
ErrLeadingZero = &decError{"hex number with leading zero digits"} ErrLeadingZero = &decError{"hex number with leading zero digits"}
ErrUint64Range = &decError{"hex number > 64 bits"} ErrUint64Range = &decError{"hex number > 64 bits"}
ErrUintRange = &decError{fmt.Sprintf("hex number > %d bits", uintBits)} ErrUintRange = &decError{fmt.Sprintf("hex number > %d bits", bits.UintSize)}
ErrBig256Range = &decError{"hex number > 256 bits"} ErrBig256Range = &decError{"hex number > 256 bits"}
) )

View file

@ -28,11 +28,11 @@ import (
) )
var ( var (
bytesT = reflect.TypeOf(Bytes(nil)) bytesT = reflect.TypeFor[Bytes]()
bigT = reflect.TypeOf((*Big)(nil)) bigT = reflect.TypeFor[*Big]()
uintT = reflect.TypeOf(Uint(0)) uintT = reflect.TypeFor[Uint]()
uint64T = reflect.TypeOf(Uint64(0)) uint64T = reflect.TypeFor[Uint64]()
u256T = reflect.TypeOf((*uint256.Int)(nil)) u256T = reflect.TypeFor[*uint256.Int]()
) )
// Bytes marshals/unmarshals as a JSON string with 0x prefix. // Bytes marshals/unmarshals as a JSON string with 0x prefix.

View file

@ -22,6 +22,7 @@ import (
"encoding/json" "encoding/json"
"errors" "errors"
"math/big" "math/big"
"math/bits"
"testing" "testing"
"github.com/holiman/uint256" "github.com/holiman/uint256"
@ -384,7 +385,7 @@ func TestUnmarshalUint(t *testing.T) {
for _, test := range unmarshalUintTests { for _, test := range unmarshalUintTests {
var v Uint var v Uint
err := json.Unmarshal([]byte(test.input), &v) err := json.Unmarshal([]byte(test.input), &v)
if uintBits == 32 && test.wantErr32bit != nil { if bits.UintSize == 32 && test.wantErr32bit != nil {
checkError(t, test.input, err, test.wantErr32bit) checkError(t, test.input, err, test.wantErr32bit)
continue continue
} }

View file

@ -42,8 +42,8 @@ const (
) )
var ( var (
hashT = reflect.TypeOf(Hash{}) hashT = reflect.TypeFor[Hash]()
addressT = reflect.TypeOf(Address{}) addressT = reflect.TypeFor[Address]()
// MaxAddress represents the maximum possible address value. // MaxAddress represents the maximum possible address value.
MaxAddress = HexToAddress("0xffffffffffffffffffffffffffffffffffffffff") MaxAddress = HexToAddress("0xffffffffffffffffffffffffffffffffffffffff")
@ -466,7 +466,7 @@ func isString(input []byte) bool {
// UnmarshalJSON parses a hash in hex syntax. // UnmarshalJSON parses a hash in hex syntax.
func (d *Decimal) UnmarshalJSON(input []byte) error { func (d *Decimal) UnmarshalJSON(input []byte) error {
if !isString(input) { if !isString(input) {
return &json.UnmarshalTypeError{Value: "non-string", Type: reflect.TypeOf(uint64(0))} return &json.UnmarshalTypeError{Value: "non-string", Type: reflect.TypeFor[uint64]()}
} }
if i, err := strconv.ParseUint(string(input[1:len(input)-1]), 10, 64); err == nil { if i, err := strconv.ParseUint(string(input[1:len(input)-1]), 10, 64); err == nil {
*d = Decimal(i) *d = Decimal(i)

View file

@ -682,7 +682,7 @@ func (bc *BlockChain) initializeHistoryPruning(latest uint64) error {
predefinedPoint := history.PrunePoints[bc.genesisBlock.Hash()] predefinedPoint := history.PrunePoints[bc.genesisBlock.Hash()]
if predefinedPoint == nil || freezerTail != predefinedPoint.BlockNumber { if predefinedPoint == nil || freezerTail != predefinedPoint.BlockNumber {
log.Error("Chain history database is pruned with unknown configuration", "tail", freezerTail) log.Error("Chain history database is pruned with unknown configuration", "tail", freezerTail)
return fmt.Errorf("unexpected database tail") return errors.New("unexpected database tail")
} }
bc.historyPrunePoint.Store(predefinedPoint) bc.historyPrunePoint.Store(predefinedPoint)
return nil return nil
@ -695,15 +695,15 @@ func (bc *BlockChain) initializeHistoryPruning(latest uint64) error {
// action to happen. So just tell them how to do it. // action to happen. So just tell them how to do it.
log.Error(fmt.Sprintf("Chain history mode is configured as %q, but database is not pruned.", bc.cfg.ChainHistoryMode.String())) log.Error(fmt.Sprintf("Chain history mode is configured as %q, but database is not pruned.", bc.cfg.ChainHistoryMode.String()))
log.Error(fmt.Sprintf("Run 'geth prune-history' to prune pre-merge history.")) log.Error(fmt.Sprintf("Run 'geth prune-history' to prune pre-merge history."))
return fmt.Errorf("history pruning requested via configuration") return errors.New("history pruning requested via configuration")
} }
predefinedPoint := history.PrunePoints[bc.genesisBlock.Hash()] predefinedPoint := history.PrunePoints[bc.genesisBlock.Hash()]
if predefinedPoint == nil { if predefinedPoint == nil {
log.Error("Chain history pruning is not supported for this network", "genesis", bc.genesisBlock.Hash()) log.Error("Chain history pruning is not supported for this network", "genesis", bc.genesisBlock.Hash())
return fmt.Errorf("history pruning requested for unknown network") return errors.New("history pruning requested for unknown network")
} else if freezerTail > 0 && freezerTail != predefinedPoint.BlockNumber { } else if freezerTail > 0 && freezerTail != predefinedPoint.BlockNumber {
log.Error("Chain history database is pruned to unknown block", "tail", freezerTail) log.Error("Chain history database is pruned to unknown block", "tail", freezerTail)
return fmt.Errorf("unexpected database tail") return errors.New("unexpected database tail")
} }
bc.historyPrunePoint.Store(predefinedPoint) bc.historyPrunePoint.Store(predefinedPoint)
return nil return nil

View file

@ -540,8 +540,10 @@ func GenerateVerkleChain(config *params.ChainConfig, parent *types.Block, engine
return block, b.receipts return block, b.receipts
} }
sdb := state.NewDatabase(trdb, nil)
for i := 0; i < n; i++ { for i := 0; i < n; i++ {
statedb, err := state.New(parent.Root(), state.NewDatabase(trdb, nil)) statedb, err := state.New(parent.Root(), sdb)
if err != nil { if err != nil {
panic(err) panic(err)
} }

View file

@ -124,19 +124,12 @@ func (cv *ChainView) RawReceipts(number uint64) types.Receipts {
// SharedRange returns the block range shared by two chain views. // SharedRange returns the block range shared by two chain views.
func (cv *ChainView) SharedRange(cv2 *ChainView) common.Range[uint64] { func (cv *ChainView) SharedRange(cv2 *ChainView) common.Range[uint64] {
cv.lock.Lock() if cv == nil || cv2 == nil {
defer cv.lock.Unlock()
if cv == nil || cv2 == nil || !cv.extendNonCanonical() || !cv2.extendNonCanonical() {
return common.Range[uint64]{} return common.Range[uint64]{}
} }
var sharedLen uint64 sharedLen := min(cv.headNumber, cv2.headNumber) + 1
for n := min(cv.headNumber+1-uint64(len(cv.hashes)), cv2.headNumber+1-uint64(len(cv2.hashes))); n <= cv.headNumber && n <= cv2.headNumber; n++ { for sharedLen > 0 && cv.BlockId(sharedLen-1) != cv2.BlockId(sharedLen-1) {
h1, h2 := cv.blockHash(n), cv2.blockHash(n) sharedLen--
if h1 != h2 || h1 == (common.Hash{}) {
break
}
sharedLen = n + 1
} }
return common.NewRange(0, sharedLen) return common.NewRange(0, sharedLen)
} }

View file

@ -18,5 +18,11 @@
{"blockNumber": 3221725, "blockId": "0xe771f897dece48b1583cc1d1d10de8015da57407eb1fdf239fdbe46eaab85143", "firstIndex": 1140850137}, {"blockNumber": 3221725, "blockId": "0xe771f897dece48b1583cc1d1d10de8015da57407eb1fdf239fdbe46eaab85143", "firstIndex": 1140850137},
{"blockNumber": 3357164, "blockId": "0x6252d0aa54c79623b0680069c88d7b5c47983f0d5c4845b6c811b8d9b5e8ff3c", "firstIndex": 1207959453}, {"blockNumber": 3357164, "blockId": "0x6252d0aa54c79623b0680069c88d7b5c47983f0d5c4845b6c811b8d9b5e8ff3c", "firstIndex": 1207959453},
{"blockNumber": 3447019, "blockId": "0xeb7d585e1e063f3cc05ed399fbf6c2df63c271f62f030acb804e9fb1e74b6dc1", "firstIndex": 1275067542}, {"blockNumber": 3447019, "blockId": "0xeb7d585e1e063f3cc05ed399fbf6c2df63c271f62f030acb804e9fb1e74b6dc1", "firstIndex": 1275067542},
{"blockNumber": 3546397, "blockId": "0xdabdef7defa4281180a57c5af121877b82274f15ccf074ea0096146f4c246df2", "firstIndex": 1342176778} {"blockNumber": 3546397, "blockId": "0xdabdef7defa4281180a57c5af121877b82274f15ccf074ea0096146f4c246df2", "firstIndex": 1342176778},
{"blockNumber": 3867885, "blockId": "0x8be069dd7a3e2ffb869ee164d11b28555233d2510b134ab9d5484fdae55d2225", "firstIndex": 1409285539},
{"blockNumber": 3935446, "blockId": "0xc91a61bc215bbcccc3020c62e5c8153162df0d8bcc59813d74671b2d24903ed7", "firstIndex": 1476394742},
{"blockNumber": 3989508, "blockId": "0xc85dec36a767e44237842ef51915944c2a49780c8c394a3aa6cfb013c99cf58b", "firstIndex": 1543503452},
{"blockNumber": 4057078, "blockId": "0xccdb79f6705629cb6ab1667a1244938f60911236549143fcff23a3989213e67e", "firstIndex": 1610612030},
{"blockNumber": 4126499, "blockId": "0x92f2ef21fc911e87e81e38373d5f2915587b9648a0ab3cf4fcfe3e5aaffe7b85", "firstIndex": 1677720416},
{"blockNumber": 4239335, "blockId": "0x64fbd22965eb583a584552b7edb9b7ce26fb6aad247c1063d0d5a4d11cbcc58c", "firstIndex": 1744830176}
] ]

View file

@ -267,5 +267,26 @@
{"blockNumber": 22168652, "blockId": "0x6ae43618c915e636794e2cc2d75dde9992766881c7405fe6479c045ed4bee57e", "firstIndex": 17850956277}, {"blockNumber": 22168652, "blockId": "0x6ae43618c915e636794e2cc2d75dde9992766881c7405fe6479c045ed4bee57e", "firstIndex": 17850956277},
{"blockNumber": 22190975, "blockId": "0x9437121647899a4b7b84d67fbea7cc6ff967481c2eab4328ccd86e2cefe19420", "firstIndex": 17918066140}, {"blockNumber": 22190975, "blockId": "0x9437121647899a4b7b84d67fbea7cc6ff967481c2eab4328ccd86e2cefe19420", "firstIndex": 17918066140},
{"blockNumber": 22234357, "blockId": "0x036030830134f9224160d5a0b62da35ec7813dc8855d554bd22e9d38545243ed", "firstIndex": 17985175075}, {"blockNumber": 22234357, "blockId": "0x036030830134f9224160d5a0b62da35ec7813dc8855d554bd22e9d38545243ed", "firstIndex": 17985175075},
{"blockNumber": 22276736, "blockId": "0x5ceb96d98aa1b4c1c2f2fa253ae9cdb1b04e0420c11bf795400e8762c0a1635c", "firstIndex": 18052284344} {"blockNumber": 22276736, "blockId": "0x5ceb96d98aa1b4c1c2f2fa253ae9cdb1b04e0420c11bf795400e8762c0a1635c", "firstIndex": 18052284344},
{"blockNumber": 22321282, "blockId": "0x8a601ebf6a757020c6d43a978f0bd2c150c4acc1ffdd50c7ee88afc78b0c11f8", "firstIndex": 18119392242},
{"blockNumber": 22349231, "blockId": "0xb751c026a92ba5be95ad7ea4e2729a175b0d0e11a4c108f47cab232b4715d1a2", "firstIndex": 18186501218},
{"blockNumber": 22377469, "blockId": "0xa47916860a22f7e26761ec2d7f717410791bd3ed0237b2f6266750214c7bbf08", "firstIndex": 18253610249},
{"blockNumber": 22422685, "blockId": "0x8beaee39603af55fad222730f556c840c41cd76a5eef0bad367ac94d3b86c7aa", "firstIndex": 18320716377},
{"blockNumber": 22462378, "blockId": "0x6dba9c5d2949f5a6a072267b590e8b15e6fb157a0fc22719387f1fd6bfcd8d5d", "firstIndex": 18387828426},
{"blockNumber": 22500185, "blockId": "0x2484c380df0a8f7edfdf8d917570d23fab8499aea80c35b6cf4e5fe1e34106e9", "firstIndex": 18454936227},
{"blockNumber": 22539624, "blockId": "0xd418071906803d25afc3842a6a6468ad3b5fea27107b314ce4e2ccf08b478acf", "firstIndex": 18522044531},
{"blockNumber": 22577021, "blockId": "0xff222982693f3ff60d2097822171f80a6ddd979080aeb7e995bfb1b973497c84", "firstIndex": 18589154438},
{"blockNumber": 22614525, "blockId": "0x9868da1fea2ffca3f67e35570f02eb5707b27f6967ea4a109eb4ddbf24566efd", "firstIndex": 18656264174},
{"blockNumber": 22652848, "blockId": "0x060a911da11ab0f1dda307f5196e622d23901d198925749e70ab58a439477c5a", "firstIndex": 18723372617},
{"blockNumber": 22692432, "blockId": "0x6a937f2c283aba8c778c1f5ef340b225fd820f8a7dfa6f24f5fe541994f32f2d", "firstIndex": 18790480232},
{"blockNumber": 22731200, "blockId": "0x00d57a9e7a2dad252436fe9f0382c6a8860d301a9f9ffe6d7ac64c82b95300f8", "firstIndex": 18857590076},
{"blockNumber": 22769000, "blockId": "0xa48db20307c19c373ef2d31d85088ea14b8df0450491c31982504c87b04edbc0", "firstIndex": 18924699130},
{"blockNumber": 22808126, "blockId": "0x1419c64ff003edca0586f1c8ec3063da5c54c57ff826cfb34bc866cc18949653", "firstIndex": 18991807807},
{"blockNumber": 22845231, "blockId": "0x691f87217e61c5d7ae9ad53a44d30e1ab6b1cc3f2b689b9fbf7c38fbacacfe3e", "firstIndex": 19058917062},
{"blockNumber": 22884189, "blockId": "0x7f102d44c0ea7803f5b0e1a98a6abf0e8383eb99fb114d6f7b4591753ce8bba3", "firstIndex": 19126024122},
{"blockNumber": 22920923, "blockId": "0x04fe6179495016fc3fe56d8ef5311c360a5761a898262173849c3494fdd73d92", "firstIndex": 19193134595},
{"blockNumber": 22958100, "blockId": "0xe38e0ff7b0c4065ca42ea577bc32f2566ca46f2ddeedcc4bc1f8fb00e7f26329", "firstIndex": 19260242424},
{"blockNumber": 22988600, "blockId": "0x04ca74758b22e0ea54b8c992022ff21c16a2af9c45144c3b0f80de921a7eee82", "firstIndex": 19327351273},
{"blockNumber": 23018392, "blockId": "0x61cc979b00bc97b48356f986a5b9ec997d674bc904c2a2e4b0f17de08e50b3bb", "firstIndex": 19394459627},
{"blockNumber": 23048524, "blockId": "0x489de15d95739ede4ab15e8b5151d80d4dc85ae10e7be800b1a4723094a678df", "firstIndex": 19461570073}
] ]

View file

@ -68,5 +68,32 @@
{"blockNumber": 7911722, "blockId": "0x9a85e48e3135c97c51fc1786f2af0596c802e021b6c53cfca65a129cafcd23ed", "firstIndex": 4496287265}, {"blockNumber": 7911722, "blockId": "0x9a85e48e3135c97c51fc1786f2af0596c802e021b6c53cfca65a129cafcd23ed", "firstIndex": 4496287265},
{"blockNumber": 7960147, "blockId": "0xc9359cc76d7090e1c8a031108f0ab7a8935d971efd4325fe53612a1d99562f6f", "firstIndex": 4563402388}, {"blockNumber": 7960147, "blockId": "0xc9359cc76d7090e1c8a031108f0ab7a8935d971efd4325fe53612a1d99562f6f", "firstIndex": 4563402388},
{"blockNumber": 8030418, "blockId": "0x21867e68cd8327aed2da2601399d60f7f9e41dca4a4f2f9be982e5a2b9304a88", "firstIndex": 4630511616}, {"blockNumber": 8030418, "blockId": "0x21867e68cd8327aed2da2601399d60f7f9e41dca4a4f2f9be982e5a2b9304a88", "firstIndex": 4630511616},
{"blockNumber": 8087701, "blockId": "0x0fa8c8d7549cc9a8d308262706fe248efe759f8b63511efb1e7f3926e9af2dcb", "firstIndex": 4697614758} {"blockNumber": 8087701, "blockId": "0x0fa8c8d7549cc9a8d308262706fe248efe759f8b63511efb1e7f3926e9af2dcb", "firstIndex": 4697614758},
{"blockNumber": 8149130, "blockId": "0x655ea638fd9e35cc25f4332f260d7bf98f4f6fa9a72e1bff861209f18659e94c", "firstIndex": 4764727744},
{"blockNumber": 8208672, "blockId": "0xb5847a670dc3b6181f9e2e40e4218548048366d237a0d12e938b9879bc8cf800", "firstIndex": 4831837882},
{"blockNumber": 8271345, "blockId": "0x96797214946f29093883b877ccb0f2a9f771a9a3db3794a642b5dcb781c4d194", "firstIndex": 4898942160},
{"blockNumber": 8302858, "blockId": "0x6a5977b3382ca69a9e0412333f97b911c1f69f857d8f31dd0fc930980e24f2fc", "firstIndex": 4966054626},
{"blockNumber": 8333618, "blockId": "0x2547294aa23b67c42adbdddfcf424b17a95c4ff0f352a6a2442c529cfb0c892a", "firstIndex": 5033163605},
{"blockNumber": 8360582, "blockId": "0xf34f5dceb0ef22e0f782b56c12790472acc675997b9c45075bd4e18a9dacd03c", "firstIndex": 5100273631},
{"blockNumber": 8387230, "blockId": "0x0fbea42e87620b5beeb76b67febc173847c54333d7dce9fa2f8f2a3fa9c8c22a", "firstIndex": 5167381673},
{"blockNumber": 8414795, "blockId": "0x6c9c000cf5e35da3a7e9e1cd56147c8ce9b43a76d6de945675efd9dc03b628c9", "firstIndex": 5234477010},
{"blockNumber": 8444749, "blockId": "0xba85f8c9abaddc34e2113eb49385667ba4b008168ae701f46aa7a7ce78c633a1", "firstIndex": 5301598562},
{"blockNumber": 8474551, "blockId": "0x720866a40242f087dd25b6f0dd79224884f435b114a39e60c5669f5c942c78c1", "firstIndex": 5368707262},
{"blockNumber": 8501310, "blockId": "0x2b6da233532c701202fb5ac67e005f7d3eb71f88a9fac10c25d24dd11ada05e5", "firstIndex": 5435803858},
{"blockNumber": 8526970, "blockId": "0x005f9bbad0a10234129d09894d7fcf04bf1398d326510eedb4195808c282802d", "firstIndex": 5502926509},
{"blockNumber": 8550412, "blockId": "0x37c9f3efc9f33cf62f590087c8c9ac70011883f75e648647a6fd0fec00ca627c", "firstIndex": 5570034950},
{"blockNumber": 8573540, "blockId": "0x81cfb46a07be7c70bb8a0f76b03a4cd502f92032bea68ad7ba10e26351673000", "firstIndex": 5637137662},
{"blockNumber": 8590416, "blockId": "0x5c223d58ef22d7b0dd8c498e8498da4787b5dc706681c2bc83849441f5d0922d", "firstIndex": 5704252906},
{"blockNumber": 8616793, "blockId": "0x9043ce02742fb5ec43a696602867b7ce6003a95b36cd28a37eeb9785a46ad49f", "firstIndex": 5771357264},
{"blockNumber": 8647290, "blockId": "0xd90115193764b0a33f3f2a719381b3ddbce2532607c72fb287a864eb391eeada", "firstIndex": 5838466144},
{"blockNumber": 8673192, "blockId": "0x9bc92d340cccaf4c8c03372efc24eb92c5159106729de8d2e9e064f5568d082b", "firstIndex": 5905577457},
{"blockNumber": 8700694, "blockId": "0xb3d656a173b962bc6825198e94a4974289db06a8998060bd0f5ee2044a7a7deb", "firstIndex": 5972679345},
{"blockNumber": 8724533, "blockId": "0x253ffc6d77b88fe18736e4c313e9930341c444bc87b2ee22b26cfe8d9d0b178d", "firstIndex": 6039795829},
{"blockNumber": 8743948, "blockId": "0x04eb66d0261705d31e629193148d0685058d7759ba5f95d2d38e412dbadb8256", "firstIndex": 6106901747},
{"blockNumber": 8758378, "blockId": "0x64adf54e662d11db716610157da672c3d8b45f001dbce40a269871b86a84d026", "firstIndex": 6174011544},
{"blockNumber": 8777722, "blockId": "0x0a7f9a956024b404c915e70b42221aa027b2dd715b0697f099dccefae0b9af97", "firstIndex": 6241124215},
{"blockNumber": 8800154, "blockId": "0x411f90dc18f2bca31fa63615c2866c907bbac1fae8c06782cabfaf788efba665", "firstIndex": 6308233216},
{"blockNumber": 8829725, "blockId": "0x5686f3a5eec1b070d0113c588f8f4a560d57ad96b8045cedb5c08bbadaa0273e", "firstIndex": 6375340033},
{"blockNumber": 8858036, "blockId": "0x4f9b5d9fac9c6f6e2224f613cda12e8ab95d636774ce87489dce8a9f805ee2e5", "firstIndex": 6442450330},
{"blockNumber": 8884811, "blockId": "0x9cf74f978872683802c065e72b5a5326fdad95f19733c34d927b575cd85fd0bd", "firstIndex": 6509559380}
] ]

View file

@ -241,9 +241,8 @@ func checksumToBytes(hash uint32) [4]byte {
// them, one for the block number based forks and the second for the timestamps. // them, one for the block number based forks and the second for the timestamps.
func gatherForks(config *params.ChainConfig, genesis uint64) ([]uint64, []uint64) { func gatherForks(config *params.ChainConfig, genesis uint64) ([]uint64, []uint64) {
// Gather all the fork block numbers via reflection // Gather all the fork block numbers via reflection
kind := reflect.TypeOf(params.ChainConfig{}) kind := reflect.TypeFor[params.ChainConfig]()
conf := reflect.ValueOf(config).Elem() conf := reflect.ValueOf(config).Elem()
x := uint64(0)
var ( var (
forksByBlock []uint64 forksByBlock []uint64
forksByTime []uint64 forksByTime []uint64
@ -258,12 +257,12 @@ func gatherForks(config *params.ChainConfig, genesis uint64) ([]uint64, []uint64
} }
// Extract the fork rule block number or timestamp and aggregate it // Extract the fork rule block number or timestamp and aggregate it
if field.Type == reflect.TypeOf(&x) { if field.Type == reflect.TypeFor[*uint64]() {
if rule := conf.Field(i).Interface().(*uint64); rule != nil { if rule := conf.Field(i).Interface().(*uint64); rule != nil {
forksByTime = append(forksByTime, *rule) forksByTime = append(forksByTime, *rule)
} }
} }
if field.Type == reflect.TypeOf(new(big.Int)) { if field.Type == reflect.TypeFor[*big.Int]() {
if rule := conf.Field(i).Interface().(*big.Int); rule != nil { if rule := conf.Field(i).Interface().(*big.Int); rule != nil {
forksByBlock = append(forksByBlock, rule.Uint64()) forksByBlock = append(forksByBlock, rule.Uint64())
} }

View file

@ -0,0 +1,105 @@
// Copyright 2025 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package overlay
import (
"bytes"
"encoding/gob"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/log"
)
// TransitionState is a structure that holds the progress markers of the
// translation process.
type TransitionState struct {
	CurrentAccountAddress *common.Address // address of the last translated account
	CurrentSlotHash       common.Hash     // hash of the last translated storage slot
	CurrentPreimageOffset int64           // next byte to read from the preimage file
	Started, Ended        bool            // whether the translation has started / completed

	// Mark whether the storage for an account has been processed. This is useful if the
	// maximum number of leaves of the conversion is reached before the whole storage is
	// processed.
	StorageProcessed bool

	BaseRoot common.Hash // hash of the last read-only MPT base tree
}
// InTransition returns true if the translation process is in progress.
func (ts *TransitionState) InTransition() bool {
	if ts == nil {
		return false
	}
	return ts.Started && !ts.Ended
}
// Transitioned returns true if the translation process has been completed.
func (ts *TransitionState) Transitioned() bool {
	if ts == nil {
		return false
	}
	return ts.Ended
}
// Copy returns a deep copy of the TransitionState object.
func (ts *TransitionState) Copy() *TransitionState {
	ret := &TransitionState{
		Started:               ts.Started,
		Ended:                 ts.Ended,
		CurrentSlotHash:       ts.CurrentSlotHash,
		CurrentPreimageOffset: ts.CurrentPreimageOffset,
		StorageProcessed:      ts.StorageProcessed,
		// BaseRoot was previously omitted, yielding a copy whose base
		// tree root silently reset to the zero hash; include it so the
		// result is a faithful deep copy as documented.
		BaseRoot: ts.BaseRoot,
	}
	// Duplicate the pointed-to address so the copy does not alias the
	// original's pointer target.
	if ts.CurrentAccountAddress != nil {
		addr := *ts.CurrentAccountAddress
		ret.CurrentAccountAddress = &addr
	}
	return ret
}
// LoadTransitionState retrieves the Verkle transition state associated with
// the given state root hash from the database. If no state is stored for that
// root, a fresh state is returned whose Ended flag mirrors whether the
// database was created as a verkle database. A nil return indicates a stored
// state that could not be decoded.
func LoadTransitionState(db ethdb.KeyValueReader, root common.Hash, isVerkle bool) *TransitionState {
	var ts *TransitionState
	data, _ := rawdb.ReadVerkleTransitionState(db, root)

	// if a state could be read from the db, attempt to decode it
	if len(data) > 0 {
		var (
			newts TransitionState
			buf   = bytes.NewBuffer(data)
			dec   = gob.NewDecoder(buf)
		)
		// Decode transition state
		if err := dec.Decode(&newts); err != nil {
			log.Error("failed to decode transition state", "err", err)
			return nil
		}
		ts = &newts
	}
	// Fallback that should only happen before the transition
	if ts == nil {
		// Initialize the first transition state, with the "ended"
		// field set to true if the database was created
		// as a verkle database.
		// Note: log the boolean flag, not the database handle.
		log.Debug("no transition state found, starting fresh", "is verkle", isVerkle)

		// Start with a fresh state
		ts = &TransitionState{Ended: isVerkle}
	}
	return ts
}

View file

@ -0,0 +1,30 @@
// Copyright 2025 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package rawdb
import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/ethdb"
)
// ReadVerkleTransitionState retrieves the serialized verkle transition state
// stored under the given state root hash, along with any lookup error.
func ReadVerkleTransitionState(db ethdb.KeyValueReader, hash common.Hash) ([]byte, error) {
	return db.Get(transitionStateKey(hash))
}
// WriteVerkleTransitionState stores the serialized verkle transition state
// under the given state root hash.
func WriteVerkleTransitionState(db ethdb.KeyValueWriter, hash common.Hash, state []byte) error {
	return db.Put(transitionStateKey(hash), state)
}

View file

@ -604,7 +604,7 @@ var knownMetadataKeys = [][]byte{
snapshotGeneratorKey, snapshotRecoveryKey, txIndexTailKey, fastTxLookupLimitKey, snapshotGeneratorKey, snapshotRecoveryKey, txIndexTailKey, fastTxLookupLimitKey,
uncleanShutdownKey, badBlockKey, transitionStatusKey, skeletonSyncStatusKey, uncleanShutdownKey, badBlockKey, transitionStatusKey, skeletonSyncStatusKey,
persistentStateIDKey, trieJournalKey, snapshotSyncStatusKey, snapSyncStatusFlagKey, persistentStateIDKey, trieJournalKey, snapshotSyncStatusKey, snapSyncStatusFlagKey,
filterMapsRangeKey, headStateHistoryIndexKey, filterMapsRangeKey, headStateHistoryIndexKey, VerkleTransitionStatePrefix,
} }
// printChainMetadata prints out chain metadata to stderr. // printChainMetadata prints out chain metadata to stderr.

View file

@ -158,6 +158,9 @@ var (
preimageCounter = metrics.NewRegisteredCounter("db/preimage/total", nil) preimageCounter = metrics.NewRegisteredCounter("db/preimage/total", nil)
preimageHitsCounter = metrics.NewRegisteredCounter("db/preimage/hits", nil) preimageHitsCounter = metrics.NewRegisteredCounter("db/preimage/hits", nil)
preimageMissCounter = metrics.NewRegisteredCounter("db/preimage/miss", nil) preimageMissCounter = metrics.NewRegisteredCounter("db/preimage/miss", nil)
// Verkle transition information
VerkleTransitionStatePrefix = []byte("verkle-transition-state-")
) )
// LegacyTxLookupEntry is the legacy TxLookupEntry definition with some unnecessary // LegacyTxLookupEntry is the legacy TxLookupEntry definition with some unnecessary
@ -397,3 +400,8 @@ func storageHistoryIndexBlockKey(addressHash common.Hash, storageHash common.Has
binary.BigEndian.PutUint32(buf[:], blockID) binary.BigEndian.PutUint32(buf[:], blockID)
return append(append(append(StateHistoryStorageBlockPrefix, addressHash.Bytes()...), storageHash.Bytes()...), buf[:]...) return append(append(append(StateHistoryStorageBlockPrefix, addressHash.Bytes()...), storageHash.Bytes()...), buf[:]...)
} }
// transitionStateKey = VerkleTransitionStatePrefix + hash
func transitionStateKey(hash common.Hash) []byte {
	return append(VerkleTransitionStatePrefix, hash.Bytes()...)
}

View file

@ -145,10 +145,7 @@ func (al *accessList) Equal(other *accessList) bool {
// PrettyPrint prints the contents of the access list in a human-readable form // PrettyPrint prints the contents of the access list in a human-readable form
func (al *accessList) PrettyPrint() string { func (al *accessList) PrettyPrint() string {
out := new(strings.Builder) out := new(strings.Builder)
var sortedAddrs []common.Address sortedAddrs := slices.Collect(maps.Keys(al.addresses))
for addr := range al.addresses {
sortedAddrs = append(sortedAddrs, addr)
}
slices.SortFunc(sortedAddrs, common.Address.Cmp) slices.SortFunc(sortedAddrs, common.Address.Cmp)
for _, addr := range sortedAddrs { for _, addr := range sortedAddrs {
idx := al.addresses[addr] idx := al.addresses[addr]

View file

@ -21,6 +21,7 @@ import (
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/lru" "github.com/ethereum/go-ethereum/common/lru"
"github.com/ethereum/go-ethereum/core/overlay"
"github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/state/snapshot" "github.com/ethereum/go-ethereum/core/state/snapshot"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
@ -151,17 +152,21 @@ type CachingDB struct {
codeCache *lru.SizeConstrainedCache[common.Hash, []byte] codeCache *lru.SizeConstrainedCache[common.Hash, []byte]
codeSizeCache *lru.Cache[common.Hash, int] codeSizeCache *lru.Cache[common.Hash, int]
pointCache *utils.PointCache pointCache *utils.PointCache
// Transition-specific fields
TransitionStatePerRoot *lru.Cache[common.Hash, *overlay.TransitionState]
} }
// NewDatabase creates a state database with the provided data sources. // NewDatabase creates a state database with the provided data sources.
func NewDatabase(triedb *triedb.Database, snap *snapshot.Tree) *CachingDB { func NewDatabase(triedb *triedb.Database, snap *snapshot.Tree) *CachingDB {
return &CachingDB{ return &CachingDB{
disk: triedb.Disk(), disk: triedb.Disk(),
triedb: triedb, triedb: triedb,
snap: snap, snap: snap,
codeCache: lru.NewSizeConstrainedCache[common.Hash, []byte](codeCacheSize), codeCache: lru.NewSizeConstrainedCache[common.Hash, []byte](codeCacheSize),
codeSizeCache: lru.NewCache[common.Hash, int](codeSizeCacheSize), codeSizeCache: lru.NewCache[common.Hash, int](codeSizeCacheSize),
pointCache: utils.NewPointCache(pointCacheSize), pointCache: utils.NewPointCache(pointCacheSize),
TransitionStatePerRoot: lru.NewCache[common.Hash, *overlay.TransitionState](1000),
} }
} }
@ -224,7 +229,13 @@ func (db *CachingDB) ReadersWithCacheStats(stateRoot common.Hash) (ReaderWithSta
// OpenTrie opens the main account trie at a specific root hash. // OpenTrie opens the main account trie at a specific root hash.
func (db *CachingDB) OpenTrie(root common.Hash) (Trie, error) { func (db *CachingDB) OpenTrie(root common.Hash) (Trie, error) {
if db.triedb.IsVerkle() { if db.triedb.IsVerkle() {
return trie.NewVerkleTrie(root, db.triedb, db.pointCache) ts := overlay.LoadTransitionState(db.TrieDB().Disk(), root, db.triedb.IsVerkle())
if ts.InTransition() {
panic("transition isn't supported yet")
}
if ts.Transitioned() {
return trie.NewVerkleTrie(root, db.triedb, db.pointCache)
}
} }
tr, err := trie.NewStateTrie(trie.StateTrieID(root), db.triedb) tr, err := trie.NewStateTrie(trie.StateTrieID(root), db.triedb)
if err != nil { if err != nil {
@ -235,9 +246,6 @@ func (db *CachingDB) OpenTrie(root common.Hash) (Trie, error) {
// OpenStorageTrie opens the storage trie of an account. // OpenStorageTrie opens the storage trie of an account.
func (db *CachingDB) OpenStorageTrie(stateRoot common.Hash, address common.Address, root common.Hash, self Trie) (Trie, error) { func (db *CachingDB) OpenStorageTrie(stateRoot common.Hash, address common.Address, root common.Hash, self Trie) (Trie, error) {
// In the verkle case, there is only one tree. But the two-tree structure
// is hardcoded in the codebase. So we need to return the same trie in this
// case.
if db.triedb.IsVerkle() { if db.triedb.IsVerkle() {
return self, nil return self, nil
} }

View file

@ -241,6 +241,7 @@ func newTrieReader(root common.Hash, db *triedb.Database, cache *utils.PointCach
if !db.IsVerkle() { if !db.IsVerkle() {
tr, err = trie.NewStateTrie(trie.StateTrieID(root), db) tr, err = trie.NewStateTrie(trie.StateTrieID(root), db)
} else { } else {
// TODO @gballet determine the trie type (verkle or overlay) by transition state
tr, err = trie.NewVerkleTrie(root, db, cache) tr, err = trie.NewVerkleTrie(root, db, cache)
} }
if err != nil { if err != nil {

View file

@ -350,7 +350,7 @@ func iterateJournal(db ethdb.KeyValueReader, callback journalCallback) error {
} }
if len(destructs) > 0 { if len(destructs) > 0 {
log.Warn("Incompatible legacy journal detected", "version", journalV0) log.Warn("Incompatible legacy journal detected", "version", journalV0)
return fmt.Errorf("incompatible legacy journal detected") return errors.New("incompatible legacy journal detected")
} }
} }
if err := r.Decode(&accounts); err != nil { if err := r.Decode(&accounts); err != nil {

View file

@ -258,7 +258,7 @@ func (s *StateDB) GetLogs(hash common.Hash, blockNumber uint64, blockHash common
} }
func (s *StateDB) Logs() []*types.Log { func (s *StateDB) Logs() []*types.Log {
var logs []*types.Log logs := make([]*types.Log, 0, s.logSize)
for _, lgs := range s.logs { for _, lgs := range s.logs {
logs = append(logs, lgs...) logs = append(logs, lgs...)
} }

View file

@ -18,6 +18,7 @@ package state
import ( import (
"fmt" "fmt"
"maps"
"slices" "slices"
"strings" "strings"
@ -70,19 +71,13 @@ func (t transientStorage) Copy() transientStorage {
// PrettyPrint prints the contents of the access list in a human-readable form // PrettyPrint prints the contents of the access list in a human-readable form
func (t transientStorage) PrettyPrint() string { func (t transientStorage) PrettyPrint() string {
out := new(strings.Builder) out := new(strings.Builder)
var sortedAddrs []common.Address sortedAddrs := slices.Collect(maps.Keys(t))
for addr := range t { slices.SortFunc(sortedAddrs, common.Address.Cmp)
sortedAddrs = append(sortedAddrs, addr)
slices.SortFunc(sortedAddrs, common.Address.Cmp)
}
for _, addr := range sortedAddrs { for _, addr := range sortedAddrs {
fmt.Fprintf(out, "%#x:", addr) fmt.Fprintf(out, "%#x:", addr)
var sortedKeys []common.Hash
storage := t[addr] storage := t[addr]
for key := range storage { sortedKeys := slices.Collect(maps.Keys(storage))
sortedKeys = append(sortedKeys, key)
}
slices.SortFunc(sortedKeys, common.Hash.Cmp) slices.SortFunc(sortedKeys, common.Hash.Cmp)
for _, key := range sortedKeys { for _, key := range sortedKeys {
fmt.Fprintf(out, " %X : %X\n", key, storage[key]) fmt.Fprintf(out, " %X : %X\n", key, storage[key])

View file

@ -17,7 +17,7 @@
package tracing package tracing
import ( import (
"fmt" "errors"
"math/big" "math/big"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
@ -39,14 +39,14 @@ type entry interface {
// WrapWithJournal wraps the given tracer with a journaling layer. // WrapWithJournal wraps the given tracer with a journaling layer.
func WrapWithJournal(hooks *Hooks) (*Hooks, error) { func WrapWithJournal(hooks *Hooks) (*Hooks, error) {
if hooks == nil { if hooks == nil {
return nil, fmt.Errorf("wrapping nil tracer") return nil, errors.New("wrapping nil tracer")
} }
// No state change to journal, return the wrapped hooks as is // No state change to journal, return the wrapped hooks as is
if hooks.OnBalanceChange == nil && hooks.OnNonceChange == nil && hooks.OnNonceChangeV2 == nil && hooks.OnCodeChange == nil && hooks.OnStorageChange == nil { if hooks.OnBalanceChange == nil && hooks.OnNonceChange == nil && hooks.OnNonceChangeV2 == nil && hooks.OnCodeChange == nil && hooks.OnStorageChange == nil {
return hooks, nil return hooks, nil
} }
if hooks.OnNonceChange != nil && hooks.OnNonceChangeV2 != nil { if hooks.OnNonceChange != nil && hooks.OnNonceChangeV2 != nil {
return nil, fmt.Errorf("cannot have both OnNonceChange and OnNonceChangeV2") return nil, errors.New("cannot have both OnNonceChange and OnNonceChangeV2")
} }
// Create a new Hooks instance and copy all hooks // Create a new Hooks instance and copy all hooks

View file

@ -293,7 +293,7 @@ func newTracerAllHooks() *tracerAllHooks {
t := &tracerAllHooks{hooksCalled: make(map[string]bool)} t := &tracerAllHooks{hooksCalled: make(map[string]bool)}
// Initialize all hooks to false. We will use this to // Initialize all hooks to false. We will use this to
// get total count of hooks. // get total count of hooks.
hooksType := reflect.TypeOf((*Hooks)(nil)).Elem() hooksType := reflect.TypeFor[Hooks]()
for i := 0; i < hooksType.NumField(); i++ { for i := 0; i < hooksType.NumField(); i++ {
t.hooksCalled[hooksType.Field(i).Name] = false t.hooksCalled[hooksType.Field(i).Name] = false
} }

View file

@ -36,6 +36,7 @@ import (
"github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/txpool" "github.com/ethereum/go-ethereum/core/txpool"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto/kzg4844"
"github.com/ethereum/go-ethereum/event" "github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/metrics" "github.com/ethereum/go-ethereum/metrics"
@ -1299,32 +1300,86 @@ func (p *BlobPool) GetMetadata(hash common.Hash) *txpool.TxMetadata {
// GetBlobs returns a number of blobs and proofs for the given versioned hashes. // GetBlobs returns a number of blobs and proofs for the given versioned hashes.
// This is a utility method for the engine API, enabling consensus clients to // This is a utility method for the engine API, enabling consensus clients to
// retrieve blobs from the pools directly instead of the network. // retrieve blobs from the pools directly instead of the network.
func (p *BlobPool) GetBlobs(vhashes []common.Hash) []*types.BlobTxSidecar { func (p *BlobPool) GetBlobs(vhashes []common.Hash, version byte) ([]*kzg4844.Blob, []kzg4844.Commitment, [][]kzg4844.Proof, error) {
sidecars := make([]*types.BlobTxSidecar, len(vhashes)) var (
for idx, vhash := range vhashes { blobs = make([]*kzg4844.Blob, len(vhashes))
// Retrieve the datastore item (in a short lock) commitments = make([]kzg4844.Commitment, len(vhashes))
p.lock.RLock() proofs = make([][]kzg4844.Proof, len(vhashes))
id, exists := p.lookup.storeidOfBlob(vhash)
if !exists {
p.lock.RUnlock()
continue
}
data, err := p.store.Get(id)
p.lock.RUnlock()
// After releasing the lock, try to fill any blobs requested indices = make(map[common.Hash][]int)
if err != nil { filled = make(map[common.Hash]struct{})
log.Error("Tracked blob transaction missing from store", "id", id, "err", err) )
continue for i, h := range vhashes {
} indices[h] = append(indices[h], i)
item := new(types.Transaction)
if err = rlp.DecodeBytes(data, item); err != nil {
log.Error("Blobs corrupted for traced transaction", "id", id, "err", err)
continue
}
sidecars[idx] = item.BlobTxSidecar()
} }
return sidecars for _, vhash := range vhashes {
// Skip duplicate vhash that was already resolved in a previous iteration
if _, ok := filled[vhash]; ok {
continue
}
// Retrieve the corresponding blob tx with the vhash
p.lock.RLock()
txID, exists := p.lookup.storeidOfBlob(vhash)
p.lock.RUnlock()
if !exists {
return nil, nil, nil, fmt.Errorf("blob with vhash %x is not found", vhash)
}
data, err := p.store.Get(txID)
if err != nil {
return nil, nil, nil, err
}
// Decode the blob transaction
tx := new(types.Transaction)
if err := rlp.DecodeBytes(data, tx); err != nil {
return nil, nil, nil, err
}
sidecar := tx.BlobTxSidecar()
if sidecar == nil {
return nil, nil, nil, fmt.Errorf("blob tx without sidecar %x", tx.Hash())
}
// Traverse the blobs in the transaction
for i, hash := range tx.BlobHashes() {
list, ok := indices[hash]
if !ok {
continue // non-interesting blob
}
var pf []kzg4844.Proof
switch version {
case types.BlobSidecarVersion0:
if sidecar.Version == types.BlobSidecarVersion0 {
pf = []kzg4844.Proof{sidecar.Proofs[i]}
} else {
proof, err := kzg4844.ComputeBlobProof(&sidecar.Blobs[i], sidecar.Commitments[i])
if err != nil {
return nil, nil, nil, err
}
pf = []kzg4844.Proof{proof}
}
case types.BlobSidecarVersion1:
if sidecar.Version == types.BlobSidecarVersion0 {
cellProofs, err := kzg4844.ComputeCellProofs(&sidecar.Blobs[i])
if err != nil {
return nil, nil, nil, err
}
pf = cellProofs
} else {
cellProofs, err := sidecar.CellProofsAt(i)
if err != nil {
return nil, nil, nil, err
}
pf = cellProofs
}
}
for _, index := range list {
blobs[index] = &sidecar.Blobs[i]
commitments[index] = sidecar.Commitments[i]
proofs[index] = pf
}
filled[hash] = struct{}{}
}
}
return blobs, commitments, proofs, nil
} }
// AvailableBlobs returns the number of blobs that are available in the subpool. // AvailableBlobs returns the number of blobs that are available in the subpool.

View file

@ -26,6 +26,7 @@ import (
"math/big" "math/big"
"os" "os"
"path/filepath" "path/filepath"
"reflect"
"sync" "sync"
"testing" "testing"
@ -50,6 +51,7 @@ var (
testBlobCommits []kzg4844.Commitment testBlobCommits []kzg4844.Commitment
testBlobProofs []kzg4844.Proof testBlobProofs []kzg4844.Proof
testBlobVHashes [][32]byte testBlobVHashes [][32]byte
testBlobIndices = make(map[[32]byte]int)
) )
const testMaxBlobsPerBlock = 6 const testMaxBlobsPerBlock = 6
@ -66,6 +68,7 @@ func init() {
testBlobProofs = append(testBlobProofs, testBlobProof) testBlobProofs = append(testBlobProofs, testBlobProof)
testBlobVHash := kzg4844.CalcBlobHashV1(sha256.New(), &testBlobCommit) testBlobVHash := kzg4844.CalcBlobHashV1(sha256.New(), &testBlobCommit)
testBlobIndices[testBlobVHash] = len(testBlobVHashes)
testBlobVHashes = append(testBlobVHashes, testBlobVHash) testBlobVHashes = append(testBlobVHashes, testBlobVHash)
} }
} }
@ -216,7 +219,7 @@ func makeTx(nonce uint64, gasTipCap uint64, gasFeeCap uint64, blobFeeCap uint64,
// makeMultiBlobTx is a utility method to construct a ramdom blob tx with // makeMultiBlobTx is a utility method to construct a ramdom blob tx with
// certain number of blobs in its sidecar. // certain number of blobs in its sidecar.
func makeMultiBlobTx(nonce uint64, gasTipCap uint64, gasFeeCap uint64, blobFeeCap uint64, blobCount int, key *ecdsa.PrivateKey) *types.Transaction { func makeMultiBlobTx(nonce uint64, gasTipCap uint64, gasFeeCap uint64, blobFeeCap uint64, blobCount int, blobOffset int, key *ecdsa.PrivateKey, version byte) *types.Transaction {
var ( var (
blobs []kzg4844.Blob blobs []kzg4844.Blob
blobHashes []common.Hash blobHashes []common.Hash
@ -224,10 +227,15 @@ func makeMultiBlobTx(nonce uint64, gasTipCap uint64, gasFeeCap uint64, blobFeeCa
proofs []kzg4844.Proof proofs []kzg4844.Proof
) )
for i := 0; i < blobCount; i++ { for i := 0; i < blobCount; i++ {
blobs = append(blobs, *testBlobs[i]) blobs = append(blobs, *testBlobs[blobOffset+i])
commitments = append(commitments, testBlobCommits[i]) commitments = append(commitments, testBlobCommits[blobOffset+i])
proofs = append(proofs, testBlobProofs[i]) if version == types.BlobSidecarVersion0 {
blobHashes = append(blobHashes, testBlobVHashes[i]) proofs = append(proofs, testBlobProofs[blobOffset+i])
} else {
cellProofs, _ := kzg4844.ComputeCellProofs(testBlobs[blobOffset+i])
proofs = append(proofs, cellProofs...)
}
blobHashes = append(blobHashes, testBlobVHashes[blobOffset+i])
} }
blobtx := &types.BlobTx{ blobtx := &types.BlobTx{
ChainID: uint256.MustFromBig(params.MainnetChainConfig.ChainID), ChainID: uint256.MustFromBig(params.MainnetChainConfig.ChainID),
@ -238,7 +246,7 @@ func makeMultiBlobTx(nonce uint64, gasTipCap uint64, gasFeeCap uint64, blobFeeCa
BlobFeeCap: uint256.NewInt(blobFeeCap), BlobFeeCap: uint256.NewInt(blobFeeCap),
BlobHashes: blobHashes, BlobHashes: blobHashes,
Value: uint256.NewInt(100), Value: uint256.NewInt(100),
Sidecar: types.NewBlobTxSidecar(types.BlobSidecarVersion0, blobs, commitments, proofs), Sidecar: types.NewBlobTxSidecar(version, blobs, commitments, proofs),
} }
return types.MustSignNewTx(key, types.LatestSigner(params.MainnetChainConfig), blobtx) return types.MustSignNewTx(key, types.LatestSigner(params.MainnetChainConfig), blobtx)
} }
@ -396,35 +404,21 @@ func verifyPoolInternals(t *testing.T, pool *BlobPool) {
// whatever is in the pool, it can be retrieved correctly. // whatever is in the pool, it can be retrieved correctly.
func verifyBlobRetrievals(t *testing.T, pool *BlobPool) { func verifyBlobRetrievals(t *testing.T, pool *BlobPool) {
// Collect all the blobs tracked by the pool // Collect all the blobs tracked by the pool
known := make(map[common.Hash]struct{}) var (
hashes []common.Hash
known = make(map[common.Hash]struct{})
)
for _, txs := range pool.index { for _, txs := range pool.index {
for _, tx := range txs { for _, tx := range txs {
for _, vhash := range tx.vhashes { for _, vhash := range tx.vhashes {
known[vhash] = struct{}{} known[vhash] = struct{}{}
} }
hashes = append(hashes, tx.vhashes...)
} }
} }
// Attempt to retrieve all test blobs blobs, _, proofs, err := pool.GetBlobs(hashes, types.BlobSidecarVersion0)
hashes := make([]common.Hash, len(testBlobVHashes)) if err != nil {
for i := range testBlobVHashes { t.Fatal(err)
copy(hashes[i][:], testBlobVHashes[i][:])
}
sidecars := pool.GetBlobs(hashes)
var blobs []*kzg4844.Blob
var proofs []*kzg4844.Proof
for idx, sidecar := range sidecars {
if sidecar == nil {
blobs = append(blobs, nil)
proofs = append(proofs, nil)
continue
}
blobHashes := sidecar.BlobHashes()
for i, hash := range blobHashes {
if hash == hashes[idx] {
blobs = append(blobs, &sidecar.Blobs[i])
proofs = append(proofs, &sidecar.Proofs[i])
}
}
} }
// Cross validate what we received vs what we wanted // Cross validate what we received vs what we wanted
if len(blobs) != len(hashes) || len(proofs) != len(hashes) { if len(blobs) != len(hashes) || len(proofs) != len(hashes) {
@ -434,13 +428,12 @@ func verifyBlobRetrievals(t *testing.T, pool *BlobPool) {
for i, hash := range hashes { for i, hash := range hashes {
// If an item is missing, but shouldn't, error // If an item is missing, but shouldn't, error
if blobs[i] == nil || proofs[i] == nil { if blobs[i] == nil || proofs[i] == nil {
if _, ok := known[hash]; ok { t.Errorf("tracked blob retrieval failed: item %d, hash %x", i, hash)
t.Errorf("tracked blob retrieval failed: item %d, hash %x", i, hash)
}
continue continue
} }
// Item retrieved, make sure it matches the expectation // Item retrieved, make sure it matches the expectation
if *blobs[i] != *testBlobs[i] || *proofs[i] != testBlobProofs[i] { index := testBlobIndices[hash]
if *blobs[i] != *testBlobs[index] || proofs[i][0] != testBlobProofs[index] {
t.Errorf("retrieved blob or proof mismatch: item %d, hash %x", i, hash) t.Errorf("retrieved blob or proof mismatch: item %d, hash %x", i, hash)
continue continue
} }
@ -1071,9 +1064,9 @@ func TestChangingSlotterSize(t *testing.T) {
addr2 = crypto.PubkeyToAddress(key2.PublicKey) addr2 = crypto.PubkeyToAddress(key2.PublicKey)
addr3 = crypto.PubkeyToAddress(key3.PublicKey) addr3 = crypto.PubkeyToAddress(key3.PublicKey)
tx1 = makeMultiBlobTx(0, 1, 1000, 100, 6, key1) tx1 = makeMultiBlobTx(0, 1, 1000, 100, 6, 0, key1, types.BlobSidecarVersion0)
tx2 = makeMultiBlobTx(0, 1, 800, 70, 6, key2) tx2 = makeMultiBlobTx(0, 1, 800, 70, 6, 0, key2, types.BlobSidecarVersion0)
tx3 = makeMultiBlobTx(0, 1, 800, 110, 24, key3) tx3 = makeMultiBlobTx(0, 1, 800, 110, 24, 0, key3, types.BlobSidecarVersion0)
blob1, _ = rlp.EncodeToBytes(tx1) blob1, _ = rlp.EncodeToBytes(tx1)
blob2, _ = rlp.EncodeToBytes(tx2) blob2, _ = rlp.EncodeToBytes(tx2)
@ -1191,8 +1184,8 @@ func TestBlobCountLimit(t *testing.T) {
// Attempt to add transactions. // Attempt to add transactions.
var ( var (
tx1 = makeMultiBlobTx(0, 1, 1000, 100, 6, key1) tx1 = makeMultiBlobTx(0, 1, 1000, 100, 6, 0, key1, types.BlobSidecarVersion0)
tx2 = makeMultiBlobTx(0, 1, 800, 70, 7, key2) tx2 = makeMultiBlobTx(0, 1, 800, 70, 7, 0, key2, types.BlobSidecarVersion0)
) )
errs := pool.Add([]*types.Transaction{tx1, tx2}, true) errs := pool.Add([]*types.Transaction{tx1, tx2}, true)
@ -1675,6 +1668,181 @@ func TestAdd(t *testing.T) {
} }
} }
// TestGetBlobs exercises BlobPool.GetBlobs across both sidecar versions,
// checking that blobs, commitments and proofs are returned for arbitrary
// ranges of tracked versioned hashes, and that unknown hashes yield an error.
func TestGetBlobs(t *testing.T) {
	// Create a temporary folder for the persistent backend
	storage := t.TempDir()

	if err := os.MkdirAll(filepath.Join(storage, pendingTransactionStore), 0700); err != nil {
		t.Fatal(err)
	}
	store, err := billy.Open(billy.Options{Path: filepath.Join(storage, pendingTransactionStore)}, newSlotter(params.BlobTxMaxBlobs), nil)
	if err != nil {
		t.Fatal(err)
	}
	// Create transactions from a few accounts.
	var (
		key1, _ = crypto.GenerateKey()
		key2, _ = crypto.GenerateKey()
		key3, _ = crypto.GenerateKey()

		addr1 = crypto.PubkeyToAddress(key1.PublicKey)
		addr2 = crypto.PubkeyToAddress(key2.PublicKey)
		addr3 = crypto.PubkeyToAddress(key3.PublicKey)

		tx1 = makeMultiBlobTx(0, 1, 1000, 100, 6, 0, key1, types.BlobSidecarVersion0) // [0, 6)
		tx2 = makeMultiBlobTx(0, 1, 800, 70, 6, 6, key2, types.BlobSidecarVersion1)   // [6, 12)
		tx3 = makeMultiBlobTx(0, 1, 800, 110, 6, 12, key3, types.BlobSidecarVersion0) // [12, 18)

		blob1, _ = rlp.EncodeToBytes(tx1)
		blob2, _ = rlp.EncodeToBytes(tx2)
		blob3, _ = rlp.EncodeToBytes(tx3)
	)
	// Write the three safely sized txs to store. note: although the store is
	// configured for a blob count of 6, it can also support around ~1mb of call
	// data - all this to say that we aren't using the absolute largest shelf
	// available.
	for _, blob := range [][]byte{blob1, blob2, blob3} {
		if _, err := store.Put(blob); err != nil {
			t.Fatal(err)
		}
	}
	if err := store.Close(); err != nil {
		t.Fatal(err)
	}

	// Fund the accounts so the pool accepts the pre-stored transactions.
	statedb, _ := state.New(types.EmptyRootHash, state.NewDatabaseForTesting())
	statedb.AddBalance(addr1, uint256.NewInt(1_000_000_000), tracing.BalanceChangeUnspecified)
	statedb.AddBalance(addr2, uint256.NewInt(1_000_000_000), tracing.BalanceChangeUnspecified)
	statedb.AddBalance(addr3, uint256.NewInt(1_000_000_000), tracing.BalanceChangeUnspecified)
	statedb.Commit(0, true, false)

	// Make a custom chain config with raised blob limits so all three
	// transactions fit into the pool at once.
	cancunTime := uint64(0)
	config := &params.ChainConfig{
		ChainID:     big.NewInt(1),
		LondonBlock: big.NewInt(0),
		BerlinBlock: big.NewInt(0),
		CancunTime:  &cancunTime,
		BlobScheduleConfig: &params.BlobScheduleConfig{
			Cancun: &params.BlobConfig{
				Target:         12,
				Max:            24,
				UpdateFraction: params.DefaultCancunBlobConfig.UpdateFraction,
			},
		},
	}
	chain := &testBlockChain{
		config:  config,
		basefee: uint256.NewInt(1050),
		blobfee: uint256.NewInt(105),
		statedb: statedb,
	}
	pool := New(Config{Datadir: storage}, chain, nil)
	if err := pool.Init(1, chain.CurrentBlock(), newReserver()); err != nil {
		t.Fatalf("failed to create blob pool: %v", err)
	}
	// Verify the regular three txs are always available.
	if got := pool.Get(tx1.Hash()); got == nil {
		t.Errorf("expected tx %s from %s in pool", tx1.Hash(), addr1)
	}
	if got := pool.Get(tx2.Hash()); got == nil {
		t.Errorf("expected tx %s from %s in pool", tx2.Hash(), addr2)
	}
	if got := pool.Get(tx3.Hash()); got == nil {
		t.Errorf("expected tx %s from %s in pool", tx3.Hash(), addr3)
	}
	// Each case requests the blobs for vhashes[start:limit] in the given
	// proof version; the final case requests untracked hashes and must fail.
	cases := []struct {
		start   int
		limit   int
		version byte
		expErr  bool
	}{
		{start: 0, limit: 6, version: types.BlobSidecarVersion0},
		{start: 0, limit: 6, version: types.BlobSidecarVersion1},
		{start: 3, limit: 9, version: types.BlobSidecarVersion0},
		{start: 3, limit: 9, version: types.BlobSidecarVersion1},
		{start: 3, limit: 15, version: types.BlobSidecarVersion0},
		{start: 3, limit: 15, version: types.BlobSidecarVersion1},
		{start: 0, limit: 18, version: types.BlobSidecarVersion0},
		{start: 0, limit: 18, version: types.BlobSidecarVersion1},
		{start: 18, limit: 20, version: types.BlobSidecarVersion0, expErr: true},
	}
	for i, c := range cases {
		vhashes := make([]common.Hash, 0, c.limit-c.start)
		for j := c.start; j < c.limit; j++ {
			vhashes = append(vhashes, testBlobVHashes[j])
		}
		blobs, _, proofs, err := pool.GetBlobs(vhashes, c.version)
		if c.expErr {
			if err == nil {
				t.Errorf("Unexpected return, want error for case %d", i)
			}
			continue
		}
		if err != nil {
			t.Errorf("Unexpected error for case %d, %v", i, err)
		}
		// Cross validate what we received vs what we wanted
		length := c.limit - c.start
		if len(blobs) != length || len(proofs) != length {
			t.Errorf("retrieved blobs/proofs size mismatch: have %d/%d, want %d", len(blobs), len(proofs), length)
			continue
		}
		for j := 0; j < len(blobs); j++ {
			// If an item is missing, but shouldn't, error
			if blobs[j] == nil || proofs[j] == nil {
				t.Errorf("tracked blob retrieval failed: item %d, hash %x", j, vhashes[j])
				continue
			}
			// Item retrieved, make sure the blob matches the expectation
			if *blobs[j] != *testBlobs[c.start+j] {
				t.Errorf("retrieved blob mismatch: item %d, hash %x", j, vhashes[j])
				continue
			}
			// Item retrieved, make sure the proof matches the expectation:
			// version 0 yields a single blob proof, version 1 a full set of
			// cell proofs.
			if c.version == types.BlobSidecarVersion0 {
				if proofs[j][0] != testBlobProofs[c.start+j] {
					t.Errorf("retrieved proof mismatch: item %d, hash %x", j, vhashes[j])
				}
			} else {
				want, _ := kzg4844.ComputeCellProofs(blobs[j])
				if !reflect.DeepEqual(want, proofs[j]) {
					t.Errorf("retrieved proof mismatch: item %d, hash %x", j, vhashes[j])
				}
			}
		}
	}
	pool.Close()
}
// fakeBilly is a billy.Database implementation which just drops data on the floor. // fakeBilly is a billy.Database implementation which just drops data on the floor.
type fakeBilly struct { type fakeBilly struct {
billy.Database billy.Database

View file

@ -145,7 +145,7 @@ func ValidateTransaction(tx *types.Transaction, head *types.Header, signer types
} }
if tx.Type() == types.SetCodeTxType { if tx.Type() == types.SetCodeTxType {
if len(tx.SetCodeAuthorizations()) == 0 { if len(tx.SetCodeAuthorizations()) == 0 {
return fmt.Errorf("set code tx must have at least one authorization tuple") return errors.New("set code tx must have at least one authorization tuple")
} }
} }
return nil return nil

View file

@ -169,7 +169,7 @@ func (e *AccountAccess) validate() error {
// Convert code change // Convert code change
if len(e.Code) == 1 { if len(e.Code) == 1 {
if len(e.Code[0].Code) > params.MaxCodeSize { if len(e.Code[0].Code) > params.MaxCodeSize {
return fmt.Errorf("code change contained oversized code") return errors.New("code change contained oversized code")
} }
} }
return nil return nil

View file

@ -128,7 +128,7 @@ func (h *Header) Hash() common.Hash {
return rlpHash(h) return rlpHash(h)
} }
var headerSize = common.StorageSize(reflect.TypeOf(Header{}).Size()) var headerSize = common.StorageSize(reflect.TypeFor[Header]().Size())
// Size returns the approximate memory used by all internal contents. It is used // Size returns the approximate memory used by all internal contents. It is used
// to approximate and limit the memory consumption of various caches. // to approximate and limit the memory consumption of various caches.

View file

@ -89,7 +89,7 @@ type authorizationMarshaling struct {
// SignSetCode creates a signed the SetCode authorization. // SignSetCode creates a signed the SetCode authorization.
func SignSetCode(prv *ecdsa.PrivateKey, auth SetCodeAuthorization) (SetCodeAuthorization, error) { func SignSetCode(prv *ecdsa.PrivateKey, auth SetCodeAuthorization) (SetCodeAuthorization, error) {
sighash := auth.sigHash() sighash := auth.SigHash()
sig, err := crypto.Sign(sighash[:], prv) sig, err := crypto.Sign(sighash[:], prv)
if err != nil { if err != nil {
return SetCodeAuthorization{}, err return SetCodeAuthorization{}, err
@ -105,7 +105,8 @@ func SignSetCode(prv *ecdsa.PrivateKey, auth SetCodeAuthorization) (SetCodeAutho
}, nil }, nil
} }
func (a *SetCodeAuthorization) sigHash() common.Hash { // SigHash returns the hash of SetCodeAuthorization for signing.
func (a *SetCodeAuthorization) SigHash() common.Hash {
return prefixedRlpHash(0x05, []any{ return prefixedRlpHash(0x05, []any{
a.ChainID, a.ChainID,
a.Address, a.Address,
@ -115,7 +116,7 @@ func (a *SetCodeAuthorization) sigHash() common.Hash {
// Authority recovers the the authorizing account of an authorization. // Authority recovers the the authorizing account of an authorization.
func (a *SetCodeAuthorization) Authority() (common.Address, error) { func (a *SetCodeAuthorization) Authority() (common.Address, error) {
sighash := a.sigHash() sighash := a.SigHash()
if !crypto.ValidateSignatureValues(a.V, a.R.ToBig(), a.S.ToBig(), true) { if !crypto.ValidateSignatureValues(a.V, a.R.ToBig(), a.S.ToBig(), true) {
return common.Address{}, ErrInvalidSig return common.Address{}, ErrInvalidSig
} }

View file

@ -49,7 +49,7 @@ type Withdrawals []*Withdrawal
// Len returns the length of s. // Len returns the length of s.
func (s Withdrawals) Len() int { return len(s) } func (s Withdrawals) Len() int { return len(s) }
var withdrawalSize = int(reflect.TypeOf(Withdrawal{}).Size()) var withdrawalSize = int(reflect.TypeFor[Withdrawal]().Size())
func (s Withdrawals) Size() int { func (s Withdrawals) Size() int {
return withdrawalSize * len(s) return withdrawalSize * len(s)

View file

@ -20,6 +20,7 @@ import (
"bytes" "bytes"
"encoding/binary" "encoding/binary"
"encoding/hex" "encoding/hex"
"fmt"
"math/big" "math/big"
"slices" "slices"
"testing" "testing"
@ -202,12 +203,15 @@ func TestProcessVerkle(t *testing.T) {
t.Log("verified verkle proof, inserting blocks into the chain") t.Log("verified verkle proof, inserting blocks into the chain")
for i, b := range chain {
fmt.Printf("%d %x\n", i, b.Root())
}
endnum, err := blockchain.InsertChain(chain) endnum, err := blockchain.InsertChain(chain)
if err != nil { if err != nil {
t.Fatalf("block %d imported with error: %v", endnum, err) t.Fatalf("block %d imported with error: %v", endnum, err)
} }
for i := 0; i < 2; i++ { for i := range 2 {
b := blockchain.GetBlockByNumber(uint64(i) + 1) b := blockchain.GetBlockByNumber(uint64(i) + 1)
if b == nil { if b == nil {
t.Fatalf("expected block %d to be present in chain", i+1) t.Fatalf("expected block %d to be present in chain", i+1)

View file

@ -25,16 +25,16 @@ const (
set7BitsMask = uint16(0b111_1111) set7BitsMask = uint16(0b111_1111)
) )
// bitvec is a bit vector which maps bytes in a program. // BitVec is a bit vector which maps bytes in a program.
// An unset bit means the byte is an opcode, a set bit means // An unset bit means the byte is an opcode, a set bit means
// it's data (i.e. argument of PUSHxx). // it's data (i.e. argument of PUSHxx).
type bitvec []byte type BitVec []byte
func (bits bitvec) set1(pos uint64) { func (bits BitVec) set1(pos uint64) {
bits[pos/8] |= 1 << (pos % 8) bits[pos/8] |= 1 << (pos % 8)
} }
func (bits bitvec) setN(flag uint16, pos uint64) { func (bits BitVec) setN(flag uint16, pos uint64) {
a := flag << (pos % 8) a := flag << (pos % 8)
bits[pos/8] |= byte(a) bits[pos/8] |= byte(a)
if b := byte(a >> 8); b != 0 { if b := byte(a >> 8); b != 0 {
@ -42,13 +42,13 @@ func (bits bitvec) setN(flag uint16, pos uint64) {
} }
} }
func (bits bitvec) set8(pos uint64) { func (bits BitVec) set8(pos uint64) {
a := byte(0xFF << (pos % 8)) a := byte(0xFF << (pos % 8))
bits[pos/8] |= a bits[pos/8] |= a
bits[pos/8+1] = ^a bits[pos/8+1] = ^a
} }
func (bits bitvec) set16(pos uint64) { func (bits BitVec) set16(pos uint64) {
a := byte(0xFF << (pos % 8)) a := byte(0xFF << (pos % 8))
bits[pos/8] |= a bits[pos/8] |= a
bits[pos/8+1] = 0xFF bits[pos/8+1] = 0xFF
@ -56,23 +56,23 @@ func (bits bitvec) set16(pos uint64) {
} }
// codeSegment checks if the position is in a code segment. // codeSegment checks if the position is in a code segment.
func (bits *bitvec) codeSegment(pos uint64) bool { func (bits *BitVec) codeSegment(pos uint64) bool {
return (((*bits)[pos/8] >> (pos % 8)) & 1) == 0 return (((*bits)[pos/8] >> (pos % 8)) & 1) == 0
} }
// codeBitmap collects data locations in code. // codeBitmap collects data locations in code.
func codeBitmap(code []byte) bitvec { func codeBitmap(code []byte) BitVec {
// The bitmap is 4 bytes longer than necessary, in case the code // The bitmap is 4 bytes longer than necessary, in case the code
// ends with a PUSH32, the algorithm will set bits on the // ends with a PUSH32, the algorithm will set bits on the
// bitvector outside the bounds of the actual code. // bitvector outside the bounds of the actual code.
bits := make(bitvec, len(code)/8+1+4) bits := make(BitVec, len(code)/8+1+4)
return codeBitmapInternal(code, bits) return codeBitmapInternal(code, bits)
} }
// codeBitmapInternal is the internal implementation of codeBitmap. // codeBitmapInternal is the internal implementation of codeBitmap.
// It exists for the purpose of being able to run benchmark tests // It exists for the purpose of being able to run benchmark tests
// without dynamic allocations affecting the results. // without dynamic allocations affecting the results.
func codeBitmapInternal(code, bits bitvec) bitvec { func codeBitmapInternal(code, bits BitVec) BitVec {
for pc := uint64(0); pc < uint64(len(code)); { for pc := uint64(0); pc < uint64(len(code)); {
op := OpCode(code[pc]) op := OpCode(code[pc])
pc++ pc++

View file

@ -90,7 +90,7 @@ func BenchmarkJumpdestOpAnalysis(bench *testing.B) {
for i := range code { for i := range code {
code[i] = byte(op) code[i] = byte(op)
} }
bits := make(bitvec, len(code)/8+1+4) bits := make(BitVec, len(code)/8+1+4)
b.ResetTimer() b.ResetTimer()
for i := 0; i < b.N; i++ { for i := 0; i < b.N; i++ {
clear(bits) clear(bits)

View file

@ -31,8 +31,8 @@ type Contract struct {
caller common.Address caller common.Address
address common.Address address common.Address
jumpdests map[common.Hash]bitvec // Aggregated result of JUMPDEST analysis. jumpDests JumpDestCache // Aggregated result of JUMPDEST analysis.
analysis bitvec // Locally cached result of JUMPDEST analysis analysis BitVec // Locally cached result of JUMPDEST analysis
Code []byte Code []byte
CodeHash common.Hash CodeHash common.Hash
@ -47,15 +47,15 @@ type Contract struct {
} }
// NewContract returns a new contract environment for the execution of EVM. // NewContract returns a new contract environment for the execution of EVM.
func NewContract(caller common.Address, address common.Address, value *uint256.Int, gas uint64, jumpDests map[common.Hash]bitvec) *Contract { func NewContract(caller common.Address, address common.Address, value *uint256.Int, gas uint64, jumpDests JumpDestCache) *Contract {
// Initialize the jump analysis map if it's nil, mostly for tests // Initialize the jump analysis cache if it's nil, mostly for tests
if jumpDests == nil { if jumpDests == nil {
jumpDests = make(map[common.Hash]bitvec) jumpDests = newMapJumpDests()
} }
return &Contract{ return &Contract{
caller: caller, caller: caller,
address: address, address: address,
jumpdests: jumpDests, jumpDests: jumpDests,
Gas: gas, Gas: gas,
value: value, value: value,
} }
@ -87,12 +87,12 @@ func (c *Contract) isCode(udest uint64) bool {
// contracts ( not temporary initcode), we store the analysis in a map // contracts ( not temporary initcode), we store the analysis in a map
if c.CodeHash != (common.Hash{}) { if c.CodeHash != (common.Hash{}) {
// Does parent context have the analysis? // Does parent context have the analysis?
analysis, exist := c.jumpdests[c.CodeHash] analysis, exist := c.jumpDests.Load(c.CodeHash)
if !exist { if !exist {
// Do the analysis and save in parent context // Do the analysis and save in parent context
// We do not need to store it in c.analysis // We do not need to store it in c.analysis
analysis = codeBitmap(c.Code) analysis = codeBitmap(c.Code)
c.jumpdests[c.CodeHash] = analysis c.jumpDests.Store(c.CodeHash, analysis)
} }
// Also stash it in current contract for faster access // Also stash it in current contract for faster access
c.analysis = analysis c.analysis = analysis

View file

@ -515,7 +515,7 @@ func (c *bigModExp) Run(input []byte) ([]byte, error) {
} }
// enforce size cap for inputs // enforce size cap for inputs
if c.eip7823 && max(baseLen, expLen, modLen) > 1024 { if c.eip7823 && max(baseLen, expLen, modLen) > 1024 {
return nil, fmt.Errorf("one or more of base/exponent/modulus length exceeded 1024 bytes") return nil, errors.New("one or more of base/exponent/modulus length exceeded 1024 bytes")
} }
// Retrieve the operands and execute the exponentiation // Retrieve the operands and execute the exponentiation
var ( var (

View file

@ -89,8 +89,8 @@ func enable1884(jt *JumpTable) {
} }
} }
func opSelfBalance(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { func opSelfBalance(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
balance := interpreter.evm.StateDB.GetBalance(scope.Contract.Address()) balance := evm.StateDB.GetBalance(scope.Contract.Address())
scope.Stack.push(balance) scope.Stack.push(balance)
return nil, nil return nil, nil
} }
@ -108,8 +108,8 @@ func enable1344(jt *JumpTable) {
} }
// opChainID implements CHAINID opcode // opChainID implements CHAINID opcode
func opChainID(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { func opChainID(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
chainId, _ := uint256.FromBig(interpreter.evm.chainConfig.ChainID) chainId, _ := uint256.FromBig(evm.chainConfig.ChainID)
scope.Stack.push(chainId) scope.Stack.push(chainId)
return nil, nil return nil, nil
} }
@ -199,28 +199,28 @@ func enable1153(jt *JumpTable) {
} }
// opTload implements TLOAD opcode // opTload implements TLOAD opcode
func opTload(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { func opTload(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
loc := scope.Stack.peek() loc := scope.Stack.peek()
hash := common.Hash(loc.Bytes32()) hash := common.Hash(loc.Bytes32())
val := interpreter.evm.StateDB.GetTransientState(scope.Contract.Address(), hash) val := evm.StateDB.GetTransientState(scope.Contract.Address(), hash)
loc.SetBytes(val.Bytes()) loc.SetBytes(val.Bytes())
return nil, nil return nil, nil
} }
// opTstore implements TSTORE opcode // opTstore implements TSTORE opcode
func opTstore(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { func opTstore(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
if interpreter.readOnly { if evm.readOnly {
return nil, ErrWriteProtection return nil, ErrWriteProtection
} }
loc := scope.Stack.pop() loc := scope.Stack.pop()
val := scope.Stack.pop() val := scope.Stack.pop()
interpreter.evm.StateDB.SetTransientState(scope.Contract.Address(), loc.Bytes32(), val.Bytes32()) evm.StateDB.SetTransientState(scope.Contract.Address(), loc.Bytes32(), val.Bytes32())
return nil, nil return nil, nil
} }
// opBaseFee implements BASEFEE opcode // opBaseFee implements BASEFEE opcode
func opBaseFee(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { func opBaseFee(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
baseFee, _ := uint256.FromBig(interpreter.evm.Context.BaseFee) baseFee, _ := uint256.FromBig(evm.Context.BaseFee)
scope.Stack.push(baseFee) scope.Stack.push(baseFee)
return nil, nil return nil, nil
} }
@ -237,7 +237,7 @@ func enable3855(jt *JumpTable) {
} }
// opPush0 implements the PUSH0 opcode // opPush0 implements the PUSH0 opcode
func opPush0(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { func opPush0(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
scope.Stack.push(new(uint256.Int)) scope.Stack.push(new(uint256.Int))
return nil, nil return nil, nil
} }
@ -263,7 +263,7 @@ func enable5656(jt *JumpTable) {
} }
// opMcopy implements the MCOPY opcode (https://eips.ethereum.org/EIPS/eip-5656) // opMcopy implements the MCOPY opcode (https://eips.ethereum.org/EIPS/eip-5656)
func opMcopy(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { func opMcopy(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
var ( var (
dst = scope.Stack.pop() dst = scope.Stack.pop()
src = scope.Stack.pop() src = scope.Stack.pop()
@ -276,10 +276,10 @@ func opMcopy(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]by
} }
// opBlobHash implements the BLOBHASH opcode // opBlobHash implements the BLOBHASH opcode
func opBlobHash(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { func opBlobHash(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
index := scope.Stack.peek() index := scope.Stack.peek()
if index.LtUint64(uint64(len(interpreter.evm.TxContext.BlobHashes))) { if index.LtUint64(uint64(len(evm.TxContext.BlobHashes))) {
blobHash := interpreter.evm.TxContext.BlobHashes[index.Uint64()] blobHash := evm.TxContext.BlobHashes[index.Uint64()]
index.SetBytes32(blobHash[:]) index.SetBytes32(blobHash[:])
} else { } else {
index.Clear() index.Clear()
@ -288,14 +288,14 @@ func opBlobHash(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([
} }
// opBlobBaseFee implements BLOBBASEFEE opcode // opBlobBaseFee implements BLOBBASEFEE opcode
func opBlobBaseFee(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { func opBlobBaseFee(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
blobBaseFee, _ := uint256.FromBig(interpreter.evm.Context.BlobBaseFee) blobBaseFee, _ := uint256.FromBig(evm.Context.BlobBaseFee)
scope.Stack.push(blobBaseFee) scope.Stack.push(blobBaseFee)
return nil, nil return nil, nil
} }
// opCLZ implements the CLZ opcode (count leading zero bytes) // opCLZ implements the CLZ opcode (count leading zero bytes)
func opCLZ(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { func opCLZ(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
x := scope.Stack.peek() x := scope.Stack.peek()
x.SetUint64(256 - uint64(x.BitLen())) x.SetUint64(256 - uint64(x.BitLen()))
return nil, nil return nil, nil
@ -342,7 +342,7 @@ func enable6780(jt *JumpTable) {
} }
} }
func opExtCodeCopyEIP4762(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { func opExtCodeCopyEIP4762(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
var ( var (
stack = scope.Stack stack = scope.Stack
a = stack.pop() a = stack.pop()
@ -355,10 +355,10 @@ func opExtCodeCopyEIP4762(pc *uint64, interpreter *EVMInterpreter, scope *ScopeC
uint64CodeOffset = math.MaxUint64 uint64CodeOffset = math.MaxUint64
} }
addr := common.Address(a.Bytes20()) addr := common.Address(a.Bytes20())
code := interpreter.evm.StateDB.GetCode(addr) code := evm.StateDB.GetCode(addr)
paddedCodeCopy, copyOffset, nonPaddedCopyLength := getDataAndAdjustedBounds(code, uint64CodeOffset, length.Uint64()) paddedCodeCopy, copyOffset, nonPaddedCopyLength := getDataAndAdjustedBounds(code, uint64CodeOffset, length.Uint64())
consumed, wanted := interpreter.evm.AccessEvents.CodeChunksRangeGas(addr, copyOffset, nonPaddedCopyLength, uint64(len(code)), false, scope.Contract.Gas) consumed, wanted := evm.AccessEvents.CodeChunksRangeGas(addr, copyOffset, nonPaddedCopyLength, uint64(len(code)), false, scope.Contract.Gas)
scope.Contract.UseGas(consumed, interpreter.evm.Config.Tracer, tracing.GasChangeUnspecified) scope.Contract.UseGas(consumed, evm.Config.Tracer, tracing.GasChangeUnspecified)
if consumed < wanted { if consumed < wanted {
return nil, ErrOutOfGas return nil, ErrOutOfGas
} }
@ -370,7 +370,7 @@ func opExtCodeCopyEIP4762(pc *uint64, interpreter *EVMInterpreter, scope *ScopeC
// opPush1EIP4762 handles the special case of PUSH1 opcode for EIP-4762, which // opPush1EIP4762 handles the special case of PUSH1 opcode for EIP-4762, which
// need not worry about the adjusted bound logic when adding the PUSHDATA to // need not worry about the adjusted bound logic when adding the PUSHDATA to
// the list of access events. // the list of access events.
func opPush1EIP4762(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { func opPush1EIP4762(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
var ( var (
codeLen = uint64(len(scope.Contract.Code)) codeLen = uint64(len(scope.Contract.Code))
integer = new(uint256.Int) integer = new(uint256.Int)
@ -383,8 +383,8 @@ func opPush1EIP4762(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext
// touch next chunk if PUSH1 is at the boundary. if so, *pc has // touch next chunk if PUSH1 is at the boundary. if so, *pc has
// advanced past this boundary. // advanced past this boundary.
contractAddr := scope.Contract.Address() contractAddr := scope.Contract.Address()
consumed, wanted := interpreter.evm.AccessEvents.CodeChunksRangeGas(contractAddr, *pc+1, uint64(1), uint64(len(scope.Contract.Code)), false, scope.Contract.Gas) consumed, wanted := evm.AccessEvents.CodeChunksRangeGas(contractAddr, *pc+1, uint64(1), uint64(len(scope.Contract.Code)), false, scope.Contract.Gas)
scope.Contract.UseGas(wanted, interpreter.evm.Config.Tracer, tracing.GasChangeUnspecified) scope.Contract.UseGas(wanted, evm.Config.Tracer, tracing.GasChangeUnspecified)
if consumed < wanted { if consumed < wanted {
return nil, ErrOutOfGas return nil, ErrOutOfGas
} }
@ -396,7 +396,7 @@ func opPush1EIP4762(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext
} }
func makePushEIP4762(size uint64, pushByteSize int) executionFunc { func makePushEIP4762(size uint64, pushByteSize int) executionFunc {
return func(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { return func(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
var ( var (
codeLen = len(scope.Contract.Code) codeLen = len(scope.Contract.Code)
start = min(codeLen, int(*pc+1)) start = min(codeLen, int(*pc+1))
@ -411,8 +411,8 @@ func makePushEIP4762(size uint64, pushByteSize int) executionFunc {
if !scope.Contract.IsDeployment && !scope.Contract.IsSystemCall { if !scope.Contract.IsDeployment && !scope.Contract.IsSystemCall {
contractAddr := scope.Contract.Address() contractAddr := scope.Contract.Address()
consumed, wanted := interpreter.evm.AccessEvents.CodeChunksRangeGas(contractAddr, uint64(start), uint64(pushByteSize), uint64(len(scope.Contract.Code)), false, scope.Contract.Gas) consumed, wanted := evm.AccessEvents.CodeChunksRangeGas(contractAddr, uint64(start), uint64(pushByteSize), uint64(len(scope.Contract.Code)), false, scope.Contract.Gas)
scope.Contract.UseGas(consumed, interpreter.evm.Config.Tracer, tracing.GasChangeUnspecified) scope.Contract.UseGas(consumed, evm.Config.Tracer, tracing.GasChangeUnspecified)
if consumed < wanted { if consumed < wanted {
return nil, ErrOutOfGas return nil, ErrOutOfGas
} }

View file

@ -26,6 +26,7 @@ import (
"github.com/ethereum/go-ethereum/core/tracing" "github.com/ethereum/go-ethereum/core/tracing"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/params"
"github.com/holiman/uint256" "github.com/holiman/uint256"
) )
@ -95,6 +96,9 @@ type EVM struct {
// StateDB gives access to the underlying state // StateDB gives access to the underlying state
StateDB StateDB StateDB StateDB
// table holds the opcode specific handlers
table *JumpTable
// depth is the current call stack // depth is the current call stack
depth int depth int
@ -107,10 +111,6 @@ type EVM struct {
// virtual machine configuration options used to initialise the evm // virtual machine configuration options used to initialise the evm
Config Config Config Config
// global (to this context) ethereum virtual machine used throughout
// the execution of the tx
interpreter *EVMInterpreter
// abort is used to abort the EVM calling operations // abort is used to abort the EVM calling operations
abort atomic.Bool abort atomic.Bool
@ -122,9 +122,14 @@ type EVM struct {
// precompiles holds the precompiled contracts for the current epoch // precompiles holds the precompiled contracts for the current epoch
precompiles map[common.Address]PrecompiledContract precompiles map[common.Address]PrecompiledContract
// jumpDests is the aggregated result of JUMPDEST analysis made through // jumpDests stores results of JUMPDEST analysis.
// the life cycle of EVM. jumpDests JumpDestCache
jumpDests map[common.Hash]bitvec
hasher crypto.KeccakState // Keccak256 hasher instance shared across opcodes
hasherBuf common.Hash // Keccak256 hasher result array shared across opcodes
readOnly bool // Whether to throw on stateful modifications
returnData []byte // Last CALL's return data for subsequent reuse
} }
// NewEVM constructs an EVM instance with the supplied block context, state // NewEVM constructs an EVM instance with the supplied block context, state
@ -138,10 +143,58 @@ func NewEVM(blockCtx BlockContext, statedb StateDB, chainConfig *params.ChainCon
Config: config, Config: config,
chainConfig: chainConfig, chainConfig: chainConfig,
chainRules: chainConfig.Rules(blockCtx.BlockNumber, blockCtx.Random != nil, blockCtx.Time), chainRules: chainConfig.Rules(blockCtx.BlockNumber, blockCtx.Random != nil, blockCtx.Time),
jumpDests: make(map[common.Hash]bitvec), jumpDests: newMapJumpDests(),
hasher: crypto.NewKeccakState(),
} }
evm.precompiles = activePrecompiledContracts(evm.chainRules) evm.precompiles = activePrecompiledContracts(evm.chainRules)
evm.interpreter = NewEVMInterpreter(evm)
switch {
case evm.chainRules.IsOsaka:
evm.table = &osakaInstructionSet
case evm.chainRules.IsVerkle:
// TODO replace with proper instruction set when fork is specified
evm.table = &verkleInstructionSet
case evm.chainRules.IsPrague:
evm.table = &pragueInstructionSet
case evm.chainRules.IsCancun:
evm.table = &cancunInstructionSet
case evm.chainRules.IsShanghai:
evm.table = &shanghaiInstructionSet
case evm.chainRules.IsMerge:
evm.table = &mergeInstructionSet
case evm.chainRules.IsLondon:
evm.table = &londonInstructionSet
case evm.chainRules.IsBerlin:
evm.table = &berlinInstructionSet
case evm.chainRules.IsIstanbul:
evm.table = &istanbulInstructionSet
case evm.chainRules.IsConstantinople:
evm.table = &constantinopleInstructionSet
case evm.chainRules.IsByzantium:
evm.table = &byzantiumInstructionSet
case evm.chainRules.IsEIP158:
evm.table = &spuriousDragonInstructionSet
case evm.chainRules.IsEIP150:
evm.table = &tangerineWhistleInstructionSet
case evm.chainRules.IsHomestead:
evm.table = &homesteadInstructionSet
default:
evm.table = &frontierInstructionSet
}
var extraEips []int
if len(evm.Config.ExtraEips) > 0 {
// Deep-copy jumptable to prevent modification of opcodes in other tables
evm.table = copyJumpTable(evm.table)
}
for _, eip := range evm.Config.ExtraEips {
if err := EnableEIP(eip, evm.table); err != nil {
// Disable it, so caller can check if it's activated or not
log.Error("EIP activation failed", "eip", eip, "error", err)
} else {
extraEips = append(extraEips, eip)
}
}
evm.Config.ExtraEips = extraEips
return evm return evm
} }
@ -152,6 +205,11 @@ func (evm *EVM) SetPrecompiles(precompiles PrecompiledContracts) {
evm.precompiles = precompiles evm.precompiles = precompiles
} }
// SetJumpDestCache configures the analysis cache.
func (evm *EVM) SetJumpDestCache(jumpDests JumpDestCache) {
evm.jumpDests = jumpDests
}
// SetTxContext resets the EVM with a new transaction context. // SetTxContext resets the EVM with a new transaction context.
// This is not threadsafe and should only be done very cautiously. // This is not threadsafe and should only be done very cautiously.
func (evm *EVM) SetTxContext(txCtx TxContext) { func (evm *EVM) SetTxContext(txCtx TxContext) {
@ -172,11 +230,6 @@ func (evm *EVM) Cancelled() bool {
return evm.abort.Load() return evm.abort.Load()
} }
// Interpreter returns the current interpreter
func (evm *EVM) Interpreter() *EVMInterpreter {
return evm.interpreter
}
func isSystemCall(caller common.Address) bool { func isSystemCall(caller common.Address) bool {
return caller == params.SystemAddress return caller == params.SystemAddress
} }
@ -241,7 +294,7 @@ func (evm *EVM) Call(caller common.Address, addr common.Address, input []byte, g
contract := NewContract(caller, addr, value, gas, evm.jumpDests) contract := NewContract(caller, addr, value, gas, evm.jumpDests)
contract.IsSystemCall = isSystemCall(caller) contract.IsSystemCall = isSystemCall(caller)
contract.SetCallCode(evm.resolveCodeHash(addr), code) contract.SetCallCode(evm.resolveCodeHash(addr), code)
ret, err = evm.interpreter.Run(contract, input, false) ret, err = evm.Run(contract, input, false)
gas = contract.Gas gas = contract.Gas
} }
} }
@ -300,7 +353,7 @@ func (evm *EVM) CallCode(caller common.Address, addr common.Address, input []byt
// The contract is a scoped environment for this execution context only. // The contract is a scoped environment for this execution context only.
contract := NewContract(caller, caller, value, gas, evm.jumpDests) contract := NewContract(caller, caller, value, gas, evm.jumpDests)
contract.SetCallCode(evm.resolveCodeHash(addr), evm.resolveCode(addr)) contract.SetCallCode(evm.resolveCodeHash(addr), evm.resolveCode(addr))
ret, err = evm.interpreter.Run(contract, input, false) ret, err = evm.Run(contract, input, false)
gas = contract.Gas gas = contract.Gas
} }
if err != nil { if err != nil {
@ -344,7 +397,7 @@ func (evm *EVM) DelegateCall(originCaller common.Address, caller common.Address,
// Note: The value refers to the original value from the parent call. // Note: The value refers to the original value from the parent call.
contract := NewContract(originCaller, caller, value, gas, evm.jumpDests) contract := NewContract(originCaller, caller, value, gas, evm.jumpDests)
contract.SetCallCode(evm.resolveCodeHash(addr), evm.resolveCode(addr)) contract.SetCallCode(evm.resolveCodeHash(addr), evm.resolveCode(addr))
ret, err = evm.interpreter.Run(contract, input, false) ret, err = evm.Run(contract, input, false)
gas = contract.Gas gas = contract.Gas
} }
if err != nil { if err != nil {
@ -399,7 +452,7 @@ func (evm *EVM) StaticCall(caller common.Address, addr common.Address, input []b
// When an error was returned by the EVM or when setting the creation code // When an error was returned by the EVM or when setting the creation code
// above we revert to the snapshot and consume any gas remaining. Additionally // above we revert to the snapshot and consume any gas remaining. Additionally
// when we're in Homestead this also counts for code storage gas errors. // when we're in Homestead this also counts for code storage gas errors.
ret, err = evm.interpreter.Run(contract, input, true) ret, err = evm.Run(contract, input, true)
gas = contract.Gas gas = contract.Gas
} }
if err != nil { if err != nil {
@ -520,7 +573,7 @@ func (evm *EVM) create(caller common.Address, code []byte, gas uint64, value *ui
// initNewContract runs a new contract's creation code, performs checks on the // initNewContract runs a new contract's creation code, performs checks on the
// resulting code that is to be deployed, and consumes necessary gas. // resulting code that is to be deployed, and consumes necessary gas.
func (evm *EVM) initNewContract(contract *Contract, address common.Address) ([]byte, error) { func (evm *EVM) initNewContract(contract *Contract, address common.Address) ([]byte, error) {
ret, err := evm.interpreter.Run(contract, nil, false) ret, err := evm.Run(contract, nil, false)
if err != nil { if err != nil {
return ret, err return ret, err
} }
@ -563,7 +616,7 @@ func (evm *EVM) Create(caller common.Address, code []byte, gas uint64, value *ui
// The different between Create2 with Create is Create2 uses keccak256(0xff ++ msg.sender ++ salt ++ keccak256(init_code))[12:] // The different between Create2 with Create is Create2 uses keccak256(0xff ++ msg.sender ++ salt ++ keccak256(init_code))[12:]
// instead of the usual sender-and-nonce-hash as the address where the contract is initialized at. // instead of the usual sender-and-nonce-hash as the address where the contract is initialized at.
func (evm *EVM) Create2(caller common.Address, code []byte, gas uint64, endowment *uint256.Int, salt *uint256.Int) (ret []byte, contractAddr common.Address, leftOverGas uint64, err error) { func (evm *EVM) Create2(caller common.Address, code []byte, gas uint64, endowment *uint256.Int, salt *uint256.Int) (ret []byte, contractAddr common.Address, leftOverGas uint64, err error) {
inithash := crypto.HashData(evm.interpreter.hasher, code) inithash := crypto.HashData(evm.hasher, code)
contractAddr = crypto.CreateAddress2(caller, salt.Bytes32(), inithash[:]) contractAddr = crypto.CreateAddress2(caller, salt.Bytes32(), inithash[:])
return evm.create(caller, code, gas, endowment, contractAddr, CREATE2) return evm.create(caller, code, gas, endowment, contractAddr, CREATE2)
} }

View file

@ -26,67 +26,67 @@ import (
"github.com/holiman/uint256" "github.com/holiman/uint256"
) )
func opAdd(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { func opAdd(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
x, y := scope.Stack.pop(), scope.Stack.peek() x, y := scope.Stack.pop(), scope.Stack.peek()
y.Add(&x, y) y.Add(&x, y)
return nil, nil return nil, nil
} }
func opSub(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { func opSub(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
x, y := scope.Stack.pop(), scope.Stack.peek() x, y := scope.Stack.pop(), scope.Stack.peek()
y.Sub(&x, y) y.Sub(&x, y)
return nil, nil return nil, nil
} }
func opMul(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { func opMul(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
x, y := scope.Stack.pop(), scope.Stack.peek() x, y := scope.Stack.pop(), scope.Stack.peek()
y.Mul(&x, y) y.Mul(&x, y)
return nil, nil return nil, nil
} }
func opDiv(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { func opDiv(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
x, y := scope.Stack.pop(), scope.Stack.peek() x, y := scope.Stack.pop(), scope.Stack.peek()
y.Div(&x, y) y.Div(&x, y)
return nil, nil return nil, nil
} }
func opSdiv(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { func opSdiv(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
x, y := scope.Stack.pop(), scope.Stack.peek() x, y := scope.Stack.pop(), scope.Stack.peek()
y.SDiv(&x, y) y.SDiv(&x, y)
return nil, nil return nil, nil
} }
func opMod(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { func opMod(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
x, y := scope.Stack.pop(), scope.Stack.peek() x, y := scope.Stack.pop(), scope.Stack.peek()
y.Mod(&x, y) y.Mod(&x, y)
return nil, nil return nil, nil
} }
func opSmod(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { func opSmod(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
x, y := scope.Stack.pop(), scope.Stack.peek() x, y := scope.Stack.pop(), scope.Stack.peek()
y.SMod(&x, y) y.SMod(&x, y)
return nil, nil return nil, nil
} }
func opExp(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { func opExp(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
base, exponent := scope.Stack.pop(), scope.Stack.peek() base, exponent := scope.Stack.pop(), scope.Stack.peek()
exponent.Exp(&base, exponent) exponent.Exp(&base, exponent)
return nil, nil return nil, nil
} }
func opSignExtend(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { func opSignExtend(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
back, num := scope.Stack.pop(), scope.Stack.peek() back, num := scope.Stack.pop(), scope.Stack.peek()
num.ExtendSign(num, &back) num.ExtendSign(num, &back)
return nil, nil return nil, nil
} }
func opNot(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { func opNot(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
x := scope.Stack.peek() x := scope.Stack.peek()
x.Not(x) x.Not(x)
return nil, nil return nil, nil
} }
func opLt(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { func opLt(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
x, y := scope.Stack.pop(), scope.Stack.peek() x, y := scope.Stack.pop(), scope.Stack.peek()
if x.Lt(y) { if x.Lt(y) {
y.SetOne() y.SetOne()
@ -96,7 +96,7 @@ func opLt(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte,
return nil, nil return nil, nil
} }
func opGt(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { func opGt(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
x, y := scope.Stack.pop(), scope.Stack.peek() x, y := scope.Stack.pop(), scope.Stack.peek()
if x.Gt(y) { if x.Gt(y) {
y.SetOne() y.SetOne()
@ -106,7 +106,7 @@ func opGt(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte,
return nil, nil return nil, nil
} }
func opSlt(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { func opSlt(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
x, y := scope.Stack.pop(), scope.Stack.peek() x, y := scope.Stack.pop(), scope.Stack.peek()
if x.Slt(y) { if x.Slt(y) {
y.SetOne() y.SetOne()
@ -116,7 +116,7 @@ func opSlt(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte
return nil, nil return nil, nil
} }
func opSgt(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { func opSgt(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
x, y := scope.Stack.pop(), scope.Stack.peek() x, y := scope.Stack.pop(), scope.Stack.peek()
if x.Sgt(y) { if x.Sgt(y) {
y.SetOne() y.SetOne()
@ -126,7 +126,7 @@ func opSgt(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte
return nil, nil return nil, nil
} }
func opEq(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { func opEq(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
x, y := scope.Stack.pop(), scope.Stack.peek() x, y := scope.Stack.pop(), scope.Stack.peek()
if x.Eq(y) { if x.Eq(y) {
y.SetOne() y.SetOne()
@ -136,7 +136,7 @@ func opEq(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte,
return nil, nil return nil, nil
} }
func opIszero(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { func opIszero(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
x := scope.Stack.peek() x := scope.Stack.peek()
if x.IsZero() { if x.IsZero() {
x.SetOne() x.SetOne()
@ -146,37 +146,37 @@ func opIszero(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]b
return nil, nil return nil, nil
} }
func opAnd(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { func opAnd(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
x, y := scope.Stack.pop(), scope.Stack.peek() x, y := scope.Stack.pop(), scope.Stack.peek()
y.And(&x, y) y.And(&x, y)
return nil, nil return nil, nil
} }
func opOr(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { func opOr(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
x, y := scope.Stack.pop(), scope.Stack.peek() x, y := scope.Stack.pop(), scope.Stack.peek()
y.Or(&x, y) y.Or(&x, y)
return nil, nil return nil, nil
} }
func opXor(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { func opXor(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
x, y := scope.Stack.pop(), scope.Stack.peek() x, y := scope.Stack.pop(), scope.Stack.peek()
y.Xor(&x, y) y.Xor(&x, y)
return nil, nil return nil, nil
} }
func opByte(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { func opByte(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
th, val := scope.Stack.pop(), scope.Stack.peek() th, val := scope.Stack.pop(), scope.Stack.peek()
val.Byte(&th) val.Byte(&th)
return nil, nil return nil, nil
} }
func opAddmod(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { func opAddmod(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
x, y, z := scope.Stack.pop(), scope.Stack.pop(), scope.Stack.peek() x, y, z := scope.Stack.pop(), scope.Stack.pop(), scope.Stack.peek()
z.AddMod(&x, &y, z) z.AddMod(&x, &y, z)
return nil, nil return nil, nil
} }
func opMulmod(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { func opMulmod(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
x, y, z := scope.Stack.pop(), scope.Stack.pop(), scope.Stack.peek() x, y, z := scope.Stack.pop(), scope.Stack.pop(), scope.Stack.peek()
z.MulMod(&x, &y, z) z.MulMod(&x, &y, z)
return nil, nil return nil, nil
@ -185,7 +185,7 @@ func opMulmod(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]b
// opSHL implements Shift Left // opSHL implements Shift Left
// The SHL instruction (shift left) pops 2 values from the stack, first arg1 and then arg2, // The SHL instruction (shift left) pops 2 values from the stack, first arg1 and then arg2,
// and pushes on the stack arg2 shifted to the left by arg1 number of bits. // and pushes on the stack arg2 shifted to the left by arg1 number of bits.
func opSHL(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { func opSHL(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
// Note, second operand is left in the stack; accumulate result into it, and no need to push it afterwards // Note, second operand is left in the stack; accumulate result into it, and no need to push it afterwards
shift, value := scope.Stack.pop(), scope.Stack.peek() shift, value := scope.Stack.pop(), scope.Stack.peek()
if shift.LtUint64(256) { if shift.LtUint64(256) {
@ -199,7 +199,7 @@ func opSHL(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte
// opSHR implements Logical Shift Right // opSHR implements Logical Shift Right
// The SHR instruction (logical shift right) pops 2 values from the stack, first arg1 and then arg2, // The SHR instruction (logical shift right) pops 2 values from the stack, first arg1 and then arg2,
// and pushes on the stack arg2 shifted to the right by arg1 number of bits with zero fill. // and pushes on the stack arg2 shifted to the right by arg1 number of bits with zero fill.
func opSHR(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { func opSHR(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
// Note, second operand is left in the stack; accumulate result into it, and no need to push it afterwards // Note, second operand is left in the stack; accumulate result into it, and no need to push it afterwards
shift, value := scope.Stack.pop(), scope.Stack.peek() shift, value := scope.Stack.pop(), scope.Stack.peek()
if shift.LtUint64(256) { if shift.LtUint64(256) {
@ -213,7 +213,7 @@ func opSHR(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte
// opSAR implements Arithmetic Shift Right // opSAR implements Arithmetic Shift Right
// The SAR instruction (arithmetic shift right) pops 2 values from the stack, first arg1 and then arg2, // The SAR instruction (arithmetic shift right) pops 2 values from the stack, first arg1 and then arg2,
// and pushes on the stack arg2 shifted to the right by arg1 number of bits with sign extension. // and pushes on the stack arg2 shifted to the right by arg1 number of bits with sign extension.
func opSAR(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { func opSAR(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
shift, value := scope.Stack.pop(), scope.Stack.peek() shift, value := scope.Stack.pop(), scope.Stack.peek()
if shift.GtUint64(256) { if shift.GtUint64(256) {
if value.Sign() >= 0 { if value.Sign() >= 0 {
@ -229,50 +229,49 @@ func opSAR(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte
return nil, nil return nil, nil
} }
func opKeccak256(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { func opKeccak256(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
offset, size := scope.Stack.pop(), scope.Stack.peek() offset, size := scope.Stack.pop(), scope.Stack.peek()
data := scope.Memory.GetPtr(offset.Uint64(), size.Uint64()) data := scope.Memory.GetPtr(offset.Uint64(), size.Uint64())
interpreter.hasher.Reset() evm.hasher.Reset()
interpreter.hasher.Write(data) evm.hasher.Write(data)
interpreter.hasher.Read(interpreter.hasherBuf[:]) evm.hasher.Read(evm.hasherBuf[:])
evm := interpreter.evm
if evm.Config.EnablePreimageRecording { if evm.Config.EnablePreimageRecording {
evm.StateDB.AddPreimage(interpreter.hasherBuf, data) evm.StateDB.AddPreimage(evm.hasherBuf, data)
} }
size.SetBytes(interpreter.hasherBuf[:]) size.SetBytes(evm.hasherBuf[:])
return nil, nil return nil, nil
} }
func opAddress(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { func opAddress(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
scope.Stack.push(new(uint256.Int).SetBytes(scope.Contract.Address().Bytes())) scope.Stack.push(new(uint256.Int).SetBytes(scope.Contract.Address().Bytes()))
return nil, nil return nil, nil
} }
func opBalance(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { func opBalance(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
slot := scope.Stack.peek() slot := scope.Stack.peek()
address := common.Address(slot.Bytes20()) address := common.Address(slot.Bytes20())
slot.Set(interpreter.evm.StateDB.GetBalance(address)) slot.Set(evm.StateDB.GetBalance(address))
return nil, nil return nil, nil
} }
func opOrigin(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { func opOrigin(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
scope.Stack.push(new(uint256.Int).SetBytes(interpreter.evm.Origin.Bytes())) scope.Stack.push(new(uint256.Int).SetBytes(evm.Origin.Bytes()))
return nil, nil return nil, nil
} }
func opCaller(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { func opCaller(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
scope.Stack.push(new(uint256.Int).SetBytes(scope.Contract.Caller().Bytes())) scope.Stack.push(new(uint256.Int).SetBytes(scope.Contract.Caller().Bytes()))
return nil, nil return nil, nil
} }
func opCallValue(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { func opCallValue(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
scope.Stack.push(scope.Contract.value) scope.Stack.push(scope.Contract.value)
return nil, nil return nil, nil
} }
func opCallDataLoad(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { func opCallDataLoad(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
x := scope.Stack.peek() x := scope.Stack.peek()
if offset, overflow := x.Uint64WithOverflow(); !overflow { if offset, overflow := x.Uint64WithOverflow(); !overflow {
data := getData(scope.Contract.Input, offset, 32) data := getData(scope.Contract.Input, offset, 32)
@ -283,12 +282,12 @@ func opCallDataLoad(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext
return nil, nil return nil, nil
} }
func opCallDataSize(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { func opCallDataSize(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
scope.Stack.push(new(uint256.Int).SetUint64(uint64(len(scope.Contract.Input)))) scope.Stack.push(new(uint256.Int).SetUint64(uint64(len(scope.Contract.Input))))
return nil, nil return nil, nil
} }
func opCallDataCopy(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { func opCallDataCopy(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
var ( var (
memOffset = scope.Stack.pop() memOffset = scope.Stack.pop()
dataOffset = scope.Stack.pop() dataOffset = scope.Stack.pop()
@ -306,12 +305,12 @@ func opCallDataCopy(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext
return nil, nil return nil, nil
} }
func opReturnDataSize(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { func opReturnDataSize(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
scope.Stack.push(new(uint256.Int).SetUint64(uint64(len(interpreter.returnData)))) scope.Stack.push(new(uint256.Int).SetUint64(uint64(len(evm.returnData))))
return nil, nil return nil, nil
} }
func opReturnDataCopy(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { func opReturnDataCopy(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
var ( var (
memOffset = scope.Stack.pop() memOffset = scope.Stack.pop()
dataOffset = scope.Stack.pop() dataOffset = scope.Stack.pop()
@ -326,25 +325,25 @@ func opReturnDataCopy(pc *uint64, interpreter *EVMInterpreter, scope *ScopeConte
var end = dataOffset var end = dataOffset
end.Add(&dataOffset, &length) end.Add(&dataOffset, &length)
end64, overflow := end.Uint64WithOverflow() end64, overflow := end.Uint64WithOverflow()
if overflow || uint64(len(interpreter.returnData)) < end64 { if overflow || uint64(len(evm.returnData)) < end64 {
return nil, ErrReturnDataOutOfBounds return nil, ErrReturnDataOutOfBounds
} }
scope.Memory.Set(memOffset.Uint64(), length.Uint64(), interpreter.returnData[offset64:end64]) scope.Memory.Set(memOffset.Uint64(), length.Uint64(), evm.returnData[offset64:end64])
return nil, nil return nil, nil
} }
func opExtCodeSize(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { func opExtCodeSize(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
slot := scope.Stack.peek() slot := scope.Stack.peek()
slot.SetUint64(uint64(interpreter.evm.StateDB.GetCodeSize(slot.Bytes20()))) slot.SetUint64(uint64(evm.StateDB.GetCodeSize(slot.Bytes20())))
return nil, nil return nil, nil
} }
func opCodeSize(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { func opCodeSize(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
scope.Stack.push(new(uint256.Int).SetUint64(uint64(len(scope.Contract.Code)))) scope.Stack.push(new(uint256.Int).SetUint64(uint64(len(scope.Contract.Code))))
return nil, nil return nil, nil
} }
func opCodeCopy(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { func opCodeCopy(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
var ( var (
memOffset = scope.Stack.pop() memOffset = scope.Stack.pop()
codeOffset = scope.Stack.pop() codeOffset = scope.Stack.pop()
@ -360,7 +359,7 @@ func opCodeCopy(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([
return nil, nil return nil, nil
} }
func opExtCodeCopy(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { func opExtCodeCopy(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
var ( var (
stack = scope.Stack stack = scope.Stack
a = stack.pop() a = stack.pop()
@ -373,7 +372,7 @@ func opExtCodeCopy(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext)
uint64CodeOffset = math.MaxUint64 uint64CodeOffset = math.MaxUint64
} }
addr := common.Address(a.Bytes20()) addr := common.Address(a.Bytes20())
code := interpreter.evm.StateDB.GetCode(addr) code := evm.StateDB.GetCode(addr)
codeCopy := getData(code, uint64CodeOffset, length.Uint64()) codeCopy := getData(code, uint64CodeOffset, length.Uint64())
scope.Memory.Set(memOffset.Uint64(), length.Uint64(), codeCopy) scope.Memory.Set(memOffset.Uint64(), length.Uint64(), codeCopy)
@ -406,24 +405,24 @@ func opExtCodeCopy(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext)
// //
// 6. Caller tries to get the code hash for an account which is marked as deleted, this // 6. Caller tries to get the code hash for an account which is marked as deleted, this
// account should be regarded as a non-existent account and zero should be returned. // account should be regarded as a non-existent account and zero should be returned.
func opExtCodeHash(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { func opExtCodeHash(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
slot := scope.Stack.peek() slot := scope.Stack.peek()
address := common.Address(slot.Bytes20()) address := common.Address(slot.Bytes20())
if interpreter.evm.StateDB.Empty(address) { if evm.StateDB.Empty(address) {
slot.Clear() slot.Clear()
} else { } else {
slot.SetBytes(interpreter.evm.StateDB.GetCodeHash(address).Bytes()) slot.SetBytes(evm.StateDB.GetCodeHash(address).Bytes())
} }
return nil, nil return nil, nil
} }
func opGasprice(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { func opGasprice(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
v, _ := uint256.FromBig(interpreter.evm.GasPrice) v, _ := uint256.FromBig(evm.GasPrice)
scope.Stack.push(v) scope.Stack.push(v)
return nil, nil return nil, nil
} }
func opBlockhash(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { func opBlockhash(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
num := scope.Stack.peek() num := scope.Stack.peek()
num64, overflow := num.Uint64WithOverflow() num64, overflow := num.Uint64WithOverflow()
if overflow { if overflow {
@ -432,18 +431,18 @@ func opBlockhash(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) (
} }
var upper, lower uint64 var upper, lower uint64
upper = interpreter.evm.Context.BlockNumber.Uint64() upper = evm.Context.BlockNumber.Uint64()
if upper < 257 { if upper < 257 {
lower = 0 lower = 0
} else { } else {
lower = upper - 256 lower = upper - 256
} }
if num64 >= lower && num64 < upper { if num64 >= lower && num64 < upper {
res := interpreter.evm.Context.GetHash(num64) res := evm.Context.GetHash(num64)
if witness := interpreter.evm.StateDB.Witness(); witness != nil { if witness := evm.StateDB.Witness(); witness != nil {
witness.AddBlockHash(num64) witness.AddBlockHash(num64)
} }
if tracer := interpreter.evm.Config.Tracer; tracer != nil && tracer.OnBlockHashRead != nil { if tracer := evm.Config.Tracer; tracer != nil && tracer.OnBlockHashRead != nil {
tracer.OnBlockHashRead(num64, res) tracer.OnBlockHashRead(num64, res)
} }
num.SetBytes(res[:]) num.SetBytes(res[:])
@ -453,83 +452,83 @@ func opBlockhash(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) (
return nil, nil return nil, nil
} }
func opCoinbase(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { func opCoinbase(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
scope.Stack.push(new(uint256.Int).SetBytes(interpreter.evm.Context.Coinbase.Bytes())) scope.Stack.push(new(uint256.Int).SetBytes(evm.Context.Coinbase.Bytes()))
return nil, nil return nil, nil
} }
func opTimestamp(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { func opTimestamp(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
scope.Stack.push(new(uint256.Int).SetUint64(interpreter.evm.Context.Time)) scope.Stack.push(new(uint256.Int).SetUint64(evm.Context.Time))
return nil, nil return nil, nil
} }
func opNumber(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { func opNumber(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
v, _ := uint256.FromBig(interpreter.evm.Context.BlockNumber) v, _ := uint256.FromBig(evm.Context.BlockNumber)
scope.Stack.push(v) scope.Stack.push(v)
return nil, nil return nil, nil
} }
func opDifficulty(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { func opDifficulty(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
v, _ := uint256.FromBig(interpreter.evm.Context.Difficulty) v, _ := uint256.FromBig(evm.Context.Difficulty)
scope.Stack.push(v) scope.Stack.push(v)
return nil, nil return nil, nil
} }
func opRandom(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { func opRandom(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
v := new(uint256.Int).SetBytes(interpreter.evm.Context.Random.Bytes()) v := new(uint256.Int).SetBytes(evm.Context.Random.Bytes())
scope.Stack.push(v) scope.Stack.push(v)
return nil, nil return nil, nil
} }
func opGasLimit(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { func opGasLimit(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
scope.Stack.push(new(uint256.Int).SetUint64(interpreter.evm.Context.GasLimit)) scope.Stack.push(new(uint256.Int).SetUint64(evm.Context.GasLimit))
return nil, nil return nil, nil
} }
func opPop(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { func opPop(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
scope.Stack.pop() scope.Stack.pop()
return nil, nil return nil, nil
} }
func opMload(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { func opMload(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
v := scope.Stack.peek() v := scope.Stack.peek()
offset := v.Uint64() offset := v.Uint64()
v.SetBytes(scope.Memory.GetPtr(offset, 32)) v.SetBytes(scope.Memory.GetPtr(offset, 32))
return nil, nil return nil, nil
} }
func opMstore(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { func opMstore(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
mStart, val := scope.Stack.pop(), scope.Stack.pop() mStart, val := scope.Stack.pop(), scope.Stack.pop()
scope.Memory.Set32(mStart.Uint64(), &val) scope.Memory.Set32(mStart.Uint64(), &val)
return nil, nil return nil, nil
} }
func opMstore8(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { func opMstore8(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
off, val := scope.Stack.pop(), scope.Stack.pop() off, val := scope.Stack.pop(), scope.Stack.pop()
scope.Memory.store[off.Uint64()] = byte(val.Uint64()) scope.Memory.store[off.Uint64()] = byte(val.Uint64())
return nil, nil return nil, nil
} }
func opSload(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { func opSload(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
loc := scope.Stack.peek() loc := scope.Stack.peek()
hash := common.Hash(loc.Bytes32()) hash := common.Hash(loc.Bytes32())
val := interpreter.evm.StateDB.GetState(scope.Contract.Address(), hash) val := evm.StateDB.GetState(scope.Contract.Address(), hash)
loc.SetBytes(val.Bytes()) loc.SetBytes(val.Bytes())
return nil, nil return nil, nil
} }
func opSstore(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { func opSstore(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
if interpreter.readOnly { if evm.readOnly {
return nil, ErrWriteProtection return nil, ErrWriteProtection
} }
loc := scope.Stack.pop() loc := scope.Stack.pop()
val := scope.Stack.pop() val := scope.Stack.pop()
interpreter.evm.StateDB.SetState(scope.Contract.Address(), loc.Bytes32(), val.Bytes32()) evm.StateDB.SetState(scope.Contract.Address(), loc.Bytes32(), val.Bytes32())
return nil, nil return nil, nil
} }
func opJump(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { func opJump(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
if interpreter.evm.abort.Load() { if evm.abort.Load() {
return nil, errStopToken return nil, errStopToken
} }
pos := scope.Stack.pop() pos := scope.Stack.pop()
@ -540,8 +539,8 @@ func opJump(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byt
return nil, nil return nil, nil
} }
func opJumpi(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { func opJumpi(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
if interpreter.evm.abort.Load() { if evm.abort.Load() {
return nil, errStopToken return nil, errStopToken
} }
pos, cond := scope.Stack.pop(), scope.Stack.pop() pos, cond := scope.Stack.pop(), scope.Stack.pop()
@ -554,107 +553,107 @@ func opJumpi(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]by
return nil, nil return nil, nil
} }
func opJumpdest(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { func opJumpdest(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
return nil, nil return nil, nil
} }
func opPc(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { func opPc(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
scope.Stack.push(new(uint256.Int).SetUint64(*pc)) scope.Stack.push(new(uint256.Int).SetUint64(*pc))
return nil, nil return nil, nil
} }
func opMsize(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { func opMsize(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
scope.Stack.push(new(uint256.Int).SetUint64(uint64(scope.Memory.Len()))) scope.Stack.push(new(uint256.Int).SetUint64(uint64(scope.Memory.Len())))
return nil, nil return nil, nil
} }
func opGas(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { func opGas(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
scope.Stack.push(new(uint256.Int).SetUint64(scope.Contract.Gas)) scope.Stack.push(new(uint256.Int).SetUint64(scope.Contract.Gas))
return nil, nil return nil, nil
} }
func opSwap1(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { func opSwap1(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
scope.Stack.swap1() scope.Stack.swap1()
return nil, nil return nil, nil
} }
func opSwap2(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { func opSwap2(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
scope.Stack.swap2() scope.Stack.swap2()
return nil, nil return nil, nil
} }
func opSwap3(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { func opSwap3(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
scope.Stack.swap3() scope.Stack.swap3()
return nil, nil return nil, nil
} }
func opSwap4(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { func opSwap4(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
scope.Stack.swap4() scope.Stack.swap4()
return nil, nil return nil, nil
} }
func opSwap5(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { func opSwap5(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
scope.Stack.swap5() scope.Stack.swap5()
return nil, nil return nil, nil
} }
func opSwap6(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { func opSwap6(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
scope.Stack.swap6() scope.Stack.swap6()
return nil, nil return nil, nil
} }
func opSwap7(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { func opSwap7(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
scope.Stack.swap7() scope.Stack.swap7()
return nil, nil return nil, nil
} }
func opSwap8(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { func opSwap8(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
scope.Stack.swap8() scope.Stack.swap8()
return nil, nil return nil, nil
} }
func opSwap9(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { func opSwap9(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
scope.Stack.swap9() scope.Stack.swap9()
return nil, nil return nil, nil
} }
func opSwap10(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { func opSwap10(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
scope.Stack.swap10() scope.Stack.swap10()
return nil, nil return nil, nil
} }
func opSwap11(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { func opSwap11(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
scope.Stack.swap11() scope.Stack.swap11()
return nil, nil return nil, nil
} }
func opSwap12(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { func opSwap12(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
scope.Stack.swap12() scope.Stack.swap12()
return nil, nil return nil, nil
} }
func opSwap13(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { func opSwap13(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
scope.Stack.swap13() scope.Stack.swap13()
return nil, nil return nil, nil
} }
func opSwap14(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { func opSwap14(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
scope.Stack.swap14() scope.Stack.swap14()
return nil, nil return nil, nil
} }
func opSwap15(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { func opSwap15(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
scope.Stack.swap15() scope.Stack.swap15()
return nil, nil return nil, nil
} }
func opSwap16(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { func opSwap16(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
scope.Stack.swap16() scope.Stack.swap16()
return nil, nil return nil, nil
} }
func opCreate(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { func opCreate(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
if interpreter.readOnly { if evm.readOnly {
return nil, ErrWriteProtection return nil, ErrWriteProtection
} }
var ( var (
@ -663,21 +662,21 @@ func opCreate(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]b
input = scope.Memory.GetCopy(offset.Uint64(), size.Uint64()) input = scope.Memory.GetCopy(offset.Uint64(), size.Uint64())
gas = scope.Contract.Gas gas = scope.Contract.Gas
) )
if interpreter.evm.chainRules.IsEIP150 { if evm.chainRules.IsEIP150 {
gas -= gas / 64 gas -= gas / 64
} }
// reuse size int for stackvalue // reuse size int for stackvalue
stackvalue := size stackvalue := size
scope.Contract.UseGas(gas, interpreter.evm.Config.Tracer, tracing.GasChangeCallContractCreation) scope.Contract.UseGas(gas, evm.Config.Tracer, tracing.GasChangeCallContractCreation)
res, addr, returnGas, suberr := interpreter.evm.Create(scope.Contract.Address(), input, gas, &value) res, addr, returnGas, suberr := evm.Create(scope.Contract.Address(), input, gas, &value)
// Push item on the stack based on the returned error. If the ruleset is // Push item on the stack based on the returned error. If the ruleset is
// homestead we must check for CodeStoreOutOfGasError (homestead only // homestead we must check for CodeStoreOutOfGasError (homestead only
// rule) and treat as an error, if the ruleset is frontier we must // rule) and treat as an error, if the ruleset is frontier we must
// ignore this error and pretend the operation was successful. // ignore this error and pretend the operation was successful.
if interpreter.evm.chainRules.IsHomestead && suberr == ErrCodeStoreOutOfGas { if evm.chainRules.IsHomestead && suberr == ErrCodeStoreOutOfGas {
stackvalue.Clear() stackvalue.Clear()
} else if suberr != nil && suberr != ErrCodeStoreOutOfGas { } else if suberr != nil && suberr != ErrCodeStoreOutOfGas {
stackvalue.Clear() stackvalue.Clear()
@ -686,18 +685,18 @@ func opCreate(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]b
} }
scope.Stack.push(&stackvalue) scope.Stack.push(&stackvalue)
scope.Contract.RefundGas(returnGas, interpreter.evm.Config.Tracer, tracing.GasChangeCallLeftOverRefunded) scope.Contract.RefundGas(returnGas, evm.Config.Tracer, tracing.GasChangeCallLeftOverRefunded)
if suberr == ErrExecutionReverted { if suberr == ErrExecutionReverted {
interpreter.returnData = res // set REVERT data to return data buffer evm.returnData = res // set REVERT data to return data buffer
return res, nil return res, nil
} }
interpreter.returnData = nil // clear dirty return data buffer evm.returnData = nil // clear dirty return data buffer
return nil, nil return nil, nil
} }
func opCreate2(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { func opCreate2(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
if interpreter.readOnly { if evm.readOnly {
return nil, ErrWriteProtection return nil, ErrWriteProtection
} }
var ( var (
@ -710,10 +709,10 @@ func opCreate2(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]
// Apply EIP150 // Apply EIP150
gas -= gas / 64 gas -= gas / 64
scope.Contract.UseGas(gas, interpreter.evm.Config.Tracer, tracing.GasChangeCallContractCreation2) scope.Contract.UseGas(gas, evm.Config.Tracer, tracing.GasChangeCallContractCreation2)
// reuse size int for stackvalue // reuse size int for stackvalue
stackvalue := size stackvalue := size
res, addr, returnGas, suberr := interpreter.evm.Create2(scope.Contract.Address(), input, gas, res, addr, returnGas, suberr := evm.Create2(scope.Contract.Address(), input, gas,
&endowment, &salt) &endowment, &salt)
// Push item on the stack based on the returned error. // Push item on the stack based on the returned error.
if suberr != nil { if suberr != nil {
@ -722,35 +721,35 @@ func opCreate2(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]
stackvalue.SetBytes(addr.Bytes()) stackvalue.SetBytes(addr.Bytes())
} }
scope.Stack.push(&stackvalue) scope.Stack.push(&stackvalue)
scope.Contract.RefundGas(returnGas, interpreter.evm.Config.Tracer, tracing.GasChangeCallLeftOverRefunded) scope.Contract.RefundGas(returnGas, evm.Config.Tracer, tracing.GasChangeCallLeftOverRefunded)
if suberr == ErrExecutionReverted { if suberr == ErrExecutionReverted {
interpreter.returnData = res // set REVERT data to return data buffer evm.returnData = res // set REVERT data to return data buffer
return res, nil return res, nil
} }
interpreter.returnData = nil // clear dirty return data buffer evm.returnData = nil // clear dirty return data buffer
return nil, nil return nil, nil
} }
func opCall(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { func opCall(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
stack := scope.Stack stack := scope.Stack
// Pop gas. The actual gas in interpreter.evm.callGasTemp. // Pop gas. The actual gas in evm.callGasTemp.
// We can use this as a temporary value // We can use this as a temporary value
temp := stack.pop() temp := stack.pop()
gas := interpreter.evm.callGasTemp gas := evm.callGasTemp
// Pop other call parameters. // Pop other call parameters.
addr, value, inOffset, inSize, retOffset, retSize := stack.pop(), stack.pop(), stack.pop(), stack.pop(), stack.pop(), stack.pop() addr, value, inOffset, inSize, retOffset, retSize := stack.pop(), stack.pop(), stack.pop(), stack.pop(), stack.pop(), stack.pop()
toAddr := common.Address(addr.Bytes20()) toAddr := common.Address(addr.Bytes20())
// Get the arguments from the memory. // Get the arguments from the memory.
args := scope.Memory.GetPtr(inOffset.Uint64(), inSize.Uint64()) args := scope.Memory.GetPtr(inOffset.Uint64(), inSize.Uint64())
if interpreter.readOnly && !value.IsZero() { if evm.readOnly && !value.IsZero() {
return nil, ErrWriteProtection return nil, ErrWriteProtection
} }
if !value.IsZero() { if !value.IsZero() {
gas += params.CallStipend gas += params.CallStipend
} }
ret, returnGas, err := interpreter.evm.Call(scope.Contract.Address(), toAddr, args, gas, &value) ret, returnGas, err := evm.Call(scope.Contract.Address(), toAddr, args, gas, &value)
if err != nil { if err != nil {
temp.Clear() temp.Clear()
@ -762,18 +761,18 @@ func opCall(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byt
scope.Memory.Set(retOffset.Uint64(), retSize.Uint64(), ret) scope.Memory.Set(retOffset.Uint64(), retSize.Uint64(), ret)
} }
scope.Contract.RefundGas(returnGas, interpreter.evm.Config.Tracer, tracing.GasChangeCallLeftOverRefunded) scope.Contract.RefundGas(returnGas, evm.Config.Tracer, tracing.GasChangeCallLeftOverRefunded)
interpreter.returnData = ret evm.returnData = ret
return ret, nil return ret, nil
} }
func opCallCode(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { func opCallCode(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
// Pop gas. The actual gas is in interpreter.evm.callGasTemp. // Pop gas. The actual gas is in evm.callGasTemp.
stack := scope.Stack stack := scope.Stack
// We use it as a temporary value // We use it as a temporary value
temp := stack.pop() temp := stack.pop()
gas := interpreter.evm.callGasTemp gas := evm.callGasTemp
// Pop other call parameters. // Pop other call parameters.
addr, value, inOffset, inSize, retOffset, retSize := stack.pop(), stack.pop(), stack.pop(), stack.pop(), stack.pop(), stack.pop() addr, value, inOffset, inSize, retOffset, retSize := stack.pop(), stack.pop(), stack.pop(), stack.pop(), stack.pop(), stack.pop()
toAddr := common.Address(addr.Bytes20()) toAddr := common.Address(addr.Bytes20())
@ -784,7 +783,7 @@ func opCallCode(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([
gas += params.CallStipend gas += params.CallStipend
} }
ret, returnGas, err := interpreter.evm.CallCode(scope.Contract.Address(), toAddr, args, gas, &value) ret, returnGas, err := evm.CallCode(scope.Contract.Address(), toAddr, args, gas, &value)
if err != nil { if err != nil {
temp.Clear() temp.Clear()
} else { } else {
@ -795,25 +794,25 @@ func opCallCode(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([
scope.Memory.Set(retOffset.Uint64(), retSize.Uint64(), ret) scope.Memory.Set(retOffset.Uint64(), retSize.Uint64(), ret)
} }
scope.Contract.RefundGas(returnGas, interpreter.evm.Config.Tracer, tracing.GasChangeCallLeftOverRefunded) scope.Contract.RefundGas(returnGas, evm.Config.Tracer, tracing.GasChangeCallLeftOverRefunded)
interpreter.returnData = ret evm.returnData = ret
return ret, nil return ret, nil
} }
func opDelegateCall(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { func opDelegateCall(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
stack := scope.Stack stack := scope.Stack
// Pop gas. The actual gas is in interpreter.evm.callGasTemp. // Pop gas. The actual gas is in evm.callGasTemp.
// We use it as a temporary value // We use it as a temporary value
temp := stack.pop() temp := stack.pop()
gas := interpreter.evm.callGasTemp gas := evm.callGasTemp
// Pop other call parameters. // Pop other call parameters.
addr, inOffset, inSize, retOffset, retSize := stack.pop(), stack.pop(), stack.pop(), stack.pop(), stack.pop() addr, inOffset, inSize, retOffset, retSize := stack.pop(), stack.pop(), stack.pop(), stack.pop(), stack.pop()
toAddr := common.Address(addr.Bytes20()) toAddr := common.Address(addr.Bytes20())
// Get arguments from the memory. // Get arguments from the memory.
args := scope.Memory.GetPtr(inOffset.Uint64(), inSize.Uint64()) args := scope.Memory.GetPtr(inOffset.Uint64(), inSize.Uint64())
ret, returnGas, err := interpreter.evm.DelegateCall(scope.Contract.Caller(), scope.Contract.Address(), toAddr, args, gas, scope.Contract.value) ret, returnGas, err := evm.DelegateCall(scope.Contract.Caller(), scope.Contract.Address(), toAddr, args, gas, scope.Contract.value)
if err != nil { if err != nil {
temp.Clear() temp.Clear()
} else { } else {
@ -824,25 +823,25 @@ func opDelegateCall(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext
scope.Memory.Set(retOffset.Uint64(), retSize.Uint64(), ret) scope.Memory.Set(retOffset.Uint64(), retSize.Uint64(), ret)
} }
scope.Contract.RefundGas(returnGas, interpreter.evm.Config.Tracer, tracing.GasChangeCallLeftOverRefunded) scope.Contract.RefundGas(returnGas, evm.Config.Tracer, tracing.GasChangeCallLeftOverRefunded)
interpreter.returnData = ret evm.returnData = ret
return ret, nil return ret, nil
} }
func opStaticCall(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { func opStaticCall(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
// Pop gas. The actual gas is in interpreter.evm.callGasTemp. // Pop gas. The actual gas is in evm.callGasTemp.
stack := scope.Stack stack := scope.Stack
// We use it as a temporary value // We use it as a temporary value
temp := stack.pop() temp := stack.pop()
gas := interpreter.evm.callGasTemp gas := evm.callGasTemp
// Pop other call parameters. // Pop other call parameters.
addr, inOffset, inSize, retOffset, retSize := stack.pop(), stack.pop(), stack.pop(), stack.pop(), stack.pop() addr, inOffset, inSize, retOffset, retSize := stack.pop(), stack.pop(), stack.pop(), stack.pop(), stack.pop()
toAddr := common.Address(addr.Bytes20()) toAddr := common.Address(addr.Bytes20())
// Get arguments from the memory. // Get arguments from the memory.
args := scope.Memory.GetPtr(inOffset.Uint64(), inSize.Uint64()) args := scope.Memory.GetPtr(inOffset.Uint64(), inSize.Uint64())
ret, returnGas, err := interpreter.evm.StaticCall(scope.Contract.Address(), toAddr, args, gas) ret, returnGas, err := evm.StaticCall(scope.Contract.Address(), toAddr, args, gas)
if err != nil { if err != nil {
temp.Clear() temp.Clear()
} else { } else {
@ -853,69 +852,69 @@ func opStaticCall(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext)
scope.Memory.Set(retOffset.Uint64(), retSize.Uint64(), ret) scope.Memory.Set(retOffset.Uint64(), retSize.Uint64(), ret)
} }
scope.Contract.RefundGas(returnGas, interpreter.evm.Config.Tracer, tracing.GasChangeCallLeftOverRefunded) scope.Contract.RefundGas(returnGas, evm.Config.Tracer, tracing.GasChangeCallLeftOverRefunded)
interpreter.returnData = ret evm.returnData = ret
return ret, nil return ret, nil
} }
func opReturn(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { func opReturn(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
offset, size := scope.Stack.pop(), scope.Stack.pop() offset, size := scope.Stack.pop(), scope.Stack.pop()
ret := scope.Memory.GetCopy(offset.Uint64(), size.Uint64()) ret := scope.Memory.GetCopy(offset.Uint64(), size.Uint64())
return ret, errStopToken return ret, errStopToken
} }
func opRevert(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { func opRevert(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
offset, size := scope.Stack.pop(), scope.Stack.pop() offset, size := scope.Stack.pop(), scope.Stack.pop()
ret := scope.Memory.GetCopy(offset.Uint64(), size.Uint64()) ret := scope.Memory.GetCopy(offset.Uint64(), size.Uint64())
interpreter.returnData = ret evm.returnData = ret
return ret, ErrExecutionReverted return ret, ErrExecutionReverted
} }
func opUndefined(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { func opUndefined(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
return nil, &ErrInvalidOpCode{opcode: OpCode(scope.Contract.Code[*pc])} return nil, &ErrInvalidOpCode{opcode: OpCode(scope.Contract.Code[*pc])}
} }
func opStop(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { func opStop(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
return nil, errStopToken return nil, errStopToken
} }
func opSelfdestruct(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { func opSelfdestruct(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
if interpreter.readOnly { if evm.readOnly {
return nil, ErrWriteProtection return nil, ErrWriteProtection
} }
beneficiary := scope.Stack.pop() beneficiary := scope.Stack.pop()
balance := interpreter.evm.StateDB.GetBalance(scope.Contract.Address()) balance := evm.StateDB.GetBalance(scope.Contract.Address())
interpreter.evm.StateDB.AddBalance(beneficiary.Bytes20(), balance, tracing.BalanceIncreaseSelfdestruct) evm.StateDB.AddBalance(beneficiary.Bytes20(), balance, tracing.BalanceIncreaseSelfdestruct)
interpreter.evm.StateDB.SelfDestruct(scope.Contract.Address()) evm.StateDB.SelfDestruct(scope.Contract.Address())
if tracer := interpreter.evm.Config.Tracer; tracer != nil { if tracer := evm.Config.Tracer; tracer != nil {
if tracer.OnEnter != nil { if tracer.OnEnter != nil {
tracer.OnEnter(interpreter.evm.depth, byte(SELFDESTRUCT), scope.Contract.Address(), beneficiary.Bytes20(), []byte{}, 0, balance.ToBig()) tracer.OnEnter(evm.depth, byte(SELFDESTRUCT), scope.Contract.Address(), beneficiary.Bytes20(), []byte{}, 0, balance.ToBig())
} }
if tracer.OnExit != nil { if tracer.OnExit != nil {
tracer.OnExit(interpreter.evm.depth, []byte{}, 0, nil, false) tracer.OnExit(evm.depth, []byte{}, 0, nil, false)
} }
} }
return nil, errStopToken return nil, errStopToken
} }
func opSelfdestruct6780(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { func opSelfdestruct6780(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
if interpreter.readOnly { if evm.readOnly {
return nil, ErrWriteProtection return nil, ErrWriteProtection
} }
beneficiary := scope.Stack.pop() beneficiary := scope.Stack.pop()
balance := interpreter.evm.StateDB.GetBalance(scope.Contract.Address()) balance := evm.StateDB.GetBalance(scope.Contract.Address())
interpreter.evm.StateDB.SubBalance(scope.Contract.Address(), balance, tracing.BalanceDecreaseSelfdestruct) evm.StateDB.SubBalance(scope.Contract.Address(), balance, tracing.BalanceDecreaseSelfdestruct)
interpreter.evm.StateDB.AddBalance(beneficiary.Bytes20(), balance, tracing.BalanceIncreaseSelfdestruct) evm.StateDB.AddBalance(beneficiary.Bytes20(), balance, tracing.BalanceIncreaseSelfdestruct)
interpreter.evm.StateDB.SelfDestruct6780(scope.Contract.Address()) evm.StateDB.SelfDestruct6780(scope.Contract.Address())
if tracer := interpreter.evm.Config.Tracer; tracer != nil { if tracer := evm.Config.Tracer; tracer != nil {
if tracer.OnEnter != nil { if tracer.OnEnter != nil {
tracer.OnEnter(interpreter.evm.depth, byte(SELFDESTRUCT), scope.Contract.Address(), beneficiary.Bytes20(), []byte{}, 0, balance.ToBig()) tracer.OnEnter(evm.depth, byte(SELFDESTRUCT), scope.Contract.Address(), beneficiary.Bytes20(), []byte{}, 0, balance.ToBig())
} }
if tracer.OnExit != nil { if tracer.OnExit != nil {
tracer.OnExit(interpreter.evm.depth, []byte{}, 0, nil, false) tracer.OnExit(evm.depth, []byte{}, 0, nil, false)
} }
} }
return nil, errStopToken return nil, errStopToken
@ -925,8 +924,8 @@ func opSelfdestruct6780(pc *uint64, interpreter *EVMInterpreter, scope *ScopeCon
// make log instruction function // make log instruction function
func makeLog(size int) executionFunc { func makeLog(size int) executionFunc {
return func(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { return func(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
if interpreter.readOnly { if evm.readOnly {
return nil, ErrWriteProtection return nil, ErrWriteProtection
} }
topics := make([]common.Hash, size) topics := make([]common.Hash, size)
@ -938,13 +937,13 @@ func makeLog(size int) executionFunc {
} }
d := scope.Memory.GetCopy(mStart.Uint64(), mSize.Uint64()) d := scope.Memory.GetCopy(mStart.Uint64(), mSize.Uint64())
interpreter.evm.StateDB.AddLog(&types.Log{ evm.StateDB.AddLog(&types.Log{
Address: scope.Contract.Address(), Address: scope.Contract.Address(),
Topics: topics, Topics: topics,
Data: d, Data: d,
// This is a non-consensus field, but assigned here because // This is a non-consensus field, but assigned here because
// core/state doesn't know the current block number. // core/state doesn't know the current block number.
BlockNumber: interpreter.evm.Context.BlockNumber.Uint64(), BlockNumber: evm.Context.BlockNumber.Uint64(),
}) })
return nil, nil return nil, nil
@ -952,7 +951,7 @@ func makeLog(size int) executionFunc {
} }
// opPush1 is a specialized version of pushN // opPush1 is a specialized version of pushN
func opPush1(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { func opPush1(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
var ( var (
codeLen = uint64(len(scope.Contract.Code)) codeLen = uint64(len(scope.Contract.Code))
integer = new(uint256.Int) integer = new(uint256.Int)
@ -967,7 +966,7 @@ func opPush1(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]by
} }
// opPush2 is a specialized version of pushN // opPush2 is a specialized version of pushN
func opPush2(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { func opPush2(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
var ( var (
codeLen = uint64(len(scope.Contract.Code)) codeLen = uint64(len(scope.Contract.Code))
integer = new(uint256.Int) integer = new(uint256.Int)
@ -985,7 +984,7 @@ func opPush2(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]by
// make push instruction function // make push instruction function
func makePush(size uint64, pushByteSize int) executionFunc { func makePush(size uint64, pushByteSize int) executionFunc {
return func(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { return func(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
var ( var (
codeLen = len(scope.Contract.Code) codeLen = len(scope.Contract.Code)
start = min(codeLen, int(*pc+1)) start = min(codeLen, int(*pc+1))
@ -1005,7 +1004,7 @@ func makePush(size uint64, pushByteSize int) executionFunc {
// make dup instruction function // make dup instruction function
func makeDup(size int64) executionFunc { func makeDup(size int64) executionFunc {
return func(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { return func(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
scope.Stack.dup(int(size)) scope.Stack.dup(int(size))
return nil, nil return nil, nil
} }

View file

@ -107,7 +107,7 @@ func testTwoOperandOp(t *testing.T, tests []TwoOperandTestcase, opFn executionFu
expected := new(uint256.Int).SetBytes(common.Hex2Bytes(test.Expected)) expected := new(uint256.Int).SetBytes(common.Hex2Bytes(test.Expected))
stack.push(x) stack.push(x)
stack.push(y) stack.push(y)
opFn(&pc, evm.interpreter, &ScopeContext{nil, stack, nil}) opFn(&pc, evm, &ScopeContext{nil, stack, nil})
if len(stack.data) != 1 { if len(stack.data) != 1 {
t.Errorf("Expected one item on stack after %v, got %d: ", name, len(stack.data)) t.Errorf("Expected one item on stack after %v, got %d: ", name, len(stack.data))
} }
@ -221,7 +221,7 @@ func TestAddMod(t *testing.T) {
stack.push(z) stack.push(z)
stack.push(y) stack.push(y)
stack.push(x) stack.push(x)
opAddmod(&pc, evm.interpreter, &ScopeContext{nil, stack, nil}) opAddmod(&pc, evm, &ScopeContext{nil, stack, nil})
actual := stack.pop() actual := stack.pop()
if actual.Cmp(expected) != 0 { if actual.Cmp(expected) != 0 {
t.Errorf("Testcase %d, expected %x, got %x", i, expected, actual) t.Errorf("Testcase %d, expected %x, got %x", i, expected, actual)
@ -247,7 +247,7 @@ func TestWriteExpectedValues(t *testing.T) {
y := new(uint256.Int).SetBytes(common.Hex2Bytes(param.y)) y := new(uint256.Int).SetBytes(common.Hex2Bytes(param.y))
stack.push(x) stack.push(x)
stack.push(y) stack.push(y)
opFn(&pc, evm.interpreter, &ScopeContext{nil, stack, nil}) opFn(&pc, evm, &ScopeContext{nil, stack, nil})
actual := stack.pop() actual := stack.pop()
result[i] = TwoOperandTestcase{param.x, param.y, fmt.Sprintf("%064x", actual)} result[i] = TwoOperandTestcase{param.x, param.y, fmt.Sprintf("%064x", actual)}
} }
@ -296,7 +296,7 @@ func opBenchmark(bench *testing.B, op executionFunc, args ...string) {
for _, arg := range intArgs { for _, arg := range intArgs {
stack.push(arg) stack.push(arg)
} }
op(&pc, evm.interpreter, scope) op(&pc, evm, scope)
stack.pop() stack.pop()
} }
bench.StopTimer() bench.StopTimer()
@ -528,13 +528,13 @@ func TestOpMstore(t *testing.T) {
v := "abcdef00000000000000abba000000000deaf000000c0de00100000000133700" v := "abcdef00000000000000abba000000000deaf000000c0de00100000000133700"
stack.push(new(uint256.Int).SetBytes(common.Hex2Bytes(v))) stack.push(new(uint256.Int).SetBytes(common.Hex2Bytes(v)))
stack.push(new(uint256.Int)) stack.push(new(uint256.Int))
opMstore(&pc, evm.interpreter, &ScopeContext{mem, stack, nil}) opMstore(&pc, evm, &ScopeContext{mem, stack, nil})
if got := common.Bytes2Hex(mem.GetCopy(0, 32)); got != v { if got := common.Bytes2Hex(mem.GetCopy(0, 32)); got != v {
t.Fatalf("Mstore fail, got %v, expected %v", got, v) t.Fatalf("Mstore fail, got %v, expected %v", got, v)
} }
stack.push(new(uint256.Int).SetUint64(0x1)) stack.push(new(uint256.Int).SetUint64(0x1))
stack.push(new(uint256.Int)) stack.push(new(uint256.Int))
opMstore(&pc, evm.interpreter, &ScopeContext{mem, stack, nil}) opMstore(&pc, evm, &ScopeContext{mem, stack, nil})
if common.Bytes2Hex(mem.GetCopy(0, 32)) != "0000000000000000000000000000000000000000000000000000000000000001" { if common.Bytes2Hex(mem.GetCopy(0, 32)) != "0000000000000000000000000000000000000000000000000000000000000001" {
t.Fatalf("Mstore failed to overwrite previous value") t.Fatalf("Mstore failed to overwrite previous value")
} }
@ -555,7 +555,7 @@ func BenchmarkOpMstore(bench *testing.B) {
for i := 0; i < bench.N; i++ { for i := 0; i < bench.N; i++ {
stack.push(value) stack.push(value)
stack.push(memStart) stack.push(memStart)
opMstore(&pc, evm.interpreter, &ScopeContext{mem, stack, nil}) opMstore(&pc, evm, &ScopeContext{mem, stack, nil})
} }
} }
@ -581,14 +581,14 @@ func TestOpTstore(t *testing.T) {
stack.push(new(uint256.Int).SetBytes(value)) stack.push(new(uint256.Int).SetBytes(value))
// push the location to the stack // push the location to the stack
stack.push(new(uint256.Int)) stack.push(new(uint256.Int))
opTstore(&pc, evm.interpreter, &scopeContext) opTstore(&pc, evm, &scopeContext)
// there should be no elements on the stack after TSTORE // there should be no elements on the stack after TSTORE
if stack.len() != 0 { if stack.len() != 0 {
t.Fatal("stack wrong size") t.Fatal("stack wrong size")
} }
// push the location to the stack // push the location to the stack
stack.push(new(uint256.Int)) stack.push(new(uint256.Int))
opTload(&pc, evm.interpreter, &scopeContext) opTload(&pc, evm, &scopeContext)
// there should be one element on the stack after TLOAD // there should be one element on the stack after TLOAD
if stack.len() != 1 { if stack.len() != 1 {
t.Fatal("stack wrong size") t.Fatal("stack wrong size")
@ -613,7 +613,7 @@ func BenchmarkOpKeccak256(bench *testing.B) {
for i := 0; i < bench.N; i++ { for i := 0; i < bench.N; i++ {
stack.push(uint256.NewInt(32)) stack.push(uint256.NewInt(32))
stack.push(start) stack.push(start)
opKeccak256(&pc, evm.interpreter, &ScopeContext{mem, stack, nil}) opKeccak256(&pc, evm, &ScopeContext{mem, stack, nil})
} }
} }
@ -707,7 +707,7 @@ func TestRandom(t *testing.T) {
stack = newstack() stack = newstack()
pc = uint64(0) pc = uint64(0)
) )
opRandom(&pc, evm.interpreter, &ScopeContext{nil, stack, nil}) opRandom(&pc, evm, &ScopeContext{nil, stack, nil})
if len(stack.data) != 1 { if len(stack.data) != 1 {
t.Errorf("Expected one item on stack after %v, got %d: ", tt.name, len(stack.data)) t.Errorf("Expected one item on stack after %v, got %d: ", tt.name, len(stack.data))
} }
@ -749,7 +749,7 @@ func TestBlobHash(t *testing.T) {
) )
evm.SetTxContext(TxContext{BlobHashes: tt.hashes}) evm.SetTxContext(TxContext{BlobHashes: tt.hashes})
stack.push(uint256.NewInt(tt.idx)) stack.push(uint256.NewInt(tt.idx))
opBlobHash(&pc, evm.interpreter, &ScopeContext{nil, stack, nil}) opBlobHash(&pc, evm, &ScopeContext{nil, stack, nil})
if len(stack.data) != 1 { if len(stack.data) != 1 {
t.Errorf("Expected one item on stack after %v, got %d: ", tt.name, len(stack.data)) t.Errorf("Expected one item on stack after %v, got %d: ", tt.name, len(stack.data))
} }
@ -889,7 +889,7 @@ func TestOpMCopy(t *testing.T) {
mem.Resize(memorySize) mem.Resize(memorySize)
} }
// Do the copy // Do the copy
opMcopy(&pc, evm.interpreter, &ScopeContext{mem, stack, nil}) opMcopy(&pc, evm, &ScopeContext{mem, stack, nil})
want := common.FromHex(strings.ReplaceAll(tc.want, " ", "")) want := common.FromHex(strings.ReplaceAll(tc.want, " ", ""))
if have := mem.store; !bytes.Equal(want, have) { if have := mem.store; !bytes.Equal(want, have) {
t.Errorf("case %d: \nwant: %#x\nhave: %#x\n", i, want, have) t.Errorf("case %d: \nwant: %#x\nhave: %#x\n", i, want, have)
@ -1001,7 +1001,7 @@ func TestOpCLZ(t *testing.T) {
} }
stack.push(val) stack.push(val)
opCLZ(&pc, evm.interpreter, &ScopeContext{Stack: stack}) opCLZ(&pc, evm, &ScopeContext{Stack: stack})
if gotLen := stack.len(); gotLen != 1 { if gotLen := stack.len(); gotLen != 1 {
t.Fatalf("stack length = %d; want 1", gotLen) t.Fatalf("stack length = %d; want 1", gotLen)

View file

@ -22,8 +22,6 @@ import (
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/math" "github.com/ethereum/go-ethereum/common/math"
"github.com/ethereum/go-ethereum/core/tracing" "github.com/ethereum/go-ethereum/core/tracing"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/log"
"github.com/holiman/uint256" "github.com/holiman/uint256"
) )
@ -89,93 +87,27 @@ func (ctx *ScopeContext) ContractCode() []byte {
return ctx.Contract.Code return ctx.Contract.Code
} }
// EVMInterpreter represents an EVM interpreter
type EVMInterpreter struct {
evm *EVM
table *JumpTable
hasher crypto.KeccakState // Keccak256 hasher instance shared across opcodes
hasherBuf common.Hash // Keccak256 hasher result array shared across opcodes
readOnly bool // Whether to throw on stateful modifications
returnData []byte // Last CALL's return data for subsequent reuse
}
// NewEVMInterpreter returns a new instance of the Interpreter.
func NewEVMInterpreter(evm *EVM) *EVMInterpreter {
// If jump table was not initialised we set the default one.
var table *JumpTable
switch {
case evm.chainRules.IsOsaka:
table = &osakaInstructionSet
case evm.chainRules.IsVerkle:
// TODO replace with proper instruction set when fork is specified
table = &verkleInstructionSet
case evm.chainRules.IsPrague:
table = &pragueInstructionSet
case evm.chainRules.IsCancun:
table = &cancunInstructionSet
case evm.chainRules.IsShanghai:
table = &shanghaiInstructionSet
case evm.chainRules.IsMerge:
table = &mergeInstructionSet
case evm.chainRules.IsLondon:
table = &londonInstructionSet
case evm.chainRules.IsBerlin:
table = &berlinInstructionSet
case evm.chainRules.IsIstanbul:
table = &istanbulInstructionSet
case evm.chainRules.IsConstantinople:
table = &constantinopleInstructionSet
case evm.chainRules.IsByzantium:
table = &byzantiumInstructionSet
case evm.chainRules.IsEIP158:
table = &spuriousDragonInstructionSet
case evm.chainRules.IsEIP150:
table = &tangerineWhistleInstructionSet
case evm.chainRules.IsHomestead:
table = &homesteadInstructionSet
default:
table = &frontierInstructionSet
}
var extraEips []int
if len(evm.Config.ExtraEips) > 0 {
// Deep-copy jumptable to prevent modification of opcodes in other tables
table = copyJumpTable(table)
}
for _, eip := range evm.Config.ExtraEips {
if err := EnableEIP(eip, table); err != nil {
// Disable it, so caller can check if it's activated or not
log.Error("EIP activation failed", "eip", eip, "error", err)
} else {
extraEips = append(extraEips, eip)
}
}
evm.Config.ExtraEips = extraEips
return &EVMInterpreter{evm: evm, table: table, hasher: crypto.NewKeccakState()}
}
// Run loops and evaluates the contract's code with the given input data and returns // Run loops and evaluates the contract's code with the given input data and returns
// the return byte-slice and an error if one occurred. // the return byte-slice and an error if one occurred.
// //
// It's important to note that any errors returned by the interpreter should be // It's important to note that any errors returned by the interpreter should be
// considered a revert-and-consume-all-gas operation except for // considered a revert-and-consume-all-gas operation except for
// ErrExecutionReverted which means revert-and-keep-gas-left. // ErrExecutionReverted which means revert-and-keep-gas-left.
func (in *EVMInterpreter) Run(contract *Contract, input []byte, readOnly bool) (ret []byte, err error) { func (evm *EVM) Run(contract *Contract, input []byte, readOnly bool) (ret []byte, err error) {
// Increment the call depth which is restricted to 1024 // Increment the call depth which is restricted to 1024
in.evm.depth++ evm.depth++
defer func() { in.evm.depth-- }() defer func() { evm.depth-- }()
// Make sure the readOnly is only set if we aren't in readOnly yet. // Make sure the readOnly is only set if we aren't in readOnly yet.
// This also makes sure that the readOnly flag isn't removed for child calls. // This also makes sure that the readOnly flag isn't removed for child calls.
if readOnly && !in.readOnly { if readOnly && !evm.readOnly {
in.readOnly = true evm.readOnly = true
defer func() { in.readOnly = false }() defer func() { evm.readOnly = false }()
} }
// Reset the previous call's return data. It's unimportant to preserve the old buffer // Reset the previous call's return data. It's unimportant to preserve the old buffer
// as every returning call will return new data anyway. // as every returning call will return new data anyway.
in.returnData = nil evm.returnData = nil
// Don't bother with the execution if there's no code. // Don't bother with the execution if there's no code.
if len(contract.Code) == 0 { if len(contract.Code) == 0 {
@ -184,7 +116,7 @@ func (in *EVMInterpreter) Run(contract *Contract, input []byte, readOnly bool) (
var ( var (
op OpCode // current opcode op OpCode // current opcode
jumpTable *JumpTable = in.table jumpTable *JumpTable = evm.table
mem = NewMemory() // bound memory mem = NewMemory() // bound memory
stack = newstack() // local stack stack = newstack() // local stack
callContext = &ScopeContext{ callContext = &ScopeContext{
@ -198,11 +130,12 @@ func (in *EVMInterpreter) Run(contract *Contract, input []byte, readOnly bool) (
pc = uint64(0) // program counter pc = uint64(0) // program counter
cost uint64 cost uint64
// copies used by tracer // copies used by tracer
pcCopy uint64 // needed for the deferred EVMLogger pcCopy uint64 // needed for the deferred EVMLogger
gasCopy uint64 // for EVMLogger to log gas remaining before execution gasCopy uint64 // for EVMLogger to log gas remaining before execution
logged bool // deferred EVMLogger should ignore already logged steps logged bool // deferred EVMLogger should ignore already logged steps
res []byte // result of the opcode execution function res []byte // result of the opcode execution function
debug = in.evm.Config.Tracer != nil debug = evm.Config.Tracer != nil
isEIP4762 = evm.chainRules.IsEIP4762
) )
// Don't move this deferred function, it's placed before the OnOpcode-deferred method, // Don't move this deferred function, it's placed before the OnOpcode-deferred method,
// so that it gets executed _after_: the OnOpcode needs the stacks before // so that it gets executed _after_: the OnOpcode needs the stacks before
@ -218,11 +151,11 @@ func (in *EVMInterpreter) Run(contract *Contract, input []byte, readOnly bool) (
if err == nil { if err == nil {
return return
} }
if !logged && in.evm.Config.Tracer.OnOpcode != nil { if !logged && evm.Config.Tracer.OnOpcode != nil {
in.evm.Config.Tracer.OnOpcode(pcCopy, byte(op), gasCopy, cost, callContext, in.returnData, in.evm.depth, VMErrorFromErr(err)) evm.Config.Tracer.OnOpcode(pcCopy, byte(op), gasCopy, cost, callContext, evm.returnData, evm.depth, VMErrorFromErr(err))
} }
if logged && in.evm.Config.Tracer.OnFault != nil { if logged && evm.Config.Tracer.OnFault != nil {
in.evm.Config.Tracer.OnFault(pcCopy, byte(op), gasCopy, cost, callContext, in.evm.depth, VMErrorFromErr(err)) evm.Config.Tracer.OnFault(pcCopy, byte(op), gasCopy, cost, callContext, evm.depth, VMErrorFromErr(err))
} }
}() }()
} }
@ -237,12 +170,12 @@ func (in *EVMInterpreter) Run(contract *Contract, input []byte, readOnly bool) (
logged, pcCopy, gasCopy = false, pc, contract.Gas logged, pcCopy, gasCopy = false, pc, contract.Gas
} }
if in.evm.chainRules.IsEIP4762 && !contract.IsDeployment && !contract.IsSystemCall { if isEIP4762 && !contract.IsDeployment && !contract.IsSystemCall {
// if the PC ends up in a new "chunk" of verkleized code, charge the // if the PC ends up in a new "chunk" of verkleized code, charge the
// associated costs. // associated costs.
contractAddr := contract.Address() contractAddr := contract.Address()
consumed, wanted := in.evm.TxContext.AccessEvents.CodeChunksRangeGas(contractAddr, pc, 1, uint64(len(contract.Code)), false, contract.Gas) consumed, wanted := evm.TxContext.AccessEvents.CodeChunksRangeGas(contractAddr, pc, 1, uint64(len(contract.Code)), false, contract.Gas)
contract.UseGas(consumed, in.evm.Config.Tracer, tracing.GasChangeWitnessCodeChunk) contract.UseGas(consumed, evm.Config.Tracer, tracing.GasChangeWitnessCodeChunk)
if consumed < wanted { if consumed < wanted {
return nil, ErrOutOfGas return nil, ErrOutOfGas
} }
@ -287,7 +220,7 @@ func (in *EVMInterpreter) Run(contract *Contract, input []byte, readOnly bool) (
// Consume the gas and return an error if not enough gas is available. // Consume the gas and return an error if not enough gas is available.
// cost is explicitly set so that the capture state defer method can get the proper cost // cost is explicitly set so that the capture state defer method can get the proper cost
var dynamicCost uint64 var dynamicCost uint64
dynamicCost, err = operation.dynamicGas(in.evm, contract, stack, mem, memorySize) dynamicCost, err = operation.dynamicGas(evm, contract, stack, mem, memorySize)
cost += dynamicCost // for tracing cost += dynamicCost // for tracing
if err != nil { if err != nil {
return nil, fmt.Errorf("%w: %v", ErrOutOfGas, err) return nil, fmt.Errorf("%w: %v", ErrOutOfGas, err)
@ -302,11 +235,11 @@ func (in *EVMInterpreter) Run(contract *Contract, input []byte, readOnly bool) (
// Do tracing before potential memory expansion // Do tracing before potential memory expansion
if debug { if debug {
if in.evm.Config.Tracer.OnGasChange != nil { if evm.Config.Tracer.OnGasChange != nil {
in.evm.Config.Tracer.OnGasChange(gasCopy, gasCopy-cost, tracing.GasChangeCallOpCode) evm.Config.Tracer.OnGasChange(gasCopy, gasCopy-cost, tracing.GasChangeCallOpCode)
} }
if in.evm.Config.Tracer.OnOpcode != nil { if evm.Config.Tracer.OnOpcode != nil {
in.evm.Config.Tracer.OnOpcode(pc, byte(op), gasCopy, cost, callContext, in.returnData, in.evm.depth, VMErrorFromErr(err)) evm.Config.Tracer.OnOpcode(pc, byte(op), gasCopy, cost, callContext, evm.returnData, evm.depth, VMErrorFromErr(err))
logged = true logged = true
} }
} }
@ -315,7 +248,7 @@ func (in *EVMInterpreter) Run(contract *Contract, input []byte, readOnly bool) (
} }
// execute the operation // execute the operation
res, err = operation.execute(&pc, in, callContext) res, err = operation.execute(&pc, evm, callContext)
if err != nil { if err != nil {
break break
} }

View file

@ -23,7 +23,7 @@ import (
) )
type ( type (
executionFunc func(pc *uint64, interpreter *EVMInterpreter, callContext *ScopeContext) ([]byte, error) executionFunc func(pc *uint64, evm *EVM, callContext *ScopeContext) ([]byte, error)
gasFunc func(*EVM, *Contract, *Stack, *Memory, uint64) (uint64, error) // last parameter is the requested memory size as a uint64 gasFunc func(*EVM, *Contract, *Stack, *Memory, uint64) (uint64, error) // last parameter is the requested memory size as a uint64
// memorySizeFunc returns the required size, and whether the operation overflowed a uint64 // memorySizeFunc returns the required size, and whether the operation overflowed a uint64
memorySizeFunc func(*Stack) (size uint64, overflow bool) memorySizeFunc func(*Stack) (size uint64, overflow bool)

47
core/vm/jumpdests.go Normal file
View file

@ -0,0 +1,47 @@
// Copyright 2024 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package vm
import "github.com/ethereum/go-ethereum/common"
// JumpDestCache represents the cache of jumpdest analysis results.
type JumpDestCache interface {
// Load retrieves the cached jumpdest analysis for the given code hash.
// Returns the BitVec and true if found, or nil and false if not cached.
Load(codeHash common.Hash) (BitVec, bool)
// Store saves the jumpdest analysis for the given code hash.
Store(codeHash common.Hash, vec BitVec)
}
// mapJumpDests is the default implementation of JumpDests using a map.
// This implementation is not thread-safe and is meant to be used per EVM instance.
type mapJumpDests map[common.Hash]BitVec
// newMapJumpDests creates a new map-based JumpDests implementation.
func newMapJumpDests() JumpDestCache {
return make(mapJumpDests)
}
func (j mapJumpDests) Load(codeHash common.Hash) (BitVec, bool) {
vec, ok := j[codeHash]
return vec, ok
}
func (j mapJumpDests) Store(codeHash common.Hash, vec BitVec) {
j[codeHash] = vec
}

View file

@ -31,9 +31,9 @@ import (
var content embed.FS var content embed.FS
var ( var (
blobT = reflect.TypeOf(Blob{}) blobT = reflect.TypeFor[Blob]()
commitmentT = reflect.TypeOf(Commitment{}) commitmentT = reflect.TypeFor[Commitment]()
proofT = reflect.TypeOf(Proof{}) proofT = reflect.TypeFor[Proof]()
CellProofsPerBlob = 128 CellProofsPerBlob = 128
) )

View file

@ -18,7 +18,6 @@
package catalyst package catalyst
import ( import (
"crypto/sha256"
"errors" "errors"
"fmt" "fmt"
"strconv" "strconv"
@ -31,7 +30,6 @@ import (
"github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto/kzg4844"
"github.com/ethereum/go-ethereum/eth" "github.com/ethereum/go-ethereum/eth"
"github.com/ethereum/go-ethereum/eth/ethconfig" "github.com/ethereum/go-ethereum/eth/ethconfig"
"github.com/ethereum/go-ethereum/internal/version" "github.com/ethereum/go-ethereum/internal/version"
@ -120,10 +118,13 @@ var caps = []string{
var ( var (
// Number of blobs requested via getBlobsV2 // Number of blobs requested via getBlobsV2
getBlobsRequestedCounter = metrics.NewRegisteredCounter("engine/getblobs/requested", nil) getBlobsRequestedCounter = metrics.NewRegisteredCounter("engine/getblobs/requested", nil)
// Number of blobs requested via getBlobsV2 that are present in the blobpool // Number of blobs requested via getBlobsV2 that are present in the blobpool
getBlobsAvailableCounter = metrics.NewRegisteredCounter("engine/getblobs/available", nil) getBlobsAvailableCounter = metrics.NewRegisteredCounter("engine/getblobs/available", nil)
// Number of times getBlobsV2 responded with “hit” // Number of times getBlobsV2 responded with “hit”
getBlobsV2RequestHit = metrics.NewRegisteredCounter("engine/getblobs/hit", nil) getBlobsV2RequestHit = metrics.NewRegisteredCounter("engine/getblobs/hit", nil)
// Number of times getBlobsV2 responded with “miss” // Number of times getBlobsV2 responded with “miss”
getBlobsV2RequestMiss = metrics.NewRegisteredCounter("engine/getblobs/miss", nil) getBlobsV2RequestMiss = metrics.NewRegisteredCounter("engine/getblobs/miss", nil)
) )
@ -494,29 +495,15 @@ func (api *ConsensusAPI) GetBlobsV1(hashes []common.Hash) ([]*engine.BlobAndProo
if len(hashes) > 128 { if len(hashes) > 128 {
return nil, engine.TooLargeRequest.With(fmt.Errorf("requested blob count too large: %v", len(hashes))) return nil, engine.TooLargeRequest.With(fmt.Errorf("requested blob count too large: %v", len(hashes)))
} }
var ( blobs, _, proofs, err := api.eth.BlobTxPool().GetBlobs(hashes, types.BlobSidecarVersion0)
res = make([]*engine.BlobAndProofV1, len(hashes)) if err != nil {
hasher = sha256.New() return nil, engine.InvalidParams.With(err)
index = make(map[common.Hash]int)
sidecars = api.eth.BlobTxPool().GetBlobs(hashes)
)
for i, hash := range hashes {
index[hash] = i
} }
for i, sidecar := range sidecars { res := make([]*engine.BlobAndProofV1, len(hashes))
if res[i] != nil || sidecar == nil { for i := 0; i < len(blobs); i++ {
// already filled res[i] = &engine.BlobAndProofV1{
continue Blob: blobs[i][:],
} Proof: proofs[i][0][:],
for cIdx, commitment := range sidecar.Commitments {
computed := kzg4844.CalcBlobHashV1(hasher, &commitment)
if idx, ok := index[computed]; ok {
res[idx] = &engine.BlobAndProofV1{
Blob: sidecar.Blobs[cIdx][:],
Proof: sidecar.Proofs[cIdx][:],
}
}
} }
} }
return res, nil return res, nil
@ -538,47 +525,19 @@ func (api *ConsensusAPI) GetBlobsV2(hashes []common.Hash) ([]*engine.BlobAndProo
} }
getBlobsV2RequestHit.Inc(1) getBlobsV2RequestHit.Inc(1)
// pull up the blob hashes blobs, _, proofs, err := api.eth.BlobTxPool().GetBlobs(hashes, types.BlobSidecarVersion1)
var ( if err != nil {
res = make([]*engine.BlobAndProofV2, len(hashes)) return nil, engine.InvalidParams.With(err)
index = make(map[common.Hash][]int)
sidecars = api.eth.BlobTxPool().GetBlobs(hashes)
)
for i, hash := range hashes {
index[hash] = append(index[hash], i)
} }
for i, sidecar := range sidecars { res := make([]*engine.BlobAndProofV2, len(hashes))
if res[i] != nil { for i := 0; i < len(blobs); i++ {
// already filled var cellProofs []hexutil.Bytes
continue for _, proof := range proofs[i] {
cellProofs = append(cellProofs, proof[:])
} }
if sidecar == nil { res[i] = &engine.BlobAndProofV2{
// not found, return empty response Blob: blobs[i][:],
return nil, nil CellProofs: cellProofs,
}
if sidecar.Version != types.BlobSidecarVersion1 {
log.Info("GetBlobs queried V0 transaction: index %v, blobhashes %v", index, sidecar.BlobHashes())
return nil, nil
}
blobHashes := sidecar.BlobHashes()
for bIdx, hash := range blobHashes {
if idxes, ok := index[hash]; ok {
proofs, err := sidecar.CellProofsAt(bIdx)
if err != nil {
return nil, engine.InvalidParams.With(err)
}
var cellProofs []hexutil.Bytes
for _, proof := range proofs {
cellProofs = append(cellProofs, proof[:])
}
for _, idx := range idxes {
res[idx] = &engine.BlobAndProofV2{
Blob: sidecar.Blobs[bIdx][:],
CellProofs: cellProofs,
}
}
}
} }
} }
return res, nil return res, nil

View file

@ -1497,7 +1497,7 @@ func checkEqualBody(a *types.Body, b *engine.ExecutionPayloadBody) error {
} }
} }
if !reflect.DeepEqual(a.Withdrawals, b.Withdrawals) { if !reflect.DeepEqual(a.Withdrawals, b.Withdrawals) {
return fmt.Errorf("withdrawals mismatch") return errors.New("withdrawals mismatch")
} }
return nil return nil
} }

View file

@ -1,103 +0,0 @@
// Copyright 2022 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package catalyst
import (
"sync"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/eth"
"github.com/ethereum/go-ethereum/eth/ethconfig"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/node"
)
// FullSyncTester is an auxiliary service that allows Geth to perform full sync
// alone without consensus-layer attached. Users must specify a valid block hash
// as the sync target.
//
// This tester can be applied to different networks, no matter it's pre-merge or
// post-merge, but only for full-sync.
type FullSyncTester struct {
stack *node.Node
backend *eth.Ethereum
target common.Hash
closed chan struct{}
wg sync.WaitGroup
exitWhenSynced bool
}
// RegisterFullSyncTester registers the full-sync tester service into the node
// stack for launching and stopping the service controlled by node.
func RegisterFullSyncTester(stack *node.Node, backend *eth.Ethereum, target common.Hash, exitWhenSynced bool) (*FullSyncTester, error) {
cl := &FullSyncTester{
stack: stack,
backend: backend,
target: target,
closed: make(chan struct{}),
exitWhenSynced: exitWhenSynced,
}
stack.RegisterLifecycle(cl)
return cl, nil
}
// Start launches the beacon sync with provided sync target.
func (tester *FullSyncTester) Start() error {
tester.wg.Add(1)
go func() {
defer tester.wg.Done()
// Trigger beacon sync with the provided block hash as trusted
// chain head.
err := tester.backend.Downloader().BeaconDevSync(ethconfig.FullSync, tester.target, tester.closed)
if err != nil {
log.Info("Failed to trigger beacon sync", "err", err)
}
ticker := time.NewTicker(time.Second * 5)
defer ticker.Stop()
for {
select {
case <-ticker.C:
// Stop in case the target block is already stored locally.
if block := tester.backend.BlockChain().GetBlockByHash(tester.target); block != nil {
log.Info("Full-sync target reached", "number", block.NumberU64(), "hash", block.Hash())
if tester.exitWhenSynced {
go tester.stack.Close() // async since we need to close ourselves
log.Info("Terminating the node")
}
return
}
case <-tester.closed:
return
}
}
}()
return nil
}
// Stop stops the full-sync tester to stop all background activities.
// This function can only be called for one time.
func (tester *FullSyncTester) Stop() error {
close(tester.closed)
tester.wg.Wait()
return nil
}

View file

@ -228,8 +228,8 @@ func (api *ConsensusAPI) ExecuteStatelessPayloadV4(params engine.ExecutableData,
return engine.StatelessPayloadStatusV1{Status: engine.INVALID}, paramsErr("nil beaconRoot post-cancun") return engine.StatelessPayloadStatusV1{Status: engine.INVALID}, paramsErr("nil beaconRoot post-cancun")
case executionRequests == nil: case executionRequests == nil:
return engine.StatelessPayloadStatusV1{Status: engine.INVALID}, paramsErr("nil executionRequests post-prague") return engine.StatelessPayloadStatusV1{Status: engine.INVALID}, paramsErr("nil executionRequests post-prague")
case !api.checkFork(params.Timestamp, forks.Prague): case !api.checkFork(params.Timestamp, forks.Prague, forks.Osaka):
return engine.StatelessPayloadStatusV1{Status: engine.INVALID}, unsupportedForkErr("newPayloadV3 must only be called for cancun payloads") return engine.StatelessPayloadStatusV1{Status: engine.INVALID}, unsupportedForkErr("newPayloadV4 must only be called for prague payloads")
} }
requests := convertRequests(executionRequests) requests := convertRequests(executionRequests)
if err := validateRequests(requests); err != nil { if err := validateRequests(requests); err != nil {

View file

@ -200,7 +200,7 @@ func (s *SyncStatusSubscription) Unsubscribe() {
} }
// SubscribeSyncStatus creates a subscription that will broadcast new synchronisation updates. // SubscribeSyncStatus creates a subscription that will broadcast new synchronisation updates.
// The given channel must receive interface values, the result can either. // The given channel must receive interface values, the result can either be a SyncingResult or false.
func (api *DownloaderAPI) SubscribeSyncStatus(status chan interface{}) *SyncStatusSubscription { func (api *DownloaderAPI) SubscribeSyncStatus(status chan interface{}) *SyncStatusSubscription {
api.installSyncSubscription <- status api.installSyncSubscription <- status
return &SyncStatusSubscription{api: api, c: status} return &SyncStatusSubscription{api: api, c: status}

View file

@ -18,7 +18,6 @@ package downloader
import ( import (
"errors" "errors"
"time"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
@ -34,28 +33,14 @@ import (
// Note, this must not be used in live code. If the forkchcoice endpoint where // Note, this must not be used in live code. If the forkchcoice endpoint where
// to use this instead of giving us the payload first, then essentially nobody // to use this instead of giving us the payload first, then essentially nobody
// in the network would have the block yet that we'd attempt to retrieve. // in the network would have the block yet that we'd attempt to retrieve.
func (d *Downloader) BeaconDevSync(mode SyncMode, hash common.Hash, stop chan struct{}) error { func (d *Downloader) BeaconDevSync(mode SyncMode, header *types.Header) error {
// Be very loud that this code should not be used in a live node // Be very loud that this code should not be used in a live node
log.Warn("----------------------------------") log.Warn("----------------------------------")
log.Warn("Beacon syncing with hash as target", "hash", hash) log.Warn("Beacon syncing with hash as target", "number", header.Number, "hash", header.Hash())
log.Warn("This is unhealthy for a live node!") log.Warn("This is unhealthy for a live node!")
log.Warn("This is incompatible with the consensus layer!")
log.Warn("----------------------------------") log.Warn("----------------------------------")
return d.BeaconSync(mode, header, header)
log.Info("Waiting for peers to retrieve sync target")
for {
// If the node is going down, unblock
select {
case <-stop:
return errors.New("stop requested")
default:
}
header, err := d.GetHeader(hash)
if err != nil {
time.Sleep(time.Second)
continue
}
return d.BeaconSync(mode, header, header)
}
} }
// GetHeader tries to retrieve the header with a given hash from a random peer. // GetHeader tries to retrieve the header with a given hash from a random peer.

View file

@ -199,7 +199,7 @@ type BlockChain interface {
// InsertChain inserts a batch of blocks into the local chain. // InsertChain inserts a batch of blocks into the local chain.
InsertChain(types.Blocks) (int, error) InsertChain(types.Blocks) (int, error)
// InterruptInsert whether disables the chain insertion. // InterruptInsert disables or enables chain insertion.
InterruptInsert(on bool) InterruptInsert(on bool)
// InsertReceiptChain inserts a batch of blocks along with their receipts // InsertReceiptChain inserts a batch of blocks along with their receipts
@ -513,7 +513,7 @@ func (d *Downloader) syncToHead() (err error) {
// //
// For non-merged networks, if there is a checkpoint available, then calculate // For non-merged networks, if there is a checkpoint available, then calculate
// the ancientLimit through that. Otherwise calculate the ancient limit through // the ancientLimit through that. Otherwise calculate the ancient limit through
// the advertised height of the remote peer. This most is mostly a fallback for // the advertised height of the remote peer. This is mostly a fallback for
// legacy networks, but should eventually be dropped. TODO(karalabe). // legacy networks, but should eventually be dropped. TODO(karalabe).
// //
// Beacon sync, use the latest finalized block as the ancient limit // Beacon sync, use the latest finalized block as the ancient limit
@ -946,7 +946,7 @@ func (d *Downloader) processSnapSyncContent() error {
if !d.committed.Load() { if !d.committed.Load() {
latest := results[len(results)-1].Header latest := results[len(results)-1].Header
// If the height is above the pivot block by 2 sets, it means the pivot // If the height is above the pivot block by 2 sets, it means the pivot
// become stale in the network, and it was garbage collected, move to a // became stale in the network, and it was garbage collected, move to a
// new pivot. // new pivot.
// //
// Note, we have `reorgProtHeaderDelay` number of blocks withheld, Those // Note, we have `reorgProtHeaderDelay` number of blocks withheld, Those
@ -1043,7 +1043,7 @@ func (d *Downloader) commitSnapSyncData(results []*fetchResult, stateSync *state
first, last := results[0].Header, results[len(results)-1].Header first, last := results[0].Header, results[len(results)-1].Header
log.Debug("Inserting snap-sync blocks", "items", len(results), log.Debug("Inserting snap-sync blocks", "items", len(results),
"firstnum", first.Number, "firsthash", first.Hash(), "firstnum", first.Number, "firsthash", first.Hash(),
"lastnumn", last.Number, "lasthash", last.Hash(), "lastnum", last.Number, "lasthash", last.Hash(),
) )
blocks := make([]*types.Block, len(results)) blocks := make([]*types.Block, len(results))
receipts := make([]rlp.RawValue, len(results)) receipts := make([]rlp.RawValue, len(results))

View file

@ -544,7 +544,7 @@ func testMultiProtoSync(t *testing.T, protocol uint, mode SyncMode) {
tester.newPeer("peer 68", eth.ETH68, chain.blocks[1:]) tester.newPeer("peer 68", eth.ETH68, chain.blocks[1:])
if err := tester.downloader.BeaconSync(mode, chain.blocks[len(chain.blocks)-1].Header(), nil); err != nil { if err := tester.downloader.BeaconSync(mode, chain.blocks[len(chain.blocks)-1].Header(), nil); err != nil {
t.Fatalf("failed to start beacon sync: #{err}") t.Fatalf("failed to start beacon sync: %v", err)
} }
select { select {
case <-complete: case <-complete:

View file

@ -45,9 +45,6 @@ func (d *Downloader) fetchHeadersByHash(p *peerConnection, hash common.Hash, amo
defer timeoutTimer.Stop() defer timeoutTimer.Stop()
select { select {
case <-d.cancelCh:
return nil, nil, errCanceled
case <-timeoutTimer.C: case <-timeoutTimer.C:
// Header retrieval timed out, update the metrics // Header retrieval timed out, update the metrics
p.log.Debug("Header request timed out", "elapsed", ttl) p.log.Debug("Header request timed out", "elapsed", ttl)

View file

@ -18,7 +18,7 @@
package ethconfig package ethconfig
import ( import (
"fmt" "errors"
"time" "time"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
@ -154,7 +154,7 @@ type Config struct {
// RPCEVMTimeout is the global timeout for eth-call. // RPCEVMTimeout is the global timeout for eth-call.
RPCEVMTimeout time.Duration RPCEVMTimeout time.Duration
// RPCTxFeeCap is the global transaction fee(price * gaslimit) cap for // RPCTxFeeCap is the global transaction fee (price * gas limit) cap for
// send-transaction variants. The unit is ether. // send-transaction variants. The unit is ether.
RPCTxFeeCap float64 RPCTxFeeCap float64
@ -171,7 +171,7 @@ type Config struct {
func CreateConsensusEngine(config *params.ChainConfig, db ethdb.Database) (consensus.Engine, error) { func CreateConsensusEngine(config *params.ChainConfig, db ethdb.Database) (consensus.Engine, error) {
if config.TerminalTotalDifficulty == nil { if config.TerminalTotalDifficulty == nil {
log.Error("Geth only supports PoS networks. Please transition legacy networks using Geth v1.13.x.") log.Error("Geth only supports PoS networks. Please transition legacy networks using Geth v1.13.x.")
return nil, fmt.Errorf("'terminalTotalDifficulty' is not set in genesis block") return nil, errors.New("'terminalTotalDifficulty' is not set in genesis block")
} }
// Wrap previously supported consensus engines into their post-merge counterpart // Wrap previously supported consensus engines into their post-merge counterpart
if config.Clique != nil { if config.Clique != nil {

View file

@ -38,6 +38,8 @@ var (
errInvalidTopic = errors.New("invalid topic(s)") errInvalidTopic = errors.New("invalid topic(s)")
errFilterNotFound = errors.New("filter not found") errFilterNotFound = errors.New("filter not found")
errInvalidBlockRange = errors.New("invalid block range params") errInvalidBlockRange = errors.New("invalid block range params")
errUnknownBlock = errors.New("unknown block")
errBlockHashWithRange = errors.New("can't specify fromBlock/toBlock with blockHash")
errPendingLogsUnsupported = errors.New("pending logs are not supported") errPendingLogsUnsupported = errors.New("pending logs are not supported")
errExceedMaxTopics = errors.New("exceed max topics") errExceedMaxTopics = errors.New("exceed max topics")
errExceedMaxAddresses = errors.New("exceed max addresses") errExceedMaxAddresses = errors.New("exceed max addresses")
@ -348,8 +350,13 @@ func (api *FilterAPI) GetLogs(ctx context.Context, crit FilterCriteria) ([]*type
if len(crit.Addresses) > maxAddresses { if len(crit.Addresses) > maxAddresses {
return nil, errExceedMaxAddresses return nil, errExceedMaxAddresses
} }
var filter *Filter var filter *Filter
if crit.BlockHash != nil { if crit.BlockHash != nil {
if crit.FromBlock != nil || crit.ToBlock != nil {
return nil, errBlockHashWithRange
}
// Block filter requested, construct a single-shot filter // Block filter requested, construct a single-shot filter
filter = api.sys.NewBlockFilter(*crit.BlockHash, crit.Addresses, crit.Topics) filter = api.sys.NewBlockFilter(*crit.BlockHash, crit.Addresses, crit.Topics)
} else { } else {
@ -372,6 +379,7 @@ func (api *FilterAPI) GetLogs(ctx context.Context, crit FilterCriteria) ([]*type
// Construct the range filter // Construct the range filter
filter = api.sys.NewRangeFilter(begin, end, crit.Addresses, crit.Topics) filter = api.sys.NewRangeFilter(begin, end, crit.Addresses, crit.Topics)
} }
// Run the filter and return all the logs // Run the filter and return all the logs
logs, err := filter.Logs(ctx) logs, err := filter.Logs(ctx)
if err != nil { if err != nil {

View file

@ -85,7 +85,7 @@ func (f *Filter) Logs(ctx context.Context) ([]*types.Log, error) {
return nil, err return nil, err
} }
if header == nil { if header == nil {
return nil, errors.New("unknown block") return nil, errUnknownBlock
} }
if header.Number.Uint64() < f.sys.backend.HistoryPruningCutoff() { if header.Number.Uint64() < f.sys.backend.HistoryPruningCutoff() {
return nil, &history.PrunedHistoryError{} return nil, &history.PrunedHistoryError{}
@ -456,7 +456,6 @@ func (f *Filter) blockLogs(ctx context.Context, header *types.Header) ([]*types.
// checkMatches checks if the receipts belonging to the given header contain any log events that // checkMatches checks if the receipts belonging to the given header contain any log events that
// match the filter criteria. This function is called when the bloom filter signals a potential match. // match the filter criteria. This function is called when the bloom filter signals a potential match.
// skipFilter signals all logs of the given block are requested.
func (f *Filter) checkMatches(ctx context.Context, header *types.Header) ([]*types.Log, error) { func (f *Filter) checkMatches(ctx context.Context, header *types.Header) ([]*types.Log, error) {
hash := header.Hash() hash := header.Hash()
// Logs in cache are partially filled with context data // Logs in cache are partially filled with context data

View file

@ -207,7 +207,7 @@ type EventSystem struct {
} }
// NewEventSystem creates a new manager that listens for event on the given mux, // NewEventSystem creates a new manager that listens for event on the given mux,
// parses and filters them. It uses the all map to retrieve filter changes. The // parses and filters them. It uses an internal map to retrieve filter changes. The
// work loop holds its own index that is used to forward events to filters. // work loop holds its own index that is used to forward events to filters.
// //
// The returned manager has a loop that needs to be stopped with the Stop function // The returned manager has a loop that needs to be stopped with the Stop function

View file

@ -450,24 +450,65 @@ func TestInvalidGetLogsRequest(t *testing.T) {
t.Parallel() t.Parallel()
var ( var (
db = rawdb.NewMemoryDatabase() genesis = &core.Genesis{
_, sys = newTestFilterSystem(db, Config{}) Config: params.TestChainConfig,
api = NewFilterAPI(sys) BaseFee: big.NewInt(params.InitialBaseFee),
blockHash = common.HexToHash("0x1111111111111111111111111111111111111111111111111111111111111111") }
db, blocks, _ = core.GenerateChainWithGenesis(genesis, ethash.NewFaker(), 10, func(i int, gen *core.BlockGen) {})
_, sys = newTestFilterSystem(db, Config{})
api = NewFilterAPI(sys)
blockHash = blocks[0].Hash()
unknownBlockHash = common.HexToHash("0x1111111111111111111111111111111111111111111111111111111111111111")
) )
// Reason: Cannot specify both BlockHash and FromBlock/ToBlock) // Insert the blocks into the chain so filter can look them up
testCases := []FilterCriteria{ blockchain, err := core.NewBlockChain(db, genesis, ethash.NewFaker(), nil)
0: {BlockHash: &blockHash, FromBlock: big.NewInt(100)}, if err != nil {
1: {BlockHash: &blockHash, ToBlock: big.NewInt(500)}, t.Fatalf("failed to create tester chain: %v", err)
2: {BlockHash: &blockHash, FromBlock: big.NewInt(rpc.LatestBlockNumber.Int64())}, }
3: {BlockHash: &blockHash, Topics: [][]common.Hash{{}, {}, {}, {}, {}}}, if n, err := blockchain.InsertChain(blocks); err != nil {
4: {BlockHash: &blockHash, Addresses: make([]common.Address, maxAddresses+1)}, t.Fatalf("block %d: failed to insert into chain: %v", n, err)
}
type testcase struct {
f FilterCriteria
err error
}
testCases := []testcase{
{
f: FilterCriteria{BlockHash: &blockHash, FromBlock: big.NewInt(100)},
err: errBlockHashWithRange,
},
{
f: FilterCriteria{BlockHash: &blockHash, ToBlock: big.NewInt(500)},
err: errBlockHashWithRange,
},
{
f: FilterCriteria{BlockHash: &blockHash, FromBlock: big.NewInt(rpc.LatestBlockNumber.Int64())},
err: errBlockHashWithRange,
},
{
f: FilterCriteria{BlockHash: &unknownBlockHash},
err: errUnknownBlock,
},
{
f: FilterCriteria{BlockHash: &blockHash, Topics: [][]common.Hash{{}, {}, {}, {}, {}}},
err: errExceedMaxTopics,
},
{
f: FilterCriteria{BlockHash: &blockHash, Topics: [][]common.Hash{{}, {}, {}, {}, {}}},
err: errExceedMaxTopics,
},
{
f: FilterCriteria{BlockHash: &blockHash, Addresses: make([]common.Address, maxAddresses+1)},
err: errExceedMaxAddresses,
},
} }
for i, test := range testCases { for i, test := range testCases {
if _, err := api.GetLogs(context.Background(), test); err == nil { _, err := api.GetLogs(context.Background(), test.f)
t.Errorf("Expected Logs for case #%d to fail", i) if !errors.Is(err, test.err) {
t.Errorf("case %d: wrong error: %q\nwant: %q", i, err, test.err)
} }
} }
} }

View file

@ -62,6 +62,23 @@ func Estimate(ctx context.Context, call *core.Message, opts *Options, gasCap uin
if call.GasLimit >= params.TxGas { if call.GasLimit >= params.TxGas {
hi = call.GasLimit hi = call.GasLimit
} }
// Cap the maximum gas allowance according to EIP-7825 if the estimation targets Osaka
if hi > params.MaxTxGas {
blockNumber, blockTime := opts.Header.Number, opts.Header.Time
if opts.BlockOverrides != nil {
if opts.BlockOverrides.Number != nil {
blockNumber = opts.BlockOverrides.Number.ToInt()
}
if opts.BlockOverrides.Time != nil {
blockTime = uint64(*opts.BlockOverrides.Time)
}
}
if opts.Config.IsOsaka(blockNumber, blockTime) {
hi = params.MaxTxGas
}
}
// Normalize the max fee per gas the call is willing to spend. // Normalize the max fee per gas the call is willing to spend.
var feeCap *big.Int var feeCap *big.Int
if call.GasFeeCap != nil { if call.GasFeeCap != nil {
@ -209,6 +226,9 @@ func execute(ctx context.Context, call *core.Message, opts *Options, gasLimit ui
if errors.Is(err, core.ErrIntrinsicGas) { if errors.Is(err, core.ErrIntrinsicGas) {
return true, nil, nil // Special case, raise gas limit return true, nil, nil // Special case, raise gas limit
} }
if errors.Is(err, core.ErrGasLimitTooHigh) {
return true, nil, nil // Special case, lower gas limit
}
return true, nil, err // Bail out return true, nil, err // Bail out
} }
return result.Failed(), result, nil return result.Failed(), result, nil

View file

@ -66,4 +66,7 @@ var (
// discarded during the snap sync. // discarded during the snap sync.
largeStorageDiscardGauge = metrics.NewRegisteredGauge("eth/protocols/snap/sync/storage/chunk/discard", nil) largeStorageDiscardGauge = metrics.NewRegisteredGauge("eth/protocols/snap/sync/storage/chunk/discard", nil)
largeStorageResumedGauge = metrics.NewRegisteredGauge("eth/protocols/snap/sync/storage/chunk/resume", nil) largeStorageResumedGauge = metrics.NewRegisteredGauge("eth/protocols/snap/sync/storage/chunk/resume", nil)
stateSyncTimeGauge = metrics.NewRegisteredGauge("eth/protocols/snap/sync/time/statesync", nil)
stateHealTimeGauge = metrics.NewRegisteredGauge("eth/protocols/snap/sync/time/stateheal", nil)
) )

View file

@ -502,8 +502,10 @@ type Syncer struct {
storageHealed uint64 // Number of storage slots downloaded during the healing stage storageHealed uint64 // Number of storage slots downloaded during the healing stage
storageHealedBytes common.StorageSize // Number of raw storage bytes persisted to disk during the healing stage storageHealedBytes common.StorageSize // Number of raw storage bytes persisted to disk during the healing stage
startTime time.Time // Time instance when snapshot sync started startTime time.Time // Time instance when snapshot sync started
logTime time.Time // Time instance when status was last reported healStartTime time.Time // Time instance when the state healing started
syncTimeOnce sync.Once // Ensure that the state sync time is uploaded only once
logTime time.Time // Time instance when status was last reported
pend sync.WaitGroup // Tracks network request goroutines for graceful shutdown pend sync.WaitGroup // Tracks network request goroutines for graceful shutdown
lock sync.RWMutex // Protects fields that can change outside of sync (peers, reqs, root) lock sync.RWMutex // Protects fields that can change outside of sync (peers, reqs, root)
@ -685,6 +687,14 @@ func (s *Syncer) Sync(root common.Hash, cancel chan struct{}) error {
s.cleanStorageTasks() s.cleanStorageTasks()
s.cleanAccountTasks() s.cleanAccountTasks()
if len(s.tasks) == 0 && s.healer.scheduler.Pending() == 0 { if len(s.tasks) == 0 && s.healer.scheduler.Pending() == 0 {
// State healing phase completed, record the elapsed time in metrics.
// Note: healing may be rerun in subsequent cycles to fill gaps between
// pivot states (e.g., if chain sync takes longer).
if !s.healStartTime.IsZero() {
stateHealTimeGauge.Inc(int64(time.Since(s.healStartTime)))
log.Info("State healing phase is completed", "elapsed", common.PrettyDuration(time.Since(s.healStartTime)))
s.healStartTime = time.Time{}
}
return nil return nil
} }
// Assign all the data retrieval tasks to any free peers // Assign all the data retrieval tasks to any free peers
@ -693,7 +703,17 @@ func (s *Syncer) Sync(root common.Hash, cancel chan struct{}) error {
s.assignStorageTasks(storageResps, storageReqFails, cancel) s.assignStorageTasks(storageResps, storageReqFails, cancel)
if len(s.tasks) == 0 { if len(s.tasks) == 0 {
// Sync phase done, run heal phase // State sync phase completed, record the elapsed time in metrics.
// Note: the initial state sync runs only once, regardless of whether
// a new cycle is started later. Any state differences in subsequent
// cycles will be handled by the state healer.
s.syncTimeOnce.Do(func() {
stateSyncTimeGauge.Update(int64(time.Since(s.startTime)))
log.Info("State sync phase is completed", "elapsed", common.PrettyDuration(time.Since(s.startTime)))
})
if s.healStartTime.IsZero() {
s.healStartTime = time.Now()
}
s.assignTrienodeHealTasks(trienodeHealResps, trienodeHealReqFails, cancel) s.assignTrienodeHealTasks(trienodeHealResps, trienodeHealReqFails, cancel)
s.assignBytecodeHealTasks(bytecodeHealResps, bytecodeHealReqFails, cancel) s.assignBytecodeHealTasks(bytecodeHealResps, bytecodeHealReqFails, cancel)
} }

197
eth/syncer/syncer.go Normal file
View file

@ -0,0 +1,197 @@
// Copyright 2025 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package syncer
import (
"errors"
"fmt"
"sync"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/eth"
"github.com/ethereum/go-ethereum/eth/ethconfig"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/node"
"github.com/ethereum/go-ethereum/rpc"
)
// syncReq is an internal request asking the syncer loop to (re)target
// full sync towards a new head block.
type syncReq struct {
	hash common.Hash // hash of the block to use as the new sync target
	errc chan error  // channel on which the outcome is reported to the caller
}
// Syncer is an auxiliary service that allows Geth to perform full sync
// alone without a consensus-layer client attached. Users must specify a valid
// block hash as the sync target.
//
// This tool can be applied to different networks, regardless of whether they
// are pre-merge or post-merge, but only for full-sync.
type Syncer struct {
	stack          *node.Node    // node stack, used to shut Geth down once the target is reached
	backend        *eth.Ethereum // Ethereum backend providing the downloader and blockchain
	target         common.Hash   // optional sync target configured at startup (zero hash = none)
	request        chan *syncReq // channel feeding user sync requests into the run loop
	closed         chan struct{} // closed on Stop to terminate background activities
	wg             sync.WaitGroup // tracks the run loop for graceful shutdown
	exitWhenSynced bool          // if set, Geth is closed once the target block is imported
}
// Register registers the synchronization override service into the node
// stack for launching and stopping the service controlled by node.
func Register(stack *node.Node, backend *eth.Ethereum, target common.Hash, exitWhenSynced bool) (*Syncer, error) {
	service := &Syncer{
		stack:          stack,
		backend:        backend,
		target:         target,
		request:        make(chan *syncReq),
		closed:         make(chan struct{}),
		exitWhenSynced: exitWhenSynced,
	}
	// Hook the service into the node lifecycle and expose its debug APIs.
	stack.RegisterAPIs(service.APIs())
	stack.RegisterLifecycle(service)
	return service, nil
}
// APIs return the collection of RPC services the ethereum package offers.
// NOTE, some of these services probably need to be moved to somewhere else.
func (s *Syncer) APIs() []rpc.API {
	api := rpc.API{
		Namespace: "debug",
		Service:   NewAPI(s),
	}
	return []rpc.API{api}
}
// run is the main loop that monitors sync requests from users and initiates
// sync operations when necessary. It also checks whether the specified target
// has been reached and shuts down Geth if requested by the user.
func (s *Syncer) run() {
	defer s.wg.Done()

	var (
		target *types.Header
		ticker = time.NewTicker(time.Second * 5)
	)
	// Release the ticker's resources when the loop terminates.
	defer ticker.Stop()

	for {
		select {
		case req := <-s.request:
			var (
				resync  bool
				retries int
				logged  bool
			)
			for {
				// Give up after a bounded number of attempts to retrieve the
				// target header from the network.
				if retries >= 10 {
					req.errc <- fmt.Errorf("sync target is not available, %x", req.hash)
					break
				}
				select {
				case <-s.closed:
					req.errc <- errors.New("syncer closed")
					return
				default:
				}
				header, err := s.backend.Downloader().GetHeader(req.hash)
				if err != nil {
					if !logged {
						logged = true
						log.Info("Waiting for peers to retrieve sync target", "hash", req.hash)
					}
					// Back off linearly between retries.
					time.Sleep(time.Second * time.Duration(retries+1))
					retries++
					continue
				}
				// Reject targets that are not strictly ahead of the current
				// one, as backward synchronization is not supported.
				if target != nil && header.Number.Cmp(target.Number) <= 0 {
					req.errc <- fmt.Errorf("stale sync target, current: %d, received: %d", target.Number, header.Number)
					break
				}
				target = header
				resync = true
				break
			}
			if resync {
				req.errc <- s.backend.Downloader().BeaconDevSync(ethconfig.FullSync, target)
			}

		case <-ticker.C:
			// Periodically check whether the configured target has been
			// imported, shutting Geth down if the user requested it.
			if target == nil || !s.exitWhenSynced {
				continue
			}
			if block := s.backend.BlockChain().GetBlockByHash(target.Hash()); block != nil {
				log.Info("Sync target reached", "number", block.NumberU64(), "hash", block.Hash())
				go s.stack.Close() // async since we need to close ourselves
				return
			}

		case <-s.closed:
			return
		}
	}
}
// Start launches the synchronization service.
func (s *Syncer) Start() error {
	s.wg.Add(1)
	go s.run()

	// With a preconfigured target, kick off the sync immediately; otherwise
	// wait for an explicit debug_sync request.
	if s.target != (common.Hash{}) {
		return s.Sync(s.target)
	}
	return nil
}
// Stop terminates the synchronization service and stops all background
// activities. This function may be invoked at most once.
func (s *Syncer) Stop() error {
	close(s.closed)
	s.wg.Wait()
	return nil
}
// Sync sets the synchronization target. Notably, setting a target lower than
// the previous one is not allowed, as backward synchronization is not
// supported.
func (s *Syncer) Sync(hash common.Hash) error {
	errc := make(chan error, 1)
	select {
	case s.request <- &syncReq{hash: hash, errc: errc}:
		// Block until the run loop reports the outcome of the request.
		return <-errc
	case <-s.closed:
		return errors.New("syncer is closed")
	}
}
// API is the collection of synchronization service APIs for debugging the
// protocol.
type API struct {
	s *Syncer // backing syncer service the RPC calls are forwarded to
}
// NewAPI creates a new debug API instance wrapping the given syncer.
func NewAPI(s *Syncer) *API {
	api := &API{s: s}
	return api
}
// Sync initiates a full sync to the target block hash, blocking until the
// request has been accepted or rejected by the syncer service.
func (api *API) Sync(target common.Hash) error {
	err := api.s.Sync(target)
	return err
}

View file

@ -65,7 +65,7 @@ func runTrace(tracer *tracers.Tracer, vmctx *vmContext, chaincfg *params.ChainCo
tracer.OnTxStart(evm.GetVMContext(), types.NewTx(&types.LegacyTx{Gas: gasLimit, GasPrice: vmctx.txCtx.GasPrice}), contract.Caller()) tracer.OnTxStart(evm.GetVMContext(), types.NewTx(&types.LegacyTx{Gas: gasLimit, GasPrice: vmctx.txCtx.GasPrice}), contract.Caller())
tracer.OnEnter(0, byte(vm.CALL), contract.Caller(), contract.Address(), []byte{}, startGas, value.ToBig()) tracer.OnEnter(0, byte(vm.CALL), contract.Caller(), contract.Address(), []byte{}, startGas, value.ToBig())
ret, err := evm.Interpreter().Run(contract, []byte{}, false) ret, err := evm.Run(contract, []byte{}, false)
tracer.OnExit(0, ret, startGas-contract.Gas, err, true) tracer.OnExit(0, ret, startGas-contract.Gas, err, true)
// Rest gas assumes no refund // Rest gas assumes no refund
tracer.OnTxEnd(&types.Receipt{GasUsed: gasLimit - contract.Gas}, nil) tracer.OnTxEnd(&types.Receipt{GasUsed: gasLimit - contract.Gas}, nil)

View file

@ -52,7 +52,7 @@ func TestStoreCapture(t *testing.T) {
contract.Code = []byte{byte(vm.PUSH1), 0x1, byte(vm.PUSH1), 0x0, byte(vm.SSTORE)} contract.Code = []byte{byte(vm.PUSH1), 0x1, byte(vm.PUSH1), 0x0, byte(vm.SSTORE)}
var index common.Hash var index common.Hash
logger.OnTxStart(evm.GetVMContext(), nil, common.Address{}) logger.OnTxStart(evm.GetVMContext(), nil, common.Address{})
_, err := evm.Interpreter().Run(contract, []byte{}, false) _, err := evm.Run(contract, []byte{}, false)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }

View file

@ -52,7 +52,7 @@ func simTestBackend(testAddr common.Address) *Backend {
) )
} }
func newBlobTx(sim *Backend, key *ecdsa.PrivateKey) (*types.Transaction, error) { func newBlobTx(sim *Backend, key *ecdsa.PrivateKey, nonce uint64) (*types.Transaction, error) {
client := sim.Client() client := sim.Client()
testBlob := &kzg4844.Blob{0x00} testBlob := &kzg4844.Blob{0x00}
@ -67,12 +67,8 @@ func newBlobTx(sim *Backend, key *ecdsa.PrivateKey) (*types.Transaction, error)
addr := crypto.PubkeyToAddress(key.PublicKey) addr := crypto.PubkeyToAddress(key.PublicKey)
chainid, _ := client.ChainID(context.Background()) chainid, _ := client.ChainID(context.Background())
nonce, err := client.PendingNonceAt(context.Background(), addr)
if err != nil {
return nil, err
}
chainidU256, _ := uint256.FromBig(chainid) chainidU256, _ := uint256.FromBig(chainid)
tx := types.NewTx(&types.BlobTx{ tx := types.NewTx(&types.BlobTx{
ChainID: chainidU256, ChainID: chainidU256,
GasTipCap: gasTipCapU256, GasTipCap: gasTipCapU256,
@ -88,7 +84,7 @@ func newBlobTx(sim *Backend, key *ecdsa.PrivateKey) (*types.Transaction, error)
return types.SignTx(tx, types.LatestSignerForChainID(chainid), key) return types.SignTx(tx, types.LatestSignerForChainID(chainid), key)
} }
func newTx(sim *Backend, key *ecdsa.PrivateKey) (*types.Transaction, error) { func newTx(sim *Backend, key *ecdsa.PrivateKey, nonce uint64) (*types.Transaction, error) {
client := sim.Client() client := sim.Client()
// create a signed transaction to send // create a signed transaction to send
@ -96,10 +92,7 @@ func newTx(sim *Backend, key *ecdsa.PrivateKey) (*types.Transaction, error) {
gasPrice := new(big.Int).Add(head.BaseFee, big.NewInt(params.GWei)) gasPrice := new(big.Int).Add(head.BaseFee, big.NewInt(params.GWei))
addr := crypto.PubkeyToAddress(key.PublicKey) addr := crypto.PubkeyToAddress(key.PublicKey)
chainid, _ := client.ChainID(context.Background()) chainid, _ := client.ChainID(context.Background())
nonce, err := client.PendingNonceAt(context.Background(), addr)
if err != nil {
return nil, err
}
tx := types.NewTx(&types.DynamicFeeTx{ tx := types.NewTx(&types.DynamicFeeTx{
ChainID: chainid, ChainID: chainid,
Nonce: nonce, Nonce: nonce,
@ -161,7 +154,7 @@ func TestSendTransaction(t *testing.T) {
client := sim.Client() client := sim.Client()
ctx := context.Background() ctx := context.Background()
signedTx, err := newTx(sim, testKey) signedTx, err := newTx(sim, testKey, 0)
if err != nil { if err != nil {
t.Errorf("could not create transaction: %v", err) t.Errorf("could not create transaction: %v", err)
} }
@ -252,7 +245,7 @@ func TestForkResendTx(t *testing.T) {
parent, _ := client.HeaderByNumber(ctx, nil) parent, _ := client.HeaderByNumber(ctx, nil)
// 2. // 2.
tx, err := newTx(sim, testKey) tx, err := newTx(sim, testKey, 0)
if err != nil { if err != nil {
t.Fatalf("could not create transaction: %v", err) t.Fatalf("could not create transaction: %v", err)
} }
@ -297,7 +290,7 @@ func TestCommitReturnValue(t *testing.T) {
} }
// Create a block in the original chain (containing a transaction to force different block hashes) // Create a block in the original chain (containing a transaction to force different block hashes)
tx, _ := newTx(sim, testKey) tx, _ := newTx(sim, testKey, 0)
if err := client.SendTransaction(ctx, tx); err != nil { if err := client.SendTransaction(ctx, tx); err != nil {
t.Errorf("sending transaction: %v", err) t.Errorf("sending transaction: %v", err)
} }

View file

@ -38,9 +38,9 @@ func TestTransactionRollbackBehavior(t *testing.T) {
defer sim.Close() defer sim.Close()
client := sim.Client() client := sim.Client()
btx0 := testSendSignedTx(t, testKey, sim, true) btx0 := testSendSignedTx(t, testKey, sim, true, 0)
tx0 := testSendSignedTx(t, testKey2, sim, false) tx0 := testSendSignedTx(t, testKey2, sim, false, 0)
tx1 := testSendSignedTx(t, testKey2, sim, false) tx1 := testSendSignedTx(t, testKey2, sim, false, 1)
sim.Rollback() sim.Rollback()
@ -48,9 +48,9 @@ func TestTransactionRollbackBehavior(t *testing.T) {
t.Fatalf("all transactions were not rolled back") t.Fatalf("all transactions were not rolled back")
} }
btx2 := testSendSignedTx(t, testKey, sim, true) btx2 := testSendSignedTx(t, testKey, sim, true, 0)
tx2 := testSendSignedTx(t, testKey2, sim, false) tx2 := testSendSignedTx(t, testKey2, sim, false, 0)
tx3 := testSendSignedTx(t, testKey2, sim, false) tx3 := testSendSignedTx(t, testKey2, sim, false, 1)
sim.Commit() sim.Commit()
@ -61,7 +61,7 @@ func TestTransactionRollbackBehavior(t *testing.T) {
// testSendSignedTx sends a signed transaction to the simulated backend. // testSendSignedTx sends a signed transaction to the simulated backend.
// It does not commit the block. // It does not commit the block.
func testSendSignedTx(t *testing.T, key *ecdsa.PrivateKey, sim *Backend, isBlobTx bool) *types.Transaction { func testSendSignedTx(t *testing.T, key *ecdsa.PrivateKey, sim *Backend, isBlobTx bool, nonce uint64) *types.Transaction {
t.Helper() t.Helper()
client := sim.Client() client := sim.Client()
ctx := context.Background() ctx := context.Background()
@ -71,9 +71,9 @@ func testSendSignedTx(t *testing.T, key *ecdsa.PrivateKey, sim *Backend, isBlobT
signedTx *types.Transaction signedTx *types.Transaction
) )
if isBlobTx { if isBlobTx {
signedTx, err = newBlobTx(sim, key) signedTx, err = newBlobTx(sim, key, nonce)
} else { } else {
signedTx, err = newTx(sim, key) signedTx, err = newTx(sim, key, nonce)
} }
if err != nil { if err != nil {
t.Fatalf("failed to create transaction: %v", err) t.Fatalf("failed to create transaction: %v", err)
@ -96,13 +96,13 @@ func pendingStateHasTx(client Client, tx *types.Transaction) bool {
) )
// Poll for receipt with timeout // Poll for receipt with timeout
deadline := time.Now().Add(2 * time.Second) deadline := time.Now().Add(200 * time.Millisecond)
for time.Now().Before(deadline) { for time.Now().Before(deadline) {
receipt, err = client.TransactionReceipt(ctx, tx.Hash()) receipt, err = client.TransactionReceipt(ctx, tx.Hash())
if err == nil && receipt != nil { if err == nil && receipt != nil {
break break
} }
time.Sleep(100 * time.Millisecond) time.Sleep(5 * time.Millisecond)
} }
if err != nil { if err != nil {

View file

@ -22,6 +22,7 @@ package leveldb
import ( import (
"bytes" "bytes"
"errors"
"fmt" "fmt"
"sync" "sync"
"time" "time"
@ -31,7 +32,7 @@ import (
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/metrics" "github.com/ethereum/go-ethereum/metrics"
"github.com/syndtr/goleveldb/leveldb" "github.com/syndtr/goleveldb/leveldb"
"github.com/syndtr/goleveldb/leveldb/errors" lerrors "github.com/syndtr/goleveldb/leveldb/errors"
"github.com/syndtr/goleveldb/leveldb/filter" "github.com/syndtr/goleveldb/leveldb/filter"
"github.com/syndtr/goleveldb/leveldb/opt" "github.com/syndtr/goleveldb/leveldb/opt"
"github.com/syndtr/goleveldb/leveldb/util" "github.com/syndtr/goleveldb/leveldb/util"
@ -120,7 +121,7 @@ func NewCustom(file string, namespace string, customize func(options *opt.Option
// Open the db and recover any potential corruptions // Open the db and recover any potential corruptions
db, err := leveldb.OpenFile(file, options) db, err := leveldb.OpenFile(file, options)
if _, corrupted := err.(*errors.ErrCorrupted); corrupted { if _, corrupted := err.(*lerrors.ErrCorrupted); corrupted {
db, err = leveldb.RecoverFile(file, nil) db, err = leveldb.RecoverFile(file, nil)
} }
if err != nil { if err != nil {
@ -548,7 +549,7 @@ func (r *replayer) DeleteRange(start, end []byte) {
if rangeDeleter, ok := r.writer.(ethdb.KeyValueRangeDeleter); ok { if rangeDeleter, ok := r.writer.(ethdb.KeyValueRangeDeleter); ok {
r.failure = rangeDeleter.DeleteRange(start, end) r.failure = rangeDeleter.DeleteRange(start, end)
} else { } else {
r.failure = fmt.Errorf("ethdb.KeyValueWriter does not implement DeleteRange") r.failure = errors.New("ethdb.KeyValueWriter does not implement DeleteRange")
} }
} }

View file

@ -20,7 +20,6 @@ package memorydb
import ( import (
"bytes" "bytes"
"errors" "errors"
"fmt"
"sort" "sort"
"strings" "strings"
"sync" "sync"
@ -327,7 +326,7 @@ func (b *batch) Replay(w ethdb.KeyValueWriter) error {
return err return err
} }
} else { } else {
return fmt.Errorf("ethdb.KeyValueWriter does not implement DeleteRange") return errors.New("ethdb.KeyValueWriter does not implement DeleteRange")
} }
} }
continue continue

View file

@ -18,6 +18,7 @@
package pebble package pebble
import ( import (
"errors"
"fmt" "fmt"
"runtime" "runtime"
"strings" "strings"
@ -705,7 +706,7 @@ func (b *batch) Replay(w ethdb.KeyValueWriter) error {
return err return err
} }
} else { } else {
return fmt.Errorf("ethdb.KeyValueWriter does not implement DeleteRange") return errors.New("ethdb.KeyValueWriter does not implement DeleteRange")
} }
} else { } else {
return fmt.Errorf("unhandled operation, keytype: %v", kind) return fmt.Errorf("unhandled operation, keytype: %v", kind)

2
go.mod
View file

@ -29,7 +29,7 @@ require (
github.com/fsnotify/fsnotify v1.6.0 github.com/fsnotify/fsnotify v1.6.0
github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff
github.com/gofrs/flock v0.12.1 github.com/gofrs/flock v0.12.1
github.com/golang-jwt/jwt/v4 v4.5.1 github.com/golang-jwt/jwt/v4 v4.5.2
github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb
github.com/google/gofuzz v1.2.0 github.com/google/gofuzz v1.2.0
github.com/google/uuid v1.3.0 github.com/google/uuid v1.3.0

4
go.sum
View file

@ -148,8 +148,8 @@ github.com/gofrs/flock v0.12.1 h1:MTLVXXHf8ekldpJk3AKicLij9MdwOWkZ+a/jHHZby9E=
github.com/gofrs/flock v0.12.1/go.mod h1:9zxTsyu5xtJ9DK+1tFZyibEV7y3uwDxPPfbxeeHCoD0= github.com/gofrs/flock v0.12.1/go.mod h1:9zxTsyu5xtJ9DK+1tFZyibEV7y3uwDxPPfbxeeHCoD0=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/golang-jwt/jwt/v4 v4.5.1 h1:JdqV9zKUdtaa9gdPlywC3aeoEsR681PlKC+4F5gQgeo= github.com/golang-jwt/jwt/v4 v4.5.2 h1:YtQM7lnr8iZ+j5q71MGKkNw9Mn7AjHM68uc9g5fXeUI=
github.com/golang-jwt/jwt/v4 v4.5.1/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= github.com/golang-jwt/jwt/v4 v4.5.2/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=

View file

@ -31,6 +31,10 @@ import (
type precompileContract struct{} type precompileContract struct{}
func (p *precompileContract) Name() string {
panic("implement me")
}
func (p *precompileContract) RequiredGas(input []byte) uint64 { return 0 } func (p *precompileContract) RequiredGas(input []byte) uint64 { return 0 }
func (p *precompileContract) Run(input []byte) ([]byte, error) { return nil, nil } func (p *precompileContract) Run(input []byte) ([]byte, error) { return nil, nil }

View file

@ -468,6 +468,11 @@ web3._extend({
call: 'debug_getTrieFlushInterval', call: 'debug_getTrieFlushInterval',
params: 0 params: 0
}), }),
new web3._extend.Method({
name: 'sync',
call: 'debug_sync',
params: 1
}),
], ],
properties: [] properties: []
}); });

View file

@ -14,7 +14,7 @@ func getOrRegisterRuntimeHistogram(name string, scale float64, r Registry) *runt
// runtimeHistogram wraps a runtime/metrics histogram. // runtimeHistogram wraps a runtime/metrics histogram.
type runtimeHistogram struct { type runtimeHistogram struct {
v atomic.Value // v is a pointer to a metrics.Float64Histogram v atomic.Pointer[metrics.Float64Histogram]
scaleFactor float64 scaleFactor float64
} }
@ -58,7 +58,7 @@ func (h *runtimeHistogram) Update(int64) {
// Snapshot returns a non-changing copy of the histogram. // Snapshot returns a non-changing copy of the histogram.
func (h *runtimeHistogram) Snapshot() HistogramSnapshot { func (h *runtimeHistogram) Snapshot() HistogramSnapshot {
hist := h.v.Load().(*metrics.Float64Histogram) hist := h.v.Load()
return newRuntimeHistogramSnapshot(hist) return newRuntimeHistogramSnapshot(hist)
} }

View file

@ -45,7 +45,7 @@ const (
// current process. Setting ENR entries via the Set method updates the record. A new version // current process. Setting ENR entries via the Set method updates the record. A new version
// of the record is signed on demand when the Node method is called. // of the record is signed on demand when the Node method is called.
type LocalNode struct { type LocalNode struct {
cur atomic.Value // holds a non-nil node pointer while the record is up-to-date cur atomic.Pointer[Node] // holds a non-nil node pointer while the record is up-to-date
id ID id ID
key *ecdsa.PrivateKey key *ecdsa.PrivateKey
@ -82,7 +82,7 @@ func NewLocalNode(db *DB, key *ecdsa.PrivateKey) *LocalNode {
} }
ln.seq = db.localSeq(ln.id) ln.seq = db.localSeq(ln.id)
ln.update = time.Now() ln.update = time.Now()
ln.cur.Store((*Node)(nil)) ln.cur.Store(nil)
return ln return ln
} }
@ -94,7 +94,7 @@ func (ln *LocalNode) Database() *DB {
// Node returns the current version of the local node record. // Node returns the current version of the local node record.
func (ln *LocalNode) Node() *Node { func (ln *LocalNode) Node() *Node {
// If we have a valid record, return that // If we have a valid record, return that
n := ln.cur.Load().(*Node) n := ln.cur.Load()
if n != nil { if n != nil {
return n return n
} }
@ -105,7 +105,7 @@ func (ln *LocalNode) Node() *Node {
// Double check the current record, since multiple goroutines might be waiting // Double check the current record, since multiple goroutines might be waiting
// on the write mutex. // on the write mutex.
if n = ln.cur.Load().(*Node); n != nil { if n = ln.cur.Load(); n != nil {
return n return n
} }
@ -121,7 +121,7 @@ func (ln *LocalNode) Node() *Node {
ln.sign() ln.sign()
ln.update = time.Now() ln.update = time.Now()
return ln.cur.Load().(*Node) return ln.cur.Load()
} }
// Seq returns the current sequence number of the local node record. // Seq returns the current sequence number of the local node record.
@ -276,11 +276,11 @@ func (e *lnEndpoint) get() (newIP net.IP, newPort uint16) {
} }
func (ln *LocalNode) invalidate() { func (ln *LocalNode) invalidate() {
ln.cur.Store((*Node)(nil)) ln.cur.Store(nil)
} }
func (ln *LocalNode) sign() { func (ln *LocalNode) sign() {
if n := ln.cur.Load().(*Node); n != nil { if n := ln.cur.Load(); n != nil {
return // no changes return // no changes
} }

View file

@ -148,9 +148,9 @@ func addErrorContext(err error, ctx string) error {
} }
var ( var (
decoderInterface = reflect.TypeOf(new(Decoder)).Elem() decoderInterface = reflect.TypeFor[Decoder]()
bigInt = reflect.TypeOf(big.Int{}) bigInt = reflect.TypeFor[big.Int]()
u256Int = reflect.TypeOf(uint256.Int{}) u256Int = reflect.TypeFor[uint256.Int]()
) )
func makeDecoder(typ reflect.Type, tags rlpstruct.Tags) (dec decoder, err error) { func makeDecoder(typ reflect.Type, tags rlpstruct.Tags) (dec decoder, err error) {
@ -512,7 +512,7 @@ func makeNilPtrDecoder(etype reflect.Type, etypeinfo *typeinfo, ts rlpstruct.Tag
} }
} }
var ifsliceType = reflect.TypeOf([]interface{}{}) var ifsliceType = reflect.TypeFor[[]any]()
func decodeInterface(s *Stream, val reflect.Value) error { func decodeInterface(s *Stream, val reflect.Value) error {
if val.Type().NumMethod() != 0 { if val.Type().NumMethod() != 0 {

View file

@ -133,7 +133,7 @@ func puthead(buf []byte, smalltag, largetag byte, size uint64) int {
return sizesize + 1 return sizesize + 1
} }
var encoderInterface = reflect.TypeOf(new(Encoder)).Elem() var encoderInterface = reflect.TypeFor[Encoder]()
// makeWriter creates a writer function for the given type. // makeWriter creates a writer function for the given type.
func makeWriter(typ reflect.Type, ts rlpstruct.Tags) (writer, error) { func makeWriter(typ reflect.Type, ts rlpstruct.Tags) (writer, error) {

View file

@ -26,7 +26,7 @@ import (
// not verify whether the content of RawValues is valid RLP. // not verify whether the content of RawValues is valid RLP.
type RawValue []byte type RawValue []byte
var rawValueType = reflect.TypeOf(RawValue{}) var rawValueType = reflect.TypeFor[RawValue]()
// StringSize returns the encoded size of a string. // StringSize returns the encoded size of a string.
func StringSize(s string) uint64 { func StringSize(s string) uint64 {

View file

@ -24,6 +24,7 @@ import (
"sort" "sort"
"github.com/ethereum/go-ethereum/rlp/internal/rlpstruct" "github.com/ethereum/go-ethereum/rlp/internal/rlpstruct"
"golang.org/x/tools/go/packages"
) )
// buildContext keeps the data needed for make*Op. // buildContext keeps the data needed for make*Op.
@ -96,14 +97,20 @@ func (bctx *buildContext) typeToStructType(typ types.Type) *rlpstruct.Type {
// file and assigns unique names of temporary variables. // file and assigns unique names of temporary variables.
type genContext struct { type genContext struct {
inPackage *types.Package inPackage *types.Package
imports map[string]struct{} imports map[string]genImportPackage
tempCounter int tempCounter int
} }
type genImportPackage struct {
alias string
pkg *types.Package
}
func newGenContext(inPackage *types.Package) *genContext { func newGenContext(inPackage *types.Package) *genContext {
return &genContext{ return &genContext{
inPackage: inPackage, inPackage: inPackage,
imports: make(map[string]struct{}), imports: make(map[string]genImportPackage),
tempCounter: 0,
} }
} }
@ -117,32 +124,78 @@ func (ctx *genContext) resetTemp() {
ctx.tempCounter = 0 ctx.tempCounter = 0
} }
func (ctx *genContext) addImport(path string) { func (ctx *genContext) addImportPath(path string) {
if path == ctx.inPackage.Path() { pkg, err := ctx.loadPackage(path)
return // avoid importing the package that we're generating in. if err != nil {
panic(fmt.Sprintf("can't load package %q: %v", path, err))
} }
// TODO: renaming? ctx.addImport(pkg)
ctx.imports[path] = struct{}{}
} }
// importsList returns all packages that need to be imported. func (ctx *genContext) addImport(pkg *types.Package) string {
func (ctx *genContext) importsList() []string { if pkg.Path() == ctx.inPackage.Path() {
imp := make([]string, 0, len(ctx.imports)) return "" // avoid importing the package that we're generating in
for k := range ctx.imports {
imp = append(imp, k)
} }
sort.Strings(imp) if p, exists := ctx.imports[pkg.Path()]; exists {
return imp return p.alias
}
var (
baseName = pkg.Name()
alias = baseName
counter = 1
)
// If the base name conflicts with an existing import, add a numeric suffix.
for ctx.hasAlias(alias) {
alias = fmt.Sprintf("%s%d", baseName, counter)
counter++
}
ctx.imports[pkg.Path()] = genImportPackage{alias, pkg}
return alias
} }
// qualify is the types.Qualifier used for printing types. // hasAlias checks if an alias is already in use
func (ctx *genContext) hasAlias(alias string) bool {
for _, p := range ctx.imports {
if p.alias == alias {
return true
}
}
return false
}
// loadPackage attempts to load package information
func (ctx *genContext) loadPackage(path string) (*types.Package, error) {
cfg := &packages.Config{Mode: packages.NeedName}
pkgs, err := packages.Load(cfg, path)
if err != nil {
return nil, err
}
if len(pkgs) == 0 {
return nil, fmt.Errorf("no package found for path %s", path)
}
return types.NewPackage(path, pkgs[0].Name), nil
}
// qualify is the types.Qualifier used for printing types
func (ctx *genContext) qualify(pkg *types.Package) string { func (ctx *genContext) qualify(pkg *types.Package) string {
if pkg.Path() == ctx.inPackage.Path() { if pkg.Path() == ctx.inPackage.Path() {
return "" return ""
} }
ctx.addImport(pkg.Path()) return ctx.addImport(pkg)
// TODO: renaming? }
return pkg.Name()
// importsList returns all packages that need to be imported
func (ctx *genContext) importsList() []string {
imp := make([]string, 0, len(ctx.imports))
for path, p := range ctx.imports {
if p.alias == p.pkg.Name() {
imp = append(imp, fmt.Sprintf("%q", path))
} else {
imp = append(imp, fmt.Sprintf("%s %q", p.alias, path))
}
}
sort.Strings(imp)
return imp
} }
type op interface { type op interface {
@ -359,7 +412,7 @@ func (op uint256Op) genWrite(ctx *genContext, v string) string {
} }
func (op uint256Op) genDecode(ctx *genContext) (string, string) { func (op uint256Op) genDecode(ctx *genContext) (string, string) {
ctx.addImport("github.com/holiman/uint256") ctx.addImportPath("github.com/holiman/uint256")
var b bytes.Buffer var b bytes.Buffer
resultV := ctx.temp() resultV := ctx.temp()
@ -732,7 +785,7 @@ func (bctx *buildContext) makeOp(name *types.Named, typ types.Type, tags rlpstru
// generateDecoder generates the DecodeRLP method on 'typ'. // generateDecoder generates the DecodeRLP method on 'typ'.
func generateDecoder(ctx *genContext, typ string, op op) []byte { func generateDecoder(ctx *genContext, typ string, op op) []byte {
ctx.resetTemp() ctx.resetTemp()
ctx.addImport(pathOfPackageRLP) ctx.addImportPath(pathOfPackageRLP)
result, code := op.genDecode(ctx) result, code := op.genDecode(ctx)
var b bytes.Buffer var b bytes.Buffer
@ -747,8 +800,8 @@ func generateDecoder(ctx *genContext, typ string, op op) []byte {
// generateEncoder generates the EncodeRLP method on 'typ'. // generateEncoder generates the EncodeRLP method on 'typ'.
func generateEncoder(ctx *genContext, typ string, op op) []byte { func generateEncoder(ctx *genContext, typ string, op op) []byte {
ctx.resetTemp() ctx.resetTemp()
ctx.addImport("io") ctx.addImportPath("io")
ctx.addImport(pathOfPackageRLP) ctx.addImportPath(pathOfPackageRLP)
var b bytes.Buffer var b bytes.Buffer
fmt.Fprintf(&b, "func (obj *%s) EncodeRLP(_w io.Writer) error {\n", typ) fmt.Fprintf(&b, "func (obj *%s) EncodeRLP(_w io.Writer) error {\n", typ)
@ -783,7 +836,7 @@ func (bctx *buildContext) generate(typ *types.Named, encoder, decoder bool) ([]b
var b bytes.Buffer var b bytes.Buffer
fmt.Fprintf(&b, "package %s\n\n", pkg.Name()) fmt.Fprintf(&b, "package %s\n\n", pkg.Name())
for _, imp := range ctx.importsList() { for _, imp := range ctx.importsList() {
fmt.Fprintf(&b, "import %q\n", imp) fmt.Fprintf(&b, "import %s\n", imp)
} }
if encoder { if encoder {
fmt.Fprintln(&b) fmt.Fprintln(&b)

View file

@ -47,7 +47,7 @@ func init() {
} }
} }
var tests = []string{"uints", "nil", "rawvalue", "optional", "bigint", "uint256"} var tests = []string{"uints", "nil", "rawvalue", "optional", "bigint", "uint256", "pkgclash"}
func TestOutput(t *testing.T) { func TestOutput(t *testing.T) {
for _, test := range tests { for _, test := range tests {

13
rlp/rlpgen/testdata/pkgclash.in.txt vendored Normal file
View file

@ -0,0 +1,13 @@
// -*- mode: go -*-
package test
import (
eth1 "github.com/ethereum/go-ethereum/eth"
eth2 "github.com/ethereum/go-ethereum/eth/protocols/eth"
)
type Test struct {
A eth1.MinerAPI
B eth2.GetReceiptsPacket
}

82
rlp/rlpgen/testdata/pkgclash.out.txt vendored Normal file
View file

@ -0,0 +1,82 @@
package test
import "github.com/ethereum/go-ethereum/common"
import "github.com/ethereum/go-ethereum/eth"
import "github.com/ethereum/go-ethereum/rlp"
import "io"
import eth1 "github.com/ethereum/go-ethereum/eth/protocols/eth"
func (obj *Test) EncodeRLP(_w io.Writer) error {
w := rlp.NewEncoderBuffer(_w)
_tmp0 := w.List()
_tmp1 := w.List()
w.ListEnd(_tmp1)
_tmp2 := w.List()
w.WriteUint64(obj.B.RequestId)
_tmp3 := w.List()
for _, _tmp4 := range obj.B.GetReceiptsRequest {
w.WriteBytes(_tmp4[:])
}
w.ListEnd(_tmp3)
w.ListEnd(_tmp2)
w.ListEnd(_tmp0)
return w.Flush()
}
func (obj *Test) DecodeRLP(dec *rlp.Stream) error {
var _tmp0 Test
{
if _, err := dec.List(); err != nil {
return err
}
// A:
var _tmp1 eth.MinerAPI
{
if _, err := dec.List(); err != nil {
return err
}
if err := dec.ListEnd(); err != nil {
return err
}
}
_tmp0.A = _tmp1
// B:
var _tmp2 eth1.GetReceiptsPacket
{
if _, err := dec.List(); err != nil {
return err
}
// RequestId:
_tmp3, err := dec.Uint64()
if err != nil {
return err
}
_tmp2.RequestId = _tmp3
// GetReceiptsRequest:
var _tmp4 []common.Hash
if _, err := dec.List(); err != nil {
return err
}
for dec.MoreDataInList() {
var _tmp5 common.Hash
if err := dec.ReadBytes(_tmp5[:]); err != nil {
return err
}
_tmp4 = append(_tmp4, _tmp5)
}
if err := dec.ListEnd(); err != nil {
return err
}
_tmp2.GetReceiptsRequest = _tmp4
if err := dec.ListEnd(); err != nil {
return err
}
}
_tmp0.B = _tmp2
if err := dec.ListEnd(); err != nil {
return err
}
}
*obj = _tmp0
return nil
}

View file

@ -29,10 +29,10 @@ import (
) )
var ( var (
contextType = reflect.TypeOf((*context.Context)(nil)).Elem() contextType = reflect.TypeFor[context.Context]()
errorType = reflect.TypeOf((*error)(nil)).Elem() errorType = reflect.TypeFor[error]()
subscriptionType = reflect.TypeOf(Subscription{}) subscriptionType = reflect.TypeFor[Subscription]()
stringType = reflect.TypeOf("") stringType = reflect.TypeFor[string]()
) )
type serviceRegistry struct { type serviceRegistry struct {

View file

@ -151,7 +151,7 @@ func (args *SendTxArgs) ToTransaction() (*types.Transaction, error) {
al = *args.AccessList al = *args.AccessList
} }
if to == nil { if to == nil {
return nil, fmt.Errorf("transaction recipient must be set for blob transactions") return nil, errors.New("transaction recipient must be set for blob transactions")
} }
data = &types.BlobTx{ data = &types.BlobTx{
To: *to, To: *to,
@ -544,7 +544,7 @@ func parseBytes(encType interface{}) ([]byte, bool) {
// Handle array types. // Handle array types.
val := reflect.ValueOf(encType) val := reflect.ValueOf(encType)
if val.Kind() == reflect.Array && val.Type().Elem().Kind() == reflect.Uint8 { if val.Kind() == reflect.Array && val.Type().Elem().Kind() == reflect.Uint8 {
v := reflect.MakeSlice(reflect.TypeOf([]byte{}), val.Len(), val.Len()) v := reflect.ValueOf(make([]byte, val.Len()))
reflect.Copy(v, val) reflect.Copy(v, val)
return v.Bytes(), true return v.Bytes(), true
} }

View file

@ -17,6 +17,7 @@
package tests package tests
import ( import (
"errors"
"fmt" "fmt"
"math/big" "math/big"
@ -43,7 +44,7 @@ type ttFork struct {
func (tt *TransactionTest) validate() error { func (tt *TransactionTest) validate() error {
if tt.Txbytes == nil { if tt.Txbytes == nil {
return fmt.Errorf("missing txbytes") return errors.New("missing txbytes")
} }
for name, fork := range tt.Result { for name, fork := range tt.Result {
if err := tt.validateFork(fork); err != nil { if err := tt.validateFork(fork); err != nil {
@ -58,10 +59,10 @@ func (tt *TransactionTest) validateFork(fork *ttFork) error {
return nil return nil
} }
if fork.Hash == nil && fork.Exception == nil { if fork.Hash == nil && fork.Exception == nil {
return fmt.Errorf("missing hash and exception") return errors.New("missing hash and exception")
} }
if fork.Hash != nil && fork.Sender == nil { if fork.Hash != nil && fork.Sender == nil {
return fmt.Errorf("missing sender") return errors.New("missing sender")
} }
return nil return nil
} }

View file

@ -17,6 +17,8 @@
package trie package trie
import ( import (
"bytes"
"fmt"
"sync" "sync"
"github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto"
@ -54,7 +56,7 @@ func returnHasherToPool(h *hasher) {
} }
// hash collapses a node down into a hash node. // hash collapses a node down into a hash node.
func (h *hasher) hash(n node, force bool) node { func (h *hasher) hash(n node, force bool) []byte {
// Return the cached hash if it's available // Return the cached hash if it's available
if hash, _ := n.cache(); hash != nil { if hash, _ := n.cache(); hash != nil {
return hash return hash
@ -62,101 +64,110 @@ func (h *hasher) hash(n node, force bool) node {
// Trie not processed yet, walk the children // Trie not processed yet, walk the children
switch n := n.(type) { switch n := n.(type) {
case *shortNode: case *shortNode:
collapsed := h.hashShortNodeChildren(n) enc := h.encodeShortNode(n)
hashed := h.shortnodeToHash(collapsed, force) if len(enc) < 32 && !force {
if hn, ok := hashed.(hashNode); ok { // Nodes smaller than 32 bytes are embedded directly in their parent.
n.flags.hash = hn // In such cases, return the raw encoded blob instead of the node hash.
} else { // It's essential to deep-copy the node blob, as the underlying buffer
n.flags.hash = nil // of enc will be reused later.
buf := make([]byte, len(enc))
copy(buf, enc)
return buf
} }
return hashed hash := h.hashData(enc)
n.flags.hash = hash
return hash
case *fullNode: case *fullNode:
collapsed := h.hashFullNodeChildren(n) enc := h.encodeFullNode(n)
hashed := h.fullnodeToHash(collapsed, force) if len(enc) < 32 && !force {
if hn, ok := hashed.(hashNode); ok { // Nodes smaller than 32 bytes are embedded directly in their parent.
n.flags.hash = hn // In such cases, return the raw encoded blob instead of the node hash.
} else { // It's essential to deep-copy the node blob, as the underlying buffer
n.flags.hash = nil // of enc will be reused later.
buf := make([]byte, len(enc))
copy(buf, enc)
return buf
} }
return hashed hash := h.hashData(enc)
default: n.flags.hash = hash
// Value and hash nodes don't have children, so they're left as were return hash
case hashNode:
// hash nodes don't have children, so they're left as were
return n return n
}
}
// hashShortNodeChildren returns a copy of the supplied shortNode, with its child
// being replaced by either the hash or an embedded node if the child is small.
func (h *hasher) hashShortNodeChildren(n *shortNode) *shortNode {
var collapsed shortNode
collapsed.Key = hexToCompact(n.Key)
switch n.Val.(type) {
case *fullNode, *shortNode:
collapsed.Val = h.hash(n.Val, false)
default: default:
collapsed.Val = n.Val panic(fmt.Errorf("unexpected node type, %T", n))
} }
return &collapsed
} }
// hashFullNodeChildren returns a copy of the supplied fullNode, with its child // encodeShortNode encodes the provided shortNode into the bytes. Notably, the
// being replaced by either the hash or an embedded node if the child is small. // return slice must be deep-copied explicitly, otherwise the underlying slice
func (h *hasher) hashFullNodeChildren(n *fullNode) *fullNode { // will be reused later.
var children [17]node func (h *hasher) encodeShortNode(n *shortNode) []byte {
// Encode leaf node
if hasTerm(n.Key) {
var ln leafNodeEncoder
ln.Key = hexToCompact(n.Key)
ln.Val = n.Val.(valueNode)
ln.encode(h.encbuf)
return h.encodedBytes()
}
// Encode extension node
var en extNodeEncoder
en.Key = hexToCompact(n.Key)
en.Val = h.hash(n.Val, false)
en.encode(h.encbuf)
return h.encodedBytes()
}
// fnEncoderPool is the pool for storing shared fullNode encoder to mitigate
// the significant memory allocation overhead.
var fnEncoderPool = sync.Pool{
New: func() interface{} {
var enc fullnodeEncoder
return &enc
},
}
// encodeFullNode encodes the provided fullNode into the bytes. Notably, the
// return slice must be deep-copied explicitly, otherwise the underlying slice
// will be reused later.
func (h *hasher) encodeFullNode(n *fullNode) []byte {
fn := fnEncoderPool.Get().(*fullnodeEncoder)
fn.reset()
if h.parallel { if h.parallel {
var wg sync.WaitGroup var wg sync.WaitGroup
for i := 0; i < 16; i++ { for i := 0; i < 16; i++ {
if child := n.Children[i]; child != nil { if n.Children[i] == nil {
wg.Add(1) continue
go func(i int) {
hasher := newHasher(false)
children[i] = hasher.hash(child, false)
returnHasherToPool(hasher)
wg.Done()
}(i)
} else {
children[i] = nilValueNode
} }
wg.Add(1)
go func(i int) {
defer wg.Done()
h := newHasher(false)
fn.Children[i] = h.hash(n.Children[i], false)
returnHasherToPool(h)
}(i)
} }
wg.Wait() wg.Wait()
} else { } else {
for i := 0; i < 16; i++ { for i := 0; i < 16; i++ {
if child := n.Children[i]; child != nil { if child := n.Children[i]; child != nil {
children[i] = h.hash(child, false) fn.Children[i] = h.hash(child, false)
} else {
children[i] = nilValueNode
} }
} }
} }
if n.Children[16] != nil { if n.Children[16] != nil {
children[16] = n.Children[16] fn.Children[16] = n.Children[16].(valueNode)
} }
return &fullNode{flags: nodeFlag{}, Children: children} fn.encode(h.encbuf)
} fnEncoderPool.Put(fn)
// shortNodeToHash computes the hash of the given shortNode. The shortNode must return h.encodedBytes()
// first be collapsed, with its key converted to compact form. If the RLP-encoded
// node data is smaller than 32 bytes, the node itself is returned.
func (h *hasher) shortnodeToHash(n *shortNode, force bool) node {
n.encode(h.encbuf)
enc := h.encodedBytes()
if len(enc) < 32 && !force {
return n // Nodes smaller than 32 bytes are stored inside their parent
}
return h.hashData(enc)
}
// fullnodeToHash computes the hash of the given fullNode. If the RLP-encoded
// node data is smaller than 32 bytes, the node itself is returned.
func (h *hasher) fullnodeToHash(n *fullNode, force bool) node {
n.encode(h.encbuf)
enc := h.encodedBytes()
if len(enc) < 32 && !force {
return n // Nodes smaller than 32 bytes are stored inside their parent
}
return h.hashData(enc)
} }
// encodedBytes returns the result of the last encoding operation on h.encbuf. // encodedBytes returns the result of the last encoding operation on h.encbuf.
@ -175,9 +186,10 @@ func (h *hasher) encodedBytes() []byte {
return h.tmp return h.tmp
} }
// hashData hashes the provided data // hashData hashes the provided data. It is safe to modify the returned slice after
func (h *hasher) hashData(data []byte) hashNode { // the function returns.
n := make(hashNode, 32) func (h *hasher) hashData(data []byte) []byte {
n := make([]byte, 32)
h.sha.Reset() h.sha.Reset()
h.sha.Write(data) h.sha.Write(data)
h.sha.Read(n) h.sha.Read(n)
@ -192,20 +204,17 @@ func (h *hasher) hashDataTo(dst, data []byte) {
h.sha.Read(dst) h.sha.Read(dst)
} }
// proofHash is used to construct trie proofs, and returns the 'collapsed' // proofHash is used to construct trie proofs, returning the rlp-encoded node blobs.
// node (for later RLP encoding) as well as the hashed node -- unless the // Note, only resolved node (shortNode or fullNode) is expected for proofing.
// node is smaller than 32 bytes, in which case it will be returned as is. //
// This method does not do anything on value- or hash-nodes. // It is safe to modify the returned slice after the function returns.
func (h *hasher) proofHash(original node) (collapsed, hashed node) { func (h *hasher) proofHash(original node) []byte {
switch n := original.(type) { switch n := original.(type) {
case *shortNode: case *shortNode:
sn := h.hashShortNodeChildren(n) return bytes.Clone(h.encodeShortNode(n))
return sn, h.shortnodeToHash(sn, false)
case *fullNode: case *fullNode:
fn := h.hashFullNodeChildren(n) return bytes.Clone(h.encodeFullNode(n))
return fn, h.fullnodeToHash(fn, false)
default: default:
// Value and hash nodes don't have children, so they're left as were panic(fmt.Errorf("unexpected node type, %T", original))
return n, n
} }
} }

View file

@ -240,9 +240,9 @@ func (it *nodeIterator) LeafProof() [][]byte {
for i, item := range it.stack[:len(it.stack)-1] { for i, item := range it.stack[:len(it.stack)-1] {
// Gather nodes that end up as hash nodes (or the root) // Gather nodes that end up as hash nodes (or the root)
node, hashed := hasher.proofHash(item.node) enc := hasher.proofHash(item.node)
if _, ok := hashed.(hashNode); ok || i == 0 { if len(enc) >= 32 || i == 0 {
proofs = append(proofs, nodeToBytes(node)) proofs = append(proofs, enc)
} }
} }
return proofs return proofs

Some files were not shown because too many files have changed in this diff Show more