diff --git a/accounts/abi/abigen/bind.go b/accounts/abi/abigen/bind.go
index 56e5e214de..08624b04ba 100644
--- a/accounts/abi/abigen/bind.go
+++ b/accounts/abi/abigen/bind.go
@@ -33,6 +33,10 @@ import (
"github.com/ethereum/go-ethereum/log"
)
+var (
+ intRegex = regexp.MustCompile(`(u)?int([0-9]*)`)
+)
+
func isKeyWord(arg string) bool {
switch arg {
case "break":
@@ -299,7 +303,7 @@ func bindBasicType(kind abi.Type) string {
case abi.AddressTy:
return "common.Address"
case abi.IntTy, abi.UintTy:
- parts := regexp.MustCompile(`(u)?int([0-9]*)`).FindStringSubmatch(kind.String())
+ parts := intRegex.FindStringSubmatch(kind.String())
switch parts[2] {
case "8", "16", "32", "64":
return fmt.Sprintf("%sint%s", parts[1], parts[2])
diff --git a/beacon/merkle/merkle.go b/beacon/merkle/merkle.go
index 30896f9b01..64dfadfab5 100644
--- a/beacon/merkle/merkle.go
+++ b/beacon/merkle/merkle.go
@@ -32,7 +32,7 @@ type Value [32]byte
// Values represent a series of merkle tree leaves/nodes.
type Values []Value
-var valueT = reflect.TypeOf(Value{})
+var valueT = reflect.TypeFor[Value]()
// UnmarshalJSON parses a merkle value in hex syntax.
func (m *Value) UnmarshalJSON(input []byte) error {
diff --git a/beacon/params/checkpoint_holesky.hex b/beacon/params/checkpoint_holesky.hex
index 740d7aba21..f4667305b4 100644
--- a/beacon/params/checkpoint_holesky.hex
+++ b/beacon/params/checkpoint_holesky.hex
@@ -1 +1 @@
-0xd60e5310c5d52ced44cfb13be4e9f22a1e6a6dc56964c3cccd429182d26d72d0
\ No newline at end of file
+0x4bae4b97deda095724560012cab1f80a5221ce0a37a4b5236d8ab63f595f29d9
\ No newline at end of file
diff --git a/beacon/params/checkpoint_hoodi.hex b/beacon/params/checkpoint_hoodi.hex
new file mode 100644
index 0000000000..2885d7c996
--- /dev/null
+++ b/beacon/params/checkpoint_hoodi.hex
@@ -0,0 +1 @@
+0x1bbf958008172591b6cbdb3d8d52e26998258e83d4bdb9eec10969d84519a6bd
\ No newline at end of file
diff --git a/beacon/params/checkpoint_mainnet.hex b/beacon/params/checkpoint_mainnet.hex
index 45f065ca15..417e69a24b 100644
--- a/beacon/params/checkpoint_mainnet.hex
+++ b/beacon/params/checkpoint_mainnet.hex
@@ -1 +1 @@
-0x02f0bb348b0d45f95a9b7e2bb5705768ad06548876cee03d880a2c9dabb1ff88
\ No newline at end of file
+0x2fe39a39b6f7cbd549e0f74d259de6db486005a65bd3bd92840dd6ce21d6f4c8
\ No newline at end of file
diff --git a/beacon/params/checkpoint_sepolia.hex b/beacon/params/checkpoint_sepolia.hex
index 3d1b2885b3..02faf72187 100644
--- a/beacon/params/checkpoint_sepolia.hex
+++ b/beacon/params/checkpoint_sepolia.hex
@@ -1 +1 @@
-0xa0dad451a230c01be6f2492980ec5bb412d8cf33351a75e8b172b5b84a5fd03a
\ No newline at end of file
+0x86686b2b366e24134e0e3969a9c5f3759f92e5d2b04785b42e22cc7d468c2107
\ No newline at end of file
diff --git a/beacon/params/networks.go b/beacon/params/networks.go
index 51f67e0c97..b35db34fd6 100644
--- a/beacon/params/networks.go
+++ b/beacon/params/networks.go
@@ -31,6 +31,9 @@ var checkpointSepolia string
//go:embed checkpoint_holesky.hex
var checkpointHolesky string
+//go:embed checkpoint_hoodi.hex
+var checkpointHoodi string
+
var (
MainnetLightConfig = (&ChainConfig{
GenesisValidatorsRoot: common.HexToHash("0x4b363db94e286120d76eb905340fdd4e54bfe9f06bf33ff6cf5ad27f511bfe95"),
@@ -71,7 +74,7 @@ var (
HoodiLightConfig = (&ChainConfig{
GenesisValidatorsRoot: common.HexToHash("0x212f13fc4df078b6cb7db228f1c8307566dcecf900867401a92023d7ba99cb5f"),
GenesisTime: 1742212800,
- Checkpoint: common.HexToHash(""),
+ Checkpoint: common.HexToHash(checkpointHoodi),
}).
AddFork("GENESIS", 0, common.FromHex("0x10000910")).
AddFork("ALTAIR", 0, common.FromHex("0x20000910")).
diff --git a/cmd/devp2p/internal/ethtest/conn.go b/cmd/devp2p/internal/ethtest/conn.go
index 4a7a2c76d8..5182d71ce1 100644
--- a/cmd/devp2p/internal/ethtest/conn.go
+++ b/cmd/devp2p/internal/ethtest/conn.go
@@ -129,7 +129,7 @@ func (c *Conn) Write(proto Proto, code uint64, msg any) error {
return err
}
-var errDisc error = fmt.Errorf("disconnect")
+var errDisc error = errors.New("disconnect")
// ReadEth reads an Eth sub-protocol wire message.
func (c *Conn) ReadEth() (any, error) {
diff --git a/cmd/devp2p/internal/ethtest/suite.go b/cmd/devp2p/internal/ethtest/suite.go
index b5a346c074..47d00761f3 100644
--- a/cmd/devp2p/internal/ethtest/suite.go
+++ b/cmd/devp2p/internal/ethtest/suite.go
@@ -19,6 +19,7 @@ package ethtest
import (
"context"
"crypto/rand"
+ "errors"
"fmt"
"reflect"
"sync"
@@ -1092,7 +1093,7 @@ func (s *Suite) testBadBlobTx(t *utesting.T, tx *types.Transaction, badTx *types
return
}
if !readUntilDisconnect(conn) {
- errc <- fmt.Errorf("expected bad peer to be disconnected")
+ errc <- errors.New("expected bad peer to be disconnected")
return
}
stage3.Done()
@@ -1139,7 +1140,7 @@ func (s *Suite) testBadBlobTx(t *utesting.T, tx *types.Transaction, badTx *types
}
if req.GetPooledTransactionsRequest[0] != tx.Hash() {
- errc <- fmt.Errorf("requested unknown tx hash")
+ errc <- errors.New("requested unknown tx hash")
return
}
@@ -1149,7 +1150,7 @@ func (s *Suite) testBadBlobTx(t *utesting.T, tx *types.Transaction, badTx *types
return
}
if readUntilDisconnect(conn) {
- errc <- fmt.Errorf("unexpected disconnect")
+ errc <- errors.New("unexpected disconnect")
return
}
close(errc)
diff --git a/cmd/devp2p/internal/v4test/discv4tests.go b/cmd/devp2p/internal/v4test/discv4tests.go
index 963df6cdbc..de97d7a276 100644
--- a/cmd/devp2p/internal/v4test/discv4tests.go
+++ b/cmd/devp2p/internal/v4test/discv4tests.go
@@ -27,6 +27,7 @@ import (
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/internal/utesting"
"github.com/ethereum/go-ethereum/p2p/discover/v4wire"
+ "github.com/ethereum/go-ethereum/p2p/enode"
)
const (
@@ -501,6 +502,36 @@ func FindnodeAmplificationWrongIP(t *utesting.T) {
}
}
+func ENRRequest(t *utesting.T) {
+ t.Log(`This test sends an ENRRequest packet and expects a response containing a valid ENR.`)
+
+ te := newTestEnv(Remote, Listen1, Listen2)
+ defer te.close()
+ bond(t, te)
+
+ req := &v4wire.ENRRequest{Expiration: futureExpiration()}
+ hash := te.send(te.l1, req)
+
+ response, _, err := te.read(te.l1)
+ if err != nil {
+ t.Fatal("read error:", err)
+ }
+ enrResp, ok := response.(*v4wire.ENRResponse)
+ if !ok {
+ t.Fatalf("expected ENRResponse packet, got %T", response)
+ }
+ if !bytes.Equal(enrResp.ReplyTok, hash) {
+ t.Errorf("wrong hash in response packet: got %x, want %x", enrResp.ReplyTok, hash)
+ }
+ node, err := enode.New(enode.ValidSchemes, &enrResp.Record)
+ if err != nil {
+ t.Errorf("invalid record in response: %v", err)
+ }
+ if node.ID() != te.remote.ID() {
+ t.Errorf("wrong node ID in response: got %v, want %v", node.ID(), te.remote.ID())
+ }
+}
+
var AllTests = []utesting.Test{
{Name: "Ping/Basic", Fn: BasicPing},
{Name: "Ping/WrongTo", Fn: PingWrongTo},
@@ -510,6 +541,7 @@ var AllTests = []utesting.Test{
{Name: "Ping/PastExpiration", Fn: PingPastExpiration},
{Name: "Ping/WrongPacketType", Fn: WrongPacketType},
{Name: "Ping/BondThenPingWithWrongFrom", Fn: BondThenPingWithWrongFrom},
+ {Name: "ENRRequest", Fn: ENRRequest},
{Name: "Findnode/WithoutEndpointProof", Fn: FindnodeWithoutEndpointProof},
{Name: "Findnode/BasicFindnode", Fn: BasicFindnode},
{Name: "Findnode/UnsolicitedNeighbors", Fn: UnsolicitedNeighbors},
diff --git a/cmd/geth/chaincmd.go b/cmd/geth/chaincmd.go
index 44e11dbf06..112d1a539b 100644
--- a/cmd/geth/chaincmd.go
+++ b/cmd/geth/chaincmd.go
@@ -716,7 +716,7 @@ func downloadEra(ctx *cli.Context) error {
case ctx.IsSet(utils.SepoliaFlag.Name):
network = "sepolia"
default:
- return fmt.Errorf("unsupported network, no known era1 checksums")
+ return errors.New("unsupported network, no known era1 checksums")
}
}
diff --git a/cmd/geth/config.go b/cmd/geth/config.go
index d7c354ff9f..96bd715e88 100644
--- a/cmd/geth/config.go
+++ b/cmd/geth/config.go
@@ -262,14 +262,16 @@ func makeFullNode(ctx *cli.Context) *node.Node {
if cfg.Ethstats.URL != "" {
utils.RegisterEthStatsService(stack, backend, cfg.Ethstats.URL)
}
- // Configure full-sync tester service if requested
+ // Configure synchronization override service
+ var synctarget common.Hash
if ctx.IsSet(utils.SyncTargetFlag.Name) {
hex := hexutil.MustDecode(ctx.String(utils.SyncTargetFlag.Name))
if len(hex) != common.HashLength {
utils.Fatalf("invalid sync target length: have %d, want %d", len(hex), common.HashLength)
}
- utils.RegisterFullSyncTester(stack, eth, common.BytesToHash(hex), ctx.Bool(utils.ExitWhenSyncedFlag.Name))
+ synctarget = common.BytesToHash(hex)
}
+ utils.RegisterSyncOverrideService(stack, eth, synctarget, ctx.Bool(utils.ExitWhenSyncedFlag.Name))
if ctx.IsSet(utils.DeveloperFlag.Name) {
// Start dev mode.
diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go
index b86970651f..cbc1d925e4 100644
--- a/cmd/utils/flags.go
+++ b/cmd/utils/flags.go
@@ -49,10 +49,10 @@ import (
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/crypto/kzg4844"
"github.com/ethereum/go-ethereum/eth"
- "github.com/ethereum/go-ethereum/eth/catalyst"
"github.com/ethereum/go-ethereum/eth/ethconfig"
"github.com/ethereum/go-ethereum/eth/filters"
"github.com/ethereum/go-ethereum/eth/gasprice"
+ "github.com/ethereum/go-ethereum/eth/syncer"
"github.com/ethereum/go-ethereum/eth/tracers"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/ethdb/remotedb"
@@ -1997,10 +1997,14 @@ func RegisterFilterAPI(stack *node.Node, backend ethapi.Backend, ethcfg *ethconf
return filterSystem
}
-// RegisterFullSyncTester adds the full-sync tester service into node.
-func RegisterFullSyncTester(stack *node.Node, eth *eth.Ethereum, target common.Hash, exitWhenSynced bool) {
- catalyst.RegisterFullSyncTester(stack, eth, target, exitWhenSynced)
- log.Info("Registered full-sync tester", "hash", target, "exitWhenSynced", exitWhenSynced)
+// RegisterSyncOverrideService adds the synchronization override service into node.
+func RegisterSyncOverrideService(stack *node.Node, eth *eth.Ethereum, target common.Hash, exitWhenSynced bool) {
+ if target != (common.Hash{}) {
+ log.Info("Registered sync override service", "hash", target, "exitWhenSynced", exitWhenSynced)
+ } else {
+ log.Info("Registered sync override service")
+ }
+ syncer.Register(stack, eth, target, exitWhenSynced)
}
// SetupMetrics configures the metrics system.
diff --git a/cmd/workload/testsuite.go b/cmd/workload/testsuite.go
index 39eeb8e3c2..25dc17a49e 100644
--- a/cmd/workload/testsuite.go
+++ b/cmd/workload/testsuite.go
@@ -18,7 +18,7 @@ package main
import (
"embed"
- "fmt"
+ "errors"
"io/fs"
"os"
@@ -97,7 +97,7 @@ type testConfig struct {
traceTestFile string
}
-var errPrunedHistory = fmt.Errorf("attempt to access pruned history")
+var errPrunedHistory = errors.New("attempt to access pruned history")
// validateHistoryPruneErr checks whether the given error is caused by access
// to history before the pruning threshold block (it is an rpc.Error with code 4444).
@@ -109,7 +109,7 @@ func validateHistoryPruneErr(err error, blockNum uint64, historyPruneBlock *uint
if err != nil {
if rpcErr, ok := err.(rpc.Error); ok && rpcErr.ErrorCode() == 4444 {
if historyPruneBlock != nil && blockNum > *historyPruneBlock {
- return fmt.Errorf("pruned history error returned after pruning threshold")
+ return errors.New("pruned history error returned after pruning threshold")
}
return errPrunedHistory
}
diff --git a/common/hexutil/hexutil.go b/common/hexutil/hexutil.go
index d3201850a8..d6b6b867f2 100644
--- a/common/hexutil/hexutil.go
+++ b/common/hexutil/hexutil.go
@@ -34,11 +34,10 @@ import (
"encoding/hex"
"fmt"
"math/big"
+ "math/bits"
"strconv"
)
-const uintBits = 32 << (uint64(^uint(0)) >> 63)
-
// Errors
var (
ErrEmptyString = &decError{"empty hex string"}
@@ -48,7 +47,7 @@ var (
ErrEmptyNumber = &decError{"hex string \"0x\""}
ErrLeadingZero = &decError{"hex number with leading zero digits"}
ErrUint64Range = &decError{"hex number > 64 bits"}
- ErrUintRange = &decError{fmt.Sprintf("hex number > %d bits", uintBits)}
+ ErrUintRange = &decError{fmt.Sprintf("hex number > %d bits", bits.UintSize)}
ErrBig256Range = &decError{"hex number > 256 bits"}
)
diff --git a/common/hexutil/json.go b/common/hexutil/json.go
index e0ac98f52d..6b9f412078 100644
--- a/common/hexutil/json.go
+++ b/common/hexutil/json.go
@@ -28,11 +28,11 @@ import (
)
var (
- bytesT = reflect.TypeOf(Bytes(nil))
- bigT = reflect.TypeOf((*Big)(nil))
- uintT = reflect.TypeOf(Uint(0))
- uint64T = reflect.TypeOf(Uint64(0))
- u256T = reflect.TypeOf((*uint256.Int)(nil))
+ bytesT = reflect.TypeFor[Bytes]()
+ bigT = reflect.TypeFor[*Big]()
+ uintT = reflect.TypeFor[Uint]()
+ uint64T = reflect.TypeFor[Uint64]()
+ u256T = reflect.TypeFor[*uint256.Int]()
)
// Bytes marshals/unmarshals as a JSON string with 0x prefix.
diff --git a/common/hexutil/json_test.go b/common/hexutil/json_test.go
index 7cca300951..a014438458 100644
--- a/common/hexutil/json_test.go
+++ b/common/hexutil/json_test.go
@@ -22,6 +22,7 @@ import (
"encoding/json"
"errors"
"math/big"
+ "math/bits"
"testing"
"github.com/holiman/uint256"
@@ -384,7 +385,7 @@ func TestUnmarshalUint(t *testing.T) {
for _, test := range unmarshalUintTests {
var v Uint
err := json.Unmarshal([]byte(test.input), &v)
- if uintBits == 32 && test.wantErr32bit != nil {
+ if bits.UintSize == 32 && test.wantErr32bit != nil {
checkError(t, test.input, err, test.wantErr32bit)
continue
}
diff --git a/common/types.go b/common/types.go
index fdb25f1b34..db4de8bcbd 100644
--- a/common/types.go
+++ b/common/types.go
@@ -42,8 +42,8 @@ const (
)
var (
- hashT = reflect.TypeOf(Hash{})
- addressT = reflect.TypeOf(Address{})
+ hashT = reflect.TypeFor[Hash]()
+ addressT = reflect.TypeFor[Address]()
// MaxAddress represents the maximum possible address value.
MaxAddress = HexToAddress("0xffffffffffffffffffffffffffffffffffffffff")
@@ -466,7 +466,7 @@ func isString(input []byte) bool {
// UnmarshalJSON parses a hash in hex syntax.
func (d *Decimal) UnmarshalJSON(input []byte) error {
if !isString(input) {
- return &json.UnmarshalTypeError{Value: "non-string", Type: reflect.TypeOf(uint64(0))}
+ return &json.UnmarshalTypeError{Value: "non-string", Type: reflect.TypeFor[uint64]()}
}
if i, err := strconv.ParseUint(string(input[1:len(input)-1]), 10, 64); err == nil {
*d = Decimal(i)
diff --git a/core/blockchain.go b/core/blockchain.go
index d52990ec5a..0b92a94b6c 100644
--- a/core/blockchain.go
+++ b/core/blockchain.go
@@ -682,7 +682,7 @@ func (bc *BlockChain) initializeHistoryPruning(latest uint64) error {
predefinedPoint := history.PrunePoints[bc.genesisBlock.Hash()]
if predefinedPoint == nil || freezerTail != predefinedPoint.BlockNumber {
log.Error("Chain history database is pruned with unknown configuration", "tail", freezerTail)
- return fmt.Errorf("unexpected database tail")
+ return errors.New("unexpected database tail")
}
bc.historyPrunePoint.Store(predefinedPoint)
return nil
@@ -695,15 +695,15 @@ func (bc *BlockChain) initializeHistoryPruning(latest uint64) error {
// action to happen. So just tell them how to do it.
log.Error(fmt.Sprintf("Chain history mode is configured as %q, but database is not pruned.", bc.cfg.ChainHistoryMode.String()))
log.Error(fmt.Sprintf("Run 'geth prune-history' to prune pre-merge history."))
- return fmt.Errorf("history pruning requested via configuration")
+ return errors.New("history pruning requested via configuration")
}
predefinedPoint := history.PrunePoints[bc.genesisBlock.Hash()]
if predefinedPoint == nil {
log.Error("Chain history pruning is not supported for this network", "genesis", bc.genesisBlock.Hash())
- return fmt.Errorf("history pruning requested for unknown network")
+ return errors.New("history pruning requested for unknown network")
} else if freezerTail > 0 && freezerTail != predefinedPoint.BlockNumber {
log.Error("Chain history database is pruned to unknown block", "tail", freezerTail)
- return fmt.Errorf("unexpected database tail")
+ return errors.New("unexpected database tail")
}
bc.historyPrunePoint.Store(predefinedPoint)
return nil
diff --git a/core/chain_makers.go b/core/chain_makers.go
index b2559495a1..af55716cca 100644
--- a/core/chain_makers.go
+++ b/core/chain_makers.go
@@ -540,8 +540,10 @@ func GenerateVerkleChain(config *params.ChainConfig, parent *types.Block, engine
return block, b.receipts
}
+ sdb := state.NewDatabase(trdb, nil)
+
for i := 0; i < n; i++ {
- statedb, err := state.New(parent.Root(), state.NewDatabase(trdb, nil))
+ statedb, err := state.New(parent.Root(), sdb)
if err != nil {
panic(err)
}
diff --git a/core/filtermaps/chain_view.go b/core/filtermaps/chain_view.go
index 7c48048ad9..35c5ed22a5 100644
--- a/core/filtermaps/chain_view.go
+++ b/core/filtermaps/chain_view.go
@@ -124,19 +124,12 @@ func (cv *ChainView) RawReceipts(number uint64) types.Receipts {
// SharedRange returns the block range shared by two chain views.
func (cv *ChainView) SharedRange(cv2 *ChainView) common.Range[uint64] {
- cv.lock.Lock()
- defer cv.lock.Unlock()
-
- if cv == nil || cv2 == nil || !cv.extendNonCanonical() || !cv2.extendNonCanonical() {
+ if cv == nil || cv2 == nil {
return common.Range[uint64]{}
}
- var sharedLen uint64
- for n := min(cv.headNumber+1-uint64(len(cv.hashes)), cv2.headNumber+1-uint64(len(cv2.hashes))); n <= cv.headNumber && n <= cv2.headNumber; n++ {
- h1, h2 := cv.blockHash(n), cv2.blockHash(n)
- if h1 != h2 || h1 == (common.Hash{}) {
- break
- }
- sharedLen = n + 1
+ sharedLen := min(cv.headNumber, cv2.headNumber) + 1
+ for sharedLen > 0 && cv.BlockId(sharedLen-1) != cv2.BlockId(sharedLen-1) {
+ sharedLen--
}
return common.NewRange(0, sharedLen)
}
diff --git a/core/filtermaps/checkpoints_holesky.json b/core/filtermaps/checkpoints_holesky.json
index a56611cc8e..481c71a114 100644
--- a/core/filtermaps/checkpoints_holesky.json
+++ b/core/filtermaps/checkpoints_holesky.json
@@ -18,5 +18,11 @@
{"blockNumber": 3221725, "blockId": "0xe771f897dece48b1583cc1d1d10de8015da57407eb1fdf239fdbe46eaab85143", "firstIndex": 1140850137},
{"blockNumber": 3357164, "blockId": "0x6252d0aa54c79623b0680069c88d7b5c47983f0d5c4845b6c811b8d9b5e8ff3c", "firstIndex": 1207959453},
{"blockNumber": 3447019, "blockId": "0xeb7d585e1e063f3cc05ed399fbf6c2df63c271f62f030acb804e9fb1e74b6dc1", "firstIndex": 1275067542},
-{"blockNumber": 3546397, "blockId": "0xdabdef7defa4281180a57c5af121877b82274f15ccf074ea0096146f4c246df2", "firstIndex": 1342176778}
+{"blockNumber": 3546397, "blockId": "0xdabdef7defa4281180a57c5af121877b82274f15ccf074ea0096146f4c246df2", "firstIndex": 1342176778},
+{"blockNumber": 3867885, "blockId": "0x8be069dd7a3e2ffb869ee164d11b28555233d2510b134ab9d5484fdae55d2225", "firstIndex": 1409285539},
+{"blockNumber": 3935446, "blockId": "0xc91a61bc215bbcccc3020c62e5c8153162df0d8bcc59813d74671b2d24903ed7", "firstIndex": 1476394742},
+{"blockNumber": 3989508, "blockId": "0xc85dec36a767e44237842ef51915944c2a49780c8c394a3aa6cfb013c99cf58b", "firstIndex": 1543503452},
+{"blockNumber": 4057078, "blockId": "0xccdb79f6705629cb6ab1667a1244938f60911236549143fcff23a3989213e67e", "firstIndex": 1610612030},
+{"blockNumber": 4126499, "blockId": "0x92f2ef21fc911e87e81e38373d5f2915587b9648a0ab3cf4fcfe3e5aaffe7b85", "firstIndex": 1677720416},
+{"blockNumber": 4239335, "blockId": "0x64fbd22965eb583a584552b7edb9b7ce26fb6aad247c1063d0d5a4d11cbcc58c", "firstIndex": 1744830176}
]
diff --git a/core/filtermaps/checkpoints_mainnet.json b/core/filtermaps/checkpoints_mainnet.json
index 70a08b1aaf..2ea065ddb7 100644
--- a/core/filtermaps/checkpoints_mainnet.json
+++ b/core/filtermaps/checkpoints_mainnet.json
@@ -267,5 +267,26 @@
{"blockNumber": 22168652, "blockId": "0x6ae43618c915e636794e2cc2d75dde9992766881c7405fe6479c045ed4bee57e", "firstIndex": 17850956277},
{"blockNumber": 22190975, "blockId": "0x9437121647899a4b7b84d67fbea7cc6ff967481c2eab4328ccd86e2cefe19420", "firstIndex": 17918066140},
{"blockNumber": 22234357, "blockId": "0x036030830134f9224160d5a0b62da35ec7813dc8855d554bd22e9d38545243ed", "firstIndex": 17985175075},
-{"blockNumber": 22276736, "blockId": "0x5ceb96d98aa1b4c1c2f2fa253ae9cdb1b04e0420c11bf795400e8762c0a1635c", "firstIndex": 18052284344}
+{"blockNumber": 22276736, "blockId": "0x5ceb96d98aa1b4c1c2f2fa253ae9cdb1b04e0420c11bf795400e8762c0a1635c", "firstIndex": 18052284344},
+{"blockNumber": 22321282, "blockId": "0x8a601ebf6a757020c6d43a978f0bd2c150c4acc1ffdd50c7ee88afc78b0c11f8", "firstIndex": 18119392242},
+{"blockNumber": 22349231, "blockId": "0xb751c026a92ba5be95ad7ea4e2729a175b0d0e11a4c108f47cab232b4715d1a2", "firstIndex": 18186501218},
+{"blockNumber": 22377469, "blockId": "0xa47916860a22f7e26761ec2d7f717410791bd3ed0237b2f6266750214c7bbf08", "firstIndex": 18253610249},
+{"blockNumber": 22422685, "blockId": "0x8beaee39603af55fad222730f556c840c41cd76a5eef0bad367ac94d3b86c7aa", "firstIndex": 18320716377},
+{"blockNumber": 22462378, "blockId": "0x6dba9c5d2949f5a6a072267b590e8b15e6fb157a0fc22719387f1fd6bfcd8d5d", "firstIndex": 18387828426},
+{"blockNumber": 22500185, "blockId": "0x2484c380df0a8f7edfdf8d917570d23fab8499aea80c35b6cf4e5fe1e34106e9", "firstIndex": 18454936227},
+{"blockNumber": 22539624, "blockId": "0xd418071906803d25afc3842a6a6468ad3b5fea27107b314ce4e2ccf08b478acf", "firstIndex": 18522044531},
+{"blockNumber": 22577021, "blockId": "0xff222982693f3ff60d2097822171f80a6ddd979080aeb7e995bfb1b973497c84", "firstIndex": 18589154438},
+{"blockNumber": 22614525, "blockId": "0x9868da1fea2ffca3f67e35570f02eb5707b27f6967ea4a109eb4ddbf24566efd", "firstIndex": 18656264174},
+{"blockNumber": 22652848, "blockId": "0x060a911da11ab0f1dda307f5196e622d23901d198925749e70ab58a439477c5a", "firstIndex": 18723372617},
+{"blockNumber": 22692432, "blockId": "0x6a937f2c283aba8c778c1f5ef340b225fd820f8a7dfa6f24f5fe541994f32f2d", "firstIndex": 18790480232},
+{"blockNumber": 22731200, "blockId": "0x00d57a9e7a2dad252436fe9f0382c6a8860d301a9f9ffe6d7ac64c82b95300f8", "firstIndex": 18857590076},
+{"blockNumber": 22769000, "blockId": "0xa48db20307c19c373ef2d31d85088ea14b8df0450491c31982504c87b04edbc0", "firstIndex": 18924699130},
+{"blockNumber": 22808126, "blockId": "0x1419c64ff003edca0586f1c8ec3063da5c54c57ff826cfb34bc866cc18949653", "firstIndex": 18991807807},
+{"blockNumber": 22845231, "blockId": "0x691f87217e61c5d7ae9ad53a44d30e1ab6b1cc3f2b689b9fbf7c38fbacacfe3e", "firstIndex": 19058917062},
+{"blockNumber": 22884189, "blockId": "0x7f102d44c0ea7803f5b0e1a98a6abf0e8383eb99fb114d6f7b4591753ce8bba3", "firstIndex": 19126024122},
+{"blockNumber": 22920923, "blockId": "0x04fe6179495016fc3fe56d8ef5311c360a5761a898262173849c3494fdd73d92", "firstIndex": 19193134595},
+{"blockNumber": 22958100, "blockId": "0xe38e0ff7b0c4065ca42ea577bc32f2566ca46f2ddeedcc4bc1f8fb00e7f26329", "firstIndex": 19260242424},
+{"blockNumber": 22988600, "blockId": "0x04ca74758b22e0ea54b8c992022ff21c16a2af9c45144c3b0f80de921a7eee82", "firstIndex": 19327351273},
+{"blockNumber": 23018392, "blockId": "0x61cc979b00bc97b48356f986a5b9ec997d674bc904c2a2e4b0f17de08e50b3bb", "firstIndex": 19394459627},
+{"blockNumber": 23048524, "blockId": "0x489de15d95739ede4ab15e8b5151d80d4dc85ae10e7be800b1a4723094a678df", "firstIndex": 19461570073}
]
diff --git a/core/filtermaps/checkpoints_sepolia.json b/core/filtermaps/checkpoints_sepolia.json
index 8d799daefd..234af955e8 100644
--- a/core/filtermaps/checkpoints_sepolia.json
+++ b/core/filtermaps/checkpoints_sepolia.json
@@ -68,5 +68,32 @@
{"blockNumber": 7911722, "blockId": "0x9a85e48e3135c97c51fc1786f2af0596c802e021b6c53cfca65a129cafcd23ed", "firstIndex": 4496287265},
{"blockNumber": 7960147, "blockId": "0xc9359cc76d7090e1c8a031108f0ab7a8935d971efd4325fe53612a1d99562f6f", "firstIndex": 4563402388},
{"blockNumber": 8030418, "blockId": "0x21867e68cd8327aed2da2601399d60f7f9e41dca4a4f2f9be982e5a2b9304a88", "firstIndex": 4630511616},
-{"blockNumber": 8087701, "blockId": "0x0fa8c8d7549cc9a8d308262706fe248efe759f8b63511efb1e7f3926e9af2dcb", "firstIndex": 4697614758}
+{"blockNumber": 8087701, "blockId": "0x0fa8c8d7549cc9a8d308262706fe248efe759f8b63511efb1e7f3926e9af2dcb", "firstIndex": 4697614758},
+{"blockNumber": 8149130, "blockId": "0x655ea638fd9e35cc25f4332f260d7bf98f4f6fa9a72e1bff861209f18659e94c", "firstIndex": 4764727744},
+{"blockNumber": 8208672, "blockId": "0xb5847a670dc3b6181f9e2e40e4218548048366d237a0d12e938b9879bc8cf800", "firstIndex": 4831837882},
+{"blockNumber": 8271345, "blockId": "0x96797214946f29093883b877ccb0f2a9f771a9a3db3794a642b5dcb781c4d194", "firstIndex": 4898942160},
+{"blockNumber": 8302858, "blockId": "0x6a5977b3382ca69a9e0412333f97b911c1f69f857d8f31dd0fc930980e24f2fc", "firstIndex": 4966054626},
+{"blockNumber": 8333618, "blockId": "0x2547294aa23b67c42adbdddfcf424b17a95c4ff0f352a6a2442c529cfb0c892a", "firstIndex": 5033163605},
+{"blockNumber": 8360582, "blockId": "0xf34f5dceb0ef22e0f782b56c12790472acc675997b9c45075bd4e18a9dacd03c", "firstIndex": 5100273631},
+{"blockNumber": 8387230, "blockId": "0x0fbea42e87620b5beeb76b67febc173847c54333d7dce9fa2f8f2a3fa9c8c22a", "firstIndex": 5167381673},
+{"blockNumber": 8414795, "blockId": "0x6c9c000cf5e35da3a7e9e1cd56147c8ce9b43a76d6de945675efd9dc03b628c9", "firstIndex": 5234477010},
+{"blockNumber": 8444749, "blockId": "0xba85f8c9abaddc34e2113eb49385667ba4b008168ae701f46aa7a7ce78c633a1", "firstIndex": 5301598562},
+{"blockNumber": 8474551, "blockId": "0x720866a40242f087dd25b6f0dd79224884f435b114a39e60c5669f5c942c78c1", "firstIndex": 5368707262},
+{"blockNumber": 8501310, "blockId": "0x2b6da233532c701202fb5ac67e005f7d3eb71f88a9fac10c25d24dd11ada05e5", "firstIndex": 5435803858},
+{"blockNumber": 8526970, "blockId": "0x005f9bbad0a10234129d09894d7fcf04bf1398d326510eedb4195808c282802d", "firstIndex": 5502926509},
+{"blockNumber": 8550412, "blockId": "0x37c9f3efc9f33cf62f590087c8c9ac70011883f75e648647a6fd0fec00ca627c", "firstIndex": 5570034950},
+{"blockNumber": 8573540, "blockId": "0x81cfb46a07be7c70bb8a0f76b03a4cd502f92032bea68ad7ba10e26351673000", "firstIndex": 5637137662},
+{"blockNumber": 8590416, "blockId": "0x5c223d58ef22d7b0dd8c498e8498da4787b5dc706681c2bc83849441f5d0922d", "firstIndex": 5704252906},
+{"blockNumber": 8616793, "blockId": "0x9043ce02742fb5ec43a696602867b7ce6003a95b36cd28a37eeb9785a46ad49f", "firstIndex": 5771357264},
+{"blockNumber": 8647290, "blockId": "0xd90115193764b0a33f3f2a719381b3ddbce2532607c72fb287a864eb391eeada", "firstIndex": 5838466144},
+{"blockNumber": 8673192, "blockId": "0x9bc92d340cccaf4c8c03372efc24eb92c5159106729de8d2e9e064f5568d082b", "firstIndex": 5905577457},
+{"blockNumber": 8700694, "blockId": "0xb3d656a173b962bc6825198e94a4974289db06a8998060bd0f5ee2044a7a7deb", "firstIndex": 5972679345},
+{"blockNumber": 8724533, "blockId": "0x253ffc6d77b88fe18736e4c313e9930341c444bc87b2ee22b26cfe8d9d0b178d", "firstIndex": 6039795829},
+{"blockNumber": 8743948, "blockId": "0x04eb66d0261705d31e629193148d0685058d7759ba5f95d2d38e412dbadb8256", "firstIndex": 6106901747},
+{"blockNumber": 8758378, "blockId": "0x64adf54e662d11db716610157da672c3d8b45f001dbce40a269871b86a84d026", "firstIndex": 6174011544},
+{"blockNumber": 8777722, "blockId": "0x0a7f9a956024b404c915e70b42221aa027b2dd715b0697f099dccefae0b9af97", "firstIndex": 6241124215},
+{"blockNumber": 8800154, "blockId": "0x411f90dc18f2bca31fa63615c2866c907bbac1fae8c06782cabfaf788efba665", "firstIndex": 6308233216},
+{"blockNumber": 8829725, "blockId": "0x5686f3a5eec1b070d0113c588f8f4a560d57ad96b8045cedb5c08bbadaa0273e", "firstIndex": 6375340033},
+{"blockNumber": 8858036, "blockId": "0x4f9b5d9fac9c6f6e2224f613cda12e8ab95d636774ce87489dce8a9f805ee2e5", "firstIndex": 6442450330},
+{"blockNumber": 8884811, "blockId": "0x9cf74f978872683802c065e72b5a5326fdad95f19733c34d927b575cd85fd0bd", "firstIndex": 6509559380}
]
diff --git a/core/forkid/forkid.go b/core/forkid/forkid.go
index 4db366da82..7406a3f53a 100644
--- a/core/forkid/forkid.go
+++ b/core/forkid/forkid.go
@@ -241,9 +241,8 @@ func checksumToBytes(hash uint32) [4]byte {
// them, one for the block number based forks and the second for the timestamps.
func gatherForks(config *params.ChainConfig, genesis uint64) ([]uint64, []uint64) {
// Gather all the fork block numbers via reflection
- kind := reflect.TypeOf(params.ChainConfig{})
+ kind := reflect.TypeFor[params.ChainConfig]()
conf := reflect.ValueOf(config).Elem()
- x := uint64(0)
var (
forksByBlock []uint64
forksByTime []uint64
@@ -258,12 +257,12 @@ func gatherForks(config *params.ChainConfig, genesis uint64) ([]uint64, []uint64
}
// Extract the fork rule block number or timestamp and aggregate it
- if field.Type == reflect.TypeOf(&x) {
+ if field.Type == reflect.TypeFor[*uint64]() {
if rule := conf.Field(i).Interface().(*uint64); rule != nil {
forksByTime = append(forksByTime, *rule)
}
}
- if field.Type == reflect.TypeOf(new(big.Int)) {
+ if field.Type == reflect.TypeFor[*big.Int]() {
if rule := conf.Field(i).Interface().(*big.Int); rule != nil {
forksByBlock = append(forksByBlock, rule.Uint64())
}
diff --git a/core/overlay/state_transition.go b/core/overlay/state_transition.go
new file mode 100644
index 0000000000..90b5c9431a
--- /dev/null
+++ b/core/overlay/state_transition.go
@@ -0,0 +1,105 @@
+// Copyright 2025 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package overlay
+
+import (
+ "bytes"
+ "encoding/gob"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/rawdb"
+ "github.com/ethereum/go-ethereum/ethdb"
+ "github.com/ethereum/go-ethereum/log"
+)
+
+// TransitionState is a structure that holds the progress markers of the
+// translation process.
+type TransitionState struct {
+ CurrentAccountAddress *common.Address // address of the last translated account
+ CurrentSlotHash common.Hash // hash of the last translated storage slot
+ CurrentPreimageOffset int64 // next byte to read from the preimage file
+ Started, Ended bool
+
+ // Mark whether the storage for an account has been processed. This is useful if the
+ // maximum number of leaves of the conversion is reached before the whole storage is
+ // processed.
+ StorageProcessed bool
+
+ BaseRoot common.Hash // hash of the last read-only MPT base tree
+}
+
+// InTransition returns true if the translation process is in progress.
+func (ts *TransitionState) InTransition() bool {
+ return ts != nil && ts.Started && !ts.Ended
+}
+
+// Transitioned returns true if the translation process has been completed.
+func (ts *TransitionState) Transitioned() bool {
+ return ts != nil && ts.Ended
+}
+
+// Copy returns a deep copy of the TransitionState object.
+func (ts *TransitionState) Copy() *TransitionState {
+ ret := &TransitionState{
+ Started: ts.Started,
+ Ended: ts.Ended,
+ CurrentSlotHash: ts.CurrentSlotHash,
+ CurrentPreimageOffset: ts.CurrentPreimageOffset,
+ StorageProcessed: ts.StorageProcessed,
+ }
+ if ts.CurrentAccountAddress != nil {
+ addr := *ts.CurrentAccountAddress
+ ret.CurrentAccountAddress = &addr
+ }
+ return ret
+}
+
+// LoadTransitionState retrieves the Verkle transition state associated with
+// the given state root hash from the database.
+func LoadTransitionState(db ethdb.KeyValueReader, root common.Hash, isVerkle bool) *TransitionState {
+ var ts *TransitionState
+
+ data, _ := rawdb.ReadVerkleTransitionState(db, root)
+
+ // if a state could be read from the db, attempt to decode it
+ if len(data) > 0 {
+ var (
+ newts TransitionState
+ buf = bytes.NewBuffer(data[:])
+ dec = gob.NewDecoder(buf)
+ )
+ // Decode transition state
+ err := dec.Decode(&newts)
+ if err != nil {
+ log.Error("failed to decode transition state", "err", err)
+ return nil
+ }
+ ts = &newts
+ }
+
+ // Fallback that should only happen before the transition
+ if ts == nil {
+ // Initialize the first transition state, with the "ended"
+ // field set to true if the database was created
+ // as a verkle database.
+ log.Debug("no transition state found, starting fresh", "is verkle", db)
+
+ // Start with a fresh state
+ ts = &TransitionState{Ended: isVerkle}
+ }
+ return ts
+}
diff --git a/core/rawdb/accessors_overlay.go b/core/rawdb/accessors_overlay.go
new file mode 100644
index 0000000000..364cc889d1
--- /dev/null
+++ b/core/rawdb/accessors_overlay.go
@@ -0,0 +1,30 @@
+// Copyright 2025 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package rawdb
+
+import (
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/ethdb"
+)
+
+// ReadVerkleTransitionState retrieves the serialized verkle transition state
+// associated with the given state root hash. It returns the database error,
+// if any, so the caller can distinguish "not found" from other failures.
+func ReadVerkleTransitionState(db ethdb.KeyValueReader, hash common.Hash) ([]byte, error) {
+	return db.Get(transitionStateKey(hash))
+}
+
+// WriteVerkleTransitionState stores the serialized verkle transition state
+// under the key derived from the given state root hash.
+func WriteVerkleTransitionState(db ethdb.KeyValueWriter, hash common.Hash, state []byte) error {
+	return db.Put(transitionStateKey(hash), state)
+}
diff --git a/core/rawdb/database.go b/core/rawdb/database.go
index 2ebdf360b5..25cd20d164 100644
--- a/core/rawdb/database.go
+++ b/core/rawdb/database.go
@@ -604,7 +604,7 @@ var knownMetadataKeys = [][]byte{
snapshotGeneratorKey, snapshotRecoveryKey, txIndexTailKey, fastTxLookupLimitKey,
uncleanShutdownKey, badBlockKey, transitionStatusKey, skeletonSyncStatusKey,
persistentStateIDKey, trieJournalKey, snapshotSyncStatusKey, snapSyncStatusFlagKey,
- filterMapsRangeKey, headStateHistoryIndexKey,
+ filterMapsRangeKey, headStateHistoryIndexKey, VerkleTransitionStatePrefix,
}
// printChainMetadata prints out chain metadata to stderr.
diff --git a/core/rawdb/schema.go b/core/rawdb/schema.go
index 388a08f243..72f9bd34ec 100644
--- a/core/rawdb/schema.go
+++ b/core/rawdb/schema.go
@@ -158,6 +158,9 @@ var (
preimageCounter = metrics.NewRegisteredCounter("db/preimage/total", nil)
preimageHitsCounter = metrics.NewRegisteredCounter("db/preimage/hits", nil)
preimageMissCounter = metrics.NewRegisteredCounter("db/preimage/miss", nil)
+
+ // Verkle transition information
+ VerkleTransitionStatePrefix = []byte("verkle-transition-state-")
)
// LegacyTxLookupEntry is the legacy TxLookupEntry definition with some unnecessary
@@ -397,3 +400,8 @@ func storageHistoryIndexBlockKey(addressHash common.Hash, storageHash common.Has
binary.BigEndian.PutUint32(buf[:], blockID)
return append(append(append(StateHistoryStorageBlockPrefix, addressHash.Bytes()...), storageHash.Bytes()...), buf[:]...)
}
+
+// transitionStateKey = VerkleTransitionStatePrefix + hash
+func transitionStateKey(hash common.Hash) []byte {
+	return append(VerkleTransitionStatePrefix, hash.Bytes()...)
+}
diff --git a/core/state/access_list.go b/core/state/access_list.go
index a58c2b20ea..e3f1738864 100644
--- a/core/state/access_list.go
+++ b/core/state/access_list.go
@@ -145,10 +145,7 @@ func (al *accessList) Equal(other *accessList) bool {
// PrettyPrint prints the contents of the access list in a human-readable form
func (al *accessList) PrettyPrint() string {
out := new(strings.Builder)
- var sortedAddrs []common.Address
- for addr := range al.addresses {
- sortedAddrs = append(sortedAddrs, addr)
- }
+ sortedAddrs := slices.Collect(maps.Keys(al.addresses))
slices.SortFunc(sortedAddrs, common.Address.Cmp)
for _, addr := range sortedAddrs {
idx := al.addresses[addr]
diff --git a/core/state/database.go b/core/state/database.go
index 5fb198a629..b46e5d500d 100644
--- a/core/state/database.go
+++ b/core/state/database.go
@@ -21,6 +21,7 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/lru"
+ "github.com/ethereum/go-ethereum/core/overlay"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/state/snapshot"
"github.com/ethereum/go-ethereum/core/types"
@@ -151,17 +152,21 @@ type CachingDB struct {
codeCache *lru.SizeConstrainedCache[common.Hash, []byte]
codeSizeCache *lru.Cache[common.Hash, int]
pointCache *utils.PointCache
+
+ // Transition-specific fields
+ TransitionStatePerRoot *lru.Cache[common.Hash, *overlay.TransitionState]
}
// NewDatabase creates a state database with the provided data sources.
func NewDatabase(triedb *triedb.Database, snap *snapshot.Tree) *CachingDB {
return &CachingDB{
- disk: triedb.Disk(),
- triedb: triedb,
- snap: snap,
- codeCache: lru.NewSizeConstrainedCache[common.Hash, []byte](codeCacheSize),
- codeSizeCache: lru.NewCache[common.Hash, int](codeSizeCacheSize),
- pointCache: utils.NewPointCache(pointCacheSize),
+ disk: triedb.Disk(),
+ triedb: triedb,
+ snap: snap,
+ codeCache: lru.NewSizeConstrainedCache[common.Hash, []byte](codeCacheSize),
+ codeSizeCache: lru.NewCache[common.Hash, int](codeSizeCacheSize),
+ pointCache: utils.NewPointCache(pointCacheSize),
+ TransitionStatePerRoot: lru.NewCache[common.Hash, *overlay.TransitionState](1000),
}
}
@@ -224,7 +229,13 @@ func (db *CachingDB) ReadersWithCacheStats(stateRoot common.Hash) (ReaderWithSta
// OpenTrie opens the main account trie at a specific root hash.
func (db *CachingDB) OpenTrie(root common.Hash) (Trie, error) {
if db.triedb.IsVerkle() {
- return trie.NewVerkleTrie(root, db.triedb, db.pointCache)
+ ts := overlay.LoadTransitionState(db.TrieDB().Disk(), root, db.triedb.IsVerkle())
+ if ts.InTransition() {
+ panic("transition isn't supported yet")
+ }
+ if ts.Transitioned() {
+ return trie.NewVerkleTrie(root, db.triedb, db.pointCache)
+ }
}
tr, err := trie.NewStateTrie(trie.StateTrieID(root), db.triedb)
if err != nil {
@@ -235,9 +246,6 @@ func (db *CachingDB) OpenTrie(root common.Hash) (Trie, error) {
// OpenStorageTrie opens the storage trie of an account.
func (db *CachingDB) OpenStorageTrie(stateRoot common.Hash, address common.Address, root common.Hash, self Trie) (Trie, error) {
- // In the verkle case, there is only one tree. But the two-tree structure
- // is hardcoded in the codebase. So we need to return the same trie in this
- // case.
if db.triedb.IsVerkle() {
return self, nil
}
diff --git a/core/state/reader.go b/core/state/reader.go
index 4628f4d5db..f56a1bfae1 100644
--- a/core/state/reader.go
+++ b/core/state/reader.go
@@ -241,6 +241,7 @@ func newTrieReader(root common.Hash, db *triedb.Database, cache *utils.PointCach
if !db.IsVerkle() {
tr, err = trie.NewStateTrie(trie.StateTrieID(root), db)
} else {
+ // TODO @gballet determine the trie type (verkle or overlay) by transition state
tr, err = trie.NewVerkleTrie(root, db, cache)
}
if err != nil {
diff --git a/core/state/snapshot/journal.go b/core/state/snapshot/journal.go
index e4b396b990..004dd5298a 100644
--- a/core/state/snapshot/journal.go
+++ b/core/state/snapshot/journal.go
@@ -350,7 +350,7 @@ func iterateJournal(db ethdb.KeyValueReader, callback journalCallback) error {
}
if len(destructs) > 0 {
log.Warn("Incompatible legacy journal detected", "version", journalV0)
- return fmt.Errorf("incompatible legacy journal detected")
+ return errors.New("incompatible legacy journal detected")
}
}
if err := r.Decode(&accounts); err != nil {
diff --git a/core/state/statedb.go b/core/state/statedb.go
index e805885079..7aa6780cfa 100644
--- a/core/state/statedb.go
+++ b/core/state/statedb.go
@@ -258,7 +258,7 @@ func (s *StateDB) GetLogs(hash common.Hash, blockNumber uint64, blockHash common
}
func (s *StateDB) Logs() []*types.Log {
- var logs []*types.Log
+ logs := make([]*types.Log, 0, s.logSize)
for _, lgs := range s.logs {
logs = append(logs, lgs...)
}
diff --git a/core/state/transient_storage.go b/core/state/transient_storage.go
index e63db39eba..3bb4955425 100644
--- a/core/state/transient_storage.go
+++ b/core/state/transient_storage.go
@@ -18,6 +18,7 @@ package state
import (
"fmt"
+ "maps"
"slices"
"strings"
@@ -70,19 +71,13 @@ func (t transientStorage) Copy() transientStorage {
// PrettyPrint prints the contents of the access list in a human-readable form
func (t transientStorage) PrettyPrint() string {
out := new(strings.Builder)
- var sortedAddrs []common.Address
- for addr := range t {
- sortedAddrs = append(sortedAddrs, addr)
- slices.SortFunc(sortedAddrs, common.Address.Cmp)
- }
+ sortedAddrs := slices.Collect(maps.Keys(t))
+ slices.SortFunc(sortedAddrs, common.Address.Cmp)
for _, addr := range sortedAddrs {
fmt.Fprintf(out, "%#x:", addr)
- var sortedKeys []common.Hash
storage := t[addr]
- for key := range storage {
- sortedKeys = append(sortedKeys, key)
- }
+ sortedKeys := slices.Collect(maps.Keys(storage))
slices.SortFunc(sortedKeys, common.Hash.Cmp)
for _, key := range sortedKeys {
fmt.Fprintf(out, " %X : %X\n", key, storage[key])
diff --git a/core/tracing/journal.go b/core/tracing/journal.go
index 8937d4c5ae..a402f1ac09 100644
--- a/core/tracing/journal.go
+++ b/core/tracing/journal.go
@@ -17,7 +17,7 @@
package tracing
import (
- "fmt"
+ "errors"
"math/big"
"github.com/ethereum/go-ethereum/common"
@@ -39,14 +39,14 @@ type entry interface {
// WrapWithJournal wraps the given tracer with a journaling layer.
func WrapWithJournal(hooks *Hooks) (*Hooks, error) {
if hooks == nil {
- return nil, fmt.Errorf("wrapping nil tracer")
+ return nil, errors.New("wrapping nil tracer")
}
// No state change to journal, return the wrapped hooks as is
if hooks.OnBalanceChange == nil && hooks.OnNonceChange == nil && hooks.OnNonceChangeV2 == nil && hooks.OnCodeChange == nil && hooks.OnStorageChange == nil {
return hooks, nil
}
if hooks.OnNonceChange != nil && hooks.OnNonceChangeV2 != nil {
- return nil, fmt.Errorf("cannot have both OnNonceChange and OnNonceChangeV2")
+ return nil, errors.New("cannot have both OnNonceChange and OnNonceChangeV2")
}
// Create a new Hooks instance and copy all hooks
diff --git a/core/tracing/journal_test.go b/core/tracing/journal_test.go
index d9616a2ce8..99447e1e1d 100644
--- a/core/tracing/journal_test.go
+++ b/core/tracing/journal_test.go
@@ -293,7 +293,7 @@ func newTracerAllHooks() *tracerAllHooks {
t := &tracerAllHooks{hooksCalled: make(map[string]bool)}
// Initialize all hooks to false. We will use this to
// get total count of hooks.
- hooksType := reflect.TypeOf((*Hooks)(nil)).Elem()
+ hooksType := reflect.TypeFor[Hooks]()
for i := 0; i < hooksType.NumField(); i++ {
t.hooksCalled[hooksType.Field(i).Name] = false
}
diff --git a/core/txpool/blobpool/blobpool.go b/core/txpool/blobpool/blobpool.go
index 078af34864..948ecd14c3 100644
--- a/core/txpool/blobpool/blobpool.go
+++ b/core/txpool/blobpool/blobpool.go
@@ -36,6 +36,7 @@ import (
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/txpool"
"github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/crypto/kzg4844"
"github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/metrics"
@@ -1299,32 +1300,86 @@ func (p *BlobPool) GetMetadata(hash common.Hash) *txpool.TxMetadata {
// GetBlobs returns a number of blobs and proofs for the given versioned hashes.
// This is a utility method for the engine API, enabling consensus clients to
// retrieve blobs from the pools directly instead of the network.
-func (p *BlobPool) GetBlobs(vhashes []common.Hash) []*types.BlobTxSidecar {
-	sidecars := make([]*types.BlobTxSidecar, len(vhashes))
-	for idx, vhash := range vhashes {
-		// Retrieve the datastore item (in a short lock)
-		p.lock.RLock()
-		id, exists := p.lookup.storeidOfBlob(vhash)
-		if !exists {
-			p.lock.RUnlock()
-			continue
-		}
-		data, err := p.store.Get(id)
-		p.lock.RUnlock()
+func (p *BlobPool) GetBlobs(vhashes []common.Hash, version byte) ([]*kzg4844.Blob, []kzg4844.Commitment, [][]kzg4844.Proof, error) {
+	var (
+		blobs       = make([]*kzg4844.Blob, len(vhashes))
+		commitments = make([]kzg4844.Commitment, len(vhashes))
+		proofs      = make([][]kzg4844.Proof, len(vhashes))
-		// After releasing the lock, try to fill any blobs requested
-		if err != nil {
-			log.Error("Tracked blob transaction missing from store", "id", id, "err", err)
-			continue
-		}
-		item := new(types.Transaction)
-		if err = rlp.DecodeBytes(data, item); err != nil {
-			log.Error("Blobs corrupted for traced transaction", "id", id, "err", err)
-			continue
-		}
-		sidecars[idx] = item.BlobTxSidecar()
+		// indices maps each requested vhash to every result slot asking for
+		// it, so duplicate requests are satisfied with a single resolution.
+		indices = make(map[common.Hash][]int)
+		filled  = make(map[common.Hash]struct{})
+	)
+	for i, h := range vhashes {
+		indices[h] = append(indices[h], i)
 	}
-	return sidecars
+	for _, vhash := range vhashes {
+		// Skip duplicate vhash that was already resolved in a previous iteration
+		if _, ok := filled[vhash]; ok {
+			continue
+		}
+		// Retrieve the corresponding blob tx with the vhash
+		p.lock.RLock()
+		txID, exists := p.lookup.storeidOfBlob(vhash)
+		p.lock.RUnlock()
+		if !exists {
+			return nil, nil, nil, fmt.Errorf("blob with vhash %x is not found", vhash)
+		}
+		data, err := p.store.Get(txID)
+		if err != nil {
+			return nil, nil, nil, err
+		}
+
+		// Decode the blob transaction
+		tx := new(types.Transaction)
+		if err := rlp.DecodeBytes(data, tx); err != nil {
+			return nil, nil, nil, err
+		}
+		sidecar := tx.BlobTxSidecar()
+		if sidecar == nil {
+			return nil, nil, nil, fmt.Errorf("blob tx without sidecar %x", tx.Hash())
+		}
+		// Traverse the blobs in the transaction
+		for i, hash := range tx.BlobHashes() {
+			list, ok := indices[hash]
+			if !ok {
+				continue // non-interesting blob
+			}
+			var pf []kzg4844.Proof
+			switch version {
+			case types.BlobSidecarVersion0:
+				if sidecar.Version == types.BlobSidecarVersion0 {
+					pf = []kzg4844.Proof{sidecar.Proofs[i]}
+				} else {
+					proof, err := kzg4844.ComputeBlobProof(&sidecar.Blobs[i], sidecar.Commitments[i])
+					if err != nil {
+						return nil, nil, nil, err
+					}
+					pf = []kzg4844.Proof{proof}
+				}
+			case types.BlobSidecarVersion1:
+				if sidecar.Version == types.BlobSidecarVersion0 {
+					cellProofs, err := kzg4844.ComputeCellProofs(&sidecar.Blobs[i])
+					if err != nil {
+						return nil, nil, nil, err
+					}
+					pf = cellProofs
+				} else {
+					cellProofs, err := sidecar.CellProofsAt(i)
+					if err != nil {
+						return nil, nil, nil, err
+					}
+					pf = cellProofs
+				}
+			default:
+				// Reject unknown sidecar versions explicitly, instead of
+				// silently returning nil proofs for every requested blob.
+				return nil, nil, nil, fmt.Errorf("unsupported blob sidecar version %d", version)
+			}
+			for _, index := range list {
+				blobs[index] = &sidecar.Blobs[i]
+				commitments[index] = sidecar.Commitments[i]
+				proofs[index] = pf
+			}
+			filled[hash] = struct{}{}
+		}
+	}
+	return blobs, commitments, proofs, nil
 }
// AvailableBlobs returns the number of blobs that are available in the subpool.
diff --git a/core/txpool/blobpool/blobpool_test.go b/core/txpool/blobpool/blobpool_test.go
index 422c35f6d2..55eed86cff 100644
--- a/core/txpool/blobpool/blobpool_test.go
+++ b/core/txpool/blobpool/blobpool_test.go
@@ -26,6 +26,7 @@ import (
"math/big"
"os"
"path/filepath"
+ "reflect"
"sync"
"testing"
@@ -50,6 +51,7 @@ var (
testBlobCommits []kzg4844.Commitment
testBlobProofs []kzg4844.Proof
testBlobVHashes [][32]byte
+ testBlobIndices = make(map[[32]byte]int)
)
const testMaxBlobsPerBlock = 6
@@ -66,6 +68,7 @@ func init() {
testBlobProofs = append(testBlobProofs, testBlobProof)
testBlobVHash := kzg4844.CalcBlobHashV1(sha256.New(), &testBlobCommit)
+ testBlobIndices[testBlobVHash] = len(testBlobVHashes)
testBlobVHashes = append(testBlobVHashes, testBlobVHash)
}
}
@@ -216,7 +219,7 @@ func makeTx(nonce uint64, gasTipCap uint64, gasFeeCap uint64, blobFeeCap uint64,
// makeMultiBlobTx is a utility method to construct a ramdom blob tx with
// certain number of blobs in its sidecar.
-func makeMultiBlobTx(nonce uint64, gasTipCap uint64, gasFeeCap uint64, blobFeeCap uint64, blobCount int, key *ecdsa.PrivateKey) *types.Transaction {
+func makeMultiBlobTx(nonce uint64, gasTipCap uint64, gasFeeCap uint64, blobFeeCap uint64, blobCount int, blobOffset int, key *ecdsa.PrivateKey, version byte) *types.Transaction {
var (
blobs []kzg4844.Blob
blobHashes []common.Hash
@@ -224,10 +227,15 @@ func makeMultiBlobTx(nonce uint64, gasTipCap uint64, gasFeeCap uint64, blobFeeCa
proofs []kzg4844.Proof
)
for i := 0; i < blobCount; i++ {
- blobs = append(blobs, *testBlobs[i])
- commitments = append(commitments, testBlobCommits[i])
- proofs = append(proofs, testBlobProofs[i])
- blobHashes = append(blobHashes, testBlobVHashes[i])
+ blobs = append(blobs, *testBlobs[blobOffset+i])
+ commitments = append(commitments, testBlobCommits[blobOffset+i])
+ if version == types.BlobSidecarVersion0 {
+ proofs = append(proofs, testBlobProofs[blobOffset+i])
+ } else {
+ cellProofs, _ := kzg4844.ComputeCellProofs(testBlobs[blobOffset+i])
+ proofs = append(proofs, cellProofs...)
+ }
+ blobHashes = append(blobHashes, testBlobVHashes[blobOffset+i])
}
blobtx := &types.BlobTx{
ChainID: uint256.MustFromBig(params.MainnetChainConfig.ChainID),
@@ -238,7 +246,7 @@ func makeMultiBlobTx(nonce uint64, gasTipCap uint64, gasFeeCap uint64, blobFeeCa
BlobFeeCap: uint256.NewInt(blobFeeCap),
BlobHashes: blobHashes,
Value: uint256.NewInt(100),
- Sidecar: types.NewBlobTxSidecar(types.BlobSidecarVersion0, blobs, commitments, proofs),
+ Sidecar: types.NewBlobTxSidecar(version, blobs, commitments, proofs),
}
return types.MustSignNewTx(key, types.LatestSigner(params.MainnetChainConfig), blobtx)
}
@@ -396,35 +404,21 @@ func verifyPoolInternals(t *testing.T, pool *BlobPool) {
// whatever is in the pool, it can be retrieved correctly.
func verifyBlobRetrievals(t *testing.T, pool *BlobPool) {
// Collect all the blobs tracked by the pool
- known := make(map[common.Hash]struct{})
+ var (
+ hashes []common.Hash
+ known = make(map[common.Hash]struct{})
+ )
for _, txs := range pool.index {
for _, tx := range txs {
for _, vhash := range tx.vhashes {
known[vhash] = struct{}{}
}
+ hashes = append(hashes, tx.vhashes...)
}
}
- // Attempt to retrieve all test blobs
- hashes := make([]common.Hash, len(testBlobVHashes))
- for i := range testBlobVHashes {
- copy(hashes[i][:], testBlobVHashes[i][:])
- }
- sidecars := pool.GetBlobs(hashes)
- var blobs []*kzg4844.Blob
- var proofs []*kzg4844.Proof
- for idx, sidecar := range sidecars {
- if sidecar == nil {
- blobs = append(blobs, nil)
- proofs = append(proofs, nil)
- continue
- }
- blobHashes := sidecar.BlobHashes()
- for i, hash := range blobHashes {
- if hash == hashes[idx] {
- blobs = append(blobs, &sidecar.Blobs[i])
- proofs = append(proofs, &sidecar.Proofs[i])
- }
- }
+ blobs, _, proofs, err := pool.GetBlobs(hashes, types.BlobSidecarVersion0)
+ if err != nil {
+ t.Fatal(err)
}
// Cross validate what we received vs what we wanted
if len(blobs) != len(hashes) || len(proofs) != len(hashes) {
@@ -434,13 +428,12 @@ func verifyBlobRetrievals(t *testing.T, pool *BlobPool) {
for i, hash := range hashes {
// If an item is missing, but shouldn't, error
if blobs[i] == nil || proofs[i] == nil {
- if _, ok := known[hash]; ok {
- t.Errorf("tracked blob retrieval failed: item %d, hash %x", i, hash)
- }
+ t.Errorf("tracked blob retrieval failed: item %d, hash %x", i, hash)
continue
}
// Item retrieved, make sure it matches the expectation
- if *blobs[i] != *testBlobs[i] || *proofs[i] != testBlobProofs[i] {
+ index := testBlobIndices[hash]
+ if *blobs[i] != *testBlobs[index] || proofs[i][0] != testBlobProofs[index] {
t.Errorf("retrieved blob or proof mismatch: item %d, hash %x", i, hash)
continue
}
@@ -1071,9 +1064,9 @@ func TestChangingSlotterSize(t *testing.T) {
addr2 = crypto.PubkeyToAddress(key2.PublicKey)
addr3 = crypto.PubkeyToAddress(key3.PublicKey)
- tx1 = makeMultiBlobTx(0, 1, 1000, 100, 6, key1)
- tx2 = makeMultiBlobTx(0, 1, 800, 70, 6, key2)
- tx3 = makeMultiBlobTx(0, 1, 800, 110, 24, key3)
+ tx1 = makeMultiBlobTx(0, 1, 1000, 100, 6, 0, key1, types.BlobSidecarVersion0)
+ tx2 = makeMultiBlobTx(0, 1, 800, 70, 6, 0, key2, types.BlobSidecarVersion0)
+ tx3 = makeMultiBlobTx(0, 1, 800, 110, 24, 0, key3, types.BlobSidecarVersion0)
blob1, _ = rlp.EncodeToBytes(tx1)
blob2, _ = rlp.EncodeToBytes(tx2)
@@ -1191,8 +1184,8 @@ func TestBlobCountLimit(t *testing.T) {
// Attempt to add transactions.
var (
- tx1 = makeMultiBlobTx(0, 1, 1000, 100, 6, key1)
- tx2 = makeMultiBlobTx(0, 1, 800, 70, 7, key2)
+ tx1 = makeMultiBlobTx(0, 1, 1000, 100, 6, 0, key1, types.BlobSidecarVersion0)
+ tx2 = makeMultiBlobTx(0, 1, 800, 70, 7, 0, key2, types.BlobSidecarVersion0)
)
errs := pool.Add([]*types.Transaction{tx1, tx2}, true)
@@ -1675,6 +1668,181 @@ func TestAdd(t *testing.T) {
}
}
+// TestGetBlobs verifies that blobs, commitments and proofs can be retrieved
+// from the pool by versioned hash, for both supported sidecar versions and
+// for requests spanning multiple transactions.
+func TestGetBlobs(t *testing.T) {
+	//log.SetDefault(log.NewLogger(log.NewTerminalHandlerWithLevel(os.Stderr, log.LevelTrace, true)))
+
+	// Create a temporary folder for the persistent backend
+	storage := t.TempDir()
+
+	os.MkdirAll(filepath.Join(storage, pendingTransactionStore), 0700)
+	store, _ := billy.Open(billy.Options{Path: filepath.Join(storage, pendingTransactionStore)}, newSlotter(params.BlobTxMaxBlobs), nil)
+
+	// Create transactions from a few accounts. Each transaction carries its
+	// own disjoint range of the global test blobs (see the index comments).
+	var (
+		key1, _ = crypto.GenerateKey()
+		key2, _ = crypto.GenerateKey()
+		key3, _ = crypto.GenerateKey()
+
+		addr1 = crypto.PubkeyToAddress(key1.PublicKey)
+		addr2 = crypto.PubkeyToAddress(key2.PublicKey)
+		addr3 = crypto.PubkeyToAddress(key3.PublicKey)
+
+		tx1 = makeMultiBlobTx(0, 1, 1000, 100, 6, 0, key1, types.BlobSidecarVersion0) // [0, 6)
+		tx2 = makeMultiBlobTx(0, 1, 800, 70, 6, 6, key2, types.BlobSidecarVersion1)   // [6, 12)
+		tx3 = makeMultiBlobTx(0, 1, 800, 110, 6, 12, key3, types.BlobSidecarVersion0) // [12, 18)
+
+		blob1, _ = rlp.EncodeToBytes(tx1)
+		blob2, _ = rlp.EncodeToBytes(tx2)
+		blob3, _ = rlp.EncodeToBytes(tx3)
+	)
+
+	// Write the three txs to the store so the pool can recover them on Init.
+	store.Put(blob1)
+	store.Put(blob2)
+	store.Put(blob3)
+	store.Close()
+
+	// Seed the state with funded accounts so all three txs are valid on load.
+	statedb, _ := state.New(types.EmptyRootHash, state.NewDatabaseForTesting())
+	statedb.AddBalance(addr1, uint256.NewInt(1_000_000_000), tracing.BalanceChangeUnspecified)
+	statedb.AddBalance(addr2, uint256.NewInt(1_000_000_000), tracing.BalanceChangeUnspecified)
+	statedb.AddBalance(addr3, uint256.NewInt(1_000_000_000), tracing.BalanceChangeUnspecified)
+	statedb.Commit(0, true, false)
+
+	// Make a custom chain config with a blob schedule large enough for all
+	// three transactions to be accepted into the pool.
+	cancunTime := uint64(0)
+	config := &params.ChainConfig{
+		ChainID:     big.NewInt(1),
+		LondonBlock: big.NewInt(0),
+		BerlinBlock: big.NewInt(0),
+		CancunTime:  &cancunTime,
+		BlobScheduleConfig: &params.BlobScheduleConfig{
+			Cancun: &params.BlobConfig{
+				Target:         12,
+				Max:            24,
+				UpdateFraction: params.DefaultCancunBlobConfig.UpdateFraction,
+			},
+		},
+	}
+	chain := &testBlockChain{
+		config:  config,
+		basefee: uint256.NewInt(1050),
+		blobfee: uint256.NewInt(105),
+		statedb: statedb,
+	}
+	pool := New(Config{Datadir: storage}, chain, nil)
+	if err := pool.Init(1, chain.CurrentBlock(), newReserver()); err != nil {
+		t.Fatalf("failed to create blob pool: %v", err)
+	}
+
+	// Verify the regular three txs are always available.
+	if got := pool.Get(tx1.Hash()); got == nil {
+		t.Errorf("expected tx %s from %s in pool", tx1.Hash(), addr1)
+	}
+	if got := pool.Get(tx2.Hash()); got == nil {
+		t.Errorf("expected tx %s from %s in pool", tx2.Hash(), addr2)
+	}
+	if got := pool.Get(tx3.Hash()); got == nil {
+		t.Errorf("expected tx %s from %s in pool", tx3.Hash(), addr3)
+	}
+
+	// Each case requests test blobs [start, limit) in the given sidecar
+	// version; the final case asks for blobs no pooled tx carries.
+	cases := []struct {
+		start   int
+		limit   int
+		version byte
+		expErr  bool
+	}{
+		{
+			start: 0, limit: 6,
+			version: types.BlobSidecarVersion0,
+		},
+		{
+			start: 0, limit: 6,
+			version: types.BlobSidecarVersion1,
+		},
+		{
+			start: 3, limit: 9,
+			version: types.BlobSidecarVersion0,
+		},
+		{
+			start: 3, limit: 9,
+			version: types.BlobSidecarVersion1,
+		},
+		{
+			start: 3, limit: 15,
+			version: types.BlobSidecarVersion0,
+		},
+		{
+			start: 3, limit: 15,
+			version: types.BlobSidecarVersion1,
+		},
+		{
+			start: 0, limit: 18,
+			version: types.BlobSidecarVersion0,
+		},
+		{
+			start: 0, limit: 18,
+			version: types.BlobSidecarVersion1,
+		},
+		{
+			start: 18, limit: 20,
+			version: types.BlobSidecarVersion0,
+			expErr: true,
+		},
+	}
+	for i, c := range cases {
+		var vhashes []common.Hash
+		for j := c.start; j < c.limit; j++ {
+			vhashes = append(vhashes, testBlobVHashes[j])
+		}
+		blobs, _, proofs, err := pool.GetBlobs(vhashes, c.version)
+
+		if c.expErr {
+			if err == nil {
+				t.Errorf("Unexpected return, want error for case %d", i)
+			}
+		} else {
+			if err != nil {
+				t.Errorf("Unexpected error for case %d, %v", i, err)
+			}
+			// Cross validate what we received vs what we wanted
+			length := c.limit - c.start
+			if len(blobs) != length || len(proofs) != length {
+				t.Errorf("retrieved blobs/proofs size mismatch: have %d/%d, want %d", len(blobs), len(proofs), length)
+				continue
+			}
+			for j := 0; j < len(blobs); j++ {
+				// If an item is missing, but shouldn't, error
+				if blobs[j] == nil || proofs[j] == nil {
+					t.Errorf("tracked blob retrieval failed: item %d, hash %x", j, vhashes[j])
+					continue
+				}
+				// Item retrieved, make sure the blob matches the expectation
+				if *blobs[j] != *testBlobs[c.start+j] {
+					t.Errorf("retrieved blob mismatch: item %d, hash %x", j, vhashes[j])
+					continue
+				}
+				// Item retrieved, make sure the proof matches the expectation
+				if c.version == types.BlobSidecarVersion0 {
+					if proofs[j][0] != testBlobProofs[c.start+j] {
+						t.Errorf("retrieved proof mismatch: item %d, hash %x", j, vhashes[j])
+					}
+				} else {
+					want, _ := kzg4844.ComputeCellProofs(blobs[j])
+					if !reflect.DeepEqual(want, proofs[j]) {
+						t.Errorf("retrieved proof mismatch: item %d, hash %x", j, vhashes[j])
+					}
+				}
+			}
+		}
+	}
+
+	pool.Close()
+}
+
// fakeBilly is a billy.Database implementation which just drops data on the floor.
type fakeBilly struct {
billy.Database
diff --git a/core/txpool/validation.go b/core/txpool/validation.go
index 80ba994d1a..d4f3401086 100644
--- a/core/txpool/validation.go
+++ b/core/txpool/validation.go
@@ -145,7 +145,7 @@ func ValidateTransaction(tx *types.Transaction, head *types.Header, signer types
}
if tx.Type() == types.SetCodeTxType {
if len(tx.SetCodeAuthorizations()) == 0 {
- return fmt.Errorf("set code tx must have at least one authorization tuple")
+ return errors.New("set code tx must have at least one authorization tuple")
}
}
return nil
diff --git a/core/types/bal/bal_encoding.go b/core/types/bal/bal_encoding.go
index d7d08801b1..24dfafa083 100644
--- a/core/types/bal/bal_encoding.go
+++ b/core/types/bal/bal_encoding.go
@@ -169,7 +169,7 @@ func (e *AccountAccess) validate() error {
// Convert code change
if len(e.Code) == 1 {
if len(e.Code[0].Code) > params.MaxCodeSize {
- return fmt.Errorf("code change contained oversized code")
+ return errors.New("code change contained oversized code")
}
}
return nil
diff --git a/core/types/block.go b/core/types/block.go
index b284fb3b16..da9614793a 100644
--- a/core/types/block.go
+++ b/core/types/block.go
@@ -128,7 +128,7 @@ func (h *Header) Hash() common.Hash {
return rlpHash(h)
}
-var headerSize = common.StorageSize(reflect.TypeOf(Header{}).Size())
+var headerSize = common.StorageSize(reflect.TypeFor[Header]().Size())
// Size returns the approximate memory used by all internal contents. It is used
// to approximate and limit the memory consumption of various caches.
diff --git a/core/types/tx_setcode.go b/core/types/tx_setcode.go
index b8e38ef1f7..f2281d4ae7 100644
--- a/core/types/tx_setcode.go
+++ b/core/types/tx_setcode.go
@@ -89,7 +89,7 @@ type authorizationMarshaling struct {
// SignSetCode creates a signed the SetCode authorization.
func SignSetCode(prv *ecdsa.PrivateKey, auth SetCodeAuthorization) (SetCodeAuthorization, error) {
- sighash := auth.sigHash()
+ sighash := auth.SigHash()
sig, err := crypto.Sign(sighash[:], prv)
if err != nil {
return SetCodeAuthorization{}, err
@@ -105,7 +105,8 @@ func SignSetCode(prv *ecdsa.PrivateKey, auth SetCodeAuthorization) (SetCodeAutho
}, nil
}
-func (a *SetCodeAuthorization) sigHash() common.Hash {
+// SigHash returns the hash of SetCodeAuthorization for signing.
+func (a *SetCodeAuthorization) SigHash() common.Hash {
return prefixedRlpHash(0x05, []any{
a.ChainID,
a.Address,
@@ -115,7 +116,7 @@ func (a *SetCodeAuthorization) sigHash() common.Hash {
// Authority recovers the the authorizing account of an authorization.
func (a *SetCodeAuthorization) Authority() (common.Address, error) {
- sighash := a.sigHash()
+ sighash := a.SigHash()
if !crypto.ValidateSignatureValues(a.V, a.R.ToBig(), a.S.ToBig(), true) {
return common.Address{}, ErrInvalidSig
}
diff --git a/core/types/withdrawal.go b/core/types/withdrawal.go
index 6f99e53b56..2cf00195a0 100644
--- a/core/types/withdrawal.go
+++ b/core/types/withdrawal.go
@@ -49,7 +49,7 @@ type Withdrawals []*Withdrawal
// Len returns the length of s.
func (s Withdrawals) Len() int { return len(s) }
-var withdrawalSize = int(reflect.TypeOf(Withdrawal{}).Size())
+var withdrawalSize = int(reflect.TypeFor[Withdrawal]().Size())
func (s Withdrawals) Size() int {
return withdrawalSize * len(s)
diff --git a/core/verkle_witness_test.go b/core/verkle_witness_test.go
index a89672e6e5..e200bf7f50 100644
--- a/core/verkle_witness_test.go
+++ b/core/verkle_witness_test.go
@@ -20,6 +20,7 @@ import (
"bytes"
"encoding/binary"
"encoding/hex"
+ "fmt"
"math/big"
"slices"
"testing"
@@ -202,12 +203,15 @@ func TestProcessVerkle(t *testing.T) {
t.Log("verified verkle proof, inserting blocks into the chain")
+ for i, b := range chain {
+ fmt.Printf("%d %x\n", i, b.Root())
+ }
endnum, err := blockchain.InsertChain(chain)
if err != nil {
t.Fatalf("block %d imported with error: %v", endnum, err)
}
- for i := 0; i < 2; i++ {
+ for i := range 2 {
b := blockchain.GetBlockByNumber(uint64(i) + 1)
if b == nil {
t.Fatalf("expected block %d to be present in chain", i+1)
diff --git a/core/vm/analysis_legacy.go b/core/vm/analysis_legacy.go
index 38af9084ac..a445e2048e 100644
--- a/core/vm/analysis_legacy.go
+++ b/core/vm/analysis_legacy.go
@@ -25,16 +25,16 @@ const (
set7BitsMask = uint16(0b111_1111)
)
-// bitvec is a bit vector which maps bytes in a program.
+// BitVec is a bit vector which maps bytes in a program.
// An unset bit means the byte is an opcode, a set bit means
// it's data (i.e. argument of PUSHxx).
-type bitvec []byte
+type BitVec []byte
-func (bits bitvec) set1(pos uint64) {
+func (bits BitVec) set1(pos uint64) {
bits[pos/8] |= 1 << (pos % 8)
}
-func (bits bitvec) setN(flag uint16, pos uint64) {
+func (bits BitVec) setN(flag uint16, pos uint64) {
a := flag << (pos % 8)
bits[pos/8] |= byte(a)
if b := byte(a >> 8); b != 0 {
@@ -42,13 +42,13 @@ func (bits bitvec) setN(flag uint16, pos uint64) {
}
}
-func (bits bitvec) set8(pos uint64) {
+func (bits BitVec) set8(pos uint64) {
a := byte(0xFF << (pos % 8))
bits[pos/8] |= a
bits[pos/8+1] = ^a
}
-func (bits bitvec) set16(pos uint64) {
+func (bits BitVec) set16(pos uint64) {
a := byte(0xFF << (pos % 8))
bits[pos/8] |= a
bits[pos/8+1] = 0xFF
@@ -56,23 +56,23 @@ func (bits bitvec) set16(pos uint64) {
}
// codeSegment checks if the position is in a code segment.
-func (bits *bitvec) codeSegment(pos uint64) bool {
+func (bits *BitVec) codeSegment(pos uint64) bool {
return (((*bits)[pos/8] >> (pos % 8)) & 1) == 0
}
// codeBitmap collects data locations in code.
-func codeBitmap(code []byte) bitvec {
+func codeBitmap(code []byte) BitVec {
// The bitmap is 4 bytes longer than necessary, in case the code
// ends with a PUSH32, the algorithm will set bits on the
// bitvector outside the bounds of the actual code.
- bits := make(bitvec, len(code)/8+1+4)
+ bits := make(BitVec, len(code)/8+1+4)
return codeBitmapInternal(code, bits)
}
// codeBitmapInternal is the internal implementation of codeBitmap.
// It exists for the purpose of being able to run benchmark tests
// without dynamic allocations affecting the results.
-func codeBitmapInternal(code, bits bitvec) bitvec {
+func codeBitmapInternal(code, bits BitVec) BitVec {
for pc := uint64(0); pc < uint64(len(code)); {
op := OpCode(code[pc])
pc++
diff --git a/core/vm/analysis_legacy_test.go b/core/vm/analysis_legacy_test.go
index 471d2b4ffb..f84a4abc92 100644
--- a/core/vm/analysis_legacy_test.go
+++ b/core/vm/analysis_legacy_test.go
@@ -90,7 +90,7 @@ func BenchmarkJumpdestOpAnalysis(bench *testing.B) {
for i := range code {
code[i] = byte(op)
}
- bits := make(bitvec, len(code)/8+1+4)
+ bits := make(BitVec, len(code)/8+1+4)
b.ResetTimer()
for i := 0; i < b.N; i++ {
clear(bits)
diff --git a/core/vm/contract.go b/core/vm/contract.go
index 0eaa91d959..165ca833f8 100644
--- a/core/vm/contract.go
+++ b/core/vm/contract.go
@@ -31,8 +31,8 @@ type Contract struct {
caller common.Address
address common.Address
- jumpdests map[common.Hash]bitvec // Aggregated result of JUMPDEST analysis.
- analysis bitvec // Locally cached result of JUMPDEST analysis
+ jumpDests JumpDestCache // Aggregated result of JUMPDEST analysis.
+ analysis BitVec // Locally cached result of JUMPDEST analysis
Code []byte
CodeHash common.Hash
@@ -47,15 +47,15 @@ type Contract struct {
}
// NewContract returns a new contract environment for the execution of EVM.
-func NewContract(caller common.Address, address common.Address, value *uint256.Int, gas uint64, jumpDests map[common.Hash]bitvec) *Contract {
- // Initialize the jump analysis map if it's nil, mostly for tests
+func NewContract(caller common.Address, address common.Address, value *uint256.Int, gas uint64, jumpDests JumpDestCache) *Contract {
+ // Initialize the jump analysis cache if it's nil, mostly for tests
if jumpDests == nil {
- jumpDests = make(map[common.Hash]bitvec)
+ jumpDests = newMapJumpDests()
}
return &Contract{
caller: caller,
address: address,
- jumpdests: jumpDests,
+ jumpDests: jumpDests,
Gas: gas,
value: value,
}
@@ -87,12 +87,12 @@ func (c *Contract) isCode(udest uint64) bool {
// contracts ( not temporary initcode), we store the analysis in a map
if c.CodeHash != (common.Hash{}) {
// Does parent context have the analysis?
- analysis, exist := c.jumpdests[c.CodeHash]
+ analysis, exist := c.jumpDests.Load(c.CodeHash)
if !exist {
// Do the analysis and save in parent context
// We do not need to store it in c.analysis
analysis = codeBitmap(c.Code)
- c.jumpdests[c.CodeHash] = analysis
+ c.jumpDests.Store(c.CodeHash, analysis)
}
// Also stash it in current contract for faster access
c.analysis = analysis
diff --git a/core/vm/contracts.go b/core/vm/contracts.go
index b65dff602c..21307ff5ac 100644
--- a/core/vm/contracts.go
+++ b/core/vm/contracts.go
@@ -515,7 +515,7 @@ func (c *bigModExp) Run(input []byte) ([]byte, error) {
}
// enforce size cap for inputs
if c.eip7823 && max(baseLen, expLen, modLen) > 1024 {
- return nil, fmt.Errorf("one or more of base/exponent/modulus length exceeded 1024 bytes")
+ return nil, errors.New("one or more of base/exponent/modulus length exceeded 1024 bytes")
}
// Retrieve the operands and execute the exponentiation
var (
diff --git a/core/vm/eips.go b/core/vm/eips.go
index 7764bd20b6..10ca1fe9ab 100644
--- a/core/vm/eips.go
+++ b/core/vm/eips.go
@@ -89,8 +89,8 @@ func enable1884(jt *JumpTable) {
}
}
-func opSelfBalance(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
- balance := interpreter.evm.StateDB.GetBalance(scope.Contract.Address())
+func opSelfBalance(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
+ balance := evm.StateDB.GetBalance(scope.Contract.Address())
scope.Stack.push(balance)
return nil, nil
}
@@ -108,8 +108,8 @@ func enable1344(jt *JumpTable) {
}
// opChainID implements CHAINID opcode
-func opChainID(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
- chainId, _ := uint256.FromBig(interpreter.evm.chainConfig.ChainID)
+func opChainID(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
+ chainId, _ := uint256.FromBig(evm.chainConfig.ChainID)
scope.Stack.push(chainId)
return nil, nil
}
@@ -199,28 +199,28 @@ func enable1153(jt *JumpTable) {
}
// opTload implements TLOAD opcode
-func opTload(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
+func opTload(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
loc := scope.Stack.peek()
hash := common.Hash(loc.Bytes32())
- val := interpreter.evm.StateDB.GetTransientState(scope.Contract.Address(), hash)
+ val := evm.StateDB.GetTransientState(scope.Contract.Address(), hash)
loc.SetBytes(val.Bytes())
return nil, nil
}
// opTstore implements TSTORE opcode
-func opTstore(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
- if interpreter.readOnly {
+func opTstore(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
+ if evm.readOnly {
return nil, ErrWriteProtection
}
loc := scope.Stack.pop()
val := scope.Stack.pop()
- interpreter.evm.StateDB.SetTransientState(scope.Contract.Address(), loc.Bytes32(), val.Bytes32())
+ evm.StateDB.SetTransientState(scope.Contract.Address(), loc.Bytes32(), val.Bytes32())
return nil, nil
}
// opBaseFee implements BASEFEE opcode
-func opBaseFee(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
- baseFee, _ := uint256.FromBig(interpreter.evm.Context.BaseFee)
+func opBaseFee(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
+ baseFee, _ := uint256.FromBig(evm.Context.BaseFee)
scope.Stack.push(baseFee)
return nil, nil
}
@@ -237,7 +237,7 @@ func enable3855(jt *JumpTable) {
}
// opPush0 implements the PUSH0 opcode
-func opPush0(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
+func opPush0(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
scope.Stack.push(new(uint256.Int))
return nil, nil
}
@@ -263,7 +263,7 @@ func enable5656(jt *JumpTable) {
}
// opMcopy implements the MCOPY opcode (https://eips.ethereum.org/EIPS/eip-5656)
-func opMcopy(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
+func opMcopy(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
var (
dst = scope.Stack.pop()
src = scope.Stack.pop()
@@ -276,10 +276,10 @@ func opMcopy(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]by
}
// opBlobHash implements the BLOBHASH opcode
-func opBlobHash(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
+func opBlobHash(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
index := scope.Stack.peek()
- if index.LtUint64(uint64(len(interpreter.evm.TxContext.BlobHashes))) {
- blobHash := interpreter.evm.TxContext.BlobHashes[index.Uint64()]
+ if index.LtUint64(uint64(len(evm.TxContext.BlobHashes))) {
+ blobHash := evm.TxContext.BlobHashes[index.Uint64()]
index.SetBytes32(blobHash[:])
} else {
index.Clear()
@@ -288,14 +288,14 @@ func opBlobHash(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([
}
// opBlobBaseFee implements BLOBBASEFEE opcode
-func opBlobBaseFee(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
- blobBaseFee, _ := uint256.FromBig(interpreter.evm.Context.BlobBaseFee)
+func opBlobBaseFee(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
+ blobBaseFee, _ := uint256.FromBig(evm.Context.BlobBaseFee)
scope.Stack.push(blobBaseFee)
return nil, nil
}
// opCLZ implements the CLZ opcode (count leading zero bytes)
-func opCLZ(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
+func opCLZ(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
x := scope.Stack.peek()
x.SetUint64(256 - uint64(x.BitLen()))
return nil, nil
@@ -342,7 +342,7 @@ func enable6780(jt *JumpTable) {
}
}
-func opExtCodeCopyEIP4762(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
+func opExtCodeCopyEIP4762(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
var (
stack = scope.Stack
a = stack.pop()
@@ -355,10 +355,10 @@ func opExtCodeCopyEIP4762(pc *uint64, interpreter *EVMInterpreter, scope *ScopeC
uint64CodeOffset = math.MaxUint64
}
addr := common.Address(a.Bytes20())
- code := interpreter.evm.StateDB.GetCode(addr)
+ code := evm.StateDB.GetCode(addr)
paddedCodeCopy, copyOffset, nonPaddedCopyLength := getDataAndAdjustedBounds(code, uint64CodeOffset, length.Uint64())
- consumed, wanted := interpreter.evm.AccessEvents.CodeChunksRangeGas(addr, copyOffset, nonPaddedCopyLength, uint64(len(code)), false, scope.Contract.Gas)
- scope.Contract.UseGas(consumed, interpreter.evm.Config.Tracer, tracing.GasChangeUnspecified)
+ consumed, wanted := evm.AccessEvents.CodeChunksRangeGas(addr, copyOffset, nonPaddedCopyLength, uint64(len(code)), false, scope.Contract.Gas)
+ scope.Contract.UseGas(consumed, evm.Config.Tracer, tracing.GasChangeUnspecified)
if consumed < wanted {
return nil, ErrOutOfGas
}
@@ -370,7 +370,7 @@ func opExtCodeCopyEIP4762(pc *uint64, interpreter *EVMInterpreter, scope *ScopeC
// opPush1EIP4762 handles the special case of PUSH1 opcode for EIP-4762, which
// need not worry about the adjusted bound logic when adding the PUSHDATA to
// the list of access events.
-func opPush1EIP4762(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
+func opPush1EIP4762(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
var (
codeLen = uint64(len(scope.Contract.Code))
integer = new(uint256.Int)
@@ -383,8 +383,8 @@ func opPush1EIP4762(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext
// touch next chunk if PUSH1 is at the boundary. if so, *pc has
// advanced past this boundary.
contractAddr := scope.Contract.Address()
- consumed, wanted := interpreter.evm.AccessEvents.CodeChunksRangeGas(contractAddr, *pc+1, uint64(1), uint64(len(scope.Contract.Code)), false, scope.Contract.Gas)
- scope.Contract.UseGas(wanted, interpreter.evm.Config.Tracer, tracing.GasChangeUnspecified)
+ consumed, wanted := evm.AccessEvents.CodeChunksRangeGas(contractAddr, *pc+1, uint64(1), uint64(len(scope.Contract.Code)), false, scope.Contract.Gas)
+ scope.Contract.UseGas(wanted, evm.Config.Tracer, tracing.GasChangeUnspecified)
if consumed < wanted {
return nil, ErrOutOfGas
}
@@ -396,7 +396,7 @@ func opPush1EIP4762(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext
}
func makePushEIP4762(size uint64, pushByteSize int) executionFunc {
- return func(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
+ return func(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
var (
codeLen = len(scope.Contract.Code)
start = min(codeLen, int(*pc+1))
@@ -411,8 +411,8 @@ func makePushEIP4762(size uint64, pushByteSize int) executionFunc {
if !scope.Contract.IsDeployment && !scope.Contract.IsSystemCall {
contractAddr := scope.Contract.Address()
- consumed, wanted := interpreter.evm.AccessEvents.CodeChunksRangeGas(contractAddr, uint64(start), uint64(pushByteSize), uint64(len(scope.Contract.Code)), false, scope.Contract.Gas)
- scope.Contract.UseGas(consumed, interpreter.evm.Config.Tracer, tracing.GasChangeUnspecified)
+ consumed, wanted := evm.AccessEvents.CodeChunksRangeGas(contractAddr, uint64(start), uint64(pushByteSize), uint64(len(scope.Contract.Code)), false, scope.Contract.Gas)
+ scope.Contract.UseGas(consumed, evm.Config.Tracer, tracing.GasChangeUnspecified)
if consumed < wanted {
return nil, ErrOutOfGas
}
diff --git a/core/vm/evm.go b/core/vm/evm.go
index b45a434545..e360187f7b 100644
--- a/core/vm/evm.go
+++ b/core/vm/evm.go
@@ -26,6 +26,7 @@ import (
"github.com/ethereum/go-ethereum/core/tracing"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
+ "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/params"
"github.com/holiman/uint256"
)
@@ -95,6 +96,9 @@ type EVM struct {
// StateDB gives access to the underlying state
StateDB StateDB
+ // table holds the opcode specific handlers
+ table *JumpTable
+
// depth is the current call stack
depth int
@@ -107,10 +111,6 @@ type EVM struct {
// virtual machine configuration options used to initialise the evm
Config Config
- // global (to this context) ethereum virtual machine used throughout
- // the execution of the tx
- interpreter *EVMInterpreter
-
// abort is used to abort the EVM calling operations
abort atomic.Bool
@@ -122,9 +122,14 @@ type EVM struct {
// precompiles holds the precompiled contracts for the current epoch
precompiles map[common.Address]PrecompiledContract
- // jumpDests is the aggregated result of JUMPDEST analysis made through
- // the life cycle of EVM.
- jumpDests map[common.Hash]bitvec
+ // jumpDests stores results of JUMPDEST analysis.
+ jumpDests JumpDestCache
+
+ hasher crypto.KeccakState // Keccak256 hasher instance shared across opcodes
+ hasherBuf common.Hash // Keccak256 hasher result array shared across opcodes
+
+ readOnly bool // Whether to throw on stateful modifications
+ returnData []byte // Last CALL's return data for subsequent reuse
}
// NewEVM constructs an EVM instance with the supplied block context, state
@@ -138,10 +143,58 @@ func NewEVM(blockCtx BlockContext, statedb StateDB, chainConfig *params.ChainCon
Config: config,
chainConfig: chainConfig,
chainRules: chainConfig.Rules(blockCtx.BlockNumber, blockCtx.Random != nil, blockCtx.Time),
- jumpDests: make(map[common.Hash]bitvec),
+ jumpDests: newMapJumpDests(),
+ hasher: crypto.NewKeccakState(),
}
evm.precompiles = activePrecompiledContracts(evm.chainRules)
- evm.interpreter = NewEVMInterpreter(evm)
+
+ switch {
+ case evm.chainRules.IsOsaka:
+ evm.table = &osakaInstructionSet
+ case evm.chainRules.IsVerkle:
+ // TODO replace with proper instruction set when fork is specified
+ evm.table = &verkleInstructionSet
+ case evm.chainRules.IsPrague:
+ evm.table = &pragueInstructionSet
+ case evm.chainRules.IsCancun:
+ evm.table = &cancunInstructionSet
+ case evm.chainRules.IsShanghai:
+ evm.table = &shanghaiInstructionSet
+ case evm.chainRules.IsMerge:
+ evm.table = &mergeInstructionSet
+ case evm.chainRules.IsLondon:
+ evm.table = &londonInstructionSet
+ case evm.chainRules.IsBerlin:
+ evm.table = &berlinInstructionSet
+ case evm.chainRules.IsIstanbul:
+ evm.table = &istanbulInstructionSet
+ case evm.chainRules.IsConstantinople:
+ evm.table = &constantinopleInstructionSet
+ case evm.chainRules.IsByzantium:
+ evm.table = &byzantiumInstructionSet
+ case evm.chainRules.IsEIP158:
+ evm.table = &spuriousDragonInstructionSet
+ case evm.chainRules.IsEIP150:
+ evm.table = &tangerineWhistleInstructionSet
+ case evm.chainRules.IsHomestead:
+ evm.table = &homesteadInstructionSet
+ default:
+ evm.table = &frontierInstructionSet
+ }
+ var extraEips []int
+ if len(evm.Config.ExtraEips) > 0 {
+ // Deep-copy jumptable to prevent modification of opcodes in other tables
+ evm.table = copyJumpTable(evm.table)
+ }
+ for _, eip := range evm.Config.ExtraEips {
+ if err := EnableEIP(eip, evm.table); err != nil {
+ // Disable it, so caller can check if it's activated or not
+ log.Error("EIP activation failed", "eip", eip, "error", err)
+ } else {
+ extraEips = append(extraEips, eip)
+ }
+ }
+ evm.Config.ExtraEips = extraEips
return evm
}
@@ -152,6 +205,11 @@ func (evm *EVM) SetPrecompiles(precompiles PrecompiledContracts) {
evm.precompiles = precompiles
}
+// SetJumpDestCache configures the analysis cache.
+func (evm *EVM) SetJumpDestCache(jumpDests JumpDestCache) {
+ evm.jumpDests = jumpDests
+}
+
// SetTxContext resets the EVM with a new transaction context.
// This is not threadsafe and should only be done very cautiously.
func (evm *EVM) SetTxContext(txCtx TxContext) {
@@ -172,11 +230,6 @@ func (evm *EVM) Cancelled() bool {
return evm.abort.Load()
}
-// Interpreter returns the current interpreter
-func (evm *EVM) Interpreter() *EVMInterpreter {
- return evm.interpreter
-}
-
func isSystemCall(caller common.Address) bool {
return caller == params.SystemAddress
}
@@ -241,7 +294,7 @@ func (evm *EVM) Call(caller common.Address, addr common.Address, input []byte, g
contract := NewContract(caller, addr, value, gas, evm.jumpDests)
contract.IsSystemCall = isSystemCall(caller)
contract.SetCallCode(evm.resolveCodeHash(addr), code)
- ret, err = evm.interpreter.Run(contract, input, false)
+ ret, err = evm.Run(contract, input, false)
gas = contract.Gas
}
}
@@ -300,7 +353,7 @@ func (evm *EVM) CallCode(caller common.Address, addr common.Address, input []byt
// The contract is a scoped environment for this execution context only.
contract := NewContract(caller, caller, value, gas, evm.jumpDests)
contract.SetCallCode(evm.resolveCodeHash(addr), evm.resolveCode(addr))
- ret, err = evm.interpreter.Run(contract, input, false)
+ ret, err = evm.Run(contract, input, false)
gas = contract.Gas
}
if err != nil {
@@ -344,7 +397,7 @@ func (evm *EVM) DelegateCall(originCaller common.Address, caller common.Address,
// Note: The value refers to the original value from the parent call.
contract := NewContract(originCaller, caller, value, gas, evm.jumpDests)
contract.SetCallCode(evm.resolveCodeHash(addr), evm.resolveCode(addr))
- ret, err = evm.interpreter.Run(contract, input, false)
+ ret, err = evm.Run(contract, input, false)
gas = contract.Gas
}
if err != nil {
@@ -399,7 +452,7 @@ func (evm *EVM) StaticCall(caller common.Address, addr common.Address, input []b
// When an error was returned by the EVM or when setting the creation code
// above we revert to the snapshot and consume any gas remaining. Additionally
// when we're in Homestead this also counts for code storage gas errors.
- ret, err = evm.interpreter.Run(contract, input, true)
+ ret, err = evm.Run(contract, input, true)
gas = contract.Gas
}
if err != nil {
@@ -520,7 +573,7 @@ func (evm *EVM) create(caller common.Address, code []byte, gas uint64, value *ui
// initNewContract runs a new contract's creation code, performs checks on the
// resulting code that is to be deployed, and consumes necessary gas.
func (evm *EVM) initNewContract(contract *Contract, address common.Address) ([]byte, error) {
- ret, err := evm.interpreter.Run(contract, nil, false)
+ ret, err := evm.Run(contract, nil, false)
if err != nil {
return ret, err
}
@@ -563,7 +616,7 @@ func (evm *EVM) Create(caller common.Address, code []byte, gas uint64, value *ui
// The different between Create2 with Create is Create2 uses keccak256(0xff ++ msg.sender ++ salt ++ keccak256(init_code))[12:]
// instead of the usual sender-and-nonce-hash as the address where the contract is initialized at.
func (evm *EVM) Create2(caller common.Address, code []byte, gas uint64, endowment *uint256.Int, salt *uint256.Int) (ret []byte, contractAddr common.Address, leftOverGas uint64, err error) {
- inithash := crypto.HashData(evm.interpreter.hasher, code)
+ inithash := crypto.HashData(evm.hasher, code)
contractAddr = crypto.CreateAddress2(caller, salt.Bytes32(), inithash[:])
return evm.create(caller, code, gas, endowment, contractAddr, CREATE2)
}
diff --git a/core/vm/instructions.go b/core/vm/instructions.go
index 63bb6d2d51..fffa65fd6a 100644
--- a/core/vm/instructions.go
+++ b/core/vm/instructions.go
@@ -26,67 +26,67 @@ import (
"github.com/holiman/uint256"
)
-func opAdd(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
+func opAdd(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
x, y := scope.Stack.pop(), scope.Stack.peek()
y.Add(&x, y)
return nil, nil
}
-func opSub(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
+func opSub(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
x, y := scope.Stack.pop(), scope.Stack.peek()
y.Sub(&x, y)
return nil, nil
}
-func opMul(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
+func opMul(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
x, y := scope.Stack.pop(), scope.Stack.peek()
y.Mul(&x, y)
return nil, nil
}
-func opDiv(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
+func opDiv(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
x, y := scope.Stack.pop(), scope.Stack.peek()
y.Div(&x, y)
return nil, nil
}
-func opSdiv(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
+func opSdiv(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
x, y := scope.Stack.pop(), scope.Stack.peek()
y.SDiv(&x, y)
return nil, nil
}
-func opMod(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
+func opMod(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
x, y := scope.Stack.pop(), scope.Stack.peek()
y.Mod(&x, y)
return nil, nil
}
-func opSmod(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
+func opSmod(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
x, y := scope.Stack.pop(), scope.Stack.peek()
y.SMod(&x, y)
return nil, nil
}
-func opExp(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
+func opExp(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
base, exponent := scope.Stack.pop(), scope.Stack.peek()
exponent.Exp(&base, exponent)
return nil, nil
}
-func opSignExtend(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
+func opSignExtend(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
back, num := scope.Stack.pop(), scope.Stack.peek()
num.ExtendSign(num, &back)
return nil, nil
}
-func opNot(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
+func opNot(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
x := scope.Stack.peek()
x.Not(x)
return nil, nil
}
-func opLt(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
+func opLt(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
x, y := scope.Stack.pop(), scope.Stack.peek()
if x.Lt(y) {
y.SetOne()
@@ -96,7 +96,7 @@ func opLt(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte,
return nil, nil
}
-func opGt(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
+func opGt(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
x, y := scope.Stack.pop(), scope.Stack.peek()
if x.Gt(y) {
y.SetOne()
@@ -106,7 +106,7 @@ func opGt(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte,
return nil, nil
}
-func opSlt(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
+func opSlt(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
x, y := scope.Stack.pop(), scope.Stack.peek()
if x.Slt(y) {
y.SetOne()
@@ -116,7 +116,7 @@ func opSlt(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte
return nil, nil
}
-func opSgt(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
+func opSgt(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
x, y := scope.Stack.pop(), scope.Stack.peek()
if x.Sgt(y) {
y.SetOne()
@@ -126,7 +126,7 @@ func opSgt(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte
return nil, nil
}
-func opEq(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
+func opEq(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
x, y := scope.Stack.pop(), scope.Stack.peek()
if x.Eq(y) {
y.SetOne()
@@ -136,7 +136,7 @@ func opEq(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte,
return nil, nil
}
-func opIszero(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
+func opIszero(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
x := scope.Stack.peek()
if x.IsZero() {
x.SetOne()
@@ -146,37 +146,37 @@ func opIszero(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]b
return nil, nil
}
-func opAnd(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
+func opAnd(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
x, y := scope.Stack.pop(), scope.Stack.peek()
y.And(&x, y)
return nil, nil
}
-func opOr(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
+func opOr(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
x, y := scope.Stack.pop(), scope.Stack.peek()
y.Or(&x, y)
return nil, nil
}
-func opXor(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
+func opXor(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
x, y := scope.Stack.pop(), scope.Stack.peek()
y.Xor(&x, y)
return nil, nil
}
-func opByte(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
+func opByte(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
th, val := scope.Stack.pop(), scope.Stack.peek()
val.Byte(&th)
return nil, nil
}
-func opAddmod(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
+func opAddmod(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
x, y, z := scope.Stack.pop(), scope.Stack.pop(), scope.Stack.peek()
z.AddMod(&x, &y, z)
return nil, nil
}
-func opMulmod(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
+func opMulmod(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
x, y, z := scope.Stack.pop(), scope.Stack.pop(), scope.Stack.peek()
z.MulMod(&x, &y, z)
return nil, nil
@@ -185,7 +185,7 @@ func opMulmod(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]b
// opSHL implements Shift Left
// The SHL instruction (shift left) pops 2 values from the stack, first arg1 and then arg2,
// and pushes on the stack arg2 shifted to the left by arg1 number of bits.
-func opSHL(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
+func opSHL(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
// Note, second operand is left in the stack; accumulate result into it, and no need to push it afterwards
shift, value := scope.Stack.pop(), scope.Stack.peek()
if shift.LtUint64(256) {
@@ -199,7 +199,7 @@ func opSHL(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte
// opSHR implements Logical Shift Right
// The SHR instruction (logical shift right) pops 2 values from the stack, first arg1 and then arg2,
// and pushes on the stack arg2 shifted to the right by arg1 number of bits with zero fill.
-func opSHR(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
+func opSHR(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
// Note, second operand is left in the stack; accumulate result into it, and no need to push it afterwards
shift, value := scope.Stack.pop(), scope.Stack.peek()
if shift.LtUint64(256) {
@@ -213,7 +213,7 @@ func opSHR(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte
// opSAR implements Arithmetic Shift Right
// The SAR instruction (arithmetic shift right) pops 2 values from the stack, first arg1 and then arg2,
// and pushes on the stack arg2 shifted to the right by arg1 number of bits with sign extension.
-func opSAR(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
+func opSAR(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
shift, value := scope.Stack.pop(), scope.Stack.peek()
if shift.GtUint64(256) {
if value.Sign() >= 0 {
@@ -229,50 +229,49 @@ func opSAR(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte
return nil, nil
}
-func opKeccak256(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
+func opKeccak256(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
offset, size := scope.Stack.pop(), scope.Stack.peek()
data := scope.Memory.GetPtr(offset.Uint64(), size.Uint64())
- interpreter.hasher.Reset()
- interpreter.hasher.Write(data)
- interpreter.hasher.Read(interpreter.hasherBuf[:])
+ evm.hasher.Reset()
+ evm.hasher.Write(data)
+ evm.hasher.Read(evm.hasherBuf[:])
- evm := interpreter.evm
if evm.Config.EnablePreimageRecording {
- evm.StateDB.AddPreimage(interpreter.hasherBuf, data)
+ evm.StateDB.AddPreimage(evm.hasherBuf, data)
}
- size.SetBytes(interpreter.hasherBuf[:])
+ size.SetBytes(evm.hasherBuf[:])
return nil, nil
}
-func opAddress(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
+func opAddress(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
scope.Stack.push(new(uint256.Int).SetBytes(scope.Contract.Address().Bytes()))
return nil, nil
}
-func opBalance(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
+func opBalance(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
slot := scope.Stack.peek()
address := common.Address(slot.Bytes20())
- slot.Set(interpreter.evm.StateDB.GetBalance(address))
+ slot.Set(evm.StateDB.GetBalance(address))
return nil, nil
}
-func opOrigin(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
- scope.Stack.push(new(uint256.Int).SetBytes(interpreter.evm.Origin.Bytes()))
+func opOrigin(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
+ scope.Stack.push(new(uint256.Int).SetBytes(evm.Origin.Bytes()))
return nil, nil
}
-func opCaller(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
+func opCaller(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
scope.Stack.push(new(uint256.Int).SetBytes(scope.Contract.Caller().Bytes()))
return nil, nil
}
-func opCallValue(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
+func opCallValue(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
scope.Stack.push(scope.Contract.value)
return nil, nil
}
-func opCallDataLoad(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
+func opCallDataLoad(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
x := scope.Stack.peek()
if offset, overflow := x.Uint64WithOverflow(); !overflow {
data := getData(scope.Contract.Input, offset, 32)
@@ -283,12 +282,12 @@ func opCallDataLoad(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext
return nil, nil
}
-func opCallDataSize(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
+func opCallDataSize(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
scope.Stack.push(new(uint256.Int).SetUint64(uint64(len(scope.Contract.Input))))
return nil, nil
}
-func opCallDataCopy(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
+func opCallDataCopy(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
var (
memOffset = scope.Stack.pop()
dataOffset = scope.Stack.pop()
@@ -306,12 +305,12 @@ func opCallDataCopy(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext
return nil, nil
}
-func opReturnDataSize(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
- scope.Stack.push(new(uint256.Int).SetUint64(uint64(len(interpreter.returnData))))
+func opReturnDataSize(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
+ scope.Stack.push(new(uint256.Int).SetUint64(uint64(len(evm.returnData))))
return nil, nil
}
-func opReturnDataCopy(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
+func opReturnDataCopy(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
var (
memOffset = scope.Stack.pop()
dataOffset = scope.Stack.pop()
@@ -326,25 +325,25 @@ func opReturnDataCopy(pc *uint64, interpreter *EVMInterpreter, scope *ScopeConte
var end = dataOffset
end.Add(&dataOffset, &length)
end64, overflow := end.Uint64WithOverflow()
- if overflow || uint64(len(interpreter.returnData)) < end64 {
+ if overflow || uint64(len(evm.returnData)) < end64 {
return nil, ErrReturnDataOutOfBounds
}
- scope.Memory.Set(memOffset.Uint64(), length.Uint64(), interpreter.returnData[offset64:end64])
+ scope.Memory.Set(memOffset.Uint64(), length.Uint64(), evm.returnData[offset64:end64])
return nil, nil
}
-func opExtCodeSize(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
+func opExtCodeSize(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
slot := scope.Stack.peek()
- slot.SetUint64(uint64(interpreter.evm.StateDB.GetCodeSize(slot.Bytes20())))
+ slot.SetUint64(uint64(evm.StateDB.GetCodeSize(slot.Bytes20())))
return nil, nil
}
-func opCodeSize(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
+func opCodeSize(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
scope.Stack.push(new(uint256.Int).SetUint64(uint64(len(scope.Contract.Code))))
return nil, nil
}
-func opCodeCopy(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
+func opCodeCopy(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
var (
memOffset = scope.Stack.pop()
codeOffset = scope.Stack.pop()
@@ -360,7 +359,7 @@ func opCodeCopy(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([
return nil, nil
}
-func opExtCodeCopy(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
+func opExtCodeCopy(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
var (
stack = scope.Stack
a = stack.pop()
@@ -373,7 +372,7 @@ func opExtCodeCopy(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext)
uint64CodeOffset = math.MaxUint64
}
addr := common.Address(a.Bytes20())
- code := interpreter.evm.StateDB.GetCode(addr)
+ code := evm.StateDB.GetCode(addr)
codeCopy := getData(code, uint64CodeOffset, length.Uint64())
scope.Memory.Set(memOffset.Uint64(), length.Uint64(), codeCopy)
@@ -406,24 +405,24 @@ func opExtCodeCopy(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext)
//
// 6. Caller tries to get the code hash for an account which is marked as deleted, this
// account should be regarded as a non-existent account and zero should be returned.
-func opExtCodeHash(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
+func opExtCodeHash(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
slot := scope.Stack.peek()
address := common.Address(slot.Bytes20())
- if interpreter.evm.StateDB.Empty(address) {
+ if evm.StateDB.Empty(address) {
slot.Clear()
} else {
- slot.SetBytes(interpreter.evm.StateDB.GetCodeHash(address).Bytes())
+ slot.SetBytes(evm.StateDB.GetCodeHash(address).Bytes())
}
return nil, nil
}
-func opGasprice(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
- v, _ := uint256.FromBig(interpreter.evm.GasPrice)
+func opGasprice(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
+ v, _ := uint256.FromBig(evm.GasPrice)
scope.Stack.push(v)
return nil, nil
}
-func opBlockhash(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
+func opBlockhash(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
num := scope.Stack.peek()
num64, overflow := num.Uint64WithOverflow()
if overflow {
@@ -432,18 +431,18 @@ func opBlockhash(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) (
}
var upper, lower uint64
- upper = interpreter.evm.Context.BlockNumber.Uint64()
+ upper = evm.Context.BlockNumber.Uint64()
if upper < 257 {
lower = 0
} else {
lower = upper - 256
}
if num64 >= lower && num64 < upper {
- res := interpreter.evm.Context.GetHash(num64)
- if witness := interpreter.evm.StateDB.Witness(); witness != nil {
+ res := evm.Context.GetHash(num64)
+ if witness := evm.StateDB.Witness(); witness != nil {
witness.AddBlockHash(num64)
}
- if tracer := interpreter.evm.Config.Tracer; tracer != nil && tracer.OnBlockHashRead != nil {
+ if tracer := evm.Config.Tracer; tracer != nil && tracer.OnBlockHashRead != nil {
tracer.OnBlockHashRead(num64, res)
}
num.SetBytes(res[:])
@@ -453,83 +452,83 @@ func opBlockhash(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) (
return nil, nil
}
-func opCoinbase(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
- scope.Stack.push(new(uint256.Int).SetBytes(interpreter.evm.Context.Coinbase.Bytes()))
+func opCoinbase(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
+ scope.Stack.push(new(uint256.Int).SetBytes(evm.Context.Coinbase.Bytes()))
return nil, nil
}
-func opTimestamp(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
- scope.Stack.push(new(uint256.Int).SetUint64(interpreter.evm.Context.Time))
+func opTimestamp(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
+ scope.Stack.push(new(uint256.Int).SetUint64(evm.Context.Time))
return nil, nil
}
-func opNumber(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
- v, _ := uint256.FromBig(interpreter.evm.Context.BlockNumber)
+func opNumber(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
+ v, _ := uint256.FromBig(evm.Context.BlockNumber)
scope.Stack.push(v)
return nil, nil
}
-func opDifficulty(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
- v, _ := uint256.FromBig(interpreter.evm.Context.Difficulty)
+func opDifficulty(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
+ v, _ := uint256.FromBig(evm.Context.Difficulty)
scope.Stack.push(v)
return nil, nil
}
-func opRandom(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
- v := new(uint256.Int).SetBytes(interpreter.evm.Context.Random.Bytes())
+func opRandom(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
+ v := new(uint256.Int).SetBytes(evm.Context.Random.Bytes())
scope.Stack.push(v)
return nil, nil
}
-func opGasLimit(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
- scope.Stack.push(new(uint256.Int).SetUint64(interpreter.evm.Context.GasLimit))
+func opGasLimit(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
+ scope.Stack.push(new(uint256.Int).SetUint64(evm.Context.GasLimit))
return nil, nil
}
-func opPop(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
+func opPop(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
scope.Stack.pop()
return nil, nil
}
-func opMload(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
+func opMload(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
v := scope.Stack.peek()
offset := v.Uint64()
v.SetBytes(scope.Memory.GetPtr(offset, 32))
return nil, nil
}
-func opMstore(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
+func opMstore(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
mStart, val := scope.Stack.pop(), scope.Stack.pop()
scope.Memory.Set32(mStart.Uint64(), &val)
return nil, nil
}
-func opMstore8(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
+func opMstore8(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
off, val := scope.Stack.pop(), scope.Stack.pop()
scope.Memory.store[off.Uint64()] = byte(val.Uint64())
return nil, nil
}
-func opSload(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
+func opSload(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
loc := scope.Stack.peek()
hash := common.Hash(loc.Bytes32())
- val := interpreter.evm.StateDB.GetState(scope.Contract.Address(), hash)
+ val := evm.StateDB.GetState(scope.Contract.Address(), hash)
loc.SetBytes(val.Bytes())
return nil, nil
}
-func opSstore(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
- if interpreter.readOnly {
+func opSstore(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
+ if evm.readOnly {
return nil, ErrWriteProtection
}
loc := scope.Stack.pop()
val := scope.Stack.pop()
- interpreter.evm.StateDB.SetState(scope.Contract.Address(), loc.Bytes32(), val.Bytes32())
+ evm.StateDB.SetState(scope.Contract.Address(), loc.Bytes32(), val.Bytes32())
return nil, nil
}
-func opJump(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
- if interpreter.evm.abort.Load() {
+func opJump(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
+ if evm.abort.Load() {
return nil, errStopToken
}
pos := scope.Stack.pop()
@@ -540,8 +539,8 @@ func opJump(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byt
return nil, nil
}
-func opJumpi(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
- if interpreter.evm.abort.Load() {
+func opJumpi(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
+ if evm.abort.Load() {
return nil, errStopToken
}
pos, cond := scope.Stack.pop(), scope.Stack.pop()
@@ -554,107 +553,107 @@ func opJumpi(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]by
return nil, nil
}
-func opJumpdest(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
+func opJumpdest(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
return nil, nil
}
-func opPc(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
+func opPc(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
scope.Stack.push(new(uint256.Int).SetUint64(*pc))
return nil, nil
}
-func opMsize(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
+func opMsize(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
scope.Stack.push(new(uint256.Int).SetUint64(uint64(scope.Memory.Len())))
return nil, nil
}
-func opGas(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
+func opGas(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
scope.Stack.push(new(uint256.Int).SetUint64(scope.Contract.Gas))
return nil, nil
}
-func opSwap1(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
+func opSwap1(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
scope.Stack.swap1()
return nil, nil
}
-func opSwap2(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
+func opSwap2(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
scope.Stack.swap2()
return nil, nil
}
-func opSwap3(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
+func opSwap3(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
scope.Stack.swap3()
return nil, nil
}
-func opSwap4(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
+func opSwap4(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
scope.Stack.swap4()
return nil, nil
}
-func opSwap5(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
+func opSwap5(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
scope.Stack.swap5()
return nil, nil
}
-func opSwap6(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
+func opSwap6(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
scope.Stack.swap6()
return nil, nil
}
-func opSwap7(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
+func opSwap7(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
scope.Stack.swap7()
return nil, nil
}
-func opSwap8(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
+func opSwap8(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
scope.Stack.swap8()
return nil, nil
}
-func opSwap9(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
+func opSwap9(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
scope.Stack.swap9()
return nil, nil
}
-func opSwap10(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
+func opSwap10(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
scope.Stack.swap10()
return nil, nil
}
-func opSwap11(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
+func opSwap11(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
scope.Stack.swap11()
return nil, nil
}
-func opSwap12(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
+func opSwap12(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
scope.Stack.swap12()
return nil, nil
}
-func opSwap13(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
+func opSwap13(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
scope.Stack.swap13()
return nil, nil
}
-func opSwap14(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
+func opSwap14(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
scope.Stack.swap14()
return nil, nil
}
-func opSwap15(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
+func opSwap15(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
scope.Stack.swap15()
return nil, nil
}
-func opSwap16(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
+func opSwap16(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
scope.Stack.swap16()
return nil, nil
}
-func opCreate(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
- if interpreter.readOnly {
+func opCreate(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
+ if evm.readOnly {
return nil, ErrWriteProtection
}
var (
@@ -663,21 +662,21 @@ func opCreate(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]b
input = scope.Memory.GetCopy(offset.Uint64(), size.Uint64())
gas = scope.Contract.Gas
)
- if interpreter.evm.chainRules.IsEIP150 {
+ if evm.chainRules.IsEIP150 {
gas -= gas / 64
}
// reuse size int for stackvalue
stackvalue := size
- scope.Contract.UseGas(gas, interpreter.evm.Config.Tracer, tracing.GasChangeCallContractCreation)
+ scope.Contract.UseGas(gas, evm.Config.Tracer, tracing.GasChangeCallContractCreation)
- res, addr, returnGas, suberr := interpreter.evm.Create(scope.Contract.Address(), input, gas, &value)
+ res, addr, returnGas, suberr := evm.Create(scope.Contract.Address(), input, gas, &value)
// Push item on the stack based on the returned error. If the ruleset is
// homestead we must check for CodeStoreOutOfGasError (homestead only
// rule) and treat as an error, if the ruleset is frontier we must
// ignore this error and pretend the operation was successful.
- if interpreter.evm.chainRules.IsHomestead && suberr == ErrCodeStoreOutOfGas {
+ if evm.chainRules.IsHomestead && suberr == ErrCodeStoreOutOfGas {
stackvalue.Clear()
} else if suberr != nil && suberr != ErrCodeStoreOutOfGas {
stackvalue.Clear()
@@ -686,18 +685,18 @@ func opCreate(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]b
}
scope.Stack.push(&stackvalue)
- scope.Contract.RefundGas(returnGas, interpreter.evm.Config.Tracer, tracing.GasChangeCallLeftOverRefunded)
+ scope.Contract.RefundGas(returnGas, evm.Config.Tracer, tracing.GasChangeCallLeftOverRefunded)
if suberr == ErrExecutionReverted {
- interpreter.returnData = res // set REVERT data to return data buffer
+ evm.returnData = res // set REVERT data to return data buffer
return res, nil
}
- interpreter.returnData = nil // clear dirty return data buffer
+ evm.returnData = nil // clear dirty return data buffer
return nil, nil
}
-func opCreate2(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
- if interpreter.readOnly {
+func opCreate2(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
+ if evm.readOnly {
return nil, ErrWriteProtection
}
var (
@@ -710,10 +709,10 @@ func opCreate2(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]
// Apply EIP150
gas -= gas / 64
- scope.Contract.UseGas(gas, interpreter.evm.Config.Tracer, tracing.GasChangeCallContractCreation2)
+ scope.Contract.UseGas(gas, evm.Config.Tracer, tracing.GasChangeCallContractCreation2)
// reuse size int for stackvalue
stackvalue := size
- res, addr, returnGas, suberr := interpreter.evm.Create2(scope.Contract.Address(), input, gas,
+ res, addr, returnGas, suberr := evm.Create2(scope.Contract.Address(), input, gas,
&endowment, &salt)
// Push item on the stack based on the returned error.
if suberr != nil {
@@ -722,35 +721,35 @@ func opCreate2(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]
stackvalue.SetBytes(addr.Bytes())
}
scope.Stack.push(&stackvalue)
- scope.Contract.RefundGas(returnGas, interpreter.evm.Config.Tracer, tracing.GasChangeCallLeftOverRefunded)
+ scope.Contract.RefundGas(returnGas, evm.Config.Tracer, tracing.GasChangeCallLeftOverRefunded)
if suberr == ErrExecutionReverted {
- interpreter.returnData = res // set REVERT data to return data buffer
+ evm.returnData = res // set REVERT data to return data buffer
return res, nil
}
- interpreter.returnData = nil // clear dirty return data buffer
+ evm.returnData = nil // clear dirty return data buffer
return nil, nil
}
-func opCall(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
+func opCall(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
stack := scope.Stack
- // Pop gas. The actual gas in interpreter.evm.callGasTemp.
+ // Pop gas. The actual gas is in evm.callGasTemp.
// We can use this as a temporary value
temp := stack.pop()
- gas := interpreter.evm.callGasTemp
+ gas := evm.callGasTemp
// Pop other call parameters.
addr, value, inOffset, inSize, retOffset, retSize := stack.pop(), stack.pop(), stack.pop(), stack.pop(), stack.pop(), stack.pop()
toAddr := common.Address(addr.Bytes20())
// Get the arguments from the memory.
args := scope.Memory.GetPtr(inOffset.Uint64(), inSize.Uint64())
- if interpreter.readOnly && !value.IsZero() {
+ if evm.readOnly && !value.IsZero() {
return nil, ErrWriteProtection
}
if !value.IsZero() {
gas += params.CallStipend
}
- ret, returnGas, err := interpreter.evm.Call(scope.Contract.Address(), toAddr, args, gas, &value)
+ ret, returnGas, err := evm.Call(scope.Contract.Address(), toAddr, args, gas, &value)
if err != nil {
temp.Clear()
@@ -762,18 +761,18 @@ func opCall(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byt
scope.Memory.Set(retOffset.Uint64(), retSize.Uint64(), ret)
}
- scope.Contract.RefundGas(returnGas, interpreter.evm.Config.Tracer, tracing.GasChangeCallLeftOverRefunded)
+ scope.Contract.RefundGas(returnGas, evm.Config.Tracer, tracing.GasChangeCallLeftOverRefunded)
- interpreter.returnData = ret
+ evm.returnData = ret
return ret, nil
}
-func opCallCode(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
- // Pop gas. The actual gas is in interpreter.evm.callGasTemp.
+func opCallCode(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
+ // Pop gas. The actual gas is in evm.callGasTemp.
stack := scope.Stack
// We use it as a temporary value
temp := stack.pop()
- gas := interpreter.evm.callGasTemp
+ gas := evm.callGasTemp
// Pop other call parameters.
addr, value, inOffset, inSize, retOffset, retSize := stack.pop(), stack.pop(), stack.pop(), stack.pop(), stack.pop(), stack.pop()
toAddr := common.Address(addr.Bytes20())
@@ -784,7 +783,7 @@ func opCallCode(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([
gas += params.CallStipend
}
- ret, returnGas, err := interpreter.evm.CallCode(scope.Contract.Address(), toAddr, args, gas, &value)
+ ret, returnGas, err := evm.CallCode(scope.Contract.Address(), toAddr, args, gas, &value)
if err != nil {
temp.Clear()
} else {
@@ -795,25 +794,25 @@ func opCallCode(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([
scope.Memory.Set(retOffset.Uint64(), retSize.Uint64(), ret)
}
- scope.Contract.RefundGas(returnGas, interpreter.evm.Config.Tracer, tracing.GasChangeCallLeftOverRefunded)
+ scope.Contract.RefundGas(returnGas, evm.Config.Tracer, tracing.GasChangeCallLeftOverRefunded)
- interpreter.returnData = ret
+ evm.returnData = ret
return ret, nil
}
-func opDelegateCall(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
+func opDelegateCall(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
stack := scope.Stack
- // Pop gas. The actual gas is in interpreter.evm.callGasTemp.
+ // Pop gas. The actual gas is in evm.callGasTemp.
// We use it as a temporary value
temp := stack.pop()
- gas := interpreter.evm.callGasTemp
+ gas := evm.callGasTemp
// Pop other call parameters.
addr, inOffset, inSize, retOffset, retSize := stack.pop(), stack.pop(), stack.pop(), stack.pop(), stack.pop()
toAddr := common.Address(addr.Bytes20())
// Get arguments from the memory.
args := scope.Memory.GetPtr(inOffset.Uint64(), inSize.Uint64())
- ret, returnGas, err := interpreter.evm.DelegateCall(scope.Contract.Caller(), scope.Contract.Address(), toAddr, args, gas, scope.Contract.value)
+ ret, returnGas, err := evm.DelegateCall(scope.Contract.Caller(), scope.Contract.Address(), toAddr, args, gas, scope.Contract.value)
if err != nil {
temp.Clear()
} else {
@@ -824,25 +823,25 @@ func opDelegateCall(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext
scope.Memory.Set(retOffset.Uint64(), retSize.Uint64(), ret)
}
- scope.Contract.RefundGas(returnGas, interpreter.evm.Config.Tracer, tracing.GasChangeCallLeftOverRefunded)
+ scope.Contract.RefundGas(returnGas, evm.Config.Tracer, tracing.GasChangeCallLeftOverRefunded)
- interpreter.returnData = ret
+ evm.returnData = ret
return ret, nil
}
-func opStaticCall(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
- // Pop gas. The actual gas is in interpreter.evm.callGasTemp.
+func opStaticCall(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
+ // Pop gas. The actual gas is in evm.callGasTemp.
stack := scope.Stack
// We use it as a temporary value
temp := stack.pop()
- gas := interpreter.evm.callGasTemp
+ gas := evm.callGasTemp
// Pop other call parameters.
addr, inOffset, inSize, retOffset, retSize := stack.pop(), stack.pop(), stack.pop(), stack.pop(), stack.pop()
toAddr := common.Address(addr.Bytes20())
// Get arguments from the memory.
args := scope.Memory.GetPtr(inOffset.Uint64(), inSize.Uint64())
- ret, returnGas, err := interpreter.evm.StaticCall(scope.Contract.Address(), toAddr, args, gas)
+ ret, returnGas, err := evm.StaticCall(scope.Contract.Address(), toAddr, args, gas)
if err != nil {
temp.Clear()
} else {
@@ -853,69 +852,69 @@ func opStaticCall(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext)
scope.Memory.Set(retOffset.Uint64(), retSize.Uint64(), ret)
}
- scope.Contract.RefundGas(returnGas, interpreter.evm.Config.Tracer, tracing.GasChangeCallLeftOverRefunded)
+ scope.Contract.RefundGas(returnGas, evm.Config.Tracer, tracing.GasChangeCallLeftOverRefunded)
- interpreter.returnData = ret
+ evm.returnData = ret
return ret, nil
}
-func opReturn(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
+func opReturn(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
offset, size := scope.Stack.pop(), scope.Stack.pop()
ret := scope.Memory.GetCopy(offset.Uint64(), size.Uint64())
return ret, errStopToken
}
-func opRevert(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
+func opRevert(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
offset, size := scope.Stack.pop(), scope.Stack.pop()
ret := scope.Memory.GetCopy(offset.Uint64(), size.Uint64())
- interpreter.returnData = ret
+ evm.returnData = ret
return ret, ErrExecutionReverted
}
-func opUndefined(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
+func opUndefined(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
return nil, &ErrInvalidOpCode{opcode: OpCode(scope.Contract.Code[*pc])}
}
-func opStop(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
+func opStop(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
return nil, errStopToken
}
-func opSelfdestruct(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
- if interpreter.readOnly {
+func opSelfdestruct(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
+ if evm.readOnly {
return nil, ErrWriteProtection
}
beneficiary := scope.Stack.pop()
- balance := interpreter.evm.StateDB.GetBalance(scope.Contract.Address())
- interpreter.evm.StateDB.AddBalance(beneficiary.Bytes20(), balance, tracing.BalanceIncreaseSelfdestruct)
- interpreter.evm.StateDB.SelfDestruct(scope.Contract.Address())
- if tracer := interpreter.evm.Config.Tracer; tracer != nil {
+ balance := evm.StateDB.GetBalance(scope.Contract.Address())
+ evm.StateDB.AddBalance(beneficiary.Bytes20(), balance, tracing.BalanceIncreaseSelfdestruct)
+ evm.StateDB.SelfDestruct(scope.Contract.Address())
+ if tracer := evm.Config.Tracer; tracer != nil {
if tracer.OnEnter != nil {
- tracer.OnEnter(interpreter.evm.depth, byte(SELFDESTRUCT), scope.Contract.Address(), beneficiary.Bytes20(), []byte{}, 0, balance.ToBig())
+ tracer.OnEnter(evm.depth, byte(SELFDESTRUCT), scope.Contract.Address(), beneficiary.Bytes20(), []byte{}, 0, balance.ToBig())
}
if tracer.OnExit != nil {
- tracer.OnExit(interpreter.evm.depth, []byte{}, 0, nil, false)
+ tracer.OnExit(evm.depth, []byte{}, 0, nil, false)
}
}
return nil, errStopToken
}
-func opSelfdestruct6780(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
- if interpreter.readOnly {
+func opSelfdestruct6780(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
+ if evm.readOnly {
return nil, ErrWriteProtection
}
beneficiary := scope.Stack.pop()
- balance := interpreter.evm.StateDB.GetBalance(scope.Contract.Address())
- interpreter.evm.StateDB.SubBalance(scope.Contract.Address(), balance, tracing.BalanceDecreaseSelfdestruct)
- interpreter.evm.StateDB.AddBalance(beneficiary.Bytes20(), balance, tracing.BalanceIncreaseSelfdestruct)
- interpreter.evm.StateDB.SelfDestruct6780(scope.Contract.Address())
- if tracer := interpreter.evm.Config.Tracer; tracer != nil {
+ balance := evm.StateDB.GetBalance(scope.Contract.Address())
+ evm.StateDB.SubBalance(scope.Contract.Address(), balance, tracing.BalanceDecreaseSelfdestruct)
+ evm.StateDB.AddBalance(beneficiary.Bytes20(), balance, tracing.BalanceIncreaseSelfdestruct)
+ evm.StateDB.SelfDestruct6780(scope.Contract.Address())
+ if tracer := evm.Config.Tracer; tracer != nil {
if tracer.OnEnter != nil {
- tracer.OnEnter(interpreter.evm.depth, byte(SELFDESTRUCT), scope.Contract.Address(), beneficiary.Bytes20(), []byte{}, 0, balance.ToBig())
+ tracer.OnEnter(evm.depth, byte(SELFDESTRUCT), scope.Contract.Address(), beneficiary.Bytes20(), []byte{}, 0, balance.ToBig())
}
if tracer.OnExit != nil {
- tracer.OnExit(interpreter.evm.depth, []byte{}, 0, nil, false)
+ tracer.OnExit(evm.depth, []byte{}, 0, nil, false)
}
}
return nil, errStopToken
@@ -925,8 +924,8 @@ func opSelfdestruct6780(pc *uint64, interpreter *EVMInterpreter, scope *ScopeCon
// make log instruction function
func makeLog(size int) executionFunc {
- return func(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
- if interpreter.readOnly {
+ return func(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
+ if evm.readOnly {
return nil, ErrWriteProtection
}
topics := make([]common.Hash, size)
@@ -938,13 +937,13 @@ func makeLog(size int) executionFunc {
}
d := scope.Memory.GetCopy(mStart.Uint64(), mSize.Uint64())
- interpreter.evm.StateDB.AddLog(&types.Log{
+ evm.StateDB.AddLog(&types.Log{
Address: scope.Contract.Address(),
Topics: topics,
Data: d,
// This is a non-consensus field, but assigned here because
// core/state doesn't know the current block number.
- BlockNumber: interpreter.evm.Context.BlockNumber.Uint64(),
+ BlockNumber: evm.Context.BlockNumber.Uint64(),
})
return nil, nil
@@ -952,7 +951,7 @@ func makeLog(size int) executionFunc {
}
// opPush1 is a specialized version of pushN
-func opPush1(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
+func opPush1(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
var (
codeLen = uint64(len(scope.Contract.Code))
integer = new(uint256.Int)
@@ -967,7 +966,7 @@ func opPush1(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]by
}
// opPush2 is a specialized version of pushN
-func opPush2(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
+func opPush2(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
var (
codeLen = uint64(len(scope.Contract.Code))
integer = new(uint256.Int)
@@ -985,7 +984,7 @@ func opPush2(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]by
// make push instruction function
func makePush(size uint64, pushByteSize int) executionFunc {
- return func(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
+ return func(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
var (
codeLen = len(scope.Contract.Code)
start = min(codeLen, int(*pc+1))
@@ -1005,7 +1004,7 @@ func makePush(size uint64, pushByteSize int) executionFunc {
// make dup instruction function
func makeDup(size int64) executionFunc {
- return func(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
+ return func(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
scope.Stack.dup(int(size))
return nil, nil
}
diff --git a/core/vm/instructions_test.go b/core/vm/instructions_test.go
index 8a82de5d8b..cd31829a7e 100644
--- a/core/vm/instructions_test.go
+++ b/core/vm/instructions_test.go
@@ -107,7 +107,7 @@ func testTwoOperandOp(t *testing.T, tests []TwoOperandTestcase, opFn executionFu
expected := new(uint256.Int).SetBytes(common.Hex2Bytes(test.Expected))
stack.push(x)
stack.push(y)
- opFn(&pc, evm.interpreter, &ScopeContext{nil, stack, nil})
+ opFn(&pc, evm, &ScopeContext{nil, stack, nil})
if len(stack.data) != 1 {
t.Errorf("Expected one item on stack after %v, got %d: ", name, len(stack.data))
}
@@ -221,7 +221,7 @@ func TestAddMod(t *testing.T) {
stack.push(z)
stack.push(y)
stack.push(x)
- opAddmod(&pc, evm.interpreter, &ScopeContext{nil, stack, nil})
+ opAddmod(&pc, evm, &ScopeContext{nil, stack, nil})
actual := stack.pop()
if actual.Cmp(expected) != 0 {
t.Errorf("Testcase %d, expected %x, got %x", i, expected, actual)
@@ -247,7 +247,7 @@ func TestWriteExpectedValues(t *testing.T) {
y := new(uint256.Int).SetBytes(common.Hex2Bytes(param.y))
stack.push(x)
stack.push(y)
- opFn(&pc, evm.interpreter, &ScopeContext{nil, stack, nil})
+ opFn(&pc, evm, &ScopeContext{nil, stack, nil})
actual := stack.pop()
result[i] = TwoOperandTestcase{param.x, param.y, fmt.Sprintf("%064x", actual)}
}
@@ -296,7 +296,7 @@ func opBenchmark(bench *testing.B, op executionFunc, args ...string) {
for _, arg := range intArgs {
stack.push(arg)
}
- op(&pc, evm.interpreter, scope)
+ op(&pc, evm, scope)
stack.pop()
}
bench.StopTimer()
@@ -528,13 +528,13 @@ func TestOpMstore(t *testing.T) {
v := "abcdef00000000000000abba000000000deaf000000c0de00100000000133700"
stack.push(new(uint256.Int).SetBytes(common.Hex2Bytes(v)))
stack.push(new(uint256.Int))
- opMstore(&pc, evm.interpreter, &ScopeContext{mem, stack, nil})
+ opMstore(&pc, evm, &ScopeContext{mem, stack, nil})
if got := common.Bytes2Hex(mem.GetCopy(0, 32)); got != v {
t.Fatalf("Mstore fail, got %v, expected %v", got, v)
}
stack.push(new(uint256.Int).SetUint64(0x1))
stack.push(new(uint256.Int))
- opMstore(&pc, evm.interpreter, &ScopeContext{mem, stack, nil})
+ opMstore(&pc, evm, &ScopeContext{mem, stack, nil})
if common.Bytes2Hex(mem.GetCopy(0, 32)) != "0000000000000000000000000000000000000000000000000000000000000001" {
t.Fatalf("Mstore failed to overwrite previous value")
}
@@ -555,7 +555,7 @@ func BenchmarkOpMstore(bench *testing.B) {
for i := 0; i < bench.N; i++ {
stack.push(value)
stack.push(memStart)
- opMstore(&pc, evm.interpreter, &ScopeContext{mem, stack, nil})
+ opMstore(&pc, evm, &ScopeContext{mem, stack, nil})
}
}
@@ -581,14 +581,14 @@ func TestOpTstore(t *testing.T) {
stack.push(new(uint256.Int).SetBytes(value))
// push the location to the stack
stack.push(new(uint256.Int))
- opTstore(&pc, evm.interpreter, &scopeContext)
+ opTstore(&pc, evm, &scopeContext)
// there should be no elements on the stack after TSTORE
if stack.len() != 0 {
t.Fatal("stack wrong size")
}
// push the location to the stack
stack.push(new(uint256.Int))
- opTload(&pc, evm.interpreter, &scopeContext)
+ opTload(&pc, evm, &scopeContext)
// there should be one element on the stack after TLOAD
if stack.len() != 1 {
t.Fatal("stack wrong size")
@@ -613,7 +613,7 @@ func BenchmarkOpKeccak256(bench *testing.B) {
for i := 0; i < bench.N; i++ {
stack.push(uint256.NewInt(32))
stack.push(start)
- opKeccak256(&pc, evm.interpreter, &ScopeContext{mem, stack, nil})
+ opKeccak256(&pc, evm, &ScopeContext{mem, stack, nil})
}
}
@@ -707,7 +707,7 @@ func TestRandom(t *testing.T) {
stack = newstack()
pc = uint64(0)
)
- opRandom(&pc, evm.interpreter, &ScopeContext{nil, stack, nil})
+ opRandom(&pc, evm, &ScopeContext{nil, stack, nil})
if len(stack.data) != 1 {
t.Errorf("Expected one item on stack after %v, got %d: ", tt.name, len(stack.data))
}
@@ -749,7 +749,7 @@ func TestBlobHash(t *testing.T) {
)
evm.SetTxContext(TxContext{BlobHashes: tt.hashes})
stack.push(uint256.NewInt(tt.idx))
- opBlobHash(&pc, evm.interpreter, &ScopeContext{nil, stack, nil})
+ opBlobHash(&pc, evm, &ScopeContext{nil, stack, nil})
if len(stack.data) != 1 {
t.Errorf("Expected one item on stack after %v, got %d: ", tt.name, len(stack.data))
}
@@ -889,7 +889,7 @@ func TestOpMCopy(t *testing.T) {
mem.Resize(memorySize)
}
// Do the copy
- opMcopy(&pc, evm.interpreter, &ScopeContext{mem, stack, nil})
+ opMcopy(&pc, evm, &ScopeContext{mem, stack, nil})
want := common.FromHex(strings.ReplaceAll(tc.want, " ", ""))
if have := mem.store; !bytes.Equal(want, have) {
t.Errorf("case %d: \nwant: %#x\nhave: %#x\n", i, want, have)
@@ -1001,7 +1001,7 @@ func TestOpCLZ(t *testing.T) {
}
stack.push(val)
- opCLZ(&pc, evm.interpreter, &ScopeContext{Stack: stack})
+ opCLZ(&pc, evm, &ScopeContext{Stack: stack})
if gotLen := stack.len(); gotLen != 1 {
t.Fatalf("stack length = %d; want 1", gotLen)
diff --git a/core/vm/interpreter.go b/core/vm/interpreter.go
index 34d19008da..a0637a6800 100644
--- a/core/vm/interpreter.go
+++ b/core/vm/interpreter.go
@@ -22,8 +22,6 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/math"
"github.com/ethereum/go-ethereum/core/tracing"
- "github.com/ethereum/go-ethereum/crypto"
- "github.com/ethereum/go-ethereum/log"
"github.com/holiman/uint256"
)
@@ -89,93 +87,27 @@ func (ctx *ScopeContext) ContractCode() []byte {
return ctx.Contract.Code
}
-// EVMInterpreter represents an EVM interpreter
-type EVMInterpreter struct {
- evm *EVM
- table *JumpTable
-
- hasher crypto.KeccakState // Keccak256 hasher instance shared across opcodes
- hasherBuf common.Hash // Keccak256 hasher result array shared across opcodes
-
- readOnly bool // Whether to throw on stateful modifications
- returnData []byte // Last CALL's return data for subsequent reuse
-}
-
-// NewEVMInterpreter returns a new instance of the Interpreter.
-func NewEVMInterpreter(evm *EVM) *EVMInterpreter {
- // If jump table was not initialised we set the default one.
- var table *JumpTable
- switch {
- case evm.chainRules.IsOsaka:
- table = &osakaInstructionSet
- case evm.chainRules.IsVerkle:
- // TODO replace with proper instruction set when fork is specified
- table = &verkleInstructionSet
- case evm.chainRules.IsPrague:
- table = &pragueInstructionSet
- case evm.chainRules.IsCancun:
- table = &cancunInstructionSet
- case evm.chainRules.IsShanghai:
- table = &shanghaiInstructionSet
- case evm.chainRules.IsMerge:
- table = &mergeInstructionSet
- case evm.chainRules.IsLondon:
- table = &londonInstructionSet
- case evm.chainRules.IsBerlin:
- table = &berlinInstructionSet
- case evm.chainRules.IsIstanbul:
- table = &istanbulInstructionSet
- case evm.chainRules.IsConstantinople:
- table = &constantinopleInstructionSet
- case evm.chainRules.IsByzantium:
- table = &byzantiumInstructionSet
- case evm.chainRules.IsEIP158:
- table = &spuriousDragonInstructionSet
- case evm.chainRules.IsEIP150:
- table = &tangerineWhistleInstructionSet
- case evm.chainRules.IsHomestead:
- table = &homesteadInstructionSet
- default:
- table = &frontierInstructionSet
- }
- var extraEips []int
- if len(evm.Config.ExtraEips) > 0 {
- // Deep-copy jumptable to prevent modification of opcodes in other tables
- table = copyJumpTable(table)
- }
- for _, eip := range evm.Config.ExtraEips {
- if err := EnableEIP(eip, table); err != nil {
- // Disable it, so caller can check if it's activated or not
- log.Error("EIP activation failed", "eip", eip, "error", err)
- } else {
- extraEips = append(extraEips, eip)
- }
- }
- evm.Config.ExtraEips = extraEips
- return &EVMInterpreter{evm: evm, table: table, hasher: crypto.NewKeccakState()}
-}
-
// Run loops and evaluates the contract's code with the given input data and returns
// the return byte-slice and an error if one occurred.
//
// It's important to note that any errors returned by the interpreter should be
// considered a revert-and-consume-all-gas operation except for
// ErrExecutionReverted which means revert-and-keep-gas-left.
-func (in *EVMInterpreter) Run(contract *Contract, input []byte, readOnly bool) (ret []byte, err error) {
+func (evm *EVM) Run(contract *Contract, input []byte, readOnly bool) (ret []byte, err error) {
// Increment the call depth which is restricted to 1024
- in.evm.depth++
- defer func() { in.evm.depth-- }()
+ evm.depth++
+ defer func() { evm.depth-- }()
// Make sure the readOnly is only set if we aren't in readOnly yet.
// This also makes sure that the readOnly flag isn't removed for child calls.
- if readOnly && !in.readOnly {
- in.readOnly = true
- defer func() { in.readOnly = false }()
+ if readOnly && !evm.readOnly {
+ evm.readOnly = true
+ defer func() { evm.readOnly = false }()
}
// Reset the previous call's return data. It's unimportant to preserve the old buffer
// as every returning call will return new data anyway.
- in.returnData = nil
+ evm.returnData = nil
// Don't bother with the execution if there's no code.
if len(contract.Code) == 0 {
@@ -184,7 +116,7 @@ func (in *EVMInterpreter) Run(contract *Contract, input []byte, readOnly bool) (
var (
op OpCode // current opcode
- jumpTable *JumpTable = in.table
+ jumpTable *JumpTable = evm.table
mem = NewMemory() // bound memory
stack = newstack() // local stack
callContext = &ScopeContext{
@@ -198,11 +130,12 @@ func (in *EVMInterpreter) Run(contract *Contract, input []byte, readOnly bool) (
pc = uint64(0) // program counter
cost uint64
// copies used by tracer
- pcCopy uint64 // needed for the deferred EVMLogger
- gasCopy uint64 // for EVMLogger to log gas remaining before execution
- logged bool // deferred EVMLogger should ignore already logged steps
- res []byte // result of the opcode execution function
- debug = in.evm.Config.Tracer != nil
+ pcCopy uint64 // needed for the deferred EVMLogger
+ gasCopy uint64 // for EVMLogger to log gas remaining before execution
+ logged bool // deferred EVMLogger should ignore already logged steps
+ res []byte // result of the opcode execution function
+ debug = evm.Config.Tracer != nil
+ isEIP4762 = evm.chainRules.IsEIP4762
)
// Don't move this deferred function, it's placed before the OnOpcode-deferred method,
// so that it gets executed _after_: the OnOpcode needs the stacks before
@@ -218,11 +151,11 @@ func (in *EVMInterpreter) Run(contract *Contract, input []byte, readOnly bool) (
if err == nil {
return
}
- if !logged && in.evm.Config.Tracer.OnOpcode != nil {
- in.evm.Config.Tracer.OnOpcode(pcCopy, byte(op), gasCopy, cost, callContext, in.returnData, in.evm.depth, VMErrorFromErr(err))
+ if !logged && evm.Config.Tracer.OnOpcode != nil {
+ evm.Config.Tracer.OnOpcode(pcCopy, byte(op), gasCopy, cost, callContext, evm.returnData, evm.depth, VMErrorFromErr(err))
}
- if logged && in.evm.Config.Tracer.OnFault != nil {
- in.evm.Config.Tracer.OnFault(pcCopy, byte(op), gasCopy, cost, callContext, in.evm.depth, VMErrorFromErr(err))
+ if logged && evm.Config.Tracer.OnFault != nil {
+ evm.Config.Tracer.OnFault(pcCopy, byte(op), gasCopy, cost, callContext, evm.depth, VMErrorFromErr(err))
}
}()
}
@@ -237,12 +170,12 @@ func (in *EVMInterpreter) Run(contract *Contract, input []byte, readOnly bool) (
logged, pcCopy, gasCopy = false, pc, contract.Gas
}
- if in.evm.chainRules.IsEIP4762 && !contract.IsDeployment && !contract.IsSystemCall {
+ if isEIP4762 && !contract.IsDeployment && !contract.IsSystemCall {
// if the PC ends up in a new "chunk" of verkleized code, charge the
// associated costs.
contractAddr := contract.Address()
- consumed, wanted := in.evm.TxContext.AccessEvents.CodeChunksRangeGas(contractAddr, pc, 1, uint64(len(contract.Code)), false, contract.Gas)
- contract.UseGas(consumed, in.evm.Config.Tracer, tracing.GasChangeWitnessCodeChunk)
+ consumed, wanted := evm.TxContext.AccessEvents.CodeChunksRangeGas(contractAddr, pc, 1, uint64(len(contract.Code)), false, contract.Gas)
+ contract.UseGas(consumed, evm.Config.Tracer, tracing.GasChangeWitnessCodeChunk)
if consumed < wanted {
return nil, ErrOutOfGas
}
@@ -287,7 +220,7 @@ func (in *EVMInterpreter) Run(contract *Contract, input []byte, readOnly bool) (
// Consume the gas and return an error if not enough gas is available.
// cost is explicitly set so that the capture state defer method can get the proper cost
var dynamicCost uint64
- dynamicCost, err = operation.dynamicGas(in.evm, contract, stack, mem, memorySize)
+ dynamicCost, err = operation.dynamicGas(evm, contract, stack, mem, memorySize)
cost += dynamicCost // for tracing
if err != nil {
return nil, fmt.Errorf("%w: %v", ErrOutOfGas, err)
@@ -302,11 +235,11 @@ func (in *EVMInterpreter) Run(contract *Contract, input []byte, readOnly bool) (
// Do tracing before potential memory expansion
if debug {
- if in.evm.Config.Tracer.OnGasChange != nil {
- in.evm.Config.Tracer.OnGasChange(gasCopy, gasCopy-cost, tracing.GasChangeCallOpCode)
+ if evm.Config.Tracer.OnGasChange != nil {
+ evm.Config.Tracer.OnGasChange(gasCopy, gasCopy-cost, tracing.GasChangeCallOpCode)
}
- if in.evm.Config.Tracer.OnOpcode != nil {
- in.evm.Config.Tracer.OnOpcode(pc, byte(op), gasCopy, cost, callContext, in.returnData, in.evm.depth, VMErrorFromErr(err))
+ if evm.Config.Tracer.OnOpcode != nil {
+ evm.Config.Tracer.OnOpcode(pc, byte(op), gasCopy, cost, callContext, evm.returnData, evm.depth, VMErrorFromErr(err))
logged = true
}
}
@@ -315,7 +248,7 @@ func (in *EVMInterpreter) Run(contract *Contract, input []byte, readOnly bool) (
}
// execute the operation
- res, err = operation.execute(&pc, in, callContext)
+ res, err = operation.execute(&pc, evm, callContext)
if err != nil {
break
}
diff --git a/core/vm/jump_table.go b/core/vm/jump_table.go
index 22eed8754f..d7a4d9da1d 100644
--- a/core/vm/jump_table.go
+++ b/core/vm/jump_table.go
@@ -23,7 +23,7 @@ import (
)
type (
- executionFunc func(pc *uint64, interpreter *EVMInterpreter, callContext *ScopeContext) ([]byte, error)
+ executionFunc func(pc *uint64, evm *EVM, callContext *ScopeContext) ([]byte, error)
gasFunc func(*EVM, *Contract, *Stack, *Memory, uint64) (uint64, error) // last parameter is the requested memory size as a uint64
// memorySizeFunc returns the required size, and whether the operation overflowed a uint64
memorySizeFunc func(*Stack) (size uint64, overflow bool)
diff --git a/core/vm/jumpdests.go b/core/vm/jumpdests.go
new file mode 100644
index 0000000000..1a30c1943f
--- /dev/null
+++ b/core/vm/jumpdests.go
@@ -0,0 +1,47 @@
+// Copyright 2024 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package vm
+
+import "github.com/ethereum/go-ethereum/common"
+
+// JumpDestCache represents the cache of jumpdest analysis results.
+type JumpDestCache interface {
+ // Load retrieves the cached jumpdest analysis for the given code hash.
+ // Returns the BitVec and true if found, or nil and false if not cached.
+ Load(codeHash common.Hash) (BitVec, bool)
+
+ // Store saves the jumpdest analysis for the given code hash.
+ Store(codeHash common.Hash, vec BitVec)
+}
+
+// mapJumpDests is the default implementation of JumpDests using a map.
+// This implementation is not thread-safe and is meant to be used per EVM instance.
+type mapJumpDests map[common.Hash]BitVec
+
+// newMapJumpDests creates a new map-based JumpDests implementation.
+func newMapJumpDests() JumpDestCache {
+ return make(mapJumpDests)
+}
+
+func (j mapJumpDests) Load(codeHash common.Hash) (BitVec, bool) {
+ vec, ok := j[codeHash]
+ return vec, ok
+}
+
+func (j mapJumpDests) Store(codeHash common.Hash, vec BitVec) {
+ j[codeHash] = vec
+}
diff --git a/crypto/kzg4844/kzg4844.go b/crypto/kzg4844/kzg4844.go
index baf9c9655b..9da2386368 100644
--- a/crypto/kzg4844/kzg4844.go
+++ b/crypto/kzg4844/kzg4844.go
@@ -31,9 +31,9 @@ import (
var content embed.FS
var (
- blobT = reflect.TypeOf(Blob{})
- commitmentT = reflect.TypeOf(Commitment{})
- proofT = reflect.TypeOf(Proof{})
+ blobT = reflect.TypeFor[Blob]()
+ commitmentT = reflect.TypeFor[Commitment]()
+ proofT = reflect.TypeFor[Proof]()
CellProofsPerBlob = 128
)
diff --git a/eth/catalyst/api.go b/eth/catalyst/api.go
index f91896cc6e..038328d9ba 100644
--- a/eth/catalyst/api.go
+++ b/eth/catalyst/api.go
@@ -18,7 +18,6 @@
package catalyst
import (
- "crypto/sha256"
"errors"
"fmt"
"strconv"
@@ -31,7 +30,6 @@ import (
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/types"
- "github.com/ethereum/go-ethereum/crypto/kzg4844"
"github.com/ethereum/go-ethereum/eth"
"github.com/ethereum/go-ethereum/eth/ethconfig"
"github.com/ethereum/go-ethereum/internal/version"
@@ -120,10 +118,13 @@ var caps = []string{
var (
// Number of blobs requested via getBlobsV2
getBlobsRequestedCounter = metrics.NewRegisteredCounter("engine/getblobs/requested", nil)
+
// Number of blobs requested via getBlobsV2 that are present in the blobpool
getBlobsAvailableCounter = metrics.NewRegisteredCounter("engine/getblobs/available", nil)
+
// Number of times getBlobsV2 responded with “hit”
getBlobsV2RequestHit = metrics.NewRegisteredCounter("engine/getblobs/hit", nil)
+
// Number of times getBlobsV2 responded with “miss”
getBlobsV2RequestMiss = metrics.NewRegisteredCounter("engine/getblobs/miss", nil)
)
@@ -494,29 +495,15 @@ func (api *ConsensusAPI) GetBlobsV1(hashes []common.Hash) ([]*engine.BlobAndProo
if len(hashes) > 128 {
return nil, engine.TooLargeRequest.With(fmt.Errorf("requested blob count too large: %v", len(hashes)))
}
- var (
- res = make([]*engine.BlobAndProofV1, len(hashes))
- hasher = sha256.New()
- index = make(map[common.Hash]int)
- sidecars = api.eth.BlobTxPool().GetBlobs(hashes)
- )
-
- for i, hash := range hashes {
- index[hash] = i
+ blobs, _, proofs, err := api.eth.BlobTxPool().GetBlobs(hashes, types.BlobSidecarVersion0)
+ if err != nil {
+ return nil, engine.InvalidParams.With(err)
}
- for i, sidecar := range sidecars {
- if res[i] != nil || sidecar == nil {
- // already filled
- continue
- }
- for cIdx, commitment := range sidecar.Commitments {
- computed := kzg4844.CalcBlobHashV1(hasher, &commitment)
- if idx, ok := index[computed]; ok {
- res[idx] = &engine.BlobAndProofV1{
- Blob: sidecar.Blobs[cIdx][:],
- Proof: sidecar.Proofs[cIdx][:],
- }
- }
+ res := make([]*engine.BlobAndProofV1, len(hashes))
+ for i := 0; i < len(blobs); i++ {
+ res[i] = &engine.BlobAndProofV1{
+ Blob: blobs[i][:],
+ Proof: proofs[i][0][:],
}
}
return res, nil
@@ -538,47 +525,19 @@ func (api *ConsensusAPI) GetBlobsV2(hashes []common.Hash) ([]*engine.BlobAndProo
}
getBlobsV2RequestHit.Inc(1)
- // pull up the blob hashes
- var (
- res = make([]*engine.BlobAndProofV2, len(hashes))
- index = make(map[common.Hash][]int)
- sidecars = api.eth.BlobTxPool().GetBlobs(hashes)
- )
-
- for i, hash := range hashes {
- index[hash] = append(index[hash], i)
+ blobs, _, proofs, err := api.eth.BlobTxPool().GetBlobs(hashes, types.BlobSidecarVersion1)
+ if err != nil {
+ return nil, engine.InvalidParams.With(err)
}
- for i, sidecar := range sidecars {
- if res[i] != nil {
- // already filled
- continue
+ res := make([]*engine.BlobAndProofV2, len(hashes))
+ for i := 0; i < len(blobs); i++ {
+ var cellProofs []hexutil.Bytes
+ for _, proof := range proofs[i] {
+ cellProofs = append(cellProofs, proof[:])
}
- if sidecar == nil {
- // not found, return empty response
- return nil, nil
- }
- if sidecar.Version != types.BlobSidecarVersion1 {
- log.Info("GetBlobs queried V0 transaction: index %v, blobhashes %v", index, sidecar.BlobHashes())
- return nil, nil
- }
- blobHashes := sidecar.BlobHashes()
- for bIdx, hash := range blobHashes {
- if idxes, ok := index[hash]; ok {
- proofs, err := sidecar.CellProofsAt(bIdx)
- if err != nil {
- return nil, engine.InvalidParams.With(err)
- }
- var cellProofs []hexutil.Bytes
- for _, proof := range proofs {
- cellProofs = append(cellProofs, proof[:])
- }
- for _, idx := range idxes {
- res[idx] = &engine.BlobAndProofV2{
- Blob: sidecar.Blobs[bIdx][:],
- CellProofs: cellProofs,
- }
- }
- }
+ res[i] = &engine.BlobAndProofV2{
+ Blob: blobs[i][:],
+ CellProofs: cellProofs,
}
}
return res, nil
diff --git a/eth/catalyst/api_test.go b/eth/catalyst/api_test.go
index dc7967ba2e..ad377113b5 100644
--- a/eth/catalyst/api_test.go
+++ b/eth/catalyst/api_test.go
@@ -1497,7 +1497,7 @@ func checkEqualBody(a *types.Body, b *engine.ExecutionPayloadBody) error {
}
}
if !reflect.DeepEqual(a.Withdrawals, b.Withdrawals) {
- return fmt.Errorf("withdrawals mismatch")
+ return errors.New("withdrawals mismatch")
}
return nil
}
diff --git a/eth/catalyst/tester.go b/eth/catalyst/tester.go
deleted file mode 100644
index 10a480837e..0000000000
--- a/eth/catalyst/tester.go
+++ /dev/null
@@ -1,103 +0,0 @@
-// Copyright 2022 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package catalyst
-
-import (
- "sync"
- "time"
-
- "github.com/ethereum/go-ethereum/common"
- "github.com/ethereum/go-ethereum/eth"
- "github.com/ethereum/go-ethereum/eth/ethconfig"
- "github.com/ethereum/go-ethereum/log"
- "github.com/ethereum/go-ethereum/node"
-)
-
-// FullSyncTester is an auxiliary service that allows Geth to perform full sync
-// alone without consensus-layer attached. Users must specify a valid block hash
-// as the sync target.
-//
-// This tester can be applied to different networks, no matter it's pre-merge or
-// post-merge, but only for full-sync.
-type FullSyncTester struct {
- stack *node.Node
- backend *eth.Ethereum
- target common.Hash
- closed chan struct{}
- wg sync.WaitGroup
- exitWhenSynced bool
-}
-
-// RegisterFullSyncTester registers the full-sync tester service into the node
-// stack for launching and stopping the service controlled by node.
-func RegisterFullSyncTester(stack *node.Node, backend *eth.Ethereum, target common.Hash, exitWhenSynced bool) (*FullSyncTester, error) {
- cl := &FullSyncTester{
- stack: stack,
- backend: backend,
- target: target,
- closed: make(chan struct{}),
- exitWhenSynced: exitWhenSynced,
- }
- stack.RegisterLifecycle(cl)
- return cl, nil
-}
-
-// Start launches the beacon sync with provided sync target.
-func (tester *FullSyncTester) Start() error {
- tester.wg.Add(1)
- go func() {
- defer tester.wg.Done()
-
- // Trigger beacon sync with the provided block hash as trusted
- // chain head.
- err := tester.backend.Downloader().BeaconDevSync(ethconfig.FullSync, tester.target, tester.closed)
- if err != nil {
- log.Info("Failed to trigger beacon sync", "err", err)
- }
-
- ticker := time.NewTicker(time.Second * 5)
- defer ticker.Stop()
-
- for {
- select {
- case <-ticker.C:
- // Stop in case the target block is already stored locally.
- if block := tester.backend.BlockChain().GetBlockByHash(tester.target); block != nil {
- log.Info("Full-sync target reached", "number", block.NumberU64(), "hash", block.Hash())
-
- if tester.exitWhenSynced {
- go tester.stack.Close() // async since we need to close ourselves
- log.Info("Terminating the node")
- }
- return
- }
-
- case <-tester.closed:
- return
- }
- }
- }()
- return nil
-}
-
-// Stop stops the full-sync tester to stop all background activities.
-// This function can only be called for one time.
-func (tester *FullSyncTester) Stop() error {
- close(tester.closed)
- tester.wg.Wait()
- return nil
-}
diff --git a/eth/catalyst/witness.go b/eth/catalyst/witness.go
index 712539c5e3..703f1b0881 100644
--- a/eth/catalyst/witness.go
+++ b/eth/catalyst/witness.go
@@ -228,8 +228,8 @@ func (api *ConsensusAPI) ExecuteStatelessPayloadV4(params engine.ExecutableData,
return engine.StatelessPayloadStatusV1{Status: engine.INVALID}, paramsErr("nil beaconRoot post-cancun")
case executionRequests == nil:
return engine.StatelessPayloadStatusV1{Status: engine.INVALID}, paramsErr("nil executionRequests post-prague")
- case !api.checkFork(params.Timestamp, forks.Prague):
- return engine.StatelessPayloadStatusV1{Status: engine.INVALID}, unsupportedForkErr("newPayloadV3 must only be called for cancun payloads")
+ case !api.checkFork(params.Timestamp, forks.Prague, forks.Osaka):
+ return engine.StatelessPayloadStatusV1{Status: engine.INVALID}, unsupportedForkErr("newPayloadV4 must only be called for prague payloads")
}
requests := convertRequests(executionRequests)
if err := validateRequests(requests); err != nil {
diff --git a/eth/downloader/api.go b/eth/downloader/api.go
index c98f9a2c3f..f97371de5f 100644
--- a/eth/downloader/api.go
+++ b/eth/downloader/api.go
@@ -200,7 +200,7 @@ func (s *SyncStatusSubscription) Unsubscribe() {
}
// SubscribeSyncStatus creates a subscription that will broadcast new synchronisation updates.
-// The given channel must receive interface values, the result can either.
+// The given channel must receive interface values, the result can either be a SyncingResult or false.
func (api *DownloaderAPI) SubscribeSyncStatus(status chan interface{}) *SyncStatusSubscription {
api.installSyncSubscription <- status
return &SyncStatusSubscription{api: api, c: status}
diff --git a/eth/downloader/beacondevsync.go b/eth/downloader/beacondevsync.go
index 0032eb53b9..7b30684133 100644
--- a/eth/downloader/beacondevsync.go
+++ b/eth/downloader/beacondevsync.go
@@ -18,7 +18,6 @@ package downloader
import (
"errors"
- "time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
@@ -34,28 +33,14 @@ import (
// Note, this must not be used in live code. If the forkchcoice endpoint where
// to use this instead of giving us the payload first, then essentially nobody
// in the network would have the block yet that we'd attempt to retrieve.
-func (d *Downloader) BeaconDevSync(mode SyncMode, hash common.Hash, stop chan struct{}) error {
+func (d *Downloader) BeaconDevSync(mode SyncMode, header *types.Header) error {
// Be very loud that this code should not be used in a live node
log.Warn("----------------------------------")
- log.Warn("Beacon syncing with hash as target", "hash", hash)
+ log.Warn("Beacon syncing with hash as target", "number", header.Number, "hash", header.Hash())
log.Warn("This is unhealthy for a live node!")
+ log.Warn("This is incompatible with the consensus layer!")
log.Warn("----------------------------------")
-
- log.Info("Waiting for peers to retrieve sync target")
- for {
- // If the node is going down, unblock
- select {
- case <-stop:
- return errors.New("stop requested")
- default:
- }
- header, err := d.GetHeader(hash)
- if err != nil {
- time.Sleep(time.Second)
- continue
- }
- return d.BeaconSync(mode, header, header)
- }
+ return d.BeaconSync(mode, header, header)
}
// GetHeader tries to retrieve the header with a given hash from a random peer.
diff --git a/eth/downloader/downloader.go b/eth/downloader/downloader.go
index dcda4e521c..09837a3045 100644
--- a/eth/downloader/downloader.go
+++ b/eth/downloader/downloader.go
@@ -199,7 +199,7 @@ type BlockChain interface {
// InsertChain inserts a batch of blocks into the local chain.
InsertChain(types.Blocks) (int, error)
- // InterruptInsert whether disables the chain insertion.
+ // InterruptInsert disables or enables chain insertion.
InterruptInsert(on bool)
// InsertReceiptChain inserts a batch of blocks along with their receipts
@@ -513,7 +513,7 @@ func (d *Downloader) syncToHead() (err error) {
//
// For non-merged networks, if there is a checkpoint available, then calculate
// the ancientLimit through that. Otherwise calculate the ancient limit through
- // the advertised height of the remote peer. This most is mostly a fallback for
+ // the advertised height of the remote peer. This is mostly a fallback for
// legacy networks, but should eventually be dropped. TODO(karalabe).
//
// Beacon sync, use the latest finalized block as the ancient limit
@@ -946,7 +946,7 @@ func (d *Downloader) processSnapSyncContent() error {
if !d.committed.Load() {
latest := results[len(results)-1].Header
// If the height is above the pivot block by 2 sets, it means the pivot
- // become stale in the network, and it was garbage collected, move to a
+ // became stale in the network, and it was garbage collected, move to a
// new pivot.
//
// Note, we have `reorgProtHeaderDelay` number of blocks withheld, Those
@@ -1043,7 +1043,7 @@ func (d *Downloader) commitSnapSyncData(results []*fetchResult, stateSync *state
first, last := results[0].Header, results[len(results)-1].Header
log.Debug("Inserting snap-sync blocks", "items", len(results),
"firstnum", first.Number, "firsthash", first.Hash(),
- "lastnumn", last.Number, "lasthash", last.Hash(),
+ "lastnum", last.Number, "lasthash", last.Hash(),
)
blocks := make([]*types.Block, len(results))
receipts := make([]rlp.RawValue, len(results))
diff --git a/eth/downloader/downloader_test.go b/eth/downloader/downloader_test.go
index 669ce003cf..c1a31d6e1c 100644
--- a/eth/downloader/downloader_test.go
+++ b/eth/downloader/downloader_test.go
@@ -544,7 +544,7 @@ func testMultiProtoSync(t *testing.T, protocol uint, mode SyncMode) {
tester.newPeer("peer 68", eth.ETH68, chain.blocks[1:])
if err := tester.downloader.BeaconSync(mode, chain.blocks[len(chain.blocks)-1].Header(), nil); err != nil {
- t.Fatalf("failed to start beacon sync: #{err}")
+ t.Fatalf("failed to start beacon sync: %v", err)
}
select {
case <-complete:
diff --git a/eth/downloader/fetchers.go b/eth/downloader/fetchers.go
index 4ebb9bbc98..6e5c65eb20 100644
--- a/eth/downloader/fetchers.go
+++ b/eth/downloader/fetchers.go
@@ -45,9 +45,6 @@ func (d *Downloader) fetchHeadersByHash(p *peerConnection, hash common.Hash, amo
defer timeoutTimer.Stop()
select {
- case <-d.cancelCh:
- return nil, nil, errCanceled
-
case <-timeoutTimer.C:
// Header retrieval timed out, update the metrics
p.log.Debug("Header request timed out", "elapsed", ttl)
diff --git a/eth/ethconfig/config.go b/eth/ethconfig/config.go
index 97d23744a0..82c3c500a7 100644
--- a/eth/ethconfig/config.go
+++ b/eth/ethconfig/config.go
@@ -18,7 +18,7 @@
package ethconfig
import (
- "fmt"
+ "errors"
"time"
"github.com/ethereum/go-ethereum/common"
@@ -154,7 +154,7 @@ type Config struct {
// RPCEVMTimeout is the global timeout for eth-call.
RPCEVMTimeout time.Duration
- // RPCTxFeeCap is the global transaction fee(price * gaslimit) cap for
+ // RPCTxFeeCap is the global transaction fee (price * gas limit) cap for
// send-transaction variants. The unit is ether.
RPCTxFeeCap float64
@@ -171,7 +171,7 @@ type Config struct {
func CreateConsensusEngine(config *params.ChainConfig, db ethdb.Database) (consensus.Engine, error) {
if config.TerminalTotalDifficulty == nil {
log.Error("Geth only supports PoS networks. Please transition legacy networks using Geth v1.13.x.")
- return nil, fmt.Errorf("'terminalTotalDifficulty' is not set in genesis block")
+ return nil, errors.New("'terminalTotalDifficulty' is not set in genesis block")
}
// Wrap previously supported consensus engines into their post-merge counterpart
if config.Clique != nil {
diff --git a/eth/filters/api.go b/eth/filters/api.go
index 232af23957..c929810a12 100644
--- a/eth/filters/api.go
+++ b/eth/filters/api.go
@@ -38,6 +38,8 @@ var (
errInvalidTopic = errors.New("invalid topic(s)")
errFilterNotFound = errors.New("filter not found")
errInvalidBlockRange = errors.New("invalid block range params")
+ errUnknownBlock = errors.New("unknown block")
+ errBlockHashWithRange = errors.New("can't specify fromBlock/toBlock with blockHash")
errPendingLogsUnsupported = errors.New("pending logs are not supported")
errExceedMaxTopics = errors.New("exceed max topics")
errExceedMaxAddresses = errors.New("exceed max addresses")
@@ -348,8 +350,13 @@ func (api *FilterAPI) GetLogs(ctx context.Context, crit FilterCriteria) ([]*type
if len(crit.Addresses) > maxAddresses {
return nil, errExceedMaxAddresses
}
+
var filter *Filter
if crit.BlockHash != nil {
+ if crit.FromBlock != nil || crit.ToBlock != nil {
+ return nil, errBlockHashWithRange
+ }
+
// Block filter requested, construct a single-shot filter
filter = api.sys.NewBlockFilter(*crit.BlockHash, crit.Addresses, crit.Topics)
} else {
@@ -372,6 +379,7 @@ func (api *FilterAPI) GetLogs(ctx context.Context, crit FilterCriteria) ([]*type
// Construct the range filter
filter = api.sys.NewRangeFilter(begin, end, crit.Addresses, crit.Topics)
}
+
// Run the filter and return all the logs
logs, err := filter.Logs(ctx)
if err != nil {
diff --git a/eth/filters/filter.go b/eth/filters/filter.go
index ada478ae1d..1a9918d0ee 100644
--- a/eth/filters/filter.go
+++ b/eth/filters/filter.go
@@ -85,7 +85,7 @@ func (f *Filter) Logs(ctx context.Context) ([]*types.Log, error) {
return nil, err
}
if header == nil {
- return nil, errors.New("unknown block")
+ return nil, errUnknownBlock
}
if header.Number.Uint64() < f.sys.backend.HistoryPruningCutoff() {
return nil, &history.PrunedHistoryError{}
@@ -456,7 +456,6 @@ func (f *Filter) blockLogs(ctx context.Context, header *types.Header) ([]*types.
// checkMatches checks if the receipts belonging to the given header contain any log events that
// match the filter criteria. This function is called when the bloom filter signals a potential match.
-// skipFilter signals all logs of the given block are requested.
func (f *Filter) checkMatches(ctx context.Context, header *types.Header) ([]*types.Log, error) {
hash := header.Hash()
// Logs in cache are partially filled with context data
diff --git a/eth/filters/filter_system.go b/eth/filters/filter_system.go
index 82c91266c0..751cd417e8 100644
--- a/eth/filters/filter_system.go
+++ b/eth/filters/filter_system.go
@@ -207,7 +207,7 @@ type EventSystem struct {
}
// NewEventSystem creates a new manager that listens for event on the given mux,
-// parses and filters them. It uses the all map to retrieve filter changes. The
+// parses and filters them. It uses an internal map to retrieve filter changes. The
// work loop holds its own index that is used to forward events to filters.
//
// The returned manager has a loop that needs to be stopped with the Stop function
diff --git a/eth/filters/filter_system_test.go b/eth/filters/filter_system_test.go
index 3e686ca2eb..013c1ae527 100644
--- a/eth/filters/filter_system_test.go
+++ b/eth/filters/filter_system_test.go
@@ -450,24 +450,65 @@ func TestInvalidGetLogsRequest(t *testing.T) {
t.Parallel()
var (
- db = rawdb.NewMemoryDatabase()
- _, sys = newTestFilterSystem(db, Config{})
- api = NewFilterAPI(sys)
- blockHash = common.HexToHash("0x1111111111111111111111111111111111111111111111111111111111111111")
+ genesis = &core.Genesis{
+ Config: params.TestChainConfig,
+ BaseFee: big.NewInt(params.InitialBaseFee),
+ }
+ db, blocks, _ = core.GenerateChainWithGenesis(genesis, ethash.NewFaker(), 10, func(i int, gen *core.BlockGen) {})
+ _, sys = newTestFilterSystem(db, Config{})
+ api = NewFilterAPI(sys)
+ blockHash = blocks[0].Hash()
+ unknownBlockHash = common.HexToHash("0x1111111111111111111111111111111111111111111111111111111111111111")
)
- // Reason: Cannot specify both BlockHash and FromBlock/ToBlock)
- testCases := []FilterCriteria{
- 0: {BlockHash: &blockHash, FromBlock: big.NewInt(100)},
- 1: {BlockHash: &blockHash, ToBlock: big.NewInt(500)},
- 2: {BlockHash: &blockHash, FromBlock: big.NewInt(rpc.LatestBlockNumber.Int64())},
- 3: {BlockHash: &blockHash, Topics: [][]common.Hash{{}, {}, {}, {}, {}}},
- 4: {BlockHash: &blockHash, Addresses: make([]common.Address, maxAddresses+1)},
+ // Insert the blocks into the chain so filter can look them up
+ blockchain, err := core.NewBlockChain(db, genesis, ethash.NewFaker(), nil)
+ if err != nil {
+ t.Fatalf("failed to create tester chain: %v", err)
+ }
+ if n, err := blockchain.InsertChain(blocks); err != nil {
+ t.Fatalf("block %d: failed to insert into chain: %v", n, err)
+ }
+
+ type testcase struct {
+ f FilterCriteria
+ err error
+ }
+ testCases := []testcase{
+ {
+ f: FilterCriteria{BlockHash: &blockHash, FromBlock: big.NewInt(100)},
+ err: errBlockHashWithRange,
+ },
+ {
+ f: FilterCriteria{BlockHash: &blockHash, ToBlock: big.NewInt(500)},
+ err: errBlockHashWithRange,
+ },
+ {
+ f: FilterCriteria{BlockHash: &blockHash, FromBlock: big.NewInt(rpc.LatestBlockNumber.Int64())},
+ err: errBlockHashWithRange,
+ },
+ {
+ f: FilterCriteria{BlockHash: &unknownBlockHash},
+ err: errUnknownBlock,
+ },
+ {
+ f: FilterCriteria{BlockHash: &blockHash, Topics: [][]common.Hash{{}, {}, {}, {}, {}}},
+ err: errExceedMaxTopics,
+ },
+ {
+ f: FilterCriteria{BlockHash: &blockHash, Topics: [][]common.Hash{{}, {}, {}, {}, {}}},
+ err: errExceedMaxTopics,
+ },
+ {
+ f: FilterCriteria{BlockHash: &blockHash, Addresses: make([]common.Address, maxAddresses+1)},
+ err: errExceedMaxAddresses,
+ },
}
for i, test := range testCases {
- if _, err := api.GetLogs(context.Background(), test); err == nil {
- t.Errorf("Expected Logs for case #%d to fail", i)
+ _, err := api.GetLogs(context.Background(), test.f)
+ if !errors.Is(err, test.err) {
+ t.Errorf("case %d: wrong error: %q\nwant: %q", i, err, test.err)
}
}
}
diff --git a/eth/gasestimator/gasestimator.go b/eth/gasestimator/gasestimator.go
index 7e9d8125de..6e79fbd62b 100644
--- a/eth/gasestimator/gasestimator.go
+++ b/eth/gasestimator/gasestimator.go
@@ -62,6 +62,23 @@ func Estimate(ctx context.Context, call *core.Message, opts *Options, gasCap uin
if call.GasLimit >= params.TxGas {
hi = call.GasLimit
}
+
+ // Cap the maximum gas allowance according to EIP-7825 if the estimation targets Osaka
+ if hi > params.MaxTxGas {
+ blockNumber, blockTime := opts.Header.Number, opts.Header.Time
+ if opts.BlockOverrides != nil {
+ if opts.BlockOverrides.Number != nil {
+ blockNumber = opts.BlockOverrides.Number.ToInt()
+ }
+ if opts.BlockOverrides.Time != nil {
+ blockTime = uint64(*opts.BlockOverrides.Time)
+ }
+ }
+ if opts.Config.IsOsaka(blockNumber, blockTime) {
+ hi = params.MaxTxGas
+ }
+ }
+
// Normalize the max fee per gas the call is willing to spend.
var feeCap *big.Int
if call.GasFeeCap != nil {
@@ -209,6 +226,9 @@ func execute(ctx context.Context, call *core.Message, opts *Options, gasLimit ui
if errors.Is(err, core.ErrIntrinsicGas) {
return true, nil, nil // Special case, raise gas limit
}
+ if errors.Is(err, core.ErrGasLimitTooHigh) {
+ return true, nil, nil // Special case, lower gas limit
+ }
return true, nil, err // Bail out
}
return result.Failed(), result, nil
diff --git a/eth/protocols/snap/metrics.go b/eth/protocols/snap/metrics.go
index 6878e5b280..6319a9b75d 100644
--- a/eth/protocols/snap/metrics.go
+++ b/eth/protocols/snap/metrics.go
@@ -66,4 +66,7 @@ var (
// discarded during the snap sync.
largeStorageDiscardGauge = metrics.NewRegisteredGauge("eth/protocols/snap/sync/storage/chunk/discard", nil)
largeStorageResumedGauge = metrics.NewRegisteredGauge("eth/protocols/snap/sync/storage/chunk/resume", nil)
+
+ stateSyncTimeGauge = metrics.NewRegisteredGauge("eth/protocols/snap/sync/time/statesync", nil)
+ stateHealTimeGauge = metrics.NewRegisteredGauge("eth/protocols/snap/sync/time/stateheal", nil)
)
diff --git a/eth/protocols/snap/sync.go b/eth/protocols/snap/sync.go
index 84ceb9105e..cf4e494645 100644
--- a/eth/protocols/snap/sync.go
+++ b/eth/protocols/snap/sync.go
@@ -502,8 +502,10 @@ type Syncer struct {
storageHealed uint64 // Number of storage slots downloaded during the healing stage
storageHealedBytes common.StorageSize // Number of raw storage bytes persisted to disk during the healing stage
- startTime time.Time // Time instance when snapshot sync started
- logTime time.Time // Time instance when status was last reported
+ startTime time.Time // Time instance when snapshot sync started
+ healStartTime time.Time // Time instance when the state healing started
+ syncTimeOnce sync.Once // Ensure that the state sync time is uploaded only once
+ logTime time.Time // Time instance when status was last reported
pend sync.WaitGroup // Tracks network request goroutines for graceful shutdown
lock sync.RWMutex // Protects fields that can change outside of sync (peers, reqs, root)
@@ -685,6 +687,14 @@ func (s *Syncer) Sync(root common.Hash, cancel chan struct{}) error {
s.cleanStorageTasks()
s.cleanAccountTasks()
if len(s.tasks) == 0 && s.healer.scheduler.Pending() == 0 {
+ // State healing phase completed, record the elapsed time in metrics.
+ // Note: healing may be rerun in subsequent cycles to fill gaps between
+ // pivot states (e.g., if chain sync takes longer).
+ if !s.healStartTime.IsZero() {
+ stateHealTimeGauge.Inc(int64(time.Since(s.healStartTime)))
+ log.Info("State healing phase is completed", "elapsed", common.PrettyDuration(time.Since(s.healStartTime)))
+ s.healStartTime = time.Time{}
+ }
return nil
}
// Assign all the data retrieval tasks to any free peers
@@ -693,7 +703,17 @@ func (s *Syncer) Sync(root common.Hash, cancel chan struct{}) error {
s.assignStorageTasks(storageResps, storageReqFails, cancel)
if len(s.tasks) == 0 {
- // Sync phase done, run heal phase
+ // State sync phase completed, record the elapsed time in metrics.
+ // Note: the initial state sync runs only once, regardless of whether
+ // a new cycle is started later. Any state differences in subsequent
+ // cycles will be handled by the state healer.
+ s.syncTimeOnce.Do(func() {
+ stateSyncTimeGauge.Update(int64(time.Since(s.startTime)))
+ log.Info("State sync phase is completed", "elapsed", common.PrettyDuration(time.Since(s.startTime)))
+ })
+ if s.healStartTime.IsZero() {
+ s.healStartTime = time.Now()
+ }
s.assignTrienodeHealTasks(trienodeHealResps, trienodeHealReqFails, cancel)
s.assignBytecodeHealTasks(bytecodeHealResps, bytecodeHealReqFails, cancel)
}
diff --git a/eth/syncer/syncer.go b/eth/syncer/syncer.go
new file mode 100644
index 0000000000..5c4d2401e9
--- /dev/null
+++ b/eth/syncer/syncer.go
@@ -0,0 +1,197 @@
+// Copyright 2025 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package syncer
+
+import (
+ "errors"
+ "fmt"
+ "sync"
+ "time"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/eth"
+ "github.com/ethereum/go-ethereum/eth/ethconfig"
+ "github.com/ethereum/go-ethereum/log"
+ "github.com/ethereum/go-ethereum/node"
+ "github.com/ethereum/go-ethereum/rpc"
+)
+
+type syncReq struct {
+ hash common.Hash
+ errc chan error
+}
+
+// Syncer is an auxiliary service that allows Geth to perform full sync
+// alone without consensus-layer attached. Users must specify a valid block hash
+// as the sync target.
+//
+// This tool can be applied to different networks, no matter whether it's pre-merge or
+// post-merge, but only for full-sync.
+type Syncer struct {
+ stack *node.Node
+ backend *eth.Ethereum
+ target common.Hash
+ request chan *syncReq
+ closed chan struct{}
+ wg sync.WaitGroup
+ exitWhenSynced bool
+}
+
+// Register registers the synchronization override service into the node
+// stack for launching and stopping the service controlled by node.
+func Register(stack *node.Node, backend *eth.Ethereum, target common.Hash, exitWhenSynced bool) (*Syncer, error) {
+ s := &Syncer{
+ stack: stack,
+ backend: backend,
+ target: target,
+ request: make(chan *syncReq),
+ closed: make(chan struct{}),
+ exitWhenSynced: exitWhenSynced,
+ }
+ stack.RegisterAPIs(s.APIs())
+ stack.RegisterLifecycle(s)
+ return s, nil
+}
+
+// APIs returns the collection of RPC services the ethereum package offers.
+// NOTE, some of these services probably need to be moved to somewhere else.
+func (s *Syncer) APIs() []rpc.API {
+ return []rpc.API{
+ {
+ Namespace: "debug",
+ Service: NewAPI(s),
+ },
+ }
+}
+
+// run is the main loop that monitors sync requests from users and initiates
+// sync operations when necessary. It also checks whether the specified target
+// has been reached and shuts down Geth if requested by the user.
+func (s *Syncer) run() {
+ defer s.wg.Done()
+
+ var (
+ target *types.Header
+ ticker = time.NewTicker(time.Second * 5)
+ )
+ for {
+ select {
+ case req := <-s.request:
+ var (
+ resync bool
+ retries int
+ logged bool
+ )
+ for {
+ if retries >= 10 {
+ req.errc <- fmt.Errorf("sync target is not avaibale, %x", req.hash)
+ break
+ }
+ select {
+ case <-s.closed:
+ req.errc <- errors.New("syncer closed")
+ return
+ default:
+ }
+
+ header, err := s.backend.Downloader().GetHeader(req.hash)
+ if err != nil {
+ if !logged {
+ logged = true
+ log.Info("Waiting for peers to retrieve sync target", "hash", req.hash)
+ }
+ time.Sleep(time.Second * time.Duration(retries+1))
+ retries++
+ continue
+ }
+ if target != nil && header.Number.Cmp(target.Number) <= 0 {
+ req.errc <- fmt.Errorf("stale sync target, current: %d, received: %d", target.Number, header.Number)
+ break
+ }
+ target = header
+ resync = true
+ break
+ }
+ if resync {
+ req.errc <- s.backend.Downloader().BeaconDevSync(ethconfig.FullSync, target)
+ }
+
+ case <-ticker.C:
+ if target == nil || !s.exitWhenSynced {
+ continue
+ }
+ if block := s.backend.BlockChain().GetBlockByHash(target.Hash()); block != nil {
+ log.Info("Sync target reached", "number", block.NumberU64(), "hash", block.Hash())
+ go s.stack.Close() // async since we need to close ourselves
+ return
+ }
+
+ case <-s.closed:
+ return
+ }
+ }
+}
+
+// Start launches the synchronization service.
+func (s *Syncer) Start() error {
+ s.wg.Add(1)
+ go s.run()
+ if s.target == (common.Hash{}) {
+ return nil
+ }
+ return s.Sync(s.target)
+}
+
+// Stop terminates the synchronization service and stops all background activities.
+// This function can only be called once.
+func (s *Syncer) Stop() error {
+ close(s.closed)
+ s.wg.Wait()
+ return nil
+}
+
+// Sync sets the synchronization target. Notably, setting a target lower than the
+// previous one is not allowed, as backward synchronization is not supported.
+func (s *Syncer) Sync(hash common.Hash) error {
+ req := &syncReq{
+ hash: hash,
+ errc: make(chan error, 1),
+ }
+ select {
+ case s.request <- req:
+ return <-req.errc
+ case <-s.closed:
+ return errors.New("syncer is closed")
+ }
+}
+
+// API is the collection of synchronization service APIs for debugging the
+// protocol.
+type API struct {
+ s *Syncer
+}
+
+// NewAPI creates a new debug API instance.
+func NewAPI(s *Syncer) *API {
+ return &API{s: s}
+}
+
+// Sync initiates a full sync to the target block hash.
+func (api *API) Sync(target common.Hash) error {
+ return api.s.Sync(target)
+}
diff --git a/eth/tracers/js/tracer_test.go b/eth/tracers/js/tracer_test.go
index a12b990a93..7f376a27fc 100644
--- a/eth/tracers/js/tracer_test.go
+++ b/eth/tracers/js/tracer_test.go
@@ -65,7 +65,7 @@ func runTrace(tracer *tracers.Tracer, vmctx *vmContext, chaincfg *params.ChainCo
tracer.OnTxStart(evm.GetVMContext(), types.NewTx(&types.LegacyTx{Gas: gasLimit, GasPrice: vmctx.txCtx.GasPrice}), contract.Caller())
tracer.OnEnter(0, byte(vm.CALL), contract.Caller(), contract.Address(), []byte{}, startGas, value.ToBig())
- ret, err := evm.Interpreter().Run(contract, []byte{}, false)
+ ret, err := evm.Run(contract, []byte{}, false)
tracer.OnExit(0, ret, startGas-contract.Gas, err, true)
// Rest gas assumes no refund
tracer.OnTxEnd(&types.Receipt{GasUsed: gasLimit - contract.Gas}, nil)
diff --git a/eth/tracers/logger/logger_test.go b/eth/tracers/logger/logger_test.go
index 12000b3b9a..acc3069e70 100644
--- a/eth/tracers/logger/logger_test.go
+++ b/eth/tracers/logger/logger_test.go
@@ -52,7 +52,7 @@ func TestStoreCapture(t *testing.T) {
contract.Code = []byte{byte(vm.PUSH1), 0x1, byte(vm.PUSH1), 0x0, byte(vm.SSTORE)}
var index common.Hash
logger.OnTxStart(evm.GetVMContext(), nil, common.Address{})
- _, err := evm.Interpreter().Run(contract, []byte{}, false)
+ _, err := evm.Run(contract, []byte{}, false)
if err != nil {
t.Fatal(err)
}
diff --git a/ethclient/simulated/backend_test.go b/ethclient/simulated/backend_test.go
index 7a399d41f3..ee20cd171a 100644
--- a/ethclient/simulated/backend_test.go
+++ b/ethclient/simulated/backend_test.go
@@ -52,7 +52,7 @@ func simTestBackend(testAddr common.Address) *Backend {
)
}
-func newBlobTx(sim *Backend, key *ecdsa.PrivateKey) (*types.Transaction, error) {
+func newBlobTx(sim *Backend, key *ecdsa.PrivateKey, nonce uint64) (*types.Transaction, error) {
client := sim.Client()
testBlob := &kzg4844.Blob{0x00}
@@ -67,12 +67,8 @@ func newBlobTx(sim *Backend, key *ecdsa.PrivateKey) (*types.Transaction, error)
addr := crypto.PubkeyToAddress(key.PublicKey)
chainid, _ := client.ChainID(context.Background())
- nonce, err := client.PendingNonceAt(context.Background(), addr)
- if err != nil {
- return nil, err
- }
-
chainidU256, _ := uint256.FromBig(chainid)
+
tx := types.NewTx(&types.BlobTx{
ChainID: chainidU256,
GasTipCap: gasTipCapU256,
@@ -88,7 +84,7 @@ func newBlobTx(sim *Backend, key *ecdsa.PrivateKey) (*types.Transaction, error)
return types.SignTx(tx, types.LatestSignerForChainID(chainid), key)
}
-func newTx(sim *Backend, key *ecdsa.PrivateKey) (*types.Transaction, error) {
+func newTx(sim *Backend, key *ecdsa.PrivateKey, nonce uint64) (*types.Transaction, error) {
client := sim.Client()
// create a signed transaction to send
@@ -96,10 +92,7 @@ func newTx(sim *Backend, key *ecdsa.PrivateKey) (*types.Transaction, error) {
gasPrice := new(big.Int).Add(head.BaseFee, big.NewInt(params.GWei))
addr := crypto.PubkeyToAddress(key.PublicKey)
chainid, _ := client.ChainID(context.Background())
- nonce, err := client.PendingNonceAt(context.Background(), addr)
- if err != nil {
- return nil, err
- }
+
tx := types.NewTx(&types.DynamicFeeTx{
ChainID: chainid,
Nonce: nonce,
@@ -161,7 +154,7 @@ func TestSendTransaction(t *testing.T) {
client := sim.Client()
ctx := context.Background()
- signedTx, err := newTx(sim, testKey)
+ signedTx, err := newTx(sim, testKey, 0)
if err != nil {
t.Errorf("could not create transaction: %v", err)
}
@@ -252,7 +245,7 @@ func TestForkResendTx(t *testing.T) {
parent, _ := client.HeaderByNumber(ctx, nil)
// 2.
- tx, err := newTx(sim, testKey)
+ tx, err := newTx(sim, testKey, 0)
if err != nil {
t.Fatalf("could not create transaction: %v", err)
}
@@ -297,7 +290,7 @@ func TestCommitReturnValue(t *testing.T) {
}
// Create a block in the original chain (containing a transaction to force different block hashes)
- tx, _ := newTx(sim, testKey)
+ tx, _ := newTx(sim, testKey, 0)
if err := client.SendTransaction(ctx, tx); err != nil {
t.Errorf("sending transaction: %v", err)
}
diff --git a/ethclient/simulated/rollback_test.go b/ethclient/simulated/rollback_test.go
index 57c59496d5..093467d291 100644
--- a/ethclient/simulated/rollback_test.go
+++ b/ethclient/simulated/rollback_test.go
@@ -38,9 +38,9 @@ func TestTransactionRollbackBehavior(t *testing.T) {
defer sim.Close()
client := sim.Client()
- btx0 := testSendSignedTx(t, testKey, sim, true)
- tx0 := testSendSignedTx(t, testKey2, sim, false)
- tx1 := testSendSignedTx(t, testKey2, sim, false)
+ btx0 := testSendSignedTx(t, testKey, sim, true, 0)
+ tx0 := testSendSignedTx(t, testKey2, sim, false, 0)
+ tx1 := testSendSignedTx(t, testKey2, sim, false, 1)
sim.Rollback()
@@ -48,9 +48,9 @@ func TestTransactionRollbackBehavior(t *testing.T) {
t.Fatalf("all transactions were not rolled back")
}
- btx2 := testSendSignedTx(t, testKey, sim, true)
- tx2 := testSendSignedTx(t, testKey2, sim, false)
- tx3 := testSendSignedTx(t, testKey2, sim, false)
+ btx2 := testSendSignedTx(t, testKey, sim, true, 0)
+ tx2 := testSendSignedTx(t, testKey2, sim, false, 0)
+ tx3 := testSendSignedTx(t, testKey2, sim, false, 1)
sim.Commit()
@@ -61,7 +61,7 @@ func TestTransactionRollbackBehavior(t *testing.T) {
// testSendSignedTx sends a signed transaction to the simulated backend.
// It does not commit the block.
-func testSendSignedTx(t *testing.T, key *ecdsa.PrivateKey, sim *Backend, isBlobTx bool) *types.Transaction {
+func testSendSignedTx(t *testing.T, key *ecdsa.PrivateKey, sim *Backend, isBlobTx bool, nonce uint64) *types.Transaction {
t.Helper()
client := sim.Client()
ctx := context.Background()
@@ -71,9 +71,9 @@ func testSendSignedTx(t *testing.T, key *ecdsa.PrivateKey, sim *Backend, isBlobT
signedTx *types.Transaction
)
if isBlobTx {
- signedTx, err = newBlobTx(sim, key)
+ signedTx, err = newBlobTx(sim, key, nonce)
} else {
- signedTx, err = newTx(sim, key)
+ signedTx, err = newTx(sim, key, nonce)
}
if err != nil {
t.Fatalf("failed to create transaction: %v", err)
@@ -96,13 +96,13 @@ func pendingStateHasTx(client Client, tx *types.Transaction) bool {
)
// Poll for receipt with timeout
- deadline := time.Now().Add(2 * time.Second)
+ deadline := time.Now().Add(200 * time.Millisecond)
for time.Now().Before(deadline) {
receipt, err = client.TransactionReceipt(ctx, tx.Hash())
if err == nil && receipt != nil {
break
}
- time.Sleep(100 * time.Millisecond)
+ time.Sleep(5 * time.Millisecond)
}
if err != nil {
diff --git a/ethdb/leveldb/leveldb.go b/ethdb/leveldb/leveldb.go
index 736a44d73d..8e1bb86fec 100644
--- a/ethdb/leveldb/leveldb.go
+++ b/ethdb/leveldb/leveldb.go
@@ -22,6 +22,7 @@ package leveldb
import (
"bytes"
+ "errors"
"fmt"
"sync"
"time"
@@ -31,7 +32,7 @@ import (
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/metrics"
"github.com/syndtr/goleveldb/leveldb"
- "github.com/syndtr/goleveldb/leveldb/errors"
+ lerrors "github.com/syndtr/goleveldb/leveldb/errors"
"github.com/syndtr/goleveldb/leveldb/filter"
"github.com/syndtr/goleveldb/leveldb/opt"
"github.com/syndtr/goleveldb/leveldb/util"
@@ -120,7 +121,7 @@ func NewCustom(file string, namespace string, customize func(options *opt.Option
// Open the db and recover any potential corruptions
db, err := leveldb.OpenFile(file, options)
- if _, corrupted := err.(*errors.ErrCorrupted); corrupted {
+ if _, corrupted := err.(*lerrors.ErrCorrupted); corrupted {
db, err = leveldb.RecoverFile(file, nil)
}
if err != nil {
@@ -548,7 +549,7 @@ func (r *replayer) DeleteRange(start, end []byte) {
if rangeDeleter, ok := r.writer.(ethdb.KeyValueRangeDeleter); ok {
r.failure = rangeDeleter.DeleteRange(start, end)
} else {
- r.failure = fmt.Errorf("ethdb.KeyValueWriter does not implement DeleteRange")
+ r.failure = errors.New("ethdb.KeyValueWriter does not implement DeleteRange")
}
}
diff --git a/ethdb/memorydb/memorydb.go b/ethdb/memorydb/memorydb.go
index 5c4c48de64..200ad60245 100644
--- a/ethdb/memorydb/memorydb.go
+++ b/ethdb/memorydb/memorydb.go
@@ -20,7 +20,6 @@ package memorydb
import (
"bytes"
"errors"
- "fmt"
"sort"
"strings"
"sync"
@@ -327,7 +326,7 @@ func (b *batch) Replay(w ethdb.KeyValueWriter) error {
return err
}
} else {
- return fmt.Errorf("ethdb.KeyValueWriter does not implement DeleteRange")
+ return errors.New("ethdb.KeyValueWriter does not implement DeleteRange")
}
}
continue
diff --git a/ethdb/pebble/pebble.go b/ethdb/pebble/pebble.go
index 58a521f6fb..2370d4654f 100644
--- a/ethdb/pebble/pebble.go
+++ b/ethdb/pebble/pebble.go
@@ -18,6 +18,7 @@
package pebble
import (
+ "errors"
"fmt"
"runtime"
"strings"
@@ -705,7 +706,7 @@ func (b *batch) Replay(w ethdb.KeyValueWriter) error {
return err
}
} else {
- return fmt.Errorf("ethdb.KeyValueWriter does not implement DeleteRange")
+ return errors.New("ethdb.KeyValueWriter does not implement DeleteRange")
}
} else {
return fmt.Errorf("unhandled operation, keytype: %v", kind)
diff --git a/go.mod b/go.mod
index 6b63450a91..363d7d3dfb 100644
--- a/go.mod
+++ b/go.mod
@@ -29,7 +29,7 @@ require (
github.com/fsnotify/fsnotify v1.6.0
github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff
github.com/gofrs/flock v0.12.1
- github.com/golang-jwt/jwt/v4 v4.5.1
+ github.com/golang-jwt/jwt/v4 v4.5.2
github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb
github.com/google/gofuzz v1.2.0
github.com/google/uuid v1.3.0
diff --git a/go.sum b/go.sum
index db59c74229..099d432ba4 100644
--- a/go.sum
+++ b/go.sum
@@ -148,8 +148,8 @@ github.com/gofrs/flock v0.12.1 h1:MTLVXXHf8ekldpJk3AKicLij9MdwOWkZ+a/jHHZby9E=
github.com/gofrs/flock v0.12.1/go.mod h1:9zxTsyu5xtJ9DK+1tFZyibEV7y3uwDxPPfbxeeHCoD0=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
-github.com/golang-jwt/jwt/v4 v4.5.1 h1:JdqV9zKUdtaa9gdPlywC3aeoEsR681PlKC+4F5gQgeo=
-github.com/golang-jwt/jwt/v4 v4.5.1/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
+github.com/golang-jwt/jwt/v4 v4.5.2 h1:YtQM7lnr8iZ+j5q71MGKkNw9Mn7AjHM68uc9g5fXeUI=
+github.com/golang-jwt/jwt/v4 v4.5.2/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
diff --git a/internal/ethapi/override/override_test.go b/internal/ethapi/override/override_test.go
index 02a17c1331..41b4f2c253 100644
--- a/internal/ethapi/override/override_test.go
+++ b/internal/ethapi/override/override_test.go
@@ -31,6 +31,10 @@ import (
type precompileContract struct{}
+func (p *precompileContract) Name() string {
+ panic("implement me")
+}
+
func (p *precompileContract) RequiredGas(input []byte) uint64 { return 0 }
func (p *precompileContract) Run(input []byte) ([]byte, error) { return nil, nil }
diff --git a/internal/web3ext/web3ext.go b/internal/web3ext/web3ext.go
index a6d93fc1c5..d7f37a79ee 100644
--- a/internal/web3ext/web3ext.go
+++ b/internal/web3ext/web3ext.go
@@ -468,6 +468,11 @@ web3._extend({
call: 'debug_getTrieFlushInterval',
params: 0
}),
+ new web3._extend.Method({
+ name: 'sync',
+ call: 'debug_sync',
+ params: 1
+ }),
],
properties: []
});
diff --git a/metrics/runtimehistogram.go b/metrics/runtimehistogram.go
index 53904b2b28..0ab8914602 100644
--- a/metrics/runtimehistogram.go
+++ b/metrics/runtimehistogram.go
@@ -14,7 +14,7 @@ func getOrRegisterRuntimeHistogram(name string, scale float64, r Registry) *runt
// runtimeHistogram wraps a runtime/metrics histogram.
type runtimeHistogram struct {
- v atomic.Value // v is a pointer to a metrics.Float64Histogram
+ v atomic.Pointer[metrics.Float64Histogram]
scaleFactor float64
}
@@ -58,7 +58,7 @@ func (h *runtimeHistogram) Update(int64) {
// Snapshot returns a non-changing copy of the histogram.
func (h *runtimeHistogram) Snapshot() HistogramSnapshot {
- hist := h.v.Load().(*metrics.Float64Histogram)
+ hist := h.v.Load()
return newRuntimeHistogramSnapshot(hist)
}
diff --git a/p2p/enode/localnode.go b/p2p/enode/localnode.go
index 6e79c9cbdc..d8fa6a9202 100644
--- a/p2p/enode/localnode.go
+++ b/p2p/enode/localnode.go
@@ -45,7 +45,7 @@ const (
// current process. Setting ENR entries via the Set method updates the record. A new version
// of the record is signed on demand when the Node method is called.
type LocalNode struct {
- cur atomic.Value // holds a non-nil node pointer while the record is up-to-date
+ cur atomic.Pointer[Node] // holds a non-nil node pointer while the record is up-to-date
id ID
key *ecdsa.PrivateKey
@@ -82,7 +82,7 @@ func NewLocalNode(db *DB, key *ecdsa.PrivateKey) *LocalNode {
}
ln.seq = db.localSeq(ln.id)
ln.update = time.Now()
- ln.cur.Store((*Node)(nil))
+ ln.cur.Store(nil)
return ln
}
@@ -94,7 +94,7 @@ func (ln *LocalNode) Database() *DB {
// Node returns the current version of the local node record.
func (ln *LocalNode) Node() *Node {
// If we have a valid record, return that
- n := ln.cur.Load().(*Node)
+ n := ln.cur.Load()
if n != nil {
return n
}
@@ -105,7 +105,7 @@ func (ln *LocalNode) Node() *Node {
// Double check the current record, since multiple goroutines might be waiting
// on the write mutex.
- if n = ln.cur.Load().(*Node); n != nil {
+ if n = ln.cur.Load(); n != nil {
return n
}
@@ -121,7 +121,7 @@ func (ln *LocalNode) Node() *Node {
ln.sign()
ln.update = time.Now()
- return ln.cur.Load().(*Node)
+ return ln.cur.Load()
}
// Seq returns the current sequence number of the local node record.
@@ -276,11 +276,11 @@ func (e *lnEndpoint) get() (newIP net.IP, newPort uint16) {
}
func (ln *LocalNode) invalidate() {
- ln.cur.Store((*Node)(nil))
+ ln.cur.Store(nil)
}
func (ln *LocalNode) sign() {
- if n := ln.cur.Load().(*Node); n != nil {
+ if n := ln.cur.Load(); n != nil {
return // no changes
}
diff --git a/rlp/decode.go b/rlp/decode.go
index 0fbca243ee..5a06f35ec0 100644
--- a/rlp/decode.go
+++ b/rlp/decode.go
@@ -148,9 +148,9 @@ func addErrorContext(err error, ctx string) error {
}
var (
- decoderInterface = reflect.TypeOf(new(Decoder)).Elem()
- bigInt = reflect.TypeOf(big.Int{})
- u256Int = reflect.TypeOf(uint256.Int{})
+ decoderInterface = reflect.TypeFor[Decoder]()
+ bigInt = reflect.TypeFor[big.Int]()
+ u256Int = reflect.TypeFor[uint256.Int]()
)
func makeDecoder(typ reflect.Type, tags rlpstruct.Tags) (dec decoder, err error) {
@@ -512,7 +512,7 @@ func makeNilPtrDecoder(etype reflect.Type, etypeinfo *typeinfo, ts rlpstruct.Tag
}
}
-var ifsliceType = reflect.TypeOf([]interface{}{})
+var ifsliceType = reflect.TypeFor[[]any]()
func decodeInterface(s *Stream, val reflect.Value) error {
if val.Type().NumMethod() != 0 {
diff --git a/rlp/encode.go b/rlp/encode.go
index 3645bbfda0..623932a90b 100644
--- a/rlp/encode.go
+++ b/rlp/encode.go
@@ -133,7 +133,7 @@ func puthead(buf []byte, smalltag, largetag byte, size uint64) int {
return sizesize + 1
}
-var encoderInterface = reflect.TypeOf(new(Encoder)).Elem()
+var encoderInterface = reflect.TypeFor[Encoder]()
// makeWriter creates a writer function for the given type.
func makeWriter(typ reflect.Type, ts rlpstruct.Tags) (writer, error) {
diff --git a/rlp/raw.go b/rlp/raw.go
index 879e3bfe5d..cec90346a1 100644
--- a/rlp/raw.go
+++ b/rlp/raw.go
@@ -26,7 +26,7 @@ import (
// not verify whether the content of RawValues is valid RLP.
type RawValue []byte
-var rawValueType = reflect.TypeOf(RawValue{})
+var rawValueType = reflect.TypeFor[RawValue]()
// StringSize returns the encoded size of a string.
func StringSize(s string) uint64 {
diff --git a/rlp/rlpgen/gen.go b/rlp/rlpgen/gen.go
index ff39874737..64841b38a0 100644
--- a/rlp/rlpgen/gen.go
+++ b/rlp/rlpgen/gen.go
@@ -24,6 +24,7 @@ import (
"sort"
"github.com/ethereum/go-ethereum/rlp/internal/rlpstruct"
+ "golang.org/x/tools/go/packages"
)
// buildContext keeps the data needed for make*Op.
@@ -96,14 +97,20 @@ func (bctx *buildContext) typeToStructType(typ types.Type) *rlpstruct.Type {
// file and assigns unique names of temporary variables.
type genContext struct {
inPackage *types.Package
- imports map[string]struct{}
+ imports map[string]genImportPackage
tempCounter int
}
+type genImportPackage struct {
+ alias string
+ pkg *types.Package
+}
+
func newGenContext(inPackage *types.Package) *genContext {
return &genContext{
- inPackage: inPackage,
- imports: make(map[string]struct{}),
+ inPackage: inPackage,
+ imports: make(map[string]genImportPackage),
+ tempCounter: 0,
}
}
@@ -117,32 +124,78 @@ func (ctx *genContext) resetTemp() {
ctx.tempCounter = 0
}
-func (ctx *genContext) addImport(path string) {
- if path == ctx.inPackage.Path() {
- return // avoid importing the package that we're generating in.
+func (ctx *genContext) addImportPath(path string) {
+ pkg, err := ctx.loadPackage(path)
+ if err != nil {
+ panic(fmt.Sprintf("can't load package %q: %v", path, err))
}
- // TODO: renaming?
- ctx.imports[path] = struct{}{}
+ ctx.addImport(pkg)
}
-// importsList returns all packages that need to be imported.
-func (ctx *genContext) importsList() []string {
- imp := make([]string, 0, len(ctx.imports))
- for k := range ctx.imports {
- imp = append(imp, k)
+func (ctx *genContext) addImport(pkg *types.Package) string {
+ if pkg.Path() == ctx.inPackage.Path() {
+ return "" // avoid importing the package that we're generating in
}
- sort.Strings(imp)
- return imp
+ if p, exists := ctx.imports[pkg.Path()]; exists {
+ return p.alias
+ }
+ var (
+ baseName = pkg.Name()
+ alias = baseName
+ counter = 1
+ )
+ // If the base name conflicts with an existing import, add a numeric suffix.
+ for ctx.hasAlias(alias) {
+ alias = fmt.Sprintf("%s%d", baseName, counter)
+ counter++
+ }
+ ctx.imports[pkg.Path()] = genImportPackage{alias, pkg}
+ return alias
}
-// qualify is the types.Qualifier used for printing types.
+// hasAlias checks if an alias is already in use
+func (ctx *genContext) hasAlias(alias string) bool {
+ for _, p := range ctx.imports {
+ if p.alias == alias {
+ return true
+ }
+ }
+ return false
+}
+
+// loadPackage attempts to load package information
+func (ctx *genContext) loadPackage(path string) (*types.Package, error) {
+ cfg := &packages.Config{Mode: packages.NeedName}
+ pkgs, err := packages.Load(cfg, path)
+ if err != nil {
+ return nil, err
+ }
+ if len(pkgs) == 0 {
+ return nil, fmt.Errorf("no package found for path %s", path)
+ }
+ return types.NewPackage(path, pkgs[0].Name), nil
+}
+
+// qualify is the types.Qualifier used for printing types
func (ctx *genContext) qualify(pkg *types.Package) string {
if pkg.Path() == ctx.inPackage.Path() {
return ""
}
- ctx.addImport(pkg.Path())
- // TODO: renaming?
- return pkg.Name()
+ return ctx.addImport(pkg)
+}
+
+// importsList returns all packages that need to be imported
+func (ctx *genContext) importsList() []string {
+ imp := make([]string, 0, len(ctx.imports))
+ for path, p := range ctx.imports {
+ if p.alias == p.pkg.Name() {
+ imp = append(imp, fmt.Sprintf("%q", path))
+ } else {
+ imp = append(imp, fmt.Sprintf("%s %q", p.alias, path))
+ }
+ }
+ sort.Strings(imp)
+ return imp
}
type op interface {
@@ -359,7 +412,7 @@ func (op uint256Op) genWrite(ctx *genContext, v string) string {
}
func (op uint256Op) genDecode(ctx *genContext) (string, string) {
- ctx.addImport("github.com/holiman/uint256")
+ ctx.addImportPath("github.com/holiman/uint256")
var b bytes.Buffer
resultV := ctx.temp()
@@ -732,7 +785,7 @@ func (bctx *buildContext) makeOp(name *types.Named, typ types.Type, tags rlpstru
// generateDecoder generates the DecodeRLP method on 'typ'.
func generateDecoder(ctx *genContext, typ string, op op) []byte {
ctx.resetTemp()
- ctx.addImport(pathOfPackageRLP)
+ ctx.addImportPath(pathOfPackageRLP)
result, code := op.genDecode(ctx)
var b bytes.Buffer
@@ -747,8 +800,8 @@ func generateDecoder(ctx *genContext, typ string, op op) []byte {
// generateEncoder generates the EncodeRLP method on 'typ'.
func generateEncoder(ctx *genContext, typ string, op op) []byte {
ctx.resetTemp()
- ctx.addImport("io")
- ctx.addImport(pathOfPackageRLP)
+ ctx.addImportPath("io")
+ ctx.addImportPath(pathOfPackageRLP)
var b bytes.Buffer
fmt.Fprintf(&b, "func (obj *%s) EncodeRLP(_w io.Writer) error {\n", typ)
@@ -783,7 +836,7 @@ func (bctx *buildContext) generate(typ *types.Named, encoder, decoder bool) ([]b
var b bytes.Buffer
fmt.Fprintf(&b, "package %s\n\n", pkg.Name())
for _, imp := range ctx.importsList() {
- fmt.Fprintf(&b, "import %q\n", imp)
+ fmt.Fprintf(&b, "import %s\n", imp)
}
if encoder {
fmt.Fprintln(&b)
diff --git a/rlp/rlpgen/gen_test.go b/rlp/rlpgen/gen_test.go
index b4fabb3dc6..4bfb1b9d25 100644
--- a/rlp/rlpgen/gen_test.go
+++ b/rlp/rlpgen/gen_test.go
@@ -47,7 +47,7 @@ func init() {
}
}
-var tests = []string{"uints", "nil", "rawvalue", "optional", "bigint", "uint256"}
+var tests = []string{"uints", "nil", "rawvalue", "optional", "bigint", "uint256", "pkgclash"}
func TestOutput(t *testing.T) {
for _, test := range tests {
diff --git a/rlp/rlpgen/testdata/pkgclash.in.txt b/rlp/rlpgen/testdata/pkgclash.in.txt
new file mode 100644
index 0000000000..1d407881ce
--- /dev/null
+++ b/rlp/rlpgen/testdata/pkgclash.in.txt
@@ -0,0 +1,13 @@
+// -*- mode: go -*-
+
+package test
+
+import (
+ eth1 "github.com/ethereum/go-ethereum/eth"
+ eth2 "github.com/ethereum/go-ethereum/eth/protocols/eth"
+)
+
+type Test struct {
+ A eth1.MinerAPI
+ B eth2.GetReceiptsPacket
+}
diff --git a/rlp/rlpgen/testdata/pkgclash.out.txt b/rlp/rlpgen/testdata/pkgclash.out.txt
new file mode 100644
index 0000000000..d119639b99
--- /dev/null
+++ b/rlp/rlpgen/testdata/pkgclash.out.txt
@@ -0,0 +1,82 @@
+package test
+
+import "github.com/ethereum/go-ethereum/common"
+import "github.com/ethereum/go-ethereum/eth"
+import "github.com/ethereum/go-ethereum/rlp"
+import "io"
+import eth1 "github.com/ethereum/go-ethereum/eth/protocols/eth"
+
+func (obj *Test) EncodeRLP(_w io.Writer) error {
+ w := rlp.NewEncoderBuffer(_w)
+ _tmp0 := w.List()
+ _tmp1 := w.List()
+ w.ListEnd(_tmp1)
+ _tmp2 := w.List()
+ w.WriteUint64(obj.B.RequestId)
+ _tmp3 := w.List()
+ for _, _tmp4 := range obj.B.GetReceiptsRequest {
+ w.WriteBytes(_tmp4[:])
+ }
+ w.ListEnd(_tmp3)
+ w.ListEnd(_tmp2)
+ w.ListEnd(_tmp0)
+ return w.Flush()
+}
+
+func (obj *Test) DecodeRLP(dec *rlp.Stream) error {
+ var _tmp0 Test
+ {
+ if _, err := dec.List(); err != nil {
+ return err
+ }
+ // A:
+ var _tmp1 eth.MinerAPI
+ {
+ if _, err := dec.List(); err != nil {
+ return err
+ }
+ if err := dec.ListEnd(); err != nil {
+ return err
+ }
+ }
+ _tmp0.A = _tmp1
+ // B:
+ var _tmp2 eth1.GetReceiptsPacket
+ {
+ if _, err := dec.List(); err != nil {
+ return err
+ }
+ // RequestId:
+ _tmp3, err := dec.Uint64()
+ if err != nil {
+ return err
+ }
+ _tmp2.RequestId = _tmp3
+ // GetReceiptsRequest:
+ var _tmp4 []common.Hash
+ if _, err := dec.List(); err != nil {
+ return err
+ }
+ for dec.MoreDataInList() {
+ var _tmp5 common.Hash
+ if err := dec.ReadBytes(_tmp5[:]); err != nil {
+ return err
+ }
+ _tmp4 = append(_tmp4, _tmp5)
+ }
+ if err := dec.ListEnd(); err != nil {
+ return err
+ }
+ _tmp2.GetReceiptsRequest = _tmp4
+ if err := dec.ListEnd(); err != nil {
+ return err
+ }
+ }
+ _tmp0.B = _tmp2
+ if err := dec.ListEnd(); err != nil {
+ return err
+ }
+ }
+ *obj = _tmp0
+ return nil
+}
diff --git a/rpc/service.go b/rpc/service.go
index d50090e9fb..0f62d7eb7c 100644
--- a/rpc/service.go
+++ b/rpc/service.go
@@ -29,10 +29,10 @@ import (
)
var (
- contextType = reflect.TypeOf((*context.Context)(nil)).Elem()
- errorType = reflect.TypeOf((*error)(nil)).Elem()
- subscriptionType = reflect.TypeOf(Subscription{})
- stringType = reflect.TypeOf("")
+ contextType = reflect.TypeFor[context.Context]()
+ errorType = reflect.TypeFor[error]()
+ subscriptionType = reflect.TypeFor[Subscription]()
+ stringType = reflect.TypeFor[string]()
)
type serviceRegistry struct {
diff --git a/signer/core/apitypes/types.go b/signer/core/apitypes/types.go
index 66c750a9c3..dcbd04867c 100644
--- a/signer/core/apitypes/types.go
+++ b/signer/core/apitypes/types.go
@@ -151,7 +151,7 @@ func (args *SendTxArgs) ToTransaction() (*types.Transaction, error) {
al = *args.AccessList
}
if to == nil {
- return nil, fmt.Errorf("transaction recipient must be set for blob transactions")
+ return nil, errors.New("transaction recipient must be set for blob transactions")
}
data = &types.BlobTx{
To: *to,
@@ -544,7 +544,7 @@ func parseBytes(encType interface{}) ([]byte, bool) {
// Handle array types.
val := reflect.ValueOf(encType)
if val.Kind() == reflect.Array && val.Type().Elem().Kind() == reflect.Uint8 {
- v := reflect.MakeSlice(reflect.TypeOf([]byte{}), val.Len(), val.Len())
+ v := reflect.ValueOf(make([]byte, val.Len()))
reflect.Copy(v, val)
return v.Bytes(), true
}
diff --git a/tests/transaction_test_util.go b/tests/transaction_test_util.go
index b2efabe82e..a90c2d522f 100644
--- a/tests/transaction_test_util.go
+++ b/tests/transaction_test_util.go
@@ -17,6 +17,7 @@
package tests
import (
+ "errors"
"fmt"
"math/big"
@@ -43,7 +44,7 @@ type ttFork struct {
func (tt *TransactionTest) validate() error {
if tt.Txbytes == nil {
- return fmt.Errorf("missing txbytes")
+ return errors.New("missing txbytes")
}
for name, fork := range tt.Result {
if err := tt.validateFork(fork); err != nil {
@@ -58,10 +59,10 @@ func (tt *TransactionTest) validateFork(fork *ttFork) error {
return nil
}
if fork.Hash == nil && fork.Exception == nil {
- return fmt.Errorf("missing hash and exception")
+ return errors.New("missing hash and exception")
}
if fork.Hash != nil && fork.Sender == nil {
- return fmt.Errorf("missing sender")
+ return errors.New("missing sender")
}
return nil
}
diff --git a/trie/hasher.go b/trie/hasher.go
index 16606808c9..a2a1f5b662 100644
--- a/trie/hasher.go
+++ b/trie/hasher.go
@@ -17,6 +17,8 @@
package trie
import (
+ "bytes"
+ "fmt"
"sync"
"github.com/ethereum/go-ethereum/crypto"
@@ -54,7 +56,7 @@ func returnHasherToPool(h *hasher) {
}
// hash collapses a node down into a hash node.
-func (h *hasher) hash(n node, force bool) node {
+func (h *hasher) hash(n node, force bool) []byte {
// Return the cached hash if it's available
if hash, _ := n.cache(); hash != nil {
return hash
@@ -62,101 +64,110 @@ func (h *hasher) hash(n node, force bool) node {
// Trie not processed yet, walk the children
switch n := n.(type) {
case *shortNode:
- collapsed := h.hashShortNodeChildren(n)
- hashed := h.shortnodeToHash(collapsed, force)
- if hn, ok := hashed.(hashNode); ok {
- n.flags.hash = hn
- } else {
- n.flags.hash = nil
+ enc := h.encodeShortNode(n)
+ if len(enc) < 32 && !force {
+ // Nodes smaller than 32 bytes are embedded directly in their parent.
+ // In such cases, return the raw encoded blob instead of the node hash.
+ // It's essential to deep-copy the node blob, as the underlying buffer
+ // of enc will be reused later.
+ buf := make([]byte, len(enc))
+ copy(buf, enc)
+ return buf
}
- return hashed
+ hash := h.hashData(enc)
+ n.flags.hash = hash
+ return hash
+
case *fullNode:
- collapsed := h.hashFullNodeChildren(n)
- hashed := h.fullnodeToHash(collapsed, force)
- if hn, ok := hashed.(hashNode); ok {
- n.flags.hash = hn
- } else {
- n.flags.hash = nil
+ enc := h.encodeFullNode(n)
+ if len(enc) < 32 && !force {
+ // Nodes smaller than 32 bytes are embedded directly in their parent.
+ // In such cases, return the raw encoded blob instead of the node hash.
+ // It's essential to deep-copy the node blob, as the underlying buffer
+ // of enc will be reused later.
+ buf := make([]byte, len(enc))
+ copy(buf, enc)
+ return buf
}
- return hashed
- default:
- // Value and hash nodes don't have children, so they're left as were
+ hash := h.hashData(enc)
+ n.flags.hash = hash
+ return hash
+
+ case hashNode:
+ // hash nodes don't have children, so they're left as were
return n
- }
-}
-// hashShortNodeChildren returns a copy of the supplied shortNode, with its child
-// being replaced by either the hash or an embedded node if the child is small.
-func (h *hasher) hashShortNodeChildren(n *shortNode) *shortNode {
- var collapsed shortNode
- collapsed.Key = hexToCompact(n.Key)
- switch n.Val.(type) {
- case *fullNode, *shortNode:
- collapsed.Val = h.hash(n.Val, false)
default:
- collapsed.Val = n.Val
+ panic(fmt.Errorf("unexpected node type, %T", n))
}
- return &collapsed
}
-// hashFullNodeChildren returns a copy of the supplied fullNode, with its child
-// being replaced by either the hash or an embedded node if the child is small.
-func (h *hasher) hashFullNodeChildren(n *fullNode) *fullNode {
- var children [17]node
+// encodeShortNode encodes the provided shortNode into the bytes. Notably, the
+// return slice must be deep-copied explicitly, otherwise the underlying slice
+// will be reused later.
+func (h *hasher) encodeShortNode(n *shortNode) []byte {
+ // Encode leaf node
+ if hasTerm(n.Key) {
+ var ln leafNodeEncoder
+ ln.Key = hexToCompact(n.Key)
+ ln.Val = n.Val.(valueNode)
+ ln.encode(h.encbuf)
+ return h.encodedBytes()
+ }
+ // Encode extension node
+ var en extNodeEncoder
+ en.Key = hexToCompact(n.Key)
+ en.Val = h.hash(n.Val, false)
+ en.encode(h.encbuf)
+ return h.encodedBytes()
+}
+
+// fnEncoderPool is the pool for storing shared fullNode encoder to mitigate
+// the significant memory allocation overhead.
+var fnEncoderPool = sync.Pool{
+ New: func() interface{} {
+ var enc fullnodeEncoder
+ return &enc
+ },
+}
+
+// encodeFullNode encodes the provided fullNode into the bytes. Notably, the
+// return slice must be deep-copied explicitly, otherwise the underlying slice
+// will be reused later.
+func (h *hasher) encodeFullNode(n *fullNode) []byte {
+ fn := fnEncoderPool.Get().(*fullnodeEncoder)
+ fn.reset()
+
if h.parallel {
var wg sync.WaitGroup
for i := 0; i < 16; i++ {
- if child := n.Children[i]; child != nil {
- wg.Add(1)
- go func(i int) {
- hasher := newHasher(false)
- children[i] = hasher.hash(child, false)
- returnHasherToPool(hasher)
- wg.Done()
- }(i)
- } else {
- children[i] = nilValueNode
+ if n.Children[i] == nil {
+ continue
}
+ wg.Add(1)
+ go func(i int) {
+ defer wg.Done()
+
+ h := newHasher(false)
+ fn.Children[i] = h.hash(n.Children[i], false)
+ returnHasherToPool(h)
+ }(i)
}
wg.Wait()
} else {
for i := 0; i < 16; i++ {
if child := n.Children[i]; child != nil {
- children[i] = h.hash(child, false)
- } else {
- children[i] = nilValueNode
+ fn.Children[i] = h.hash(child, false)
}
}
}
if n.Children[16] != nil {
- children[16] = n.Children[16]
+ fn.Children[16] = n.Children[16].(valueNode)
}
- return &fullNode{flags: nodeFlag{}, Children: children}
-}
+ fn.encode(h.encbuf)
+ fnEncoderPool.Put(fn)
-// shortNodeToHash computes the hash of the given shortNode. The shortNode must
-// first be collapsed, with its key converted to compact form. If the RLP-encoded
-// node data is smaller than 32 bytes, the node itself is returned.
-func (h *hasher) shortnodeToHash(n *shortNode, force bool) node {
- n.encode(h.encbuf)
- enc := h.encodedBytes()
-
- if len(enc) < 32 && !force {
- return n // Nodes smaller than 32 bytes are stored inside their parent
- }
- return h.hashData(enc)
-}
-
-// fullnodeToHash computes the hash of the given fullNode. If the RLP-encoded
-// node data is smaller than 32 bytes, the node itself is returned.
-func (h *hasher) fullnodeToHash(n *fullNode, force bool) node {
- n.encode(h.encbuf)
- enc := h.encodedBytes()
-
- if len(enc) < 32 && !force {
- return n // Nodes smaller than 32 bytes are stored inside their parent
- }
- return h.hashData(enc)
+ return h.encodedBytes()
}
// encodedBytes returns the result of the last encoding operation on h.encbuf.
@@ -175,9 +186,10 @@ func (h *hasher) encodedBytes() []byte {
return h.tmp
}
-// hashData hashes the provided data
-func (h *hasher) hashData(data []byte) hashNode {
- n := make(hashNode, 32)
+// hashData hashes the provided data. It is safe to modify the returned slice after
+// the function returns.
+func (h *hasher) hashData(data []byte) []byte {
+ n := make([]byte, 32)
h.sha.Reset()
h.sha.Write(data)
h.sha.Read(n)
@@ -192,20 +204,17 @@ func (h *hasher) hashDataTo(dst, data []byte) {
h.sha.Read(dst)
}
-// proofHash is used to construct trie proofs, and returns the 'collapsed'
-// node (for later RLP encoding) as well as the hashed node -- unless the
-// node is smaller than 32 bytes, in which case it will be returned as is.
-// This method does not do anything on value- or hash-nodes.
-func (h *hasher) proofHash(original node) (collapsed, hashed node) {
+// proofHash is used to construct trie proofs, returning the rlp-encoded node blobs.
+// Note, only resolved node (shortNode or fullNode) is expected for proofing.
+//
+// It is safe to modify the returned slice after the function returns.
+func (h *hasher) proofHash(original node) []byte {
switch n := original.(type) {
case *shortNode:
- sn := h.hashShortNodeChildren(n)
- return sn, h.shortnodeToHash(sn, false)
+ return bytes.Clone(h.encodeShortNode(n))
case *fullNode:
- fn := h.hashFullNodeChildren(n)
- return fn, h.fullnodeToHash(fn, false)
+ return bytes.Clone(h.encodeFullNode(n))
default:
- // Value and hash nodes don't have children, so they're left as were
- return n, n
+ panic(fmt.Errorf("unexpected node type, %T", original))
}
}
diff --git a/trie/iterator.go b/trie/iterator.go
index fa01611063..e6fedf2430 100644
--- a/trie/iterator.go
+++ b/trie/iterator.go
@@ -240,9 +240,9 @@ func (it *nodeIterator) LeafProof() [][]byte {
for i, item := range it.stack[:len(it.stack)-1] {
// Gather nodes that end up as hash nodes (or the root)
- node, hashed := hasher.proofHash(item.node)
- if _, ok := hashed.(hashNode); ok || i == 0 {
- proofs = append(proofs, nodeToBytes(node))
+ enc := hasher.proofHash(item.node)
+ if len(enc) >= 32 || i == 0 {
+ proofs = append(proofs, enc)
}
}
return proofs
diff --git a/trie/node.go b/trie/node.go
index 96f077ebbb..74fac4fd4e 100644
--- a/trie/node.go
+++ b/trie/node.go
@@ -68,10 +68,6 @@ type (
}
)
-// nilValueNode is used when collapsing internal trie nodes for hashing, since
-// unset children need to serialize correctly.
-var nilValueNode = valueNode(nil)
-
// EncodeRLP encodes a full node into the consensus RLP format.
func (n *fullNode) EncodeRLP(w io.Writer) error {
eb := rlp.NewEncoderBuffer(w)
diff --git a/trie/node_enc.go b/trie/node_enc.go
index c95587eeab..02b93ee6f3 100644
--- a/trie/node_enc.go
+++ b/trie/node_enc.go
@@ -42,18 +42,29 @@ func (n *fullNode) encode(w rlp.EncoderBuffer) {
func (n *fullnodeEncoder) encode(w rlp.EncoderBuffer) {
offset := w.List()
- for _, c := range n.Children {
- if c == nil {
+ for i, c := range n.Children {
+ if len(c) == 0 {
w.Write(rlp.EmptyString)
- } else if len(c) < 32 {
- w.Write(c) // rawNode
} else {
- w.WriteBytes(c) // hashNode
+ // valueNode or hashNode
+ if i == 16 || len(c) >= 32 {
+ w.WriteBytes(c)
+ } else {
+ w.Write(c) // rawNode
+ }
}
}
w.ListEnd(offset)
}
+func (n *fullnodeEncoder) reset() {
+ for i, c := range n.Children {
+ if len(c) != 0 {
+ n.Children[i] = n.Children[i][:0]
+ }
+ }
+}
+
func (n *shortNode) encode(w rlp.EncoderBuffer) {
offset := w.List()
w.WriteBytes(n.Key)
@@ -70,7 +81,7 @@ func (n *extNodeEncoder) encode(w rlp.EncoderBuffer) {
w.WriteBytes(n.Key)
if n.Val == nil {
- w.Write(rlp.EmptyString)
+ w.Write(rlp.EmptyString) // theoretically impossible to happen
} else if len(n.Val) < 32 {
w.Write(n.Val) // rawNode
} else {
diff --git a/trie/proof.go b/trie/proof.go
index 751d6f620f..53b7acc30c 100644
--- a/trie/proof.go
+++ b/trie/proof.go
@@ -22,6 +22,7 @@ import (
"fmt"
"github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/log"
)
@@ -85,16 +86,9 @@ func (t *Trie) Prove(key []byte, proofDb ethdb.KeyValueWriter) error {
defer returnHasherToPool(hasher)
for i, n := range nodes {
- var hn node
- n, hn = hasher.proofHash(n)
- if hash, ok := hn.(hashNode); ok || i == 0 {
- // If the node's database encoding is a hash (or is the
- // root node), it becomes a proof element.
- enc := nodeToBytes(n)
- if !ok {
- hash = hasher.hashData(enc)
- }
- proofDb.Put(hash, enc)
+ enc := hasher.proofHash(n)
+ if len(enc) >= 32 || i == 0 {
+ proofDb.Put(crypto.Keccak256(enc), enc)
}
}
return nil
diff --git a/trie/trie.go b/trie/trie.go
index fdb4da9be4..222bf8b1f0 100644
--- a/trie/trie.go
+++ b/trie/trie.go
@@ -626,7 +626,7 @@ func (t *Trie) resolveAndTrack(n hashNode, prefix []byte) (node, error) {
// Hash returns the root hash of the trie. It does not write to the
// database and can be used even if the trie doesn't have one.
func (t *Trie) Hash() common.Hash {
- return common.BytesToHash(t.hashRoot().(hashNode))
+ return common.BytesToHash(t.hashRoot())
}
// Commit collects all dirty nodes in the trie and replaces them with the
@@ -677,9 +677,9 @@ func (t *Trie) Commit(collectLeaf bool) (common.Hash, *trienode.NodeSet) {
}
// hashRoot calculates the root hash of the given trie
-func (t *Trie) hashRoot() node {
+func (t *Trie) hashRoot() []byte {
if t.root == nil {
- return hashNode(types.EmptyRootHash.Bytes())
+ return types.EmptyRootHash.Bytes()
}
// If the number of changes is below 100, we let one thread handle it
h := newHasher(t.unhashed >= 100)
diff --git a/trie/trie_test.go b/trie/trie_test.go
index b806ae6b0c..edd85677fe 100644
--- a/trie/trie_test.go
+++ b/trie/trie_test.go
@@ -863,7 +863,6 @@ func (s *spongeDb) Flush() {
s.sponge.Write([]byte(key))
s.sponge.Write([]byte(s.values[key]))
}
- fmt.Println(len(s.keys))
}
// spongeBatch is a dummy batch which immediately writes to the underlying spongedb
diff --git a/triedb/pathdb/history_index.go b/triedb/pathdb/history_index.go
index f79581b38b..e781a898e1 100644
--- a/triedb/pathdb/history_index.go
+++ b/triedb/pathdb/history_index.go
@@ -353,7 +353,7 @@ func (d *indexDeleter) empty() bool {
// pop removes the last written element from the index writer.
func (d *indexDeleter) pop(id uint64) error {
if id == 0 {
- return fmt.Errorf("zero history ID is not valid")
+ return errors.New("zero history ID is not valid")
}
if id != d.lastID {
return fmt.Errorf("pop element out of order, last: %d, this: %d", d.lastID, id)
diff --git a/triedb/pathdb/history_indexer.go b/triedb/pathdb/history_indexer.go
index 42103fab32..054d43e946 100644
--- a/triedb/pathdb/history_indexer.go
+++ b/triedb/pathdb/history_indexer.go
@@ -392,16 +392,17 @@ func (i *indexIniter) run(lastID uint64) {
select {
case signal := <-i.interrupt:
// The indexing limit can only be extended or shortened continuously.
- if signal.newLastID != lastID+1 && signal.newLastID != lastID-1 {
- signal.result <- fmt.Errorf("invalid history id, last: %d, got: %d", lastID, signal.newLastID)
+ newLastID := signal.newLastID
+ if newLastID != lastID+1 && newLastID != lastID-1 {
+ signal.result <- fmt.Errorf("invalid history id, last: %d, got: %d", lastID, newLastID)
continue
}
- i.last.Store(signal.newLastID) // update indexing range
+ i.last.Store(newLastID) // update indexing range
// The index limit is extended by one, update the limit without
// interrupting the current background process.
- if signal.newLastID == lastID+1 {
- lastID = signal.newLastID
+ if newLastID == lastID+1 {
+ lastID = newLastID
signal.result <- nil
log.Debug("Extended state history range", "last", lastID)
continue
@@ -425,7 +426,9 @@ func (i *indexIniter) run(lastID uint64) {
return
}
// Adjust the indexing target and relaunch the process
- lastID = signal.newLastID
+ lastID = newLastID
+ signal.result <- nil
+
done, interrupt = make(chan struct{}), new(atomic.Int32)
go i.index(done, interrupt, lastID)
log.Debug("Shortened state history range", "last", lastID)
diff --git a/triedb/pathdb/history_indexer_test.go b/triedb/pathdb/history_indexer_test.go
new file mode 100644
index 0000000000..abfcafc945
--- /dev/null
+++ b/triedb/pathdb/history_indexer_test.go
@@ -0,0 +1,57 @@
+// Copyright 2025 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package pathdb
+
+import (
+ "testing"
+ "time"
+
+ "github.com/ethereum/go-ethereum/core/rawdb"
+)
+
+// TestHistoryIndexerShortenDeadlock tests that a call to shorten does not
+// deadlock when the indexer is active. This specifically targets the case where
+// signal.result must be sent to unblock the caller.
+func TestHistoryIndexerShortenDeadlock(t *testing.T) {
+ //log.SetDefault(log.NewLogger(log.NewTerminalHandlerWithLevel(os.Stderr, log.LevelInfo, true)))
+ db := rawdb.NewMemoryDatabase()
+ freezer, _ := rawdb.NewStateFreezer(t.TempDir(), false, false)
+ defer freezer.Close()
+
+ histories := makeHistories(100)
+ for i, h := range histories {
+ accountData, storageData, accountIndex, storageIndex := h.encode()
+ rawdb.WriteStateHistory(freezer, uint64(i+1), h.meta.encode(), accountIndex, storageIndex, accountData, storageData)
+ }
+ // As a workaround, assign a future block to keep the initer running indefinitely
+ indexer := newHistoryIndexer(db, freezer, 200)
+ defer indexer.close()
+
+ done := make(chan error, 1)
+ go func() {
+ done <- indexer.shorten(200)
+ }()
+
+ select {
+ case err := <-done:
+ if err != nil {
+ t.Fatalf("shorten returned an unexpected error: %v", err)
+ }
+ case <-time.After(2 * time.Second):
+ t.Fatal("timed out waiting for shorten to complete, potential deadlock")
+ }
+}
diff --git a/version/version.go b/version/version.go
index ab1559d8aa..30c81b804b 100644
--- a/version/version.go
+++ b/version/version.go
@@ -19,6 +19,6 @@ package version
const (
Major = 1 // Major version component of the current release
Minor = 16 // Minor version component of the current release
- Patch = 2 // Patch version component of the current release
+ Patch = 3 // Patch version component of the current release
Meta = "unstable" // Version metadata to append to the version string
)