Merge branch 'master' into release/1.16

This commit is contained in:
Felix Lange 2025-11-03 17:47:20 +01:00
commit 386c3de6c4
78 changed files with 2005 additions and 473 deletions

View file

@ -122,6 +122,27 @@ jobs:
LINUX_SIGNING_KEY: ${{ secrets.LINUX_SIGNING_KEY }}
AZURE_BLOBSTORE_TOKEN: ${{ secrets.AZURE_BLOBSTORE_TOKEN }}
keeper:
name: Keeper Build
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Set up Go
uses: actions/setup-go@v5
with:
go-version: 1.24
cache: false
- name: Install cross toolchain
run: |
apt-get update
apt-get -yq --no-install-suggests --no-install-recommends install gcc-multilib
- name: Build (amd64)
run: |
go run build/ci.go keeper -dlgo
windows:
name: Windows Build
runs-on: "win-11"

View file

@ -34,6 +34,47 @@ jobs:
go run build/ci.go check_generate
go run build/ci.go check_baddeps
keeper:
name: Keeper Builds
needs: test
runs-on: [self-hosted-ghr, size-l-x64]
steps:
- uses: actions/checkout@v4
with:
submodules: true
- name: Set up Go
uses: actions/setup-go@v5
with:
go-version: '1.25'
cache: false
- name: Build
run: go run build/ci.go keeper
test-32bit:
name: "32bit tests"
needs: test
runs-on: [self-hosted-ghr, size-l-x64]
steps:
- uses: actions/checkout@v4
with:
submodules: false
- name: Set up Go
uses: actions/setup-go@v5
with:
go-version: '1.25'
cache: false
- name: Install cross toolchain
run: |
apt-get update
apt-get -yq --no-install-suggests --no-install-recommends install gcc-multilib
- name: Build
run: go run build/ci.go test -arch 386 -short -p 8
test:
name: Test
needs: lint

3
.gitignore vendored
View file

@ -55,4 +55,5 @@ cmd/ethkey/ethkey
cmd/evm/evm
cmd/geth/geth
cmd/rlpdump/rlpdump
cmd/workload/workload
cmd/workload/workload
cmd/keeper/keeper

View file

@ -144,10 +144,9 @@ func TestWaitDeployedCornerCases(t *testing.T) {
done := make(chan struct{})
go func() {
defer close(done)
want := errors.New("context canceled")
_, err := bind.WaitDeployed(ctx, backend.Client(), tx.Hash())
if err == nil || errors.Is(want, err) {
t.Errorf("error mismatch: want %v, got %v", want, err)
if !errors.Is(err, context.Canceled) {
t.Errorf("error mismatch: want %v, got %v", context.Canceled, err)
}
}()

View file

@ -36,4 +36,4 @@ for:
- go run build/ci.go archive -arch %GETH_ARCH% -type zip -signer WINDOWS_SIGNING_KEY -upload gethstore/builds
- go run build/ci.go nsis -arch %GETH_ARCH% -signer WINDOWS_SIGNING_KEY -upload gethstore/builds
test_script:
- go run build/ci.go test -dlgo -arch %GETH_ARCH% -cc %GETH_CC% -short -skip-spectests
- go run build/ci.go test -dlgo -arch %GETH_ARCH% -cc %GETH_CC% -short

View file

@ -108,6 +108,8 @@ func (c *ChainConfig) LoadForks(file []byte) error {
switch version := value.(type) {
case int:
versions[name] = new(big.Int).SetUint64(uint64(version)).FillBytes(make([]byte, 4))
case int64:
versions[name] = new(big.Int).SetUint64(uint64(version)).FillBytes(make([]byte, 4))
case uint64:
versions[name] = new(big.Int).SetUint64(version).FillBytes(make([]byte, 4))
case string:
@ -125,6 +127,8 @@ func (c *ChainConfig) LoadForks(file []byte) error {
switch epoch := value.(type) {
case int:
epochs[name] = uint64(epoch)
case int64:
epochs[name] = uint64(epoch)
case uint64:
epochs[name] = epoch
case string:

View file

@ -31,6 +31,9 @@ Available commands are:
install [ -arch architecture ] [ -cc compiler ] [ packages... ] -- builds packages and executables
test [ -coverage ] [ packages... ] -- runs the tests
keeper [ -dlgo ]
keeper-archive [ -signer key-envvar ] [ -signify key-envvar ] [ -upload dest ]
archive [ -arch architecture ] [ -type zip|tar ] [ -signer key-envvar ] [ -signify key-envvar ] [ -upload dest ] -- archives build artifacts
importkeys -- imports signing keys from env
debsrc [ -signer key-id ] [ -upload dest ] -- creates a debian source package
@ -86,6 +89,30 @@ var (
executablePath("clef"),
}
// Keeper build targets with their configurations
keeperTargets = []struct {
Name string
GOOS string
GOARCH string
CC string
Tags string
Env map[string]string
}{
{
Name: "ziren",
GOOS: "linux",
GOARCH: "mipsle",
// enable when cgo works
// CC: "mipsel-linux-gnu-gcc",
Tags: "ziren",
Env: map[string]string{"GOMIPS": "softfloat", "CGO_ENABLED": "0"},
},
{
Name: "example",
Tags: "example",
},
}
// A debian package is created for all executables listed here.
debExecutables = []debExecutable{
{
@ -178,6 +205,10 @@ func main() {
doPurge(os.Args[2:])
case "sanitycheck":
doSanityCheck()
case "keeper":
doInstallKeeper(os.Args[2:])
case "keeper-archive":
doKeeperArchive(os.Args[2:])
default:
log.Fatal("unknown command ", os.Args[1])
}
@ -212,9 +243,6 @@ func doInstall(cmdline []string) {
// Configure the build.
gobuild := tc.Go("build", buildFlags(env, *staticlink, buildTags)...)
// We use -trimpath to avoid leaking local paths into the built executables.
gobuild.Args = append(gobuild.Args, "-trimpath")
// Show packages during build.
gobuild.Args = append(gobuild.Args, "-v")
@ -234,6 +262,43 @@ func doInstall(cmdline []string) {
}
}
// doInstallKeeper builds keeper binaries for all supported targets.
//
// Supported flags:
//
//	-dlgo: download a pinned Go toolchain (checksummed against
//	       build/checksums.txt) and build with it instead of the host one.
//
// For each entry in keeperTargets the toolchain is configured with the
// target's GOOS/GOARCH/CC settings, the target's extra environment variables
// are applied, and the resulting binary is written to
// executablePath("keeper-<name>").
func doInstallKeeper(cmdline []string) {
	var dlgo = flag.Bool("dlgo", false, "Download Go and build with it")
	flag.CommandLine.Parse(cmdline)
	env := build.Env()

	// Configure the toolchain.
	tc := build.GoToolchain{}
	if *dlgo {
		csdb := download.MustLoadChecksums("build/checksums.txt")
		tc.Root = build.DownloadGo(csdb)
	}
	for _, target := range keeperTargets {
		log.Printf("Building keeper-%s", target.Name)

		// Configure the build. Static linking is forced (second argument true)
		// and the target's build tag selects the runtime implementation.
		tc.GOARCH = target.GOARCH
		tc.GOOS = target.GOOS
		tc.CC = target.CC
		gobuild := tc.Go("build", buildFlags(env, true, []string{target.Tags})...)
		gobuild.Dir = "./cmd/keeper"
		gobuild.Args = append(gobuild.Args, "-v")
		// Apply target-specific environment overrides (e.g. GOMIPS, CGO_ENABLED).
		for key, value := range target.Env {
			gobuild.Env = append(gobuild.Env, key+"="+value)
		}
		// Clone the args before appending the output path so the template
		// command built above is not mutated across loop iterations.
		outputName := fmt.Sprintf("keeper-%s", target.Name)
		args := slices.Clone(gobuild.Args)
		args = append(args, "-o", executablePath(outputName))
		args = append(args, ".")
		build.MustRun(&exec.Cmd{Path: gobuild.Path, Args: args, Env: gobuild.Env})
	}
}
// buildFlags returns the go tool flags for building.
func buildFlags(env build.Environment, staticLinking bool, buildTags []string) (flags []string) {
var ld []string
@ -272,6 +337,8 @@ func buildFlags(env build.Environment, staticLinking bool, buildTags []string) (
if len(buildTags) > 0 {
flags = append(flags, "-tags", strings.Join(buildTags, ","))
}
// We use -trimpath to avoid leaking local paths into the built executables.
flags = append(flags, "-trimpath")
return flags
}
@ -281,16 +348,15 @@ func buildFlags(env build.Environment, staticLinking bool, buildTags []string) (
func doTest(cmdline []string) {
var (
dlgo = flag.Bool("dlgo", false, "Download Go and build with it")
arch = flag.String("arch", "", "Run tests for given architecture")
cc = flag.String("cc", "", "Sets C compiler binary")
coverage = flag.Bool("coverage", false, "Whether to record code coverage")
verbose = flag.Bool("v", false, "Whether to log verbosely")
race = flag.Bool("race", false, "Execute the race detector")
short = flag.Bool("short", false, "Pass the 'short'-flag to go test")
cachedir = flag.String("cachedir", "./build/cache", "directory for caching downloads")
skipspectests = flag.Bool("skip-spectests", false, "Skip downloading execution-spec-tests fixtures")
threads = flag.Int("p", 1, "Number of CPU threads to use for testing")
dlgo = flag.Bool("dlgo", false, "Download Go and build with it")
arch = flag.String("arch", "", "Run tests for given architecture")
cc = flag.String("cc", "", "Sets C compiler binary")
coverage = flag.Bool("coverage", false, "Whether to record code coverage")
verbose = flag.Bool("v", false, "Whether to log verbosely")
race = flag.Bool("race", false, "Execute the race detector")
short = flag.Bool("short", false, "Pass the 'short'-flag to go test")
cachedir = flag.String("cachedir", "./build/cache", "directory for caching downloads")
threads = flag.Int("p", 1, "Number of CPU threads to use for testing")
)
flag.CommandLine.Parse(cmdline)
@ -298,7 +364,7 @@ func doTest(cmdline []string) {
csdb := download.MustLoadChecksums("build/checksums.txt")
// Get test fixtures.
if !*skipspectests {
if !*short {
downloadSpecTestFixtures(csdb, *cachedir)
}
@ -630,6 +696,32 @@ func doArchive(cmdline []string) {
}
}
// doKeeperArchive bundles the previously built keeper binaries into a single
// versioned tar.gz archive and optionally signs and uploads it.
//
// Supported flags:
//
//	-signer:  environment variable holding the signing key
//	-signify: environment variable holding the signify key
//	-upload:  destination to upload the archive to
//
// The archive name is derived from the current commit via version.Archive,
// and COPYING is included alongside one "keeper-<name>" binary per entry in
// keeperTargets. The binaries are assumed to have been produced by a prior
// "keeper" build step.
func doKeeperArchive(cmdline []string) {
	var (
		signer  = flag.String("signer", "", `Environment variable holding the signing key (e.g. LINUX_SIGNING_KEY)`)
		signify = flag.String("signify", "", `Environment variable holding the signify key (e.g. LINUX_SIGNIFY_KEY)`)
		upload  = flag.String("upload", "", `Destination to upload the archives (usually "gethstore/builds")`)
	)
	flag.CommandLine.Parse(cmdline)

	var (
		env    = build.Env()
		vsn    = version.Archive(env.Commit)
		keeper = "keeper-" + vsn + ".tar.gz"
	)
	// Skip archiving entirely for environments that should not publish
	// builds (decided by maybeSkipArchive from the build environment).
	maybeSkipArchive(env)

	files := []string{"COPYING"}
	for _, target := range keeperTargets {
		files = append(files, executablePath(fmt.Sprintf("keeper-%s", target.Name)))
	}
	if err := build.WriteArchive(keeper, files); err != nil {
		log.Fatal(err)
	}
	if err := archiveUpload(keeper, *upload, *signer, *signify); err != nil {
		log.Fatal(err)
	}
}
func archiveBasename(arch string, archiveVersion string) string {
platform := runtime.GOOS + "-" + arch
if arch == "arm" {

View file

@ -121,7 +121,7 @@ with our test chain. The chain files are located in `./cmd/devp2p/internal/ethte
--nat=none \
--networkid 3503995874084926 \
--verbosity 5 \
--authrpc.jwtsecret 0x7365637265747365637265747365637265747365637265747365637265747365
--authrpc.jwtsecret jwt.secret
Note that the tests also require access to the engine API.
The test suite can now be executed using the devp2p tool.
@ -130,7 +130,7 @@ The test suite can now be executed using the devp2p tool.
--chain internal/ethtest/testdata \
--node enode://.... \
--engineapi http://127.0.0.1:8551 \
--jwtsecret 0x7365637265747365637265747365637265747365637265747365637265747365
--jwtsecret $(cat jwt.secret)
Repeat the above process (re-initialising the node) in order to run the Eth Protocol test suite again.

View file

@ -143,9 +143,6 @@ type testParams struct {
func cliTestParams(ctx *cli.Context) *testParams {
nodeStr := ctx.String(testNodeFlag.Name)
if nodeStr == "" {
exit(fmt.Errorf("missing -%s", testNodeFlag.Name))
}
node, err := parseNode(nodeStr)
if err != nil {
exit(err)
@ -156,14 +153,5 @@ func cliTestParams(ctx *cli.Context) *testParams {
jwt: ctx.String(testNodeJWTFlag.Name),
chainDir: ctx.String(testChainDirFlag.Name),
}
if p.engineAPI == "" {
exit(fmt.Errorf("missing -%s", testNodeEngineFlag.Name))
}
if p.jwt == "" {
exit(fmt.Errorf("missing -%s", testNodeJWTFlag.Name))
}
if p.chainDir == "" {
exit(fmt.Errorf("missing -%s", testChainDirFlag.Name))
}
return &p
}

View file

@ -39,26 +39,29 @@ var (
}
// for eth/snap tests
testChainDirFlag = &cli.StringFlag{
testChainDirFlag = &cli.PathFlag{
Name: "chain",
Usage: "Test chain directory (required)",
Category: flags.TestingCategory,
Required: true,
}
testNodeFlag = &cli.StringFlag{
Name: "node",
Usage: "Peer-to-Peer endpoint (ENR) of the test node (required)",
Category: flags.TestingCategory,
Required: true,
}
testNodeJWTFlag = &cli.StringFlag{
Name: "jwtsecret",
Usage: "JWT secret for the engine API of the test node (required)",
Category: flags.TestingCategory,
Value: "0x7365637265747365637265747365637265747365637265747365637265747365",
Required: true,
}
testNodeEngineFlag = &cli.StringFlag{
Name: "engineapi",
Usage: "Engine API endpoint of the test node (required)",
Category: flags.TestingCategory,
Required: true,
}
// These two are specific to the discovery tests.

View file

@ -66,6 +66,7 @@ var (
utils.OverrideBPO1,
utils.OverrideBPO2,
utils.OverrideVerkle,
utils.OverrideGenesisFlag,
utils.EnablePersonal, // deprecated
utils.TxPoolLocalsFlag,
utils.TxPoolNoLocalsFlag,
@ -188,6 +189,8 @@ var (
utils.AllowUnprotectedTxs,
utils.BatchRequestLimit,
utils.BatchResponseMaxSize,
utils.RPCTxSyncDefaultTimeoutFlag,
utils.RPCTxSyncMaxTimeoutFlag,
}
metricsFlags = []cli.Flag{

View file

@ -201,7 +201,7 @@ func expandVerkle(ctx *cli.Context) error {
}
for i, key := range keylist {
log.Info("Reading key", "index", i, "key", keylist[0])
log.Info("Reading key", "index", i, "key", key)
root.Get(key, chaindb.Get)
}

View file

@ -19,7 +19,7 @@
package main
import (
zkruntime "github.com/zkMIPS/zkMIPS/crates/go-runtime/zkm_runtime"
zkruntime "github.com/ProjectZKM/Ziren/crates/go-runtime/zkvm_runtime"
)
// getInput reads the input payload from the zkVM runtime environment.

View file

@ -3,8 +3,8 @@ module github.com/ethereum/go-ethereum/cmd/keeper
go 1.24.0
require (
github.com/ProjectZKM/Ziren/crates/go-runtime/zkvm_runtime v0.0.0-20251001021608-1fe7b43fc4d6
github.com/ethereum/go-ethereum v0.0.0-00010101000000-000000000000
github.com/zkMIPS/zkMIPS/crates/go-runtime/zkm_runtime v0.0.0-20250915074013-fbc07aa2c6f5
)
require (
@ -43,7 +43,4 @@ require (
gopkg.in/yaml.v2 v2.4.0 // indirect
)
replace (
github.com/ethereum/go-ethereum => ../../
github.com/zkMIPS/zkMIPS/crates/go-runtime/zkm_runtime => github.com/weilzkm/zkMIPS/crates/go-runtime/zkvm_runtime v0.0.0-20250915074013-fbc07aa2c6f5
)
replace github.com/ethereum/go-ethereum => ../../

View file

@ -1,5 +1,7 @@
github.com/DataDog/zstd v1.4.5 h1:EndNeuB0l9syBZhut0wns3gV1hL8zX8LIu6ZiVHWLIQ=
github.com/DataDog/zstd v1.4.5/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo=
github.com/ProjectZKM/Ziren/crates/go-runtime/zkvm_runtime v0.0.0-20251001021608-1fe7b43fc4d6 h1:1zYrtlhrZ6/b6SAjLSfKzWtdgqK0U+HtH/VcBWh1BaU=
github.com/ProjectZKM/Ziren/crates/go-runtime/zkvm_runtime v0.0.0-20251001021608-1fe7b43fc4d6/go.mod h1:ioLG6R+5bUSO1oeGSDxOV3FADARuMoytZCSX6MEMQkI=
github.com/StackExchange/wmi v1.2.1 h1:VIkavFPXSjcnS+O8yTq7NI32k0R5Aj+v39y29VYDOSA=
github.com/StackExchange/wmi v1.2.1/go.mod h1:rcmrprowKIVzvc+NUiLncP2uuArMWLCbu9SBzvHz7e8=
github.com/VictoriaMetrics/fastcache v1.13.0 h1:AW4mheMR5Vd9FkAPUv+NH6Nhw+fmbTMGMsNAoA/+4G0=
@ -117,8 +119,6 @@ github.com/tklauser/go-sysconf v0.3.12 h1:0QaGUFOdQaIVdPgfITYzaTegZvdCjmYO52cSFA
github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI=
github.com/tklauser/numcpus v0.6.1 h1:ng9scYS7az0Bk4OZLvrNXNSAO2Pxr1XXRAPyjhIx+Fk=
github.com/tklauser/numcpus v0.6.1/go.mod h1:1XfjsgE2zo8GVw7POkMbHENHzVg3GzmoZ9fESEdAacY=
github.com/weilzkm/zkMIPS/crates/go-runtime/zkvm_runtime v0.0.0-20250915074013-fbc07aa2c6f5 h1:MxKlbmI7Dta6O6Nsc9OAer/rOltjoL11CVLMqCiYnxU=
github.com/weilzkm/zkMIPS/crates/go-runtime/zkvm_runtime v0.0.0-20250915074013-fbc07aa2c6f5/go.mod h1:zk/SUgiiVz2U1ufZ+yM2MHPbD93W25KH5zK3qAxXbT4=
golang.org/x/crypto v0.36.0 h1:AnAEvhDddvBdpY+uR+MyHmuZzzNqXSe/GvuDeob5L34=
golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc=
golang.org/x/exp v0.0.0-20230626212559-97b1e661b5df h1:UA2aFVmmsIlefxMk29Dp2juaUSth8Pyn3Tq5Y5mJGME=

View file

@ -25,6 +25,7 @@ import (
"errors"
"fmt"
"io"
"math"
"math/big"
"os"
"os/signal"
@ -311,7 +312,7 @@ func ImportHistory(chain *core.BlockChain, dir string, network string) error {
return fmt.Errorf("error reading receipts %d: %w", it.Number(), err)
}
encReceipts := types.EncodeBlockReceiptLists([]types.Receipts{receipts})
if _, err := chain.InsertReceiptChain([]*types.Block{block}, encReceipts, 2^64-1); err != nil {
if _, err := chain.InsertReceiptChain([]*types.Block{block}, encReceipts, math.MaxUint64); err != nil {
return fmt.Errorf("error inserting body %d: %w", it.Number(), err)
}
imported += 1

View file

@ -20,7 +20,6 @@ package utils
import (
"context"
"crypto/ecdsa"
"encoding/hex"
"encoding/json"
"errors"
"fmt"
@ -262,6 +261,11 @@ var (
Usage: "Manually specify the Verkle fork timestamp, overriding the bundled setting",
Category: flags.EthCategory,
}
OverrideGenesisFlag = &cli.StringFlag{
Name: "override.genesis",
Usage: "Load genesis block and configuration from file at this path",
Category: flags.EthCategory,
}
SyncModeFlag = &cli.StringFlag{
Name: "syncmode",
Usage: `Blockchain sync mode ("snap" or "full")`,
@ -615,6 +619,18 @@ var (
Value: ethconfig.Defaults.LogQueryLimit,
Category: flags.APICategory,
}
RPCTxSyncDefaultTimeoutFlag = &cli.DurationFlag{
Name: "rpc.txsync.defaulttimeout",
Usage: "Default timeout for eth_sendRawTransactionSync (e.g. 2s, 500ms)",
Value: ethconfig.Defaults.TxSyncDefaultTimeout,
Category: flags.APICategory,
}
RPCTxSyncMaxTimeoutFlag = &cli.DurationFlag{
Name: "rpc.txsync.maxtimeout",
Usage: "Maximum allowed timeout for eth_sendRawTransactionSync (e.g. 5m)",
Value: ethconfig.Defaults.TxSyncMaxTimeout,
Category: flags.APICategory,
}
// Authenticated RPC HTTP settings
AuthListenFlag = &cli.StringFlag{
Name: "authrpc.addr",
@ -1324,15 +1340,10 @@ func setEtherbase(ctx *cli.Context, cfg *ethconfig.Config) {
return
}
addr := ctx.String(MinerPendingFeeRecipientFlag.Name)
if strings.HasPrefix(addr, "0x") || strings.HasPrefix(addr, "0X") {
addr = addr[2:]
}
b, err := hex.DecodeString(addr)
if err != nil || len(b) != common.AddressLength {
if !common.IsHexAddress(addr) {
Fatalf("-%s: invalid pending block producer address %q", MinerPendingFeeRecipientFlag.Name, addr)
return
}
cfg.Miner.PendingFeeRecipient = common.BytesToAddress(b)
cfg.Miner.PendingFeeRecipient = common.HexToAddress(addr)
}
func SetP2PConfig(ctx *cli.Context, cfg *p2p.Config) {
@ -1593,7 +1604,7 @@ func setRequiredBlocks(ctx *cli.Context, cfg *ethconfig.Config) {
// SetEthConfig applies eth-related command line flags to the config.
func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) {
// Avoid conflicting network flags, don't allow network id override on preset networks
flags.CheckExclusive(ctx, MainnetFlag, DeveloperFlag, SepoliaFlag, HoleskyFlag, HoodiFlag, NetworkIdFlag)
flags.CheckExclusive(ctx, MainnetFlag, DeveloperFlag, SepoliaFlag, HoleskyFlag, HoodiFlag, NetworkIdFlag, OverrideGenesisFlag)
flags.CheckExclusive(ctx, DeveloperFlag, ExternalSignerFlag) // Can't use both ephemeral unlocked and external signer
// Set configurations from CLI flags
@ -1717,6 +1728,12 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) {
if ctx.IsSet(RPCGlobalLogQueryLimit.Name) {
cfg.LogQueryLimit = ctx.Int(RPCGlobalLogQueryLimit.Name)
}
if ctx.IsSet(RPCTxSyncDefaultTimeoutFlag.Name) {
cfg.TxSyncDefaultTimeout = ctx.Duration(RPCTxSyncDefaultTimeoutFlag.Name)
}
if ctx.IsSet(RPCTxSyncMaxTimeoutFlag.Name) {
cfg.TxSyncMaxTimeout = ctx.Duration(RPCTxSyncMaxTimeoutFlag.Name)
}
if !ctx.Bool(SnapshotFlag.Name) || cfg.SnapshotCache == 0 {
// If snap-sync is requested, this flag is also required
if cfg.SyncMode == ethconfig.SnapSync {
@ -1873,6 +1890,18 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) {
if !ctx.IsSet(MinerGasPriceFlag.Name) {
cfg.Miner.GasPrice = big.NewInt(1)
}
case ctx.String(OverrideGenesisFlag.Name) != "":
f, err := os.Open(ctx.String(OverrideGenesisFlag.Name))
if err != nil {
Fatalf("Failed to read genesis file: %v", err)
}
defer f.Close()
genesis := new(core.Genesis)
if err := json.NewDecoder(f).Decode(genesis); err != nil {
Fatalf("Invalid genesis file: %v", err)
}
cfg.Genesis = genesis
default:
if cfg.NetworkId == 1 {
SetDNSDiscoveryDefaults(cfg, params.MainnetGenesisHash)

View file

@ -69,7 +69,7 @@ func (t PrettyAge) String() string {
result, prec := "", 0
for _, unit := range ageUnits {
if diff > unit.Size {
if diff >= unit.Size {
result = fmt.Sprintf("%s%d%s", result, diff/unit.Size, unit.Symbol)
diff %= unit.Size

View file

@ -17,6 +17,8 @@
package common
import (
"errors"
"io/fs"
"os"
"path/filepath"
)
@ -24,10 +26,7 @@ import (
// FileExist checks if a file exists at filePath.
func FileExist(filePath string) bool {
_, err := os.Stat(filePath)
if err != nil && os.IsNotExist(err) {
return false
}
return true
return !errors.Is(err, fs.ErrNotExist)
}
// AbsolutePath returns datadir + filename, or filename if it is absolute.
@ -37,3 +36,14 @@ func AbsolutePath(datadir string, filename string) string {
}
return filepath.Join(datadir, filename)
}
// IsNonEmptyDir checks if a directory exists and is non-empty.
func IsNonEmptyDir(dir string) bool {
f, err := os.Open(dir)
if err != nil {
return false
}
defer f.Close()
names, _ := f.Readdirnames(1)
return len(names) > 0
}

View file

@ -76,10 +76,16 @@ func TestCreation(t *testing.T) {
{20000000, 1681338454, ID{Hash: checksumToBytes(0xf0afd0e3), Next: 1681338455}}, // Last Gray Glacier block
{20000000, 1681338455, ID{Hash: checksumToBytes(0xdce96c2d), Next: 1710338135}}, // First Shanghai block
{30000000, 1710338134, ID{Hash: checksumToBytes(0xdce96c2d), Next: 1710338135}}, // Last Shanghai block
{40000000, 1710338135, ID{Hash: checksumToBytes(0x9f3d2254), Next: 1746612311}}, // First Cancun block
{30000000, 1710338135, ID{Hash: checksumToBytes(0x9f3d2254), Next: 1746612311}}, // First Cancun block
{30000000, 1746022486, ID{Hash: checksumToBytes(0x9f3d2254), Next: 1746612311}}, // Last Cancun block
{30000000, 1746612311, ID{Hash: checksumToBytes(0xc376cf8b), Next: 0}}, // First Prague block
{50000000, 2000000000, ID{Hash: checksumToBytes(0xc376cf8b), Next: 0}}, // Future Prague block
{30000000, 1746612311, ID{Hash: checksumToBytes(0xc376cf8b), Next: 1764798551}}, // First Prague block
{30000000, 1764798550, ID{Hash: checksumToBytes(0xc376cf8b), Next: 1764798551}}, // Last Prague block
{30000000, 1764798551, ID{Hash: checksumToBytes(0x5167e2a6), Next: 1765290071}}, // First Osaka block
{30000000, 1765290070, ID{Hash: checksumToBytes(0x5167e2a6), Next: 1765290071}}, // Last Osaka block
{30000000, 1765290071, ID{Hash: checksumToBytes(0xcba2a1c0), Next: 1767747671}}, // First BPO1 block
{30000000, 1767747670, ID{Hash: checksumToBytes(0xcba2a1c0), Next: 1767747671}}, // Last BPO1 block
{30000000, 1767747671, ID{Hash: checksumToBytes(0x07c9462e), Next: 0}}, // First BPO2 block
{50000000, 2000000000, ID{Hash: checksumToBytes(0x07c9462e), Next: 0}}, // Future BPO2 block
},
},
// Sepolia test cases
@ -162,6 +168,9 @@ func TestValidation(t *testing.T) {
legacyConfig.ShanghaiTime = nil
legacyConfig.CancunTime = nil
legacyConfig.PragueTime = nil
legacyConfig.OsakaTime = nil
legacyConfig.BPO1Time = nil
legacyConfig.BPO2Time = nil
tests := []struct {
config *params.ChainConfig
@ -361,11 +370,11 @@ func TestValidation(t *testing.T) {
// Local is mainnet Shanghai, remote is random Shanghai.
{params.MainnetChainConfig, 20000000, 1681338455, ID{Hash: checksumToBytes(0x12345678), Next: 0}, ErrLocalIncompatibleOrStale},
// Local is mainnet Prague, far in the future. Remote announces Gopherium (non existing fork)
// Local is mainnet BPO2, far in the future. Remote announces Gopherium (non existing fork)
// at some future timestamp 8888888888, for itself, but past block for local. Local is incompatible.
//
// This case detects non-upgraded nodes with majority hash power (typical Ropsten mess).
{params.MainnetChainConfig, 88888888, 8888888888, ID{Hash: checksumToBytes(0xc376cf8b), Next: 8888888888}, ErrLocalIncompatibleOrStale},
{params.MainnetChainConfig, 88888888, 8888888888, ID{Hash: checksumToBytes(0x07c9462e), Next: 8888888888}, ErrLocalIncompatibleOrStale},
// Local is mainnet Shanghai. Remote is also in Shanghai, but announces Gopherium (non existing
// fork) at timestamp 1668000000, before Cancun. Local is incompatible.

View file

@ -669,23 +669,24 @@ func DeveloperGenesisBlock(gasLimit uint64, faucet *common.Address) *Genesis {
BaseFee: big.NewInt(params.InitialBaseFee),
Difficulty: big.NewInt(0),
Alloc: map[common.Address]types.Account{
common.BytesToAddress([]byte{0x01}): {Balance: big.NewInt(1)}, // ECRecover
common.BytesToAddress([]byte{0x02}): {Balance: big.NewInt(1)}, // SHA256
common.BytesToAddress([]byte{0x03}): {Balance: big.NewInt(1)}, // RIPEMD
common.BytesToAddress([]byte{0x04}): {Balance: big.NewInt(1)}, // Identity
common.BytesToAddress([]byte{0x05}): {Balance: big.NewInt(1)}, // ModExp
common.BytesToAddress([]byte{0x06}): {Balance: big.NewInt(1)}, // ECAdd
common.BytesToAddress([]byte{0x07}): {Balance: big.NewInt(1)}, // ECScalarMul
common.BytesToAddress([]byte{0x08}): {Balance: big.NewInt(1)}, // ECPairing
common.BytesToAddress([]byte{0x09}): {Balance: big.NewInt(1)}, // BLAKE2b
common.BytesToAddress([]byte{0x0a}): {Balance: big.NewInt(1)}, // KZGPointEval
common.BytesToAddress([]byte{0x0b}): {Balance: big.NewInt(1)}, // BLSG1Add
common.BytesToAddress([]byte{0x0c}): {Balance: big.NewInt(1)}, // BLSG1MultiExp
common.BytesToAddress([]byte{0x0d}): {Balance: big.NewInt(1)}, // BLSG2Add
common.BytesToAddress([]byte{0x0e}): {Balance: big.NewInt(1)}, // BLSG2MultiExp
common.BytesToAddress([]byte{0x0f}): {Balance: big.NewInt(1)}, // BLSG1Pairing
common.BytesToAddress([]byte{0x10}): {Balance: big.NewInt(1)}, // BLSG1MapG1
common.BytesToAddress([]byte{0x11}): {Balance: big.NewInt(1)}, // BLSG2MapG2
common.BytesToAddress([]byte{0x01}): {Balance: big.NewInt(1)}, // ECRecover
common.BytesToAddress([]byte{0x02}): {Balance: big.NewInt(1)}, // SHA256
common.BytesToAddress([]byte{0x03}): {Balance: big.NewInt(1)}, // RIPEMD
common.BytesToAddress([]byte{0x04}): {Balance: big.NewInt(1)}, // Identity
common.BytesToAddress([]byte{0x05}): {Balance: big.NewInt(1)}, // ModExp
common.BytesToAddress([]byte{0x06}): {Balance: big.NewInt(1)}, // ECAdd
common.BytesToAddress([]byte{0x07}): {Balance: big.NewInt(1)}, // ECScalarMul
common.BytesToAddress([]byte{0x08}): {Balance: big.NewInt(1)}, // ECPairing
common.BytesToAddress([]byte{0x09}): {Balance: big.NewInt(1)}, // BLAKE2b
common.BytesToAddress([]byte{0x0a}): {Balance: big.NewInt(1)}, // KZGPointEval
common.BytesToAddress([]byte{0x0b}): {Balance: big.NewInt(1)}, // BLSG1Add
common.BytesToAddress([]byte{0x0c}): {Balance: big.NewInt(1)}, // BLSG1MultiExp
common.BytesToAddress([]byte{0x0d}): {Balance: big.NewInt(1)}, // BLSG2Add
common.BytesToAddress([]byte{0x0e}): {Balance: big.NewInt(1)}, // BLSG2MultiExp
common.BytesToAddress([]byte{0x0f}): {Balance: big.NewInt(1)}, // BLSG1Pairing
common.BytesToAddress([]byte{0x10}): {Balance: big.NewInt(1)}, // BLSG1MapG1
common.BytesToAddress([]byte{0x11}): {Balance: big.NewInt(1)}, // BLSG2MapG2
common.BytesToAddress([]byte{0x1, 00}): {Balance: big.NewInt(1)}, // P256Verify
// Pre-deploy system contracts
params.BeaconRootsAddress: {Nonce: 1, Code: params.BeaconRootsCode, Balance: common.Big0},
params.HistoryStorageAddress: {Nonce: 1, Code: params.HistoryStorageCode, Balance: common.Big0},

View file

@ -313,13 +313,13 @@ func ReadTrienodeHistoryHeader(db ethdb.AncientReaderOp, id uint64) ([]byte, err
}
// ReadTrienodeHistoryKeySection retrieves the key section of trienode history.
func ReadTrienodeHistoryKeySection(db ethdb.AncientReaderOp, id uint64) ([]byte, error) {
return db.Ancient(trienodeHistoryKeySectionTable, id-1)
func ReadTrienodeHistoryKeySection(db ethdb.AncientReaderOp, id uint64, offset uint64, length uint64) ([]byte, error) {
return db.AncientBytes(trienodeHistoryKeySectionTable, id-1, offset, length)
}
// ReadTrienodeHistoryValueSection retrieves the value section of trienode history.
func ReadTrienodeHistoryValueSection(db ethdb.AncientReaderOp, id uint64) ([]byte, error) {
return db.Ancient(trienodeHistoryValueSectionTable, id-1)
func ReadTrienodeHistoryValueSection(db ethdb.AncientReaderOp, id uint64, offset uint64, length uint64) ([]byte, error) {
return db.AncientBytes(trienodeHistoryValueSectionTable, id-1, offset, length)
}
// ReadTrienodeHistoryList retrieves a list of trienode history corresponding

View file

@ -105,6 +105,23 @@ func inspectFreezers(db ethdb.Database) ([]freezerInfo, error) {
}
infos = append(infos, info)
case MerkleTrienodeFreezerName, VerkleTrienodeFreezerName:
datadir, err := db.AncientDatadir()
if err != nil {
return nil, err
}
f, err := NewTrienodeFreezer(datadir, freezer == VerkleTrienodeFreezerName, true)
if err != nil {
continue // the trienode freezer may not exist yet
}
defer f.Close()
info, err := inspect(freezer, trienodeFreezerTableConfigs, f)
if err != nil {
return nil, err
}
infos = append(infos, info)
default:
return nil, fmt.Errorf("unknown freezer, supported ones: %v", freezers)
}

View file

@ -177,7 +177,7 @@ func resolveChainFreezerDir(ancient string) string {
// - chain freezer exists in legacy location (root ancient folder)
freezer := filepath.Join(ancient, ChainFreezerName)
if !common.FileExist(freezer) {
if !common.FileExist(ancient) {
if !common.FileExist(ancient) || !common.IsNonEmptyDir(ancient) {
// The entire ancient store is not initialized, still use the sub
// folder for initialization.
} else {

View file

@ -61,9 +61,10 @@ func newAccessList() *accessList {
// Copy creates an independent copy of an accessList.
func (al *accessList) Copy() *accessList {
cp := newAccessList()
cp.addresses = maps.Clone(al.addresses)
cp.slots = make([]map[common.Hash]struct{}, len(al.slots))
cp := &accessList{
addresses: maps.Clone(al.addresses),
slots: make([]map[common.Hash]struct{}, len(al.slots)),
}
for i, slotMap := range al.slots {
cp.slots[i] = maps.Clone(slotMap)
}

View file

@ -302,6 +302,8 @@ func mustCopyTrie(t Trie) Trie {
return t.Copy()
case *trie.VerkleTrie:
return t.Copy()
case *trie.TransitionTrie:
return t.Copy()
default:
panic(fmt.Errorf("unknown trie type %T", t))
}

View file

@ -28,6 +28,7 @@ import (
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/trie"
"github.com/ethereum/go-ethereum/trie/trienode"
"github.com/holiman/uint256"
)
@ -494,8 +495,20 @@ func (s *stateObject) deepCopy(db *StateDB) *stateObject {
selfDestructed: s.selfDestructed,
newContract: s.newContract,
}
if s.trie != nil {
switch s.trie.(type) {
case *trie.VerkleTrie:
// Verkle uses only one tree, and the copy has already been
// made in mustCopyTrie.
obj.trie = db.trie
case *trie.TransitionTrie:
// Same thing for the transition tree, since the MPT is
// read-only.
obj.trie = db.trie
case *trie.StateTrie:
obj.trie = mustCopyTrie(s.trie)
case nil:
// do nothing
}
return obj
}

View file

@ -94,6 +94,14 @@ func TestSizeTracker(t *testing.T) {
}
baselineRoot := currentRoot
// Close and reopen the trie database so all async flushes triggered by the
// baseline commits are written before we measure the baseline snapshot.
if err := tdb.Close(); err != nil {
t.Fatalf("Failed to close triedb before baseline measurement: %v", err)
}
tdb = triedb.NewDatabase(db, &triedb.Config{PathDB: pathdb.Defaults})
sdb = NewDatabase(tdb, nil)
// Wait for snapshot completion
for !tdb.SnapshotCompleted() {
time.Sleep(100 * time.Millisecond)
@ -215,13 +223,12 @@ func TestSizeTracker(t *testing.T) {
if actualStats.ContractCodeBytes != expectedStats.ContractCodeBytes {
t.Errorf("Contract code bytes mismatch: expected %d, got %d", expectedStats.ContractCodeBytes, actualStats.ContractCodeBytes)
}
// TODO: failed on github actions, need to investigate
// if actualStats.AccountTrienodes != expectedStats.AccountTrienodes {
// t.Errorf("Account trie nodes mismatch: expected %d, got %d", expectedStats.AccountTrienodes, actualStats.AccountTrienodes)
// }
// if actualStats.AccountTrienodeBytes != expectedStats.AccountTrienodeBytes {
// t.Errorf("Account trie node bytes mismatch: expected %d, got %d", expectedStats.AccountTrienodeBytes, actualStats.AccountTrienodeBytes)
// }
if actualStats.AccountTrienodes != expectedStats.AccountTrienodes {
t.Errorf("Account trie nodes mismatch: expected %d, got %d", expectedStats.AccountTrienodes, actualStats.AccountTrienodes)
}
if actualStats.AccountTrienodeBytes != expectedStats.AccountTrienodeBytes {
t.Errorf("Account trie node bytes mismatch: expected %d, got %d", expectedStats.AccountTrienodeBytes, actualStats.AccountTrienodeBytes)
}
if actualStats.StorageTrienodes != expectedStats.StorageTrienodes {
t.Errorf("Storage trie nodes mismatch: expected %d, got %d", expectedStats.StorageTrienodes, actualStats.StorageTrienodes)
}

View file

@ -191,17 +191,18 @@ func (s *hookedStateDB) SetNonce(address common.Address, nonce uint64, reason tr
func (s *hookedStateDB) SetCode(address common.Address, code []byte, reason tracing.CodeChangeReason) []byte {
prev := s.inner.SetCode(address, code, reason)
if s.hooks.OnCodeChangeV2 != nil || s.hooks.OnCodeChange != nil {
prevHash := types.EmptyCodeHash
if len(prev) != 0 {
prevHash = crypto.Keccak256Hash(prev)
}
prevHash := crypto.Keccak256Hash(prev)
codeHash := crypto.Keccak256Hash(code)
if s.hooks.OnCodeChangeV2 != nil {
s.hooks.OnCodeChangeV2(address, prevHash, prev, codeHash, code, reason)
} else if s.hooks.OnCodeChange != nil {
s.hooks.OnCodeChange(address, prevHash, prev, codeHash, code)
// Invoke the hooks only if the contract code is changed
if prevHash != codeHash {
if s.hooks.OnCodeChangeV2 != nil {
s.hooks.OnCodeChangeV2(address, prevHash, prev, codeHash, code, reason)
} else if s.hooks.OnCodeChange != nil {
s.hooks.OnCodeChange(address, prevHash, prev, codeHash, code)
}
}
}
return prev

View file

@ -992,7 +992,7 @@ func TestOpenCap(t *testing.T) {
storage := t.TempDir()
os.MkdirAll(filepath.Join(storage, pendingTransactionStore), 0700)
store, _ := billy.Open(billy.Options{Path: filepath.Join(storage, pendingTransactionStore)}, newSlotter(testMaxBlobsPerBlock), nil)
store, _ := billy.Open(billy.Options{Path: filepath.Join(storage, pendingTransactionStore)}, newSlotterEIP7594(testMaxBlobsPerBlock), nil)
// Insert a few transactions from a few accounts
var (
@ -1014,7 +1014,7 @@ func TestOpenCap(t *testing.T) {
keep = []common.Address{addr1, addr3}
drop = []common.Address{addr2}
size = uint64(2 * (txAvgSize + blobSize))
size = 2 * (txAvgSize + blobSize + uint64(txBlobOverhead))
)
store.Put(blob1)
store.Put(blob2)
@ -1023,7 +1023,7 @@ func TestOpenCap(t *testing.T) {
// Verify pool capping twice: first by reducing the data cap, then restarting
// with a high cap to ensure everything was persisted previously
for _, datacap := range []uint64{2 * (txAvgSize + blobSize), 100 * (txAvgSize + blobSize)} {
for _, datacap := range []uint64{2 * (txAvgSize + blobSize + uint64(txBlobOverhead)), 1000 * (txAvgSize + blobSize + uint64(txBlobOverhead))} {
// Create a blob pool out of the pre-seeded data, but cap it to 2 blob transaction
statedb, _ := state.New(types.EmptyRootHash, state.NewDatabaseForTesting())
statedb.AddBalance(addr1, uint256.NewInt(1_000_000_000), tracing.BalanceChangeUnspecified)

View file

@ -117,6 +117,25 @@ func (journal *journal) load(add func([]*types.Transaction) []error) error {
return failure
}
// setupWriter (re)opens the journal file for appending new transactions.
// Any previously held writer is closed first so the old file handle is
// not leaked; on success journal.writer points at the freshly opened file.
// Returns an error if closing the previous writer or opening the file fails.
func (journal *journal) setupWriter() error {
	if journal.writer != nil {
		if err := journal.writer.Close(); err != nil {
			return err
		}
		journal.writer = nil
	}
	// Re-open the journal file for appending
	// Use O_APPEND to ensure we always write to the end of the file
	sink, err := os.OpenFile(journal.path, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0644)
	if err != nil {
		return err
	}
	journal.writer = sink
	return nil
}
// insert adds the specified transaction to the local disk journal.
func (journal *journal) insert(tx *types.Transaction) error {
if journal.writer == nil {
@ -177,7 +196,6 @@ func (journal *journal) rotate(all map[common.Address]types.Transactions) error
// close flushes the transaction journal contents to disk and closes the file.
func (journal *journal) close() error {
var err error
if journal.writer != nil {
err = journal.writer.Close()
journal.writer = nil

View file

@ -114,13 +114,14 @@ func (tracker *TxTracker) TrackAll(txs []*types.Transaction) {
}
// recheck checks and returns any transactions that needs to be resubmitted.
func (tracker *TxTracker) recheck(journalCheck bool) (resubmits []*types.Transaction, rejournal map[common.Address]types.Transactions) {
func (tracker *TxTracker) recheck(journalCheck bool) []*types.Transaction {
tracker.mu.Lock()
defer tracker.mu.Unlock()
var (
numStales = 0
numOk = 0
resubmits []*types.Transaction
)
for sender, txs := range tracker.byAddr {
// Wipe the stales
@ -141,7 +142,7 @@ func (tracker *TxTracker) recheck(journalCheck bool) (resubmits []*types.Transac
}
if journalCheck { // rejournal
rejournal = make(map[common.Address]types.Transactions)
rejournal := make(map[common.Address]types.Transactions)
for _, tx := range tracker.all {
addr, _ := types.Sender(tracker.signer, tx)
rejournal[addr] = append(rejournal[addr], tx)
@ -153,10 +154,18 @@ func (tracker *TxTracker) recheck(journalCheck bool) (resubmits []*types.Transac
return int(a.Nonce() - b.Nonce())
})
}
// Rejournal the tracker while holding the lock. No new transactions will
// be added to the old journal during this period, preventing any potential
// transaction loss.
if tracker.journal != nil {
if err := tracker.journal.rotate(rejournal); err != nil {
log.Warn("Transaction journal rotation failed", "err", err)
}
}
}
localGauge.Update(int64(len(tracker.all)))
log.Debug("Tx tracker status", "need-resubmit", len(resubmits), "stale", numStales, "ok", numOk)
return resubmits, rejournal
return resubmits
}
// Start implements node.Lifecycle interface
@ -185,6 +194,12 @@ func (tracker *TxTracker) loop() {
tracker.TrackAll(transactions)
return nil
})
// Setup the writer for the upcoming transactions
if err := tracker.journal.setupWriter(); err != nil {
log.Error("Failed to setup the journal writer", "err", err)
return
}
defer tracker.journal.close()
}
var (
@ -196,20 +211,15 @@ func (tracker *TxTracker) loop() {
case <-tracker.shutdownCh:
return
case <-timer.C:
checkJournal := tracker.journal != nil && time.Since(lastJournal) > tracker.rejournal
resubmits, rejournal := tracker.recheck(checkJournal)
var rejournal bool
if tracker.journal != nil && time.Since(lastJournal) > tracker.rejournal {
rejournal, lastJournal = true, time.Now()
log.Debug("Rejournal the transaction tracker")
}
resubmits := tracker.recheck(rejournal)
if len(resubmits) > 0 {
tracker.pool.Add(resubmits, false)
}
if checkJournal {
// Lock to prevent journal.rotate <-> journal.insert (via TrackAll) conflicts
tracker.mu.Lock()
lastJournal = time.Now()
if err := tracker.journal.rotate(rejournal); err != nil {
log.Warn("Transaction journal rotation failed", "err", err)
}
tracker.mu.Unlock()
}
timer.Reset(recheckInterval)
}
}

View file

@ -17,7 +17,11 @@
package locals
import (
"fmt"
"maps"
"math/big"
"math/rand"
"path/filepath"
"testing"
"time"
@ -146,20 +150,59 @@ func TestResubmit(t *testing.T) {
txsA := txs[:len(txs)/2]
txsB := txs[len(txs)/2:]
env.pool.Add(txsA, true)
pending, queued := env.pool.ContentFrom(address)
if len(pending) != len(txsA) || len(queued) != 0 {
t.Fatalf("Unexpected txpool content: %d, %d", len(pending), len(queued))
}
env.tracker.TrackAll(txs)
resubmit, all := env.tracker.recheck(true)
resubmit := env.tracker.recheck(true)
if len(resubmit) != len(txsB) {
t.Fatalf("Unexpected transactions to resubmit, got: %d, want: %d", len(resubmit), len(txsB))
}
if len(all) == 0 || len(all[address]) == 0 {
t.Fatalf("Unexpected transactions being tracked, got: %d, want: %d", 0, len(txs))
}
if len(all[address]) != len(txs) {
t.Fatalf("Unexpected transactions being tracked, got: %d, want: %d", len(all[address]), len(txs))
env.tracker.mu.Lock()
allCopy := maps.Clone(env.tracker.all)
env.tracker.mu.Unlock()
if len(allCopy) != len(txs) {
t.Fatalf("Unexpected transactions being tracked, got: %d, want: %d", len(allCopy), len(txs))
}
}
// TestJournal verifies that tracked transactions survive a journal rotation:
// after recheck(true) rewrites the journal on disk, a brand-new tracker
// loading the same journal file must recover every tracked transaction.
func TestJournal(t *testing.T) {
	journalPath := filepath.Join(t.TempDir(), fmt.Sprintf("%d", rand.Int63()))
	env := newTestEnv(t, 10, 0, journalPath)
	defer env.close()
	env.tracker.Start()
	defer env.tracker.Stop()

	// Split the transactions: only the first half enters the pool, but the
	// tracker is told about both halves.
	txs := env.makeTxs(10)
	txsA := txs[:len(txs)/2]
	txsB := txs[len(txs)/2:]

	env.pool.Add(txsA, true)
	pending, queued := env.pool.ContentFrom(address)
	if len(pending) != len(txsA) || len(queued) != 0 {
		t.Fatalf("Unexpected txpool content: %d, %d", len(pending), len(queued))
	}
	env.tracker.TrackAll(txsA)
	env.tracker.TrackAll(txsB)
	env.tracker.recheck(true) // manually rejournal the tracker

	// Make sure all the transactions are properly journalled
	trackerB := New(journalPath, time.Minute, gspec.Config, env.pool)
	trackerB.journal.load(func(transactions []*types.Transaction) []error {
		trackerB.TrackAll(transactions)
		return nil
	})
	// Snapshot the tracked set under the lock to avoid racing the tracker loop.
	trackerB.mu.Lock()
	allCopy := maps.Clone(trackerB.all)
	trackerB.mu.Unlock()
	if len(allCopy) != len(txs) {
		t.Fatalf("Unexpected transactions being tracked, got: %d, want: %d", len(allCopy), len(txs))
	}
}

View file

@ -34,7 +34,6 @@ import (
var (
ErrInvalidSig = errors.New("invalid transaction v, r, s values")
ErrUnexpectedProtection = errors.New("transaction type does not supported EIP-155 protected signatures")
ErrInvalidTxType = errors.New("transaction type not valid in this context")
ErrTxTypeNotSupported = errors.New("transaction type not supported")
ErrGasFeeCapTooLow = errors.New("fee cap less than base fee")
ErrUint256Overflow = errors.New("bigint overflow, too large for uint256")
@ -648,7 +647,7 @@ func TxDifference(a, b Transactions) Transactions {
func HashDifference(a, b []common.Hash) []common.Hash {
keep := make([]common.Hash, 0, len(a))
remove := make(map[common.Hash]struct{})
remove := make(map[common.Hash]struct{}, len(b))
for _, hash := range b {
remove[hash] = struct{}{}
}

View file

@ -20,7 +20,6 @@ import (
"crypto/ecdsa"
"errors"
"fmt"
"maps"
"math/big"
"github.com/ethereum/go-ethereum/common"
@ -183,18 +182,31 @@ type Signer interface {
// modernSigner is the signer implementation that handles non-legacy transaction types.
// For legacy transactions, it defers to one of the legacy signers (frontier, homestead, eip155).
type modernSigner struct {
txtypes map[byte]struct{}
txtypes txtypeSet
chainID *big.Int
legacy Signer
}
// txtypeSet is a bitmap for transaction types. It covers the type range
// [0, 128), which spans every valid EIP-2718 envelope type (types are
// restricted to values below 0x80).
type txtypeSet [2]uint64

// set marks the given transaction type as supported. Types outside the
// representable range are ignored, mirroring has, which reports such
// types as unsupported. (Previously an out-of-range type would panic
// with an index-out-of-range here while has handled it gracefully.)
func (v *txtypeSet) set(txType byte) {
	if txType >= byte(len(v)*64) {
		return
	}
	v[txType/64] |= 1 << (txType % 64)
}

// has reports whether the given transaction type is in the set. Types
// outside the representable range are never present.
func (v *txtypeSet) has(txType byte) bool {
	if txType >= byte(len(v)*64) {
		return false
	}
	return v[txType/64]&(1<<(txType%64)) != 0
}
func newModernSigner(chainID *big.Int, fork forks.Fork) Signer {
if chainID == nil || chainID.Sign() <= 0 {
panic(fmt.Sprintf("invalid chainID %v", chainID))
}
s := &modernSigner{
chainID: chainID,
txtypes: make(map[byte]struct{}, 4),
}
// configure legacy signer
switch {
@ -205,19 +217,19 @@ func newModernSigner(chainID *big.Int, fork forks.Fork) Signer {
default:
s.legacy = FrontierSigner{}
}
s.txtypes[LegacyTxType] = struct{}{}
s.txtypes.set(LegacyTxType)
// configure tx types
if fork >= forks.Berlin {
s.txtypes[AccessListTxType] = struct{}{}
s.txtypes.set(AccessListTxType)
}
if fork >= forks.London {
s.txtypes[DynamicFeeTxType] = struct{}{}
s.txtypes.set(DynamicFeeTxType)
}
if fork >= forks.Cancun {
s.txtypes[BlobTxType] = struct{}{}
s.txtypes.set(BlobTxType)
}
if fork >= forks.Prague {
s.txtypes[SetCodeTxType] = struct{}{}
s.txtypes.set(SetCodeTxType)
}
return s
}
@ -228,7 +240,7 @@ func (s *modernSigner) ChainID() *big.Int {
func (s *modernSigner) Equal(s2 Signer) bool {
other, ok := s2.(*modernSigner)
return ok && s.chainID.Cmp(other.chainID) == 0 && maps.Equal(s.txtypes, other.txtypes) && s.legacy.Equal(other.legacy)
return ok && s.chainID.Cmp(other.chainID) == 0 && s.txtypes == other.txtypes && s.legacy.Equal(other.legacy)
}
func (s *modernSigner) Hash(tx *Transaction) common.Hash {
@ -236,8 +248,7 @@ func (s *modernSigner) Hash(tx *Transaction) common.Hash {
}
func (s *modernSigner) supportsType(txtype byte) bool {
_, ok := s.txtypes[txtype]
return ok
return s.txtypes.has(txtype)
}
func (s *modernSigner) Sender(tx *Transaction) (common.Address, error) {

View file

@ -25,6 +25,7 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/params/forks"
"github.com/ethereum/go-ethereum/rlp"
)
@ -188,3 +189,14 @@ func createTestLegacyTxInner() *LegacyTx {
Data: nil,
}
}
// Benchmark_modernSigner_Equal measures the cost of comparing two
// identically-configured modern signers; with the bitmap-based txtypeSet
// this is a plain value comparison rather than a map comparison.
func Benchmark_modernSigner_Equal(b *testing.B) {
	signer1 := newModernSigner(big.NewInt(1), forks.Amsterdam)
	signer2 := newModernSigner(big.NewInt(1), forks.Amsterdam)
	for b.Loop() {
		if !signer1.Equal(signer2) {
			b.Fatal("expected signers to be equal")
		}
	}
}

View file

@ -455,7 +455,7 @@ func verkleTestGenesis(config *params.ChainConfig) *Genesis {
func TestProcessVerkleContractWithEmptyCode(t *testing.T) {
// The test txs were taken from a secondary testnet with chain id 69421
config := *testKaustinenLikeChainConfig
config.ChainID.SetUint64(69421)
config.ChainID = new(big.Int).SetUint64(69421)
gspec := verkleTestGenesis(&config)
genesisH, _, _, _, _, statediffs := GenerateVerkleChainWithGenesis(gspec, beacon.New(ethash.NewFaker()), 1, func(i int, gen *BlockGen) {
@ -511,7 +511,7 @@ func TestProcessVerkleContractWithEmptyCode(t *testing.T) {
func TestProcessVerkleExtCodeHashOpcode(t *testing.T) {
// The test txs were taken from a secondary testnet with chain id 69421
config := *testKaustinenLikeChainConfig
config.ChainID.SetUint64(69421)
config.ChainID = new(big.Int).SetUint64(69421)
var (
signer = types.LatestSigner(&config)
@ -615,7 +615,7 @@ func TestProcessVerkleExtCodeHashOpcode(t *testing.T) {
func TestProcessVerkleBalanceOpcode(t *testing.T) {
// The test txs were taken from a secondary testnet with chain id 69421
config := *testKaustinenLikeChainConfig
config.ChainID.SetUint64(69421)
config.ChainID = new(big.Int).SetUint64(69421)
var (
signer = types.LatestSigner(&config)
@ -672,7 +672,7 @@ func TestProcessVerkleBalanceOpcode(t *testing.T) {
func TestProcessVerkleSelfDestructInSeparateTx(t *testing.T) {
// The test txs were taken from a secondary testnet with chain id 69421
config := *testKaustinenLikeChainConfig
config.ChainID.SetUint64(69421)
config.ChainID = new(big.Int).SetUint64(69421)
var (
signer = types.LatestSigner(&config)
@ -792,7 +792,7 @@ func TestProcessVerkleSelfDestructInSeparateTx(t *testing.T) {
func TestProcessVerkleSelfDestructInSameTx(t *testing.T) {
// The test txs were taken from a secondary testnet with chain id 69421
config := *testKaustinenLikeChainConfig
config.ChainID.SetUint64(69421)
config.ChainID = new(big.Int).SetUint64(69421)
var (
signer = types.LatestSigner(&config)
@ -888,7 +888,7 @@ func TestProcessVerkleSelfDestructInSameTx(t *testing.T) {
func TestProcessVerkleSelfDestructInSeparateTxWithSelfBeneficiary(t *testing.T) {
// The test txs were taken from a secondary testnet with chain id 69421
config := *testKaustinenLikeChainConfig
config.ChainID.SetUint64(69421)
config.ChainID = new(big.Int).SetUint64(69421)
var (
signer = types.LatestSigner(&config)
@ -978,7 +978,7 @@ func TestProcessVerkleSelfDestructInSeparateTxWithSelfBeneficiary(t *testing.T)
func TestProcessVerkleSelfDestructInSameTxWithSelfBeneficiary(t *testing.T) {
// The test txs were taken from a secondary testnet with chain id 69421
config := *testKaustinenLikeChainConfig
config.ChainID.SetUint64(69421)
config.ChainID = new(big.Int).SetUint64(69421)
var (
signer = types.LatestSigner(&config)
@ -1042,7 +1042,7 @@ func TestProcessVerkleSelfDestructInSameTxWithSelfBeneficiary(t *testing.T) {
func TestProcessVerkleSelfDestructInSameTxWithSelfBeneficiaryAndPrefundedAccount(t *testing.T) {
// The test txs were taken from a secondary testnet with chain id 69421
config := *testKaustinenLikeChainConfig
config.ChainID.SetUint64(69421)
config.ChainID = new(big.Int).SetUint64(69421)
var (
signer = types.LatestSigner(&config)

View file

@ -601,7 +601,9 @@ func (evm *EVM) initNewContract(contract *Contract, address common.Address) ([]b
}
}
evm.StateDB.SetCode(address, ret, tracing.CodeChangeContractCreation)
if len(ret) > 0 {
evm.StateDB.SetCode(address, ret, tracing.CodeChangeContractCreation)
}
return ret, nil
}

View file

@ -28,12 +28,10 @@ import (
"io"
"math/big"
"os"
"sync"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/math"
"github.com/ethereum/go-ethereum/rlp"
"golang.org/x/crypto/sha3"
)
// SignatureLength indicates the byte length required to carry a signature with recovery id.
@ -69,17 +67,6 @@ type KeccakState interface {
Read([]byte) (int, error)
}
// NewKeccakState creates a new KeccakState
func NewKeccakState() KeccakState {
return sha3.NewLegacyKeccak256().(KeccakState)
}
var hasherPool = sync.Pool{
New: func() any {
return sha3.NewLegacyKeccak256().(KeccakState)
},
}
// HashData hashes the provided data using the KeccakState and returns a 32 byte hash
func HashData(kh KeccakState, data []byte) (h common.Hash) {
kh.Reset()
@ -88,41 +75,6 @@ func HashData(kh KeccakState, data []byte) (h common.Hash) {
return h
}
// Keccak256 calculates and returns the Keccak256 hash of the input data.
func Keccak256(data ...[]byte) []byte {
b := make([]byte, 32)
d := hasherPool.Get().(KeccakState)
d.Reset()
for _, b := range data {
d.Write(b)
}
d.Read(b)
hasherPool.Put(d)
return b
}
// Keccak256Hash calculates and returns the Keccak256 hash of the input data,
// converting it to an internal Hash data structure.
func Keccak256Hash(data ...[]byte) (h common.Hash) {
d := hasherPool.Get().(KeccakState)
d.Reset()
for _, b := range data {
d.Write(b)
}
d.Read(h[:])
hasherPool.Put(d)
return h
}
// Keccak512 calculates and returns the Keccak512 hash of the input data.
func Keccak512(data ...[]byte) []byte {
d := sha3.NewLegacyKeccak512()
for _, b := range data {
d.Write(b)
}
return d.Sum(nil)
}
// CreateAddress creates an ethereum address given the bytes and the nonce
func CreateAddress(b common.Address, nonce uint64) common.Address {
data, _ := rlp.EncodeToBytes([]interface{}{b, nonce})

63
crypto/keccak.go Normal file
View file

@ -0,0 +1,63 @@
// Copyright 2025 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
//go:build !ziren
package crypto
import (
"sync"
"github.com/ethereum/go-ethereum/common"
"golang.org/x/crypto/sha3"
)
// NewKeccakState creates a new KeccakState
func NewKeccakState() KeccakState {
	return sha3.NewLegacyKeccak256().(KeccakState)
}

// hasherPool recycles Keccak256 sponge states so the hot Keccak256 and
// Keccak256Hash helpers below avoid allocating a fresh hasher per call.
var hasherPool = sync.Pool{
	New: func() any {
		return sha3.NewLegacyKeccak256().(KeccakState)
	},
}
// Keccak256 calculates and returns the Keccak256 hash of the input data.
// A pooled sponge state is reused to avoid per-call hasher allocations.
func Keccak256(data ...[]byte) []byte {
	hasher := hasherPool.Get().(KeccakState)
	defer hasherPool.Put(hasher)
	hasher.Reset()
	for _, chunk := range data {
		hasher.Write(chunk)
	}
	sum := make([]byte, 32)
	hasher.Read(sum)
	return sum
}
// Keccak256Hash calculates and returns the Keccak256 hash of the input data,
// converting it to an internal Hash data structure.
func Keccak256Hash(data ...[]byte) (h common.Hash) {
	hasher := hasherPool.Get().(KeccakState)
	defer hasherPool.Put(hasher)
	hasher.Reset()
	for _, chunk := range data {
		hasher.Write(chunk)
	}
	hasher.Read(h[:])
	return h
}

122
crypto/keccak_ziren.go Normal file
View file

@ -0,0 +1,122 @@
// Copyright 2025 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
//go:build ziren
package crypto
import (
"github.com/ProjectZKM/Ziren/crates/go-runtime/zkvm_runtime"
"github.com/ethereum/go-ethereum/common"
)
// zirenKeccakState implements the KeccakState interface using the Ziren zkvm_runtime.
// It accumulates data written to it and uses the zkvm's Keccak256 system call for hashing.
type zirenKeccakState struct {
	buf []byte // accumulated data
	result []byte // cached result
	dirty bool // whether new data has been written since last hash
}

// newZirenKeccakState returns an empty hasher with a pre-grown input buffer.
func newZirenKeccakState() KeccakState {
	return &zirenKeccakState{
		buf: make([]byte, 0, 512), // pre-allocate reasonable capacity
	}
}

// Write appends p to the internal buffer and marks the cached digest stale.
// It never returns an error.
func (s *zirenKeccakState) Write(p []byte) (n int, err error) {
	s.buf = append(s.buf, p...)
	s.dirty = true
	return len(p), nil
}

// Sum appends the 32-byte digest of everything written so far to b.
func (s *zirenKeccakState) Sum(b []byte) []byte {
	s.computeHashIfNeeded()
	return append(b, s.result...)
}

// Reset clears the buffered input and cached digest, keeping the buffer's
// backing storage for reuse.
func (s *zirenKeccakState) Reset() {
	s.buf = s.buf[:0]
	s.result = nil
	s.dirty = false
}

// Size returns the digest size in bytes.
func (s *zirenKeccakState) Size() int {
	return 32
}

// BlockSize returns the sponge rate of Keccak256.
func (s *zirenKeccakState) BlockSize() int {
	return 136 // Keccak256 rate
}

// Read copies the 32-byte digest into p. NOTE(review): unlike a streaming
// sponge squeeze, repeated calls yield the same digest and at most 32 bytes
// are copied even when len(p) > 32 — callers appear to read exactly 32
// bytes; confirm if used elsewhere.
func (s *zirenKeccakState) Read(p []byte) (n int, err error) {
	s.computeHashIfNeeded()
	if len(p) == 0 {
		return 0, nil
	}
	// After computeHashIfNeeded(), s.result is always a 32-byte slice
	n = copy(p, s.result)
	return n, nil
}

// computeHashIfNeeded recomputes the digest via the zkvm Keccak256 syscall
// when new input has arrived (or no digest exists yet), caching the result
// for subsequent Sum/Read calls.
func (s *zirenKeccakState) computeHashIfNeeded() {
	if s.dirty || s.result == nil {
		// Use the zkvm_runtime Keccak256 which uses SyscallKeccakSponge
		hashArray := zkvm_runtime.Keccak256(s.buf)
		s.result = hashArray[:]
		s.dirty = false
	}
}
// NewKeccakState creates a new KeccakState
// This uses a Ziren-optimized implementation that leverages the zkvm_runtime.Keccak256 system call.
func NewKeccakState() KeccakState {
	return newZirenKeccakState()
}
// Keccak256 calculates and returns the Keccak256 hash using the Ziren zkvm_runtime implementation.
// The digest is produced by the zkvm's Keccak256 system call.
func Keccak256(data ...[]byte) []byte {
	switch len(data) {
	case 0:
		// No input chunks: hash of the empty message.
		digest := zkvm_runtime.Keccak256(nil)
		return digest[:]
	case 1:
		// Single chunk: hash it directly, no copying needed.
		digest := zkvm_runtime.Keccak256(data[0])
		return digest[:]
	}
	// Multiple chunks: join them into one contiguous buffer first.
	total := 0
	for _, chunk := range data {
		total += len(chunk)
	}
	joined := make([]byte, 0, total)
	for _, chunk := range data {
		joined = append(joined, chunk...)
	}
	digest := zkvm_runtime.Keccak256(joined)
	return digest[:]
}
// Keccak256Hash calculates and returns the Keccak256 hash as a Hash using the Ziren zkvm_runtime implementation.
// It delegates to Keccak256 and converts the 32-byte result to common.Hash.
func Keccak256Hash(data ...[]byte) common.Hash {
	return common.Hash(Keccak256(data...))
}

View file

@ -486,3 +486,11 @@ func (b *EthAPIBackend) StateAtBlock(ctx context.Context, block *types.Block, re
// StateAtTransaction returns the execution state just before the given
// transaction in the block, delegating to the Ethereum backend.
func (b *EthAPIBackend) StateAtTransaction(ctx context.Context, block *types.Block, txIndex int, reexec uint64) (*types.Transaction, vm.BlockContext, *state.StateDB, tracers.StateReleaseFunc, error) {
	return b.eth.stateAtTransaction(ctx, block, txIndex, reexec)
}

// RPCTxSyncDefaultTimeout returns the configured default wait time for
// eth_sendRawTransactionSync (EIP-7966).
func (b *EthAPIBackend) RPCTxSyncDefaultTimeout() time.Duration {
	return b.eth.config.TxSyncDefaultTimeout
}

// RPCTxSyncMaxTimeout returns the configured upper bound on the wait time
// for eth_sendRawTransactionSync (EIP-7966).
func (b *EthAPIBackend) RPCTxSyncMaxTimeout() time.Duration {
	return b.eth.config.TxSyncMaxTimeout
}

View file

@ -351,6 +351,7 @@ func XTestDelivery(t *testing.T) {
}
}
}()
wg.Add(1)
go func() {
defer wg.Done()
// reserve receiptfetch

View file

@ -49,27 +49,29 @@ var FullNodeGPO = gasprice.Config{
// Defaults contains default settings for use on the Ethereum main net.
var Defaults = Config{
HistoryMode: history.KeepAll,
SyncMode: SnapSync,
NetworkId: 0, // enable auto configuration of networkID == chainID
TxLookupLimit: 2350000,
TransactionHistory: 2350000,
LogHistory: 2350000,
StateHistory: params.FullImmutabilityThreshold,
DatabaseCache: 512,
TrieCleanCache: 154,
TrieDirtyCache: 256,
TrieTimeout: 60 * time.Minute,
SnapshotCache: 102,
FilterLogCacheSize: 32,
LogQueryLimit: 1000,
Miner: miner.DefaultConfig,
TxPool: legacypool.DefaultConfig,
BlobPool: blobpool.DefaultConfig,
RPCGasCap: 50000000,
RPCEVMTimeout: 5 * time.Second,
GPO: FullNodeGPO,
RPCTxFeeCap: 1, // 1 ether
HistoryMode: history.KeepAll,
SyncMode: SnapSync,
NetworkId: 0, // enable auto configuration of networkID == chainID
TxLookupLimit: 2350000,
TransactionHistory: 2350000,
LogHistory: 2350000,
StateHistory: params.FullImmutabilityThreshold,
DatabaseCache: 512,
TrieCleanCache: 154,
TrieDirtyCache: 256,
TrieTimeout: 60 * time.Minute,
SnapshotCache: 102,
FilterLogCacheSize: 32,
LogQueryLimit: 1000,
Miner: miner.DefaultConfig,
TxPool: legacypool.DefaultConfig,
BlobPool: blobpool.DefaultConfig,
RPCGasCap: 50000000,
RPCEVMTimeout: 5 * time.Second,
GPO: FullNodeGPO,
RPCTxFeeCap: 1, // 1 ether
TxSyncDefaultTimeout: 20 * time.Second,
TxSyncMaxTimeout: 1 * time.Minute,
}
//go:generate go run github.com/fjl/gencodec -type Config -formats toml -out gen_config.go
@ -183,6 +185,10 @@ type Config struct {
// OverrideVerkle (TODO: remove after the fork)
OverrideVerkle *uint64 `toml:",omitempty"`
// EIP-7966: eth_sendRawTransactionSync timeouts
TxSyncDefaultTimeout time.Duration `toml:",omitempty"`
TxSyncMaxTimeout time.Duration `toml:",omitempty"`
}
// CreateConsensusEngine creates a consensus engine for the given chain config.

View file

@ -58,10 +58,12 @@ func (c Config) MarshalTOML() (interface{}, error) {
RPCGasCap uint64
RPCEVMTimeout time.Duration
RPCTxFeeCap float64
OverrideOsaka *uint64 `toml:",omitempty"`
OverrideBPO1 *uint64 `toml:",omitempty"`
OverrideBPO2 *uint64 `toml:",omitempty"`
OverrideVerkle *uint64 `toml:",omitempty"`
OverrideOsaka *uint64 `toml:",omitempty"`
OverrideBPO1 *uint64 `toml:",omitempty"`
OverrideBPO2 *uint64 `toml:",omitempty"`
OverrideVerkle *uint64 `toml:",omitempty"`
TxSyncDefaultTimeout time.Duration `toml:",omitempty"`
TxSyncMaxTimeout time.Duration `toml:",omitempty"`
}
var enc Config
enc.Genesis = c.Genesis
@ -109,6 +111,8 @@ func (c Config) MarshalTOML() (interface{}, error) {
enc.OverrideBPO1 = c.OverrideBPO1
enc.OverrideBPO2 = c.OverrideBPO2
enc.OverrideVerkle = c.OverrideVerkle
enc.TxSyncDefaultTimeout = c.TxSyncDefaultTimeout
enc.TxSyncMaxTimeout = c.TxSyncMaxTimeout
return &enc, nil
}
@ -156,10 +160,12 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error {
RPCGasCap *uint64
RPCEVMTimeout *time.Duration
RPCTxFeeCap *float64
OverrideOsaka *uint64 `toml:",omitempty"`
OverrideBPO1 *uint64 `toml:",omitempty"`
OverrideBPO2 *uint64 `toml:",omitempty"`
OverrideVerkle *uint64 `toml:",omitempty"`
OverrideOsaka *uint64 `toml:",omitempty"`
OverrideBPO1 *uint64 `toml:",omitempty"`
OverrideBPO2 *uint64 `toml:",omitempty"`
OverrideVerkle *uint64 `toml:",omitempty"`
TxSyncDefaultTimeout *time.Duration `toml:",omitempty"`
TxSyncMaxTimeout *time.Duration `toml:",omitempty"`
}
var dec Config
if err := unmarshal(&dec); err != nil {
@ -300,5 +306,11 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error {
if dec.OverrideVerkle != nil {
c.OverrideVerkle = dec.OverrideVerkle
}
if dec.TxSyncDefaultTimeout != nil {
c.TxSyncDefaultTimeout = *dec.TxSyncDefaultTimeout
}
if dec.TxSyncMaxTimeout != nil {
c.TxSyncMaxTimeout = *dec.TxSyncMaxTimeout
}
return nil
}

59
eth/fetcher/metrics.go Normal file
View file

@ -0,0 +1,59 @@
// Copyright 2025 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
// Contains the metrics collected by the txfetcher.
package fetcher
import "github.com/ethereum/go-ethereum/metrics"
var (
	// Announcement ingress (NewPooledTransactionHashes handling).
	txAnnounceInMeter          = metrics.NewRegisteredMeter("eth/fetcher/transaction/announces/in", nil)
	txAnnounceKnownMeter       = metrics.NewRegisteredMeter("eth/fetcher/transaction/announces/known", nil)
	txAnnounceUnderpricedMeter = metrics.NewRegisteredMeter("eth/fetcher/transaction/announces/underpriced", nil)
	txAnnounceDOSMeter         = metrics.NewRegisteredMeter("eth/fetcher/transaction/announces/dos", nil)

	// Direct transaction broadcast ingress.
	txBroadcastInMeter          = metrics.NewRegisteredMeter("eth/fetcher/transaction/broadcasts/in", nil)
	txBroadcastKnownMeter       = metrics.NewRegisteredMeter("eth/fetcher/transaction/broadcasts/known", nil)
	txBroadcastUnderpricedMeter = metrics.NewRegisteredMeter("eth/fetcher/transaction/broadcasts/underpriced", nil)
	txBroadcastOtherRejectMeter = metrics.NewRegisteredMeter("eth/fetcher/transaction/broadcasts/otherreject", nil)

	// Outbound transaction retrieval requests and their outcomes.
	txRequestOutMeter     = metrics.NewRegisteredMeter("eth/fetcher/transaction/request/out", nil)
	txRequestFailMeter    = metrics.NewRegisteredMeter("eth/fetcher/transaction/request/fail", nil)
	txRequestDoneMeter    = metrics.NewRegisteredMeter("eth/fetcher/transaction/request/done", nil)
	txRequestTimeoutMeter = metrics.NewRegisteredMeter("eth/fetcher/transaction/request/timeout", nil)

	// Replies received for earlier retrieval requests.
	txReplyInMeter          = metrics.NewRegisteredMeter("eth/fetcher/transaction/replies/in", nil)
	txReplyKnownMeter       = metrics.NewRegisteredMeter("eth/fetcher/transaction/replies/known", nil)
	txReplyUnderpricedMeter = metrics.NewRegisteredMeter("eth/fetcher/transaction/replies/underpriced", nil)
	txReplyOtherRejectMeter = metrics.NewRegisteredMeter("eth/fetcher/transaction/replies/otherreject", nil)

	// Gauges tracking the fetcher's internal scheduling stages.
	txFetcherWaitingPeers   = metrics.NewRegisteredGauge("eth/fetcher/transaction/waiting/peers", nil)
	txFetcherWaitingHashes  = metrics.NewRegisteredGauge("eth/fetcher/transaction/waiting/hashes", nil)
	txFetcherQueueingPeers  = metrics.NewRegisteredGauge("eth/fetcher/transaction/queueing/peers", nil)
	txFetcherQueueingHashes = metrics.NewRegisteredGauge("eth/fetcher/transaction/queueing/hashes", nil)
	txFetcherFetchingPeers  = metrics.NewRegisteredGauge("eth/fetcher/transaction/fetching/peers", nil)
	txFetcherFetchingHashes = metrics.NewRegisteredGauge("eth/fetcher/transaction/fetching/hashes", nil)

	// Slow-peer tracking.
	txFetcherSlowPeers = metrics.NewRegisteredGauge("eth/fetcher/transaction/slow/peers", nil)
	// Note: this metric does not mean that the fetching of a transaction
	// was blocked by a specific peer during this period, since we request
	// another peer to fetch the same transaction hash.
	// The purpose of this metric is to measure how long it takes for a slow peer
	// to become "unfrozen", either by eventually replying to the request
	// or by being dropped, measuring from the moment the request was sent.
	txFetcherSlowWait = metrics.NewRegisteredHistogram("eth/fetcher/transaction/slow/wait", nil, metrics.NewExpDecaySample(1028, 0.015))
)

View file

@ -30,7 +30,6 @@ import (
"github.com/ethereum/go-ethereum/core/txpool"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/metrics"
)
const (
@ -80,35 +79,6 @@ var (
txFetchTimeout = 5 * time.Second
)
var (
txAnnounceInMeter = metrics.NewRegisteredMeter("eth/fetcher/transaction/announces/in", nil)
txAnnounceKnownMeter = metrics.NewRegisteredMeter("eth/fetcher/transaction/announces/known", nil)
txAnnounceUnderpricedMeter = metrics.NewRegisteredMeter("eth/fetcher/transaction/announces/underpriced", nil)
txAnnounceDOSMeter = metrics.NewRegisteredMeter("eth/fetcher/transaction/announces/dos", nil)
txBroadcastInMeter = metrics.NewRegisteredMeter("eth/fetcher/transaction/broadcasts/in", nil)
txBroadcastKnownMeter = metrics.NewRegisteredMeter("eth/fetcher/transaction/broadcasts/known", nil)
txBroadcastUnderpricedMeter = metrics.NewRegisteredMeter("eth/fetcher/transaction/broadcasts/underpriced", nil)
txBroadcastOtherRejectMeter = metrics.NewRegisteredMeter("eth/fetcher/transaction/broadcasts/otherreject", nil)
txRequestOutMeter = metrics.NewRegisteredMeter("eth/fetcher/transaction/request/out", nil)
txRequestFailMeter = metrics.NewRegisteredMeter("eth/fetcher/transaction/request/fail", nil)
txRequestDoneMeter = metrics.NewRegisteredMeter("eth/fetcher/transaction/request/done", nil)
txRequestTimeoutMeter = metrics.NewRegisteredMeter("eth/fetcher/transaction/request/timeout", nil)
txReplyInMeter = metrics.NewRegisteredMeter("eth/fetcher/transaction/replies/in", nil)
txReplyKnownMeter = metrics.NewRegisteredMeter("eth/fetcher/transaction/replies/known", nil)
txReplyUnderpricedMeter = metrics.NewRegisteredMeter("eth/fetcher/transaction/replies/underpriced", nil)
txReplyOtherRejectMeter = metrics.NewRegisteredMeter("eth/fetcher/transaction/replies/otherreject", nil)
txFetcherWaitingPeers = metrics.NewRegisteredGauge("eth/fetcher/transaction/waiting/peers", nil)
txFetcherWaitingHashes = metrics.NewRegisteredGauge("eth/fetcher/transaction/waiting/hashes", nil)
txFetcherQueueingPeers = metrics.NewRegisteredGauge("eth/fetcher/transaction/queueing/peers", nil)
txFetcherQueueingHashes = metrics.NewRegisteredGauge("eth/fetcher/transaction/queueing/hashes", nil)
txFetcherFetchingPeers = metrics.NewRegisteredGauge("eth/fetcher/transaction/fetching/peers", nil)
txFetcherFetchingHashes = metrics.NewRegisteredGauge("eth/fetcher/transaction/fetching/hashes", nil)
)
var errTerminated = errors.New("terminated")
// txAnnounce is the notification of the availability of a batch
@ -635,6 +605,7 @@ func (f *TxFetcher) loop() {
}
// Keep track of the request as dangling, but never expire
f.requests[peer].hashes = nil
txFetcherSlowPeers.Inc(1)
}
}
// Schedule a new transaction retrieval
@ -728,6 +699,10 @@ func (f *TxFetcher) loop() {
log.Warn("Unexpected transaction delivery", "peer", delivery.origin)
break
}
if req.hashes == nil {
txFetcherSlowPeers.Dec(1)
txFetcherSlowWait.Update(time.Duration(f.clock.Now() - req.time).Nanoseconds())
}
delete(f.requests, delivery.origin)
// Anything not delivered should be re-scheduled (with or without
@ -807,6 +782,10 @@ func (f *TxFetcher) loop() {
}
delete(f.fetching, hash)
}
if request.hashes == nil {
txFetcherSlowPeers.Dec(1)
txFetcherSlowWait.Update(time.Duration(f.clock.Now() - request.time).Nanoseconds())
}
delete(f.requests, drop.peer)
}
// Clean up general announcement tracking
@ -816,6 +795,10 @@ func (f *TxFetcher) loop() {
if len(f.announced[hash]) == 0 {
delete(f.announced, hash)
}
delete(f.alternates[hash], drop.peer)
if len(f.alternates[hash]) == 0 {
delete(f.alternates, hash)
}
}
delete(f.announces, drop.peer)
}
@ -879,7 +862,7 @@ func (f *TxFetcher) rescheduleWait(timer *mclock.Timer, trigger chan struct{}) {
// This method is a bit "flaky" "by design". In theory the timeout timer only ever
// should be rescheduled if some request is pending. In practice, a timeout will
// cause the timer to be rescheduled every 5 secs (until the peer comes through or
// disconnects). This is a limitation of the fetcher code because we don't trac
// disconnects). This is a limitation of the fetcher code because we don't track
// pending requests and timed out requests separately. Without double tracking, if
// we simply didn't reschedule the timer on all-timeout then the timer would never
// be set again since len(request) > 0 => something's running.

View file

@ -1858,6 +1858,56 @@ func TestBlobTransactionAnnounce(t *testing.T) {
})
}
func TestTransactionFetcherDropAlternates(t *testing.T) {
testTransactionFetcherParallel(t, txFetcherTest{
init: func() *TxFetcher {
return NewTxFetcher(
func(common.Hash) bool { return false },
func(txs []*types.Transaction) []error {
return make([]error, len(txs))
},
func(string, []common.Hash) error { return nil },
nil,
)
},
steps: []interface{}{
doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[0]}, types: []byte{testTxs[0].Type()}, sizes: []uint32{uint32(testTxs[0].Size())}},
doWait{time: txArriveTimeout, step: true},
doTxNotify{peer: "B", hashes: []common.Hash{testTxsHashes[0]}, types: []byte{testTxs[0].Type()}, sizes: []uint32{uint32(testTxs[0].Size())}},
isScheduled{
tracking: map[string][]announce{
"A": {
{testTxsHashes[0], testTxs[0].Type(), uint32(testTxs[0].Size())},
},
"B": {
{testTxsHashes[0], testTxs[0].Type(), uint32(testTxs[0].Size())},
},
},
fetching: map[string][]common.Hash{
"A": {testTxsHashes[0]},
},
},
doDrop("B"),
isScheduled{
tracking: map[string][]announce{
"A": {
{testTxsHashes[0], testTxs[0].Type(), uint32(testTxs[0].Size())},
},
},
fetching: map[string][]common.Hash{
"A": {testTxsHashes[0]},
},
},
doDrop("A"),
isScheduled{
tracking: nil, fetching: nil,
},
},
})
}
func testTransactionFetcherParallel(t *testing.T, tt txFetcherTest) {
t.Parallel()
testTransactionFetcher(t, tt)

View file

@ -563,7 +563,7 @@ type ReceiptWithTx struct {
// In addition to returning receipts, it also returns the corresponding transactions.
// This is because receipts only contain low-level data, while user-facing data
// may require additional information from the Transaction.
func filterReceipts(txHashes []common.Hash, ev core.ChainEvent) []*ReceiptWithTx {
func filterReceipts(txHashes map[common.Hash]bool, ev core.ChainEvent) []*ReceiptWithTx {
var ret []*ReceiptWithTx
receipts := ev.Receipts
@ -583,27 +583,9 @@ func filterReceipts(txHashes []common.Hash, ev core.ChainEvent) []*ReceiptWithTx
Transaction: txs[i],
}
}
} else if len(txHashes) == 1 {
// Filter by single transaction hash.
// This is a common case, so we distinguish it from filtering by multiple tx hashes and made a small optimization.
for i, receipt := range receipts {
if receipt.TxHash == txHashes[0] {
ret = append(ret, &ReceiptWithTx{
Receipt: receipt,
Transaction: txs[i],
})
break
}
}
} else {
// Filter by multiple transaction hashes.
txHashMap := make(map[common.Hash]bool, len(txHashes))
for _, hash := range txHashes {
txHashMap[hash] = true
}
for i, receipt := range receipts {
if txHashMap[receipt.TxHash] {
if txHashes[receipt.TxHash] {
ret = append(ret, &ReceiptWithTx{
Receipt: receipt,
Transaction: txs[i],

View file

@ -185,9 +185,9 @@ type subscription struct {
txs chan []*types.Transaction
headers chan *types.Header
receipts chan []*ReceiptWithTx
txHashes []common.Hash // contains transaction hashes for transactionReceipts subscription filtering
installed chan struct{} // closed when the filter is installed
err chan error // closed when the filter is uninstalled
txHashes map[common.Hash]bool // contains transaction hashes for transactionReceipts subscription filtering
installed chan struct{} // closed when the filter is installed
err chan error // closed when the filter is uninstalled
}
// EventSystem creates subscriptions, processes events and broadcasts them to the
@ -403,6 +403,10 @@ func (es *EventSystem) SubscribePendingTxs(txs chan []*types.Transaction) *Subsc
// transactions when they are included in blocks. If txHashes is provided, only receipts
// for those specific transaction hashes will be delivered.
func (es *EventSystem) SubscribeTransactionReceipts(txHashes []common.Hash, receipts chan []*ReceiptWithTx) *Subscription {
hashSet := make(map[common.Hash]bool)
for _, h := range txHashes {
hashSet[h] = true
}
sub := &subscription{
id: rpc.NewID(),
typ: TransactionReceiptsSubscription,
@ -411,7 +415,7 @@ func (es *EventSystem) SubscribeTransactionReceipts(txHashes []common.Hash, rece
txs: make(chan []*types.Transaction),
headers: make(chan *types.Header),
receipts: receipts,
txHashes: txHashes,
txHashes: hashSet,
installed: make(chan struct{}),
err: make(chan error),
}

View file

@ -959,7 +959,7 @@ func (api *API) TraceCall(ctx context.Context, args ethapi.TransactionArgs, bloc
// Apply the customization rules if required.
if config != nil {
if config.BlockOverrides != nil && config.BlockOverrides.Number.ToInt().Uint64() == h.Number.Uint64()+1 {
if config.BlockOverrides != nil && config.BlockOverrides.Number != nil && config.BlockOverrides.Number.ToInt().Uint64() == h.Number.Uint64()+1 {
// Overriding the block number to n+1 is a common way for wallets to
// simulate transactions, however without the following fix, a contract
// can assert it is being simulated by checking if blockhash(n) == 0x0 and

View file

@ -471,6 +471,20 @@ func TestTraceCall(t *testing.T) {
{"pc":0,"op":"NUMBER","gas":24946984,"gasCost":2,"depth":1,"stack":[]},
{"pc":1,"op":"STOP","gas":24946982,"gasCost":0,"depth":1,"stack":["0x1337"]}]}`,
},
// Tests issue #33014 where accessing nil block number override panics.
{
blockNumber: rpc.BlockNumber(0),
call: ethapi.TransactionArgs{
From: &accounts[0].addr,
To: &accounts[1].addr,
Value: (*hexutil.Big)(big.NewInt(1000)),
},
config: &TraceCallConfig{
BlockOverrides: &override.BlockOverrides{},
},
expectErr: nil,
expect: `{"gas":21000,"failed":false,"returnValue":"0x","structLogs":[]}`,
},
}
for i, testspec := range testSuite {
result, err := api.TraceCall(context.Background(), testspec.call, rpc.BlockNumberOrHash{BlockNumber: &testspec.blockNumber}, testspec.config)

View file

@ -0,0 +1,101 @@
{
"context": {
"difficulty": "0",
"gasLimit": "8000000",
"miner": "0x0000000000000000000000000000000000000000",
"number": "1",
"timestamp": "1000",
"baseFeePerGas": "7"
},
"genesis": {
"alloc": {
"0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b": {
"balance": "0x10000000000000000",
"nonce": "0",
"code": "0x",
"storage": {}
},
"0x1111111111111111111111111111111111111111": {
"balance": "0x0",
"nonce": "0",
"code": "0x",
"storage": {}
},
"0x2222222222222222222222222222222222222222": {
"balance": "0xde0b6b3a7640000",
"nonce": "1",
"code": "0x6099600155731111111111111111111111111111111111111111ff",
"storage": {
"0x0000000000000000000000000000000000000000000000000000000000000001": "0x000000000000000000000000000000000000000000000000000000000000abcd",
"0x0000000000000000000000000000000000000000000000000000000000000002": "0x0000000000000000000000000000000000000000000000000000000000001234"
}
}
},
"config": {
"chainId": 1,
"homesteadBlock": 0,
"eip150Block": 0,
"eip155Block": 0,
"eip158Block": 0,
"byzantiumBlock": 0,
"constantinopleBlock": 0,
"petersburgBlock": 0,
"istanbulBlock": 0,
"berlinBlock": 0,
"londonBlock": 0,
"mergeNetsplitBlock": 0,
"shanghaiTime": 0,
"cancunTime": 0,
"terminalTotalDifficulty": 0,
"terminalTotalDifficultyPassed": true
},
"difficulty": "0",
"extraData": "0x",
"gasLimit": "8000000",
"hash": "0x0000000000000000000000000000000000000000000000000000000000000000",
"miner": "0x0000000000000000000000000000000000000000",
"mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
"nonce": "0x0000000000000000",
"number": "0",
"stateRoot": "0x0000000000000000000000000000000000000000000000000000000000000000",
"timestamp": "0"
},
"input": "0xf860800a830186a094222222222222222222222222222222222222222280801ba0c4829400221936e8016721406f84b4710ead5608f15c785a3cedc20a7aebaab2a033e8e6e12cc432098b5ce8a409691f977867249073a3fc7804e8676c4d159475",
"tracerConfig": {
"diffMode": true
},
"result": {
"pre": {
"0x2222222222222222222222222222222222222222": {
"balance": "0xde0b6b3a7640000",
"nonce": 1,
"code": "0x6099600155731111111111111111111111111111111111111111ff",
"codeHash": "0x701bdb1d43777a9304905a100f758955d130e09c8e86d97e3f6becccdc001048",
"storage": {
"0x0000000000000000000000000000000000000000000000000000000000000001": "0x000000000000000000000000000000000000000000000000000000000000abcd"
}
},
"0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b": {
"balance": "0x10000000000000000"
}
},
"post": {
"0x0000000000000000000000000000000000000000": {
"balance": "0x2aed3"
},
"0x1111111111111111111111111111111111111111": {
"balance": "0xde0b6b3a7640000"
},
"0x2222222222222222222222222222222222222222": {
"balance": "0x0",
"storage": {
"0x0000000000000000000000000000000000000000000000000000000000000001": "0x0000000000000000000000000000000000000000000000000000000000000099"
}
},
"0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b": {
"balance": "0xfffffffffff70e96",
"nonce": 1
}
}
}
}

View file

@ -131,7 +131,15 @@ func (t *prestateTracer) OnOpcode(pc uint64, opcode byte, gas, cost uint64, scop
addr := common.Address(stackData[stackLen-1].Bytes20())
t.lookupAccount(addr)
if op == vm.SELFDESTRUCT {
t.deleted[caller] = true
if t.chainConfig.IsCancun(t.env.BlockNumber, t.env.Time) {
// EIP-6780: only delete if created in same transaction
if t.created[caller] {
t.deleted[caller] = true
}
} else {
// Pre-EIP-6780: always delete
t.deleted[caller] = true
}
}
case stackLen >= 5 && (op == vm.DELEGATECALL || op == vm.CALL || op == vm.STATICCALL || op == vm.CALLCODE):
addr := common.Address(stackData[stackLen-2].Bytes20())

View file

@ -23,6 +23,7 @@ import (
"errors"
"fmt"
"math/big"
"time"
"github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/common"
@ -705,6 +706,39 @@ func (ec *Client) SendTransaction(ctx context.Context, tx *types.Transaction) er
return ec.c.CallContext(ctx, nil, "eth_sendRawTransaction", hexutil.Encode(data))
}
// SendTransactionSync submits a signed tx and waits for a receipt (or until
// the optional timeout elapses on the server side). If timeout == 0, the server
// uses its default.
func (ec *Client) SendTransactionSync(
ctx context.Context,
tx *types.Transaction,
timeout *time.Duration,
) (*types.Receipt, error) {
raw, err := tx.MarshalBinary()
if err != nil {
return nil, err
}
return ec.SendRawTransactionSync(ctx, raw, timeout)
}
func (ec *Client) SendRawTransactionSync(
ctx context.Context,
rawTx []byte,
timeout *time.Duration,
) (*types.Receipt, error) {
var ms *hexutil.Uint64
if timeout != nil {
if d := hexutil.Uint64(timeout.Milliseconds()); d > 0 {
ms = &d
}
}
var receipt types.Receipt
if err := ec.c.CallContext(ctx, &receipt, "eth_sendRawTransactionSync", hexutil.Bytes(rawTx), ms); err != nil {
return nil, err
}
return &receipt, nil
}
// RevertErrorData returns the 'revert reason' data of a contract call.
//
// This can be used with CallContract and EstimateGas, and only when the server is Geth.
@ -828,3 +862,89 @@ func (p *rpcProgress) toSyncProgress() *ethereum.SyncProgress {
StateIndexRemaining: uint64(p.StateIndexRemaining),
}
}
// SimulateOptions represents the options for eth_simulateV1.
type SimulateOptions struct {
BlockStateCalls []SimulateBlock `json:"blockStateCalls"`
TraceTransfers bool `json:"traceTransfers"`
Validation bool `json:"validation"`
ReturnFullTransactions bool `json:"returnFullTransactions"`
}
// SimulateBlock represents a batch of calls to be simulated.
type SimulateBlock struct {
BlockOverrides *ethereum.BlockOverrides `json:"blockOverrides,omitempty"`
StateOverrides map[common.Address]ethereum.OverrideAccount `json:"stateOverrides,omitempty"`
Calls []ethereum.CallMsg `json:"calls"`
}
// MarshalJSON implements json.Marshaler for SimulateBlock.
func (s SimulateBlock) MarshalJSON() ([]byte, error) {
type Alias struct {
BlockOverrides *ethereum.BlockOverrides `json:"blockOverrides,omitempty"`
StateOverrides map[common.Address]ethereum.OverrideAccount `json:"stateOverrides,omitempty"`
Calls []interface{} `json:"calls"`
}
calls := make([]interface{}, len(s.Calls))
for i, call := range s.Calls {
calls[i] = toCallArg(call)
}
return json.Marshal(Alias{
BlockOverrides: s.BlockOverrides,
StateOverrides: s.StateOverrides,
Calls: calls,
})
}
//go:generate go run github.com/fjl/gencodec -type SimulateCallResult -field-override simulateCallResultMarshaling -out gen_simulate_call_result.go
// SimulateCallResult is the result of a simulated call.
type SimulateCallResult struct {
ReturnValue []byte `json:"returnData"`
Logs []*types.Log `json:"logs"`
GasUsed uint64 `json:"gasUsed"`
Status uint64 `json:"status"`
Error *CallError `json:"error,omitempty"`
}
type simulateCallResultMarshaling struct {
ReturnValue hexutil.Bytes
GasUsed hexutil.Uint64
Status hexutil.Uint64
}
// CallError represents an error from a simulated call.
type CallError struct {
Code int `json:"code"`
Message string `json:"message"`
Data string `json:"data,omitempty"`
}
//go:generate go run github.com/fjl/gencodec -type SimulateBlockResult -field-override simulateBlockResultMarshaling -out gen_simulate_block_result.go
// SimulateBlockResult represents the result of a simulated block.
type SimulateBlockResult struct {
Number *big.Int `json:"number"`
Hash common.Hash `json:"hash"`
Timestamp uint64 `json:"timestamp"`
GasLimit uint64 `json:"gasLimit"`
GasUsed uint64 `json:"gasUsed"`
FeeRecipient common.Address `json:"miner"`
BaseFeePerGas *big.Int `json:"baseFeePerGas,omitempty"`
Calls []SimulateCallResult `json:"calls"`
}
type simulateBlockResultMarshaling struct {
Number *hexutil.Big
Timestamp hexutil.Uint64
GasLimit hexutil.Uint64
GasUsed hexutil.Uint64
BaseFeePerGas *hexutil.Big
}
// SimulateV1 executes transactions on top of a base state.
func (ec *Client) SimulateV1(ctx context.Context, opts SimulateOptions, blockNrOrHash *rpc.BlockNumberOrHash) ([]SimulateBlockResult, error) {
var result []SimulateBlockResult
err := ec.c.CallContext(ctx, &result, "eth_simulateV1", opts, blockNrOrHash)
return result, err
}

View file

@ -754,3 +754,250 @@ func ExampleRevertErrorData() {
// revert: 08c379a00000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000a75736572206572726f72
// message: user error
}
func TestSimulateV1(t *testing.T) {
backend, _, err := newTestBackend(nil)
if err != nil {
t.Fatalf("Failed to create test backend: %v", err)
}
defer backend.Close()
client := ethclient.NewClient(backend.Attach())
defer client.Close()
ctx := context.Background()
// Get current base fee
header, err := client.HeaderByNumber(ctx, nil)
if err != nil {
t.Fatalf("Failed to get header: %v", err)
}
// Simple test: transfer ETH from one account to another
from := testAddr
to := common.HexToAddress("0x0000000000000000000000000000000000000001")
value := big.NewInt(100)
gas := uint64(100000)
maxFeePerGas := new(big.Int).Mul(header.BaseFee, big.NewInt(2))
opts := ethclient.SimulateOptions{
BlockStateCalls: []ethclient.SimulateBlock{
{
Calls: []ethereum.CallMsg{
{
From: from,
To: &to,
Value: value,
Gas: gas,
GasFeeCap: maxFeePerGas,
},
},
},
},
Validation: true,
}
results, err := client.SimulateV1(ctx, opts, nil)
if err != nil {
t.Fatalf("SimulateV1 failed: %v", err)
}
if len(results) != 1 {
t.Fatalf("expected 1 block result, got %d", len(results))
}
if len(results[0].Calls) != 1 {
t.Fatalf("expected 1 call result, got %d", len(results[0].Calls))
}
// Check that the transaction succeeded
if results[0].Calls[0].Status != 1 {
t.Errorf("expected status 1 (success), got %d", results[0].Calls[0].Status)
}
if results[0].Calls[0].Error != nil {
t.Errorf("expected no error, got %v", results[0].Calls[0].Error)
}
}
func TestSimulateV1WithBlockOverrides(t *testing.T) {
backend, _, err := newTestBackend(nil)
if err != nil {
t.Fatalf("Failed to create test backend: %v", err)
}
defer backend.Close()
client := ethclient.NewClient(backend.Attach())
defer client.Close()
ctx := context.Background()
// Get current base fee
header, err := client.HeaderByNumber(ctx, nil)
if err != nil {
t.Fatalf("Failed to get header: %v", err)
}
from := testAddr
to := common.HexToAddress("0x0000000000000000000000000000000000000001")
value := big.NewInt(100)
gas := uint64(100000)
maxFeePerGas := new(big.Int).Mul(header.BaseFee, big.NewInt(2))
// Override timestamp only
timestamp := uint64(1234567890)
opts := ethclient.SimulateOptions{
BlockStateCalls: []ethclient.SimulateBlock{
{
BlockOverrides: &ethereum.BlockOverrides{
Time: timestamp,
},
Calls: []ethereum.CallMsg{
{
From: from,
To: &to,
Value: value,
Gas: gas,
GasFeeCap: maxFeePerGas,
},
},
},
},
Validation: true,
}
results, err := client.SimulateV1(ctx, opts, nil)
if err != nil {
t.Fatalf("SimulateV1 with block overrides failed: %v", err)
}
if len(results) != 1 {
t.Fatalf("expected 1 block result, got %d", len(results))
}
// Verify the timestamp was overridden
if results[0].Timestamp != timestamp {
t.Errorf("expected timestamp %d, got %d", timestamp, results[0].Timestamp)
}
}
func TestSimulateV1WithStateOverrides(t *testing.T) {
backend, _, err := newTestBackend(nil)
if err != nil {
t.Fatalf("Failed to create test backend: %v", err)
}
defer backend.Close()
client := ethclient.NewClient(backend.Attach())
defer client.Close()
ctx := context.Background()
// Get current base fee
header, err := client.HeaderByNumber(ctx, nil)
if err != nil {
t.Fatalf("Failed to get header: %v", err)
}
from := testAddr
to := common.HexToAddress("0x0000000000000000000000000000000000000001")
value := big.NewInt(1000000000000000000) // 1 ETH
gas := uint64(100000)
maxFeePerGas := new(big.Int).Mul(header.BaseFee, big.NewInt(2))
// Override the balance of the 'from' address
balanceStr := "1000000000000000000000"
balance := new(big.Int)
balance.SetString(balanceStr, 10)
stateOverrides := map[common.Address]ethereum.OverrideAccount{
from: {
Balance: balance,
},
}
opts := ethclient.SimulateOptions{
BlockStateCalls: []ethclient.SimulateBlock{
{
StateOverrides: stateOverrides,
Calls: []ethereum.CallMsg{
{
From: from,
To: &to,
Value: value,
Gas: gas,
GasFeeCap: maxFeePerGas,
},
},
},
},
Validation: true,
}
results, err := client.SimulateV1(ctx, opts, nil)
if err != nil {
t.Fatalf("SimulateV1 with state overrides failed: %v", err)
}
if len(results) != 1 {
t.Fatalf("expected 1 block result, got %d", len(results))
}
if results[0].Calls[0].Status != 1 {
t.Errorf("expected status 1 (success), got %d", results[0].Calls[0].Status)
}
}
func TestSimulateV1WithBlockNumberOrHash(t *testing.T) {
backend, _, err := newTestBackend(nil)
if err != nil {
t.Fatalf("Failed to create test backend: %v", err)
}
defer backend.Close()
client := ethclient.NewClient(backend.Attach())
defer client.Close()
ctx := context.Background()
// Get current base fee
header, err := client.HeaderByNumber(ctx, nil)
if err != nil {
t.Fatalf("Failed to get header: %v", err)
}
from := testAddr
to := common.HexToAddress("0x0000000000000000000000000000000000000001")
value := big.NewInt(100)
gas := uint64(100000)
maxFeePerGas := new(big.Int).Mul(header.BaseFee, big.NewInt(2))
opts := ethclient.SimulateOptions{
BlockStateCalls: []ethclient.SimulateBlock{
{
Calls: []ethereum.CallMsg{
{
From: from,
To: &to,
Value: value,
Gas: gas,
GasFeeCap: maxFeePerGas,
},
},
},
},
Validation: true,
}
// Simulate on the latest block
latest := rpc.BlockNumberOrHashWithNumber(rpc.LatestBlockNumber)
results, err := client.SimulateV1(ctx, opts, &latest)
if err != nil {
t.Fatalf("SimulateV1 with latest block failed: %v", err)
}
if len(results) != 1 {
t.Fatalf("expected 1 block result, got %d", len(results))
}
}

View file

@ -0,0 +1,80 @@
// Code generated by github.com/fjl/gencodec. DO NOT EDIT.
package ethclient
import (
"encoding/json"
"math/big"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
)
var _ = (*simulateBlockResultMarshaling)(nil)
// MarshalJSON marshals as JSON.
func (s SimulateBlockResult) MarshalJSON() ([]byte, error) {
type SimulateBlockResult struct {
Number *hexutil.Big `json:"number"`
Hash common.Hash `json:"hash"`
Timestamp hexutil.Uint64 `json:"timestamp"`
GasLimit hexutil.Uint64 `json:"gasLimit"`
GasUsed hexutil.Uint64 `json:"gasUsed"`
FeeRecipient common.Address `json:"miner"`
BaseFeePerGas *hexutil.Big `json:"baseFeePerGas,omitempty"`
Calls []SimulateCallResult `json:"calls"`
}
var enc SimulateBlockResult
enc.Number = (*hexutil.Big)(s.Number)
enc.Hash = s.Hash
enc.Timestamp = hexutil.Uint64(s.Timestamp)
enc.GasLimit = hexutil.Uint64(s.GasLimit)
enc.GasUsed = hexutil.Uint64(s.GasUsed)
enc.FeeRecipient = s.FeeRecipient
enc.BaseFeePerGas = (*hexutil.Big)(s.BaseFeePerGas)
enc.Calls = s.Calls
return json.Marshal(&enc)
}
// UnmarshalJSON unmarshals from JSON.
func (s *SimulateBlockResult) UnmarshalJSON(input []byte) error {
type SimulateBlockResult struct {
Number *hexutil.Big `json:"number"`
Hash *common.Hash `json:"hash"`
Timestamp *hexutil.Uint64 `json:"timestamp"`
GasLimit *hexutil.Uint64 `json:"gasLimit"`
GasUsed *hexutil.Uint64 `json:"gasUsed"`
FeeRecipient *common.Address `json:"miner"`
BaseFeePerGas *hexutil.Big `json:"baseFeePerGas,omitempty"`
Calls []SimulateCallResult `json:"calls"`
}
var dec SimulateBlockResult
if err := json.Unmarshal(input, &dec); err != nil {
return err
}
if dec.Number != nil {
s.Number = (*big.Int)(dec.Number)
}
if dec.Hash != nil {
s.Hash = *dec.Hash
}
if dec.Timestamp != nil {
s.Timestamp = uint64(*dec.Timestamp)
}
if dec.GasLimit != nil {
s.GasLimit = uint64(*dec.GasLimit)
}
if dec.GasUsed != nil {
s.GasUsed = uint64(*dec.GasUsed)
}
if dec.FeeRecipient != nil {
s.FeeRecipient = *dec.FeeRecipient
}
if dec.BaseFeePerGas != nil {
s.BaseFeePerGas = (*big.Int)(dec.BaseFeePerGas)
}
if dec.Calls != nil {
s.Calls = dec.Calls
}
return nil
}

View file

@ -0,0 +1,61 @@
// Code generated by github.com/fjl/gencodec. DO NOT EDIT.
package ethclient
import (
"encoding/json"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/core/types"
)
var _ = (*simulateCallResultMarshaling)(nil)
// MarshalJSON marshals as JSON.
func (s SimulateCallResult) MarshalJSON() ([]byte, error) {
type SimulateCallResult struct {
ReturnValue hexutil.Bytes `json:"returnData"`
Logs []*types.Log `json:"logs"`
GasUsed hexutil.Uint64 `json:"gasUsed"`
Status hexutil.Uint64 `json:"status"`
Error *CallError `json:"error,omitempty"`
}
var enc SimulateCallResult
enc.ReturnValue = s.ReturnValue
enc.Logs = s.Logs
enc.GasUsed = hexutil.Uint64(s.GasUsed)
enc.Status = hexutil.Uint64(s.Status)
enc.Error = s.Error
return json.Marshal(&enc)
}
// UnmarshalJSON unmarshals from JSON.
func (s *SimulateCallResult) UnmarshalJSON(input []byte) error {
type SimulateCallResult struct {
ReturnValue *hexutil.Bytes `json:"returnData"`
Logs []*types.Log `json:"logs"`
GasUsed *hexutil.Uint64 `json:"gasUsed"`
Status *hexutil.Uint64 `json:"status"`
Error *CallError `json:"error,omitempty"`
}
var dec SimulateCallResult
if err := json.Unmarshal(input, &dec); err != nil {
return err
}
if dec.ReturnValue != nil {
s.ReturnValue = *dec.ReturnValue
}
if dec.Logs != nil {
s.Logs = dec.Logs
}
if dec.GasUsed != nil {
s.GasUsed = uint64(*dec.GasUsed)
}
if dec.Status != nil {
s.Status = uint64(*dec.Status)
}
if dec.Error != nil {
s.Error = dec.Error
}
return nil
}

View file

@ -19,7 +19,6 @@ package gethclient
import (
"context"
"encoding/json"
"fmt"
"math/big"
"runtime"
@ -280,97 +279,8 @@ func toCallArg(msg ethereum.CallMsg) interface{} {
return arg
}
// OverrideAccount specifies the state of an account to be overridden.
type OverrideAccount struct {
// Nonce sets nonce of the account. Note: the nonce override will only
// be applied when it is set to a non-zero value.
Nonce uint64
// OverrideAccount is an alias for ethereum.OverrideAccount.
type OverrideAccount = ethereum.OverrideAccount
// Code sets the contract code. The override will be applied
// when the code is non-nil, i.e. setting empty code is possible
// using an empty slice.
Code []byte
// Balance sets the account balance.
Balance *big.Int
// State sets the complete storage. The override will be applied
// when the given map is non-nil. Using an empty map wipes the
// entire contract storage during the call.
State map[common.Hash]common.Hash
// StateDiff allows overriding individual storage slots.
StateDiff map[common.Hash]common.Hash
}
func (a OverrideAccount) MarshalJSON() ([]byte, error) {
type acc struct {
Nonce hexutil.Uint64 `json:"nonce,omitempty"`
Code string `json:"code,omitempty"`
Balance *hexutil.Big `json:"balance,omitempty"`
State interface{} `json:"state,omitempty"`
StateDiff map[common.Hash]common.Hash `json:"stateDiff,omitempty"`
}
output := acc{
Nonce: hexutil.Uint64(a.Nonce),
Balance: (*hexutil.Big)(a.Balance),
StateDiff: a.StateDiff,
}
if a.Code != nil {
output.Code = hexutil.Encode(a.Code)
}
if a.State != nil {
output.State = a.State
}
return json.Marshal(output)
}
// BlockOverrides specifies the set of header fields to override.
type BlockOverrides struct {
// Number overrides the block number.
Number *big.Int
// Difficulty overrides the block difficulty.
Difficulty *big.Int
// Time overrides the block timestamp. Time is applied only when
// it is non-zero.
Time uint64
// GasLimit overrides the block gas limit. GasLimit is applied only when
// it is non-zero.
GasLimit uint64
// Coinbase overrides the block coinbase. Coinbase is applied only when
// it is different from the zero address.
Coinbase common.Address
// Random overrides the block extra data which feeds into the RANDOM opcode.
// Random is applied only when it is a non-zero hash.
Random common.Hash
// BaseFee overrides the block base fee.
BaseFee *big.Int
}
func (o BlockOverrides) MarshalJSON() ([]byte, error) {
type override struct {
Number *hexutil.Big `json:"number,omitempty"`
Difficulty *hexutil.Big `json:"difficulty,omitempty"`
Time hexutil.Uint64 `json:"time,omitempty"`
GasLimit hexutil.Uint64 `json:"gasLimit,omitempty"`
Coinbase *common.Address `json:"feeRecipient,omitempty"`
Random *common.Hash `json:"prevRandao,omitempty"`
BaseFee *hexutil.Big `json:"baseFeePerGas,omitempty"`
}
output := override{
Number: (*hexutil.Big)(o.Number),
Difficulty: (*hexutil.Big)(o.Difficulty),
Time: hexutil.Uint64(o.Time),
GasLimit: hexutil.Uint64(o.GasLimit),
BaseFee: (*hexutil.Big)(o.BaseFee),
}
if o.Coinbase != (common.Address{}) {
output.Coinbase = &o.Coinbase
}
if o.Random != (common.Hash{}) {
output.Random = &o.Random
}
return json.Marshal(output)
}
// BlockOverrides is an alias for ethereum.BlockOverrides.
type BlockOverrides = ethereum.BlockOverrides

1
go.mod
View file

@ -81,6 +81,7 @@ require (
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.7.0 // indirect
github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0 // indirect
github.com/DataDog/zstd v1.4.5 // indirect
github.com/ProjectZKM/Ziren/crates/go-runtime/zkvm_runtime v0.0.0-20251001021608-1fe7b43fc4d6
github.com/StackExchange/wmi v1.2.1 // indirect
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.13 // indirect
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.43 // indirect

2
go.sum
View file

@ -14,6 +14,8 @@ github.com/DataDog/zstd v1.4.5 h1:EndNeuB0l9syBZhut0wns3gV1hL8zX8LIu6ZiVHWLIQ=
github.com/DataDog/zstd v1.4.5/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo=
github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY=
github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU=
github.com/ProjectZKM/Ziren/crates/go-runtime/zkvm_runtime v0.0.0-20251001021608-1fe7b43fc4d6 h1:1zYrtlhrZ6/b6SAjLSfKzWtdgqK0U+HtH/VcBWh1BaU=
github.com/ProjectZKM/Ziren/crates/go-runtime/zkvm_runtime v0.0.0-20251001021608-1fe7b43fc4d6/go.mod h1:ioLG6R+5bUSO1oeGSDxOV3FADARuMoytZCSX6MEMQkI=
github.com/StackExchange/wmi v1.2.1 h1:VIkavFPXSjcnS+O8yTq7NI32k0R5Aj+v39y29VYDOSA=
github.com/StackExchange/wmi v1.2.1/go.mod h1:rcmrprowKIVzvc+NUiLncP2uuArMWLCbu9SBzvHz7e8=
github.com/VictoriaMetrics/fastcache v1.13.0 h1:AW4mheMR5Vd9FkAPUv+NH6Nhw+fmbTMGMsNAoA/+4G0=

View file

@ -19,10 +19,12 @@ package ethereum
import (
"context"
"encoding/json"
"errors"
"math/big"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/core/types"
)
@ -293,3 +295,98 @@ type BlockNumberReader interface {
type ChainIDReader interface {
ChainID(ctx context.Context) (*big.Int, error)
}
// OverrideAccount specifies the state of an account to be overridden.
// It is consumed by state-override variants of call-style RPCs; the JSON
// wire form is produced by its MarshalJSON method.
type OverrideAccount struct {
	// Nonce sets nonce of the account. Note: the nonce override will only
	// be applied when it is set to a non-zero value.
	Nonce uint64

	// Code sets the contract code. The override will be applied
	// when the code is non-nil, i.e. setting empty code is possible
	// using an empty slice.
	Code []byte

	// Balance sets the account balance. A nil balance leaves the
	// account balance untouched.
	Balance *big.Int

	// State sets the complete storage. The override will be applied
	// when the given map is non-nil. Using an empty map wipes the
	// entire contract storage during the call.
	State map[common.Hash]common.Hash

	// StateDiff allows overriding individual storage slots, leaving
	// all other slots of the account unchanged.
	StateDiff map[common.Hash]common.Hash
}
// MarshalJSON encodes the account override into its JSON-RPC wire form.
// Nil Code and State are omitted entirely, while non-nil empty values are
// emitted explicitly so that a "clear this field" override round-trips.
func (a OverrideAccount) MarshalJSON() ([]byte, error) {
	type accountJSON struct {
		Nonce     hexutil.Uint64              `json:"nonce,omitempty"`
		Code      string                      `json:"code,omitempty"`
		Balance   *hexutil.Big                `json:"balance,omitempty"`
		State     interface{}                 `json:"state,omitempty"`
		StateDiff map[common.Hash]common.Hash `json:"stateDiff,omitempty"`
	}
	enc := accountJSON{
		Nonce:     hexutil.Uint64(a.Nonce),
		Balance:   (*hexutil.Big)(a.Balance),
		StateDiff: a.StateDiff,
	}
	// Distinguish nil (no override) from empty (override with empty value).
	if a.Code != nil {
		enc.Code = hexutil.Encode(a.Code)
	}
	if a.State != nil {
		enc.State = a.State
	}
	return json.Marshal(enc)
}
// BlockOverrides specifies the set of header fields to override when
// executing a call-style RPC. Zero-valued fields leave the corresponding
// header field unchanged, as noted per field below.
type BlockOverrides struct {
	// Number overrides the block number. A nil value keeps the
	// actual block number.
	Number *big.Int
	// Difficulty overrides the block difficulty. A nil value keeps
	// the actual difficulty.
	Difficulty *big.Int
	// Time overrides the block timestamp. Time is applied only when
	// it is non-zero.
	Time uint64
	// GasLimit overrides the block gas limit. GasLimit is applied only when
	// it is non-zero.
	GasLimit uint64
	// Coinbase overrides the block coinbase. Coinbase is applied only when
	// it is different from the zero address.
	Coinbase common.Address
	// Random overrides the block extra data which feeds into the RANDOM opcode.
	// Random is applied only when it is a non-zero hash.
	Random common.Hash
	// BaseFee overrides the block base fee. A nil value keeps the
	// actual base fee.
	BaseFee *big.Int
}
// MarshalJSON encodes the block overrides using the field names expected by
// the execution-layer APIs (feeRecipient, prevRandao, baseFeePerGas, ...).
// Zero values are omitted; Coinbase and Random are emitted only when non-zero.
func (o BlockOverrides) MarshalJSON() ([]byte, error) {
	type blockOverridesJSON struct {
		Number     *hexutil.Big    `json:"number,omitempty"`
		Difficulty *hexutil.Big    `json:"difficulty,omitempty"`
		Time       hexutil.Uint64  `json:"time,omitempty"`
		GasLimit   hexutil.Uint64  `json:"gasLimit,omitempty"`
		Coinbase   *common.Address `json:"feeRecipient,omitempty"`
		Random     *common.Hash    `json:"prevRandao,omitempty"`
		BaseFee    *hexutil.Big    `json:"baseFeePerGas,omitempty"`
	}
	enc := blockOverridesJSON{
		Number:     (*hexutil.Big)(o.Number),
		Difficulty: (*hexutil.Big)(o.Difficulty),
		Time:       hexutil.Uint64(o.Time),
		GasLimit:   hexutil.Uint64(o.GasLimit),
		BaseFee:    (*hexutil.Big)(o.BaseFee),
	}
	// Pointer fields combined with omitempty drop the zero-valued overrides.
	var zeroAddr common.Address
	if o.Coinbase != zeroAddr {
		enc.Coinbase = &o.Coinbase
	}
	var zeroHash common.Hash
	if o.Random != zeroHash {
		enc.Random = &o.Random
	}
	return json.Marshal(enc)
}

View file

@ -55,6 +55,7 @@ import (
const estimateGasErrorRatio = 0.015
var errBlobTxNotSupported = errors.New("signing blob transactions not supported")
var errSubClosed = errors.New("chain subscription closed")
// EthereumAPI provides an API to access Ethereum related information.
type EthereumAPI struct {
@ -1666,6 +1667,103 @@ func (api *TransactionAPI) SendRawTransaction(ctx context.Context, input hexutil
return SubmitTransaction(ctx, api.b, tx)
}
// SendRawTransactionSync will add the signed transaction to the transaction pool
// and wait until the transaction has been included in a block and return the receipt, or the timeout.
//
// The optional timeoutMs parameter (milliseconds) bounds the server-side wait;
// it is clamped to the backend's configured maximum and falls back to the
// backend default when absent or zero. On timeout a structured error carrying
// the transaction hash is returned so callers can keep polling by hash.
func (api *TransactionAPI) SendRawTransactionSync(ctx context.Context, input hexutil.Bytes, timeoutMs *hexutil.Uint64) (map[string]interface{}, error) {
	tx := new(types.Transaction)
	if err := tx.UnmarshalBinary(input); err != nil {
		return nil, err
	}
	// Convert legacy blob transaction proofs.
	// TODO: remove in go-ethereum v1.17.x
	if sc := tx.BlobTxSidecar(); sc != nil {
		exp := api.currentBlobSidecarVersion()
		if sc.Version == types.BlobSidecarVersion0 && exp == types.BlobSidecarVersion1 {
			if err := sc.ToV1(); err != nil {
				return nil, fmt.Errorf("blob sidecar conversion failed: %v", err)
			}
			tx = tx.WithBlobTxSidecar(sc)
		}
	}
	// Subscribe before submitting so an inclusion event cannot be missed in
	// the window between submission and the monitoring loop below.
	ch := make(chan core.ChainEvent, 128)
	sub := api.b.SubscribeChainEvent(ch)
	defer sub.Unsubscribe()

	hash, err := SubmitTransaction(ctx, api.b, tx)
	if err != nil {
		return nil, err
	}
	// Resolve the effective wait window: caller's request, clamped to the
	// backend-configured maximum, defaulting when absent or zero.
	var (
		maxTimeout     = api.b.RPCTxSyncMaxTimeout()
		defaultTimeout = api.b.RPCTxSyncDefaultTimeout()
		timeout        = defaultTimeout
	)
	if timeoutMs != nil && *timeoutMs > 0 {
		req := time.Duration(*timeoutMs) * time.Millisecond
		if req > maxTimeout {
			timeout = maxTimeout
		} else {
			timeout = req
		}
	}
	receiptCtx, cancel := context.WithTimeout(ctx, timeout)
	defer cancel()

	// Fast path: the transaction may already be included (e.g. resubmission).
	if r, err := api.GetTransactionReceipt(receiptCtx, hash); err == nil && r != nil {
		return r, nil
	}
	// Monitor the receipts
	for {
		select {
		case <-receiptCtx.Done():
			// If server-side wait window elapsed, return the structured timeout.
			if errors.Is(receiptCtx.Err(), context.DeadlineExceeded) {
				return nil, &txSyncTimeoutError{
					msg:  fmt.Sprintf("The transaction was added to the transaction pool but wasn't processed in %v", timeout),
					hash: hash,
				}
			}
			// Caller cancellation: propagate the context error as-is.
			return nil, receiptCtx.Err()
		case err, ok := <-sub.Err():
			if !ok {
				return nil, errSubClosed
			}
			return nil, err
		case ev, ok := <-ch:
			if !ok {
				return nil, errSubClosed
			}
			rs, txs := ev.Receipts, ev.Transactions
			// Skip events without matching receipt/transaction lists.
			if len(rs) == 0 || len(rs) != len(txs) {
				continue
			}
			for i := range rs {
				if rs[i].TxHash == hash {
					// Marshal directly only when the receipt carries full
					// block metadata; otherwise fall back to canonical lookup.
					if rs[i].BlockNumber != nil && rs[i].BlockHash != (common.Hash{}) {
						signer := types.LatestSigner(api.b.ChainConfig())
						return MarshalReceipt(
							rs[i],
							rs[i].BlockHash,
							rs[i].BlockNumber.Uint64(),
							signer,
							txs[i],
							int(rs[i].TransactionIndex),
						), nil
					}
					return api.GetTransactionReceipt(receiptCtx, hash)
				}
			}
		}
	}
}
// Sign calculates an ECDSA signature for:
// keccak256("\x19Ethereum Signed Message:\n" + len(message) + message).
//

View file

@ -440,6 +440,19 @@ type testBackend struct {
pending *types.Block
pendingReceipts types.Receipts
chainFeed *event.Feed
autoMine bool
sentTx *types.Transaction
sentTxHash common.Hash
syncDefaultTimeout time.Duration
syncMaxTimeout time.Duration
}
// fakeBlockHash derives a deterministic pseudo block hash for the given
// transaction hash; the autoMine test backend uses it to fabricate receipts
// and canonical lookups that agree with each other.
func fakeBlockHash(txh common.Hash) common.Hash {
	return crypto.Keccak256Hash([]byte("testblock"), txh.Bytes())
}
func newTestBackend(t *testing.T, n int, gspec *core.Genesis, engine consensus.Engine, generator func(i int, b *core.BlockGen)) *testBackend {
@ -466,6 +479,7 @@ func newTestBackend(t *testing.T, n int, gspec *core.Genesis, engine consensus.E
acc: acc,
pending: blocks[n],
pendingReceipts: receipts[n],
chainFeed: new(event.Feed),
}
return backend
}
@ -587,19 +601,64 @@ func (b testBackend) GetEVM(ctx context.Context, state *state.StateDB, header *t
return vm.NewEVM(context, state, b.chain.Config(), *vmConfig)
}
func (b testBackend) SubscribeChainEvent(ch chan<- core.ChainEvent) event.Subscription {
panic("implement me")
return b.chainFeed.Subscribe(ch)
}
// SubscribeChainHeadEvent is not exercised by the tests using this backend.
func (b testBackend) SubscribeChainHeadEvent(ch chan<- core.ChainHeadEvent) event.Subscription {
	panic("implement me")
}
func (b testBackend) SendTx(ctx context.Context, signedTx *types.Transaction) error {
panic("implement me")
// SendTx records the submitted transaction and, when autoMine is enabled,
// immediately fabricates a successful receipt at head+1 and publishes it on
// the chain event feed so sync-send waiters unblock without a real miner.
// It never returns an error.
func (b *testBackend) SendTx(ctx context.Context, tx *types.Transaction) error {
	b.sentTx = tx
	b.sentTxHash = tx.Hash()
	if b.autoMine {
		// Synthesize a "mined" receipt at head+1
		num := b.chain.CurrentHeader().Number.Uint64() + 1
		receipt := &types.Receipt{
			TxHash:            tx.Hash(),
			Status:            types.ReceiptStatusSuccessful,
			BlockHash:         fakeBlockHash(tx.Hash()),
			BlockNumber:       new(big.Int).SetUint64(num),
			TransactionIndex:  0,
			CumulativeGasUsed: 21000,
			GasUsed:           21000,
		}
		// Broadcast a ChainEvent that includes the receipts and txs
		b.chainFeed.Send(core.ChainEvent{
			Header: &types.Header{
				Number: new(big.Int).SetUint64(num),
			},
			Receipts:     types.Receipts{receipt},
			Transactions: types.Transactions{tx},
		})
	}
	return nil
}
func (b testBackend) GetCanonicalTransaction(txHash common.Hash) (bool, *types.Transaction, common.Hash, uint64, uint64) {
// GetCanonicalTransaction reports whether txHash is canonically included and,
// if so, returns the transaction together with its block hash, block number
// and index. The auto-mined transaction is reported as included at head+1 in
// the synthetic block from fakeBlockHash; everything else is resolved from
// the database.
func (b *testBackend) GetCanonicalTransaction(txHash common.Hash) (bool, *types.Transaction, common.Hash, uint64, uint64) {
	// Treat the auto-mined tx as canonically placed at head+1.
	if b.autoMine && txHash == b.sentTxHash {
		num := b.chain.CurrentHeader().Number.Uint64() + 1
		return true, b.sentTx, fakeBlockHash(txHash), num, 0
	}
	tx, blockHash, blockNumber, index := rawdb.ReadCanonicalTransaction(b.db, txHash)
	return tx != nil, tx, blockHash, blockNumber, index
}
func (b testBackend) GetCanonicalReceipt(tx *types.Transaction, blockHash common.Hash, blockNumber, blockIndex uint64) (*types.Receipt, error) {
// GetCanonicalReceipt returns the receipt for the given canonical placement.
// When autoMine is on and the query matches the synthetic placement produced
// by SendTx/GetCanonicalTransaction, a matching fabricated receipt is
// returned; otherwise the lookup goes to the real chain.
func (b *testBackend) GetCanonicalReceipt(tx *types.Transaction, blockHash common.Hash, blockNumber, blockIndex uint64) (*types.Receipt, error) {
	if b.autoMine && tx != nil && tx.Hash() == b.sentTxHash &&
		blockHash == fakeBlockHash(tx.Hash()) &&
		blockIndex == 0 &&
		blockNumber == b.chain.CurrentHeader().Number.Uint64()+1 {
		return &types.Receipt{
			Type:              tx.Type(),
			Status:            types.ReceiptStatusSuccessful,
			CumulativeGasUsed: 21000,
			GasUsed:           21000,
			EffectiveGasPrice: big.NewInt(1),
			BlockHash:         blockHash,
			BlockNumber:       new(big.Int).SetUint64(blockNumber),
			TransactionIndex:  0,
			TxHash:            tx.Hash(),
		}, nil
	}
	return b.chain.GetCanonicalReceipt(tx, blockHash, blockNumber, blockIndex)
}
func (b testBackend) TxIndexDone() bool {
@ -3889,3 +3948,109 @@ func (b configTimeBackend) HeaderByNumber(_ context.Context, n rpc.BlockNumber)
func (b configTimeBackend) CurrentHeader() *types.Header {
return &types.Header{Time: b.time}
}
// RPCTxSyncDefaultTimeout returns the configured default wait window for
// sync transaction submission, falling back to 2s when unset.
func (b *testBackend) RPCTxSyncDefaultTimeout() time.Duration {
	if d := b.syncDefaultTimeout; d != 0 {
		return d
	}
	return 2 * time.Second
}
// RPCTxSyncMaxTimeout returns the configured maximum wait window for sync
// transaction submission, falling back to 5 minutes when unset.
func (b *testBackend) RPCTxSyncMaxTimeout() time.Duration {
	if d := b.syncMaxTimeout; d != 0 {
		return d
	}
	return 5 * time.Minute
}
// RPCTxSyncDefaultTimeout returns a fixed 2s default wait window for the mock backend.
func (b *backendMock) RPCTxSyncDefaultTimeout() time.Duration { return 2 * time.Second }

// RPCTxSyncMaxTimeout returns a fixed 5m maximum wait window for the mock backend.
func (b *backendMock) RPCTxSyncMaxTimeout() time.Duration { return 5 * time.Minute }
// makeSignedRaw fills and signs a simple value transfer from `from` to `to`
// via the API's FillTransaction/SignTransaction helpers, returning the
// RLP-encoded raw bytes alongside the signed transaction. Any failure
// aborts the test.
func makeSignedRaw(t *testing.T, api *TransactionAPI, from, to common.Address, value *big.Int) (hexutil.Bytes, *types.Transaction) {
	t.Helper()
	fillRes, err := api.FillTransaction(context.Background(), TransactionArgs{
		From:  &from,
		To:    &to,
		Value: (*hexutil.Big)(value),
	})
	if err != nil {
		t.Fatalf("FillTransaction failed: %v", err)
	}
	signRes, err := api.SignTransaction(context.Background(), argsFromTransaction(fillRes.Tx, from))
	if err != nil {
		t.Fatalf("SignTransaction failed: %v", err)
	}
	return signRes.Raw, signRes.Tx
}
// makeSelfSignedRaw is a convenience wrapper around makeSignedRaw producing
// a 0-ETH self-transfer from addr to itself.
func makeSelfSignedRaw(t *testing.T, api *TransactionAPI, addr common.Address) (hexutil.Bytes, *types.Transaction) {
	return makeSignedRaw(t, api, addr, addr, big.NewInt(0))
}
// TestSendRawTransactionSync_Success checks the happy path: with the backend
// auto-mining submitted transactions, SendRawTransactionSync must return a
// marshalled receipt that carries block metadata.
func TestSendRawTransactionSync_Success(t *testing.T) {
	t.Parallel()

	genesis := &core.Genesis{
		Config: params.TestChainConfig,
		Alloc:  types.GenesisAlloc{},
	}
	b := newTestBackend(t, 0, genesis, ethash.NewFaker(), nil)
	b.autoMine = true // immediately “mines” the tx in-memory

	api := NewTransactionAPI(b, new(AddrLocker))
	raw, _ := makeSelfSignedRaw(t, api, b.acc.Address)

	receipt, err := api.SendRawTransactionSync(context.Background(), raw, nil)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if receipt == nil {
		t.Fatalf("expected non-nil receipt")
	}
	// A receipt without blockNumber would indicate the fast path returned
	// a pending/partial receipt rather than an included one.
	if _, ok := receipt["blockNumber"]; !ok {
		t.Fatalf("expected blockNumber in receipt, got %#v", receipt)
	}
}
// TestSendRawTransactionSync_Timeout checks the timeout path: with mining
// disabled the call must fail after the requested window with the structured
// timeout error carrying the submitted transaction's hash as error data.
func TestSendRawTransactionSync_Timeout(t *testing.T) {
	t.Parallel()

	genesis := &core.Genesis{
		Config: params.TestChainConfig,
		Alloc:  types.GenesisAlloc{},
	}
	b := newTestBackend(t, 0, genesis, ethash.NewFaker(), nil)
	b.autoMine = false // don't mine, should time out

	api := NewTransactionAPI(b, new(AddrLocker))
	raw, _ := makeSelfSignedRaw(t, api, b.acc.Address)

	timeout := hexutil.Uint64(200) // 200ms
	receipt, err := api.SendRawTransactionSync(context.Background(), raw, &timeout)
	if receipt != nil {
		t.Fatalf("expected nil receipt, got %#v", receipt)
	}
	if err == nil {
		t.Fatalf("expected timeout error, got nil")
	}
	// assert error shape & data (hash)
	var de interface {
		ErrorCode() int
		ErrorData() interface{}
	}
	if !errors.As(err, &de) {
		t.Fatalf("expected data error with code/data, got %T %v", err, err)
	}
	if de.ErrorCode() != errCodeTxSyncTimeout {
		t.Fatalf("expected code %d, got %d", errCodeTxSyncTimeout, de.ErrorCode())
	}
	// Recompute the expected hash from the raw bytes and compare it to the
	// hash reported in the error data.
	tx := new(types.Transaction)
	if e := tx.UnmarshalBinary(raw); e != nil {
		t.Fatal(e)
	}
	if got, want := de.ErrorData(), tx.Hash().Hex(); got != want {
		t.Fatalf("expected ErrorData=%s, got %v", want, got)
	}
}

View file

@ -53,6 +53,8 @@ type Backend interface {
RPCEVMTimeout() time.Duration // global timeout for eth_call over rpc: DoS protection
RPCTxFeeCap() float64 // global tx fee cap for all transaction related APIs
UnprotectedAllowed() bool // allows only for EIP155 transactions.
RPCTxSyncDefaultTimeout() time.Duration
RPCTxSyncMaxTimeout() time.Duration
// Blockchain API
SetHead(number uint64)

View file

@ -21,6 +21,7 @@ import (
"fmt"
"github.com/ethereum/go-ethereum/accounts/abi"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/vm"
@ -33,6 +34,11 @@ type revertError struct {
reason string // revert reason hex encoded
}
// txSyncTimeoutError is returned by eth_sendRawTransactionSync when the
// transaction was accepted into the pool but no receipt appeared within the
// server-side wait window. It carries the transaction hash as error data so
// clients can continue polling for the receipt.
type txSyncTimeoutError struct {
	msg  string      // human-readable timeout description
	hash common.Hash // hash of the submitted transaction
}
// ErrorCode returns the JSON error code for a revert.
// See: https://ethereum.org/en/developers/docs/apis/json-rpc/#error-codes
func (e *revertError) ErrorCode() int {
@ -108,6 +114,7 @@ const (
errCodeInvalidParams = -32602
errCodeReverted = -32000
errCodeVMError = -32015
errCodeTxSyncTimeout = 4
)
func txValidationError(err error) *invalidTxError {
@ -168,3 +175,7 @@ type blockGasLimitReachedError struct{ message string }
func (e *blockGasLimitReachedError) Error() string { return e.message }
func (e *blockGasLimitReachedError) ErrorCode() int { return errCodeBlockGasLimitReached }
// Error returns the human-readable timeout message.
func (e *txSyncTimeoutError) Error() string { return e.msg }

// ErrorCode returns the JSON-RPC error code used for sync-send timeouts.
func (e *txSyncTimeoutError) ErrorCode() int { return errCodeTxSyncTimeout }

// ErrorData exposes the transaction hash so callers can poll for the receipt.
func (e *txSyncTimeoutError) ErrorData() interface{} { return e.hash.Hex() }

View file

@ -201,7 +201,7 @@ loop:
if !isFunc {
panic(re.vm.ToValue("js error: timer/timeout callback is not a function"))
}
call(goja.Null(), timer.call.Arguments...)
call(goja.Null(), timer.call.Arguments[2:]...)
_, inreg := registry[timer] // when clearInterval is called from within the callback don't reset it
if timer.interval && inreg {

View file

@ -22,6 +22,7 @@ import (
"path/filepath"
"runtime"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/p2p"
"github.com/ethereum/go-ethereum/p2p/nat"
"github.com/ethereum/go-ethereum/rpc"
@ -90,7 +91,7 @@ func DefaultDataDir() string {
// is non-empty, use it, otherwise DTRT and check %LOCALAPPDATA%.
fallback := filepath.Join(home, "AppData", "Roaming", "Ethereum")
appdata := windowsAppData()
if appdata == "" || isNonEmptyDir(fallback) {
if appdata == "" || common.IsNonEmptyDir(fallback) {
return fallback
}
return filepath.Join(appdata, "Ethereum")
@ -113,16 +114,6 @@ func windowsAppData() string {
return v
}
func isNonEmptyDir(dir string) bool {
f, err := os.Open(dir)
if err != nil {
return false
}
names, _ := f.Readdir(1)
f.Close()
return len(names) > 0
}
func homeDir() string {
if home := os.Getenv("HOME"); home != "" {
return home

View file

@ -490,6 +490,11 @@ func (srv *Server) setupDiscovery() error {
}
srv.discv5, err = discover.ListenV5(sconn, srv.localnode, cfg)
if err != nil {
// Clean up v4 if v5 setup fails.
if srv.discv4 != nil {
srv.discv4.Close()
srv.discv4 = nil
}
return err
}
}
@ -813,7 +818,9 @@ func (srv *Server) listenLoop() {
time.Sleep(time.Millisecond * 200)
continue
} else if err != nil {
srv.log.Debug("Read error", "err", err)
if !errors.Is(err, net.ErrClosed) {
srv.log.Debug("Read error", "err", err)
}
slots <- struct{}{}
return
}

View file

@ -579,6 +579,33 @@ func TestServerInboundThrottle(t *testing.T) {
}
}
// TestServerDiscoveryV5FailureRollsBackV4 checks that when discovery v5 setup
// fails during Server.Start, the already-started discovery v4 instance is
// closed and cleared rather than leaked.
func TestServerDiscoveryV5FailureRollsBackV4(t *testing.T) {
	badBootstrap := enode.NewV4(&newkey().PublicKey, net.ParseIP("127.0.0.1"), 30303, 0) // invalid V5 of a V4 node
	srv := &Server{
		Config: Config{
			PrivateKey:       newkey(),
			ListenAddr:       "",
			DiscAddr:         "127.0.0.1:0",
			MaxPeers:         5,
			DiscoveryV4:      true,
			DiscoveryV5:      true,
			BootstrapNodesV5: []*enode.Node{badBootstrap},
			Logger:           testlog.Logger(t, log.LvlTrace),
		},
	}
	err := srv.Start()
	if err == nil {
		t.Fatal("expected discovery v5 startup failure")
	}
	if !strings.Contains(err.Error(), "bad bootstrap node") {
		t.Fatalf("unexpected error: %v", err)
	}
	// v4 must have been torn down by the failed v5 setup.
	if srv.DiscoveryV4() != nil {
		t.Fatal("discovery v4 not cleaned after failure")
	}
	srv.Stop()
}
func listenFakeAddr(network, laddr string, remoteAddr net.Addr) (net.Listener, error) {
l, err := net.Listen(network, laddr)
if err == nil {

View file

@ -61,11 +61,17 @@ var (
ShanghaiTime: newUint64(1681338455),
CancunTime: newUint64(1710338135),
PragueTime: newUint64(1746612311),
OsakaTime: newUint64(1764798551),
BPO1Time: newUint64(1765290071),
BPO2Time: newUint64(1767747671),
DepositContractAddress: common.HexToAddress("0x00000000219ab540356cbb839cbe05303d7705fa"),
Ethash: new(EthashConfig),
BlobScheduleConfig: &BlobScheduleConfig{
Cancun: DefaultCancunBlobConfig,
Prague: DefaultPragueBlobConfig,
Osaka: DefaultOsakaBlobConfig,
BPO1: DefaultBPO1BlobConfig,
BPO2: DefaultBPO2BlobConfig,
},
}
// HoleskyChainConfig contains the chain parameters to run a node on the Holesky test network.
@ -225,9 +231,11 @@ var (
CancunTime: newUint64(0),
TerminalTotalDifficulty: big.NewInt(0),
PragueTime: newUint64(0),
OsakaTime: newUint64(0),
BlobScheduleConfig: &BlobScheduleConfig{
Cancun: DefaultCancunBlobConfig,
Prague: DefaultPragueBlobConfig,
Osaka: DefaultOsakaBlobConfig,
},
}

View file

@ -32,7 +32,6 @@ import (
)
var (
ErrBadResult = errors.New("bad result in JSON-RPC response")
ErrClientQuit = errors.New("client is closed")
ErrNoResult = errors.New("JSON-RPC response has no result")
ErrMissingBatchResponse = errors.New("response batch did not contain a response to this call")

View file

@ -973,7 +973,7 @@ func (l *flakeyListener) Accept() (net.Conn, error) {
c, err := l.Listener.Accept()
if err == nil {
timeout := time.Duration(rand.Int63n(int64(l.maxKillTimeout)))
timeout := max(time.Millisecond*10, time.Duration(rand.Int63n(int64(l.maxKillTimeout))))
time.AfterFunc(timeout, func() {
log.Debug(fmt.Sprintf("killing conn %v after %v", c.LocalAddr(), timeout))
c.Close()

View file

@ -211,7 +211,8 @@ func (t *TransitionTrie) UpdateStem(key []byte, values [][]byte) error {
func (t *TransitionTrie) Copy() *TransitionTrie {
return &TransitionTrie{
overlay: t.overlay.Copy(),
base: t.base.Copy(),
// base is immutable, so there is no need to copy it
base: t.base,
storage: t.storage,
}
}

View file

@ -25,10 +25,10 @@ import (
)
const (
indexBlockDescSize = 14 // The size of index block descriptor
indexBlockEntriesCap = 4096 // The maximum number of entries can be grouped in a block
indexBlockRestartLen = 256 // The restart interval length of index block
historyIndexBatch = 1_000_000 // The number of state history indexes for constructing or deleting as batch
indexBlockDescSize = 14 // The size of index block descriptor
indexBlockEntriesCap = 4096 // The maximum number of entries can be grouped in a block
indexBlockRestartLen = 256 // The restart interval length of index block
historyIndexBatch = 512 * 1024 // The number of state history indexes for constructing or deleting as batch
)
// indexBlockDesc represents a descriptor for an index block, which contains a

View file

@ -40,6 +40,11 @@ const (
stateHistoryIndexVersion = stateHistoryIndexV0 // the current state index version
trienodeHistoryIndexV0 = uint8(0) // initial version of trienode index structure
trienodeHistoryIndexVersion = trienodeHistoryIndexV0 // the current trienode index version
// estimations for calculating the batch size for atomic database commit
estimatedStateHistoryIndexSize = 3 // The average size of each state history index entry is approximately 23 bytes
estimatedTrienodeHistoryIndexSize = 3 // The average size of each trienode history index entry is approximately 2-3 bytes
estimatedIndexBatchSizeFactor = 32 // The factor counts for the write amplification for each entry
)
// indexVersion returns the latest index version for the given history type.
@ -150,6 +155,22 @@ func (b *batchIndexer) process(h history, id uint64) error {
return b.finish(false)
}
// makeBatch constructs a database batch based on the number of pending entries.
// The batch size is roughly estimated to minimize repeated resizing rounds,
// as accurately predicting the exact size is technically challenging.
func (b *batchIndexer) makeBatch() ethdb.Batch {
	// Pick the per-entry size estimate matching the history type.
	var size int
	switch b.typ {
	case typeStateHistory:
		size = estimatedStateHistoryIndexSize
	case typeTrienodeHistory:
		size = estimatedTrienodeHistoryIndexSize
	default:
		// Unknown history types indicate a programming error.
		panic(fmt.Sprintf("unknown history type %d", b.typ))
	}
	// Scale by the write-amplification factor and the pending entry count.
	return b.db.NewBatchWithSize(size * estimatedIndexBatchSizeFactor * b.pending)
}
// finish writes the accumulated state indexes into the disk if either the
// memory limitation is reached or it's requested forcibly.
func (b *batchIndexer) finish(force bool) error {
@ -160,7 +181,7 @@ func (b *batchIndexer) finish(force bool) error {
return nil
}
var (
batch = b.db.NewBatch()
batch = b.makeBatch()
batchMu sync.RWMutex
start = time.Now()
eg errgroup.Group

View file

@ -22,7 +22,6 @@ import (
"fmt"
"iter"
"maps"
"math"
"slices"
"sort"
"time"
@ -202,17 +201,6 @@ func (h *trienodeHistory) encode() ([]byte, []byte, []byte, error) {
binary.Write(&headerSection, binary.BigEndian, h.meta.block) // 8 byte
for _, owner := range h.owners {
// Fill the header section with offsets at key and value section
headerSection.Write(owner.Bytes()) // 32 bytes
binary.Write(&headerSection, binary.BigEndian, uint32(keySection.Len())) // 4 bytes
// The offset to the value section is theoretically unnecessary, since the
// individual value offset is already tracked in the key section. However,
// we still keep it here for two reasons:
// - It's cheap to store (only 4 bytes for each trie).
// - It can be useful for decoding the trie data when key is not required (e.g., in hash mode).
binary.Write(&headerSection, binary.BigEndian, uint32(valueSection.Len())) // 4 bytes
// Fill the key section with node index
var (
prevKey []byte
@ -266,6 +254,21 @@ func (h *trienodeHistory) encode() ([]byte, []byte, []byte, error) {
if _, err := keySection.Write(trailer); err != nil {
return nil, nil, nil, err
}
// Fill the header section with the offsets of the key and value sections.
// Note that the key/value offsets are intentionally tracked *after* encoding
// them into their respective sections, ensuring each offset refers to the end
// position. For n trie chunks, n offset pairs are sufficient to uniquely locate
// the corresponding data.
headerSection.Write(owner.Bytes()) // 32 bytes
binary.Write(&headerSection, binary.BigEndian, uint32(keySection.Len())) // 4 bytes
// The offset to the value section is theoretically unnecessary, since the
// individual value offset is already tracked in the key section. However,
// we still keep it here for two reasons:
// - It's cheap to store (only 4 bytes for each trie).
// - It can be useful for decoding the trie data when key is not required (e.g., in hash mode).
binary.Write(&headerSection, binary.BigEndian, uint32(valueSection.Len())) // 4 bytes
}
return headerSection.Bytes(), keySection.Bytes(), valueSection.Bytes(), nil
}
@ -370,11 +373,15 @@ func decodeSingle(keySection []byte, onValue func([]byte, int, int) error) ([]st
for keyOff < keyLimit {
// Validate the key and value offsets within the single trie data chunk
if items%trienodeDataBlockRestartLen == 0 {
if keyOff != int(keyOffsets[items/trienodeDataBlockRestartLen]) {
return nil, fmt.Errorf("key offset is not matched, recorded: %d, want: %d", keyOffsets[items/trienodeDataBlockRestartLen], keyOff)
restartIndex := items / trienodeDataBlockRestartLen
if restartIndex >= len(keyOffsets) {
return nil, fmt.Errorf("restart index out of range: %d, available restarts: %d", restartIndex, len(keyOffsets))
}
if valOff != int(valOffsets[items/trienodeDataBlockRestartLen]) {
return nil, fmt.Errorf("value offset is not matched, recorded: %d, want: %d", valOffsets[items/trienodeDataBlockRestartLen], valOff)
if keyOff != int(keyOffsets[restartIndex]) {
return nil, fmt.Errorf("key offset is not matched, recorded: %d, want: %d", keyOffsets[restartIndex], keyOff)
}
if valOff != int(valOffsets[restartIndex]) {
return nil, fmt.Errorf("value offset is not matched, recorded: %d, want: %d", valOffsets[restartIndex], valOff)
}
}
// Resolve the entry from key section
@ -471,22 +478,22 @@ func (h *trienodeHistory) decode(header []byte, keySection []byte, valueSection
for i := range len(owners) {
// Resolve the boundary of key section
keyStart := keyOffsets[i]
keyLimit := len(keySection)
if i != len(owners)-1 {
keyLimit = int(keyOffsets[i+1])
var keyStart, keyLimit uint32
if i != 0 {
keyStart = keyOffsets[i-1]
}
if int(keyStart) > len(keySection) || keyLimit > len(keySection) {
keyLimit = keyOffsets[i]
if int(keyStart) > len(keySection) || int(keyLimit) > len(keySection) {
return fmt.Errorf("invalid key offsets: keyStart: %d, keyLimit: %d, size: %d", keyStart, keyLimit, len(keySection))
}
// Resolve the boundary of value section
valStart := valueOffsets[i]
valLimit := len(valueSection)
if i != len(owners)-1 {
valLimit = int(valueOffsets[i+1])
var valStart, valLimit uint32
if i != 0 {
valStart = valueOffsets[i-1]
}
if int(valStart) > len(valueSection) || valLimit > len(valueSection) {
valLimit = valueOffsets[i]
if int(valStart) > len(valueSection) || int(valLimit) > len(valueSection) {
return fmt.Errorf("invalid value offsets: valueStart: %d, valueLimit: %d, size: %d", valStart, valLimit, len(valueSection))
}
@ -506,33 +513,27 @@ type iRange struct {
limit uint32
}
// len returns the number of bytes covered by the range (limit - start).
func (ir iRange) len() uint32 {
	return ir.limit - ir.start
}
// singleTrienodeHistoryReader provides read access to a single trie within the
// trienode history. It stores an offset to the trie's position in the history,
// along with a set of per-node offsets that can be resolved on demand.
type singleTrienodeHistoryReader struct {
	id                   uint64              // id of the trienode history this trie belongs to
	reader               ethdb.AncientReader // reader for resolving history data from the freezer
	valueRange           iRange              // value range within the global value section
	valueInternalOffsets map[string]iRange   // value offset within the single trie data
}
func newSingleTrienodeHistoryReader(id uint64, reader ethdb.AncientReader, keyRange iRange, valueRange iRange) (*singleTrienodeHistoryReader, error) {
// TODO(rjl493456442) partial freezer read should be supported
keyData, err := rawdb.ReadTrienodeHistoryKeySection(reader, id)
keyData, err := rawdb.ReadTrienodeHistoryKeySection(reader, id, uint64(keyRange.start), uint64(keyRange.len()))
if err != nil {
return nil, err
}
keyStart := int(keyRange.start)
keyLimit := int(keyRange.limit)
if keyRange.limit == math.MaxUint32 {
keyLimit = len(keyData)
}
if len(keyData) < keyStart || len(keyData) < keyLimit {
return nil, fmt.Errorf("key section too short, start: %d, limit: %d, size: %d", keyStart, keyLimit, len(keyData))
}
valueOffsets := make(map[string]iRange)
_, err = decodeSingle(keyData[keyStart:keyLimit], func(key []byte, start int, limit int) error {
_, err = decodeSingle(keyData, func(key []byte, start int, limit int) error {
valueOffsets[string(key)] = iRange{
start: uint32(start),
limit: uint32(limit),
@ -556,20 +557,7 @@ func (sr *singleTrienodeHistoryReader) read(path string) ([]byte, error) {
if !exists {
return nil, fmt.Errorf("trienode %v not found", []byte(path))
}
// TODO(rjl493456442) partial freezer read should be supported
valueData, err := rawdb.ReadTrienodeHistoryValueSection(sr.reader, sr.id)
if err != nil {
return nil, err
}
if len(valueData) < int(sr.valueRange.start) {
return nil, fmt.Errorf("value section too short, start: %d, size: %d", sr.valueRange.start, len(valueData))
}
entryStart := sr.valueRange.start + offset.start
entryLimit := sr.valueRange.start + offset.limit
if len(valueData) < int(entryStart) || len(valueData) < int(entryLimit) {
return nil, fmt.Errorf("value section too short, start: %d, limit: %d, size: %d", entryStart, entryLimit, len(valueData))
}
return valueData[int(entryStart):int(entryLimit)], nil
return rawdb.ReadTrienodeHistoryValueSection(sr.reader, sr.id, uint64(sr.valueRange.start+offset.start), uint64(offset.len()))
}
// trienodeHistoryReader provides read access to node data in the trie node history.
@ -610,27 +598,23 @@ func (r *trienodeHistoryReader) decodeHeader() error {
}
for i, owner := range owners {
// Decode the key range for this trie chunk
var keyLimit uint32
if i == len(owners)-1 {
keyLimit = math.MaxUint32
} else {
keyLimit = keyOffsets[i+1]
var keyStart uint32
if i != 0 {
keyStart = keyOffsets[i-1]
}
r.keyRanges[owner] = iRange{
start: keyOffsets[i],
limit: keyLimit,
start: keyStart,
limit: keyOffsets[i],
}
// Decode the value range for this trie chunk
var valLimit uint32
if i == len(owners)-1 {
valLimit = math.MaxUint32
} else {
valLimit = valOffsets[i+1]
var valStart uint32
if i != 0 {
valStart = valOffsets[i-1]
}
r.valRanges[owner] = iRange{
start: valOffsets[i],
limit: valLimit,
start: valStart,
limit: valOffsets[i],
}
}
return nil

View file

@ -19,6 +19,6 @@ package version
const (
Major = 1 // Major version component of the current release
Minor = 16 // Minor version component of the current release
Patch = 5 // Patch version component of the current release
Patch = 6 // Patch version component of the current release
Meta = "stable" // Version metadata to append to the version string
)