Merge branch 'master' into release/1.16

This commit is contained in:
Felix Lange 2025-09-25 18:57:39 +02:00
commit 41714b4975
211 changed files with 7940 additions and 1815 deletions

View file

@ -55,4 +55,4 @@ jobs:
cache: false
- name: Run tests
run: go test ./...
run: go run build/ci.go test

View file

@ -13,7 +13,7 @@ jobs:
with:
script: |
const prTitle = context.payload.pull_request.title;
const titleRegex = /^(\.?[\w\s,{}/]+): .+/;
const titleRegex = /^([\w\s,{}/.]+): .+/;
if (!titleRegex.test(prTitle)) {
core.setFailed(`PR title "${prTitle}" does not match required format: directory, ...: description`);

View file

@ -150,6 +150,11 @@ func NewBoundContract(address common.Address, abi abi.ABI, caller ContractCaller
}
}
// Address returns the deployment address of the contract.
func (c *BoundContract) Address() common.Address {
return c.address
}
// Call invokes the (constant) contract method with params as input values and
// sets the output to result. The result type might be a single field for simple
// returns, a slice of interfaces for anonymous returns and a struct for named

View file

@ -100,22 +100,29 @@ func TestWaitDeployed(t *testing.T) {
}
func TestWaitDeployedCornerCases(t *testing.T) {
backend := simulated.NewBackend(
types.GenesisAlloc{
crypto.PubkeyToAddress(testKey.PublicKey): {Balance: big.NewInt(10000000000000000)},
},
var (
backend = simulated.NewBackend(
types.GenesisAlloc{
crypto.PubkeyToAddress(testKey.PublicKey): {Balance: big.NewInt(10000000000000000)},
},
)
head, _ = backend.Client().HeaderByNumber(t.Context(), nil) // Should be child's, good enough
gasPrice = new(big.Int).Add(head.BaseFee, big.NewInt(1))
signer = types.LatestSigner(params.AllDevChainProtocolChanges)
code = common.FromHex("6060604052600a8060106000396000f360606040526008565b00")
ctx, cancel = context.WithCancel(t.Context())
)
defer backend.Close()
head, _ := backend.Client().HeaderByNumber(context.Background(), nil) // Should be child's, good enough
gasPrice := new(big.Int).Add(head.BaseFee, big.NewInt(1))
// Create a transaction to an account.
code := "6060604052600a8060106000396000f360606040526008565b00"
tx := types.NewTransaction(0, common.HexToAddress("0x01"), big.NewInt(0), 3000000, gasPrice, common.FromHex(code))
tx, _ = types.SignTx(tx, types.LatestSigner(params.AllDevChainProtocolChanges), testKey)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
// 1. WaitDeploy on a transaction that does not deploy a contract, verify it
// returns an error.
tx := types.MustSignNewTx(testKey, signer, &types.LegacyTx{
Nonce: 0,
To: &common.Address{0x01},
Gas: 300000,
GasPrice: gasPrice,
Data: code,
})
if err := backend.Client().SendTransaction(ctx, tx); err != nil {
t.Errorf("failed to send transaction: %q", err)
}
@ -124,14 +131,23 @@ func TestWaitDeployedCornerCases(t *testing.T) {
t.Errorf("error mismatch: want %q, got %q, ", bind.ErrNoAddressInReceipt, err)
}
// Create a transaction that is not mined.
tx = types.NewContractCreation(1, big.NewInt(0), 3000000, gasPrice, common.FromHex(code))
tx, _ = types.SignTx(tx, types.LatestSigner(params.AllDevChainProtocolChanges), testKey)
// 2. Create a contract, but cancel the WaitDeploy before it is mined.
tx = types.MustSignNewTx(testKey, signer, &types.LegacyTx{
Nonce: 1,
Gas: 300000,
GasPrice: gasPrice,
Data: code,
})
// Wait in another thread so that we can quickly cancel it after submitting
// the transaction.
done := make(chan struct{})
go func() {
contextCanceled := errors.New("context canceled")
if _, err := bind.WaitDeployed(ctx, backend.Client(), tx.Hash()); err.Error() != contextCanceled.Error() {
t.Errorf("error mismatch: want %q, got %q, ", contextCanceled, err)
defer close(done)
want := errors.New("context canceled")
_, err := bind.WaitDeployed(ctx, backend.Client(), tx.Hash())
if err == nil || errors.Is(want, err) {
t.Errorf("error mismatch: want %v, got %v", want, err)
}
}()
@ -139,4 +155,11 @@ func TestWaitDeployedCornerCases(t *testing.T) {
t.Errorf("failed to send transaction: %q", err)
}
cancel()
// Wait for goroutine to exit or for a timeout.
select {
case <-done:
case <-time.After(time.Second * 2):
t.Fatalf("failed to cancel wait deploy")
}
}

View file

@ -99,9 +99,10 @@ func (ks *KeyStore) init(keydir string) {
// TODO: In order for this finalizer to work, there must be no references
// to ks. addressCache doesn't keep a reference but unlocked keys do,
// so the finalizer will not trigger until all timed unlocks have expired.
runtime.SetFinalizer(ks, func(m *KeyStore) {
m.cache.close()
})
runtime.AddCleanup(ks, func(c *accountCache) {
c.close()
}, ks.cache)
// Create the initial list of wallets from the cache
accs := ks.cache.accounts()
ks.wallets = make([]accounts.Wallet, len(accs))
@ -195,11 +196,14 @@ func (ks *KeyStore) Subscribe(sink chan<- accounts.WalletEvent) event.Subscripti
// forces a manual refresh (only triggers for systems where the filesystem notifier
// is not running).
func (ks *KeyStore) updater() {
ticker := time.NewTicker(walletRefreshCycle)
defer ticker.Stop()
for {
// Wait for an account update or a refresh timeout
select {
case <-ks.changes:
case <-time.After(walletRefreshCycle):
case <-ticker.C:
}
// Run the wallet refresher
ks.refreshWallets()

View file

@ -32,7 +32,7 @@ var (
testServer2 = testServer("testServer2")
testBlock1 = types.NewBeaconBlock(&deneb.BeaconBlock{
Slot: 123,
Slot: 127,
Body: deneb.BeaconBlockBody{
ExecutionPayload: deneb.ExecutionPayload{
BlockNumber: 456,
@ -41,7 +41,7 @@ var (
},
})
testBlock2 = types.NewBeaconBlock(&deneb.BeaconBlock{
Slot: 124,
Slot: 128,
Body: deneb.BeaconBlockBody{
ExecutionPayload: deneb.ExecutionPayload{
BlockNumber: 457,
@ -49,6 +49,14 @@ var (
},
},
})
testFinal1 = types.NewExecutionHeader(&deneb.ExecutionPayloadHeader{
BlockNumber: 395,
BlockHash: zrntcommon.Hash32(common.HexToHash("abbe7625624bf8ddd84723709e2758956289465dd23475f02387e0854942666")),
})
testFinal2 = types.NewExecutionHeader(&deneb.ExecutionPayloadHeader{
BlockNumber: 420,
BlockHash: zrntcommon.Hash32(common.HexToHash("9182a6ef8723654de174283750932ccc092378549836bf4873657eeec474598")),
})
)
type testServer string
@ -66,9 +74,10 @@ func TestBlockSync(t *testing.T) {
ts.AddServer(testServer1, 1)
ts.AddServer(testServer2, 1)
expHeadBlock := func(expHead *types.BeaconBlock) {
expHeadEvent := func(expHead *types.BeaconBlock, expFinal *types.ExecutionHeader) {
t.Helper()
var expNumber, headNumber uint64
var expFinalHash, finalHash common.Hash
if expHead != nil {
p, err := expHead.ExecutionPayload()
if err != nil {
@ -76,19 +85,26 @@ func TestBlockSync(t *testing.T) {
}
expNumber = p.NumberU64()
}
if expFinal != nil {
expFinalHash = expFinal.BlockHash()
}
select {
case event := <-headCh:
headNumber = event.Block.NumberU64()
finalHash = event.Finalized
default:
}
if headNumber != expNumber {
t.Errorf("Wrong head block, expected block number %d, got %d)", expNumber, headNumber)
}
if finalHash != expFinalHash {
t.Errorf("Wrong finalized block, expected block hash %064x, got %064x)", expFinalHash[:], finalHash[:])
}
}
// no block requests expected until head tracker knows about a head
ts.Run(1)
expHeadBlock(nil)
expHeadEvent(nil, nil)
// set block 1 as prefetch head, announced by server 2
head1 := blockHeadInfo(testBlock1)
@ -103,12 +119,13 @@ func TestBlockSync(t *testing.T) {
ts.AddAllowance(testServer2, 1)
ts.Run(3)
// head block still not expected as the fetched block is not the validated head yet
expHeadBlock(nil)
expHeadEvent(nil, nil)
// set as validated head, expect no further requests but block 1 set as head block
ht.validated.Header = testBlock1.Header()
ht.finalized, ht.finalizedPayload = testBlock1.Header(), testFinal1
ts.Run(4)
expHeadBlock(testBlock1)
expHeadEvent(testBlock1, testFinal1)
// set block 2 as prefetch head, announced by server 1
head2 := blockHeadInfo(testBlock2)
@ -126,17 +143,26 @@ func TestBlockSync(t *testing.T) {
// expect req2 retry to server 2
ts.Run(7, testServer2, sync.ReqBeaconBlock(head2.BlockRoot))
// now head block should be unavailable again
expHeadBlock(nil)
expHeadEvent(nil, nil)
// valid response, now head block should be block 2 immediately as it is already validated
// but head event is still not expected because an epoch boundary was crossed and the
// expected finality update has not arrived yet
ts.RequestEvent(request.EvResponse, ts.Request(7, 1), testBlock2)
ts.Run(8)
expHeadBlock(testBlock2)
expHeadEvent(nil, nil)
// expected finality update arrived, now a head event is expected
ht.finalized, ht.finalizedPayload = testBlock2.Header(), testFinal2
ts.Run(9)
expHeadEvent(testBlock2, testFinal2)
}
type testHeadTracker struct {
prefetch types.HeadInfo
validated types.SignedHeader
prefetch types.HeadInfo
validated types.SignedHeader
finalized types.Header
finalizedPayload *types.ExecutionHeader
}
func (h *testHeadTracker) PrefetchHead() types.HeadInfo {
@ -151,13 +177,14 @@ func (h *testHeadTracker) ValidatedOptimistic() (types.OptimisticUpdate, bool) {
}, h.validated.Header != (types.Header{})
}
// TODO add test case for finality
func (h *testHeadTracker) ValidatedFinality() (types.FinalityUpdate, bool) {
finalized := types.NewExecutionHeader(new(deneb.ExecutionPayloadHeader))
if h.validated.Header == (types.Header{}) || h.finalizedPayload == nil {
return types.FinalityUpdate{}, false
}
return types.FinalityUpdate{
Attested: types.HeaderWithExecProof{Header: h.validated.Header},
Finalized: types.HeaderWithExecProof{PayloadHeader: finalized},
Attested: types.HeaderWithExecProof{Header: h.finalized},
Finalized: types.HeaderWithExecProof{Header: h.finalized, PayloadHeader: h.finalizedPayload},
Signature: h.validated.Signature,
SignatureSlot: h.validated.SignatureSlot,
}, h.validated.Header != (types.Header{})
}, true
}

View file

@ -17,7 +17,7 @@ func (e ExecutionPayloadEnvelope) MarshalJSON() ([]byte, error) {
type ExecutionPayloadEnvelope struct {
ExecutionPayload *ExecutableData `json:"executionPayload" gencodec:"required"`
BlockValue *hexutil.Big `json:"blockValue" gencodec:"required"`
BlobsBundle *BlobsBundleV1 `json:"blobsBundle"`
BlobsBundle *BlobsBundle `json:"blobsBundle"`
Requests []hexutil.Bytes `json:"executionRequests"`
Override bool `json:"shouldOverrideBuilder"`
Witness *hexutil.Bytes `json:"witness,omitempty"`
@ -42,7 +42,7 @@ func (e *ExecutionPayloadEnvelope) UnmarshalJSON(input []byte) error {
type ExecutionPayloadEnvelope struct {
ExecutionPayload *ExecutableData `json:"executionPayload" gencodec:"required"`
BlockValue *hexutil.Big `json:"blockValue" gencodec:"required"`
BlobsBundle *BlobsBundleV1 `json:"blobsBundle"`
BlobsBundle *BlobsBundle `json:"blobsBundle"`
Requests []hexutil.Bytes `json:"executionRequests"`
Override *bool `json:"shouldOverrideBuilder"`
Witness *hexutil.Bytes `json:"witness,omitempty"`

View file

@ -33,8 +33,22 @@ import (
type PayloadVersion byte
var (
// PayloadV1 is the identifier of ExecutionPayloadV1 introduced in paris fork.
// https://github.com/ethereum/execution-apis/blob/main/src/engine/paris.md#executionpayloadv1
PayloadV1 PayloadVersion = 0x1
// PayloadV2 is the identifier of ExecutionPayloadV2 introduced in shanghai fork.
//
// https://github.com/ethereum/execution-apis/blob/main/src/engine/shanghai.md#executionpayloadv2
// ExecutionPayloadV2 has the syntax of ExecutionPayloadV1 and appends a
// single field: withdrawals.
PayloadV2 PayloadVersion = 0x2
// PayloadV3 is the identifier of ExecutionPayloadV3 introduced in cancun fork.
//
// https://github.com/ethereum/execution-apis/blob/main/src/engine/cancun.md#executionpayloadv3
// ExecutionPayloadV3 has the syntax of ExecutionPayloadV2 and appends the new
// fields: blobGasUsed and excessBlobGas.
PayloadV3 PayloadVersion = 0x3
)
@ -106,13 +120,18 @@ type StatelessPayloadStatusV1 struct {
type ExecutionPayloadEnvelope struct {
ExecutionPayload *ExecutableData `json:"executionPayload" gencodec:"required"`
BlockValue *big.Int `json:"blockValue" gencodec:"required"`
BlobsBundle *BlobsBundleV1 `json:"blobsBundle"`
BlobsBundle *BlobsBundle `json:"blobsBundle"`
Requests [][]byte `json:"executionRequests"`
Override bool `json:"shouldOverrideBuilder"`
Witness *hexutil.Bytes `json:"witness,omitempty"`
}
type BlobsBundleV1 struct {
// BlobsBundle includes the marshalled sidecar data. Note this structure is
// shared by BlobsBundleV1 and BlobsBundleV2 for the sake of simplicity.
//
// - BlobsBundleV1: proofs contain exactly len(blobs) kzg proofs.
// - BlobsBundleV2: proofs contain exactly CELLS_PER_EXT_BLOB * len(blobs) cell proofs.
type BlobsBundle struct {
Commitments []hexutil.Bytes `json:"commitments"`
Proofs []hexutil.Bytes `json:"proofs"`
Blobs []hexutil.Bytes `json:"blobs"`
@ -125,7 +144,7 @@ type BlobAndProofV1 struct {
type BlobAndProofV2 struct {
Blob hexutil.Bytes `json:"blob"`
CellProofs []hexutil.Bytes `json:"proofs"`
CellProofs []hexutil.Bytes `json:"proofs"` // proofs MUST contain exactly CELLS_PER_EXT_BLOB cell proofs.
}
// JSON type overrides for ExecutionPayloadEnvelope.
@ -327,18 +346,27 @@ func BlockToExecutableData(block *types.Block, fees *big.Int, sidecars []*types.
}
// Add blobs.
bundle := BlobsBundleV1{
bundle := BlobsBundle{
Commitments: make([]hexutil.Bytes, 0),
Blobs: make([]hexutil.Bytes, 0),
Proofs: make([]hexutil.Bytes, 0),
}
for _, sidecar := range sidecars {
for j := range sidecar.Blobs {
bundle.Blobs = append(bundle.Blobs, hexutil.Bytes(sidecar.Blobs[j][:]))
bundle.Commitments = append(bundle.Commitments, hexutil.Bytes(sidecar.Commitments[j][:]))
bundle.Blobs = append(bundle.Blobs, sidecar.Blobs[j][:])
bundle.Commitments = append(bundle.Commitments, sidecar.Commitments[j][:])
}
// - Before the Osaka fork, only version-0 blob transactions should be packed,
// with the proof length equal to len(blobs).
//
// - After the Osaka fork, only version-1 blob transactions should be packed,
// with the proof length equal to CELLS_PER_EXT_BLOB * len(blobs).
//
// Ideally, length validation should be performed based on the bundle version.
// In practice, this is unnecessary because blob transaction filtering is
// already done during payload construction.
for _, proof := range sidecar.Proofs {
bundle.Proofs = append(bundle.Proofs, hexutil.Bytes(proof[:]))
bundle.Proofs = append(bundle.Proofs, proof[:])
}
}

View file

@ -20,6 +20,7 @@ import (
"crypto/sha256"
"fmt"
"math"
"math/big"
"os"
"slices"
"sort"
@ -90,12 +91,8 @@ func (c *ChainConfig) AddFork(name string, epoch uint64, version []byte) *ChainC
// LoadForks parses the beacon chain configuration file (config.yaml) and extracts
// the list of forks.
func (c *ChainConfig) LoadForks(path string) error {
file, err := os.ReadFile(path)
if err != nil {
return fmt.Errorf("failed to read beacon chain config file: %v", err)
}
config := make(map[string]string)
func (c *ChainConfig) LoadForks(file []byte) error {
config := make(map[string]any)
if err := yaml.Unmarshal(file, &config); err != nil {
return fmt.Errorf("failed to parse beacon chain config file: %v", err)
}
@ -108,18 +105,36 @@ func (c *ChainConfig) LoadForks(path string) error {
for key, value := range config {
if strings.HasSuffix(key, "_FORK_VERSION") {
name := key[:len(key)-len("_FORK_VERSION")]
if v, err := hexutil.Decode(value); err == nil {
switch version := value.(type) {
case int:
versions[name] = new(big.Int).SetUint64(uint64(version)).FillBytes(make([]byte, 4))
case uint64:
versions[name] = new(big.Int).SetUint64(version).FillBytes(make([]byte, 4))
case string:
v, err := hexutil.Decode(version)
if err != nil {
return fmt.Errorf("failed to decode hex fork id %q in beacon chain config file: %v", version, err)
}
versions[name] = v
} else {
return fmt.Errorf("failed to decode hex fork id %q in beacon chain config file: %v", value, err)
default:
return fmt.Errorf("invalid fork version %q in beacon chain config file", version)
}
}
if strings.HasSuffix(key, "_FORK_EPOCH") {
name := key[:len(key)-len("_FORK_EPOCH")]
if v, err := strconv.ParseUint(value, 10, 64); err == nil {
switch epoch := value.(type) {
case int:
epochs[name] = uint64(epoch)
case uint64:
epochs[name] = epoch
case string:
v, err := strconv.ParseUint(epoch, 10, 64)
if err != nil {
return fmt.Errorf("failed to parse epoch number %q in beacon chain config file: %v", epoch, err)
}
epochs[name] = v
} else {
return fmt.Errorf("failed to parse epoch number %q in beacon chain config file: %v", value, err)
default:
return fmt.Errorf("invalid fork epoch %q in beacon chain config file", epoch)
}
}
}

View file

@ -0,0 +1,34 @@
package params
import (
"bytes"
"testing"
)
func TestChainConfig_LoadForks(t *testing.T) {
const config = `
GENESIS_FORK_VERSION: 0x00000000
ALTAIR_FORK_VERSION: 0x00000001
ALTAIR_FORK_EPOCH: 1
EIP7928_FORK_VERSION: 0xb0000038
EIP7928_FORK_EPOCH: 18446744073709551615
BLOB_SCHEDULE: []
`
c := &ChainConfig{}
err := c.LoadForks([]byte(config))
if err != nil {
t.Fatal(err)
}
for _, fork := range c.Forks {
if fork.Name == "GENESIS" && (fork.Epoch != 0) {
t.Errorf("unexpected genesis fork epoch %d", fork.Epoch)
}
if fork.Name == "ALTAIR" && (fork.Epoch != 1 || !bytes.Equal(fork.Version, []byte{0, 0, 0, 1})) {
t.Errorf("unexpected altair fork epoch %d version %x", fork.Epoch, fork.Version)
}
}
}

View file

@ -1,113 +1,85 @@
# This file contains sha256 checksums of optional build dependencies.
# version:spec-tests fusaka-devnet-3%40v1.0.0
# version:spec-tests v5.1.0
# https://github.com/ethereum/execution-spec-tests/releases
# https://github.com/ethereum/execution-spec-tests/releases/download/fusaka-devnet-3%40v1.0.0
576261e1280e5300c458aa9b05eccb2fec5ff80a0005940dc52fa03fdd907249 fixtures_fusaka-devnet-3.tar.gz
# https://github.com/ethereum/execution-spec-tests/releases/download/v5.1.0
a3192784375acec7eaec492799d5c5d0c47a2909a3cc40178898e4ecd20cc416 fixtures_develop.tar.gz
# version:golang 1.25.0
# version:golang 1.25.1
# https://go.dev/dl/
4bd01e91297207bfa450ea40d4d5a93b1b531a5e438473b2a06e18e077227225 go1.25.0.src.tar.gz
e5234a7dac67bc86c528fe9752fc9d63557918627707a733ab4cac1a6faed2d4 go1.25.0.aix-ppc64.tar.gz
5bd60e823037062c2307c71e8111809865116714d6f6b410597cf5075dfd80ef go1.25.0.darwin-amd64.tar.gz
95e836238bcf8f9a71bffea43344cbd35ee1f16db3aaced2f98dbac045d102db go1.25.0.darwin-amd64.pkg
544932844156d8172f7a28f77f2ac9c15a23046698b6243f633b0a0b00c0749c go1.25.0.darwin-arm64.tar.gz
202a0d8338c152cb4c9f04782429e9ba8bef31d9889272380837e4043c9d800a go1.25.0.darwin-arm64.pkg
5ed3cf9a810a1483822538674f1336c06b51aa1b94d6d545a1a0319a48177120 go1.25.0.dragonfly-amd64.tar.gz
abea5d5c6697e6b5c224731f2158fe87c602996a2a233ac0c4730cd57bf8374e go1.25.0.freebsd-386.tar.gz
86e6fe0a29698d7601c4442052dac48bd58d532c51cccb8f1917df648138730b go1.25.0.freebsd-amd64.tar.gz
d90b78e41921f72f30e8bbc81d9dec2cff7ff384a33d8d8debb24053e4336bfe go1.25.0.freebsd-arm.tar.gz
451d0da1affd886bfb291b7c63a6018527b269505db21ce6e14724f22ab0662e go1.25.0.freebsd-arm64.tar.gz
7b565f76bd8bda46549eeaaefe0e53b251e644c230577290c0f66b1ecdb3cdbe go1.25.0.freebsd-riscv64.tar.gz
b1e1fdaab1ad25aa1c08d7a36c97d45d74b98b89c3f78c6d2145f77face54a2c go1.25.0.illumos-amd64.tar.gz
8c602dd9d99bc9453b3995d20ce4baf382cc50855900a0ece5de9929df4a993a go1.25.0.linux-386.tar.gz
2852af0cb20a13139b3448992e69b868e50ed0f8a1e5940ee1de9e19a123b613 go1.25.0.linux-amd64.tar.gz
05de75d6994a2783699815ee553bd5a9327d8b79991de36e38b66862782f54ae go1.25.0.linux-arm64.tar.gz
a5a8f8198fcf00e1e485b8ecef9ee020778bf32a408a4e8873371bfce458cd09 go1.25.0.linux-armv6l.tar.gz
cab86b1cf761b1cb3bac86a8877cfc92e7b036fc0d3084123d77013d61432afc go1.25.0.linux-loong64.tar.gz
d66b6fb74c3d91b9829dc95ec10ca1f047ef5e89332152f92e136cf0e2da5be1 go1.25.0.linux-mips.tar.gz
4082e4381a8661bc2a839ff94ba3daf4f6cde20f8fb771b5b3d4762dc84198a2 go1.25.0.linux-mips64.tar.gz
70002c299ec7f7175ac2ef673b1b347eecfa54ae11f34416a6053c17f855afcc go1.25.0.linux-mips64le.tar.gz
b00a3a39eff099f6df9f1c7355bf28e4589d0586f42d7d4a394efb763d145a73 go1.25.0.linux-mipsle.tar.gz
df166f33bd98160662560a72ff0b4ba731f969a80f088922bddcf566a88c1ec1 go1.25.0.linux-ppc64.tar.gz
0f18a89e7576cf2c5fa0b487a1635d9bcbf843df5f110e9982c64df52a983ad0 go1.25.0.linux-ppc64le.tar.gz
c018ff74a2c48d55c8ca9b07c8e24163558ffec8bea08b326d6336905d956b67 go1.25.0.linux-riscv64.tar.gz
34e5a2e19f2292fbaf8783e3a241e6e49689276aef6510a8060ea5ef54eee408 go1.25.0.linux-s390x.tar.gz
f8586cdb7aa855657609a5c5f6dbf523efa00c2bbd7c76d3936bec80aa6c0aba go1.25.0.netbsd-386.tar.gz
ae8dc1469385b86a157a423bb56304ba45730de8a897615874f57dd096db2c2a go1.25.0.netbsd-amd64.tar.gz
1ff7e4cc764425fc9dd6825eaee79d02b3c7cafffbb3691687c8d672ade76cb7 go1.25.0.netbsd-arm.tar.gz
e1b310739f26724216aa6d7d7208c4031f9ff54c9b5b9a796ddc8bebcb4a5f16 go1.25.0.netbsd-arm64.tar.gz
4802a9b20e533da91adb84aab42e94aa56cfe3e5475d0550bed3385b182e69d8 go1.25.0.openbsd-386.tar.gz
c016cd984bebe317b19a4f297c4f50def120dc9788490540c89f28e42f1dabe1 go1.25.0.openbsd-amd64.tar.gz
a1e31d0bf22172ddde42edf5ec811ef81be43433df0948ece52fecb247ccfd8d go1.25.0.openbsd-arm.tar.gz
343ea8edd8c218196e15a859c6072d0dd3246fbbb168481ab665eb4c4140458d go1.25.0.openbsd-arm64.tar.gz
694c14da1bcaeb5e3332d49bdc2b6d155067648f8fe1540c5de8f3cf8e157154 go1.25.0.openbsd-ppc64.tar.gz
aa510ad25cf54c06cd9c70b6d80ded69cb20188ac6e1735655eef29ff7e7885f go1.25.0.openbsd-riscv64.tar.gz
46f8cef02086cf04bf186c5912776b56535178d4cb319cd19c9fdbdd29231986 go1.25.0.plan9-386.tar.gz
29b34391d84095e44608a228f63f2f88113a37b74a79781353ec043dfbcb427b go1.25.0.plan9-amd64.tar.gz
0a047107d13ebe7943aaa6d54b1d7bbd2e45e68ce449b52915a818da715799c2 go1.25.0.plan9-arm.tar.gz
9977f9e4351984364a3b2b78f8b88bfd1d339812356d5237678514594b7d3611 go1.25.0.solaris-amd64.tar.gz
df9f39db82a803af0db639e3613a36681ab7a42866b1384b3f3a1045663961a7 go1.25.0.windows-386.zip
afd9e0a8d2665ff122c8302bb4a3ce4a5331e4e630ddc388be1f9238adfa8fe3 go1.25.0.windows-386.msi
89efb4f9b30812eee083cc1770fdd2913c14d301064f6454851428f9707d190b go1.25.0.windows-amd64.zip
936bd87109da515f79d80211de5bc6cbda071f2cc577f7e6af1a9e754ea34819 go1.25.0.windows-amd64.msi
27bab004c72b3d7bd05a69b6ec0fc54a309b4b78cc569dd963d8b3ec28bfdb8c go1.25.0.windows-arm64.zip
357d030b217ff68e700b6cfc56097bc21ad493bb45b79733a052d112f5031ed9 go1.25.0.windows-arm64.msi
d010c109cee94d80efe681eab46bdea491ac906bf46583c32e9f0dbb0bd1a594 go1.25.1.src.tar.gz
1d622468f767a1b9fe1e1e67bd6ce6744d04e0c68712adc689748bbeccb126bb go1.25.1.darwin-amd64.tar.gz
68deebb214f39d542e518ebb0598a406ab1b5a22bba8ec9ade9f55fb4dd94a6c go1.25.1.darwin-arm64.tar.gz
d03cdcbc9bd8baf5cf028de390478e9e2b3e4d0afe5a6582dedc19bfe6a263b2 go1.25.1.linux-386.tar.gz
7716a0d940a0f6ae8e1f3b3f4f36299dc53e31b16840dbd171254312c41ca12e go1.25.1.linux-amd64.tar.gz
65a3e34fb2126f55b34e1edfc709121660e1be2dee6bdf405fc399a63a95a87d go1.25.1.linux-arm64.tar.gz
eb949be683e82a99e9861dafd7057e31ea40b161eae6c4cd18fdc0e8c4ae6225 go1.25.1.linux-armv6l.tar.gz
be13d5479b8c75438f2efcaa8c191fba3af684b3228abc9c99c7aa8502f34424 go1.25.1.windows-386.zip
4a974de310e7ee1d523d2fcedb114ba5fa75408c98eb3652023e55ccf3fa7cab go1.25.1.windows-amd64.zip
45ab4290adbd6ee9e7f18f0d57eaa9008fdbef590882778ed93eac3c8cca06c5 go1.25.1.aix-ppc64.tar.gz
2e3c1549bed3124763774d648f291ac42611232f48320ebbd23517c909c09b81 go1.25.1.dragonfly-amd64.tar.gz
dc0198dd4ec520e13f26798def8750544edf6448d8e9c43fd2a814e4885932af go1.25.1.freebsd-386.tar.gz
c4f1a7e7b258406e6f3b677ecdbd97bbb23ff9c0d44be4eb238a07d360f69ac8 go1.25.1.freebsd-amd64.tar.gz
7772fc5ff71ed39297ec0c1599fc54e399642c9b848eac989601040923b0de9c go1.25.1.freebsd-arm.tar.gz
5bb011d5d5b6218b12189f07aa0be618ab2002662fff1ca40afba7389735c207 go1.25.1.freebsd-arm64.tar.gz
ccac716240cb049bebfafcb7eebc3758512178a4c51fc26da9cc032035d850c8 go1.25.1.freebsd-riscv64.tar.gz
cc53910ffb9fcfdd988a9fa25b5423bae1cfa01b19616be646700e1f5453b466 go1.25.1.illumos-amd64.tar.gz
efe809f923bcedab44bf7be2b3af8d182b512b1bf9c07d302e0c45d26c8f56f3 go1.25.1.linux-loong64.tar.gz
c0de33679f6ed68991dc42dc4a602e74a666e3e166c1748ee1b5d1a7ea2ffbb2 go1.25.1.linux-mips.tar.gz
c270f7b0c0bdfbcd54fef4481227c40d41bb518f9ae38ee930870f04a0a6a589 go1.25.1.linux-mips64.tar.gz
80be871ba9c944f34d1868cdf5047e1cf2e1289fe08cdb90e2453d2f0d6965ae go1.25.1.linux-mips64le.tar.gz
9f09defa9bb22ebf2cde76162f40958564e57ce5c2b3649bc063bebcbc9294c1 go1.25.1.linux-mipsle.tar.gz
2c76b7d278c1d43ad19d478ad3f0f05e7b782b64b90870701b314fa48b5f43c6 go1.25.1.linux-ppc64.tar.gz
8b0c8d3ee5b1b5c28b6bd63dc4438792012e01d03b4bf7a61d985c87edab7d1f go1.25.1.linux-ppc64le.tar.gz
22fe934a9d0c9c57275716c55b92d46ebd887cec3177c9140705efa9f84ba1e2 go1.25.1.linux-riscv64.tar.gz
9cfe517ba423f59f3738ca5c3d907c103253cffbbcc2987142f79c5de8c1bf93 go1.25.1.linux-s390x.tar.gz
6af8a08353e76205d5b743dd7a3f0126684f96f62be0a31b75daf9837e512c46 go1.25.1.netbsd-386.tar.gz
e5d534ff362edb1bd8c8e10892b6a027c4c1482454245d1529167676498684c7 go1.25.1.netbsd-amd64.tar.gz
88bcf39254fdcea6a199c1c27d787831b652427ce60851ae9e41a3d7eb477f45 go1.25.1.netbsd-arm.tar.gz
d7c2eabe1d04ee47bcaea2816fdd90dbd25d90d4dfa756faa9786c788e4f3a4e go1.25.1.netbsd-arm64.tar.gz
14a2845977eb4dde11d929858c437a043467c427db87899935e90cee04a38d72 go1.25.1.openbsd-386.tar.gz
d27ac54b38a13a09c81e67c82ac70d387037341c85c3399291c73e13e83fdd8c go1.25.1.openbsd-amd64.tar.gz
0f4ab5f02500afa4befd51fed1e8b45e4d07ca050f641cc3acc76eaa4027b2c3 go1.25.1.openbsd-arm.tar.gz
d46c3bd156843656f7f3cb0dec27ea51cd926ec3f7b80744bf8156e67c1c812f go1.25.1.openbsd-arm64.tar.gz
c550514c67f22e409be10e40eace761e2e43069f4ef086ae6e60aac736c2b679 go1.25.1.openbsd-ppc64.tar.gz
8a09a8714a2556eb13fc1f10b7ce2553fcea4971e3330fc3be0efd24aab45734 go1.25.1.openbsd-riscv64.tar.gz
b0e1fefaf0c7abd71f139a54eee9767944aff5f0bc9d69c968234804884e552f go1.25.1.plan9-386.tar.gz
e94732c94f149690aa0ab11c26090577211b4a988137cb2c03ec0b54e750402e go1.25.1.plan9-amd64.tar.gz
7eb80e9de1e817d9089a54e8c7c5c8d8ed9e5fb4d4a012fc0f18fc422a484f0c go1.25.1.plan9-arm.tar.gz
1261dfad7c4953c0ab90381bc1242dc54e394db7485c59349428d532b2273343 go1.25.1.solaris-amd64.tar.gz
04bc3c078e9e904c4d58d6ac2532a5bdd402bd36a9ff0b5949b3c5e6006a05ee go1.25.1.windows-arm64.zip
# version:golangci 2.0.2
# version:golangci 2.4.0
# https://github.com/golangci/golangci-lint/releases/
# https://github.com/golangci/golangci-lint/releases/download/v2.0.2/
a88cbdc86b483fe44e90bf2dcc3fec2af8c754116e6edf0aa6592cac5baa7a0e golangci-lint-2.0.2-darwin-amd64.tar.gz
664550e7954f5f4451aae99b4f7382c1a47039c66f39ca605f5d9af1a0d32b49 golangci-lint-2.0.2-darwin-arm64.tar.gz
bda0f0f27d300502faceda8428834a76ca25986f6d9fc2bd41d201c3ed73f08e golangci-lint-2.0.2-freebsd-386.tar.gz
1cbd0c7ade3fb027d61d38a646ec1b51be5846952b4b04a5330e7f4687f2270c golangci-lint-2.0.2-freebsd-amd64.tar.gz
1e828a597726198b2e35acdbcc5f3aad85244d79846d2d2bdb05241c5a535f9e golangci-lint-2.0.2-freebsd-armv6.tar.gz
848b49315dc5cddd0c9ce35e96ab33d584db0ea8fb57bcbf9784f1622bec0269 golangci-lint-2.0.2-freebsd-armv7.tar.gz
cabf9a6beab574c7f98581eb237919e580024759e3cdc05c4d516b044dce6770 golangci-lint-2.0.2-illumos-amd64.tar.gz
2fde80d15ed6527791f106d606120620e913c3a663c90a8596861d0a4461169e golangci-lint-2.0.2-linux-386.deb
804bc6e350a8c613aaa0a33d8d45414a80157b0ba1b2c2335ac859f85ad98ebd golangci-lint-2.0.2-linux-386.rpm
e64beb72fecf581e57d88ae3adb1c9d4bf022694b6bd92e3c8e460910bbdc37d golangci-lint-2.0.2-linux-386.tar.gz
9c55aed174d7a52bb1d4006b36e7edee9023631f6b814a80cb39c9860d6f75c3 golangci-lint-2.0.2-linux-amd64.deb
c55a2ef741a687b4c679696931f7fd4a467babd64c9457cf17bb9632fd1cecd1 golangci-lint-2.0.2-linux-amd64.rpm
89cc8a7810dc63b9a37900da03e37c3601caf46d42265d774e0f1a5d883d53e2 golangci-lint-2.0.2-linux-amd64.tar.gz
a3e78583c4e7ea1b63e82559f126bb3a5b12788676f158526752d53e67824b99 golangci-lint-2.0.2-linux-arm64.deb
bd5dd52b5c9f18aa7a2904eda9a9f91c628e98623fe70b7afcbb847e2de84422 golangci-lint-2.0.2-linux-arm64.rpm
789d5b91219ac68c2336f77d41cd7e33a910420594780f455893f8453d09595b golangci-lint-2.0.2-linux-arm64.tar.gz
534cd4c464a66178714ed68152c1ed7aa73e5700bf409e4ed1a8363adf96afca golangci-lint-2.0.2-linux-armv6.deb
cf7d02905a5fc80b96c9a64621693b4cc7337b1ce29986c19fd72608dafe66c5 golangci-lint-2.0.2-linux-armv6.rpm
a0d81cb527d8fe878377f2356b5773e219b0b91832a6b59e7b9bcf9a90fe0b0e golangci-lint-2.0.2-linux-armv6.tar.gz
dedd5be7fff8cba8fe15b658a59347ea90d7d02a9fff87f09c7687e6da05a8b6 golangci-lint-2.0.2-linux-armv7.deb
85521b6f3ad2f5a2bc9bfe14b9b08623f764964048f75ed6dfcfaf8eb7d57cc1 golangci-lint-2.0.2-linux-armv7.rpm
96471046c7780dda4ea680f65e92c2ef56ff58d40bcffaf6cfe9d6d48e3c27aa golangci-lint-2.0.2-linux-armv7.tar.gz
815d914a7738e4362466b2d11004e8618b696b49e8ace13df2c2b25f28fb1e17 golangci-lint-2.0.2-linux-loong64.deb
f16381e3d8a0f011b95e086d83d620248432b915d01f4beab4d29cfe4dc388b0 golangci-lint-2.0.2-linux-loong64.rpm
1bd8d7714f9c92db6a0f23bae89f39c85ba047bec8eeb42b8748d30ae3228d18 golangci-lint-2.0.2-linux-loong64.tar.gz
ea6e9d4aabb526aa298e47e8b026d8893d918c5eb919ba0ab403e315def74cc5 golangci-lint-2.0.2-linux-mips64.deb
519d8d53af83fdc9c25cc3fba8b663331ac22ef68131d4b0084cb6f425b6f79a golangci-lint-2.0.2-linux-mips64.rpm
80d655a0a1ac1b19dcef4b58fa2a7dadb646cc50ad08d460b5c53cdb421165e4 golangci-lint-2.0.2-linux-mips64.tar.gz
aa0e75384bb482c865d4dfc95d23ceb25666bf20461b67a832f0eea6670312ec golangci-lint-2.0.2-linux-mips64le.deb
f2a8b500fb69bdea1b01df6267aaa5218fa4a58aeb781c1a20d0d802fe465a52 golangci-lint-2.0.2-linux-mips64le.rpm
e66a0c0c9a275f02d27a7caa9576112622306f001d73dfc082cf1ae446fc1242 golangci-lint-2.0.2-linux-mips64le.tar.gz
e85ad51aac6428be2d8a37000d053697371a538a5bcbc1644caa7c5e77f6d0af golangci-lint-2.0.2-linux-ppc64le.deb
906798365eac1944af2a9b9a303e6fd49ec9043307bc681b7a96277f7f8beea5 golangci-lint-2.0.2-linux-ppc64le.rpm
f7f1a271b0af274d6c9ce000f5dc6e1fb194350c67bcc62494f96f791882ba92 golangci-lint-2.0.2-linux-ppc64le.tar.gz
eea8bf643a42bf05de9780530db22923e5ab0d588f0e173594dc6518f2a25d2a golangci-lint-2.0.2-linux-riscv64.deb
4ff40f9fe2954400836e2a011ba4744d00ffab5068a51368552dfce6aba3b81b golangci-lint-2.0.2-linux-riscv64.rpm
531d8f225866674977d630afbf0533eb02f9bec607fb13895f7a2cd7b2e0a648 golangci-lint-2.0.2-linux-riscv64.tar.gz
6f827647046c603f40d97ea5aadc6f48cd0bb5d19f7a3d56500c3b833d2a0342 golangci-lint-2.0.2-linux-s390x.deb
387a090e9576d19ca86aac738172e58e07c19f2784a13bb387f4f0d75fb9c8d3 golangci-lint-2.0.2-linux-s390x.rpm
57de1fb7722a9feb2d11ed0a007a93959d05b9db5929a392abc222e30012467e golangci-lint-2.0.2-linux-s390x.tar.gz
ed95e0492ea86bf79eb661f0334474b2a4255093685ff587eccd797c5a54db7e golangci-lint-2.0.2-netbsd-386.tar.gz
eab81d729778166415d349a80e568b2f2b3a781745a9be3212a92abb1e732daf golangci-lint-2.0.2-netbsd-amd64.tar.gz
d20add73f7c2de2c3b01ed4fd7b63ffcf0a6597d5ea228d1699e92339a3cd047 golangci-lint-2.0.2-netbsd-arm64.tar.gz
4e4f44e6057879cd62424ff1800a767d25a595c0e91d6d48809eea9186b4c739 golangci-lint-2.0.2-netbsd-armv6.tar.gz
51ec17b16d8743ae4098a0171f04f0ed4d64561e3051b982778b0e6c306a1b03 golangci-lint-2.0.2-netbsd-armv7.tar.gz
5482cf27b93fae1765c70ee2a95d4074d038e9dee61bdd61d017ce8893d3a4a8 golangci-lint-2.0.2-source.tar.gz
a35d8fdf3e14079a10880dbbb7586b46faec89be96f086b244b3e565aac80313 golangci-lint-2.0.2-windows-386.zip
fe4b946cc01366b989001215687003a9c4a7098589921f75e6228d6d8cffc15c golangci-lint-2.0.2-windows-amd64.zip
646bd9250ef8c771d85cd22fe8e6f2397ae39599179755e3bbfa9ef97ad44090 golangci-lint-2.0.2-windows-arm64.zip
ce1dc0bad6f8a61d64e6b3779eeb773479c175125d6f686b0e67ef9c8432d16e golangci-lint-2.0.2-windows-armv6.zip
92684a48faabe792b11ac27ca8b25551eff940b0a1e84ad7244e98b4994962db golangci-lint-2.0.2-windows-armv7.zip
# https://github.com/golangci/golangci-lint/releases/download/v2.4.0/
7904ce63f79db44934939cf7a063086ea0ea98e9b19eba0a9d52ccdd0d21951c golangci-lint-2.4.0-darwin-amd64.tar.gz
cd4dd53fa09b6646baff5fd22b8c64d91db02c21c7496df27992d75d34feec59 golangci-lint-2.4.0-darwin-arm64.tar.gz
d58f426ebe14cc257e81562b4bf37a488ffb4ffbbb3ec73041eb3b38bb25c0e1 golangci-lint-2.4.0-freebsd-386.tar.gz
6ec4a6177fc6c0dd541fbcb3a7612845266d020d35cc6fa92959220cdf64ca39 golangci-lint-2.4.0-freebsd-amd64.tar.gz
4d473e3e71c01feaa915a0604fb35758b41284fb976cdeac3f842118d9ee7e17 golangci-lint-2.4.0-freebsd-armv6.tar.gz
58727746c6530801a3f9a702a5945556a5eb7e88809222536dd9f9d54cafaeff golangci-lint-2.4.0-freebsd-armv7.tar.gz
fbf28c662760e24c32f82f8d16dffdb4a82de7726a52ba1fad94f890c22997ea golangci-lint-2.4.0-illumos-amd64.tar.gz
a15a000a8981ef665e971e0f67e2acda9066a9e37a59344393b7351d8fb49c81 golangci-lint-2.4.0-linux-386.tar.gz
fae792524c04424c0ac369f5b8076f04b45cf29fc945a370e55d369a8dc11840 golangci-lint-2.4.0-linux-amd64.tar.gz
70ac11f55b80ec78fd3a879249cc9255121b8dfd7f7ed4fc46ed137f4abf17e7 golangci-lint-2.4.0-linux-arm64.tar.gz
4acdc40e5cebe99e4e7ced358a05b2e71789f409b41cb4f39bbb86ccfa14b1dc golangci-lint-2.4.0-linux-armv6.tar.gz
2a68749568fa22b4a97cb88dbea655595563c795076536aa6c087f7968784bf3 golangci-lint-2.4.0-linux-armv7.tar.gz
9e3369afb023711036dcb0b4f45c9fe2792af962fa1df050c9f6ac101a6c5d73 golangci-lint-2.4.0-linux-loong64.tar.gz
bb9143d6329be2c4dbfffef9564078e7da7d88e7dde6c829b6263d98e072229e golangci-lint-2.4.0-linux-mips64.tar.gz
5ad1765b40d56cd04d4afd805b3ba6f4bfd9b36181da93c31e9b17e483d8608d golangci-lint-2.4.0-linux-mips64le.tar.gz
918936fb9c0d5ba96bef03cf4348b03938634cfcced49be1e9bb29cb5094fa73 golangci-lint-2.4.0-linux-ppc64le.tar.gz
f7474c638e1fb67ebbdc654b55ca0125377ea0bc88e8fee8d964a4f24eacf828 golangci-lint-2.4.0-linux-riscv64.tar.gz
b617a9543997c8bfceaffa88a75d4e595030c6add69fba800c1e4d8f5fe253dd golangci-lint-2.4.0-linux-s390x.tar.gz
7db027b03a9ba328f795215b04f594036837bc7dd0dd7cd16776b02a6167981c golangci-lint-2.4.0-netbsd-386.tar.gz
52d8f9393f4313df0a62b752c37775e3af0b818e43e8dd28954351542d7c60bc golangci-lint-2.4.0-netbsd-amd64.tar.gz
5c0086027fb5a4af3829e530c8115db4b35d11afe1914322eef528eb8cd38c69 golangci-lint-2.4.0-netbsd-arm64.tar.gz
6b779d6ed1aed87cefe195cc11759902b97a76551b593312c6833f2635a3488f golangci-lint-2.4.0-netbsd-armv6.tar.gz
f00d1f4b7ec3468a0f9fffd0d9ea036248b029b7621cbc9a59c449ef94356d09 golangci-lint-2.4.0-netbsd-armv7.tar.gz
3ce671b0b42b58e35066493aab75a7e2826c9e079988f1ba5d814a4029faaf87 golangci-lint-2.4.0-windows-386.zip
003112f7a56746feaabf20b744054bf9acdf900c9e77176383623c4b1d76aaa9 golangci-lint-2.4.0-windows-amd64.zip
dc0c2092af5d47fc2cd31a1dfe7b4c7e765fab22de98bd21ef2ffcc53ad9f54f golangci-lint-2.4.0-windows-arm64.zip
0263d23e20a260cb1592d35e12a388f99efe2c51b3611fdc66fbd9db1fce664d golangci-lint-2.4.0-windows-armv6.zip
9403c03bf648e6313036e0273149d44bad1b9ad53889b6d00e4ccb842ba3c058 golangci-lint-2.4.0-windows-armv7.zip
# This is the builder on PPA that will build Go itself (inception-y), don't modify!
#

View file

@ -64,6 +64,11 @@ import (
)
var (
goModules = []string{
".",
"./cmd/keeper",
}
// Files that end up in the geth*.zip archive.
gethArchiveFiles = []string{
"COPYING",
@ -216,7 +221,7 @@ func doInstall(cmdline []string) {
// Default: collect all 'main' packages in cmd/ and build those.
packages := flag.Args()
if len(packages) == 0 {
packages = build.FindMainPackages("./cmd")
packages = build.FindMainPackages(&tc, "./cmd/...")
}
// Do the build!
@ -295,6 +300,7 @@ func doTest(cmdline []string) {
if *dlgo {
tc.Root = build.DownloadGo(csdb)
}
gotest := tc.Go("test")
// CI needs a bit more time for the statetests (default 45m).
@ -322,18 +328,26 @@ func doTest(cmdline []string) {
gotest.Args = append(gotest.Args, "-short")
}
packages := []string{"./..."}
if len(flag.CommandLine.Args()) > 0 {
packages = flag.CommandLine.Args()
packages := flag.CommandLine.Args()
if len(packages) > 0 {
gotest.Args = append(gotest.Args, packages...)
build.MustRun(gotest)
return
}
// No packages specified, run all tests for all modules.
gotest.Args = append(gotest.Args, "./...")
for _, mod := range goModules {
test := *gotest
test.Dir = mod
build.MustRun(&test)
}
gotest.Args = append(gotest.Args, packages...)
build.MustRun(gotest)
}
// downloadSpecTestFixtures downloads and extracts the execution-spec-tests fixtures.
func downloadSpecTestFixtures(csdb *download.ChecksumDB, cachedir string) string {
ext := ".tar.gz"
base := "fixtures_fusaka-devnet-3"
base := "fixtures_develop"
archivePath := filepath.Join(cachedir, base+ext)
if err := csdb.DownloadFileFromKnownURL(archivePath); err != nil {
log.Fatal(err)
@ -351,40 +365,46 @@ func doCheckGenerate() {
cachedir = flag.String("cachedir", "./build/cache", "directory for caching binaries.")
tc = new(build.GoToolchain)
)
// Compute the origin hashes of all the files
var hashes map[string][32]byte
var err error
hashes, err = build.HashFolder(".", []string{"tests/testdata", "build/cache", ".git"})
if err != nil {
log.Fatal("Error computing hashes", "err", err)
}
// Run any go generate steps we might be missing
var (
protocPath = downloadProtoc(*cachedir)
protocGenGoPath = downloadProtocGenGo(*cachedir)
)
c := tc.Go("generate", "./...")
pathList := []string{filepath.Join(protocPath, "bin"), protocGenGoPath, os.Getenv("PATH")}
c.Env = append(c.Env, "PATH="+strings.Join(pathList, string(os.PathListSeparator)))
build.MustRun(c)
// Check if generate file hashes have changed
generated, err := build.HashFolder(".", []string{"tests/testdata", "build/cache", ".git"})
if err != nil {
log.Fatalf("Error re-computing hashes: %v", err)
}
updates := build.DiffHashes(hashes, generated)
for _, file := range updates {
log.Printf("File changed: %s", file)
}
if len(updates) != 0 {
log.Fatal("One or more generated files were updated by running 'go generate ./...'")
for _, mod := range goModules {
// Compute the origin hashes of all the files
hashes, err := build.HashFolder(mod, []string{"tests/testdata", "build/cache", ".git"})
if err != nil {
log.Fatal("Error computing hashes", "err", err)
}
c := tc.Go("generate", "./...")
c.Env = append(c.Env, "PATH="+strings.Join(pathList, string(os.PathListSeparator)))
c.Dir = mod
build.MustRun(c)
// Check if generate file hashes have changed
generated, err := build.HashFolder(mod, []string{"tests/testdata", "build/cache", ".git"})
if err != nil {
log.Fatalf("Error re-computing hashes: %v", err)
}
updates := build.DiffHashes(hashes, generated)
for _, file := range updates {
log.Printf("File changed: %s", file)
}
if len(updates) != 0 {
log.Fatal("One or more generated files were updated by running 'go generate ./...'")
}
}
fmt.Println("No stale files detected.")
// Run go mod tidy check.
build.MustRun(tc.Go("mod", "tidy", "-diff"))
for _, mod := range goModules {
tidy := tc.Go("mod", "tidy", "-diff")
tidy.Dir = mod
build.MustRun(tidy)
}
fmt.Println("No untidy module files detected.")
}
@ -424,14 +444,30 @@ func doLint(cmdline []string) {
cachedir = flag.String("cachedir", "./build/cache", "directory for caching golangci-lint binary.")
)
flag.CommandLine.Parse(cmdline)
packages := []string{"./..."}
if len(flag.CommandLine.Args()) > 0 {
packages = flag.CommandLine.Args()
}
linter := downloadLinter(*cachedir)
lflags := []string{"run", "--config", ".golangci.yml"}
build.MustRunCommandWithOutput(linter, append(lflags, packages...)...)
linter, err := filepath.Abs(linter)
if err != nil {
log.Fatal(err)
}
config, err := filepath.Abs(".golangci.yml")
if err != nil {
log.Fatal(err)
}
lflags := []string{"run", "--config", config}
packages := flag.CommandLine.Args()
if len(packages) > 0 {
build.MustRunCommandWithOutput(linter, append(lflags, packages...)...)
} else {
// Run for all modules in workspace.
for _, mod := range goModules {
args := append(lflags, "./...")
lintcmd := exec.Command(linter, args...)
lintcmd.Dir = mod
build.MustRunWithOutput(lintcmd)
}
}
fmt.Println("You have achieved perfection.")
}

View file

@ -1133,7 +1133,10 @@ func (s *Suite) testBadBlobTx(t *utesting.T, tx *types.Transaction, badTx *types
// transmit the same tx but with correct sidecar from the good peer.
var req *eth.GetPooledTransactionsPacket
req, err = readUntil[eth.GetPooledTransactionsPacket](context.Background(), conn)
ctx, cancel := context.WithTimeout(context.Background(), 12*time.Second)
defer cancel()
req, err = readUntil[eth.GetPooledTransactionsPacket](ctx, conn)
if err != nil {
errc <- fmt.Errorf("reading pooled tx request failed: %v", err)
return

View file

@ -274,10 +274,10 @@ func checkAccumulator(e *era.Era) error {
for it.Next() {
// 1) next() walks the block index, so we're able to implicitly verify it.
if it.Error() != nil {
return fmt.Errorf("error reading block %d: %w", it.Number(), err)
return fmt.Errorf("error reading block %d: %w", it.Number(), it.Error())
}
block, receipts, err := it.BlockAndReceipts()
if it.Error() != nil {
if err != nil {
return fmt.Errorf("error reading block %d: %w", it.Number(), err)
}
// 2) recompute tx root and verify against header.
@ -294,6 +294,9 @@ func checkAccumulator(e *era.Era) error {
td.Add(td, block.Difficulty())
tds = append(tds, new(big.Int).Set(td))
}
if it.Error() != nil {
return fmt.Errorf("error reading block %d: %w", it.Number(), it.Error())
}
// 4+5) Verify accumulator and total difficulty.
got, err := era.ComputeAccumulator(hashes, tds)
if err != nil {

View file

@ -32,7 +32,6 @@ import (
"github.com/ethereum/go-ethereum/core/tracing"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/params"
@ -152,7 +151,6 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig,
gasUsed = uint64(0)
blobGasUsed = uint64(0)
receipts = make(types.Receipts, 0)
txIndex = 0
)
gaspool.AddGas(pre.Env.GasLimit)
vmContext := vm.BlockContext{
@ -193,6 +191,7 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig,
Time: pre.Env.ParentTimestamp,
ExcessBlobGas: pre.Env.ParentExcessBlobGas,
BlobGasUsed: pre.Env.ParentBlobGasUsed,
BaseFee: pre.Env.ParentBaseFee,
}
header := &types.Header{
Time: pre.Env.Timestamp,
@ -250,24 +249,17 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig,
continue
}
}
statedb.SetTxContext(tx.Hash(), txIndex)
statedb.SetTxContext(tx.Hash(), len(receipts))
var (
snapshot = statedb.Snapshot()
prevGas = gaspool.Gas()
)
if evm.Config.Tracer != nil && evm.Config.Tracer.OnTxStart != nil {
evm.Config.Tracer.OnTxStart(evm.GetVMContext(), tx, msg.From)
}
// (ret []byte, usedGas uint64, failed bool, err error)
msgResult, err := core.ApplyMessage(evm, msg, gaspool)
receipt, err := core.ApplyTransactionWithEVM(msg, gaspool, statedb, vmContext.BlockNumber, blockHash, pre.Env.Timestamp, tx, &gasUsed, evm)
if err != nil {
statedb.RevertToSnapshot(snapshot)
log.Info("rejected tx", "index", i, "hash", tx.Hash(), "from", msg.From, "error", err)
rejectedTxs = append(rejectedTxs, &rejectedTx{i, err.Error()})
gaspool.SetGas(prevGas)
if evm.Config.Tracer != nil && evm.Config.Tracer.OnTxEnd != nil {
evm.Config.Tracer.OnTxEnd(nil, err)
}
continue
}
includedTxs = append(includedTxs, tx)
@ -275,50 +267,11 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig,
return nil, nil, nil, NewError(ErrorMissingBlockhash, hashError)
}
blobGasUsed += txBlobGas
gasUsed += msgResult.UsedGas
// Receipt:
{
var root []byte
if chainConfig.IsByzantium(vmContext.BlockNumber) {
statedb.Finalise(true)
} else {
root = statedb.IntermediateRoot(chainConfig.IsEIP158(vmContext.BlockNumber)).Bytes()
}
// Create a new receipt for the transaction, storing the intermediate root and
// gas used by the tx.
receipt := &types.Receipt{Type: tx.Type(), PostState: root, CumulativeGasUsed: gasUsed}
if msgResult.Failed() {
receipt.Status = types.ReceiptStatusFailed
} else {
receipt.Status = types.ReceiptStatusSuccessful
}
receipt.TxHash = tx.Hash()
receipt.GasUsed = msgResult.UsedGas
// If the transaction created a contract, store the creation address in the receipt.
if msg.To == nil {
receipt.ContractAddress = crypto.CreateAddress(evm.TxContext.Origin, tx.Nonce())
}
// Set the receipt logs and create the bloom filter.
receipt.Logs = statedb.GetLogs(tx.Hash(), vmContext.BlockNumber.Uint64(), blockHash, vmContext.Time)
receipt.Bloom = types.CreateBloom(receipt)
// These three are non-consensus fields:
//receipt.BlockHash
//receipt.BlockNumber
receipt.TransactionIndex = uint(txIndex)
receipts = append(receipts, receipt)
if evm.Config.Tracer != nil && evm.Config.Tracer.OnTxEnd != nil {
evm.Config.Tracer.OnTxEnd(receipt, nil)
}
}
txIndex++
receipts = append(receipts, receipt)
}
statedb.IntermediateRoot(chainConfig.IsEIP158(vmContext.BlockNumber))
// Add mining reward? (-1 means rewards are disabled)
if miningReward >= 0 {
// Add mining reward. The mining reward may be `0`, which only makes a difference in the cases
@ -423,7 +376,7 @@ func MakePreState(db ethdb.Database, accounts types.GenesisAlloc) *state.StateDB
sdb := state.NewDatabase(tdb, nil)
statedb, _ := state.New(types.EmptyRootHash, sdb)
for addr, a := range accounts {
statedb.SetCode(addr, a.Code)
statedb.SetCode(addr, a.Code, tracing.CodeChangeUnspecified)
statedb.SetNonce(addr, a.Nonce, tracing.NonceChangeGenesis)
statedb.SetBalance(addr, uint256.MustFromBig(a.Balance), tracing.BalanceIncreaseGenesisBalance)
for k, v := range a.Storage {

View file

@ -322,7 +322,7 @@ func runCmd(ctx *cli.Context) error {
}
} else {
if len(code) > 0 {
prestate.SetCode(receiver, code)
prestate.SetCode(receiver, code, tracing.CodeChangeUnspecified)
}
execFunc = func() ([]byte, uint64, error) {
// don't mutate the state!

View file

@ -296,6 +296,14 @@ func TestT8n(t *testing.T) {
output: t8nOutput{alloc: true, result: true},
expOut: "exp.json",
},
{ // Osaka test, EIP-7918 blob gas with parent base fee
base: "./testdata/34",
input: t8nInput{
"alloc.json", "txs.json", "env.json", "Osaka", "",
},
output: t8nOutput{alloc: true, result: true},
expOut: "exp.json",
},
} {
args := []string{"t8n"}
args = append(args, tc.output.get()...)

View file

@ -29,7 +29,8 @@
"contractAddress": "0x0000000000000000000000000000000000000000",
"gasUsed": "0x5208",
"effectiveGasPrice": null,
"blockHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
"blockHash": "0x1337000000000000000000000000000000000000000000000000000000000000",
"blockNumber": "0x1",
"transactionIndex": "0x0"
}
],

View file

@ -17,7 +17,8 @@
"contractAddress": "0x0000000000000000000000000000000000000000",
"gasUsed": "0x84d0",
"effectiveGasPrice": null,
"blockHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
"blockHash": "0x1337000000000000000000000000000000000000000000000000000000000000",
"blockNumber": "0x1",
"transactionIndex": "0x0"
},
{
@ -31,7 +32,8 @@
"contractAddress": "0x0000000000000000000000000000000000000000",
"gasUsed": "0x84d0",
"effectiveGasPrice": null,
"blockHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
"blockHash": "0x1337000000000000000000000000000000000000000000000000000000000000",
"blockNumber": "0x1",
"transactionIndex": "0x1"
}
],

View file

@ -16,7 +16,8 @@
"contractAddress": "0x0000000000000000000000000000000000000000",
"gasUsed": "0x520b",
"effectiveGasPrice": null,
"blockHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
"blockHash": "0x1337000000000000000000000000000000000000000000000000000000000000",
"blockNumber": "0x5",
"transactionIndex": "0x0"
}
],

View file

@ -32,7 +32,8 @@
"contractAddress": "0x0000000000000000000000000000000000000000",
"gasUsed": "0xa861",
"effectiveGasPrice": null,
"blockHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
"blockHash": "0x1337000000000000000000000000000000000000000000000000000000000000",
"blockNumber": "0x1",
"transactionIndex": "0x0"
},
{
@ -45,7 +46,8 @@
"contractAddress": "0x0000000000000000000000000000000000000000",
"gasUsed": "0x5aa5",
"effectiveGasPrice": null,
"blockHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
"blockHash": "0x1337000000000000000000000000000000000000000000000000000000000000",
"blockNumber": "0x1",
"transactionIndex": "0x1"
}
],

View file

@ -28,7 +28,8 @@
"contractAddress": "0x0000000000000000000000000000000000000000",
"gasUsed": "0x5208",
"effectiveGasPrice": null,
"blockHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
"blockHash": "0x1337000000000000000000000000000000000000000000000000000000000000",
"blockNumber": "0x1",
"transactionIndex": "0x0"
}
],

View file

@ -33,7 +33,10 @@
"contractAddress": "0x0000000000000000000000000000000000000000",
"gasUsed": "0xa865",
"effectiveGasPrice": null,
"blockHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
"blobGasUsed": "0x20000",
"blobGasPrice": "0x1",
"blockHash": "0x1337000000000000000000000000000000000000000000000000000000000000",
"blockNumber": "0x1",
"transactionIndex": "0x0"
}
],

View file

@ -31,7 +31,8 @@
"contractAddress": "0x0000000000000000000000000000000000000000",
"gasUsed": "0x5208",
"effectiveGasPrice": null,
"blockHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
"blockHash": "0x1337000000000000000000000000000000000000000000000000000000000000",
"blockNumber": "0x1",
"transactionIndex": "0x0"
}
],

View file

@ -29,7 +29,8 @@
"contractAddress": "0x0000000000000000000000000000000000000000",
"gasUsed": "0x521f",
"effectiveGasPrice": null,
"blockHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
"blockHash": "0x1337000000000000000000000000000000000000000000000000000000000000",
"blockNumber": "0x5",
"transactionIndex": "0x0"
}
],

View file

@ -30,7 +30,8 @@
"contractAddress": "0x0000000000000000000000000000000000000000",
"gasUsed": "0x5208",
"effectiveGasPrice": null,
"blockHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
"blockHash": "0x1337000000000000000000000000000000000000000000000000000000000000",
"blockNumber": "0x1",
"transactionIndex": "0x0"
},
{
@ -44,7 +45,8 @@
"contractAddress": "0x0000000000000000000000000000000000000000",
"gasUsed": "0x5208",
"effectiveGasPrice": null,
"blockHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
"blockHash": "0x1337000000000000000000000000000000000000000000000000000000000000",
"blockNumber": "0x1",
"transactionIndex": "0x1"
}
],

View file

@ -48,7 +48,8 @@
"contractAddress": "0x0000000000000000000000000000000000000000",
"gasUsed": "0x15fa9",
"effectiveGasPrice": null,
"blockHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
"blockHash": "0x1337000000000000000000000000000000000000000000000000000000000000",
"blockNumber": "0x1",
"transactionIndex": "0x0"
}
],

6
cmd/evm/testdata/34/README.md vendored Normal file
View file

@ -0,0 +1,6 @@
This test verifies that Osaka fork blob gas calculation works correctly when
parentBaseFee is provided. It tests the EIP-7918 reserve price calculation
which requires parent.BaseFee to be properly set.
Regression test for: nil pointer dereference when parent.BaseFee was not
included in the parent header during Osaka fork blob gas calculations.

6
cmd/evm/testdata/34/alloc.json vendored Normal file
View file

@ -0,0 +1,6 @@
{
"0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b": {
"balance": "0x1000000000000000000",
"nonce": "0x0"
}
}

18
cmd/evm/testdata/34/env.json vendored Normal file
View file

@ -0,0 +1,18 @@
{
"currentCoinbase": "0x0000000000000000000000000000000000000000",
"currentDifficulty": "0x0",
"currentRandom": "0x0000000000000000000000000000000000000000000000000000000000000000",
"currentGasLimit": "0x5f5e100",
"currentNumber": "0x1",
"currentTimestamp": "0x1000",
"parentTimestamp": "0x0",
"currentBaseFee": "0x10",
"parentBaseFee": "0x0a",
"parentGasUsed": "0x0",
"parentGasLimit": "0x5f5e100",
"currentExcessBlobGas": "0x0",
"parentExcessBlobGas": "0x0",
"parentBlobGasUsed": "0x20000",
"parentBeaconBlockRoot": "0x0000000000000000000000000000000000000000000000000000000000000000",
"withdrawals": []
}

23
cmd/evm/testdata/34/exp.json vendored Normal file
View file

@ -0,0 +1,23 @@
{
"alloc": {
"0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b": {
"balance": "0x1000000000000000000"
}
},
"result": {
"stateRoot": "0x01c28492482a1a1f66224726ef1059a7036fce69d1d2c991b65cd013725d5742",
"txRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
"receiptsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
"logsHash": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
"logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
"currentDifficulty": null,
"receipts": [],
"gasUsed": "0x0",
"currentBaseFee": "0x10",
"withdrawalsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
"currentExcessBlobGas": "0x0",
"blobGasUsed": "0x0",
"requestsHash": "0xe3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
"requests": []
}
}

1
cmd/evm/testdata/34/txs.json vendored Normal file
View file

@ -0,0 +1 @@
[]

View file

@ -59,6 +59,8 @@ var (
Flags: slices.Concat([]cli.Flag{
utils.CachePreimagesFlag,
utils.OverrideOsaka,
utils.OverrideBPO1,
utils.OverrideBPO2,
utils.OverrideVerkle,
}, utils.DatabaseFlags),
Description: `
@ -108,6 +110,7 @@ if one is set. Otherwise it prints the genesis from the datadir.`,
utils.MetricsInfluxDBTokenFlag,
utils.MetricsInfluxDBBucketFlag,
utils.MetricsInfluxDBOrganizationFlag,
utils.StateSizeTrackingFlag,
utils.TxLookupLimitFlag,
utils.VMTraceFlag,
utils.VMTraceJsonConfigFlag,
@ -273,6 +276,14 @@ func initGenesis(ctx *cli.Context) error {
v := ctx.Uint64(utils.OverrideOsaka.Name)
overrides.OverrideOsaka = &v
}
if ctx.IsSet(utils.OverrideBPO1.Name) {
v := ctx.Uint64(utils.OverrideBPO1.Name)
overrides.OverrideBPO1 = &v
}
if ctx.IsSet(utils.OverrideBPO2.Name) {
v := ctx.Uint64(utils.OverrideBPO2.Name)
overrides.OverrideBPO2 = &v
}
if ctx.IsSet(utils.OverrideVerkle.Name) {
v := ctx.Uint64(utils.OverrideVerkle.Name)
overrides.OverrideVerkle = &v
@ -281,7 +292,7 @@ func initGenesis(ctx *cli.Context) error {
chaindb := utils.MakeChainDatabase(ctx, stack, false)
defer chaindb.Close()
triedb := utils.MakeTrieDatabase(ctx, chaindb, ctx.Bool(utils.CachePreimagesFlag.Name), false, genesis.IsVerkle())
triedb := utils.MakeTrieDatabase(ctx, stack, chaindb, ctx.Bool(utils.CachePreimagesFlag.Name), false, genesis.IsVerkle())
defer triedb.Close()
_, hash, compatErr, err := core.SetupGenesisBlockWithOverride(chaindb, triedb, genesis, &overrides)
@ -635,7 +646,7 @@ func dump(ctx *cli.Context) error {
if err != nil {
return err
}
triedb := utils.MakeTrieDatabase(ctx, db, true, true, false) // always enable preimage lookup
triedb := utils.MakeTrieDatabase(ctx, stack, db, true, true, false) // always enable preimage lookup
defer triedb.Close()
state, err := state.New(root, state.NewDatabase(triedb, nil))

View file

@ -209,7 +209,7 @@ func constructDevModeBanner(ctx *cli.Context, cfg gethConfig) string {
0x%x (10^49 ETH)
`, cfg.Eth.Miner.PendingFeeRecipient)
if cfg.Eth.Miner.PendingFeeRecipient == utils.DeveloperAddr {
devModeBanner += fmt.Sprintf(`
devModeBanner += fmt.Sprintf(`
Private Key
------------------
0x%x
@ -227,6 +227,14 @@ func makeFullNode(ctx *cli.Context) *node.Node {
v := ctx.Uint64(utils.OverrideOsaka.Name)
cfg.Eth.OverrideOsaka = &v
}
if ctx.IsSet(utils.OverrideBPO1.Name) {
v := ctx.Uint64(utils.OverrideBPO1.Name)
cfg.Eth.OverrideBPO1 = &v
}
if ctx.IsSet(utils.OverrideBPO2.Name) {
v := ctx.Uint64(utils.OverrideBPO2.Name)
cfg.Eth.OverrideBPO2 = &v
}
if ctx.IsSet(utils.OverrideVerkle.Name) {
v := ctx.Uint64(utils.OverrideVerkle.Name)
cfg.Eth.OverrideVerkle = &v

View file

@ -524,7 +524,7 @@ func dbDumpTrie(ctx *cli.Context) error {
db := utils.MakeChainDatabase(ctx, stack, true)
defer db.Close()
triedb := utils.MakeTrieDatabase(ctx, db, false, true, false)
triedb := utils.MakeTrieDatabase(ctx, stack, db, false, true, false)
defer triedb.Close()
var (
@ -859,7 +859,7 @@ func inspectHistory(ctx *cli.Context) error {
db := utils.MakeChainDatabase(ctx, stack, true)
defer db.Close()
triedb := utils.MakeTrieDatabase(ctx, db, false, false, false)
triedb := utils.MakeTrieDatabase(ctx, stack, db, false, false, false)
defer triedb.Close()
var (

View file

@ -91,7 +91,7 @@ func testConsoleLogging(t *testing.T, format string, tStart, tEnd int) {
have = censor(have, tStart, tEnd)
want = censor(want, tStart, tEnd)
if have != want {
t.Logf(nicediff([]byte(have), []byte(want)))
t.Log(nicediff([]byte(have), []byte(want)))
t.Fatalf("format %v, line %d\nhave %v\nwant %v", format, i, have, want)
}
}
@ -142,7 +142,7 @@ func TestJsonLogging(t *testing.T) {
}
if !bytes.Equal(have, want) {
// show an intelligent diff
t.Logf(nicediff(have, want))
t.Log(nicediff(have, want))
t.Errorf("file content wrong")
}
}
@ -211,7 +211,7 @@ func TestFileOut(t *testing.T) {
}
if !bytes.Equal(have, want) {
// show an intelligent diff
t.Logf(nicediff(have, want))
t.Log(nicediff(have, want))
t.Errorf("file content wrong")
}
}
@ -231,7 +231,7 @@ func TestRotatingFileOut(t *testing.T) {
}
if !bytes.Equal(have, want) {
// show an intelligent diff
t.Logf(nicediff(have, want))
t.Log(nicediff(have, want))
t.Errorf("file content wrong")
}
}

View file

@ -63,6 +63,8 @@ var (
utils.USBFlag,
utils.SmartCardDaemonPathFlag,
utils.OverrideOsaka,
utils.OverrideBPO1,
utils.OverrideBPO2,
utils.OverrideVerkle,
utils.EnablePersonal, // deprecated
utils.TxPoolLocalsFlag,
@ -133,6 +135,8 @@ var (
utils.VMEnableDebugFlag,
utils.VMTraceFlag,
utils.VMTraceJsonConfigFlag,
utils.VMWitnessStatsFlag,
utils.VMStatelessSelfValidationFlag,
utils.NetworkIdFlag,
utils.EthStatsURLFlag,
utils.GpoBlocksFlag,
@ -180,6 +184,7 @@ var (
utils.RPCGlobalGasCapFlag,
utils.RPCGlobalEVMTimeoutFlag,
utils.RPCGlobalTxFeeCapFlag,
utils.RPCGlobalLogQueryLimit,
utils.AllowUnprotectedTxs,
utils.BatchRequestLimit,
utils.BatchResponseMaxSize,
@ -200,6 +205,7 @@ var (
utils.MetricsInfluxDBTokenFlag,
utils.MetricsInfluxDBBucketFlag,
utils.MetricsInfluxDBOrganizationFlag,
utils.StateSizeTrackingFlag,
}
)

View file

@ -217,7 +217,7 @@ func verifyState(ctx *cli.Context) error {
log.Error("Failed to load head block")
return errors.New("no head block")
}
triedb := utils.MakeTrieDatabase(ctx, chaindb, false, true, false)
triedb := utils.MakeTrieDatabase(ctx, stack, chaindb, false, true, false)
defer triedb.Close()
var (
@ -282,7 +282,7 @@ func traverseState(ctx *cli.Context) error {
chaindb := utils.MakeChainDatabase(ctx, stack, true)
defer chaindb.Close()
triedb := utils.MakeTrieDatabase(ctx, chaindb, false, true, false)
triedb := utils.MakeTrieDatabase(ctx, stack, chaindb, false, true, false)
defer triedb.Close()
headBlock := rawdb.ReadHeadBlock(chaindb)
@ -391,7 +391,7 @@ func traverseRawState(ctx *cli.Context) error {
chaindb := utils.MakeChainDatabase(ctx, stack, true)
defer chaindb.Close()
triedb := utils.MakeTrieDatabase(ctx, chaindb, false, true, false)
triedb := utils.MakeTrieDatabase(ctx, stack, chaindb, false, true, false)
defer triedb.Close()
headBlock := rawdb.ReadHeadBlock(chaindb)
@ -558,20 +558,14 @@ func dumpState(ctx *cli.Context) error {
if err != nil {
return err
}
triedb := utils.MakeTrieDatabase(ctx, db, false, true, false)
triedb := utils.MakeTrieDatabase(ctx, stack, db, false, true, false)
defer triedb.Close()
snapConfig := snapshot.Config{
CacheSize: 256,
Recovery: false,
NoBuild: true,
AsyncBuild: false,
}
snaptree, err := snapshot.New(snapConfig, db, triedb, root)
stateIt, err := utils.NewStateIterator(triedb, db, root)
if err != nil {
return err
}
accIt, err := snaptree.AccountIterator(root, common.BytesToHash(conf.Start))
accIt, err := stateIt.AccountIterator(root, common.BytesToHash(conf.Start))
if err != nil {
return err
}
@ -605,7 +599,7 @@ func dumpState(ctx *cli.Context) error {
if !conf.SkipStorage {
da.Storage = make(map[common.Hash]string)
stIt, err := snaptree.StorageIterator(root, accIt.Hash(), common.Hash{})
stIt, err := stateIt.StorageIterator(root, accIt.Hash(), common.Hash{})
if err != nil {
return err
}
@ -640,7 +634,7 @@ func snapshotExportPreimages(ctx *cli.Context) error {
chaindb := utils.MakeChainDatabase(ctx, stack, true)
defer chaindb.Close()
triedb := utils.MakeTrieDatabase(ctx, chaindb, false, true, false)
triedb := utils.MakeTrieDatabase(ctx, stack, chaindb, false, true, false)
defer triedb.Close()
var root common.Hash
@ -658,17 +652,11 @@ func snapshotExportPreimages(ctx *cli.Context) error {
}
root = headBlock.Root()
}
snapConfig := snapshot.Config{
CacheSize: 256,
Recovery: false,
NoBuild: true,
AsyncBuild: false,
}
snaptree, err := snapshot.New(snapConfig, chaindb, triedb, root)
stateIt, err := utils.NewStateIterator(triedb, chaindb, root)
if err != nil {
return err
}
return utils.ExportSnapshotPreimages(chaindb, snaptree, ctx.Args().First(), root)
return utils.ExportSnapshotPreimages(chaindb, stateIt, ctx.Args().First(), root)
}
// checkAccount iterates the snap data layers, and looks up the given account

BIN
cmd/keeper/1192c3_block.rlp Normal file

Binary file not shown.

Binary file not shown.

69
cmd/keeper/README.md Normal file
View file

@ -0,0 +1,69 @@
# Keeper - geth as a zkvm guest
Keeper command is a specialized tool for validating stateless execution of Ethereum blocks. It's designed to run as a zkvm guest.
## Overview
The keeper reads an RLP-encoded payload containing:
- A block to execute
- A witness with the necessary state data
- A chainID
It then executes the block statelessly and validates that the computed state root and receipt root match the values in the block header.
## Building Keeper
The keeper uses build tags to compile platform-specific input methods and chain configurations:
### Example Implementation
See `getpayload_example.go` for a complete example with embedded Hoodi block data:
```bash
# Build example with different chain configurations
go build -tags "example" ./cmd/keeper
```
### Ziren zkVM Implementation
Build for the Ziren zkVM platform, which is a MIPS ISA-based zkvm:
```bash
GOOS=linux GOARCH=mipsle GOMIPS=softfloat go build -tags "ziren" ./cmd/keeper
```
As an example runner, refer to https://gist.github.com/gballet/7b669a99eb3ab2b593324e3a76abd23d
## Creating a Custom Platform Implementation
To add support for a new platform (e.g., "myplatform"), create a new file with the appropriate build tag:
### 1. Create `getinput_myplatform.go`
```go
//go:build myplatform
package main
import (
"github.com/ethereum/go-ethereum/params"
// ... other imports as needed
)
// getInput returns the RLP-encoded payload
func getInput() []byte {
// Your platform-specific code to retrieve the RLP-encoded payload
// This might read from:
// - Memory-mapped I/O
// - Hardware registers
// - Serial port
// - Network interface
// - File system
// The payload must be RLP-encoded and contain:
// - Block with transactions
// - Witness with parent headers and state data
return encodedPayload
}
```

38
cmd/keeper/chainconfig.go Normal file
View file

@ -0,0 +1,38 @@
// Copyright 2025 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package main
import (
"fmt"
"github.com/ethereum/go-ethereum/params"
)
// getChainConfig returns the appropriate chain configuration based on the chainID.
// Returns an error for unsupported chain IDs.
func getChainConfig(chainID uint64) (*params.ChainConfig, error) {
switch chainID {
case 0, params.MainnetChainConfig.ChainID.Uint64():
return params.MainnetChainConfig, nil
case params.SepoliaChainConfig.ChainID.Uint64():
return params.SepoliaChainConfig, nil
case params.HoodiChainConfig.ChainID.Uint64():
return params.HoodiChainConfig, nil
default:
return nil, fmt.Errorf("unsupported chain ID: %d", chainID)
}
}

View file

@ -0,0 +1,102 @@
// Copyright 2025 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
//go:build example
package main
import (
_ "embed"
"fmt"
"os"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/core/stateless"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp"
)
// ExtWitness is a witness RLP encoding for transferring across clients.
// This is taken from PR #32216 until it's merged.
// It contains block headers, contract codes, state nodes, and storage keys
// required for stateless execution verification.
type ExtWitness struct {
Headers []*types.Header `json:"headers"`
Codes []hexutil.Bytes `json:"codes"`
State []hexutil.Bytes `json:"state"`
Keys []hexutil.Bytes `json:"keys"`
}
// This is taken from PR #32216 until it's merged
// fromExtWitness converts the consensus witness format into our internal one.
func fromExtWitness(ext *ExtWitness) (*stateless.Witness, error) {
w := &stateless.Witness{}
w.Headers = ext.Headers
w.Codes = make(map[string]struct{}, len(ext.Codes))
for _, code := range ext.Codes {
w.Codes[string(code)] = struct{}{}
}
w.State = make(map[string]struct{}, len(ext.State))
for _, node := range ext.State {
w.State[string(node)] = struct{}{}
}
return w, nil
}
//go:embed 1192c3_witness.rlp
var witnessRlp []byte
//go:embed 1192c3_block.rlp
var blockRlp []byte
// getInput is a platform-specific function that will recover the input payload
// and returns it as a slice. It is expected to be an RLP-encoded Payload structure
// that contains the witness and the block.
// This is a demo version, that is intended to run on a regular computer, so what
// it does is embed a small Hoodi block, encodes the Payload structure containing
// the block and its witness as RLP, and returns the encoding.
func getInput() []byte {
	// Decode the embedded block and witness fixtures.
	var block types.Block
	if err := rlp.DecodeBytes(blockRlp, &block); err != nil {
		panic(err)
	}
	var ext ExtWitness
	if err := rlp.DecodeBytes(witnessRlp, &ext); err != nil {
		panic(err)
	}
	witness, err := fromExtWitness(&ext)
	if err != nil {
		panic(err)
	}
	// Wrap everything into the Payload envelope and RLP-encode it.
	encoded, err := rlp.EncodeToBytes(Payload{
		ChainID: params.HoodiChainConfig.ChainID.Uint64(),
		Block:   &block,
		Witness: witness,
	})
	if err != nil {
		fmt.Fprintf(os.Stderr, "failed to encode payload: %v\n", err)
		os.Exit(20)
	}
	return encoded
}

View file

@ -0,0 +1,31 @@
// Copyright 2025 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
//go:build ziren
package main
import (
zkruntime "github.com/zkMIPS/zkMIPS/crates/go-runtime/zkm_runtime"
)
// getInput reads the input payload from the zkVM runtime environment.
// The zkVM host provides the RLP-encoded Payload structure containing
// the block and witness data through the runtime's input mechanism.
func getInput() []byte {
	return zkruntime.Read[[]byte]()
}

49
cmd/keeper/go.mod Normal file
View file

@ -0,0 +1,49 @@
module github.com/ethereum/go-ethereum/cmd/keeper
go 1.24.0
require (
github.com/ethereum/go-ethereum v0.0.0-00010101000000-000000000000
github.com/zkMIPS/zkMIPS/crates/go-runtime/zkm_runtime v0.0.0-20250915074013-fbc07aa2c6f5
)
require (
github.com/StackExchange/wmi v1.2.1 // indirect
github.com/VictoriaMetrics/fastcache v1.12.2 // indirect
github.com/bits-and-blooms/bitset v1.20.0 // indirect
github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/consensys/gnark-crypto v0.18.0 // indirect
github.com/crate-crypto/go-eth-kzg v1.4.0 // indirect
github.com/crate-crypto/go-ipa v0.0.0-20240724233137-53bbb0ceb27a // indirect
github.com/deckarep/golang-set/v2 v2.6.0 // indirect
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1 // indirect
github.com/emicklei/dot v1.6.2 // indirect
github.com/ethereum/c-kzg-4844/v2 v2.1.3 // indirect
github.com/ethereum/go-bigmodexpfix v0.0.0-20250911101455-f9e208c548ab // indirect
github.com/ethereum/go-verkle v0.2.2 // indirect
github.com/ferranbt/fastssz v0.1.4 // indirect
github.com/go-ole/go-ole v1.3.0 // indirect
github.com/gofrs/flock v0.12.1 // indirect
github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb // indirect
github.com/holiman/bloomfilter/v2 v2.0.3 // indirect
github.com/holiman/uint256 v1.3.2 // indirect
github.com/klauspost/cpuid/v2 v2.0.9 // indirect
github.com/mattn/go-runewidth v0.0.13 // indirect
github.com/minio/sha256-simd v1.0.0 // indirect
github.com/mitchellh/mapstructure v1.4.1 // indirect
github.com/olekukonko/tablewriter v0.0.5 // indirect
github.com/rivo/uniseg v0.2.0 // indirect
github.com/shirou/gopsutil v3.21.4-0.20210419000835-c7a38de76ee5+incompatible // indirect
github.com/supranational/blst v0.3.16-0.20250831170142-f48500c1fdbe // indirect
github.com/tklauser/go-sysconf v0.3.12 // indirect
github.com/tklauser/numcpus v0.6.1 // indirect
golang.org/x/crypto v0.36.0 // indirect
golang.org/x/sync v0.12.0 // indirect
golang.org/x/sys v0.36.0 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
)
replace (
github.com/ethereum/go-ethereum => ../../
github.com/zkMIPS/zkMIPS/crates/go-runtime/zkm_runtime => github.com/weilzkm/zkMIPS/crates/go-runtime/zkvm_runtime v0.0.0-20250915074013-fbc07aa2c6f5
)

150
cmd/keeper/go.sum Normal file
View file

@ -0,0 +1,150 @@
github.com/DataDog/zstd v1.4.5 h1:EndNeuB0l9syBZhut0wns3gV1hL8zX8LIu6ZiVHWLIQ=
github.com/DataDog/zstd v1.4.5/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo=
github.com/StackExchange/wmi v1.2.1 h1:VIkavFPXSjcnS+O8yTq7NI32k0R5Aj+v39y29VYDOSA=
github.com/StackExchange/wmi v1.2.1/go.mod h1:rcmrprowKIVzvc+NUiLncP2uuArMWLCbu9SBzvHz7e8=
github.com/VictoriaMetrics/fastcache v1.12.2 h1:N0y9ASrJ0F6h0QaC3o6uJb3NIZ9VKLjCM7NQbSmF7WI=
github.com/VictoriaMetrics/fastcache v1.12.2/go.mod h1:AmC+Nzz1+3G2eCPapF6UcsnkThDcMsQicp4xDukwJYI=
github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156 h1:eMwmnE/GDgah4HI848JfFxHt+iPb26b4zyfspmqY0/8=
github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156/go.mod h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2ucNfQESPXM=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/bits-and-blooms/bitset v1.20.0 h1:2F+rfL86jE2d/bmw7OhqUg2Sj/1rURkBn3MdfoPyRVU=
github.com/bits-and-blooms/bitset v1.20.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8=
github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cockroachdb/errors v1.11.3 h1:5bA+k2Y6r+oz/6Z/RFlNeVCesGARKuC6YymtcDrbC/I=
github.com/cockroachdb/errors v1.11.3/go.mod h1:m4UIW4CDjx+R5cybPsNrRbreomiFqt8o1h1wUVazSd8=
github.com/cockroachdb/fifo v0.0.0-20240606204812-0bbfbd93a7ce h1:giXvy4KSc/6g/esnpM7Geqxka4WSqI1SZc7sMJFd3y4=
github.com/cockroachdb/fifo v0.0.0-20240606204812-0bbfbd93a7ce/go.mod h1:9/y3cnZ5GKakj/H4y9r9GTjCvAFta7KLgSHPJJYc52M=
github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b h1:r6VH0faHjZeQy818SGhaone5OnYfxFR/+AzdY3sf5aE=
github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b/go.mod h1:Vz9DsVWQQhf3vs21MhPMZpMGSht7O/2vFW2xusFUVOs=
github.com/cockroachdb/pebble v1.1.5 h1:5AAWCBWbat0uE0blr8qzufZP5tBjkRyy/jWe1QWLnvw=
github.com/cockroachdb/pebble v1.1.5/go.mod h1:17wO9el1YEigxkP/YtV8NtCivQDgoCyBg5c4VR/eOWo=
github.com/cockroachdb/redact v1.1.5 h1:u1PMllDkdFfPWaNGMyLD1+so+aq3uUItthCFqzwPJ30=
github.com/cockroachdb/redact v1.1.5/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg=
github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 h1:zuQyyAKVxetITBuuhv3BI9cMrmStnpT18zmgmTxunpo=
github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06/go.mod h1:7nc4anLGjupUW/PeY5qiNYsdNXj7zopG+eqsS7To5IQ=
github.com/consensys/gnark-crypto v0.18.0 h1:vIye/FqI50VeAr0B3dx+YjeIvmc3LWz4yEfbWBpTUf0=
github.com/consensys/gnark-crypto v0.18.0/go.mod h1:L3mXGFTe1ZN+RSJ+CLjUt9x7PNdx8ubaYfDROyp2Z8c=
github.com/crate-crypto/go-eth-kzg v1.4.0 h1:WzDGjHk4gFg6YzV0rJOAsTK4z3Qkz5jd4RE3DAvPFkg=
github.com/crate-crypto/go-eth-kzg v1.4.0/go.mod h1:J9/u5sWfznSObptgfa92Jq8rTswn6ahQWEuiLHOjCUI=
github.com/crate-crypto/go-ipa v0.0.0-20240724233137-53bbb0ceb27a h1:W8mUrRp6NOVl3J+MYp5kPMoUZPp7aOYHtaua31lwRHg=
github.com/crate-crypto/go-ipa v0.0.0-20240724233137-53bbb0ceb27a/go.mod h1:sTwzHBvIzm2RfVCGNEBZgRyjwK40bVoun3ZnGOCafNM=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/deckarep/golang-set/v2 v2.6.0 h1:XfcQbWM1LlMB8BsJ8N9vW5ehnnPVIw0je80NsVHagjM=
github.com/deckarep/golang-set/v2 v2.6.0/go.mod h1:VAky9rY/yGXJOLEDv3OMci+7wtDpOF4IN+y82NBOac4=
github.com/decred/dcrd/crypto/blake256 v1.0.0 h1:/8DMNYp9SGi5f0w7uCm6d6M4OU2rGFK09Y2A4Xv7EE0=
github.com/decred/dcrd/crypto/blake256 v1.0.0/go.mod h1:sQl2p6Y26YV+ZOcSTP6thNdn47hh8kt6rqSlvmrXFAc=
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1 h1:YLtO71vCjJRCBcrPMtQ9nqBsqpA1m5sE92cU+pd5Mcc=
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1/go.mod h1:hyedUtir6IdtD/7lIxGeCxkaw7y45JueMRL4DIyJDKs=
github.com/emicklei/dot v1.6.2 h1:08GN+DD79cy/tzN6uLCT84+2Wk9u+wvqP+Hkx/dIR8A=
github.com/emicklei/dot v1.6.2/go.mod h1:DeV7GvQtIw4h2u73RKBkkFdvVAz0D9fzeJrgPW6gy/s=
github.com/ethereum/c-kzg-4844/v2 v2.1.3 h1:DQ21UU0VSsuGy8+pcMJHDS0CV1bKmJmxsJYK8l3MiLU=
github.com/ethereum/c-kzg-4844/v2 v2.1.3/go.mod h1:fyNcYI/yAuLWJxf4uzVtS8VDKeoAaRM8G/+ADz/pRdA=
github.com/ethereum/go-bigmodexpfix v0.0.0-20250911101455-f9e208c548ab h1:rvv6MJhy07IMfEKuARQ9TKojGqLVNxQajaXEp/BoqSk=
github.com/ethereum/go-bigmodexpfix v0.0.0-20250911101455-f9e208c548ab/go.mod h1:IuLm4IsPipXKF7CW5Lzf68PIbZ5yl7FFd74l/E0o9A8=
github.com/ethereum/go-verkle v0.2.2 h1:I2W0WjnrFUIzzVPwm8ykY+7pL2d4VhlsePn4j7cnFk8=
github.com/ethereum/go-verkle v0.2.2/go.mod h1:M3b90YRnzqKyyzBEWJGqj8Qff4IDeXnzFw0P9bFw3uk=
github.com/ferranbt/fastssz v0.1.4 h1:OCDB+dYDEQDvAgtAGnTSidK1Pe2tW3nFV40XyMkTeDY=
github.com/ferranbt/fastssz v0.1.4/go.mod h1:Ea3+oeoRGGLGm5shYAeDgu6PGUlcvQhE2fILyD9+tGg=
github.com/getsentry/sentry-go v0.27.0 h1:Pv98CIbtB3LkMWmXi4Joa5OOcwbmnX88sF5qbK3r3Ps=
github.com/getsentry/sentry-go v0.27.0/go.mod h1:lc76E2QywIyW8WuBnwl8Lc4bkmQH4+w1gwTf25trprY=
github.com/go-ole/go-ole v1.2.5/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
github.com/go-ole/go-ole v1.3.0 h1:Dt6ye7+vXGIKZ7Xtk4s6/xVdGDQynvom7xCFEdWr6uE=
github.com/go-ole/go-ole v1.3.0/go.mod h1:5LS6F96DhAwUc7C+1HLexzMXY1xGRSryjyPPKW6zv78=
github.com/gofrs/flock v0.12.1 h1:MTLVXXHf8ekldpJk3AKicLij9MdwOWkZ+a/jHHZby9E=
github.com/gofrs/flock v0.12.1/go.mod h1:9zxTsyu5xtJ9DK+1tFZyibEV7y3uwDxPPfbxeeHCoD0=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb h1:PBC98N2aIaM3XXiurYmW7fx4GZkL8feAMVq7nEjURHk=
github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/holiman/bloomfilter/v2 v2.0.3 h1:73e0e/V0tCydx14a0SCYS/EWCxgwLZ18CZcZKVu0fao=
github.com/holiman/bloomfilter/v2 v2.0.3/go.mod h1:zpoh+gs7qcpqrHr3dB55AMiJwo0iURXE7ZOP9L9hSkA=
github.com/holiman/uint256 v1.3.2 h1:a9EgMPSC1AAaj1SZL5zIQD3WbwTuHrMGOerLjGmM/TA=
github.com/holiman/uint256 v1.3.2/go.mod h1:EOMSn4q6Nyt9P6efbI3bueV4e1b3dGlUCXeiRV4ng7E=
github.com/klauspost/compress v1.16.0 h1:iULayQNOReoYUe+1qtKOqw9CwJv3aNQu8ivo7lw1HU4=
github.com/klauspost/compress v1.16.0/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
github.com/klauspost/cpuid/v2 v2.0.4/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
github.com/klauspost/cpuid/v2 v2.0.9 h1:lgaqFMSdTdQYdZ04uHyN2d/eKdOMyi2YLSvlQIBFYa4=
github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
github.com/leanovate/gopter v0.2.11 h1:vRjThO1EKPb/1NsDXuDrzldR28RLkBflWYcU9CvzWu4=
github.com/leanovate/gopter v0.2.11/go.mod h1:aK3tzZP/C+p1m3SPRE4SYZFGP7jjkuSI4f7Xvpt0S9c=
github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
github.com/mattn/go-runewidth v0.0.13 h1:lTGmDsbAYt5DmK6OnoV7EuIF1wEIFAcxld6ypU4OSgU=
github.com/mattn/go-runewidth v0.0.13/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
github.com/minio/sha256-simd v1.0.0 h1:v1ta+49hkWZyvaKwrQB8elexRqm6Y0aMLjCNsrYxo6g=
github.com/minio/sha256-simd v1.0.0/go.mod h1:OuYzVNI5vcoYIAmbIvHPl3N3jUzVedXbKy5RFepssQM=
github.com/mitchellh/mapstructure v1.4.1 h1:CpVNEelQCZBooIPDn+AR3NpivK/TIKU8bDxdASFVQag=
github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec=
github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_golang v1.15.0 h1:5fCgGYogn0hFdhyhLbw7hEsWxufKtY9klyvdNfFlFhM=
github.com/prometheus/client_golang v1.15.0/go.mod h1:e9yaBhRPU2pPNsZwE+JdQl0KEt1N9XgF6zxWmaC0xOk=
github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4=
github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w=
github.com/prometheus/common v0.42.0 h1:EKsfXEYo4JpWMHH5cg+KOUWeuJSov1Id8zGR8eeI1YM=
github.com/prometheus/common v0.42.0/go.mod h1:xBwqVerjNdUDjgODMpudtOMwlOwf2SaTr1yjz4b7Zbc=
github.com/prometheus/procfs v0.9.0 h1:wzCHvIvM5SxWqYvwgVL7yJY8Lz3PKn49KQtpgMYJfhI=
github.com/prometheus/procfs v0.9.0/go.mod h1:+pB4zwohETzFnmlpe6yd2lSc+0/46IYZRB/chUwxUZY=
github.com/prysmaticlabs/gohashtree v0.0.4-beta h1:H/EbCuXPeTV3lpKeXGPpEV9gsUpkqOOVnWapUyeWro4=
github.com/prysmaticlabs/gohashtree v0.0.4-beta/go.mod h1:BFdtALS+Ffhg3lGQIHv9HDWuHS8cTvHZzrHWxwOtGOs=
github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY=
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
github.com/shirou/gopsutil v3.21.4-0.20210419000835-c7a38de76ee5+incompatible h1:Bn1aCHHRnjv4Bl16T8rcaFjYSrGrIZvpiGO6P3Q4GpU=
github.com/shirou/gopsutil v3.21.4-0.20210419000835-c7a38de76ee5+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/supranational/blst v0.3.16-0.20250831170142-f48500c1fdbe h1:nbdqkIGOGfUAD54q1s2YBcBz/WcsxCO9HUQ4aGV5hUw=
github.com/supranational/blst v0.3.16-0.20250831170142-f48500c1fdbe/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw=
github.com/tklauser/go-sysconf v0.3.12 h1:0QaGUFOdQaIVdPgfITYzaTegZvdCjmYO52cSFAEVmqU=
github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI=
github.com/tklauser/numcpus v0.6.1 h1:ng9scYS7az0Bk4OZLvrNXNSAO2Pxr1XXRAPyjhIx+Fk=
github.com/tklauser/numcpus v0.6.1/go.mod h1:1XfjsgE2zo8GVw7POkMbHENHzVg3GzmoZ9fESEdAacY=
github.com/weilzkm/zkMIPS/crates/go-runtime/zkvm_runtime v0.0.0-20250915074013-fbc07aa2c6f5 h1:MxKlbmI7Dta6O6Nsc9OAer/rOltjoL11CVLMqCiYnxU=
github.com/weilzkm/zkMIPS/crates/go-runtime/zkvm_runtime v0.0.0-20250915074013-fbc07aa2c6f5/go.mod h1:zk/SUgiiVz2U1ufZ+yM2MHPbD93W25KH5zK3qAxXbT4=
golang.org/x/crypto v0.36.0 h1:AnAEvhDddvBdpY+uR+MyHmuZzzNqXSe/GvuDeob5L34=
golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc=
golang.org/x/exp v0.0.0-20230626212559-97b1e661b5df h1:UA2aFVmmsIlefxMk29Dp2juaUSth8Pyn3Tq5Y5mJGME=
golang.org/x/exp v0.0.0-20230626212559-97b1e661b5df/go.mod h1:FXUEEKJgO7OQYeo8N01OfiKP8RXMtf6e8aTskBGqWdc=
golang.org/x/sync v0.12.0 h1:MHc5BpPuC30uJk597Ri8TV3CNZcTLu6B6z4lJy+g6Jw=
golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.36.0 h1:KVRy2GtZBrk1cBYA7MKu5bEZFxQk4NIDV6RLVcC8o0k=
golang.org/x/sys v0.36.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY=
golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4=
google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg=
google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=

68
cmd/keeper/main.go Normal file
View file

@ -0,0 +1,68 @@
// Copyright 2025 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package main
import (
"fmt"
"os"
"runtime/debug"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/stateless"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/rlp"
)
// Payload represents the input data for stateless execution containing
// a block and its associated witness data for verification.
type Payload struct {
	ChainID uint64             // chain identifier used to resolve the chain config (see getChainConfig)
	Block   *types.Block       // block to execute statelessly
	Witness *stateless.Witness // witness data required to execute the block without state
}
// init turns off Go's garbage collector for the lifetime of the process.
// NOTE(review): presumably this avoids GC overhead/nondeterminism inside the
// zkVM guest where the program runs once and exits — TODO confirm.
func init() {
	debug.SetGCPercent(-1) // Disable garbage collection
}
// main drives the stateless self-validation flow: it reads the RLP-encoded
// Payload from the platform-specific input source, re-executes the block
// against its witness, and cross-checks the resulting state and receipt
// roots against the block header. Any failure terminates the process with
// a distinct exit code so the host can tell the failure modes apart.
func main() {
	input := getInput()

	var payload Payload
	// The decode error was previously ignored; a malformed input would leave
	// payload.Block nil and crash later with an unrelated panic. Fail fast
	// with a dedicated exit code instead.
	if err := rlp.DecodeBytes(input, &payload); err != nil {
		fmt.Fprintf(os.Stderr, "failed to decode input payload: %v\n", err)
		os.Exit(14)
	}
	chainConfig, err := getChainConfig(payload.ChainID)
	if err != nil {
		fmt.Fprintf(os.Stderr, "failed to get chain config: %v\n", err)
		os.Exit(13)
	}
	vmConfig := vm.Config{}

	// Re-execute the block statelessly and compare the computed roots with
	// the ones committed in the block header.
	crossStateRoot, crossReceiptRoot, err := core.ExecuteStateless(chainConfig, vmConfig, payload.Block, payload.Witness)
	if err != nil {
		fmt.Fprintf(os.Stderr, "stateless self-validation failed: %v\n", err)
		os.Exit(10)
	}
	if crossStateRoot != payload.Block.Root() {
		fmt.Fprintf(os.Stderr, "stateless self-validation root mismatch (cross: %x local: %x)\n", crossStateRoot, payload.Block.Root())
		os.Exit(11)
	}
	if crossReceiptRoot != payload.Block.ReceiptHash() {
		fmt.Fprintf(os.Stderr, "stateless self-validation receipt root mismatch (cross: %x local: %x)\n", crossReceiptRoot, payload.Block.ReceiptHash())
		os.Exit(12)
	}
}

View file

@ -1,4 +1,4 @@
// Copyright 2019 The go-ethereum Authors
// Copyright 2025 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
@ -14,14 +14,13 @@
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
//go:build tools
// +build tools
//go:build !example && !ziren
package tools
package main
import (
// Tool imports for go:generate.
_ "github.com/fjl/gencodec"
_ "golang.org/x/tools/cmd/stringer"
_ "google.golang.org/protobuf/cmd/protoc-gen-go"
)
// getInput is a stub implementation for when no platform-specific build tags are set.
// This allows golangci-lint to typecheck the code without errors.
// The actual implementations are provided in platform-specific files.
func getInput() []byte {
	panic("stub") // never reached: real binaries are built with the example or ziren tag
}

View file

@ -48,6 +48,7 @@ import (
"github.com/ethereum/go-ethereum/node"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/triedb"
"github.com/urfave/cli/v2"
)
@ -567,9 +568,64 @@ func ExportPreimages(db ethdb.Database, fn string) error {
return nil
}
// StateIterator is a temporary structure for traversing state in order. It serves
// as an aggregator for both path scheme and hash scheme implementations and should
// be removed once the hash scheme is fully deprecated.
type StateIterator struct {
	scheme    string           // state scheme in use: rawdb.PathScheme or rawdb.HashScheme
	root      common.Hash      // state root this iterator was constructed for
	triedb    *triedb.Database // trie database, iterated directly in path scheme
	snapshots *snapshot.Tree   // snapshot tree, only populated in hash scheme
}
// NewStateIterator constructs the state iterator with the specific root.
// In path scheme the trie database is iterated directly; in hash scheme a
// read-only snapshot tree is opened on top of the database first.
func NewStateIterator(triedb *triedb.Database, db ethdb.Database, root common.Hash) (*StateIterator, error) {
	if triedb.Scheme() == rawdb.PathScheme {
		return &StateIterator{
			scheme: rawdb.PathScheme,
			root:   root,
			triedb: triedb,
		}, nil
	}
	// Hash scheme: open the snapshot tree without (re)building it.
	snaps, err := snapshot.New(snapshot.Config{
		CacheSize:  256,
		Recovery:   false,
		NoBuild:    true,
		AsyncBuild: false,
	}, db, triedb, root)
	if err != nil {
		return nil, err
	}
	return &StateIterator{
		scheme:    rawdb.HashScheme,
		root:      root,
		triedb:    triedb,
		snapshots: snaps,
	}, nil
}
// AccountIterator creates a new account iterator for the specified root hash and
// seeks to a starting account hash.
func (it *StateIterator) AccountIterator(root common.Hash, start common.Hash) (snapshot.AccountIterator, error) {
	switch it.scheme {
	case rawdb.PathScheme:
		return it.triedb.AccountIterator(root, start)
	default:
		return it.snapshots.AccountIterator(root, start)
	}
}
// StorageIterator creates a new storage iterator for the specified root hash and
// account. The iterator will be moved to the specific start position.
func (it *StateIterator) StorageIterator(root common.Hash, accountHash common.Hash, start common.Hash) (snapshot.StorageIterator, error) {
	switch it.scheme {
	case rawdb.PathScheme:
		return it.triedb.StorageIterator(root, accountHash, start)
	default:
		return it.snapshots.StorageIterator(root, accountHash, start)
	}
}
// ExportSnapshotPreimages exports the preimages corresponding to the enumeration of
// the snapshot for a given root.
func ExportSnapshotPreimages(chaindb ethdb.Database, snaptree *snapshot.Tree, fn string, root common.Hash) error {
func ExportSnapshotPreimages(chaindb ethdb.Database, stateIt *StateIterator, fn string, root common.Hash) error {
log.Info("Exporting preimages", "file", fn)
fh, err := os.OpenFile(fn, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, os.ModePerm)
@ -602,7 +658,7 @@ func ExportSnapshotPreimages(chaindb ethdb.Database, snaptree *snapshot.Tree, fn
)
go func() {
defer close(hashCh)
accIt, err := snaptree.AccountIterator(root, common.Hash{})
accIt, err := stateIt.AccountIterator(root, common.Hash{})
if err != nil {
log.Error("Failed to create account iterator", "error", err)
return
@ -619,7 +675,7 @@ func ExportSnapshotPreimages(chaindb ethdb.Database, snaptree *snapshot.Tree, fn
hashCh <- hashAndPreimageSize{Hash: accIt.Hash(), Size: common.AddressLength}
if acc.Root != (common.Hash{}) && acc.Root != types.EmptyRootHash {
stIt, err := snaptree.StorageIterator(root, accIt.Hash(), common.Hash{})
stIt, err := stateIt.StorageIterator(root, accIt.Hash(), common.Hash{})
if err != nil {
log.Error("Failed to create storage iterator", "error", err)
return

View file

@ -24,7 +24,6 @@ import (
"encoding/json"
"errors"
"fmt"
"math"
"math/big"
"net"
"net/http"
@ -248,6 +247,16 @@ var (
Usage: "Manually specify the Osaka fork timestamp, overriding the bundled setting",
Category: flags.EthCategory,
}
OverrideBPO1 = &cli.Uint64Flag{
Name: "override.bpo1",
Usage: "Manually specify the bpo1 fork timestamp, overriding the bundled setting",
Category: flags.EthCategory,
}
OverrideBPO2 = &cli.Uint64Flag{
Name: "override.bpo2",
Usage: "Manually specify the bpo2 fork timestamp, overriding the bundled setting",
Category: flags.EthCategory,
}
OverrideVerkle = &cli.Uint64Flag{
Name: "override.verkle",
Usage: "Manually specify the Verkle fork timestamp, overriding the bundled setting",
@ -270,6 +279,12 @@ var (
Usage: "Scheme to use for storing ethereum state ('hash' or 'path')",
Category: flags.StateCategory,
}
StateSizeTrackingFlag = &cli.BoolFlag{
Name: "state.size-tracking",
Usage: "Enable state size tracking, retrieve state size with debug_stateSize.",
Value: ethconfig.Defaults.EnableStateSizeTracking,
Category: flags.StateCategory,
}
StateHistoryFlag = &cli.Uint64Flag{
Name: "history.state",
Usage: "Number of recent blocks to retain state history for, only relevant in state.scheme=path (default = 90,000 blocks, 0 = entire chain)",
@ -565,6 +580,16 @@ var (
Value: "{}",
Category: flags.VMCategory,
}
VMWitnessStatsFlag = &cli.BoolFlag{
Name: "vmwitnessstats",
Usage: "Enable collection of witness trie access statistics (automatically enables witness generation)",
Category: flags.VMCategory,
}
VMStatelessSelfValidationFlag = &cli.BoolFlag{
Name: "stateless-self-validation",
Usage: "Generate execution witnesses and self-check against them (testing purpose)",
Category: flags.VMCategory,
}
// API options.
RPCGlobalGasCapFlag = &cli.Uint64Flag{
Name: "rpc.gascap",
@ -584,6 +609,12 @@ var (
Value: ethconfig.Defaults.RPCTxFeeCap,
Category: flags.APICategory,
}
RPCGlobalLogQueryLimit = &cli.IntFlag{
Name: "rpc.logquerylimit",
Usage: "Maximum number of alternative addresses or topics allowed per search position in eth_getLogs filter criteria (0 = no cap)",
Value: ethconfig.Defaults.LogQueryLimit,
Category: flags.APICategory,
}
// Authenticated RPC HTTP settings
AuthListenFlag = &cli.StringFlag{
Name: "authrpc.addr",
@ -1588,7 +1619,7 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) {
}
// Ensure Go's GC ignores the database cache for trigger percentage
cache := ctx.Int(CacheFlag.Name)
gogc := math.Max(20, math.Min(100, 100/(float64(cache)/1024)))
gogc := max(20, min(100, 100/(float64(cache)/1024)))
log.Debug("Sanitizing Go's GC trigger", "percent", int(gogc))
godebug.SetGCPercent(int(gogc))
@ -1683,6 +1714,9 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) {
if ctx.IsSet(CacheLogSizeFlag.Name) {
cfg.FilterLogCacheSize = ctx.Int(CacheLogSizeFlag.Name)
}
if ctx.IsSet(RPCGlobalLogQueryLimit.Name) {
cfg.LogQueryLimit = ctx.Int(RPCGlobalLogQueryLimit.Name)
}
if !ctx.Bool(SnapshotFlag.Name) || cfg.SnapshotCache == 0 {
// If snap-sync is requested, this flag is also required
if cfg.SyncMode == ethconfig.SnapSync {
@ -1701,6 +1735,16 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) {
if ctx.IsSet(VMEnableDebugFlag.Name) {
cfg.EnablePreimageRecording = ctx.Bool(VMEnableDebugFlag.Name)
}
if ctx.IsSet(VMWitnessStatsFlag.Name) {
cfg.EnableWitnessStats = ctx.Bool(VMWitnessStatsFlag.Name)
}
if ctx.IsSet(VMStatelessSelfValidationFlag.Name) {
cfg.StatelessSelfValidation = ctx.Bool(VMStatelessSelfValidationFlag.Name)
}
// Auto-enable StatelessSelfValidation when witness stats are enabled
if ctx.Bool(VMWitnessStatsFlag.Name) {
cfg.StatelessSelfValidation = true
}
if ctx.IsSet(RPCGlobalGasCapFlag.Name) {
cfg.RPCGasCap = ctx.Uint64(RPCGlobalGasCapFlag.Name)
@ -1726,6 +1770,9 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) {
cfg.EthDiscoveryURLs = SplitAndTrim(urls)
}
}
if ctx.Bool(StateSizeTrackingFlag.Name) {
cfg.EnableStateSizeTracking = true
}
// Override any default configs for hard coded networks.
switch {
case ctx.Bool(MainnetFlag.Name):
@ -1893,11 +1940,15 @@ func MakeBeaconLightConfig(ctx *cli.Context) bparams.ClientConfig {
} else {
Fatalf("Could not parse --%s: %v", BeaconGenesisRootFlag.Name, err)
}
configFile := ctx.String(BeaconConfigFlag.Name)
if err := config.ChainConfig.LoadForks(configFile); err != nil {
Fatalf("Could not load beacon chain config '%s': %v", configFile, err)
configPath := ctx.String(BeaconConfigFlag.Name)
file, err := os.ReadFile(configPath)
if err != nil {
Fatalf("failed to read beacon chain config file '%s': %v", configPath, err)
}
log.Info("Using custom beacon chain config", "file", configFile)
if err := config.ChainConfig.LoadForks(file); err != nil {
Fatalf("Could not load beacon chain config '%s': %v", configPath, err)
}
log.Info("Using custom beacon chain config", "file", configPath)
} else {
if ctx.IsSet(BeaconGenesisRootFlag.Name) {
Fatalf("Genesis root is specified but custom beacon chain config is missing")
@ -1988,7 +2039,8 @@ func RegisterGraphQLService(stack *node.Node, backend ethapi.Backend, filterSyst
// RegisterFilterAPI adds the eth log filtering RPC API to the node.
func RegisterFilterAPI(stack *node.Node, backend ethapi.Backend, ethcfg *ethconfig.Config) *filters.FilterSystem {
filterSystem := filters.NewFilterSystem(backend, filters.Config{
LogCacheSize: ethcfg.FilterLogCacheSize,
LogCacheSize: ethcfg.FilterLogCacheSize,
LogQueryLimit: ethcfg.LogQueryLimit,
})
stack.RegisterAPIs([]rpc.API{{
Namespace: "eth",
@ -2208,6 +2260,9 @@ func MakeChain(ctx *cli.Context, stack *node.Node, readonly bool) (*core.BlockCh
// - DATADIR/triedb/merkle.journal
// - DATADIR/triedb/verkle.journal
TrieJournalDirectory: stack.ResolvePath("triedb"),
// Enable state size tracking if enabled
StateSizeTracking: ctx.Bool(StateSizeTrackingFlag.Name),
}
if options.ArchiveMode && !options.Preimages {
options.Preimages = true
@ -2231,6 +2286,8 @@ func MakeChain(ctx *cli.Context, stack *node.Node, readonly bool) (*core.BlockCh
}
vmcfg := vm.Config{
EnablePreimageRecording: ctx.Bool(VMEnableDebugFlag.Name),
EnableWitnessStats: ctx.Bool(VMWitnessStatsFlag.Name),
StatelessSelfValidation: ctx.Bool(VMStatelessSelfValidationFlag.Name) || ctx.Bool(VMWitnessStatsFlag.Name),
}
if ctx.IsSet(VMTraceFlag.Name) {
if name := ctx.String(VMTraceFlag.Name); name != "" {
@ -2269,7 +2326,7 @@ func MakeConsolePreloads(ctx *cli.Context) []string {
}
// MakeTrieDatabase constructs a trie database based on the configured scheme.
func MakeTrieDatabase(ctx *cli.Context, disk ethdb.Database, preimage bool, readOnly bool, isVerkle bool) *triedb.Database {
func MakeTrieDatabase(ctx *cli.Context, stack *node.Node, disk ethdb.Database, preimage bool, readOnly bool, isVerkle bool) *triedb.Database {
config := &triedb.Config{
Preimages: preimage,
IsVerkle: isVerkle,
@ -2285,10 +2342,13 @@ func MakeTrieDatabase(ctx *cli.Context, disk ethdb.Database, preimage bool, read
config.HashDB = hashdb.Defaults
return triedb.NewDatabase(disk, config)
}
var pathConfig pathdb.Config
if readOnly {
config.PathDB = pathdb.ReadOnly
pathConfig = *pathdb.ReadOnly
} else {
config.PathDB = pathdb.Defaults
pathConfig = *pathdb.Defaults
}
pathConfig.JournalDirectory = stack.ResolvePath("triedb")
config.PathDB = &pathConfig
return triedb.NewDatabase(disk, config)
}

View file

@ -408,7 +408,6 @@ func TestUnmarshalFixedUnprefixedText(t *testing.T) {
{input: "0x2", wantErr: ErrOddLength},
{input: "2", wantErr: ErrOddLength},
{input: "4444", wantErr: errors.New("hex string has length 4, want 8 for x")},
{input: "4444", wantErr: errors.New("hex string has length 4, want 8 for x")},
// check that output is not modified for partially correct input
{input: "444444gg", wantErr: ErrSyntax, want: []byte{0, 0, 0, 0}},
{input: "0x444444gg", wantErr: ErrSyntax, want: []byte{0, 0, 0, 0}},

View file

@ -110,7 +110,7 @@ func TestMustParseUint64(t *testing.T) {
func TestMustParseUint64Panic(t *testing.T) {
defer func() {
if recover() == nil {
t.Error("MustParseBig should've panicked")
t.Error("MustParseUint64 should've panicked")
}
}()
MustParseUint64("ggg")

View file

@ -71,16 +71,6 @@ func New(ethone consensus.Engine) *Beacon {
return &Beacon{ethone: ethone}
}
// isPostMerge reports whether the given block number is assumed to be post-merge.
// Here we check the MergeNetsplitBlock to allow configuring networks with a PoW or
// PoA chain for unit testing purposes.
func isPostMerge(config *params.ChainConfig, blockNum uint64, timestamp uint64) bool {
mergedAtGenesis := config.TerminalTotalDifficulty != nil && config.TerminalTotalDifficulty.Sign() == 0
return mergedAtGenesis ||
config.MergeNetsplitBlock != nil && blockNum >= config.MergeNetsplitBlock.Uint64() ||
config.ShanghaiTime != nil && timestamp >= *config.ShanghaiTime
}
// Author implements consensus.Engine, returning the verified author of the block.
func (beacon *Beacon) Author(header *types.Header) (common.Address, error) {
if !beacon.IsPoSHeader(header) {
@ -328,7 +318,7 @@ func (beacon *Beacon) verifyHeaders(chain consensus.ChainHeaderReader, headers [
// Prepare implements consensus.Engine, initializing the difficulty field of a
// header to conform to the beacon protocol. The changes are done inline.
func (beacon *Beacon) Prepare(chain consensus.ChainHeaderReader, header *types.Header) error {
if !isPostMerge(chain.Config(), header.Number.Uint64(), header.Time) {
if !chain.Config().IsPostMerge(header.Number.Uint64(), header.Time) {
return beacon.ethone.Prepare(chain, header)
}
header.Difficulty = beaconDifficulty
@ -442,7 +432,7 @@ func (beacon *Beacon) SealHash(header *types.Header) common.Hash {
// the difficulty that a new block should have when created at time
// given the parent block's time and difficulty.
func (beacon *Beacon) CalcDifficulty(chain consensus.ChainHeaderReader, time uint64, parent *types.Header) *big.Int {
if !isPostMerge(chain.Config(), parent.Number.Uint64()+1, time) {
if !chain.Config().IsPostMerge(parent.Number.Uint64()+1, time) {
return beacon.ethone.CalcDifficulty(chain, time, parent)
}
return beaconDifficulty

View file

@ -301,7 +301,7 @@ func makeChainForBench(db ethdb.Database, genesis *Genesis, full bool, count uin
func benchWriteChain(b *testing.B, full bool, count uint64) {
genesis := &Genesis{Config: params.AllEthashProtocolChanges}
for i := 0; i < b.N; i++ {
for b.Loop() {
pdb, err := pebble.New(b.TempDir(), 1024, 128, "", false)
if err != nil {
b.Fatalf("error opening database: %v", err)
@ -326,9 +326,7 @@ func benchReadChain(b *testing.B, full bool, count uint64) {
db.Close()
options := DefaultConfig().WithArchive(true)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
for b.Loop() {
pdb, err = pebble.New(dir, 1024, 128, "", false)
if err != nil {
b.Fatalf("error opening database: %v", err)

View file

@ -21,7 +21,6 @@ import (
"errors"
"fmt"
"io"
"math"
"math/big"
"runtime"
"slices"
@ -168,10 +167,13 @@ type BlockChainConfig struct {
TrieNoAsyncFlush bool // Whether the asynchronous buffer flushing is disallowed
TrieJournalDirectory string // Directory path to the journal used for persisting trie data across node restarts
Preimages bool // Whether to store preimage of trie key to the disk
StateHistory uint64 // Number of blocks from head whose state histories are reserved.
StateScheme string // Scheme used to store ethereum states and merkle tree nodes on top
ArchiveMode bool // Whether to enable the archive mode
Preimages bool // Whether to store preimage of trie key to the disk
StateScheme string // Scheme used to store ethereum states and merkle tree nodes on top
ArchiveMode bool // Whether to enable the archive mode
// Number of blocks from the chain head for which state histories are retained.
// If set to 0, all state histories across the entire chain will be retained;
StateHistory uint64
// State snapshot related options
SnapshotLimit int // Memory allowance (MB) to use for caching snapshot entries in memory
@ -193,6 +195,9 @@ type BlockChainConfig struct {
// If the value is zero, all transactions of the entire chain will be indexed.
// If the value is -1, indexing is disabled.
TxLookupLimit int64
// StateSizeTracking indicates whether the state size tracking is enabled.
StateSizeTracking bool
}
// DefaultConfig returns the default config.
@ -330,6 +335,7 @@ type BlockChain struct {
prefetcher Prefetcher
processor Processor // Block transaction processor interface
logger *tracing.Hooks
stateSizer *state.SizeTracker // State size tracking
lastForkReadyAlert time.Time // Last time there was a fork readiness print out
}
@ -523,6 +529,17 @@ func NewBlockChain(db ethdb.Database, genesis *Genesis, engine consensus.Engine,
if bc.cfg.TxLookupLimit >= 0 {
bc.txIndexer = newTxIndexer(uint64(bc.cfg.TxLookupLimit), bc)
}
// Start state size tracker
if bc.cfg.StateSizeTracking {
stateSizer, err := state.NewSizeTracker(bc.db, bc.triedb)
if err == nil {
bc.stateSizer = stateSizer
log.Info("Enabled state size metrics")
} else {
log.Info("Failed to setup size tracker", "err", err)
}
}
return bc, nil
}
@ -1249,6 +1266,10 @@ func (bc *BlockChain) stopWithoutSaving() {
// Signal shutdown to all goroutines.
bc.InterruptInsert(true)
// Stop state size tracker
if bc.stateSizer != nil {
bc.stateSizer.Stop()
}
// Now wait for all chain modifications to end and persistent goroutines to exit.
//
// Note: Close waits for the mutex to become available, i.e. any running chain
@ -1583,10 +1604,14 @@ func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types.
log.Crit("Failed to write block into disk", "err", err)
}
// Commit all cached state changes into underlying memory database.
root, err := statedb.Commit(block.NumberU64(), bc.chainConfig.IsEIP158(block.Number()), bc.chainConfig.IsCancun(block.Number(), block.Time()))
root, stateUpdate, err := statedb.CommitWithUpdate(block.NumberU64(), bc.chainConfig.IsEIP158(block.Number()), bc.chainConfig.IsCancun(block.Number(), block.Time()))
if err != nil {
return err
}
// Emit the state update to the state sizestats if it's active
if bc.stateSizer != nil {
bc.stateSizer.Notify(stateUpdate)
}
// If node is running in path mode, skip explicit gc operation
// which is unnecessary in this mode.
if bc.triedb.Scheme() == rawdb.PathScheme {
@ -1881,7 +1906,7 @@ func (bc *BlockChain) insertChain(chain types.Blocks, setHead bool, makeWitness
}
// The traced section of block import.
start := time.Now()
res, err := bc.processBlock(parent.Root, block, setHead, makeWitness && len(chain) == 1)
res, err := bc.ProcessBlock(parent.Root, block, setHead, makeWitness && len(chain) == 1)
if err != nil {
return nil, it.index, err
}
@ -1947,9 +1972,13 @@ type blockProcessingResult struct {
witness *stateless.Witness
}
// processBlock executes and validates the given block. If there was no error
func (bpr *blockProcessingResult) Witness() *stateless.Witness {
return bpr.witness
}
// ProcessBlock executes and validates the given block. If there was no error
// it writes the block and associated state to database.
func (bc *BlockChain) processBlock(parentRoot common.Hash, block *types.Block, setHead bool, makeWitness bool) (_ *blockProcessingResult, blockEndErr error) {
func (bc *BlockChain) ProcessBlock(parentRoot common.Hash, block *types.Block, setHead bool, makeWitness bool) (_ *blockProcessingResult, blockEndErr error) {
var (
err error
startTime = time.Now()
@ -2127,7 +2156,7 @@ func (bc *BlockChain) processBlock(parentRoot common.Hash, block *types.Block, s
}
// Report the collected witness statistics
if witnessStats != nil {
witnessStats.ReportMetrics()
witnessStats.ReportMetrics(block.NumberU64())
}
// Update the metrics touched during block commit
@ -2633,13 +2662,11 @@ func (bc *BlockChain) reportBlock(block *types.Block, res *ProcessResult, err er
// logForkReadiness will write a log when a future fork is scheduled, but not
// active. This is useful so operators know their client is ready for the fork.
func (bc *BlockChain) logForkReadiness(block *types.Block) {
config := bc.Config()
current, last := config.LatestFork(block.Time()), config.LatestFork(math.MaxUint64)
current := bc.Config().LatestFork(block.Time())
// Short circuit if the timestamp of the last fork is undefined,
// or if the network has already passed the last configured fork.
t := config.Timestamp(last)
if t == nil || current >= last {
// Short circuit if the timestamp of the last fork is undefined.
t := bc.Config().Timestamp(current + 1)
if t == nil {
return
}
at := time.Unix(int64(*t), 0)
@ -2649,7 +2676,7 @@ func (bc *BlockChain) logForkReadiness(block *types.Block) {
// - Enough time has passed since last alert
now := time.Now()
if now.Before(at) && now.After(bc.lastForkReadyAlert.Add(forkReadyInterval)) {
log.Info("Ready for fork activation", "fork", last, "date", at.Format(time.RFC822),
log.Info("Ready for fork activation", "fork", current+1, "date", at.Format(time.RFC822),
"remaining", time.Until(at).Round(time.Second), "timestamp", at.Unix())
bc.lastForkReadyAlert = time.Now()
}
@ -2788,3 +2815,8 @@ func (bc *BlockChain) SetTrieFlushInterval(interval time.Duration) {
func (bc *BlockChain) GetTrieFlushInterval() time.Duration {
return time.Duration(bc.flushInterval.Load())
}
// StateSizer returns the state size tracker, or nil if it's not initialized
func (bc *BlockChain) StateSizer() *state.SizeTracker {
return bc.stateSizer
}

View file

@ -22,7 +22,7 @@ import (
"fmt"
"hash/fnv"
"math"
"sort"
"slices"
"github.com/ethereum/go-ethereum/common"
)
@ -245,7 +245,7 @@ func (p *Params) potentialMatches(rows []FilterRow, mapIndex uint32, logValue co
panic("potentialMatches: insufficient list of row alternatives")
}
}
sort.Sort(results)
slices.Sort(results)
// remove duplicates
j := 0
for i, match := range results {
@ -260,12 +260,7 @@ func (p *Params) potentialMatches(rows []FilterRow, mapIndex uint32, logValue co
// potentialMatches is a strictly monotonically increasing list of log value
// indices in the range of a filter map that are potential matches for certain
// filter criteria.
// potentialMatches implements sort.Interface.
// Note that nil is used as a wildcard and therefore means that all log value
// indices in the filter map range are potential matches. If there are no
// potential matches in the given map's range then an empty slice should be used.
type potentialMatches []uint64
func (p potentialMatches) Len() int { return len(p) }
func (p potentialMatches) Less(i, j int) bool { return p[i] < p[j] }
func (p potentialMatches) Swap(i, j int) { p[i], p[j] = p[j], p[i] }

View file

@ -95,8 +95,14 @@ func TestCreation(t *testing.T) {
{1735372, 1706655071, ID{Hash: checksumToBytes(0xf7f9bc08), Next: 1706655072}}, // Last Shanghai block
{1735372, 1706655072, ID{Hash: checksumToBytes(0x88cf81d9), Next: 1741159776}}, // First Cancun block
{1735372, 1741159775, ID{Hash: checksumToBytes(0x88cf81d9), Next: 1741159776}}, // Last Cancun block
{1735372, 1741159776, ID{Hash: checksumToBytes(0xed88b5fd), Next: 0}}, // First Prague block
{1735372, 2741159776, ID{Hash: checksumToBytes(0xed88b5fd), Next: 0}}, // Future Prague block
{1735372, 1741159776, ID{Hash: checksumToBytes(0xed88b5fd), Next: 1760427360}}, // First Prague block
{1735372, 1760427359, ID{Hash: checksumToBytes(0xed88b5fd), Next: 1760427360}}, // Last Prague block
{1735372, 1760427360, ID{Hash: checksumToBytes(0xe2ae4999), Next: 1761017184}}, // First Osaka block
{1735372, 1761017183, ID{Hash: checksumToBytes(0xe2ae4999), Next: 1761017184}}, // Last Osaka block
{1735372, 1761017184, ID{Hash: checksumToBytes(0x56078a1e), Next: 1761607008}}, // First BPO1 block
{1735372, 1761607007, ID{Hash: checksumToBytes(0x56078a1e), Next: 1761607008}}, // Last BPO1 block
{1735372, 1761607008, ID{Hash: checksumToBytes(0x268956b6), Next: 0}}, // First BPO2 block
{1735372, 2000000000, ID{Hash: checksumToBytes(0x268956b6), Next: 0}}, // Future BPO2 block
},
},
// Holesky test cases
@ -110,8 +116,14 @@ func TestCreation(t *testing.T) {
{123, 1707305663, ID{Hash: checksumToBytes(0xfd4f016b), Next: 1707305664}}, // Last Shanghai block
{123, 1707305664, ID{Hash: checksumToBytes(0x9b192ad0), Next: 1740434112}}, // First Cancun block
{123, 1740434111, ID{Hash: checksumToBytes(0x9b192ad0), Next: 1740434112}}, // Last Cancun block
{123, 1740434112, ID{Hash: checksumToBytes(0xdfbd9bed), Next: 0}}, // First Prague block
{123, 2740434112, ID{Hash: checksumToBytes(0xdfbd9bed), Next: 0}}, // Future Prague block
{123, 1740434112, ID{Hash: checksumToBytes(0xdfbd9bed), Next: 1759308480}}, // First Prague block
{123, 1759308479, ID{Hash: checksumToBytes(0xdfbd9bed), Next: 1759308480}}, // Last Prague block
{123, 1759308480, ID{Hash: checksumToBytes(0x783def52), Next: 1759800000}}, // First Osaka block
{123, 1759799999, ID{Hash: checksumToBytes(0x783def52), Next: 1759800000}}, // Last Osaka block
{123, 1759800000, ID{Hash: checksumToBytes(0xa280a45c), Next: 1760389824}}, // First BPO1 block
{123, 1760389823, ID{Hash: checksumToBytes(0xa280a45c), Next: 1760389824}}, // Last BPO1 block
{123, 1760389824, ID{Hash: checksumToBytes(0x9bc6cb31), Next: 0}}, // First BPO2 block
{123, 2000000000, ID{Hash: checksumToBytes(0x9bc6cb31), Next: 0}}, // Future BPO1 block
},
},
// Hoodi test cases
@ -121,8 +133,14 @@ func TestCreation(t *testing.T) {
[]testcase{
{0, 0, ID{Hash: checksumToBytes(0xbef71d30), Next: 1742999832}}, // Unsynced, last Frontier, Homestead, Tangerine, Spurious, Byzantium, Constantinople, Petersburg, Istanbul, Berlin, London, Paris, Shanghai, Cancun block
{123, 1742999831, ID{Hash: checksumToBytes(0xbef71d30), Next: 1742999832}}, // Last Cancun block
{123, 1742999832, ID{Hash: checksumToBytes(0x0929e24e), Next: 0}}, // First Prague block
{123, 2740434112, ID{Hash: checksumToBytes(0x0929e24e), Next: 0}}, // Future Prague block
{123, 1742999832, ID{Hash: checksumToBytes(0x0929e24e), Next: 1761677592}}, // First Prague block
{123, 1761677591, ID{Hash: checksumToBytes(0x0929e24e), Next: 1761677592}}, // Last Prague block
{123, 1761677592, ID{Hash: checksumToBytes(0xe7e0e7ff), Next: 1762365720}}, // First Osaka block
{123, 1762365719, ID{Hash: checksumToBytes(0xe7e0e7ff), Next: 1762365720}}, // Last Osaka block
{123, 1762365720, ID{Hash: checksumToBytes(0x3893353e), Next: 1762955544}}, // First BPO1 block
{123, 1762955543, ID{Hash: checksumToBytes(0x3893353e), Next: 1762955544}}, // Last BPO1 block
{123, 1762955544, ID{Hash: checksumToBytes(0x23aa1351), Next: 0}}, // First BPO2 block
{123, 2000000000, ID{Hash: checksumToBytes(0x23aa1351), Next: 0}}, // Future BPO2 block
},
},
}

View file

@ -153,7 +153,7 @@ func hashAlloc(ga *types.GenesisAlloc, isVerkle bool) (common.Hash, error) {
if account.Balance != nil {
statedb.AddBalance(addr, uint256.MustFromBig(account.Balance), tracing.BalanceIncreaseGenesisBalance)
}
statedb.SetCode(addr, account.Code)
statedb.SetCode(addr, account.Code, tracing.CodeChangeGenesis)
statedb.SetNonce(addr, account.Nonce, tracing.NonceChangeGenesis)
for key, value := range account.Storage {
statedb.SetState(addr, key, value)
@ -179,7 +179,7 @@ func flushAlloc(ga *types.GenesisAlloc, triedb *triedb.Database) (common.Hash, e
// already captures the allocations.
statedb.AddBalance(addr, uint256.MustFromBig(account.Balance), tracing.BalanceIncreaseGenesisBalance)
}
statedb.SetCode(addr, account.Code)
statedb.SetCode(addr, account.Code, tracing.CodeChangeGenesis)
statedb.SetNonce(addr, account.Nonce, tracing.NonceChangeGenesis)
for key, value := range account.Storage {
statedb.SetState(addr, key, value)
@ -259,6 +259,8 @@ func (e *GenesisMismatchError) Error() string {
// ChainOverrides contains the changes to chain config.
type ChainOverrides struct {
OverrideOsaka *uint64
OverrideBPO1 *uint64
OverrideBPO2 *uint64
OverrideVerkle *uint64
}
@ -270,6 +272,12 @@ func (o *ChainOverrides) apply(cfg *params.ChainConfig) error {
if o.OverrideOsaka != nil {
cfg.OsakaTime = o.OverrideOsaka
}
if o.OverrideBPO1 != nil {
cfg.BPO1Time = o.OverrideBPO1
}
if o.OverrideBPO2 != nil {
cfg.BPO2Time = o.OverrideBPO2
}
if o.OverrideVerkle != nil {
cfg.VerkleTime = o.OverrideVerkle
}
@ -514,6 +522,11 @@ func (g *Genesis) toBlockWithRoot(root common.Hash) *types.Block {
if head.BlobGasUsed == nil {
head.BlobGasUsed = new(uint64)
}
} else {
if g.ExcessBlobGas != nil {
log.Warn("Invalid genesis, unexpected ExcessBlobGas set before Cancun, allowing it for testing purposes")
head.ExcessBlobGas = g.ExcessBlobGas
}
}
if conf.IsPrague(num, g.Timestamp) {
head.RequestsHash = &types.EmptyRequestsHash

View file

@ -60,6 +60,7 @@ func (ts *TransitionState) Copy() *TransitionState {
CurrentSlotHash: ts.CurrentSlotHash,
CurrentPreimageOffset: ts.CurrentPreimageOffset,
StorageProcessed: ts.StorageProcessed,
BaseRoot: ts.BaseRoot,
}
if ts.CurrentAccountAddress != nil {
addr := *ts.CurrentAccountAddress

View file

@ -246,7 +246,7 @@ func TestBadBlockStorage(t *testing.T) {
}
for i := 0; i < len(badBlocks)-1; i++ {
if badBlocks[i].NumberU64() < badBlocks[i+1].NumberU64() {
t.Fatalf("The bad blocks are not sorted #[%d](%d) < #[%d](%d)", i, i+1, badBlocks[i].NumberU64(), badBlocks[i+1].NumberU64())
t.Fatalf("The bad blocks are not sorted #[%d](%d) < #[%d](%d)", i, badBlocks[i].NumberU64(), i+1, badBlocks[i+1].NumberU64())
}
}
@ -511,7 +511,7 @@ func TestWriteAncientHeaderChain(t *testing.T) {
t.Fatalf("unexpected body returned")
}
if blob := ReadReceiptsRLP(db, header.Hash(), header.Number.Uint64()); len(blob) != 0 {
t.Fatalf("unexpected body returned")
t.Fatalf("unexpected receipts returned")
}
}
}

View file

@ -174,16 +174,3 @@ func UpdateUncleanShutdownMarker(db ethdb.KeyValueStore) {
log.Warn("Failed to write unclean-shutdown marker", "err", err)
}
}
// ReadTransitionStatus retrieves the eth2 transition status from the database
func ReadTransitionStatus(db ethdb.KeyValueReader) []byte {
data, _ := db.Get(transitionStatusKey)
return data
}
// WriteTransitionStatus stores the eth2 transition status to the database
func WriteTransitionStatus(db ethdb.KeyValueWriter, data []byte) {
if err := db.Put(transitionStatusKey, data); err != nil {
log.Crit("Failed to store the eth2 transition status", "err", err)
}
}

View file

@ -150,7 +150,7 @@ func HasTrieNode(db ethdb.KeyValueReader, owner common.Hash, path []byte, hash c
if len(blob) == 0 {
return false
}
return crypto.Keccak256Hash(blob) == hash // exists but not match
return crypto.Keccak256Hash(blob) == hash // exist and match
default:
panic(fmt.Sprintf("Unknown scheme %v", scheme))
}
@ -173,7 +173,7 @@ func ReadTrieNode(db ethdb.KeyValueReader, owner common.Hash, path []byte, hash
return nil
}
if crypto.Keccak256Hash(blob) != hash {
return nil // exists but not match
return nil // exist but not match
}
return blob
default:

View file

@ -48,12 +48,16 @@ func basicRead(t *testing.T, newFn func(kinds []string) ethdb.AncientStore) {
)
defer db.Close()
db.ModifyAncients(func(op ethdb.AncientWriteOp) error {
if _, err := db.ModifyAncients(func(op ethdb.AncientWriteOp) error {
for i := 0; i < len(data); i++ {
op.AppendRaw("a", uint64(i), data[i])
if err := op.AppendRaw("a", uint64(i), data[i]); err != nil {
return err
}
}
return nil
})
}); err != nil {
t.Fatalf("Failed to write ancient data %v", err)
}
db.TruncateTail(10)
db.TruncateHead(90)
@ -109,12 +113,16 @@ func batchRead(t *testing.T, newFn func(kinds []string) ethdb.AncientStore) {
)
defer db.Close()
db.ModifyAncients(func(op ethdb.AncientWriteOp) error {
if _, err := db.ModifyAncients(func(op ethdb.AncientWriteOp) error {
for i := 0; i < 100; i++ {
op.AppendRaw("a", uint64(i), data[i])
if err := op.AppendRaw("a", uint64(i), data[i]); err != nil {
return err
}
}
return nil
})
}); err != nil {
t.Fatalf("Failed to write ancient data %v", err)
}
db.TruncateTail(10)
db.TruncateHead(90)
@ -189,7 +197,9 @@ func basicWrite(t *testing.T, newFn func(kinds []string) ethdb.AncientStore) {
// The ancient write to tables should be aligned
_, err := db.ModifyAncients(func(op ethdb.AncientWriteOp) error {
for i := 0; i < 100; i++ {
op.AppendRaw("a", uint64(i), dataA[i])
if err := op.AppendRaw("a", uint64(i), dataA[i]); err != nil {
return err
}
}
return nil
})
@ -200,8 +210,12 @@ func basicWrite(t *testing.T, newFn func(kinds []string) ethdb.AncientStore) {
// Test normal ancient write
size, err := db.ModifyAncients(func(op ethdb.AncientWriteOp) error {
for i := 0; i < 100; i++ {
op.AppendRaw("a", uint64(i), dataA[i])
op.AppendRaw("b", uint64(i), dataB[i])
if err := op.AppendRaw("a", uint64(i), dataA[i]); err != nil {
return err
}
if err := op.AppendRaw("b", uint64(i), dataB[i]); err != nil {
return err
}
}
return nil
})
@ -217,8 +231,12 @@ func basicWrite(t *testing.T, newFn func(kinds []string) ethdb.AncientStore) {
db.TruncateHead(90)
_, err = db.ModifyAncients(func(op ethdb.AncientWriteOp) error {
for i := 90; i < 100; i++ {
op.AppendRaw("a", uint64(i), dataA[i])
op.AppendRaw("b", uint64(i), dataB[i])
if err := op.AppendRaw("a", uint64(i), dataA[i]); err != nil {
return err
}
if err := op.AppendRaw("b", uint64(i), dataB[i]); err != nil {
return err
}
}
return nil
})
@ -227,11 +245,15 @@ func basicWrite(t *testing.T, newFn func(kinds []string) ethdb.AncientStore) {
}
// Write should work after truncating everything
db.TruncateTail(0)
db.TruncateHead(0)
_, err = db.ModifyAncients(func(op ethdb.AncientWriteOp) error {
for i := 0; i < 100; i++ {
op.AppendRaw("a", uint64(i), dataA[i])
op.AppendRaw("b", uint64(i), dataB[i])
if err := op.AppendRaw("a", uint64(i), dataA[i]); err != nil {
return err
}
if err := op.AppendRaw("b", uint64(i), dataB[i]); err != nil {
return err
}
}
return nil
})
@ -245,14 +267,18 @@ func nonMutable(t *testing.T, newFn func(kinds []string) ethdb.AncientStore) {
defer db.Close()
// We write 100 zero-bytes to the freezer and immediately mutate the slice
db.ModifyAncients(func(op ethdb.AncientWriteOp) error {
if _, err := db.ModifyAncients(func(op ethdb.AncientWriteOp) error {
data := make([]byte, 100)
op.AppendRaw("a", uint64(0), data)
if err := op.AppendRaw("a", uint64(0), data); err != nil {
return err
}
for i := range data {
data[i] = 0xff
}
return nil
})
}); err != nil {
t.Fatalf("Failed to write ancient data %v", err)
}
// Now read it.
data, err := db.Ancient("a", uint64(0))
if err != nil {
@ -275,23 +301,31 @@ func TestResettableAncientSuite(t *testing.T, newFn func(kinds []string) ethdb.R
)
defer db.Close()
db.ModifyAncients(func(op ethdb.AncientWriteOp) error {
if _, err := db.ModifyAncients(func(op ethdb.AncientWriteOp) error {
for i := 0; i < 100; i++ {
op.AppendRaw("a", uint64(i), data[i])
if err := op.AppendRaw("a", uint64(i), data[i]); err != nil {
return err
}
}
return nil
})
}); err != nil {
t.Fatalf("Failed to write ancient data %v", err)
}
db.TruncateTail(10)
db.TruncateHead(90)
// Ancient write should work after resetting
db.Reset()
db.ModifyAncients(func(op ethdb.AncientWriteOp) error {
if _, err := db.ModifyAncients(func(op ethdb.AncientWriteOp) error {
for i := 0; i < 100; i++ {
op.AppendRaw("a", uint64(i), data[i])
if err := op.AppendRaw("a", uint64(i), data[i]); err != nil {
return err
}
}
return nil
})
}); err != nil {
t.Fatalf("Failed to write ancient data %v", err)
}
})
}

View file

@ -239,7 +239,7 @@ func TestFreezerConcurrentModifyTruncate(t *testing.T) {
// fails, otherwise it succeeds. In either case, the freezer should be positioned
// at 10 after both operations are done.
if truncateErr != nil {
t.Fatal("concurrent truncate failed:", err)
t.Fatal("concurrent truncate failed:", truncateErr)
}
if !(errors.Is(modifyErr, nil) || errors.Is(modifyErr, errOutOrderInsertion)) {
t.Fatal("wrong error from concurrent modify:", modifyErr)

View file

@ -95,7 +95,7 @@ var (
uncleanShutdownKey = []byte("unclean-shutdown") // config prefix for the db
// transitionStatusKey tracks the eth2 transition status.
transitionStatusKey = []byte("eth2-transition")
transitionStatusKey = []byte("eth2-transition") // deprecated
// snapSyncStatusFlagKey flags that status of snap sync.
snapSyncStatusFlagKey = []byte("SnapSyncStatus")

View file

@ -149,8 +149,7 @@ func BenchmarkHashing(b *testing.B) {
var got common.Hash
var hasher = sha3.NewLegacyKeccak256()
b.Run("iteratorhashing", func(b *testing.B) {
b.ResetTimer()
for i := 0; i < b.N; i++ {
for b.Loop() {
var hash common.Hash
it, err := rlp.NewListIterator(bodyRlp)
if err != nil {
@ -172,8 +171,7 @@ func BenchmarkHashing(b *testing.B) {
})
var exp common.Hash
b.Run("fullbodyhashing", func(b *testing.B) {
b.ResetTimer()
for i := 0; i < b.N; i++ {
for b.Loop() {
var body types.Body
rlp.DecodeBytes(bodyRlp, &body)
for _, tx := range body.Transactions {
@ -182,8 +180,7 @@ func BenchmarkHashing(b *testing.B) {
}
})
b.Run("fullblockhashing", func(b *testing.B) {
b.ResetTimer()
for i := 0; i < b.N; i++ {
for b.Loop() {
var block types.Block
rlp.DecodeBytes(blockRlp, &block)
for _, tx := range block.Transactions() {

View file

@ -229,8 +229,7 @@ func BenchmarkSearch(b *testing.B) {
layer = fill(layer)
}
key := crypto.Keccak256Hash([]byte{0x13, 0x38})
b.ResetTimer()
for i := 0; i < b.N; i++ {
for b.Loop() {
layer.AccountRLP(key)
}
}
@ -269,8 +268,7 @@ func BenchmarkSearchSlot(b *testing.B) {
for i := 0; i < 128; i++ {
layer = fill(layer)
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
for b.Loop() {
layer.Storage(accountKey, storageKey)
}
}
@ -300,9 +298,7 @@ func BenchmarkFlatten(b *testing.B) {
}
return newDiffLayer(parent, common.Hash{}, accounts, storage)
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
b.StopTimer()
for b.Loop() {
var layer snapshot
layer = emptyLayer()
for i := 1; i < 128; i++ {
@ -352,9 +348,7 @@ func BenchmarkJournal(b *testing.B) {
for i := 1; i < 128; i++ {
layer = fill(layer)
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
for b.Loop() {
layer.Journal(new(bytes.Buffer))
}
}

View file

@ -928,7 +928,7 @@ func BenchmarkAccountIteratorTraversal(b *testing.B) {
head.(*diffLayer).newBinaryAccountIterator(common.Hash{})
b.Run("binary iterator keys", func(b *testing.B) {
for i := 0; i < b.N; i++ {
for b.Loop() {
got := 0
it := head.(*diffLayer).newBinaryAccountIterator(common.Hash{})
for it.Next() {
@ -940,7 +940,7 @@ func BenchmarkAccountIteratorTraversal(b *testing.B) {
}
})
b.Run("binary iterator values", func(b *testing.B) {
for i := 0; i < b.N; i++ {
for b.Loop() {
got := 0
it := head.(*diffLayer).newBinaryAccountIterator(common.Hash{})
for it.Next() {
@ -953,7 +953,7 @@ func BenchmarkAccountIteratorTraversal(b *testing.B) {
}
})
b.Run("fast iterator keys", func(b *testing.B) {
for i := 0; i < b.N; i++ {
for b.Loop() {
it, _ := snaps.AccountIterator(common.HexToHash("0x65"), common.Hash{})
defer it.Release()
@ -967,7 +967,7 @@ func BenchmarkAccountIteratorTraversal(b *testing.B) {
}
})
b.Run("fast iterator values", func(b *testing.B) {
for i := 0; i < b.N; i++ {
for b.Loop() {
it, _ := snaps.AccountIterator(common.HexToHash("0x65"), common.Hash{})
defer it.Release()
@ -1025,7 +1025,7 @@ func BenchmarkAccountIteratorLargeBaselayer(b *testing.B) {
head.(*diffLayer).newBinaryAccountIterator(common.Hash{})
b.Run("binary iterator (keys)", func(b *testing.B) {
for i := 0; i < b.N; i++ {
for b.Loop() {
got := 0
it := head.(*diffLayer).newBinaryAccountIterator(common.Hash{})
for it.Next() {
@ -1037,7 +1037,7 @@ func BenchmarkAccountIteratorLargeBaselayer(b *testing.B) {
}
})
b.Run("binary iterator (values)", func(b *testing.B) {
for i := 0; i < b.N; i++ {
for b.Loop() {
got := 0
it := head.(*diffLayer).newBinaryAccountIterator(common.Hash{})
for it.Next() {
@ -1051,7 +1051,7 @@ func BenchmarkAccountIteratorLargeBaselayer(b *testing.B) {
}
})
b.Run("fast iterator (keys)", func(b *testing.B) {
for i := 0; i < b.N; i++ {
for b.Loop() {
it, _ := snaps.AccountIterator(common.HexToHash("0x65"), common.Hash{})
defer it.Release()
@ -1065,7 +1065,7 @@ func BenchmarkAccountIteratorLargeBaselayer(b *testing.B) {
}
})
b.Run("fast iterator (values)", func(b *testing.B) {
for i := 0; i < b.N; i++ {
for b.Loop() {
it, _ := snaps.AccountIterator(common.HexToHash("0x65"), common.Hash{})
defer it.Release()

View file

@ -333,7 +333,7 @@ func (s *stateObject) updateTrie() (Trie, error) {
continue
}
if !exist {
log.Error("Storage slot is not found in pending area", s.address, "slot", key)
log.Error("Storage slot is not found in pending area", "address", s.address, "slot", key)
continue
}
if (value != common.Hash{}) {

View file

@ -25,7 +25,7 @@ import (
func BenchmarkCutOriginal(b *testing.B) {
value := common.HexToHash("0x01")
for i := 0; i < b.N; i++ {
for b.Loop() {
bytes.TrimLeft(value[:], "\x00")
}
}
@ -33,14 +33,14 @@ func BenchmarkCutOriginal(b *testing.B) {
func BenchmarkCutsetterFn(b *testing.B) {
value := common.HexToHash("0x01")
cutSetFn := func(r rune) bool { return r == 0 }
for i := 0; i < b.N; i++ {
for b.Loop() {
bytes.TrimLeftFunc(value[:], cutSetFn)
}
}
func BenchmarkCutCustomTrim(b *testing.B) {
value := common.HexToHash("0x01")
for i := 0; i < b.N; i++ {
for b.Loop() {
common.TrimLeftZeroes(value[:])
}
}

638
core/state/state_sizer.go Normal file
View file

@ -0,0 +1,638 @@
// Copyright 2025 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package state
import (
"container/heap"
"errors"
"fmt"
"maps"
"runtime"
"slices"
"sync"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/triedb"
"golang.org/x/sync/errgroup"
)
const (
	// statEvictThreshold bounds how many of the most recent per-block size
	// statistics are retained before older entries are evicted (presumably
	// to cap memory and to survive shallow reorgs — confirm against the
	// eviction logic further down in this file).
	statEvictThreshold = 128 // the depth of statistic to be preserved
)
// Database key scheme for states.
//
// These are the fixed byte sizes of the database keys under which state data
// is stored, derived from the rawdb key prefixes plus the fixed-length hash
// components. They are used when accounting for the on-disk footprint of the
// corresponding entries.
var (
	accountKeySize            = int64(len(rawdb.SnapshotAccountPrefix) + common.HashLength)   // snapshot account: prefix + account hash
	storageKeySize            = int64(len(rawdb.SnapshotStoragePrefix) + common.HashLength*2) // snapshot storage: prefix + account hash + slot hash
	accountTrienodePrefixSize = int64(len(rawdb.TrieNodeAccountPrefix))                       // account trie node: prefix only (node path portion is variable length)
	storageTrienodePrefixSize = int64(len(rawdb.TrieNodeStoragePrefix) + common.HashLength)   // storage trie node: prefix + account hash (node path portion is variable length)
	codeKeySize               = int64(len(rawdb.CodePrefix) + common.HashLength)              // contract code: prefix + code hash
)
// SizeStats represents either the current state size statistics or the size
// differences resulting from a state transition.
//
// NOTE(review): when used as a transition diff, the counter/byte fields are
// presumably deltas and may be negative (e.g. on account or slot deletion) —
// confirm against the diff producer. StateRoot and BlockNumber always refer
// to the post-transition state (see add, which overwrites rather than sums
// them).
type SizeStats struct {
	StateRoot            common.Hash // State root hash at the time of measurement
	BlockNumber          uint64      // Associated block number at the time of measurement
	Accounts             int64       // Total number of accounts in the state
	AccountBytes         int64       // Total storage size used by all account data (in bytes)
	Storages             int64       // Total number of storage slots across all accounts
	StorageBytes         int64       // Total storage size used by all storage slot data (in bytes)
	AccountTrienodes     int64       // Total number of account trie nodes in the state
	AccountTrienodeBytes int64       // Total storage size occupied by account trie nodes (in bytes)
	StorageTrienodes     int64       // Total number of storage trie nodes in the state
	StorageTrienodeBytes int64       // Total storage size occupied by storage trie nodes (in bytes)
	ContractCodes        int64       // Total number of contract codes in the state
	ContractCodeBytes    int64       // Total size of all contract code (in bytes)
}
// String implements fmt.Stringer, rendering the statistics in a compact,
// human-readable single line with byte counts shown as storage sizes.
func (s SizeStats) String() string {
	const format = "Accounts: %d(%s), Storages: %d(%s), AccountTrienodes: %d(%s), StorageTrienodes: %d(%s), Codes: %d(%s)"
	args := []any{
		s.Accounts, common.StorageSize(s.AccountBytes),
		s.Storages, common.StorageSize(s.StorageBytes),
		s.AccountTrienodes, common.StorageSize(s.AccountTrienodeBytes),
		s.StorageTrienodes, common.StorageSize(s.StorageTrienodeBytes),
		s.ContractCodes, common.StorageSize(s.ContractCodeBytes),
	}
	return fmt.Sprintf(format, args...)
}
// add applies the given state diffs and produces a new version of the
// statistics. The root and block number are taken from the diff (i.e. the
// resulting post-transition state), while all counters are summed.
func (s SizeStats) add(diff SizeStats) SizeStats {
	return SizeStats{
		StateRoot:            diff.StateRoot,
		BlockNumber:          diff.BlockNumber,
		Accounts:             s.Accounts + diff.Accounts,
		AccountBytes:         s.AccountBytes + diff.AccountBytes,
		Storages:             s.Storages + diff.Storages,
		StorageBytes:         s.StorageBytes + diff.StorageBytes,
		AccountTrienodes:     s.AccountTrienodes + diff.AccountTrienodes,
		AccountTrienodeBytes: s.AccountTrienodeBytes + diff.AccountTrienodeBytes,
		StorageTrienodes:     s.StorageTrienodes + diff.StorageTrienodes,
		StorageTrienodeBytes: s.StorageTrienodeBytes + diff.StorageTrienodeBytes,
		ContractCodes:        s.ContractCodes + diff.ContractCodes,
		ContractCodeBytes:    s.ContractCodeBytes + diff.ContractCodeBytes,
	}
}
// calSizeStats measures the state size changes of the provided state update.
//
// For every mutated entity (account, storage slot, trie node, contract code)
// the original and updated encodings are compared, and the difference is
// accumulated into a SizeStats of signed deltas. Fixed key sizes are included
// so the figures approximate on-disk usage. An error is returned if the
// update is internally inconsistent (an origin entry with no current entry).
func calSizeStats(update *stateUpdate) (SizeStats, error) {
	stats := SizeStats{
		BlockNumber: update.blockNumber,
		StateRoot:   update.root,
	}
	// Measure the account changes
	for addr, oldValue := range update.accountsOrigin {
		addrHash := crypto.Keccak256Hash(addr.Bytes())
		newValue, exists := update.accounts[addrHash]
		if !exists {
			// Every account with an origin record must also have a current
			// record (possibly empty for deletion); missing means corruption.
			return SizeStats{}, fmt.Errorf("account %x not found", addr)
		}
		oldLen, newLen := len(oldValue), len(newValue)
		switch {
		case oldLen > 0 && newLen == 0:
			// Account deletion
			stats.Accounts -= 1
			stats.AccountBytes -= accountKeySize + int64(oldLen)
		case oldLen == 0 && newLen > 0:
			// Account creation
			stats.Accounts += 1
			stats.AccountBytes += accountKeySize + int64(newLen)
		default:
			// Account update
			stats.AccountBytes += int64(newLen - oldLen)
		}
	}
	// Measure storage changes
	for addr, slots := range update.storagesOrigin {
		addrHash := crypto.Keccak256Hash(addr.Bytes())
		subset, exists := update.storages[addrHash]
		if !exists {
			return SizeStats{}, fmt.Errorf("storage %x not found", addr)
		}
		for key, oldValue := range slots {
			var (
				exists   bool
				newValue []byte
			)
			// The origin set may be keyed by raw slot keys while the current
			// set is keyed by hashed keys; normalize before the lookup.
			if update.rawStorageKey {
				newValue, exists = subset[crypto.Keccak256Hash(key.Bytes())]
			} else {
				newValue, exists = subset[key]
			}
			if !exists {
				return SizeStats{}, fmt.Errorf("storage slot %x-%x not found", addr, key)
			}
			oldLen, newLen := len(oldValue), len(newValue)
			switch {
			case oldLen > 0 && newLen == 0:
				// Storage deletion
				stats.Storages -= 1
				stats.StorageBytes -= storageKeySize + int64(oldLen)
			case oldLen == 0 && newLen > 0:
				// Storage creation
				stats.Storages += 1
				stats.StorageBytes += storageKeySize + int64(newLen)
			default:
				// Storage update
				stats.StorageBytes += int64(newLen - oldLen)
			}
		}
	}
	// Measure trienode changes. The zero owner hash denotes the account trie;
	// any other owner is a storage trie rooted at that account.
	for owner, subset := range update.nodes.Sets {
		var (
			keyPrefix int64
			isAccount = owner == (common.Hash{})
		)
		if isAccount {
			keyPrefix = accountTrienodePrefixSize
		} else {
			keyPrefix = storageTrienodePrefixSize
		}
		// Iterate over Origins since every modified node has an origin entry
		for path, oldNode := range subset.Origins {
			newNode, exists := subset.Nodes[path]
			if !exists {
				return SizeStats{}, fmt.Errorf("node %x-%v not found", owner, path)
			}
			keySize := keyPrefix + int64(len(path))
			switch {
			case len(oldNode) > 0 && len(newNode.Blob) == 0:
				// Node deletion
				if isAccount {
					stats.AccountTrienodes -= 1
					stats.AccountTrienodeBytes -= keySize + int64(len(oldNode))
				} else {
					stats.StorageTrienodes -= 1
					stats.StorageTrienodeBytes -= keySize + int64(len(oldNode))
				}
			case len(oldNode) == 0 && len(newNode.Blob) > 0:
				// Node creation
				if isAccount {
					stats.AccountTrienodes += 1
					stats.AccountTrienodeBytes += keySize + int64(len(newNode.Blob))
				} else {
					stats.StorageTrienodes += 1
					stats.StorageTrienodeBytes += keySize + int64(len(newNode.Blob))
				}
			default:
				// Node update
				if isAccount {
					stats.AccountTrienodeBytes += int64(len(newNode.Blob) - len(oldNode))
				} else {
					stats.StorageTrienodeBytes += int64(len(newNode.Blob) - len(oldNode))
				}
			}
		}
	}
	// Measure code changes. Note that the reported contract code size may be slightly
	// inaccurate due to database deduplication (code is stored by its hash). However,
	// this deviation is negligible and acceptable for measurement purposes.
	for _, code := range update.codes {
		stats.ContractCodes += 1
		stats.ContractCodeBytes += codeKeySize + int64(len(code.blob))
	}
	return stats, nil
}
// stateSizeQuery is the request envelope exchanged between Query callers and
// the tracker's run loop. The responder writes exactly one value to result.
type stateSizeQuery struct {
	root   *common.Hash    // nil means latest
	err    error           // non-nil if the state size is not yet initialized
	result chan *SizeStats // nil means the state is unknown
}
// SizeTracker handles the state size initialization and tracks of state size metrics.
// All statistics are owned by the single background goroutine (see run); other
// goroutines interact with it exclusively through the channels below.
type SizeTracker struct {
	db       ethdb.KeyValueStore  // Backing key-value store iterated for measurement
	triedb   *triedb.Database     // Trie database, consulted for snapshot completion
	abort    chan struct{}        // Closed by Stop to request shutdown
	aborted  chan struct{}        // Closed by run once the background loop has exited
	updateCh chan *stateUpdate    // Inbound per-block state updates (see Notify)
	queryCh  chan *stateSizeQuery // Inbound size queries (see Query)
}
// NewSizeTracker creates a new state size tracker and starts it automatically.
// Only the path-based state scheme is supported; any other scheme yields an
// error. The returned tracker must be shut down with Stop.
func NewSizeTracker(db ethdb.KeyValueStore, triedb *triedb.Database) (*SizeTracker, error) {
	if triedb.Scheme() != rawdb.PathScheme {
		return nil, errors.New("state size tracker is not compatible with hash mode")
	}
	tracker := &SizeTracker{
		db:       db,
		triedb:   triedb,
		abort:    make(chan struct{}),
		aborted:  make(chan struct{}),
		updateCh: make(chan *stateUpdate),
		queryCh:  make(chan *stateSizeQuery),
	}
	go tracker.run()
	return tracker, nil
}
// Stop terminates the tracker's background goroutine and blocks until it has
// fully exited. It must be called at most once.
func (t *SizeTracker) Stop() {
	close(t.abort)
	<-t.aborted
}
// sizeStatsHeap is a heap.Interface implementation over statesize statistics for
// retrieving the oldest statistics for eviction. Ordering is by block number,
// so the heap root is always the entry with the lowest block.
type sizeStatsHeap []SizeStats

func (h sizeStatsHeap) Len() int           { return len(h) }
func (h sizeStatsHeap) Less(i, j int) bool { return h[i].BlockNumber < h[j].BlockNumber }
func (h sizeStatsHeap) Swap(i, j int)      { h[i], h[j] = h[j], h[i] }

// Push appends an element; invoked by the heap package only.
func (h *sizeStatsHeap) Push(x any) {
	*h = append(*h, x.(SizeStats))
}

// Pop removes and returns the final element; invoked by the heap package only.
func (h *sizeStatsHeap) Pop() any {
	n := len(*h)
	item := (*h)[n-1]
	*h = (*h)[:n-1]
	return item
}
// run performs the state size initialization and handles updates.
// It is the tracker's single background goroutine: the stats map and heap are
// confined to it and accessed only via the update/query channels, so no
// locking is required. The loop exits when abort is closed or init fails.
func (t *SizeTracker) run() {
	defer close(t.aborted)
	var last common.Hash // root of the most recently applied update; "latest" for queries
	stats, err := t.init() // launch background thread for state size init
	if err != nil {
		return
	}
	// Index all known statistics in a min-heap on block number so the oldest
	// entries can be evicted cheaply once the retention window is exceeded.
	h := sizeStatsHeap(slices.Collect(maps.Values(stats)))
	heap.Init(&h)
	for {
		select {
		case u := <-t.updateCh:
			base, found := stats[u.originRoot]
			if !found {
				// An update whose parent state is untracked cannot be chained.
				log.Debug("Ignored the state size without parent", "parent", u.originRoot, "root", u.root, "number", u.blockNumber)
				continue
			}
			diff, err := calSizeStats(u)
			if err != nil {
				continue
			}
			stat := base.add(diff)
			stats[u.root] = stat
			last = u.root
			heap.Push(&h, stats[u.root])
			// Evict statistics older than the retention threshold relative to
			// the block just applied.
			for u.blockNumber-h[0].BlockNumber > statEvictThreshold {
				delete(stats, h[0].StateRoot)
				heap.Pop(&h)
			}
			log.Debug("Update state size", "number", stat.BlockNumber, "root", stat.StateRoot, "stat", stat)
		case r := <-t.queryCh:
			var root common.Hash
			if r.root != nil {
				root = *r.root
			} else {
				root = last // nil request root means "latest tracked state"
			}
			if s, ok := stats[root]; ok {
				r.result <- &s
			} else {
				r.result <- nil
			}
		case <-t.abort:
			return
		}
	}
}
// buildResult carries the outcome of a full on-disk measurement pass started
// by SizeTracker.build.
type buildResult struct {
	stat        SizeStats     // Measured statistics of the persistent state
	root        common.Hash   // State root the measurement corresponds to
	blockNumber uint64        // Block number the measurement corresponds to
	elapsed     time.Duration // Wall-clock duration of the measurement
	err         error         // Non-nil if the measurement failed or was aborted
}
// init establishes the initial statistics map used by the run loop. It works
// in two phases: first it waits for the snapshot generation to complete, then
// it buffers incoming state updates until one of them matches the persisted
// snapshot root, measures that persistent state from disk (build), and replays
// the buffered updates on top of it to derive stats for all descendant states.
// Queries arriving during either phase are answered with an error; a nil map
// and error are returned if the tracker is stopped first.
func (t *SizeTracker) init() (map[common.Hash]SizeStats, error) {
	// Wait for snapshot completion and then init
	ticker := time.NewTicker(10 * time.Second)
	defer ticker.Stop()
wait:
	for {
		select {
		case <-ticker.C:
			if t.triedb.SnapshotCompleted() {
				break wait
			}
		case <-t.updateCh:
			// Drain updates while waiting; they cannot be chained yet.
			continue
		case r := <-t.queryCh:
			r.err = errors.New("state size is not initialized yet")
			r.result <- nil
		case <-t.abort:
			return nil, errors.New("size tracker closed")
		}
	}
	var (
		updates  = make(map[common.Hash]*stateUpdate)    // buffered updates keyed by post-state root
		children = make(map[common.Hash][]common.Hash)   // parent root -> child roots, for replay
		done     chan buildResult                        // nil until the disk measurement is launched
	)
	for {
		select {
		case u := <-t.updateCh:
			updates[u.root] = u
			children[u.originRoot] = append(children[u.originRoot], u.root)
			log.Debug("Received state update", "root", u.root, "blockNumber", u.blockNumber)
		case r := <-t.queryCh:
			r.err = errors.New("state size is not initialized yet")
			r.result <- nil
		case <-ticker.C:
			// Only check timer if build hasn't started yet
			if done != nil {
				continue
			}
			root := rawdb.ReadSnapshotRoot(t.db)
			if root == (common.Hash{}) {
				continue
			}
			// Start measuring only once a buffered update matches the
			// persisted snapshot root, so replay can continue from it.
			entry, exists := updates[root]
			if !exists {
				continue
			}
			done = make(chan buildResult)
			go t.build(entry.root, entry.blockNumber, done)
			log.Info("Measuring persistent state size", "root", root.Hex(), "number", entry.blockNumber)
		case result := <-done:
			if result.err != nil {
				return nil, result.err
			}
			var (
				stats = make(map[common.Hash]SizeStats)
				apply func(root common.Hash, stat SizeStats) error
			)
			// Depth-first replay of the buffered updates, deriving the stats
			// of every known descendant of the measured root.
			apply = func(root common.Hash, base SizeStats) error {
				for _, child := range children[root] {
					entry, ok := updates[child]
					if !ok {
						return fmt.Errorf("the state update is not found, %x", child)
					}
					diff, err := calSizeStats(entry)
					if err != nil {
						return err
					}
					stats[child] = base.add(diff)
					if err := apply(child, stats[child]); err != nil {
						return err
					}
				}
				return nil
			}
			if err := apply(result.root, result.stat); err != nil {
				return nil, err
			}
			// Set initial latest stats
			stats[result.root] = result.stat
			log.Info("Measured persistent state size", "root", result.root, "number", result.blockNumber, "stat", result.stat, "elapsed", common.PrettyDuration(result.elapsed))
			return stats, nil
		case <-t.abort:
			return nil, errors.New("size tracker closed")
		}
	}
}
// build measures the size of the persistent state identified by root/blockNumber
// by iterating the relevant database tables concurrently, and delivers exactly
// one buildResult on done. The five table scans run in parallel; each goroutine
// writes its own pair of result variables, so no locking is needed before the
// group.Wait barrier.
func (t *SizeTracker) build(root common.Hash, blockNumber uint64, done chan buildResult) {
	// Metrics will be directly updated by each goroutine
	var (
		accounts, accountBytes                 int64
		storages, storageBytes                 int64
		codes, codeBytes                       int64
		accountTrienodes, accountTrienodeBytes int64
		storageTrienodes, storageTrienodeBytes int64
		group                                  errgroup.Group
		start                                  = time.Now()
	)
	// Start all table iterations concurrently with direct metric updates
	group.Go(func() error {
		count, bytes, err := t.iterateTableParallel(t.abort, rawdb.SnapshotAccountPrefix, "account")
		if err != nil {
			return err
		}
		accounts, accountBytes = count, bytes
		return nil
	})
	group.Go(func() error {
		count, bytes, err := t.iterateTableParallel(t.abort, rawdb.SnapshotStoragePrefix, "storage")
		if err != nil {
			return err
		}
		storages, storageBytes = count, bytes
		return nil
	})
	group.Go(func() error {
		count, bytes, err := t.iterateTableParallel(t.abort, rawdb.TrieNodeAccountPrefix, "accountnode")
		if err != nil {
			return err
		}
		accountTrienodes, accountTrienodeBytes = count, bytes
		return nil
	})
	group.Go(func() error {
		count, bytes, err := t.iterateTableParallel(t.abort, rawdb.TrieNodeStoragePrefix, "storagenode")
		if err != nil {
			return err
		}
		storageTrienodes, storageTrienodeBytes = count, bytes
		return nil
	})
	group.Go(func() error {
		// Contract code is scanned serially; NOTE(review): presumably the
		// code table is small enough that parallel fan-out isn't worthwhile.
		count, bytes, err := t.iterateTable(t.abort, rawdb.CodePrefix, "contractcode")
		if err != nil {
			return err
		}
		codes, codeBytes = count, bytes
		return nil
	})
	// Wait for all goroutines to complete
	if err := group.Wait(); err != nil {
		done <- buildResult{err: err}
	} else {
		stat := SizeStats{
			StateRoot:            root,
			BlockNumber:          blockNumber,
			Accounts:             accounts,
			AccountBytes:         accountBytes,
			Storages:             storages,
			StorageBytes:         storageBytes,
			AccountTrienodes:     accountTrienodes,
			AccountTrienodeBytes: accountTrienodeBytes,
			StorageTrienodes:     storageTrienodes,
			StorageTrienodeBytes: storageTrienodeBytes,
			ContractCodes:        codes,
			ContractCodeBytes:    codeBytes,
		}
		done <- buildResult{
			root:        root,
			blockNumber: blockNumber,
			stat:        stat,
			elapsed:     time.Since(start),
		}
	}
}
// iterateTable walks every entry stored under the given table prefix and
// returns the entry count together with the combined key+value size in bytes.
// The scan aborts with an error once the closed channel fires; progress is
// logged periodically for long-running scans.
func (t *SizeTracker) iterateTable(closed chan struct{}, prefix []byte, name string) (int64, int64, error) {
	start := time.Now()
	logged := start
	var entries, size int64

	it := t.db.NewIterator(prefix, nil)
	defer it.Release()

	log.Debug("Iterating state", "category", name)
	for it.Next() {
		entries++
		size += int64(len(it.Key()) + len(it.Value()))

		// Check for cancellation at most once every eight seconds to keep the
		// hot loop cheap, piggybacking on the progress-logging interval.
		if time.Since(logged) > time.Second*8 {
			logged = time.Now()
			select {
			case <-closed:
				log.Debug("State iteration cancelled", "category", name)
				return 0, 0, errors.New("size tracker closed")
			default:
				log.Debug("Iterating state", "category", name, "count", entries, "size", common.StorageSize(size))
			}
		}
	}
	// Surface any failure reported by the underlying iterator.
	if err := it.Error(); err != nil {
		log.Error("Iterator error", "category", name, "err", err)
		return 0, 0, err
	}
	log.Debug("Finished state iteration", "category", name, "count", entries, "size", common.StorageSize(size), "elapsed", common.PrettyDuration(time.Since(start)))
	return entries, size, nil
}
// iterateTableParallel performs parallel iteration over a table by splitting into
// hex ranges. For storage tables, it splits on the first byte of the account hash
// (after the prefix). The 256 sub-scans run under an errgroup bounded by the
// CPU count, and their results are merged under a mutex.
func (t *SizeTracker) iterateTableParallel(closed chan struct{}, prefix []byte, name string) (int64, int64, error) {
	var (
		totalCount int64
		totalBytes int64
		start      = time.Now()
		workers    = runtime.NumCPU()
		group      errgroup.Group
		mu         sync.Mutex
	)
	group.SetLimit(workers)
	log.Debug("Starting parallel state iteration", "category", name, "workers", workers)
	if len(prefix) > 0 {
		// The sub-scans below all extend the prefix by one byte, so a key
		// exactly equal to the prefix would be missed; account for it here.
		if blob, err := t.db.Get(prefix); err == nil && len(blob) > 0 {
			// If there's a direct hit on the prefix, include it in the stats
			totalCount = 1
			totalBytes = int64(len(prefix) + len(blob))
		}
	}
	// Fan out one scan per possible first byte following the prefix.
	for i := 0; i < 256; i++ {
		h := byte(i)
		group.Go(func() error {
			count, bytes, err := t.iterateTable(closed, slices.Concat(prefix, []byte{h}), fmt.Sprintf("%s-%02x", name, h))
			if err != nil {
				return err
			}
			mu.Lock()
			totalCount += count
			totalBytes += bytes
			mu.Unlock()
			return nil
		})
	}
	if err := group.Wait(); err != nil {
		return 0, 0, err
	}
	log.Debug("Finished parallel state iteration", "category", name, "count", totalCount, "size", common.StorageSize(totalBytes), "elapsed", common.PrettyDuration(time.Since(start)))
	return totalCount, totalBytes, nil
}
// Notify sends the state update to the size tracker, ignoring empty updates
// (where no state changes occurred).
//
// NOTE(review): the previous comment claimed the update is dropped when the
// channel is full, but updateCh is unbuffered and there is no default case —
// the send blocks until the run loop accepts the update, returning early only
// if the tracker has been stopped.
func (t *SizeTracker) Notify(update *stateUpdate) {
	if update == nil || update.empty() {
		return
	}
	select {
	case t.updateCh <- update:
	case <-t.abort:
		return
	}
}
// Query returns the state size specified by the root, or nil if not available.
// A nil root requests the size of the latest chain head; a non-nil root
// requests the size of that specific state. An error is returned when the
// tracker has been stopped or is not yet initialized.
func (t *SizeTracker) Query(root *common.Hash) (*SizeStats, error) {
	req := &stateSizeQuery{
		root:   root,
		result: make(chan *SizeStats, 1),
	}
	select {
	case t.queryCh <- req:
		// The responder always delivers exactly one value (possibly nil).
		return <-req.result, req.err
	case <-t.aborted:
		return nil, errors.New("state sizer has been closed")
	}
}

View file

@ -0,0 +1,231 @@
// Copyright 2025 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package state
import (
"testing"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/tracing"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/triedb"
"github.com/ethereum/go-ethereum/triedb/pathdb"
"github.com/holiman/uint256"
)
// TestSizeTracker verifies that incremental size tracking matches a from-disk
// measurement: it builds a 50-block baseline, measures it with build, applies
// 81 further blocks while accumulating per-block diffs via calSizeStats, then
// re-measures from disk after a simulated restart and checks that
// baseline + sum(diffs) equals the fresh measurement.
func TestSizeTracker(t *testing.T) {
	db := rawdb.NewMemoryDatabase()
	defer db.Close()
	tdb := triedb.NewDatabase(db, &triedb.Config{PathDB: pathdb.Defaults})
	sdb := NewDatabase(tdb, nil)
	// Generate 50 blocks to establish a baseline
	baselineBlockNum := uint64(50)
	currentRoot := types.EmptyRootHash
	addr1 := common.BytesToAddress([]byte{1, 0, 0, 1})
	addr2 := common.BytesToAddress([]byte{1, 0, 0, 2})
	addr3 := common.BytesToAddress([]byte{1, 0, 0, 3})
	// Create initial state with fixed accounts
	state, _ := New(currentRoot, sdb)
	state.AddBalance(addr1, uint256.NewInt(1000), tracing.BalanceChangeUnspecified)
	state.SetNonce(addr1, 1, tracing.NonceChangeUnspecified)
	state.SetState(addr1, common.HexToHash("0x1111"), common.HexToHash("0xaaaa"))
	state.SetState(addr1, common.HexToHash("0x2222"), common.HexToHash("0xbbbb"))
	state.AddBalance(addr2, uint256.NewInt(2000), tracing.BalanceChangeUnspecified)
	state.SetNonce(addr2, 2, tracing.NonceChangeUnspecified)
	state.SetCode(addr2, []byte{0x60, 0x80, 0x60, 0x40, 0x52}, tracing.CodeChangeUnspecified)
	state.AddBalance(addr3, uint256.NewInt(3000), tracing.BalanceChangeUnspecified)
	state.SetNonce(addr3, 3, tracing.NonceChangeUnspecified)
	currentRoot, _, err := state.CommitWithUpdate(1, true, false)
	if err != nil {
		t.Fatalf("Failed to commit initial state: %v", err)
	}
	if err := tdb.Commit(currentRoot, false); err != nil {
		t.Fatalf("Failed to commit initial trie: %v", err)
	}
	for i := 1; i < 50; i++ { // blocks 2-50
		blockNum := uint64(i + 1)
		newState, err := New(currentRoot, sdb)
		if err != nil {
			t.Fatalf("Failed to create new state at block %d: %v", blockNum, err)
		}
		testAddr := common.BigToAddress(uint256.NewInt(uint64(i + 100)).ToBig())
		newState.AddBalance(testAddr, uint256.NewInt(uint64((i+1)*1000)), tracing.BalanceChangeUnspecified)
		newState.SetNonce(testAddr, uint64(i+10), tracing.NonceChangeUnspecified)
		if i%2 == 0 {
			newState.SetState(addr1, common.BigToHash(uint256.NewInt(uint64(i+0x1000)).ToBig()), common.BigToHash(uint256.NewInt(uint64(i+0x2000)).ToBig()))
		}
		if i%3 == 0 {
			newState.SetCode(testAddr, []byte{byte(i), 0x60, 0x80, byte(i + 1), 0x52}, tracing.CodeChangeUnspecified)
		}
		root, _, err := newState.CommitWithUpdate(blockNum, true, false)
		if err != nil {
			t.Fatalf("Failed to commit state at block %d: %v", blockNum, err)
		}
		if err := tdb.Commit(root, false); err != nil {
			t.Fatalf("Failed to commit trie at block %d: %v", blockNum, err)
		}
		currentRoot = root
	}
	baselineRoot := currentRoot
	// Wait for snapshot completion
	for !tdb.SnapshotCompleted() {
		time.Sleep(100 * time.Millisecond)
	}
	// Calculate baseline from the intermediate persisted state
	baselineTracker := &SizeTracker{
		db:     db,
		triedb: tdb,
		abort:  make(chan struct{}),
	}
	done := make(chan buildResult)
	go baselineTracker.build(baselineRoot, baselineBlockNum, done)
	var baselineResult buildResult
	select {
	case baselineResult = <-done:
		if baselineResult.err != nil {
			t.Fatalf("Failed to get baseline stats: %v", baselineResult.err)
		}
	case <-time.After(30 * time.Second):
		t.Fatal("Timeout waiting for baseline stats")
	}
	baseline := baselineResult.stat
	// Now start the tracker and notify it of updates that happen AFTER the baseline
	tracker, err := NewSizeTracker(db, tdb)
	if err != nil {
		t.Fatalf("Failed to create size tracker: %v", err)
	}
	defer tracker.Stop()
	var trackedUpdates []SizeStats
	currentRoot = baselineRoot
	// Generate additional blocks beyond the baseline and track them
	for i := 49; i < 130; i++ { // blocks 51-131
		blockNum := uint64(i + 2)
		newState, err := New(currentRoot, sdb)
		if err != nil {
			t.Fatalf("Failed to create new state at block %d: %v", blockNum, err)
		}
		testAddr := common.BigToAddress(uint256.NewInt(uint64(i + 100)).ToBig())
		newState.AddBalance(testAddr, uint256.NewInt(uint64((i+1)*1000)), tracing.BalanceChangeUnspecified)
		newState.SetNonce(testAddr, uint64(i+10), tracing.NonceChangeUnspecified)
		if i%2 == 0 {
			newState.SetState(addr1, common.BigToHash(uint256.NewInt(uint64(i+0x1000)).ToBig()), common.BigToHash(uint256.NewInt(uint64(i+0x2000)).ToBig()))
		}
		if i%3 == 0 {
			newState.SetCode(testAddr, []byte{byte(i), 0x60, 0x80, byte(i + 1), 0x52}, tracing.CodeChangeUnspecified)
		}
		root, update, err := newState.CommitWithUpdate(blockNum, true, false)
		if err != nil {
			t.Fatalf("Failed to commit state at block %d: %v", blockNum, err)
		}
		if err := tdb.Commit(root, false); err != nil {
			t.Fatalf("Failed to commit trie at block %d: %v", blockNum, err)
		}
		diff, err := calSizeStats(update)
		if err != nil {
			t.Fatalf("Failed to calculate size stats for block %d: %v", blockNum, err)
		}
		trackedUpdates = append(trackedUpdates, diff)
		tracker.Notify(update)
		currentRoot = root
	}
	finalRoot := rawdb.ReadSnapshotRoot(db)
	// Ensure all commits are flushed to disk
	if err := tdb.Close(); err != nil {
		t.Fatalf("Failed to close triedb: %v", err)
	}
	// Reopen the database to simulate a restart
	tdb = triedb.NewDatabase(db, &triedb.Config{PathDB: pathdb.Defaults})
	defer tdb.Close()
	finalTracker := &SizeTracker{
		db:     db,
		triedb: tdb,
		abort:  make(chan struct{}),
	}
	finalDone := make(chan buildResult)
	// NOTE(review): the last generated block is 131, not 132; the block number
	// is only recorded in the result and does not affect the measurement.
	go finalTracker.build(finalRoot, uint64(132), finalDone)
	var result buildResult
	select {
	case result = <-finalDone:
		if result.err != nil {
			t.Fatalf("Failed to build final stats: %v", result.err)
		}
	case <-time.After(30 * time.Second):
		t.Fatal("Timeout waiting for final stats")
	}
	actualStats := result.stat
	expectedStats := baseline
	for _, diff := range trackedUpdates {
		expectedStats = expectedStats.add(diff)
	}
	// The final measured stats should match our calculated expected stats exactly
	if actualStats.Accounts != expectedStats.Accounts {
		t.Errorf("Account count mismatch: baseline(%d) + tracked_changes = %d, but final_measurement = %d", baseline.Accounts, expectedStats.Accounts, actualStats.Accounts)
	}
	if actualStats.AccountBytes != expectedStats.AccountBytes {
		t.Errorf("Account bytes mismatch: expected %d, got %d", expectedStats.AccountBytes, actualStats.AccountBytes)
	}
	if actualStats.Storages != expectedStats.Storages {
		t.Errorf("Storage count mismatch: baseline(%d) + tracked_changes = %d, but final_measurement = %d", baseline.Storages, expectedStats.Storages, actualStats.Storages)
	}
	if actualStats.StorageBytes != expectedStats.StorageBytes {
		t.Errorf("Storage bytes mismatch: expected %d, got %d", expectedStats.StorageBytes, actualStats.StorageBytes)
	}
	if actualStats.ContractCodes != expectedStats.ContractCodes {
		t.Errorf("Contract code count mismatch: baseline(%d) + tracked_changes = %d, but final_measurement = %d", baseline.ContractCodes, expectedStats.ContractCodes, actualStats.ContractCodes)
	}
	if actualStats.ContractCodeBytes != expectedStats.ContractCodeBytes {
		t.Errorf("Contract code bytes mismatch: expected %d, got %d", expectedStats.ContractCodeBytes, actualStats.ContractCodeBytes)
	}
	// TODO: failed on github actions, need to investigate
	// if actualStats.AccountTrienodes != expectedStats.AccountTrienodes {
	// 	t.Errorf("Account trie nodes mismatch: expected %d, got %d", expectedStats.AccountTrienodes, actualStats.AccountTrienodes)
	// }
	// if actualStats.AccountTrienodeBytes != expectedStats.AccountTrienodeBytes {
	// 	t.Errorf("Account trie node bytes mismatch: expected %d, got %d", expectedStats.AccountTrienodeBytes, actualStats.AccountTrienodeBytes)
	// }
	if actualStats.StorageTrienodes != expectedStats.StorageTrienodes {
		t.Errorf("Storage trie nodes mismatch: expected %d, got %d", expectedStats.StorageTrienodes, actualStats.StorageTrienodes)
	}
	if actualStats.StorageTrienodeBytes != expectedStats.StorageTrienodeBytes {
		t.Errorf("Storage trie node bytes mismatch: expected %d, got %d", expectedStats.StorageTrienodeBytes, actualStats.StorageTrienodeBytes)
	}
}

View file

@ -458,7 +458,7 @@ func (s *StateDB) SetNonce(addr common.Address, nonce uint64, reason tracing.Non
}
}
func (s *StateDB) SetCode(addr common.Address, code []byte) (prev []byte) {
func (s *StateDB) SetCode(addr common.Address, code []byte, reason tracing.CodeChangeReason) (prev []byte) {
stateObject := s.getOrNewStateObject(addr)
if stateObject != nil {
return stateObject.SetCode(crypto.Keccak256Hash(code), code)
@ -1155,7 +1155,7 @@ func (s *StateDB) GetTrie() Trie {
// commit gathers the state mutations accumulated along with the associated
// trie changes, resetting all internal flags with the new state as the base.
func (s *StateDB) commit(deleteEmptyObjects bool, noStorageWiping bool) (*stateUpdate, error) {
func (s *StateDB) commit(deleteEmptyObjects bool, noStorageWiping bool, blockNumber uint64) (*stateUpdate, error) {
// Short circuit in case any database failure occurred earlier.
if s.dbErr != nil {
return nil, fmt.Errorf("commit aborted due to earlier error: %v", s.dbErr)
@ -1307,13 +1307,13 @@ func (s *StateDB) commit(deleteEmptyObjects bool, noStorageWiping bool) (*stateU
origin := s.originalRoot
s.originalRoot = root
return newStateUpdate(noStorageWiping, origin, root, deletes, updates, nodes), nil
return newStateUpdate(noStorageWiping, origin, root, blockNumber, deletes, updates, nodes), nil
}
// commitAndFlush is a wrapper of commit which also commits the state mutations
// to the configured data stores.
func (s *StateDB) commitAndFlush(block uint64, deleteEmptyObjects bool, noStorageWiping bool) (*stateUpdate, error) {
ret, err := s.commit(deleteEmptyObjects, noStorageWiping)
ret, err := s.commit(deleteEmptyObjects, noStorageWiping, block)
if err != nil {
return nil, err
}
@ -1378,6 +1378,16 @@ func (s *StateDB) Commit(block uint64, deleteEmptyObjects bool, noStorageWiping
return ret.root, nil
}
// CommitWithUpdate writes the state mutations and returns both the root hash and the state update.
// This is useful for tracking state changes at the blockchain level.
func (s *StateDB) CommitWithUpdate(block uint64, deleteEmptyObjects bool, noStorageWiping bool) (common.Hash, *stateUpdate, error) {
ret, err := s.commitAndFlush(block, deleteEmptyObjects, noStorageWiping)
if err != nil {
return common.Hash{}, nil, err
}
return ret.root, ret, nil
}
// Prepare handles the preparatory steps for executing a state transition with.
// This method must be invoked before state transition.
//

View file

@ -89,7 +89,7 @@ func newStateTestAction(addr common.Address, r *rand.Rand, index int) testAction
code := make([]byte, 16)
binary.BigEndian.PutUint64(code, uint64(a.args[0]))
binary.BigEndian.PutUint64(code[8:], uint64(a.args[1]))
s.SetCode(addr, code)
s.SetCode(addr, code, tracing.CodeChangeUnspecified)
},
args: make([]int64, 2),
},

View file

@ -189,14 +189,20 @@ func (s *hookedStateDB) SetNonce(address common.Address, nonce uint64, reason tr
}
}
func (s *hookedStateDB) SetCode(address common.Address, code []byte) []byte {
prev := s.inner.SetCode(address, code)
if s.hooks.OnCodeChange != nil {
func (s *hookedStateDB) SetCode(address common.Address, code []byte, reason tracing.CodeChangeReason) []byte {
prev := s.inner.SetCode(address, code, reason)
if s.hooks.OnCodeChangeV2 != nil || s.hooks.OnCodeChange != nil {
prevHash := types.EmptyCodeHash
if len(prev) != 0 {
prevHash = crypto.Keccak256Hash(prev)
}
s.hooks.OnCodeChange(address, prevHash, prev, crypto.Keccak256Hash(code), code)
codeHash := crypto.Keccak256Hash(code)
if s.hooks.OnCodeChangeV2 != nil {
s.hooks.OnCodeChangeV2(address, prevHash, prev, codeHash, code, reason)
} else if s.hooks.OnCodeChange != nil {
s.hooks.OnCodeChange(address, prevHash, prev, codeHash, code)
}
}
return prev
}
@ -224,8 +230,12 @@ func (s *hookedStateDB) SelfDestruct(address common.Address) uint256.Int {
s.hooks.OnBalanceChange(address, prev.ToBig(), new(big.Int), tracing.BalanceDecreaseSelfdestruct)
}
if s.hooks.OnCodeChange != nil && len(prevCode) > 0 {
s.hooks.OnCodeChange(address, prevCodeHash, prevCode, types.EmptyCodeHash, nil)
if len(prevCode) > 0 {
if s.hooks.OnCodeChangeV2 != nil {
s.hooks.OnCodeChangeV2(address, prevCodeHash, prevCode, types.EmptyCodeHash, nil, tracing.CodeChangeSelfDestruct)
} else if s.hooks.OnCodeChange != nil {
s.hooks.OnCodeChange(address, prevCodeHash, prevCode, types.EmptyCodeHash, nil)
}
}
return prev
@ -242,12 +252,16 @@ func (s *hookedStateDB) SelfDestruct6780(address common.Address) (uint256.Int, b
prev, changed := s.inner.SelfDestruct6780(address)
if s.hooks.OnBalanceChange != nil && changed && !prev.IsZero() {
if s.hooks.OnBalanceChange != nil && !prev.IsZero() {
s.hooks.OnBalanceChange(address, prev.ToBig(), new(big.Int), tracing.BalanceDecreaseSelfdestruct)
}
if s.hooks.OnCodeChange != nil && changed && len(prevCode) > 0 {
s.hooks.OnCodeChange(address, prevCodeHash, prevCode, types.EmptyCodeHash, nil)
if changed && len(prevCode) > 0 {
if s.hooks.OnCodeChangeV2 != nil {
s.hooks.OnCodeChangeV2(address, prevCodeHash, prevCode, types.EmptyCodeHash, nil, tracing.CodeChangeSelfDestruct)
} else if s.hooks.OnCodeChange != nil {
s.hooks.OnCodeChange(address, prevCodeHash, prevCode, types.EmptyCodeHash, nil)
}
}
return prev, changed

View file

@ -114,7 +114,7 @@ func TestHooks(t *testing.T) {
sdb.AddBalance(common.Address{0xaa}, uint256.NewInt(100), tracing.BalanceChangeUnspecified)
sdb.SubBalance(common.Address{0xaa}, uint256.NewInt(50), tracing.BalanceChangeTransfer)
sdb.SetNonce(common.Address{0xaa}, 1337, tracing.NonceChangeGenesis)
sdb.SetCode(common.Address{0xaa}, []byte{0x13, 37})
sdb.SetCode(common.Address{0xaa}, []byte{0x13, 37}, tracing.CodeChangeUnspecified)
sdb.SetState(common.Address{0xaa}, common.HexToHash("0x01"), common.HexToHash("0x11"))
sdb.SetState(common.Address{0xaa}, common.HexToHash("0x01"), common.HexToHash("0x22"))
sdb.SetTransientState(common.Address{0xaa}, common.HexToHash("0x02"), common.HexToHash("0x01"))

View file

@ -65,7 +65,7 @@ func TestUpdateLeaks(t *testing.T) {
state.SetState(addr, common.BytesToHash([]byte{i, i, i}), common.BytesToHash([]byte{i, i, i, i}))
}
if i%3 == 0 {
state.SetCode(addr, []byte{i, i, i, i, i})
state.SetCode(addr, []byte{i, i, i, i, i}, tracing.CodeChangeUnspecified)
}
}
@ -101,7 +101,7 @@ func TestIntermediateLeaks(t *testing.T) {
state.SetState(addr, common.Hash{i, i, i, tweak}, common.Hash{i, i, i, i, tweak})
}
if i%3 == 0 {
state.SetCode(addr, []byte{i, i, i, i, i, tweak})
state.SetCode(addr, []byte{i, i, i, i, i, tweak}, tracing.CodeChangeUnspecified)
}
}
@ -374,7 +374,7 @@ func newTestAction(addr common.Address, r *rand.Rand) testAction {
code := make([]byte, 16)
binary.BigEndian.PutUint64(code, uint64(a.args[0]))
binary.BigEndian.PutUint64(code[8:], uint64(a.args[1]))
s.SetCode(addr, code)
s.SetCode(addr, code, tracing.CodeChangeUnspecified)
},
args: make([]int64, 2),
},
@ -403,7 +403,7 @@ func newTestAction(addr common.Address, r *rand.Rand) testAction {
// which would cause a difference in state when unrolling
// the journal. (CreateContact assumes created was false prior to
// invocation, and the journal rollback sets it to false).
s.SetCode(addr, []byte{1})
s.SetCode(addr, []byte{1}, tracing.CodeChangeUnspecified)
}
},
},
@ -731,7 +731,7 @@ func TestCopyCommitCopy(t *testing.T) {
sval := common.HexToHash("bbb")
state.SetBalance(addr, uint256.NewInt(42), tracing.BalanceChangeUnspecified) // Change the account trie
state.SetCode(addr, []byte("hello")) // Change an external metadata
state.SetCode(addr, []byte("hello"), tracing.CodeChangeUnspecified) // Change an external metadata
state.SetState(addr, skey, sval) // Change the storage trie
if balance := state.GetBalance(addr); balance.Cmp(uint256.NewInt(42)) != 0 {
@ -772,7 +772,7 @@ func TestCopyCommitCopy(t *testing.T) {
t.Fatalf("second copy non-committed storage slot mismatch: have %x, want %x", val, sval)
}
if val := copyTwo.GetCommittedState(addr, skey); val != (common.Hash{}) {
t.Fatalf("second copy committed storage slot mismatch: have %x, want %x", val, sval)
t.Fatalf("second copy committed storage slot mismatch: have %x, want %x", val, common.Hash{})
}
// Commit state, ensure states can be loaded from disk
root, _ := state.Commit(0, false, false)
@ -804,7 +804,7 @@ func TestCopyCopyCommitCopy(t *testing.T) {
sval := common.HexToHash("bbb")
state.SetBalance(addr, uint256.NewInt(42), tracing.BalanceChangeUnspecified) // Change the account trie
state.SetCode(addr, []byte("hello")) // Change an external metadata
state.SetCode(addr, []byte("hello"), tracing.CodeChangeUnspecified) // Change an external metadata
state.SetState(addr, skey, sval) // Change the storage trie
if balance := state.GetBalance(addr); balance.Cmp(uint256.NewInt(42)) != 0 {
@ -859,7 +859,7 @@ func TestCopyCopyCommitCopy(t *testing.T) {
t.Fatalf("third copy non-committed storage slot mismatch: have %x, want %x", val, sval)
}
if val := copyThree.GetCommittedState(addr, skey); val != (common.Hash{}) {
t.Fatalf("third copy committed storage slot mismatch: have %x, want %x", val, sval)
t.Fatalf("third copy committed storage slot mismatch: have %x, want %x", val, common.Hash{})
}
}
@ -874,7 +874,7 @@ func TestCommitCopy(t *testing.T) {
sval1, sval2 := common.HexToHash("b1"), common.HexToHash("b2")
state.SetBalance(addr, uint256.NewInt(42), tracing.BalanceChangeUnspecified) // Change the account trie
state.SetCode(addr, []byte("hello")) // Change an external metadata
state.SetCode(addr, []byte("hello"), tracing.CodeChangeUnspecified) // Change an external metadata
state.SetState(addr, skey1, sval1) // Change the storage trie
if balance := state.GetBalance(addr); balance.Cmp(uint256.NewInt(42)) != 0 {
@ -912,10 +912,10 @@ func TestCommitCopy(t *testing.T) {
}
// Slots cached in the stateDB, available after commit
if val := copied.GetState(addr, skey2); val != sval2 {
t.Fatalf("unexpected storage slot: have %x", sval1)
t.Fatalf("unexpected storage slot: have %x, want %x", val, sval2)
}
if val := copied.GetCommittedState(addr, skey2); val != sval2 {
t.Fatalf("unexpected storage slot: have %x", val)
t.Fatalf("unexpected storage slot: have %x, want %x", val, sval2)
}
}
@ -987,10 +987,10 @@ func testMissingTrieNodes(t *testing.T, scheme string) {
addr := common.BytesToAddress([]byte("so"))
{
state.SetBalance(addr, uint256.NewInt(1), tracing.BalanceChangeUnspecified)
state.SetCode(addr, []byte{1, 2, 3})
state.SetCode(addr, []byte{1, 2, 3}, tracing.CodeChangeUnspecified)
a2 := common.BytesToAddress([]byte("another"))
state.SetBalance(a2, uint256.NewInt(100), tracing.BalanceChangeUnspecified)
state.SetCode(a2, []byte{1, 2, 4})
state.SetCode(a2, []byte{1, 2, 4}, tracing.CodeChangeUnspecified)
root, _ = state.Commit(0, false, false)
t.Logf("root: %x", root)
// force-flush

View file

@ -64,8 +64,10 @@ type accountUpdate struct {
// execution. It contains information about mutated contract codes, accounts,
// and storage slots, along with their original values.
type stateUpdate struct {
originRoot common.Hash // hash of the state before applying mutation
root common.Hash // hash of the state after applying mutation
originRoot common.Hash // hash of the state before applying mutation
root common.Hash // hash of the state after applying mutation
blockNumber uint64 // Associated block number
accounts map[common.Hash][]byte // accounts stores mutated accounts in 'slim RLP' encoding
accountsOrigin map[common.Address][]byte // accountsOrigin stores the original values of mutated accounts in 'slim RLP' encoding
@ -95,7 +97,7 @@ func (sc *stateUpdate) empty() bool {
//
// rawStorageKey is a flag indicating whether to use the raw storage slot key or
// the hash of the slot key for constructing state update object.
func newStateUpdate(rawStorageKey bool, originRoot common.Hash, root common.Hash, deletes map[common.Hash]*accountDelete, updates map[common.Hash]*accountUpdate, nodes *trienode.MergedNodeSet) *stateUpdate {
func newStateUpdate(rawStorageKey bool, originRoot common.Hash, root common.Hash, blockNumber uint64, deletes map[common.Hash]*accountDelete, updates map[common.Hash]*accountUpdate, nodes *trienode.MergedNodeSet) *stateUpdate {
var (
accounts = make(map[common.Hash][]byte)
accountsOrigin = make(map[common.Address][]byte)
@ -164,6 +166,7 @@ func newStateUpdate(rawStorageKey bool, originRoot common.Hash, root common.Hash
return &stateUpdate{
originRoot: originRoot,
root: root,
blockNumber: blockNumber,
accounts: accounts,
accountsOrigin: accountsOrigin,
storages: storages,

View file

@ -39,7 +39,7 @@ func filledStateDB() *StateDB {
sval := common.HexToHash("bbb")
state.SetBalance(addr, uint256.NewInt(42), tracing.BalanceChangeUnspecified) // Change the account trie
state.SetCode(addr, []byte("hello")) // Change an external metadata
state.SetCode(addr, []byte("hello"), tracing.CodeChangeUnspecified) // Change an external metadata
state.SetState(addr, skey, sval) // Change the storage trie
for i := 0; i < 100; i++ {
sk := common.BigToHash(big.NewInt(int64(i)))
@ -81,7 +81,7 @@ func TestVerklePrefetcher(t *testing.T) {
sval := testrand.Hash()
state.SetBalance(addr, uint256.NewInt(42), tracing.BalanceChangeUnspecified) // Change the account trie
state.SetCode(addr, []byte("hello")) // Change an external metadata
state.SetCode(addr, []byte("hello"), tracing.CodeChangeUnspecified) // Change an external metadata
state.SetState(addr, skey, sval) // Change the storage trie
root, _ := state.Commit(0, true, false)

View file

@ -164,8 +164,12 @@ type Message struct {
// or the state prefetching.
SkipNonceChecks bool
// When SkipFromEOACheck is true, the message sender is not checked to be an EOA.
SkipFromEOACheck bool
// When set, the message is not treated as a transaction, and certain
// transaction-specific checks are skipped:
//
// - From is not verified to be an EOA
// - GasLimit is not checked against the protocol defined tx gaslimit
SkipTransactionChecks bool
}
// TransactionToMessage converts a transaction into a Message.
@ -182,7 +186,7 @@ func TransactionToMessage(tx *types.Transaction, s types.Signer, baseFee *big.In
AccessList: tx.AccessList(),
SetCodeAuthorizations: tx.SetCodeAuthorizations(),
SkipNonceChecks: false,
SkipFromEOACheck: false,
SkipTransactionChecks: false,
BlobHashes: tx.BlobHashes(),
BlobGasFeeCap: tx.BlobGasFeeCap(),
}
@ -320,7 +324,12 @@ func (st *stateTransition) preCheck() error {
msg.From.Hex(), stNonce)
}
}
if !msg.SkipFromEOACheck {
isOsaka := st.evm.ChainConfig().IsOsaka(st.evm.Context.BlockNumber, st.evm.Context.Time)
if !msg.SkipTransactionChecks {
// Verify tx gas limit does not exceed EIP-7825 cap.
if isOsaka && msg.GasLimit > params.MaxTxGas {
return fmt.Errorf("%w (cap: %d, tx: %d)", ErrGasLimitTooHigh, params.MaxTxGas, msg.GasLimit)
}
// Make sure the sender is an EOA
code := st.state.GetCode(msg.From)
_, delegated := types.ParseDelegation(code)
@ -354,7 +363,6 @@ func (st *stateTransition) preCheck() error {
}
}
// Check the blob version validity
isOsaka := st.evm.ChainConfig().IsOsaka(st.evm.Context.BlockNumber, st.evm.Context.Time)
if msg.BlobHashes != nil {
// The to field of a blob tx type is mandatory, and a `BlobTx` transaction internally
// has it as a non-nillable value, so any msg derived from blob transaction has it non-nil.
@ -398,10 +406,6 @@ func (st *stateTransition) preCheck() error {
return fmt.Errorf("%w (sender %v)", ErrEmptyAuthList, msg.From)
}
}
// Verify tx gas limit does not exceed EIP-7825 cap.
if isOsaka && msg.GasLimit > params.MaxTxGas {
return fmt.Errorf("%w (cap: %d, tx: %d)", ErrGasLimitTooHigh, params.MaxTxGas, msg.GasLimit)
}
return st.buyGas()
}
@ -617,12 +621,12 @@ func (st *stateTransition) applyAuthorization(auth *types.SetCodeAuthorization)
st.state.SetNonce(authority, auth.Nonce+1, tracing.NonceChangeAuthorization)
if auth.Address == (common.Address{}) {
// Delegation to zero address means clear.
st.state.SetCode(authority, nil)
st.state.SetCode(authority, nil, tracing.CodeChangeAuthorizationClear)
return nil
}
// Otherwise install delegation to auth.Address.
st.state.SetCode(authority, types.AddressToDelegation(auth.Address))
st.state.SetCode(authority, types.AddressToDelegation(auth.Address), tracing.CodeChangeAuthorization)
return nil
}

View file

@ -19,20 +19,21 @@ package stateless
import (
"io"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/rlp"
)
// toExtWitness converts our internal witness representation to the consensus one.
func (w *Witness) toExtWitness() *extWitness {
ext := &extWitness{
// ToExtWitness converts our internal witness representation to the consensus one.
func (w *Witness) ToExtWitness() *ExtWitness {
ext := &ExtWitness{
Headers: w.Headers,
}
ext.Codes = make([][]byte, 0, len(w.Codes))
ext.Codes = make([]hexutil.Bytes, 0, len(w.Codes))
for code := range w.Codes {
ext.Codes = append(ext.Codes, []byte(code))
}
ext.State = make([][]byte, 0, len(w.State))
ext.State = make([]hexutil.Bytes, 0, len(w.State))
for node := range w.State {
ext.State = append(ext.State, []byte(node))
}
@ -40,7 +41,7 @@ func (w *Witness) toExtWitness() *extWitness {
}
// fromExtWitness converts the consensus witness format into our internal one.
func (w *Witness) fromExtWitness(ext *extWitness) error {
func (w *Witness) fromExtWitness(ext *ExtWitness) error {
w.Headers = ext.Headers
w.Codes = make(map[string]struct{}, len(ext.Codes))
@ -56,21 +57,22 @@ func (w *Witness) fromExtWitness(ext *extWitness) error {
// EncodeRLP serializes a witness as RLP.
func (w *Witness) EncodeRLP(wr io.Writer) error {
return rlp.Encode(wr, w.toExtWitness())
return rlp.Encode(wr, w.ToExtWitness())
}
// DecodeRLP decodes a witness from RLP.
func (w *Witness) DecodeRLP(s *rlp.Stream) error {
var ext extWitness
var ext ExtWitness
if err := s.Decode(&ext); err != nil {
return err
}
return w.fromExtWitness(&ext)
}
// extWitness is a witness RLP encoding for transferring across clients.
type extWitness struct {
Headers []*types.Header
Codes [][]byte
State [][]byte
// ExtWitness is a witness RLP encoding for transferring across clients.
type ExtWitness struct {
Headers []*types.Header `json:"headers"`
Codes []hexutil.Bytes `json:"codes"`
State []hexutil.Bytes `json:"state"`
Keys []hexutil.Bytes `json:"keys"`
}

View file

@ -17,76 +17,37 @@
package stateless
import (
"encoding/json"
"maps"
"slices"
"sort"
"strconv"
"strings"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/metrics"
)
var (
accountTrieDepthAvg = metrics.NewRegisteredGauge("witness/trie/account/depth/avg", nil)
accountTrieDepthMin = metrics.NewRegisteredGauge("witness/trie/account/depth/min", nil)
accountTrieDepthMax = metrics.NewRegisteredGauge("witness/trie/account/depth/max", nil)
var accountTrieLeavesAtDepth [16]*metrics.Counter
var storageTrieLeavesAtDepth [16]*metrics.Counter
storageTrieDepthAvg = metrics.NewRegisteredGauge("witness/trie/storage/depth/avg", nil)
storageTrieDepthMin = metrics.NewRegisteredGauge("witness/trie/storage/depth/min", nil)
storageTrieDepthMax = metrics.NewRegisteredGauge("witness/trie/storage/depth/max", nil)
)
// depthStats tracks min/avg/max statistics for trie access depths.
type depthStats struct {
totalDepth int64
samples int64
minDepth int64
maxDepth int64
}
// newDepthStats creates a new depthStats with default values.
func newDepthStats() *depthStats {
return &depthStats{minDepth: -1}
}
// add records a new depth sample.
func (d *depthStats) add(n int64) {
if n < 0 {
return
func init() {
for i := 0; i < 16; i++ {
accountTrieLeavesAtDepth[i] = metrics.NewRegisteredCounter("witness/trie/account/leaves/depth_"+strconv.Itoa(i), nil)
storageTrieLeavesAtDepth[i] = metrics.NewRegisteredCounter("witness/trie/storage/leaves/depth_"+strconv.Itoa(i), nil)
}
d.totalDepth += n
d.samples++
if d.minDepth == -1 || n < d.minDepth {
d.minDepth = n
}
if n > d.maxDepth {
d.maxDepth = n
}
}
// report uploads the collected statistics into the provided gauges.
func (d *depthStats) report(maxGauge, minGauge, avgGauge *metrics.Gauge) {
if d.samples == 0 {
return
}
maxGauge.Update(d.maxDepth)
minGauge.Update(d.minDepth)
avgGauge.Update(d.totalDepth / d.samples)
}
// WitnessStats aggregates statistics for account and storage trie accesses.
type WitnessStats struct {
accountTrie *depthStats
storageTrie *depthStats
accountTrieLeaves [16]int64
storageTrieLeaves [16]int64
}
// NewWitnessStats creates a new WitnessStats collector.
func NewWitnessStats() *WitnessStats {
return &WitnessStats{
accountTrie: newDepthStats(),
storageTrie: newDepthStats(),
}
return &WitnessStats{}
}
// Add records trie access depths from the given node paths.
@ -102,16 +63,30 @@ func (s *WitnessStats) Add(nodes map[string][]byte, owner common.Hash) {
// The last path is always a leaf.
if i == len(paths)-1 || !strings.HasPrefix(paths[i+1], paths[i]) {
if owner == (common.Hash{}) {
s.accountTrie.add(int64(len(path)))
s.accountTrieLeaves[len(path)] += 1
} else {
s.storageTrie.add(int64(len(path)))
s.storageTrieLeaves[len(path)] += 1
}
}
}
}
// ReportMetrics reports the collected statistics to the global metrics registry.
func (s *WitnessStats) ReportMetrics() {
s.accountTrie.report(accountTrieDepthMax, accountTrieDepthMin, accountTrieDepthAvg)
s.storageTrie.report(storageTrieDepthMax, storageTrieDepthMin, storageTrieDepthAvg)
func (s *WitnessStats) ReportMetrics(blockNumber uint64) {
// Encode the metrics as JSON for easier consumption
accountLeavesJson, _ := json.Marshal(s.accountTrieLeaves)
storageLeavesJson, _ := json.Marshal(s.storageTrieLeaves)
// Log account trie depth statistics
log.Info("Account trie depth stats",
"block", blockNumber,
"leavesAtDepth", string(accountLeavesJson))
log.Info("Storage trie depth stats",
"block", blockNumber,
"leavesAtDepth", string(storageLeavesJson))
for i := 0; i < 16; i++ {
accountTrieLeavesAtDepth[i].Inc(s.accountTrieLeaves[i])
storageTrieLeavesAtDepth[i].Inc(s.storageTrieLeaves[i])
}
}

View file

@ -24,27 +24,32 @@ import (
func TestWitnessStatsAdd(t *testing.T) {
tests := []struct {
name string
nodes map[string][]byte
owner common.Hash
expectedAccountDepth int64
expectedStorageDepth int64
name string
nodes map[string][]byte
owner common.Hash
expectedAccountLeaves map[int64]int64
expectedStorageLeaves map[int64]int64
}{
{
name: "empty nodes",
nodes: map[string][]byte{},
owner: common.Hash{},
expectedAccountDepth: 0,
expectedStorageDepth: 0,
name: "empty nodes",
nodes: map[string][]byte{},
owner: common.Hash{},
},
{
name: "single account trie leaf at depth 0",
nodes: map[string][]byte{
"": []byte("data"),
},
owner: common.Hash{},
expectedAccountLeaves: map[int64]int64{0: 1},
},
{
name: "single account trie leaf",
nodes: map[string][]byte{
"abc": []byte("data"),
},
owner: common.Hash{},
expectedAccountDepth: 3,
expectedStorageDepth: 0,
owner: common.Hash{},
expectedAccountLeaves: map[int64]int64{3: 1},
},
{
name: "account trie with internal nodes",
@ -53,9 +58,8 @@ func TestWitnessStatsAdd(t *testing.T) {
"ab": []byte("data2"),
"abc": []byte("data3"),
},
owner: common.Hash{},
expectedAccountDepth: 3, // Only "abc" is a leaf
expectedStorageDepth: 0,
owner: common.Hash{},
expectedAccountLeaves: map[int64]int64{3: 1}, // Only "abc" is a leaf
},
{
name: "multiple account trie branches",
@ -67,9 +71,8 @@ func TestWitnessStatsAdd(t *testing.T) {
"bc": []byte("data5"),
"bcd": []byte("data6"),
},
owner: common.Hash{},
expectedAccountDepth: 6, // "abc" (3) + "bcd" (3) = 6
expectedStorageDepth: 0,
owner: common.Hash{},
expectedAccountLeaves: map[int64]int64{3: 2}, // "abc" (3) + "bcd" (3)
},
{
name: "siblings are all leaves",
@ -78,9 +81,8 @@ func TestWitnessStatsAdd(t *testing.T) {
"ab": []byte("data2"),
"ac": []byte("data3"),
},
owner: common.Hash{},
expectedAccountDepth: 6, // 2 + 2 + 2 = 6
expectedStorageDepth: 0,
owner: common.Hash{},
expectedAccountLeaves: map[int64]int64{2: 3},
},
{
name: "storage trie leaves",
@ -90,9 +92,8 @@ func TestWitnessStatsAdd(t *testing.T) {
"123": []byte("data3"),
"124": []byte("data4"),
},
owner: common.HexToHash("0x1234"),
expectedAccountDepth: 0,
expectedStorageDepth: 6, // "123" (3) + "124" (3) = 6
owner: common.HexToHash("0x1234"),
expectedStorageLeaves: map[int64]int64{3: 2}, // "123" (3) + "124" (3)
},
{
name: "complex trie structure",
@ -107,9 +108,8 @@ func TestWitnessStatsAdd(t *testing.T) {
"235": []byte("data8"),
"3": []byte("data9"),
},
owner: common.Hash{},
expectedAccountDepth: 13, // "123"(3) + "124"(3) + "234"(3) + "235"(3) + "3"(1) = 13
expectedStorageDepth: 0,
owner: common.Hash{},
expectedAccountLeaves: map[int64]int64{1: 1, 3: 4}, // "123"(3) + "124"(3) + "234"(3) + "235"(3) + "3"(1)
},
}
@ -118,14 +118,23 @@ func TestWitnessStatsAdd(t *testing.T) {
stats := NewWitnessStats()
stats.Add(tt.nodes, tt.owner)
var expectedAccountTrieLeaves [16]int64
for depth, count := range tt.expectedAccountLeaves {
expectedAccountTrieLeaves[depth] = count
}
var expectedStorageTrieLeaves [16]int64
for depth, count := range tt.expectedStorageLeaves {
expectedStorageTrieLeaves[depth] = count
}
// Check account trie depth
if stats.accountTrie.totalDepth != tt.expectedAccountDepth {
t.Errorf("Account trie total depth = %d, want %d", stats.accountTrie.totalDepth, tt.expectedAccountDepth)
if stats.accountTrieLeaves != expectedAccountTrieLeaves {
t.Errorf("Account trie total depth = %v, want %v", stats.accountTrieLeaves, expectedAccountTrieLeaves)
}
// Check storage trie depth
if stats.storageTrie.totalDepth != tt.expectedStorageDepth {
t.Errorf("Storage trie total depth = %d, want %d", stats.storageTrie.totalDepth, tt.expectedStorageDepth)
if stats.storageTrieLeaves != expectedStorageTrieLeaves {
t.Errorf("Storage trie total depth = %v, want %v", stats.storageTrieLeaves, expectedStorageTrieLeaves)
}
})
}
@ -144,11 +153,10 @@ func TestWitnessStatsMinMax(t *testing.T) {
}, common.Hash{})
// Only "abcde" is a leaf (depth 5)
if stats.accountTrie.minDepth != 5 {
t.Errorf("Account trie min depth = %d, want %d", stats.accountTrie.minDepth, 5)
}
if stats.accountTrie.maxDepth != 5 {
t.Errorf("Account trie max depth = %d, want %d", stats.accountTrie.maxDepth, 5)
for i, v := range stats.accountTrieLeaves {
if v != 0 && i != 5 {
t.Errorf("leaf found at invalid depth %d", i)
}
}
// Add more leaves with different depths
@ -158,11 +166,10 @@ func TestWitnessStatsMinMax(t *testing.T) {
}, common.Hash{})
// Now we have leaves at depths 1, 2, and 5
if stats.accountTrie.minDepth != 1 {
t.Errorf("Account trie min depth after update = %d, want %d", stats.accountTrie.minDepth, 1)
}
if stats.accountTrie.maxDepth != 5 {
t.Errorf("Account trie max depth after update = %d, want %d", stats.accountTrie.maxDepth, 5)
for i, v := range stats.accountTrieLeaves {
if v != 0 && (i != 5 && i != 2 && i != 1) {
t.Errorf("leaf found at invalid depth %d", i)
}
}
}
@ -179,7 +186,12 @@ func TestWitnessStatsAverage(t *testing.T) {
// All are leaves: 2 + 2 + 3 + 4 = 11 total, 4 samples
expectedAvg := int64(11) / int64(4)
actualAvg := stats.accountTrie.totalDepth / stats.accountTrie.samples
var actualAvg, totalSamples int64
for i, c := range stats.accountTrieLeaves {
actualAvg += c * int64(i)
totalSamples += c
}
actualAvg = actualAvg / totalSamples
if actualAvg != expectedAvg {
t.Errorf("Account trie average depth = %d, want %d", actualAvg, expectedAvg)

View file

@ -100,6 +100,10 @@ func (w *Witness) AddState(nodes map[string][]byte) {
}
}
func (w *Witness) AddKey() {
panic("not yet implemented")
}
// Copy deep-copies the witness object. Witness.Block isn't deep-copied as it
// is never mutated by Witness
func (w *Witness) Copy() *Witness {

View file

@ -4,6 +4,27 @@ All notable changes to the tracing interface will be documented in this file.
## [Unreleased]
### Deprecated methods
- `OnCodeChange(addr common.Address, prevCodeHash common.Hash, prevCode []byte, codeHash common.Hash, code []byte)`: This hook is deprecated in favor of `OnCodeChangeV2` which includes a reason parameter ([#32525](https://github.com/ethereum/go-ethereum/pull/32525)).
### New methods
- `OnCodeChangeV2(addr common.Address, prevCodeHash common.Hash, prevCode []byte, codeHash common.Hash, code []byte, reason CodeChangeReason)`: This hook is called when a code change occurs. It is a successor to `OnCodeChange` with an additional reason parameter ([#32525](https://github.com/ethereum/go-ethereum/pull/32525)).
### New types
- `CodeChangeReason` is a new type used to provide a reason for code changes. It includes various reasons such as contract creation, genesis initialization, EIP-7702 authorization, self-destruct, and revert operations ([#32525](https://github.com/ethereum/go-ethereum/pull/32525)).
## [v1.15.4](https://github.com/ethereum/go-ethereum/releases/tag/v1.15.4)
### Modified types
- `GasChangeReason` has been extended with auto-generated String() methods for better debugging and logging ([#31234](https://github.com/ethereum/go-ethereum/pull/31234)).
- `NonceChangeReason` has been extended with auto-generated String() methods for better debugging and logging ([#31234](https://github.com/ethereum/go-ethereum/pull/31234)).
## [v1.15.0](https://github.com/ethereum/go-ethereum/releases/tag/v1.15.0)
The tracing interface has been extended with backwards-compatible changes to support more use-cases and simplify tracer code. The most notable change is a state journaling library which emits reverse events when a call is reverted.
### Deprecated methods
@ -23,8 +44,13 @@ The tracing interface has been extended with backwards-compatible changes to sup
### Modified types
- `VMContext.StateDB` has been extended with `GetCodeHash(addr common.Address) common.Hash` method used to retrieve the code hash an account.
- `VMContext.StateDB` has been extended with the following method:
- `GetCodeHash(addr common.Address) common.Hash` method used to retrieve the code hash of an account.
- `BlockEvent` has been modified:
- The `TD` (Total Difficulty) field has been removed ([#30744](https://github.com/ethereum/go-ethereum/pull/30744)).
- `BalanceChangeReason` has been extended with the `BalanceChangeRevert` reason. More on that below.
- `GasChangeReason` has been extended with the following reason:
- `GasChangeTxDataFloor` is the amount of extra gas the transaction has to pay to reach the minimum gas requirement for the transaction data. This change will always be a negative change.
### State journaling
@ -49,6 +75,26 @@ The state changes that are covered by the journaling library are:
- `OnCodeChange`
- `OnStorageChange`
## [v1.14.12](https://github.com/ethereum/go-ethereum/releases/tag/v1.14.12)
This release contains a change in behavior for `OnCodeChange` hook and an extension to the StateDB interface.
### Modified types
- `VMContext.StateDB` has been extended with the following method:
- `GetTransientState(addr common.Address, slot common.Hash) common.Hash` method used to access contract transient storage ([#30531](https://github.com/ethereum/go-ethereum/pull/30531)).
### `OnCodeChange` change
The `OnCodeChange` hook is now called when the code of a contract is removed due to a selfdestruct. Previously, no code change was emitted on such occasions.
## [v1.14.10](https://github.com/ethereum/go-ethereum/releases/tag/v1.14.10)
### Modified types
- `OpContext` has been extended with the following method:
- `ContractCode() []byte` provides access to the contract bytecode within the OpContext interface ([#30466](https://github.com/ethereum/go-ethereum/pull/30466)).
## [v1.14.9](https://github.com/ethereum/go-ethereum/releases/tag/v1.14.9)
### Modified types
@ -56,13 +102,6 @@ The state changes that are covered by the journaling library are:
- `GasChangeReason` has been extended with the following reasons which will be enabled only post-Verkle. There shouldn't be any gas changes with those reasons prior to the fork.
- `GasChangeWitnessContractCollisionCheck` flags the event of adding to the witness when checking for contract address collision.
## [v1.14.12]
This release contains a change in behavior for `OnCodeChange` hook.
### `OnCodeChange` change
The `OnCodeChange` hook is now called when the code of a contract is removed due to a selfdestruct. Previously, no code change was emitted on such occasions.
## [v1.14.4]
@ -148,7 +187,12 @@ The hooks `CaptureStart` and `CaptureEnd` have been removed. These hooks signale
- `CaptureState` -> `OnOpcode(pc uint64, op byte, gas, cost uint64, scope tracing.OpContext, rData []byte, depth int, err error)`. `op` is of type `byte` which can be cast to `vm.OpCode` when necessary. A `*vm.ScopeContext` is not passed anymore. It is replaced by `tracing.OpContext` which offers access to the memory, stack and current contract.
- `CaptureFault` -> `OnFault(pc uint64, op byte, gas, cost uint64, scope tracing.OpContext, depth int, err error)`. Similar to above.
[unreleased]: https://github.com/ethereum/go-ethereum/compare/v1.14.8...master
[v1.14.0]: https://github.com/ethereum/go-ethereum/releases/tag/v1.14.0
[v1.14.3]: https://github.com/ethereum/go-ethereum/releases/tag/v1.14.3
[unreleased]: https://github.com/ethereum/go-ethereum/compare/v1.16.3...master
[v1.15.4]: https://github.com/ethereum/go-ethereum/releases/tag/v1.15.4
[v1.15.0]: https://github.com/ethereum/go-ethereum/releases/tag/v1.15.0
[v1.14.12]: https://github.com/ethereum/go-ethereum/releases/tag/v1.14.12
[v1.14.10]: https://github.com/ethereum/go-ethereum/releases/tag/v1.14.10
[v1.14.9]: https://github.com/ethereum/go-ethereum/releases/tag/v1.14.9
[v1.14.4]: https://github.com/ethereum/go-ethereum/releases/tag/v1.14.4
[v1.14.3]: https://github.com/ethereum/go-ethereum/releases/tag/v1.14.3
[v1.14.0]: https://github.com/ethereum/go-ethereum/releases/tag/v1.14.0

View file

@ -0,0 +1,29 @@
// Code generated by "stringer -type=CodeChangeReason -trimprefix=CodeChange -output gen_code_change_reason_stringer.go"; DO NOT EDIT.
package tracing
import "strconv"
func _() {
// An "invalid array index" compiler error signifies that the constant values have changed.
// Re-run the stringer command to generate them again.
var x [1]struct{}
_ = x[CodeChangeUnspecified-0]
_ = x[CodeChangeContractCreation-1]
_ = x[CodeChangeGenesis-2]
_ = x[CodeChangeAuthorization-3]
_ = x[CodeChangeAuthorizationClear-4]
_ = x[CodeChangeSelfDestruct-5]
_ = x[CodeChangeRevert-6]
}
const _CodeChangeReason_name = "UnspecifiedContractCreationGenesisAuthorizationAuthorizationClearSelfDestructRevert"
var _CodeChangeReason_index = [...]uint8{0, 11, 27, 34, 47, 65, 77, 83}
func (i CodeChangeReason) String() string {
if i >= CodeChangeReason(len(_CodeChangeReason_index)-1) {
return "CodeChangeReason(" + strconv.FormatInt(int64(i), 10) + ")"
}
return _CodeChangeReason_name[_CodeChangeReason_index[i]:_CodeChangeReason_index[i+1]]
}

View file

@ -177,6 +177,9 @@ type (
// CodeChangeHook is called when the code of an account changes.
CodeChangeHook = func(addr common.Address, prevCodeHash common.Hash, prevCode []byte, codeHash common.Hash, code []byte)
// CodeChangeHookV2 is called when the code of an account changes.
CodeChangeHookV2 = func(addr common.Address, prevCodeHash common.Hash, prevCode []byte, codeHash common.Hash, code []byte, reason CodeChangeReason)
// StorageChangeHook is called when the storage of an account changes.
StorageChangeHook = func(addr common.Address, slot common.Hash, prev, new common.Hash)
@ -211,6 +214,7 @@ type Hooks struct {
OnNonceChange NonceChangeHook
OnNonceChangeV2 NonceChangeHookV2
OnCodeChange CodeChangeHook
OnCodeChangeV2 CodeChangeHookV2
OnStorageChange StorageChangeHook
OnLog LogHook
// Block hash read
@ -372,3 +376,31 @@ const (
// It is only emitted when the tracer has opted in to use the journaling wrapper (WrapWithJournal).
NonceChangeRevert NonceChangeReason = 6
)
// CodeChangeReason is used to indicate the reason for a code change.
type CodeChangeReason byte
//go:generate go run golang.org/x/tools/cmd/stringer -type=CodeChangeReason -trimprefix=CodeChange -output gen_code_change_reason_stringer.go
const (
CodeChangeUnspecified CodeChangeReason = 0
// CodeChangeContractCreation is when a new contract is deployed via CREATE/CREATE2 operations.
CodeChangeContractCreation CodeChangeReason = 1
// CodeChangeGenesis is when contract code is set during blockchain genesis or initial setup.
CodeChangeGenesis CodeChangeReason = 2
// CodeChangeAuthorization is when code is set via EIP-7702 Set Code Authorization.
CodeChangeAuthorization CodeChangeReason = 3
// CodeChangeAuthorizationClear is when EIP-7702 delegation is cleared by setting to zero address.
CodeChangeAuthorizationClear CodeChangeReason = 4
// CodeChangeSelfDestruct is when contract code is cleared due to self-destruct.
CodeChangeSelfDestruct CodeChangeReason = 5
// CodeChangeRevert is emitted when the code is reverted back to a previous value due to call failure.
// It is only emitted when the tracer has opted in to use the journaling wrapper (WrapWithJournal).
CodeChangeRevert CodeChangeReason = 6
)

View file

@ -42,12 +42,15 @@ func WrapWithJournal(hooks *Hooks) (*Hooks, error) {
return nil, errors.New("wrapping nil tracer")
}
// No state change to journal, return the wrapped hooks as is
if hooks.OnBalanceChange == nil && hooks.OnNonceChange == nil && hooks.OnNonceChangeV2 == nil && hooks.OnCodeChange == nil && hooks.OnStorageChange == nil {
if hooks.OnBalanceChange == nil && hooks.OnNonceChange == nil && hooks.OnNonceChangeV2 == nil && hooks.OnCodeChange == nil && hooks.OnCodeChangeV2 == nil && hooks.OnStorageChange == nil {
return hooks, nil
}
if hooks.OnNonceChange != nil && hooks.OnNonceChangeV2 != nil {
return nil, errors.New("cannot have both OnNonceChange and OnNonceChangeV2")
}
if hooks.OnCodeChange != nil && hooks.OnCodeChangeV2 != nil {
return nil, errors.New("cannot have both OnCodeChange and OnCodeChangeV2")
}
// Create a new Hooks instance and copy all hooks
wrapped := *hooks
@ -72,6 +75,9 @@ func WrapWithJournal(hooks *Hooks) (*Hooks, error) {
if hooks.OnCodeChange != nil {
wrapped.OnCodeChange = j.OnCodeChange
}
if hooks.OnCodeChangeV2 != nil {
wrapped.OnCodeChangeV2 = j.OnCodeChangeV2
}
if hooks.OnStorageChange != nil {
wrapped.OnStorageChange = j.OnStorageChange
}
@ -174,6 +180,19 @@ func (j *journal) OnCodeChange(addr common.Address, prevCodeHash common.Hash, pr
}
}
func (j *journal) OnCodeChangeV2(addr common.Address, prevCodeHash common.Hash, prevCode []byte, codeHash common.Hash, code []byte, reason CodeChangeReason) {
j.entries = append(j.entries, codeChange{
addr: addr,
prevCodeHash: prevCodeHash,
prevCode: prevCode,
newCodeHash: codeHash,
newCode: code,
})
if j.hooks.OnCodeChangeV2 != nil {
j.hooks.OnCodeChangeV2(addr, prevCodeHash, prevCode, codeHash, code, reason)
}
}
func (j *journal) OnStorageChange(addr common.Address, slot common.Hash, prev, new common.Hash) {
j.entries = append(j.entries, storageChange{addr: addr, slot: slot, prev: prev, new: new})
if j.hooks.OnStorageChange != nil {
@ -225,7 +244,9 @@ func (n nonceChange) revert(hooks *Hooks) {
}
func (c codeChange) revert(hooks *Hooks) {
if hooks.OnCodeChange != nil {
if hooks.OnCodeChangeV2 != nil {
hooks.OnCodeChangeV2(c.addr, c.newCodeHash, c.newCode, c.prevCodeHash, c.prevCode, CodeChangeRevert)
} else if hooks.OnCodeChange != nil {
hooks.OnCodeChange(c.addr, c.newCodeHash, c.newCode, c.prevCodeHash, c.prevCode)
}
}

View file

@ -23,6 +23,7 @@ import (
"testing"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
)
type testTracer struct {
@ -56,6 +57,11 @@ func (t *testTracer) OnCodeChange(addr common.Address, prevCodeHash common.Hash,
t.code = code
}
// OnCodeChangeV2 records the most recently written code and logs the
// transition for inspection by the test.
func (t *testTracer) OnCodeChangeV2(addr common.Address, prevCodeHash common.Hash, prevCode []byte, codeHash common.Hash, code []byte, reason CodeChangeReason) {
	t.code = code
	t.t.Logf("OnCodeChangeV2(%v, %v -> %v, %v)", addr, prevCodeHash, codeHash, reason)
}
func (t *testTracer) OnStorageChange(addr common.Address, slot common.Hash, prev common.Hash, new common.Hash) {
t.t.Logf("OnStorageCodeChange(%v, %v, %v -> %v)", addr, slot, prev, new)
if t.storage == nil {
@ -232,6 +238,27 @@ func TestOnNonceChangeV2(t *testing.T) {
}
}
// TestOnCodeChangeV2 checks that a code change reported through the V2 hook
// is journaled, and that reverting the enclosing call scope restores the
// previous (nil) code in the wrapped tracer.
func TestOnCodeChangeV2(t *testing.T) {
	tr := &testTracer{t: t}
	wr, err := WrapWithJournal(&Hooks{OnCodeChangeV2: tr.OnCodeChangeV2})
	if err != nil {
		t.Fatalf("failed to wrap test tracer: %v", err)
	}
	var (
		addr = common.HexToAddress("0x1234")
		code = []byte{1, 2, 3}
	)
	// Enter a scope, set code, then exit with reverted=true.
	wr.OnEnter(2, 0, addr, addr, nil, 1000, big.NewInt(0))
	wr.OnCodeChangeV2(addr, common.Hash{}, nil, crypto.Keccak256Hash(code), code, CodeChangeContractCreation)
	wr.OnExit(2, nil, 100, nil, true)

	// After revert, code should be nil
	if tr.code != nil {
		t.Fatalf("unexpected code after revert: %v", tr.code)
	}
}
func TestAllHooksCalled(t *testing.T) {
tracer := newTracerAllHooks()
hooks := tracer.hooks()
@ -253,10 +280,6 @@ func TestAllHooksCalled(t *testing.T) {
if field.Type.Kind() != reflect.Func {
continue
}
// Skip non-hooks, i.e. Copy
if field.Name == "copy" {
continue
}
// Skip if field is not set
if wrappedValue.Field(i).IsNil() {
continue
@ -298,6 +321,7 @@ func newTracerAllHooks() *tracerAllHooks {
t.hooksCalled[hooksType.Field(i).Name] = false
}
delete(t.hooksCalled, "OnNonceChange")
delete(t.hooksCalled, "OnCodeChange")
return t
}
@ -322,7 +346,7 @@ func (t *tracerAllHooks) hooks() *Hooks {
hooksValue := reflect.ValueOf(h).Elem()
for i := 0; i < hooksValue.NumField(); i++ {
field := hooksValue.Type().Field(i)
if field.Name == "OnNonceChange" {
if field.Name == "OnNonceChange" || field.Name == "OnCodeChange" {
continue
}
hookMethod := reflect.MakeFunc(field.Type, func(args []reflect.Value) []reflect.Value {

View file

@ -21,12 +21,15 @@ import (
"container/heap"
"errors"
"fmt"
"maps"
"math"
"math/big"
"os"
"path/filepath"
"slices"
"sort"
"sync"
"sync/atomic"
"time"
"github.com/ethereum/go-ethereum/common"
@ -55,11 +58,17 @@ const (
// tiny overflows causing all txs to move a shelf higher, wasting disk space.
txAvgSize = 4 * 1024
// txMaxSize is the maximum size a single transaction can have, outside
// the included blobs. Since blob transactions are pulled instead of pushed,
// and only a small metadata is kept in ram, the rest is on disk, there is
// no critical limit that should be enforced. Still, capping it to some sane
// limit can never hurt.
// txBlobOverhead is an approximation of the overhead that an additional blob
// has on transaction size. This is added to the slotter to avoid tiny
// overflows causing all txs to move a shelf higher, wasting disk space. A
// small buffer is added to the proof overhead.
txBlobOverhead = uint32(kzg4844.CellProofsPerBlob*len(kzg4844.Proof{}) + 64)
// txMaxSize is the maximum size a single transaction can have, including the
// blobs. Since blob transactions are pulled instead of pushed, and only a
// small metadata is kept in ram, the rest is on disk, there is no critical
// limit that should be enforced. Still, capping it to some sane limit can
// never hurt, which is aligned with maxBlobsPerTx constraint enforced internally.
txMaxSize = 1024 * 1024
// maxBlobsPerTx is the maximum number of blobs that a single transaction can
@ -83,6 +92,15 @@ const (
// limboedTransactionStore is the subfolder containing the currently included
// but not yet finalized transaction blobs.
limboedTransactionStore = "limbo"
// storeVersion is the current slotter layout used for the billy.Database
// store.
storeVersion = 1
// conversionTimeWindow defines the period after the Osaka fork during which
// the pool will still accept and convert legacy blob transactions. After this
// window, all legacy blob transactions will be rejected.
conversionTimeWindow = time.Hour * 2
)
// blobTxMeta is the minimal subset of types.BlobTx necessary to validate and
@ -92,6 +110,7 @@ const (
type blobTxMeta struct {
hash common.Hash // Transaction hash to maintain the lookup table
vhashes []common.Hash // Blob versioned hashes to maintain the lookup table
version byte // Blob transaction version to determine proof type
id uint64 // Storage ID in the pool's persistent store
storageSize uint32 // Byte size in the pool's persistent store
@ -115,10 +134,16 @@ type blobTxMeta struct {
// newBlobTxMeta retrieves the indexed metadata fields from a blob transaction
// and assembles a helper struct to track in memory.
// Requires the transaction to have a sidecar (or that we introduce a special version tag for no-sidecar).
func newBlobTxMeta(id uint64, size uint64, storageSize uint32, tx *types.Transaction) *blobTxMeta {
if tx.BlobTxSidecar() == nil {
// This should never happen, as the pool only admits blob transactions with a sidecar
panic("missing blob tx sidecar")
}
meta := &blobTxMeta{
hash: tx.Hash(),
vhashes: tx.BlobHashes(),
version: tx.BlobTxSidecar().Version,
id: id,
storageSize: storageSize,
size: size,
@ -312,12 +337,13 @@ type BlobPool struct {
stored uint64 // Useful data size of all transactions on disk
limbo *limbo // Persistent data store for the non-finalized blobs
signer types.Signer // Transaction signer to use for sender recovery
chain BlockChain // Chain object to access the state through
signer types.Signer // Transaction signer to use for sender recovery
chain BlockChain // Chain object to access the state through
cQueue *conversionQueue // The queue for performing legacy sidecar conversion (TODO: remove after Osaka)
head *types.Header // Current head of the chain
state *state.StateDB // Current state at the head of the chain
gasTip *uint256.Int // Currently accepted minimum gas tip
head atomic.Pointer[types.Header] // Current head of the chain
state *state.StateDB // Current state at the head of the chain
gasTip atomic.Pointer[uint256.Int] // Currently accepted minimum gas tip
lookup *lookup // Lookup table mapping blobs to txs and txs to billy entries
index map[common.Address][]*blobTxMeta // Blob transactions grouped by accounts, sorted by nonce
@ -342,6 +368,7 @@ func New(config Config, chain BlockChain, hasPendingAuth func(common.Address) bo
hasPendingAuth: hasPendingAuth,
signer: types.LatestSigner(chain.Config()),
chain: chain,
cQueue: newConversionQueue(), // Deprecate it after the osaka fork
lookup: newLookup(),
index: make(map[common.Address][]*blobTxMeta),
spent: make(map[common.Address]*uint256.Int),
@ -383,8 +410,17 @@ func (p *BlobPool) Init(gasTip uint64, head *types.Header, reserver txpool.Reser
if err != nil {
return err
}
p.head, p.state = head, state
p.head.Store(head)
p.state = state
// Create new slotter for pre-Osaka blob configuration.
slotter := newSlotter(eip4844.LatestMaxBlobsPerBlock(p.chain.Config()))
// See if we need to migrate the queue blob store after fusaka
slotter, err = tryMigrate(p.chain.Config(), slotter, queuedir)
if err != nil {
return err
}
// Index all transactions on disk and delete anything unprocessable
var fails []uint64
index := func(id uint64, size uint32, blob []byte) {
@ -392,7 +428,6 @@ func (p *BlobPool) Init(gasTip uint64, head *types.Header, reserver txpool.Reser
fails = append(fails, id)
}
}
slotter := newSlotter(eip4844.LatestMaxBlobsPerBlock(p.chain.Config()))
store, err := billy.Open(billy.Options{Path: queuedir, Repair: true}, slotter, index)
if err != nil {
return err
@ -416,17 +451,17 @@ func (p *BlobPool) Init(gasTip uint64, head *types.Header, reserver txpool.Reser
p.recheck(addr, nil)
}
var (
basefee = uint256.MustFromBig(eip1559.CalcBaseFee(p.chain.Config(), p.head))
basefee = uint256.MustFromBig(eip1559.CalcBaseFee(p.chain.Config(), head))
blobfee = uint256.NewInt(params.BlobTxMinBlobGasprice)
)
if p.head.ExcessBlobGas != nil {
blobfee = uint256.MustFromBig(eip4844.CalcBlobFee(p.chain.Config(), p.head))
if head.ExcessBlobGas != nil {
blobfee = uint256.MustFromBig(eip4844.CalcBlobFee(p.chain.Config(), head))
}
p.evict = newPriceHeap(basefee, blobfee, p.index)
// Pool initialized, attach the blob limbo to it to track blobs included
// recently but not yet finalized
p.limbo, err = newLimbo(limbodir, eip4844.LatestMaxBlobsPerBlock(p.chain.Config()))
p.limbo, err = newLimbo(p.chain.Config(), limbodir)
if err != nil {
p.Close()
return err
@ -450,6 +485,9 @@ func (p *BlobPool) Init(gasTip uint64, head *types.Header, reserver txpool.Reser
// Close closes down the underlying persistent store.
func (p *BlobPool) Close() error {
// Terminate the conversion queue
p.cQueue.close()
var errs []error
if p.limbo != nil { // Close might be invoked due to error in constructor, before p,limbo is set
if err := p.limbo.Close(); err != nil {
@ -808,7 +846,7 @@ func (p *BlobPool) Reset(oldHead, newHead *types.Header) {
log.Error("Failed to reset blobpool state", "err", err)
return
}
p.head = newHead
p.head.Store(newHead)
p.state = statedb
// Run the reorg between the old and new head and figure out which accounts
@ -831,7 +869,7 @@ func (p *BlobPool) Reset(oldHead, newHead *types.Header) {
}
}
// Flush out any blobs from limbo that are older than the latest finality
if p.chain.Config().IsCancun(p.head.Number, p.head.Time) {
if p.chain.Config().IsCancun(newHead.Number, newHead.Time) {
p.limbo.finalize(p.chain.CurrentFinalBlock())
}
// Reset the price heap for the new set of basefee/blobfee pairs
@ -847,6 +885,172 @@ func (p *BlobPool) Reset(oldHead, newHead *types.Header) {
basefeeGauge.Update(int64(basefee.Uint64()))
blobfeeGauge.Update(int64(blobfee.Uint64()))
p.updateStorageMetrics()
// Perform the conversion logic at the fork boundary
if !p.chain.Config().IsOsaka(oldHead.Number, oldHead.Time) && p.chain.Config().IsOsaka(newHead.Number, newHead.Time) {
// Deep copy all indexed transaction metadata.
var (
ids = make(map[common.Address]map[uint64]uint64)
txs = make(map[common.Address]map[uint64]common.Hash)
)
for sender, list := range p.index {
ids[sender] = make(map[uint64]uint64)
txs[sender] = make(map[uint64]common.Hash)
for _, m := range list {
ids[sender][m.nonce] = m.id
txs[sender][m.nonce] = m.hash
}
}
// Initiate the background conversion thread.
p.cQueue.launchBillyConversion(func() {
p.convertLegacySidecars(ids, txs)
})
}
}
// compareAndSwap checks if the specified transaction is still tracked in the
// pool and replaces its metadata accordingly. It should only be used for the
// fork-boundary bulk conversion. If it fails for some reason, the subsequent
// txs won't be dropped for simplicity, as we assume failure is very unlikely
// to happen.
//
// The returned flag indicates whether the replacement succeeded.
func (p *BlobPool) compareAndSwap(address common.Address, hash common.Hash, blob []byte, oldID uint64, oldStorageSize uint32) bool {
	p.lock.Lock()
	defer p.lock.Unlock()

	// Persist the re-encoded transaction first; the lookup table is only
	// switched over to the new slot after the write succeeds.
	newId, err := p.store.Put(blob)
	if err != nil {
		log.Error("Failed to store transaction", "hash", hash, "err", err)
		return false
	}
	newSize := uint64(len(blob))
	newStorageSize := p.store.Size(newId)
	// Terminate the procedure if the transaction was already evicted. The
	// newly added blob should be removed before return.
	if !p.lookup.update(hash, newId, newSize) {
		if derr := p.store.Delete(newId); derr != nil {
			log.Error("Failed to delete the dangling blob tx", "err", derr)
		} else {
			log.Warn("Deleted the dangling blob tx", "id", newId)
		}
		return false
	}
	// Update the metadata of blob transaction
	for _, meta := range p.index[address] {
		if meta.hash == hash {
			meta.id = newId
			meta.version = types.BlobSidecarVersion1
			meta.storageSize = newStorageSize
			meta.size = newSize
			// Account the storage size delta of the re-encoded blob.
			p.stored += uint64(newStorageSize)
			p.stored -= uint64(oldStorageSize)
			break
		}
	}
	// Best-effort cleanup of the legacy slot; a failure here only leaks disk
	// space, the pool state itself is already consistent.
	if err := p.store.Delete(oldID); err != nil {
		log.Error("Failed to delete the legacy transaction", "hash", hash, "id", oldID, "err", err)
	}
	return true
}
// convertLegacySidecar fetches transaction data from the store and performs an
// on-the-fly sidecar conversion (v0 blob proofs -> v1 cell proofs). This
// function is intended for use only during the Osaka fork transition period.
//
// Note the pool lock is released between reading the slot and the final
// compareAndSwap; the hash recheck below and the lookup update inside
// compareAndSwap guard against the slot being reused concurrently.
//
// The returned flag indicates whether the replacement succeeds or not.
func (p *BlobPool) convertLegacySidecar(sender common.Address, hash common.Hash, id uint64) bool {
	start := time.Now()

	// Retrieves the legacy blob transaction from the underlying store with
	// read lock held, preventing any potential data race around the slot
	// specified by the id.
	p.lock.RLock()
	data, err := p.store.Get(id)
	if err != nil {
		p.lock.RUnlock()
		// The transaction may have been evicted simultaneously, safe to skip conversion.
		log.Debug("Blob transaction is missing", "hash", hash, "id", id, "err", err)
		return false
	}
	oldStorageSize := p.store.Size(id)
	p.lock.RUnlock()

	// Decode the transaction, the failure is not expected and report the error
	// loudly if possible. If the blob transaction in this slot is corrupted,
	// leave it in the store, it will be dropped during the next pool
	// initialization.
	var tx types.Transaction
	if err = rlp.DecodeBytes(data, &tx); err != nil {
		log.Error("Blob transaction is corrupted", "hash", hash, "id", id, "err", err)
		return false
	}
	// Skip conversion if the transaction does not match the expected hash, or if it was
	// already converted. This can occur if the original transaction was evicted from the
	// pool and the slot was reused by a new one.
	if tx.Hash() != hash {
		log.Warn("Blob transaction was replaced", "hash", hash, "id", id, "stored", tx.Hash())
		return false
	}
	sc := tx.BlobTxSidecar()
	if sc.Version >= types.BlobSidecarVersion1 {
		log.Debug("Skipping conversion of blob tx", "hash", hash, "id", id)
		return false
	}
	// Perform the sidecar conversion, the failure is not expected and report the error
	// loudly if possible.
	if err := tx.BlobTxSidecar().ToV1(); err != nil {
		log.Error("Failed to convert blob transaction", "hash", hash, "err", err)
		return false
	}
	// Encode the converted transaction, the failure is not expected and report
	// the error loudly if possible.
	blob, err := rlp.EncodeToBytes(&tx)
	if err != nil {
		log.Error("Failed to encode blob transaction", "hash", tx.Hash(), "err", err)
		return false
	}
	// Replace the legacy blob transaction with the converted format.
	if !p.compareAndSwap(sender, hash, blob, id, oldStorageSize) {
		log.Error("Failed to replace the legacy transaction", "hash", hash)
		return false
	}
	log.Debug("Converted legacy blob transaction", "hash", hash, "elapsed", common.PrettyDuration(time.Since(start)))
	return true
}
// convertLegacySidecars converts all given transactions to sidecar version 1.
//
// If any of them fails to be converted, the subsequent transactions will still
// be processed, as we assume the failure is very unlikely to happen. If it does
// happen, those transactions will be stuck in the pool until eviction.
func (p *BlobPool) convertLegacySidecars(ids map[common.Address]map[uint64]uint64, txs map[common.Address]map[uint64]common.Hash) {
	start := time.Now()

	var converted, skipped int
	for addr, list := range txs {
		// Transactions evicted from the pool must be contiguous; if in any case
		// the transactions are gapped with each other, they will be discarded.
		// Process each account in ascending nonce order.
		nonces := slices.Collect(maps.Keys(list))
		slices.Sort(nonces)

		for _, nonce := range nonces {
			if p.convertLegacySidecar(addr, list[nonce], ids[addr][nonce]) {
				converted++
			} else {
				skipped++
			}
		}
	}
	log.Info("Completed blob transaction conversion", "discarded", skipped, "injected", converted, "elapsed", common.PrettyDuration(time.Since(start)))
}
// reorg assembles all the transactors and missing transactions between an old
@ -994,6 +1198,21 @@ func (p *BlobPool) reinject(addr common.Address, txhash common.Hash) error {
// TODO: seems like an easy optimization here would be getting the serialized tx
// from limbo instead of re-serializing it here.
// Converts reorged-out legacy blob transactions to the new format to prevent
// them from becoming stuck in the pool until eviction.
//
// Performance note: Conversion takes ~140ms (Mac M1 Pro). Since a maximum of
// 9 legacy blob transactions are allowed in a block pre-Osaka, an adversary
// could theoretically halt a Geth node for ~1.2s by reorging per block. However,
// this attack is financially inefficient to execute.
head := p.head.Load()
if p.chain.Config().IsOsaka(head.Number, head.Time) && tx.BlobTxSidecar().Version == types.BlobSidecarVersion0 {
if err := tx.BlobTxSidecar().ToV1(); err != nil {
log.Error("Failed to convert the legacy sidecar", "err", err)
return err
}
log.Info("Legacy blob transaction is reorged", "hash", tx.Hash())
}
// Serialize the transaction back into the primary datastore.
blob, err := rlp.EncodeToBytes(tx)
if err != nil {
@ -1032,14 +1251,15 @@ func (p *BlobPool) SetGasTip(tip *big.Int) {
defer p.lock.Unlock()
// Store the new minimum gas tip
old := p.gasTip
p.gasTip = uint256.MustFromBig(tip)
old := p.gasTip.Load()
newTip := uint256.MustFromBig(tip)
p.gasTip.Store(newTip)
// If the min miner fee increased, remove transactions below the new threshold
if old == nil || p.gasTip.Cmp(old) > 0 {
if old == nil || newTip.Cmp(old) > 0 {
for addr, txs := range p.index {
for i, tx := range txs {
if tx.execTipCap.Cmp(p.gasTip) < 0 {
if tx.execTipCap.Cmp(newTip) < 0 {
// Drop the offending transaction
var (
ids = []uint64{tx.id}
@ -1099,10 +1319,10 @@ func (p *BlobPool) ValidateTxBasics(tx *types.Transaction) error {
Config: p.chain.Config(),
Accept: 1 << types.BlobTxType,
MaxSize: txMaxSize,
MinTip: p.gasTip.ToBig(),
MinTip: p.gasTip.Load().ToBig(),
MaxBlobCount: maxBlobsPerTx,
}
return txpool.ValidateTransaction(tx, p.head, p.signer, opts)
return txpool.ValidateTransaction(tx, p.head.Load(), p.signer, opts)
}
// checkDelegationLimit determines if the tx sender is delegated or has a
@ -1140,10 +1360,10 @@ func (p *BlobPool) checkDelegationLimit(tx *types.Transaction) error {
// validateTx checks whether a transaction is valid according to the consensus
// rules and adheres to some heuristic limits of the local node (price and size).
//
// This function assumes the static validation has been performed already and
// only runs the stateful checks with lock protection.
func (p *BlobPool) validateTx(tx *types.Transaction) error {
if err := p.ValidateTxBasics(tx); err != nil {
return err
}
// Ensure the transaction adheres to the stateful pool filters (nonce, balance)
stateOpts := &txpool.ValidationOptionsWithState{
State: p.state,
@ -1298,9 +1518,20 @@ func (p *BlobPool) GetMetadata(hash common.Hash) *txpool.TxMetadata {
}
// GetBlobs returns a number of blobs and proofs for the given versioned hashes.
// Blobpool must place responses in the order given in the request, using null
// for any missing blobs.
//
// For instance, if the request is [A_versioned_hash, B_versioned_hash,
// C_versioned_hash] and blobpool has data for blobs A and C, but doesn't have
// data for B, the response MUST be [A, null, C].
//
// This is a utility method for the engine API, enabling consensus clients to
// retrieve blobs from the pools directly instead of the network.
func (p *BlobPool) GetBlobs(vhashes []common.Hash, version byte) ([]*kzg4844.Blob, []kzg4844.Commitment, [][]kzg4844.Proof, error) {
//
// The version argument specifies the type of proofs to return, either the
// blob proofs (version 0) or the cell proofs (version 1). Proofs conversion is
// CPU intensive, so only done if explicitly requested with the convert flag.
func (p *BlobPool) GetBlobs(vhashes []common.Hash, version byte, convert bool) ([]*kzg4844.Blob, []kzg4844.Commitment, [][]kzg4844.Proof, error) {
var (
blobs = make([]*kzg4844.Blob, len(vhashes))
commitments = make([]kzg4844.Commitment, len(vhashes))
@ -1312,31 +1543,36 @@ func (p *BlobPool) GetBlobs(vhashes []common.Hash, version byte) ([]*kzg4844.Blo
for i, h := range vhashes {
indices[h] = append(indices[h], i)
}
for _, vhash := range vhashes {
// Skip duplicate vhash that was already resolved in a previous iteration
if _, ok := filled[vhash]; ok {
// Skip vhash that was already resolved in a previous iteration
continue
}
// Retrieve the corresponding blob tx with the vhash
// Retrieve the corresponding blob tx with the vhash.
p.lock.RLock()
txID, exists := p.lookup.storeidOfBlob(vhash)
p.lock.RUnlock()
if !exists {
return nil, nil, nil, fmt.Errorf("blob with vhash %x is not found", vhash)
continue
}
data, err := p.store.Get(txID)
if err != nil {
return nil, nil, nil, err
log.Error("Tracked blob transaction missing from store", "id", txID, "err", err)
continue
}
// Decode the blob transaction
tx := new(types.Transaction)
if err := rlp.DecodeBytes(data, tx); err != nil {
return nil, nil, nil, err
log.Error("Blobs corrupted for traced transaction", "id", txID, "err", err)
continue
}
sidecar := tx.BlobTxSidecar()
if sidecar == nil {
return nil, nil, nil, fmt.Errorf("blob tx without sidecar %x", tx.Hash())
log.Error("Blob tx without sidecar", "hash", tx.Hash(), "id", txID)
continue
}
// Traverse the blobs in the transaction
for i, hash := range tx.BlobHashes() {
@ -1344,6 +1580,14 @@ func (p *BlobPool) GetBlobs(vhashes []common.Hash, version byte) ([]*kzg4844.Blo
if !ok {
continue // non-interesting blob
}
// Mark hash as seen.
filled[hash] = struct{}{}
if sidecar.Version != version && !convert {
// Skip blobs with incompatible version. Note we still track the blob hash
// in `filled` here, ensuring that we do not resolve this tx another time.
continue
}
// Get or convert the proof.
var pf []kzg4844.Proof
switch version {
case types.BlobSidecarVersion0:
@ -1376,7 +1620,6 @@ func (p *BlobPool) GetBlobs(vhashes []common.Hash, version byte) ([]*kzg4844.Blo
commitments[index] = sidecar.Commitments[i]
proofs[index] = pf
}
filled[hash] = struct{}{}
}
}
return blobs, commitments, proofs, nil
@ -1397,48 +1640,59 @@ func (p *BlobPool) AvailableBlobs(vhashes []common.Hash) int {
return available
}
// convertSidecar converts the legacy sidecar in the submitted transactions
// if Osaka fork has been activated.
func (p *BlobPool) convertSidecar(txs []*types.Transaction) ([]*types.Transaction, []error) {
head := p.chain.CurrentBlock()
if !p.chain.Config().IsOsaka(head.Number, head.Time) {
return txs, make([]error, len(txs))
// preCheck performs the static validation upon the provided tx and converts
// the legacy sidecars if Osaka fork has been activated with a short time window.
//
// This function is pure static and lock free.
func (p *BlobPool) preCheck(tx *types.Transaction) error {
var (
head = p.head.Load()
isOsaka = p.chain.Config().IsOsaka(head.Number, head.Time)
deadline time.Time
)
if isOsaka {
deadline = time.Unix(int64(*p.chain.Config().OsakaTime), 0).Add(conversionTimeWindow)
}
var errs []error
for _, tx := range txs {
sidecar := tx.BlobTxSidecar()
if sidecar == nil {
errs = append(errs, errors.New("missing sidecar in blob transaction"))
continue
}
if sidecar.Version == types.BlobSidecarVersion0 {
if err := sidecar.ToV1(); err != nil {
errs = append(errs, err)
continue
}
}
errs = append(errs, nil)
// Validate the transaction statically at first to avoid unnecessary
// conversion. This step doesn't require lock protection.
if err := p.ValidateTxBasics(tx); err != nil {
return err
}
return txs, errs
// Before the Osaka fork, reject the blob txs with cell proofs
if !isOsaka {
if tx.BlobTxSidecar().Version == types.BlobSidecarVersion0 {
return nil
} else {
return errors.New("cell proof is not supported yet")
}
}
// After the Osaka fork, reject the legacy blob txs if the conversion
// time window is passed.
if tx.BlobTxSidecar().Version == types.BlobSidecarVersion1 {
return nil
}
if head.Time > uint64(deadline.Unix()) {
return errors.New("legacy blob tx is not supported")
}
// Convert the legacy sidecar after Osaka fork. This could be a long
// procedure which takes a few seconds, even minutes if there is a long
// queue. Fortunately it will only block the routine of the source peer
// announcing the tx, without affecting other parts.
return p.cQueue.convert(tx)
}
// Add inserts a set of blob transactions into the pool if they pass validation (both
// consensus validity and pool restrictions).
//
// Note, if sync is set the method will block until all internal maintenance
// related to the add is finished. Only use this during tests for determinism.
func (p *BlobPool) Add(txs []*types.Transaction, sync bool) []error {
var (
errs []error
adds = make([]*types.Transaction, 0, len(txs))
errs []error = make([]error, len(txs))
adds = make([]*types.Transaction, 0, len(txs))
)
txs, errs = p.convertSidecar(txs)
for i, tx := range txs {
if errs[i] != nil {
if errs[i] = p.preCheck(tx); errs[i] != nil {
continue
}
errs[i] = p.add(tx)
if errs[i] == nil {
if errs[i] = p.add(tx); errs[i] == nil {
adds = append(adds, tx.WithoutBlobTxSidecar())
}
}
@ -1678,7 +1932,7 @@ func (p *BlobPool) drop() {
func (p *BlobPool) Pending(filter txpool.PendingFilter) map[common.Address][]*txpool.LazyTransaction {
// If only plain transactions are requested, this pool is unsuitable as it
// contains none, don't even bother.
if filter.OnlyPlainTxs {
if !filter.BlobTxs {
return nil
}
// Track the amount of time waiting to retrieve the list of pending blob txs
@ -1699,6 +1953,11 @@ func (p *BlobPool) Pending(filter txpool.PendingFilter) map[common.Address][]*tx
for addr, txs := range p.index {
lazies := make([]*txpool.LazyTransaction, 0, len(txs))
for _, tx := range txs {
// Skip v0 or v1 blob transactions depending on the filter
if tx.version != filter.BlobVersion {
break // skip the rest because of nonce ordering
}
// If transaction filtering was requested, discard badly priced ones
if filter.MinTip != nil && filter.BaseFee != nil {
if tx.execFeeCap.Lt(filter.BaseFee) {
@ -1926,7 +2185,7 @@ func (p *BlobPool) Clear() {
p.spent = make(map[common.Address]*uint256.Int)
var (
basefee = uint256.MustFromBig(eip1559.CalcBaseFee(p.chain.Config(), p.head))
basefee = uint256.MustFromBig(eip1559.CalcBaseFee(p.chain.Config(), p.head.Load()))
blobfee = uint256.NewInt(params.BlobTxMinBlobGasprice)
)
p.evict = newPriceHeap(basefee, blobfee, p.index)

View file

@ -24,6 +24,7 @@ import (
"fmt"
"math"
"math/big"
"math/rand"
"os"
"path/filepath"
"reflect"
@ -41,6 +42,7 @@ import (
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/crypto/kzg4844"
"github.com/ethereum/go-ethereum/internal/testrand"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp"
"github.com/holiman/billy"
@ -86,6 +88,12 @@ type testBlockChain struct {
statedb *state.StateDB
blocks map[uint64]*types.Block
blockTime *uint64
}
// setHeadTime overrides the timestamp reported for the chain head in tests.
func (bc *testBlockChain) setHeadTime(timestamp uint64) {
	bc.blockTime = &timestamp
}
func (bc *testBlockChain) Config() *params.ChainConfig {
@ -103,6 +111,10 @@ func (bc *testBlockChain) CurrentBlock() *types.Header {
blockTime = *bc.config.CancunTime + 1
gasLimit = uint64(30_000_000)
)
if bc.blockTime != nil {
blockTime = *bc.blockTime
}
lo := new(big.Int)
hi := new(big.Int).Mul(big.NewInt(5714), new(big.Int).Exp(big.NewInt(10), big.NewInt(18), nil))
@ -262,8 +274,8 @@ func makeUnsignedTx(nonce uint64, gasTipCap uint64, gasFeeCap uint64, blobFeeCap
return makeUnsignedTxWithTestBlob(nonce, gasTipCap, gasFeeCap, blobFeeCap, rnd.Intn(len(testBlobs)))
}
// makeUnsignedTx is a utility method to construct a random blob transaction
// without signing it.
// makeUnsignedTxWithTestBlob is a utility method to construct a random blob transaction
// with a specific test blob without signing it.
func makeUnsignedTxWithTestBlob(nonce uint64, gasTipCap uint64, gasFeeCap uint64, blobFeeCap uint64, blobIdx int) *types.BlobTx {
return &types.BlobTx{
ChainID: uint256.MustFromBig(params.MainnetChainConfig.ChainID),
@ -421,11 +433,11 @@ func verifyBlobRetrievals(t *testing.T, pool *BlobPool) {
hashes = append(hashes, tx.vhashes...)
}
}
blobs1, _, proofs1, err := pool.GetBlobs(hashes, types.BlobSidecarVersion0)
blobs1, _, proofs1, err := pool.GetBlobs(hashes, types.BlobSidecarVersion0, false)
if err != nil {
t.Fatal(err)
}
blobs2, _, proofs2, err := pool.GetBlobs(hashes, types.BlobSidecarVersion1)
blobs2, _, proofs2, err := pool.GetBlobs(hashes, types.BlobSidecarVersion1, false)
if err != nil {
t.Fatal(err)
}
@ -439,22 +451,18 @@ func verifyBlobRetrievals(t *testing.T, pool *BlobPool) {
return
}
for i, hash := range hashes {
// If an item is missing, but shouldn't, error
if blobs1[i] == nil || proofs1[i] == nil {
t.Errorf("tracked blob retrieval failed: item %d, hash %x", i, hash)
continue
}
if blobs2[i] == nil || proofs2[i] == nil {
// If an item is missing from both, but shouldn't, error
if (blobs1[i] == nil || proofs1[i] == nil) && (blobs2[i] == nil || proofs2[i] == nil) {
t.Errorf("tracked blob retrieval failed: item %d, hash %x", i, hash)
continue
}
// Item retrieved, make sure it matches the expectation
index := testBlobIndices[hash]
if *blobs1[i] != *testBlobs[index] || proofs1[i][0] != testBlobProofs[index] {
if blobs1[i] != nil && (*blobs1[i] != *testBlobs[index] || proofs1[i][0] != testBlobProofs[index]) {
t.Errorf("retrieved blob or proof mismatch: item %d, hash %x", i, hash)
continue
}
if *blobs2[i] != *testBlobs[index] || !slices.Equal(proofs2[i], testBlobCellProofs[index]) {
if blobs2[i] != nil && (*blobs2[i] != *testBlobs[index] || !slices.Equal(proofs2[i], testBlobCellProofs[index])) {
t.Errorf("retrieved blob or proof mismatch: item %d, hash %x", i, hash)
continue
}
@ -1163,6 +1171,115 @@ func TestChangingSlotterSize(t *testing.T) {
}
}
// TestBillyMigration tests the billy migration from the default slotter to
// the PeerDAS slotter. This tests both the migration of the slotter as well
// as increasing the slotter size of the new slotter.
//
// Bug fix: the success-expectation branch previously checked maxBlobs == 10,
// which is unreachable since the loop only iterates over {6, 24}; the second
// iteration was therefore never verified. It now checks maxBlobs == 24.
func TestBillyMigration(t *testing.T) {
	// Create a temporary folder for the persistent backend
	storage := t.TempDir()

	os.MkdirAll(filepath.Join(storage, pendingTransactionStore), 0700)
	os.MkdirAll(filepath.Join(storage, limboedTransactionStore), 0700)

	// Create the billy with the old slotter
	oldSlotter := newSlotterEIP7594(6)
	store, _ := billy.Open(billy.Options{Path: filepath.Join(storage, pendingTransactionStore)}, oldSlotter, nil)

	// Create transactions from a few accounts.
	var (
		key1, _ = crypto.GenerateKey()
		key2, _ = crypto.GenerateKey()
		key3, _ = crypto.GenerateKey()

		addr1 = crypto.PubkeyToAddress(key1.PublicKey)
		addr2 = crypto.PubkeyToAddress(key2.PublicKey)
		addr3 = crypto.PubkeyToAddress(key3.PublicKey)

		tx1 = makeMultiBlobTx(0, 1, 1000, 100, 6, 0, key1, types.BlobSidecarVersion0)
		tx2 = makeMultiBlobTx(0, 1, 800, 70, 6, 0, key2, types.BlobSidecarVersion0)
		tx3 = makeMultiBlobTx(0, 1, 800, 110, 24, 0, key3, types.BlobSidecarVersion0)

		blob1, _ = rlp.EncodeToBytes(tx1)
		blob2, _ = rlp.EncodeToBytes(tx2)
	)
	// Write the two safely sized txs to store. note: although the store is
	// configured for a blob count of 6, it can also support around ~1mb of call
	// data - all this to say that we aren't using the absolute largest shelf
	// available.
	store.Put(blob1)
	store.Put(blob2)
	store.Close()

	// Mimic a blobpool with max blob count of 6 upgrading to a max blob count of 24.
	for _, maxBlobs := range []int{6, 24} {
		statedb, _ := state.New(types.EmptyRootHash, state.NewDatabaseForTesting())
		statedb.AddBalance(addr1, uint256.NewInt(1_000_000_000), tracing.BalanceChangeUnspecified)
		statedb.AddBalance(addr2, uint256.NewInt(1_000_000_000), tracing.BalanceChangeUnspecified)
		statedb.AddBalance(addr3, uint256.NewInt(1_000_000_000), tracing.BalanceChangeUnspecified)
		statedb.Commit(0, true, false)

		// Make custom chain config where the max blob count changes based on the loop variable.
		zero := uint64(0)
		config := &params.ChainConfig{
			ChainID:     big.NewInt(1),
			LondonBlock: big.NewInt(0),
			BerlinBlock: big.NewInt(0),
			CancunTime:  &zero,
			OsakaTime:   &zero,
			BlobScheduleConfig: &params.BlobScheduleConfig{
				Cancun: &params.BlobConfig{
					Target:         maxBlobs / 2,
					Max:            maxBlobs,
					UpdateFraction: params.DefaultCancunBlobConfig.UpdateFraction,
				},
				Osaka: &params.BlobConfig{
					Target:         maxBlobs / 2,
					Max:            maxBlobs,
					UpdateFraction: params.DefaultCancunBlobConfig.UpdateFraction,
				},
			},
		}
		chain := &testBlockChain{
			config:  config,
			basefee: uint256.NewInt(1050),
			blobfee: uint256.NewInt(105),
			statedb: statedb,
		}
		pool := New(Config{Datadir: storage}, chain, nil)
		if err := pool.Init(1, chain.CurrentBlock(), newReserver()); err != nil {
			t.Fatalf("failed to create blob pool: %v", err)
		}
		// Try to add the big blob tx. In the initial iteration it should overflow
		// the pool. On the subsequent iteration it should be accepted.
		errs := pool.Add([]*types.Transaction{tx3}, true)
		if _, ok := pool.index[addr3]; ok && maxBlobs == 6 {
			t.Errorf("expected insert of oversized blob tx to fail: blobs=24, maxBlobs=%d, err=%v", maxBlobs, errs[0])
		} else if !ok && maxBlobs == 24 {
			t.Errorf("expected insert of oversized blob tx to succeed: blobs=24, maxBlobs=%d, err=%v", maxBlobs, errs[0])
		}
		// Verify the regular two txs are always available.
		if got := pool.Get(tx1.Hash()); got == nil {
			t.Errorf("expected tx %s from %s in pool", tx1.Hash(), addr1)
		}
		if got := pool.Get(tx2.Hash()); got == nil {
			t.Errorf("expected tx %s from %s in pool", tx2.Hash(), addr2)
		}
		// Verify all the calculated pool internals. Interestingly, this is **not**
		// a duplication of the above checks, this actually validates the verifier
		// using the above already hard coded checks.
		//
		// Do not remove this, nor alter the above to be generic.
		verifyPoolInternals(t, pool)
		pool.Close()
	}
}
// TestBlobCountLimit tests the blobpool enforced limits on the max blob count.
func TestBlobCountLimit(t *testing.T) {
var (
@ -1641,8 +1758,8 @@ func TestAdd(t *testing.T) {
// Add each transaction one by one, verifying the pool internals in between
for j, add := range tt.adds {
signed, _ := types.SignNewTx(keys[add.from], types.LatestSigner(params.MainnetChainConfig), add.tx)
if err := pool.add(signed); !errors.Is(err, add.err) {
t.Errorf("test %d, tx %d: adding transaction error mismatch: have %v, want %v", i, j, err, add.err)
if errs := pool.Add([]*types.Transaction{signed}, true); !errors.Is(errs[0], add.err) {
t.Errorf("test %d, tx %d: adding transaction error mismatch: have %v, want %v", i, j, errs[0], add.err)
}
if add.err == nil {
size, exist := pool.lookup.sizeOfTx(signed.Hash())
@ -1689,9 +1806,14 @@ func TestAdd(t *testing.T) {
}
}
// Tests that adding the transactions with legacy sidecar and expect them to
// be converted to new format correctly.
// Tests that transactions with legacy sidecars are accepted within the
// conversion window but rejected after it has passed.
// TestAddLegacyBlobTx runs the legacy-sidecar admission test twice: once
// while the conversion window is still open (txs accepted) and once after
// it has elapsed (txs rejected).
func TestAddLegacyBlobTx(t *testing.T) {
	for _, withinWindow := range []bool{true, false} {
		testAddLegacyBlobTx(t, withinWindow)
	}
}
func testAddLegacyBlobTx(t *testing.T, accept bool) {
var (
key1, _ = crypto.GenerateKey()
key2, _ = crypto.GenerateKey()
@ -1711,6 +1833,15 @@ func TestAddLegacyBlobTx(t *testing.T) {
blobfee: uint256.NewInt(105),
statedb: statedb,
}
var timeDiff uint64
if accept {
timeDiff = uint64(conversionTimeWindow.Seconds()) - 1
} else {
timeDiff = uint64(conversionTimeWindow.Seconds()) + 1
}
time := *params.MergedTestChainConfig.OsakaTime + timeDiff
chain.setHeadTime(time)
pool := New(Config{Datadir: t.TempDir()}, chain, nil)
if err := pool.Init(1, chain.CurrentBlock(), newReserver()); err != nil {
t.Fatalf("failed to create blob pool: %v", err)
@ -1720,12 +1851,15 @@ func TestAddLegacyBlobTx(t *testing.T) {
var (
tx1 = makeMultiBlobTx(0, 1, 1000, 100, 6, 0, key1, types.BlobSidecarVersion0)
tx2 = makeMultiBlobTx(0, 1, 800, 70, 6, 6, key2, types.BlobSidecarVersion0)
tx3 = makeMultiBlobTx(1, 1, 800, 70, 6, 12, key2, types.BlobSidecarVersion1)
txs = []*types.Transaction{tx1, tx2}
)
errs := pool.Add([]*types.Transaction{tx1, tx2, tx3}, true)
errs := pool.Add(txs, true)
for _, err := range errs {
if err != nil {
t.Fatalf("failed to add tx: %v", err)
if accept && err != nil {
t.Fatalf("expected tx add to succeed, %v", err)
}
if !accept && err == nil {
t.Fatal("expected tx add to fail")
}
}
verifyPoolInternals(t, pool)
@ -1814,10 +1948,11 @@ func TestGetBlobs(t *testing.T) {
}
cases := []struct {
start int
limit int
version byte
expErr bool
start int
limit int
fillRandom bool // Whether to randomly fill some of the requested blobs with unknowns
version byte // Blob sidecar version to request
convert bool // Whether to convert version on retrieval
}{
{
start: 0, limit: 6,
@ -1827,6 +1962,14 @@ func TestGetBlobs(t *testing.T) {
start: 0, limit: 6,
version: types.BlobSidecarVersion1,
},
{
start: 0, limit: 6, fillRandom: true,
version: types.BlobSidecarVersion0,
},
{
start: 0, limit: 6, fillRandom: true,
version: types.BlobSidecarVersion1,
},
{
start: 3, limit: 9,
version: types.BlobSidecarVersion0,
@ -1835,6 +1978,14 @@ func TestGetBlobs(t *testing.T) {
start: 3, limit: 9,
version: types.BlobSidecarVersion1,
},
{
start: 3, limit: 9, fillRandom: true,
version: types.BlobSidecarVersion0,
},
{
start: 3, limit: 9, fillRandom: true,
version: types.BlobSidecarVersion1,
},
{
start: 3, limit: 15,
version: types.BlobSidecarVersion0,
@ -1843,6 +1994,14 @@ func TestGetBlobs(t *testing.T) {
start: 3, limit: 15,
version: types.BlobSidecarVersion1,
},
{
start: 3, limit: 15, fillRandom: true,
version: types.BlobSidecarVersion0,
},
{
start: 3, limit: 15, fillRandom: true,
version: types.BlobSidecarVersion1,
},
{
start: 0, limit: 18,
version: types.BlobSidecarVersion0,
@ -1852,57 +2011,268 @@ func TestGetBlobs(t *testing.T) {
version: types.BlobSidecarVersion1,
},
{
start: 18, limit: 20,
start: 0, limit: 18, fillRandom: true,
version: types.BlobSidecarVersion0,
expErr: true,
},
{
start: 0, limit: 18, fillRandom: true,
version: types.BlobSidecarVersion1,
},
{
start: 0, limit: 18, fillRandom: true,
version: types.BlobSidecarVersion1,
convert: true, // Convert some version 0 blobs to version 1 while retrieving
},
}
for i, c := range cases {
var vhashes []common.Hash
var (
vhashes []common.Hash
filled = make(map[int]struct{})
)
if c.fillRandom {
filled[len(vhashes)] = struct{}{}
vhashes = append(vhashes, testrand.Hash())
}
for j := c.start; j < c.limit; j++ {
vhashes = append(vhashes, testBlobVHashes[j])
if c.fillRandom && rand.Intn(2) == 0 {
filled[len(vhashes)] = struct{}{}
vhashes = append(vhashes, testrand.Hash())
}
}
if c.fillRandom {
filled[len(vhashes)] = struct{}{}
vhashes = append(vhashes, testrand.Hash())
}
blobs, _, proofs, err := pool.GetBlobs(vhashes, c.version, c.convert)
if err != nil {
t.Errorf("Unexpected error for case %d, %v", i, err)
}
blobs, _, proofs, err := pool.GetBlobs(vhashes, c.version)
if c.expErr {
if err == nil {
t.Errorf("Unexpected return, want error for case %d", i)
}
} else {
if err != nil {
t.Errorf("Unexpected error for case %d, %v", i, err)
}
// Cross validate what we received vs what we wanted
length := c.limit - c.start
if len(blobs) != length || len(proofs) != length {
t.Errorf("retrieved blobs/proofs size mismatch: have %d/%d, want %d", len(blobs), len(proofs), length)
// Cross validate what we received vs what we wanted
length := c.limit - c.start
wantLen := length + len(filled)
if len(blobs) != wantLen || len(proofs) != wantLen {
t.Errorf("retrieved blobs/proofs size mismatch: have %d/%d, want %d", len(blobs), len(proofs), wantLen)
continue
}
var unknown int
for j := 0; j < len(blobs); j++ {
testBlobIndex := c.start + j - unknown
if _, exist := filled[j]; exist {
if blobs[j] != nil || proofs[j] != nil {
t.Errorf("Unexpected blob and proof, item %d", j)
}
unknown++
continue
}
for j := 0; j < len(blobs); j++ {
// If an item is missing, but shouldn't, error
if blobs[j] == nil || proofs[j] == nil {
// If an item is missing, but shouldn't, error
if blobs[j] == nil || proofs[j] == nil {
// This is only an error if there was no version mismatch
if c.convert ||
(c.version == types.BlobSidecarVersion1 && 6 <= testBlobIndex && testBlobIndex < 12) ||
(c.version == types.BlobSidecarVersion0 && (testBlobIndex < 6 || 12 <= testBlobIndex)) {
t.Errorf("tracked blob retrieval failed: item %d, hash %x", j, vhashes[j])
continue
}
// Item retrieved, make sure the blob matches the expectation
if *blobs[j] != *testBlobs[c.start+j] {
t.Errorf("retrieved blob mismatch: item %d, hash %x", j, vhashes[j])
continue
continue
}
// Item retrieved, make sure the blob matches the expectation
if *blobs[j] != *testBlobs[testBlobIndex] {
t.Errorf("retrieved blob mismatch: item %d, hash %x", j, vhashes[j])
continue
}
// Item retrieved, make sure the proof matches the expectation
if c.version == types.BlobSidecarVersion0 {
if proofs[j][0] != testBlobProofs[testBlobIndex] {
t.Errorf("retrieved proof mismatch: item %d, hash %x", j, vhashes[j])
}
// Item retrieved, make sure the proof matches the expectation
if c.version == types.BlobSidecarVersion0 {
if proofs[j][0] != testBlobProofs[c.start+j] {
t.Errorf("retrieved proof mismatch: item %d, hash %x", j, vhashes[j])
}
} else {
want, _ := kzg4844.ComputeCellProofs(blobs[j])
if !reflect.DeepEqual(want, proofs[j]) {
t.Errorf("retrieved proof mismatch: item %d, hash %x", j, vhashes[j])
}
} else {
want, _ := kzg4844.ComputeCellProofs(blobs[j])
if !reflect.DeepEqual(want, proofs[j]) {
t.Errorf("retrieved proof mismatch: item %d, hash %x", j, vhashes[j])
}
}
}
}
pool.Close()
}
// TestSidecarConversion will verify that after the Osaka fork, all legacy
// sidecars in the pool are successfully converted to v1 sidecars.
func TestSidecarConversion(t *testing.T) {
	// log.SetDefault(log.NewLogger(log.NewTerminalHandlerWithLevel(os.Stderr, log.LevelTrace, true)))

	// Create a temporary folder for the persistent backend
	storage := t.TempDir()
	os.MkdirAll(filepath.Join(storage, pendingTransactionStore), 0700)

	var (
		preOsakaTxs  = make(types.Transactions, 10)
		postOsakaTxs = make(types.Transactions, 3)
		keys         = make([]*ecdsa.PrivateKey, len(preOsakaTxs)+len(postOsakaTxs))
		addrs        = make([]common.Address, len(preOsakaTxs)+len(postOsakaTxs))
		statedb, _   = state.New(types.EmptyRootHash, state.NewDatabaseForTesting())
	)
	for i := range keys {
		keys[i], _ = crypto.GenerateKey()
		addrs[i] = crypto.PubkeyToAddress(keys[i].PublicKey)
		statedb.AddBalance(addrs[i], uint256.NewInt(1_000_000_000), tracing.BalanceChangeUnspecified)
	}
	for i := range preOsakaTxs {
		preOsakaTxs[i] = makeMultiBlobTx(0, 1, 1000, 100, 2, 0, keys[i], types.BlobSidecarVersion0)
	}
	for i := range postOsakaTxs {
		if i == 0 {
			// First has a v0 sidecar. The continue is required so the v1
			// assignment below does not immediately overwrite it; the v0 tx
			// is what exercises the on-demand conversion path later on.
			postOsakaTxs[i] = makeMultiBlobTx(0, 1, 1000, 100, 1, 0, keys[len(preOsakaTxs)+i], types.BlobSidecarVersion0)
			continue
		}
		postOsakaTxs[i] = makeMultiBlobTx(0, 1, 1000, 100, 1, 0, keys[len(preOsakaTxs)+i], types.BlobSidecarVersion1)
	}
	statedb.Commit(0, true, false)

	// Test plan:
	// 1) Create a bunch v0 sidecar txs and add to pool before Osaka.
	// 2) Pass in new Osaka header to activate the conversion thread.
	// 3) Continue adding both v0 and v1 transactions to the pool.
	// 4) Verify that as additional blocks come in, transactions involved in the
	//    migration are correctly discarded.
	config := &params.ChainConfig{
		ChainID:            big.NewInt(1),
		LondonBlock:        big.NewInt(0),
		BerlinBlock:        big.NewInt(0),
		CancunTime:         newUint64(0),
		PragueTime:         newUint64(0),
		OsakaTime:          newUint64(1),
		BlobScheduleConfig: params.DefaultBlobSchedule,
	}
	chain := &testBlockChain{
		config:  config,
		basefee: uint256.NewInt(1050),
		blobfee: uint256.NewInt(105),
		statedb: statedb,
		blocks:  make(map[uint64]*types.Block),
	}
	// Create 3 blocks:
	// - the current block, before Osaka
	// - the first block after Osaka
	// - another post-Osaka block with several transactions in it
	header0 := chain.CurrentBlock()
	header0.Time = 0
	chain.blocks[0] = types.NewBlockWithHeader(header0)

	header1 := chain.CurrentBlock()
	header1.Number = big.NewInt(1)
	header1.Time = 1
	chain.blocks[1] = types.NewBlockWithHeader(header1)

	header2 := chain.CurrentBlock()
	header2.Time = 2
	header2.Number = big.NewInt(2)

	// Make a copy of one of the pre-Osaka transactions and convert it to v1 here
	// so that we can add it to the pool later and ensure a duplicate is not added
	// by the conversion queue.
	tx := preOsakaTxs[len(preOsakaTxs)-1]
	sc := *tx.BlobTxSidecar() // copy sidecar
	sc.ToV1()
	tx.WithBlobTxSidecar(&sc)

	block2 := types.NewBlockWithHeader(header2).WithBody(types.Body{Transactions: append(postOsakaTxs, tx)})
	chain.blocks[2] = block2

	pool := New(Config{Datadir: storage}, chain, nil)
	if err := pool.Init(1, header0, newReserver()); err != nil {
		t.Fatalf("failed to create blob pool: %v", err)
	}
	errs := pool.Add(preOsakaTxs, true)
	for i, err := range errs {
		if err != nil {
			t.Errorf("failed to insert blob tx from %s: %s", addrs[i], errs[i])
		}
	}
	// Kick off migration.
	pool.Reset(header0, header1)

	// Add the v0 sidecar tx, but don't block so we can keep doing other stuff
	// while it converts the sidecar.
	addDone := make(chan struct{})
	go func() {
		pool.Add(types.Transactions{postOsakaTxs[0]}, false)
		close(addDone)
	}()

	// Add the post-Osaka v1 sidecar txs.
	errs = pool.Add(postOsakaTxs[1:], false)
	for _, err := range errs {
		if err != nil {
			t.Fatalf("expected tx add to succeed: %v", err)
		}
	}
	// Wait for the first tx's conversion to complete, then check that all
	// transactions added after Osaka can be accounted for in the pool.
	<-addDone
	pending := pool.Pending(txpool.PendingFilter{BlobTxs: true, BlobVersion: types.BlobSidecarVersion1})
	for _, tx := range postOsakaTxs {
		from, _ := pool.signer.Sender(tx)
		if len(pending[from]) != 1 || pending[from][0].Hash != tx.Hash() {
			t.Fatalf("expected post-Osaka txs to be pending")
		}
	}
	// Now update the pool with the next block. This should cause the pool to
	// clear out the post-Osaka txs since they were included in block 2. Since the
	// test blockchain doesn't manage nonces, we'll just do that manually before
	// the reset is called. Don't forget about the pre-Osaka transaction we also
	// added to block 2!
	for i := range postOsakaTxs {
		statedb.SetNonce(addrs[len(preOsakaTxs)+i], 1, tracing.NonceChangeEoACall)
	}
	statedb.SetNonce(addrs[len(preOsakaTxs)-1], 1, tracing.NonceChangeEoACall)
	pool.Reset(header1, block2.Header())

	// Now verify no post-Osaka transactions are tracked by the pool.
	for i, tx := range postOsakaTxs {
		if pool.Get(tx.Hash()) != nil {
			t.Fatalf("expected txs added post-osaka to have been placed in limbo due to inclusion in a block: index %d, hash %s", i, tx.Hash())
		}
	}
	// Wait for the pool migration to complete.
	<-pool.cQueue.anyBillyConversionDone

	// Verify all transactions in the pool were converted and verify the
	// subsequent cell proofs.
	count, _ := pool.Stats()
	if count != len(preOsakaTxs)-1 {
		t.Errorf("expected pending count to match initial tx count: pending=%d, expected=%d", count, len(preOsakaTxs)-1)
	}
	for addr, acc := range pool.index {
		for _, m := range acc {
			if m.version != types.BlobSidecarVersion1 {
				t.Errorf("expected sidecar to have been converted: from %s, hash %s", addr, m.hash)
			}
			tx := pool.Get(m.hash)
			if tx == nil {
				t.Errorf("failed to get tx by hash: %s", m.hash)
			}
			sc := tx.BlobTxSidecar()
			if err := kzg4844.VerifyCellProofs(sc.Blobs, sc.Commitments, sc.Proofs); err != nil {
				t.Errorf("failed to verify cell proofs for tx %s after conversion: %s", m.hash, err)
			}
		}
	}
	verifyPoolInternals(t, pool)

	// Launch conversion a second time.
	// This is just a sanity check to ensure we can handle it.
	pool.Reset(header0, header1)
	pool.Close()
}
@ -1989,3 +2359,5 @@ func benchmarkPoolPending(b *testing.B, datacap uint64) {
}
}
}
func newUint64(val uint64) *uint64 { return &val }

View file

@ -0,0 +1,203 @@
// Copyright 2025 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package blobpool
import (
"errors"
"slices"
"sync/atomic"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/log"
)
// maxPendingConversionTasks caps the number of pending conversion tasks. This
// prevents excessive memory usage; the worst-case scenario (2k transactions
// with 6 blobs each) would consume approximately 1.5GB of memory.
const maxPendingConversionTasks = 2048

// txConvert represents a conversion task with an attached legacy blob transaction.
type txConvert struct {
	tx   *types.Transaction // Legacy blob transaction
	done chan error         // Channel for signaling back if the conversion succeeds
}

// conversionQueue is a dedicated queue for converting legacy blob transactions
// received from the network after the Osaka fork. Since conversion is expensive,
// it is performed in the background by a single thread, ensuring the main Geth
// process is not overloaded.
type conversionQueue struct {
	tasks      chan *txConvert // Submission channel for per-transaction conversion tasks
	startBilly chan func()     // Submission channel for whole-datastore (billy) conversion jobs
	quit       chan struct{}   // Closed by close() to request loop termination
	closed     chan struct{}   // Closed by the loop once it has fully shut down

	billyQueue    []func()      // Queued billy conversion jobs, owned by the loop goroutine
	billyTaskDone chan struct{} // Completion signal of the running billy job; nil while idle

	// This channel will be closed when the first billy conversion finishes.
	// It's added for unit tests to synchronize with the conversion progress.
	anyBillyConversionDone chan struct{}
}
// newConversionQueue constructs the conversion queue and spins up its
// scheduling goroutine.
func newConversionQueue() *conversionQueue {
	queue := &conversionQueue{
		tasks:                  make(chan *txConvert),
		startBilly:             make(chan func()),
		quit:                   make(chan struct{}),
		closed:                 make(chan struct{}),
		anyBillyConversionDone: make(chan struct{}),
	}
	go queue.loop()
	return queue
}
// convert accepts a legacy blob transaction with version-0 blobs and queues it
// for conversion.
//
// This function may block for a long time until the transaction is processed.
func (q *conversionQueue) convert(tx *types.Transaction) error {
	// Buffered so the worker's result send never blocks.
	result := make(chan error, 1)

	select {
	case <-q.closed:
		return errors.New("conversion queue closed")
	case q.tasks <- &txConvert{tx: tx, done: result}:
		// Task accepted; wait for the background worker to report back.
		return <-result
	}
}
// launchBillyConversion starts a conversion task in the background. It only
// fails if the queue has already been shut down.
func (q *conversionQueue) launchBillyConversion(fn func()) error {
	select {
	case <-q.closed:
		return errors.New("conversion queue closed")
	case q.startBilly <- fn:
	}
	return nil
}
// close terminates the conversion queue.
//
// The first call closes quit and waits for the loop to finish (which closes
// the closed channel); subsequent sequential calls observe closed and return
// immediately, so a double close does not panic.
//
// NOTE(review): two goroutines calling close concurrently could both take the
// default branch and close(q.quit) twice, which would panic — confirm close
// is only ever invoked from a single owner.
func (q *conversionQueue) close() {
	select {
	case <-q.closed:
		return
	default:
		close(q.quit)
		<-q.closed
	}
}
// run converts a batch of legacy blob txs to the new cell proof format.
//
// Each task's done channel is buffered (capacity 1, see convert), so the
// result sends below can never block. Once interrupt is raised, the remaining
// tasks are failed instead of converted. The done channel is closed on return
// so the scheduling loop knows the batch has finished.
func (q *conversionQueue) run(tasks []*txConvert, done chan struct{}, interrupt *atomic.Int32) {
	defer close(done)

	for _, t := range tasks {
		// Abandon the rest of the batch if shutdown was requested.
		if interrupt != nil && interrupt.Load() != 0 {
			t.done <- errors.New("conversion is interrupted")
			continue
		}
		sidecar := t.tx.BlobTxSidecar()
		if sidecar == nil {
			t.done <- errors.New("tx without sidecar")
			continue
		}
		// Run the conversion, the original sidecar will be mutated in place
		start := time.Now()
		err := sidecar.ToV1()
		t.done <- err

		log.Trace("Converted legacy blob tx", "hash", t.tx.Hash(), "err", err, "elapsed", common.PrettyDuration(time.Since(start)))
	}
}
// loop is the scheduling goroutine of the conversion queue. It batches
// per-transaction conversion tasks into background runs, serializes billy
// datastore conversion jobs, and tears everything down on quit.
func (q *conversionQueue) loop() {
	defer close(q.closed)

	var (
		done      chan struct{} // Non-nil if background routine is active
		interrupt *atomic.Int32 // Flag to signal conversion interruption

		// The pending tasks for sidecar conversion. We assume the number of legacy
		// blob transactions requiring conversion will not be excessive. However,
		// a hard cap is applied as a protective measure.
		txTasks []*txConvert

		firstBilly = true
	)
	// launch snapshots the pending tasks and starts a background batch run.
	launch := func() {
		done, interrupt = make(chan struct{}), new(atomic.Int32)
		tasks := slices.Clone(txTasks)
		txTasks = txTasks[:0]
		go q.run(tasks, done, interrupt)
	}
	for {
		select {
		case t := <-q.tasks:
			if len(txTasks) >= maxPendingConversionTasks {
				t.done <- errors.New("conversion queue is overloaded")
				continue
			}
			txTasks = append(txTasks, t)

			// Launch the background conversion thread if it's idle
			if done == nil {
				launch()
			}

		case <-done:
			done, interrupt = nil, nil

			// Tasks may have piled up while the previous batch was running.
			// Launch them now; otherwise their callers would stay blocked
			// until an unrelated new submission happens to arrive.
			if len(txTasks) > 0 {
				launch()
			}

		case fn := <-q.startBilly:
			q.billyQueue = append(q.billyQueue, fn)

			// Only start the job if no billy conversion is currently running;
			// a running job's completion (billyTaskDone) drains the queue one
			// job at a time. Starting here while busy would overwrite the
			// done channel and run two conversions concurrently.
			if q.billyTaskDone == nil {
				q.runNextBillyTask()
			}

		case <-q.billyTaskDone:
			if firstBilly {
				close(q.anyBillyConversionDone)
				firstBilly = false
			}
			q.runNextBillyTask()

		case <-q.quit:
			if done != nil {
				log.Debug("Waiting for blob proof conversion to exit")
				interrupt.Store(1)
				<-done
			}
			// Fail any tasks that were queued but never launched, so their
			// waiting callers are unblocked. The done channels are buffered,
			// hence these sends cannot block.
			for _, t := range txTasks {
				t.done <- errors.New("conversion queue closed")
			}
			if q.billyTaskDone != nil {
				log.Debug("Waiting for blobpool billy conversion to exit")
				<-q.billyTaskDone
			}
			return
		}
	}
}
// runNextBillyTask pops the next queued billy conversion job and runs it in a
// fresh goroutine, pointing billyTaskDone at its completion channel. When the
// queue is empty, billyTaskDone is reset to nil to mark the queue as idle.
func (q *conversionQueue) runNextBillyTask() {
	if len(q.billyQueue) == 0 {
		q.billyTaskDone = nil
		return
	}
	// Detach the head job, shifting the remainder down in place so the
	// backing array is reused and the head reference is released.
	next := q.billyQueue[0]
	n := copy(q.billyQueue, q.billyQueue[1:])
	q.billyQueue = q.billyQueue[:n]

	finished := make(chan struct{})
	go func() {
		defer close(finished)
		next()
	}()
	q.billyTaskDone = finished
}

View file

@ -0,0 +1,101 @@
// Copyright 2025 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package blobpool
import (
"crypto/ecdsa"
"crypto/sha256"
"testing"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/crypto/kzg4844"
"github.com/ethereum/go-ethereum/params"
"github.com/holiman/uint256"
)
// createV1BlobTx creates a blob transaction with version 1 sidecar for testing.
// The single blob's first byte is derived from the nonce so distinct nonces
// produce distinct blobs.
func createV1BlobTx(nonce uint64, key *ecdsa.PrivateKey) *types.Transaction {
	var (
		blob          = &kzg4844.Blob{byte(nonce)}
		commitment, _ = kzg4844.BlobToCommitment(blob)
		cellProofs, _ = kzg4844.ComputeCellProofs(blob)
	)
	inner := &types.BlobTx{
		ChainID:    uint256.MustFromBig(params.MainnetChainConfig.ChainID),
		Nonce:      nonce,
		GasTipCap:  uint256.NewInt(1),
		GasFeeCap:  uint256.NewInt(1000),
		Gas:        21000,
		BlobFeeCap: uint256.NewInt(100),
		BlobHashes: []common.Hash{kzg4844.CalcBlobHashV1(sha256.New(), &commitment)},
		Value:      uint256.NewInt(100),
		Sidecar:    types.NewBlobTxSidecar(types.BlobSidecarVersion1, []kzg4844.Blob{*blob}, []kzg4844.Commitment{commitment}, cellProofs),
	}
	return types.MustSignNewTx(key, types.LatestSigner(params.MainnetChainConfig), inner)
}
// TestConversionQueueBasic checks that a legacy (v0) blob transaction pushed
// through the queue comes out with a version-1 sidecar.
func TestConversionQueueBasic(t *testing.T) {
	q := newConversionQueue()
	defer q.close()

	key, _ := crypto.GenerateKey()
	tx := makeTx(0, 1, 1, 1, key)
	if err := q.convert(tx); err != nil {
		t.Fatalf("Expected successful conversion, got error: %v", err)
	}
	if got := tx.BlobTxSidecar().Version; got != types.BlobSidecarVersion1 {
		t.Errorf("Expected sidecar version to be %d, got %d", types.BlobSidecarVersion1, got)
	}
}
// TestConversionQueueV1BlobTx checks that a transaction which already carries
// a version-1 sidecar passes through the queue unchanged.
func TestConversionQueueV1BlobTx(t *testing.T) {
	q := newConversionQueue()
	defer q.close()

	key, _ := crypto.GenerateKey()
	tx := createV1BlobTx(0, key)

	before := tx.BlobTxSidecar().Version
	if err := q.convert(tx); err != nil {
		t.Fatalf("Expected successful conversion, got error: %v", err)
	}
	if after := tx.BlobTxSidecar().Version; after != before {
		t.Errorf("Expected sidecar version to remain %d, got %d", before, after)
	}
}
// TestConversionQueueClosed checks that submitting work to an already-closed
// queue is rejected with an error instead of blocking.
func TestConversionQueueClosed(t *testing.T) {
	q := newConversionQueue()
	q.close() // shut down before submitting anything

	key, _ := crypto.GenerateKey()
	if err := q.convert(makeTx(0, 1, 1, 1, key)); err == nil {
		t.Fatal("Expected error when converting on closed queue, got nil")
	}
}
// TestConversionQueueDoubleClose verifies that closing an already-closed queue
// is a harmless no-op (close selects on the closed channel before touching
// quit) rather than a panic.
func TestConversionQueueDoubleClose(t *testing.T) {
	queue := newConversionQueue()
	queue.close()
	queue.close() // Should not panic
}

View file

@ -20,8 +20,10 @@ import (
"errors"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/consensus/misc/eip4844"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp"
"github.com/holiman/billy"
)
@ -48,11 +50,21 @@ type limbo struct {
}
// newLimbo opens and indexes a set of limboed blob transactions.
func newLimbo(datadir string, maxBlobsPerTransaction int) (*limbo, error) {
func newLimbo(config *params.ChainConfig, datadir string) (*limbo, error) {
l := &limbo{
index: make(map[common.Hash]uint64),
groups: make(map[uint64]map[uint64]common.Hash),
}
// Create new slotter for pre-Osaka blob configuration.
slotter := newSlotter(eip4844.LatestMaxBlobsPerBlock(config))
// See if we need to migrate the limbo after fusaka.
slotter, err := tryMigrate(config, slotter, datadir)
if err != nil {
return nil, err
}
// Index all limboed blobs on disk and delete anything unprocessable
var fails []uint64
index := func(id uint64, size uint32, data []byte) {
@ -60,7 +72,7 @@ func newLimbo(datadir string, maxBlobsPerTransaction int) (*limbo, error) {
fails = append(fails, id)
}
}
store, err := billy.Open(billy.Options{Path: datadir, Repair: true}, newSlotter(maxBlobsPerTransaction), index)
store, err := billy.Open(billy.Options{Path: datadir, Repair: true}, slotter, index)
if err != nil {
return nil, err
}

View file

@ -110,3 +110,13 @@ func (l *lookup) untrack(tx *blobTxMeta) {
}
}
}
// update updates the transaction index. It should only be used in the conversion.
// It reports whether the hash was present in the index.
func (l *lookup) update(hash common.Hash, id uint64, size uint64) bool {
	if meta, ok := l.txIndex[hash]; ok {
		meta.id = id
		meta.size = size
		return true
	}
	return false
}

View file

@ -16,6 +16,49 @@
package blobpool
import (
"github.com/ethereum/go-ethereum/consensus/misc/eip4844"
"github.com/ethereum/go-ethereum/params"
"github.com/holiman/billy"
)
// tryMigrate checks if the billy needs to be migrated and migrates if needed.
// Returns a slotter that can be used for the database.
//
// Once Osaka is configured, stores laid out with the legacy slotter must be
// rewritten with the EIP-7594 slotter; a small version shelf in the store
// records whether that migration already happened so it is done only once.
func tryMigrate(config *params.ChainConfig, slotter billy.SlotSizeFn, datadir string) (billy.SlotSizeFn, error) {
	// Check if we need to migrate our blob db to the new slotter.
	if config.OsakaTime != nil {
		// Open the store using the version slotter to see if any version has been
		// written.
		var version int
		index := func(_ uint64, _ uint32, blob []byte) {
			// Keep the highest version observed across all stored entries.
			version = max(version, parseSlotterVersion(blob))
		}
		store, err := billy.Open(billy.Options{Path: datadir}, newVersionSlotter(), index)
		if err != nil {
			return nil, err
		}
		store.Close()

		// If the version found is less than the currently configured store version,
		// perform a migration then write the updated version of the store.
		if version < storeVersion {
			newSlotter := newSlotterEIP7594(eip4844.LatestMaxBlobsPerBlock(config))
			if err := billy.Migrate(billy.Options{Path: datadir, Repair: true}, slotter, newSlotter); err != nil {
				return nil, err
			}
			// Stamp the store with the current version so the migration is not
			// re-run on the next startup.
			store, err = billy.Open(billy.Options{Path: datadir}, newVersionSlotter(), nil)
			if err != nil {
				return nil, err
			}
			writeSlotterVersion(store, storeVersion)
			store.Close()
		}
		// Set the slotter to the format now that the Osaka is active.
		slotter = newSlotterEIP7594(eip4844.LatestMaxBlobsPerBlock(config))
	}
	return slotter, nil
}
// newSlotter creates a helper method for the Billy datastore that returns the
// individual shelf sizes used to store transactions in.
//
@ -25,7 +68,7 @@ package blobpool
// The slotter also creates a shelf for 0-blob transactions. Whilst those are not
// allowed in the current protocol, having an empty shelf is not a relevant use
// of resources, but it makes stress testing with junk transactions simpler.
func newSlotter(maxBlobsPerTransaction int) func() (uint32, bool) {
func newSlotter(maxBlobsPerTransaction int) billy.SlotSizeFn {
slotsize := uint32(txAvgSize)
slotsize -= uint32(blobSize) // underflows, it's ok, will overflow back in the first return
@ -36,3 +79,42 @@ func newSlotter(maxBlobsPerTransaction int) func() (uint32, bool) {
return slotsize, finished
}
}
// newSlotterEIP7594 creates a different slotter for EIP-7594 transactions.
// EIP-7594 (PeerDAS) changes the average transaction size which means the
// current static 4KB average size is not enough anymore. This slotter adds a
// dynamic overhead component per blob, which also captures the notion that
// blob transactions with more blobs are more likely to have more calldata.
func newSlotterEIP7594(maxBlobsPerTransaction int) billy.SlotSizeFn {
	// Start one (blob + overhead) below the base size; the unsigned wraparound
	// is intentional, the first invocation adds the step back so the zero-blob
	// shelf comes out at txAvgSize.
	size := uint32(txAvgSize) - (blobSize + txBlobOverhead)
	limit := uint32(maxBlobsPerTransaction)*(blobSize+txBlobOverhead) + txMaxSize

	return func() (uint32, bool) {
		size += blobSize + txBlobOverhead
		return size, size > limit
	}
}
// newVersionSlotter creates a slotter with a single 8 byte shelf to store
// version metadata in.
func newVersionSlotter() billy.SlotSizeFn {
	// One fixed-size shelf and nothing more: 8 bytes, done immediately.
	return func() (uint32, bool) { return 8, true }
}
// parseSlotterVersion will parse the slotter's version from a given data blob.
// Only the first byte carries the version; an empty (or nil) blob decodes as
// version zero.
func parseSlotterVersion(blob []byte) int {
	if len(blob) == 0 {
		return 0
	}
	return int(blob[0])
}
// writeSlotterVersion writes the current slotter version into the store.
// The version is stored as a single byte, matching parseSlotterVersion which
// reads only the first byte back.
func writeSlotterVersion(store billy.Database, version int) {
	store.Put([]byte{byte(version)})
}

View file

@ -16,7 +16,9 @@
package blobpool
import "testing"
import (
"testing"
)
// Tests that the slotter creates the expected database shelves.
func TestNewSlotter(t *testing.T) {
@ -58,3 +60,44 @@ func TestNewSlotter(t *testing.T) {
}
}
}
// Tests that the EIP-7594 slotter creates the expected database shelves.
func TestNewSlotterEIP7594(t *testing.T) {
	// Drain the slotter, collecting every shelf size until it reports done
	slotter := newSlotterEIP7594(6)

	var have []uint32
	for {
		shelf, done := slotter()
		have = append(have, shelf)
		if done {
			break
		}
	}
	// Shelf k is expected to cover the average tx metadata plus k blobs with
	// their per-blob overhead; the 15th shelf (k = 14) is the first one large
	// enough for 6 blobs plus the maximum tx metadata size, so the slotter
	// must stop there.
	var want []uint32
	for k := uint32(0); k <= 14; k++ {
		want = append(want, k*(blobSize+txBlobOverhead)+txAvgSize)
	}
	if len(have) != len(want) {
		t.Errorf("shelves count mismatch: have %d, want %d", len(have), len(want))
	}
	for i := 0; i < len(have) && i < len(want); i++ {
		if have[i] != want[i] {
			t.Errorf("shelf %d mismatch: have %d, want %d", i, have[i], want[i])
		}
	}
}

View file

@@ -508,7 +508,7 @@ func (pool *LegacyPool) ContentFrom(addr common.Address) ([]*types.Transaction,
func (pool *LegacyPool) Pending(filter txpool.PendingFilter) map[common.Address][]*txpool.LazyTransaction {
// If only blob transactions are requested, this pool is unsuitable as it
// contains none, don't even bother.
if filter.OnlyBlobTxs {
if filter.BlobTxs {
return nil
}
pool.mu.Lock()

View file

@@ -2292,8 +2292,8 @@ func TestSetCodeTransactions(t *testing.T) {
pending: 1,
run: func(name string) {
aa := common.Address{0xaa, 0xaa}
statedb.SetCode(addrA, append(types.DelegationPrefix, aa.Bytes()...))
statedb.SetCode(aa, []byte{byte(vm.ADDRESS), byte(vm.PUSH0), byte(vm.SSTORE)})
statedb.SetCode(addrA, append(types.DelegationPrefix, aa.Bytes()...), tracing.CodeChangeUnspecified)
statedb.SetCode(aa, []byte{byte(vm.ADDRESS), byte(vm.PUSH0), byte(vm.SSTORE)}, tracing.CodeChangeUnspecified)
// Send gapped transaction, it should be rejected.
if err := pool.addRemoteSync(pricedTransaction(2, 100000, big.NewInt(1), keyA)); !errors.Is(err, ErrOutOfOrderTxFromDelegated) {
@@ -2317,7 +2317,7 @@ func TestSetCodeTransactions(t *testing.T) {
}
// Reset the delegation, avoid leaking state into the other tests
statedb.SetCode(addrA, nil)
statedb.SetCode(addrA, nil, tracing.CodeChangeUnspecified)
},
},
{
@@ -2583,7 +2583,7 @@ func TestSetCodeTransactionsReorg(t *testing.T) {
}
// Simulate the chain moving
blockchain.statedb.SetNonce(addrA, 1, tracing.NonceChangeAuthorization)
blockchain.statedb.SetCode(addrA, types.AddressToDelegation(auth.Address))
blockchain.statedb.SetCode(addrA, types.AddressToDelegation(auth.Address), tracing.CodeChangeUnspecified)
<-pool.requestReset(nil, nil)
// Set an authorization for 0x00
auth, _ = types.SignSetCode(keyA, types.SetCodeAuthorization{
@@ -2601,7 +2601,7 @@ func TestSetCodeTransactionsReorg(t *testing.T) {
}
// Simulate the chain moving
blockchain.statedb.SetNonce(addrA, 2, tracing.NonceChangeAuthorization)
blockchain.statedb.SetCode(addrA, nil)
blockchain.statedb.SetCode(addrA, nil, tracing.CodeChangeUnspecified)
<-pool.requestReset(nil, nil)
// Now send two transactions from addrA
if err := pool.addRemoteSync(pricedTransaction(2, 100000, big.NewInt(1000), keyA)); err != nil {

View file

@@ -323,15 +323,22 @@ func (l *list) Add(tx *types.Transaction, priceBump uint64) (bool, *types.Transa
if tx.GasFeeCapIntCmp(thresholdFeeCap) < 0 || tx.GasTipCapIntCmp(thresholdTip) < 0 {
return false, nil
}
// Old is being replaced, subtract old cost
l.subTotalCost([]*types.Transaction{old})
}
// Add new tx cost to totalcost
cost, overflow := uint256.FromBig(tx.Cost())
if overflow {
return false, nil
}
l.totalcost.Add(l.totalcost, cost)
total, overflow := new(uint256.Int).AddOverflow(l.totalcost, cost)
if overflow {
return false, nil
}
l.totalcost = total
// Old is being replaced, subtract old cost
if old != nil {
l.subTotalCost([]*types.Transaction{old})
}
// Otherwise overwrite the old transaction with the current one
l.txs.Put(tx)

Some files were not shown because too many files have changed in this diff Show more