Compare commits

...

26 commits

Author SHA1 Message Date
rjl493456442
be92f5487e
trie: error out for unexpected key-value pairs preceding the range (#33898)
Some checks are pending
/ Linux Build (push) Waiting to run
/ Linux Build (arm) (push) Waiting to run
/ Keeper Build (push) Waiting to run
/ Windows Build (push) Waiting to run
/ Docker Image (push) Waiting to run
2026-02-26 23:00:02 +08:00
Sina M
8a4345611d
build: update ubuntu distros list (#33864)
The `plucky` and `oracular` releases have reached end of life. That's why
launchpad isn't building them anymore:
https://launchpad.net/~ethereum/+archive/ubuntu/ethereum/+packages.
2026-02-26 13:55:53 +01:00
Marius van der Wijden
f811bfe4fd
core/vm: implement eip-7843: SLOTNUM (#33589)
Implements the slotnum opcode as specified here:
https://eips.ethereum.org/EIPS/eip-7843
2026-02-26 13:53:46 +01:00
Guillaume Ballet
406a852ec8
AGENTS.md: add AGENTS.md (#33890)
Some checks failed
/ Linux Build (push) Has been cancelled
/ Linux Build (arm) (push) Has been cancelled
/ Keeper Build (push) Has been cancelled
/ Windows Build (push) Has been cancelled
/ Docker Image (push) Has been cancelled
Co-authored-by: tellabg <249254436+tellabg@users.noreply.github.com>
Co-authored-by: lightclient <lightclient@protonmail.com>
2026-02-24 23:08:23 -07:00
ANtutov
2a45272408
eth/protocols/eth: fix handshake timeout metrics classification (#33539)
Previously, handshake timeouts were recorded as generic peer errors
instead of timeout errors. waitForHandshake passed a raw
p2p.DiscReadTimeout into markError, but markError classified errors only
via errors.Unwrap(err), which returns nil for non-wrapped errors. As a
result, the timeoutError meter was never incremented and all such
failures fell into the peerError bucket.

This change makes markError switch on the base error, using
errors.Unwrap(err) when available and falling back to the original error
otherwise. With this adjustment, p2p.DiscReadTimeout is correctly mapped
to timeoutError, while existing behaviour for the other wrapped sentinel
errors remains unchanged.

---------

Co-authored-by: lightclient <lightclient@protonmail.com>
2026-02-24 21:50:26 -07:00
Fynn
8450e40798
cmd/geth: add inspect trie tool to analysis trie storage (#28892)
Some checks are pending
/ Linux Build (push) Waiting to run
/ Linux Build (arm) (push) Waiting to run
/ Keeper Build (push) Waiting to run
/ Windows Build (push) Waiting to run
/ Docker Image (push) Waiting to run
This PR adds a tool named `inspect-trie`, aimed at analyzing the MPT and
its node storage more efficiently.

## Example
 ./geth db inspect-trie --datadir server/data-seed/ latest 4000

## Result

- MPT shape
- Account Trie 
- Top N Storage Trie
```
+-------+-------+--------------+-------------+--------------+
|   -   | LEVEL | SHORTNODECNT | FULLNODECNT | VALUENODECNT |
+-------+-------+--------------+-------------+--------------+
|   -   |   0   |      0       |      1      |      0       |
|   -   |   1   |      0       |     16      |      0       |
|   -   |   2   |      76      |     32      |      74      |
|   -   |   3   |      66      |      1      |      66      |
|   -   |   4   |      2       |      0      |      2       |
| Total |  144  |      50      |     142     |
+-------+-------+--------------+-------------+--------------+
AccountTrie
+-------+-------+--------------+-------------+--------------+
|   -   | LEVEL | SHORTNODECNT | FULLNODECNT | VALUENODECNT |
+-------+-------+--------------+-------------+--------------+
|   -   |   0   |      0       |      1      |      0       |
|   -   |   1   |      0       |     16      |      0       |
|   -   |   2   |     108      |     84      |     104      |
|   -   |   3   |     195      |      5      |     195      |
|   -   |   4   |      10      |      0      |      10      |
| Total |  313  |     106      |     309     |
+-------+-------+--------------+-------------+--------------+
ContractTrie-0xc874e65ccffb133d9db4ff637e62532ef6ecef3223845d02f522c55786782911
+-------+-------+--------------+-------------+--------------+
|   -   | LEVEL | SHORTNODECNT | FULLNODECNT | VALUENODECNT |
+-------+-------+--------------+-------------+--------------+
|   -   |   0   |      0       |      1      |      0       |
|   -   |   1   |      0       |     16      |      0       |
|   -   |   2   |      57      |     14      |      56      |
|   -   |   3   |      33      |      0      |      33      |
| Total |  90   |      31      |     89      |
+-------+-------+--------------+-------------+--------------+
ContractTrie-0x1d7dcb6a0ce5227c5379fc5b0e004561d7833b063355f69bfea3178f08fbaab4
+-------+-------+--------------+-------------+--------------+
|   -   | LEVEL | SHORTNODECNT | FULLNODECNT | VALUENODECNT |
+-------+-------+--------------+-------------+--------------+
|   -   |   0   |      0       |      1      |      0       |
|   -   |   1   |      5       |      8      |      5       |
|   -   |   2   |      16      |      1      |      16      |
|   -   |   3   |      2       |      0      |      2       |
| Total |  23   |      10      |     23      |
+-------+-------+--------------+-------------+--------------+
ContractTrie-0xaa8a4783ebbb3bec45d3e804b3c59bfd486edfa39cbeda1d42bf86c08a0ebc0f
+-------+-------+--------------+-------------+--------------+
|   -   | LEVEL | SHORTNODECNT | FULLNODECNT | VALUENODECNT |
+-------+-------+--------------+-------------+--------------+
|   -   |   0   |      0       |      1      |      0       |
|   -   |   1   |      9       |      3      |      9       |
|   -   |   2   |      7       |      1      |      7       |
|   -   |   3   |      2       |      0      |      2       |
| Total |  18   |      5       |     18      |
+-------+-------+--------------+-------------+--------------+
ContractTrie-0x9d2804d0562391d7cfcfaf0013f0352e176a94403a58577ebf82168a21514441
+-------+-------+--------------+-------------+--------------+
|   -   | LEVEL | SHORTNODECNT | FULLNODECNT | VALUENODECNT |
+-------+-------+--------------+-------------+--------------+
|   -   |   0   |      0       |      1      |      0       |
|   -   |   1   |      6       |      4      |      6       |
|   -   |   2   |      8       |      0      |      8       |
| Total |  14   |      5       |     14      |
+-------+-------+--------------+-------------+--------------+
ContractTrie-0x17e3eb95d0e6e92b42c0b3e95c6e75080c9fcd83e706344712e9587375de96e1
+-------+-------+--------------+-------------+--------------+
|   -   | LEVEL | SHORTNODECNT | FULLNODECNT | VALUENODECNT |
+-------+-------+--------------+-------------+--------------+
|   -   |   0   |      0       |      1      |      0       |
|   -   |   1   |      5       |      3      |      5       |
|   -   |   2   |      7       |      0      |      7       |
| Total |  12   |      4       |     12      |
+-------+-------+--------------+-------------+--------------+
ContractTrie-0xc017ca90c8aa37693c38f80436bb15bde46d7b30a503aa808cb7814127468a44
Contract Trie, total trie num: 142, ShortNodeCnt: 620, FullNodeCnt: 204, ValueNodeCnt: 615
```

---------

Co-authored-by: lightclient <lightclient@protonmail.com>
Co-authored-by: MariusVanDerWijden <m.vanderwijden@live.de>
2026-02-24 10:56:00 -07:00
cui
9ecb6c4ae6
core: reduce alloc (#33576)
Some checks are pending
/ Linux Build (arm) (push) Waiting to run
/ Keeper Build (push) Waiting to run
/ Linux Build (push) Waiting to run
/ Windows Build (push) Waiting to run
/ Docker Image (push) Waiting to run
tx.GasPrice()/GasFeeCap()/GasTipCap() each already allocate a new big.Int internally.  
bench result:  
```
goos: darwin
goarch: arm64
pkg: github.com/ethereum/go-ethereum/core
cpu: Apple M4
                        │   old.txt   │               new.txt               │
                        │   sec/op    │   sec/op     vs base                │
TransactionToMessage-10   240.1n ± 7%   175.1n ± 7%  -27.09% (p=0.000 n=10)

                        │  old.txt   │              new.txt               │
                        │    B/op    │    B/op     vs base                │
TransactionToMessage-10   544.0 ± 0%   424.0 ± 0%  -22.06% (p=0.000 n=10)

                        │  old.txt   │              new.txt               │
                        │ allocs/op  │ allocs/op   vs base                │
TransactionToMessage-10   17.00 ± 0%   11.00 ± 0%  -35.29% (p=0.000 n=10)
```
benchmark code:  

```
// Copyright 2025 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package core

import (
	"math/big"
	"testing"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/params"
)

// BenchmarkTransactionToMessage benchmarks the TransactionToMessage function.
func BenchmarkTransactionToMessage(b *testing.B) {
	key, _ := crypto.GenerateKey()
	signer := types.LatestSigner(params.TestChainConfig)
	to := common.HexToAddress("0x000000000000000000000000000000000000dead")
	
	// Create a DynamicFeeTx transaction
	txdata := &types.DynamicFeeTx{
		ChainID:   big.NewInt(1),
		Nonce:     42,
		GasTipCap: big.NewInt(1000000000),  // 1 gwei
		GasFeeCap: big.NewInt(2000000000),  // 2 gwei
		Gas:       21000,
		To:        &to,
		Value:     big.NewInt(1000000000000000000), // 1 ether
		Data:      []byte{0x12, 0x34, 0x56, 0x78},
		AccessList: types.AccessList{
			types.AccessTuple{
				Address:     common.HexToAddress("0x0000000000000000000000000000000000000001"),
				StorageKeys: []common.Hash{
					common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000001"),
				},
			},
		},
	}
	tx, _ := types.SignNewTx(key, signer, txdata)
	baseFee := big.NewInt(1500000000) // 1.5 gwei

	b.ResetTimer()
	b.ReportAllocs()
	for i := 0; i < b.N; i++ {
		_, err := TransactionToMessage(tx, signer, baseFee)
		if err != nil {
			b.Fatal(err)
		}
	}
}
```
2026-02-24 07:40:01 -07:00
rjl493456442
e636e4e3c1
core/state: track slot reads for empty storage (#33743)
From the https://eips.ethereum.org/EIPS/eip-7928

> SELFDESTRUCT (in-transaction): Accounts destroyed within a transaction
   MUST be included in AccountChanges without nonce or code changes. 
   However, if the account had a positive balance pre-transaction, the
   balance change to zero MUST be recorded. Storage keys within the self-destructed
   contracts that were modified or read MUST be included as a storage_reads
   entry.

The storage read against the empty contract (zero storage) should also
be recorded in the BAL's readlist.
2026-02-24 21:57:50 +08:00
rjl493456442
cbf3d8fed2
core/vm: touch precompile object with Amsterdam enabled (#33742)
https://eips.ethereum.org/EIPS/eip-7928 spec:

> Precompiled contracts: Precompiles MUST be included when accessed. 
   If a precompile receives value, it is recorded with a balance change.
   Otherwise, it is included with empty change lists.

The precompiled contracts are not explicitly touched when they are
invoked since Amsterdam fork.
2026-02-24 21:55:10 +08:00
rjl493456442
199ac16e07
core/types/bal: change code change type to list (#33774)
To align with the latest spec of EIP-7928:

```
# CodeChange: [block_access_index, new_code]
CodeChange = [BlockAccessIndex, Bytecode]
```
2026-02-24 21:53:20 +08:00
IONode Online
01083736c8
core/txpool/blobpool: remove unused adds slice in Add() (#33887) 2026-02-24 20:24:16 +08:00
Csaba Kiraly
59ad40e562
eth: check for tx on chain as well (#33607)
The fetcher should not fetch transactions that are already on chain.
Until now we were only checking in the txpool, but that does not have
the old transaction. This was leading to extra fetches of transactions
that were announced by a peer but are already on chain.

Here we extend the check to the chain as well.
2026-02-24 11:21:03 +01:00
CPerezz
c2e1785a48
eth/protocols/snap: restore peers to idle pool on request revert (#33790)
Some checks are pending
/ Linux Build (push) Waiting to run
/ Linux Build (arm) (push) Waiting to run
/ Keeper Build (push) Waiting to run
/ Windows Build (push) Waiting to run
/ Docker Image (push) Waiting to run
All five `revert*Request` functions (account, bytecode, storage,
trienode heal, bytecode heal) remove the request from the tracked set
but never restore the peer to its corresponding idle pool. When a
request times out and no response arrives, the peer is permanently lost
from the idle pool, preventing new work from being assigned to it.

In normal operation mode (snap-sync full state) this bug is masked by
pivot movement (which resets idle pools via new Sync() cycles every ~15
minutes) and peer churn (reconnections re-add peers via Register()).
However in scenarios like the one I have running my
[partial-stateful node](https://github.com/ethereum/go-ethereum/pull/33764) with
long-running sync cycles and few peers, all peers can eventually leak
out of the idle pools, stalling sync entirely.

Fix: after deleting from the request map, restore the peer to its idle
pool if it is still registered (guards against the peer-drop path where
Unregister already removed the peer). This mirrors the pattern used in
all five On* response handlers.


This only seems to manifest in peer-starved scenarios, such as the one I
find myself in when testing snap sync for the partial-stateful node.
Still, I thought it was at least worth raising this point; unsure whether
it needs further discussion.
2026-02-24 09:14:11 +08:00
Nakanishi Hiro
82fad31540
internal/ethapi: add eth_getStorageValues method (#32591)
Some checks are pending
/ Linux Build (push) Waiting to run
/ Linux Build (arm) (push) Waiting to run
/ Keeper Build (push) Waiting to run
/ Windows Build (push) Waiting to run
/ Docker Image (push) Waiting to run
Implements the new eth_getStorageValues method. It returns storage
values for a list of contracts.

Spec: https://github.com/ethereum/execution-apis/pull/756

---------

Co-authored-by: Sina Mahmoodi <itz.s1na@gmail.com>
2026-02-23 20:47:30 +01:00
vickkkkkyy
1625064c68
internal/ethapi: include AuthorizationList in gas estimation (#33849)
Fixes an issue where AuthorizationList wasn't copied over when
estimating gas for a user-provided transaction.
2026-02-23 18:07:26 +01:00
Marius van der Wijden
1d1a094d51
beacon/blsync: ignore beacon syncer reorging errors (#33628)
Downgrades beacon syncer reorging from Error to Debug
closes https://github.com/ethereum/go-ethereum/issues/29916
2026-02-23 16:02:23 +01:00
Marius van der Wijden
e40aa46e88
eth/catalyst: implement testing_buildBlockV1 (#33656)
implements
https://github.com/ethereum/execution-apis/pull/710/changes#r2712256529

---------

Co-authored-by: Felix Lange <fjl@twurst.com>
2026-02-23 15:56:31 +01:00
Csaba Kiraly
d3dd48e59d
metrics: allow changing influxdb interval (#33767)
Some checks are pending
/ Linux Build (push) Waiting to run
/ Linux Build (arm) (push) Waiting to run
/ Keeper Build (push) Waiting to run
/ Windows Build (push) Waiting to run
/ Docker Image (push) Waiting to run
The PR exposes the InfluxDB reporting interval as a CLI parameter, which
was previously fixed at 10s. The default is still kept at 10s.
Note that decreasing the interval comes with notable extra traffic and
load on InfluxDB.
2026-02-23 14:27:25 +01:00
Felix Lange
00cbd2e6f4
p2p/discover/v5wire: use Whoareyou.ChallengeData instead of storing encoded packet (#31547)
Some checks are pending
/ Linux Build (push) Waiting to run
/ Linux Build (arm) (push) Waiting to run
/ Keeper Build (push) Waiting to run
/ Windows Build (push) Waiting to run
/ Docker Image (push) Waiting to run
This changes the challenge resend logic again to use the existing
`ChallengeData` field of `v5wire.Whoareyou` instead of storing a second
copy of the packet in `Whoareyou.Encoded`. It's more correct this way
since `ChallengeData` is supposed to be the data that is used by the ID
verification procedure.

Also adapts the cross-client test to verify this behavior.

Follow-up to #31543
2026-02-22 21:58:47 +01:00
Felix Lange
453d0f9299
build: upgrade to golangci-lint v2.10.1 (#33875)
Some checks are pending
/ Linux Build (arm) (push) Waiting to run
/ Keeper Build (push) Waiting to run
/ Linux Build (push) Waiting to run
/ Windows Build (push) Waiting to run
/ Docker Image (push) Waiting to run
2026-02-21 20:53:02 +01:00
Felix Lange
6d865ccd30
build: upgrade -dlgo version to 1.25.7 (#33874) 2026-02-21 20:52:43 +01:00
rjl493456442
54f72c796f
core/rawdb: revert "check pruning tail in HasBody and HasReceipts" (#33865)
Some checks failed
/ Linux Build (push) Has been cancelled
/ Linux Build (arm) (push) Has been cancelled
/ Keeper Build (push) Has been cancelled
/ Windows Build (push) Has been cancelled
/ Docker Image (push) Has been cancelled
Reverts ethereum/go-ethereum#33747.

This change suffers an unexpected issue during the sync with
`history.chain=postmerge`.
2026-02-19 11:43:44 +01:00
Sina M
2a62df3815
.github: fix actions 32bit test (#33866)
Some checks are pending
/ Linux Build (push) Waiting to run
/ Linux Build (arm) (push) Waiting to run
/ Keeper Build (push) Waiting to run
/ Windows Build (push) Waiting to run
/ Docker Image (push) Waiting to run
2026-02-18 18:28:53 +01:00
rjl493456442
01fe1d716c
core/vm: disable the value transfer in syscall (#33741)
Some checks are pending
/ Linux Build (push) Waiting to run
/ Linux Build (arm) (push) Waiting to run
/ Keeper Build (push) Waiting to run
/ Windows Build (push) Waiting to run
/ Docker Image (push) Waiting to run
In src/ethereum/forks/amsterdam/vm/interpreter.py:299-304, the caller
address is
only tracked for block level accessList when there's a value transfer:

```python
if message.should_transfer_value and message.value != 0:  
    # Track value transfer  
    sender_balance = get_account(state, message.caller).balance  
    recipient_balance = get_account(state, message.current_target).balance  

    track_address(message.state_changes, message.caller)  # Line 304
```

Since system transactions have should_transfer_value=False and value=0, 
this condition is never met, so the caller (SYSTEM_ADDRESS) is not
tracked.

This condition is applied for the syscall in the geth implementation,
aligning with the spec of EIP7928.

---------

Co-authored-by: Felix Lange <fjl@twurst.com>
2026-02-18 08:40:23 +08:00
spencer
3eed0580d4
cmd/evm: add --opcode.count flag to t8n (#33800)
Adds `--opcode.count=<file>` flag to `evm t8n` that writes per-opcode
execution frequency counts to a JSON file (relative to
`--output.basedir`).

---------

Co-authored-by: MariusVanDerWijden <m.vanderwijden@live.de>
Co-authored-by: Sina Mahmoodi <itz.s1na@gmail.com>
2026-02-17 20:42:53 +01:00
Felix Lange
1054276906 version: begin v1.17.1 release cycle
Some checks are pending
/ Linux Build (push) Waiting to run
/ Linux Build (arm) (push) Waiting to run
/ Keeper Build (push) Waiting to run
/ Windows Build (push) Waiting to run
/ Docker Image (push) Waiting to run
2026-02-17 17:17:00 +01:00
98 changed files with 2949 additions and 391 deletions

View file

@ -69,8 +69,8 @@ jobs:
- name: Install cross toolchain - name: Install cross toolchain
run: | run: |
apt-get update sudo apt-get update
apt-get -yq --no-install-suggests --no-install-recommends install gcc-multilib sudo apt-get -yq --no-install-suggests --no-install-recommends install gcc-multilib
- name: Build - name: Build
run: go run build/ci.go test -arch 386 -short -p 8 run: go run build/ci.go test -arch 386 -short -p 8

98
AGENTS.md Normal file
View file

@ -0,0 +1,98 @@
# AGENTS
## Guidelines
- **Keep changes minimal and focused.** Only modify code directly related to the task at hand. Do not refactor unrelated code, rename existing variables or functions for style, or bundle unrelated fixes into the same commit or PR.
- **Do not add, remove, or update dependencies** unless the task explicitly requires it.
## Pre-Commit Checklist
Before every commit, run **all** of the following checks and ensure they pass:
### 1. Formatting
Before committing, always run `gofmt` and `goimports` on all modified files:
```sh
gofmt -w <modified files>
goimports -w <modified files>
```
### 2. Build All Commands
Verify that all tools compile successfully:
```sh
make all
```
This builds all executables under `cmd/`, including `keeper` which has special build requirements.
### 3. Tests
While iterating during development, use `-short` for faster feedback:
```sh
go run ./build/ci.go test -short
```
Before committing, run the full test suite **without** `-short` to ensure all tests pass, including the Ethereum execution-spec tests and all state/block test permutations:
```sh
go run ./build/ci.go test
```
### 4. Linting
```sh
go run ./build/ci.go lint
```
This runs additional style checks. Fix any issues before committing.
### 5. Generated Code
```sh
go run ./build/ci.go check_generate
```
Ensures that all generated files (e.g., `gen_*.go`) are up to date. If this fails, first install the required code generators by running `make devtools`, then run the appropriate `go generate` commands and include the updated files in your commit.
### 6. Dependency Hygiene
```sh
go run ./build/ci.go check_baddeps
```
Verifies that no forbidden dependencies have been introduced.
## Commit Message Format
Commit messages must be prefixed with the package(s) they modify, followed by a short lowercase description:
```
<package(s)>: description
```
Examples:
- `core/vm: fix stack overflow in PUSH instruction`
- `eth, rpc: make trace configs optional`
- `cmd/geth: add new flag for sync mode`
Use comma-separated package names when multiple areas are affected. Keep the description concise.
## Pull Request Title Format
PR titles follow the same convention as commit messages:
```
<list of modified paths>: description
```
Examples:
- `core/vm: fix stack overflow in PUSH instruction`
- `core, eth: add arena allocator support`
- `cmd/geth, internal/ethapi: refactor transaction args`
- `trie/archiver: streaming subtree archival to fix OOM`
Use the top-level package paths, comma-separated if multiple areas are affected. Only mention the directories with functional changes, interface changes that trickle all over the codebase should not generate an exhaustive list. The description should be a short, lowercase summary of the change.

View file

@ -87,6 +87,10 @@ func (ec *engineClient) updateLoop(headCh <-chan types.ChainHeadEvent) {
if status, err := ec.callForkchoiceUpdated(forkName, event); err == nil { if status, err := ec.callForkchoiceUpdated(forkName, event); err == nil {
log.Info("Successful ForkchoiceUpdated", "head", event.Block.Hash(), "status", status) log.Info("Successful ForkchoiceUpdated", "head", event.Block.Hash(), "status", status)
} else { } else {
if err.Error() == "beacon syncer reorging" {
log.Debug("Failed ForkchoiceUpdated", "head", event.Block.Hash(), "error", err)
continue // ignore beacon syncer reorging errors, this error can occur if the blsync is skipping a block
}
log.Error("Failed ForkchoiceUpdated", "head", event.Block.Hash(), "error", err) log.Error("Failed ForkchoiceUpdated", "head", event.Block.Hash(), "error", err)
} }
} }

View file

@ -21,6 +21,7 @@ func (p PayloadAttributes) MarshalJSON() ([]byte, error) {
SuggestedFeeRecipient common.Address `json:"suggestedFeeRecipient" gencodec:"required"` SuggestedFeeRecipient common.Address `json:"suggestedFeeRecipient" gencodec:"required"`
Withdrawals []*types.Withdrawal `json:"withdrawals"` Withdrawals []*types.Withdrawal `json:"withdrawals"`
BeaconRoot *common.Hash `json:"parentBeaconBlockRoot"` BeaconRoot *common.Hash `json:"parentBeaconBlockRoot"`
SlotNumber *hexutil.Uint64 `json:"slotNumber"`
} }
var enc PayloadAttributes var enc PayloadAttributes
enc.Timestamp = hexutil.Uint64(p.Timestamp) enc.Timestamp = hexutil.Uint64(p.Timestamp)
@ -28,6 +29,7 @@ func (p PayloadAttributes) MarshalJSON() ([]byte, error) {
enc.SuggestedFeeRecipient = p.SuggestedFeeRecipient enc.SuggestedFeeRecipient = p.SuggestedFeeRecipient
enc.Withdrawals = p.Withdrawals enc.Withdrawals = p.Withdrawals
enc.BeaconRoot = p.BeaconRoot enc.BeaconRoot = p.BeaconRoot
enc.SlotNumber = (*hexutil.Uint64)(p.SlotNumber)
return json.Marshal(&enc) return json.Marshal(&enc)
} }
@ -39,6 +41,7 @@ func (p *PayloadAttributes) UnmarshalJSON(input []byte) error {
SuggestedFeeRecipient *common.Address `json:"suggestedFeeRecipient" gencodec:"required"` SuggestedFeeRecipient *common.Address `json:"suggestedFeeRecipient" gencodec:"required"`
Withdrawals []*types.Withdrawal `json:"withdrawals"` Withdrawals []*types.Withdrawal `json:"withdrawals"`
BeaconRoot *common.Hash `json:"parentBeaconBlockRoot"` BeaconRoot *common.Hash `json:"parentBeaconBlockRoot"`
SlotNumber *hexutil.Uint64 `json:"slotNumber"`
} }
var dec PayloadAttributes var dec PayloadAttributes
if err := json.Unmarshal(input, &dec); err != nil { if err := json.Unmarshal(input, &dec); err != nil {
@ -62,5 +65,8 @@ func (p *PayloadAttributes) UnmarshalJSON(input []byte) error {
if dec.BeaconRoot != nil { if dec.BeaconRoot != nil {
p.BeaconRoot = dec.BeaconRoot p.BeaconRoot = dec.BeaconRoot
} }
if dec.SlotNumber != nil {
p.SlotNumber = (*uint64)(dec.SlotNumber)
}
return nil return nil
} }

View file

@ -34,6 +34,7 @@ func (e ExecutableData) MarshalJSON() ([]byte, error) {
Withdrawals []*types.Withdrawal `json:"withdrawals"` Withdrawals []*types.Withdrawal `json:"withdrawals"`
BlobGasUsed *hexutil.Uint64 `json:"blobGasUsed"` BlobGasUsed *hexutil.Uint64 `json:"blobGasUsed"`
ExcessBlobGas *hexutil.Uint64 `json:"excessBlobGas"` ExcessBlobGas *hexutil.Uint64 `json:"excessBlobGas"`
SlotNumber *hexutil.Uint64 `json:"slotNumber"`
} }
var enc ExecutableData var enc ExecutableData
enc.ParentHash = e.ParentHash enc.ParentHash = e.ParentHash
@ -58,6 +59,7 @@ func (e ExecutableData) MarshalJSON() ([]byte, error) {
enc.Withdrawals = e.Withdrawals enc.Withdrawals = e.Withdrawals
enc.BlobGasUsed = (*hexutil.Uint64)(e.BlobGasUsed) enc.BlobGasUsed = (*hexutil.Uint64)(e.BlobGasUsed)
enc.ExcessBlobGas = (*hexutil.Uint64)(e.ExcessBlobGas) enc.ExcessBlobGas = (*hexutil.Uint64)(e.ExcessBlobGas)
enc.SlotNumber = (*hexutil.Uint64)(e.SlotNumber)
return json.Marshal(&enc) return json.Marshal(&enc)
} }
@ -81,6 +83,7 @@ func (e *ExecutableData) UnmarshalJSON(input []byte) error {
Withdrawals []*types.Withdrawal `json:"withdrawals"` Withdrawals []*types.Withdrawal `json:"withdrawals"`
BlobGasUsed *hexutil.Uint64 `json:"blobGasUsed"` BlobGasUsed *hexutil.Uint64 `json:"blobGasUsed"`
ExcessBlobGas *hexutil.Uint64 `json:"excessBlobGas"` ExcessBlobGas *hexutil.Uint64 `json:"excessBlobGas"`
SlotNumber *hexutil.Uint64 `json:"slotNumber"`
} }
var dec ExecutableData var dec ExecutableData
if err := json.Unmarshal(input, &dec); err != nil { if err := json.Unmarshal(input, &dec); err != nil {
@ -154,5 +157,8 @@ func (e *ExecutableData) UnmarshalJSON(input []byte) error {
if dec.ExcessBlobGas != nil { if dec.ExcessBlobGas != nil {
e.ExcessBlobGas = (*uint64)(dec.ExcessBlobGas) e.ExcessBlobGas = (*uint64)(dec.ExcessBlobGas)
} }
if dec.SlotNumber != nil {
e.SlotNumber = (*uint64)(dec.SlotNumber)
}
return nil return nil
} }

View file

@ -50,6 +50,13 @@ var (
// ExecutionPayloadV3 has the syntax of ExecutionPayloadV2 and appends the new // ExecutionPayloadV3 has the syntax of ExecutionPayloadV2 and appends the new
// fields: blobGasUsed and excessBlobGas. // fields: blobGasUsed and excessBlobGas.
PayloadV3 PayloadVersion = 0x3 PayloadV3 PayloadVersion = 0x3
// PayloadV4 is the identifier of ExecutionPayloadV4 introduced in amsterdam fork.
//
// https://github.com/ethereum/execution-apis/blob/main/src/engine/amsterdam.md#executionpayloadv4
// ExecutionPayloadV4 has the syntax of ExecutionPayloadV3 and appends the new
// field slotNumber.
PayloadV4 PayloadVersion = 0x4
) )
//go:generate go run github.com/fjl/gencodec -type PayloadAttributes -field-override payloadAttributesMarshaling -out gen_blockparams.go //go:generate go run github.com/fjl/gencodec -type PayloadAttributes -field-override payloadAttributesMarshaling -out gen_blockparams.go
@ -62,11 +69,13 @@ type PayloadAttributes struct {
SuggestedFeeRecipient common.Address `json:"suggestedFeeRecipient" gencodec:"required"` SuggestedFeeRecipient common.Address `json:"suggestedFeeRecipient" gencodec:"required"`
Withdrawals []*types.Withdrawal `json:"withdrawals"` Withdrawals []*types.Withdrawal `json:"withdrawals"`
BeaconRoot *common.Hash `json:"parentBeaconBlockRoot"` BeaconRoot *common.Hash `json:"parentBeaconBlockRoot"`
SlotNumber *uint64 `json:"slotNumber"`
} }
// JSON type overrides for PayloadAttributes. // JSON type overrides for PayloadAttributes.
type payloadAttributesMarshaling struct { type payloadAttributesMarshaling struct {
Timestamp hexutil.Uint64 Timestamp hexutil.Uint64
SlotNumber *hexutil.Uint64
} }
//go:generate go run github.com/fjl/gencodec -type ExecutableData -field-override executableDataMarshaling -out gen_ed.go //go:generate go run github.com/fjl/gencodec -type ExecutableData -field-override executableDataMarshaling -out gen_ed.go
@ -90,6 +99,7 @@ type ExecutableData struct {
Withdrawals []*types.Withdrawal `json:"withdrawals"` Withdrawals []*types.Withdrawal `json:"withdrawals"`
BlobGasUsed *uint64 `json:"blobGasUsed"` BlobGasUsed *uint64 `json:"blobGasUsed"`
ExcessBlobGas *uint64 `json:"excessBlobGas"` ExcessBlobGas *uint64 `json:"excessBlobGas"`
SlotNumber *uint64 `json:"slotNumber"`
} }
// JSON type overrides for executableData. // JSON type overrides for executableData.
@ -104,6 +114,7 @@ type executableDataMarshaling struct {
Transactions []hexutil.Bytes Transactions []hexutil.Bytes
BlobGasUsed *hexutil.Uint64 BlobGasUsed *hexutil.Uint64
ExcessBlobGas *hexutil.Uint64 ExcessBlobGas *hexutil.Uint64
SlotNumber *hexutil.Uint64
} }
// StatelessPayloadStatusV1 is the result of a stateless payload execution. // StatelessPayloadStatusV1 is the result of a stateless payload execution.
@ -213,7 +224,7 @@ func encodeTransactions(txs []*types.Transaction) [][]byte {
return enc return enc
} }
func decodeTransactions(enc [][]byte) ([]*types.Transaction, error) { func DecodeTransactions(enc [][]byte) ([]*types.Transaction, error) {
var txs = make([]*types.Transaction, len(enc)) var txs = make([]*types.Transaction, len(enc))
for i, encTx := range enc { for i, encTx := range enc {
var tx types.Transaction var tx types.Transaction
@ -251,7 +262,7 @@ func ExecutableDataToBlock(data ExecutableData, versionedHashes []common.Hash, b
// for stateless execution, so it skips checking if the executable data hashes to // for stateless execution, so it skips checking if the executable data hashes to
// the requested hash (stateless has to *compute* the root hash, it's not given). // the requested hash (stateless has to *compute* the root hash, it's not given).
func ExecutableDataToBlockNoHash(data ExecutableData, versionedHashes []common.Hash, beaconRoot *common.Hash, requests [][]byte) (*types.Block, error) { func ExecutableDataToBlockNoHash(data ExecutableData, versionedHashes []common.Hash, beaconRoot *common.Hash, requests [][]byte) (*types.Block, error) {
txs, err := decodeTransactions(data.Transactions) txs, err := DecodeTransactions(data.Transactions)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -313,6 +324,7 @@ func ExecutableDataToBlockNoHash(data ExecutableData, versionedHashes []common.H
BlobGasUsed: data.BlobGasUsed, BlobGasUsed: data.BlobGasUsed,
ParentBeaconRoot: beaconRoot, ParentBeaconRoot: beaconRoot,
RequestsHash: requestsHash, RequestsHash: requestsHash,
SlotNumber: data.SlotNumber,
} }
return types.NewBlockWithHeader(header). return types.NewBlockWithHeader(header).
WithBody(types.Body{Transactions: txs, Uncles: nil, Withdrawals: data.Withdrawals}), WithBody(types.Body{Transactions: txs, Uncles: nil, Withdrawals: data.Withdrawals}),
@ -340,6 +352,7 @@ func BlockToExecutableData(block *types.Block, fees *big.Int, sidecars []*types.
Withdrawals: block.Withdrawals(), Withdrawals: block.Withdrawals(),
BlobGasUsed: block.BlobGasUsed(), BlobGasUsed: block.BlobGasUsed(),
ExcessBlobGas: block.ExcessBlobGas(), ExcessBlobGas: block.ExcessBlobGas(),
SlotNumber: block.SlotNumber(),
} }
// Add blobs. // Add blobs.

View file

@ -5,81 +5,102 @@
# https://github.com/ethereum/execution-spec-tests/releases/download/v5.1.0 # https://github.com/ethereum/execution-spec-tests/releases/download/v5.1.0
a3192784375acec7eaec492799d5c5d0c47a2909a3cc40178898e4ecd20cc416 fixtures_develop.tar.gz a3192784375acec7eaec492799d5c5d0c47a2909a3cc40178898e4ecd20cc416 fixtures_develop.tar.gz
# version:golang 1.25.1 # version:golang 1.25.7
# https://go.dev/dl/ # https://go.dev/dl/
d010c109cee94d80efe681eab46bdea491ac906bf46583c32e9f0dbb0bd1a594 go1.25.1.src.tar.gz 178f2832820274b43e177d32f06a3ebb0129e427dd20a5e4c88df2c1763cf10a go1.25.7.src.tar.gz
1d622468f767a1b9fe1e1e67bd6ce6744d04e0c68712adc689748bbeccb126bb go1.25.1.darwin-amd64.tar.gz 81bf2a1f20633f62d55d826d82dde3b0570cf1408a91e15781b266037299285b go1.25.7.aix-ppc64.tar.gz
68deebb214f39d542e518ebb0598a406ab1b5a22bba8ec9ade9f55fb4dd94a6c go1.25.1.darwin-arm64.tar.gz bf5050a2152f4053837b886e8d9640c829dbacbc3370f913351eb0904cb706f5 go1.25.7.darwin-amd64.tar.gz
d03cdcbc9bd8baf5cf028de390478e9e2b3e4d0afe5a6582dedc19bfe6a263b2 go1.25.1.linux-386.tar.gz ff18369ffad05c57d5bed888b660b31385f3c913670a83ef557cdfd98ea9ae1b go1.25.7.darwin-arm64.tar.gz
7716a0d940a0f6ae8e1f3b3f4f36299dc53e31b16840dbd171254312c41ca12e go1.25.1.linux-amd64.tar.gz c5dccd7f192dd7b305dc209fb316ac1917776d74bd8e4d532ef2772f305bf42a go1.25.7.dragonfly-amd64.tar.gz
65a3e34fb2126f55b34e1edfc709121660e1be2dee6bdf405fc399a63a95a87d go1.25.1.linux-arm64.tar.gz a2de97c8ac74bf64b0ae73fe9d379e61af530e061bc7f8f825044172ffe61a8b go1.25.7.freebsd-386.tar.gz
eb949be683e82a99e9861dafd7057e31ea40b161eae6c4cd18fdc0e8c4ae6225 go1.25.1.linux-armv6l.tar.gz 055f9e138787dcafa81eb0314c8ff70c6dd0f6dba1e8a6957fef5d5efd1ab8fd go1.25.7.freebsd-amd64.tar.gz
be13d5479b8c75438f2efcaa8c191fba3af684b3228abc9c99c7aa8502f34424 go1.25.1.windows-386.zip 60e7f7a7c990f0b9539ac8ed668155746997d404643a4eecd47b3dee1b7e710b go1.25.7.freebsd-arm.tar.gz
4a974de310e7ee1d523d2fcedb114ba5fa75408c98eb3652023e55ccf3fa7cab go1.25.1.windows-amd64.zip 631e03d5fd4c526e2f499154d8c6bf4cb081afb2fff171c428722afc9539d53a go1.25.7.freebsd-arm64.tar.gz
45ab4290adbd6ee9e7f18f0d57eaa9008fdbef590882778ed93eac3c8cca06c5 go1.25.1.aix-ppc64.tar.gz 8a264fd685823808140672812e3ad9c43f6ad59444c0dc14cdd3a1351839ddd5 go1.25.7.freebsd-riscv64.tar.gz
2e3c1549bed3124763774d648f291ac42611232f48320ebbd23517c909c09b81 go1.25.1.dragonfly-amd64.tar.gz 57c672447d906a1bcab98f2b11492d54521a791aacbb4994a25169e59cbe289a go1.25.7.illumos-amd64.tar.gz
dc0198dd4ec520e13f26798def8750544edf6448d8e9c43fd2a814e4885932af go1.25.1.freebsd-386.tar.gz 2866517e9ca81e6a2e85a930e9b11bc8a05cfeb2fc6dc6cb2765e7fb3c14b715 go1.25.7.linux-386.tar.gz
c4f1a7e7b258406e6f3b677ecdbd97bbb23ff9c0d44be4eb238a07d360f69ac8 go1.25.1.freebsd-amd64.tar.gz 12e6d6a191091ae27dc31f6efc630e3a3b8ba409baf3573d955b196fdf086005 go1.25.7.linux-amd64.tar.gz
7772fc5ff71ed39297ec0c1599fc54e399642c9b848eac989601040923b0de9c go1.25.1.freebsd-arm.tar.gz ba611a53534135a81067240eff9508cd7e256c560edd5d8c2fef54f083c07129 go1.25.7.linux-arm64.tar.gz
5bb011d5d5b6218b12189f07aa0be618ab2002662fff1ca40afba7389735c207 go1.25.1.freebsd-arm64.tar.gz 1ba07e0eb86b839e72467f4b5c7a5597d07f30bcf5563c951410454f7cda5266 go1.25.7.linux-armv6l.tar.gz
ccac716240cb049bebfafcb7eebc3758512178a4c51fc26da9cc032035d850c8 go1.25.1.freebsd-riscv64.tar.gz 775753fc5952a334c415f08768df2f0b73a3228a16e8f5f63d545daacb4e3357 go1.25.7.linux-loong64.tar.gz
cc53910ffb9fcfdd988a9fa25b5423bae1cfa01b19616be646700e1f5453b466 go1.25.1.illumos-amd64.tar.gz 1a023bb367c5fbb4c637a2f6dc23ff17c6591ad929ce16ea88c74d857153b307 go1.25.7.linux-mips.tar.gz
efe809f923bcedab44bf7be2b3af8d182b512b1bf9c07d302e0c45d26c8f56f3 go1.25.1.linux-loong64.tar.gz a8e97223d8aa6fdfd45f132a4784d2f536bbac5f3d63a24b63d33b6bfe1549af go1.25.7.linux-mips64.tar.gz
c0de33679f6ed68991dc42dc4a602e74a666e3e166c1748ee1b5d1a7ea2ffbb2 go1.25.1.linux-mips.tar.gz eb9edb6223330d5e20275667c65dea076b064c08e595fe4eba5d7d6055cfaccf go1.25.7.linux-mips64le.tar.gz
c270f7b0c0bdfbcd54fef4481227c40d41bb518f9ae38ee930870f04a0a6a589 go1.25.1.linux-mips64.tar.gz 9c1e693552a5f9bb9e0012d1c5e01456ecefbc59bef53a77305222ce10aba368 go1.25.7.linux-mipsle.tar.gz
80be871ba9c944f34d1868cdf5047e1cf2e1289fe08cdb90e2453d2f0d6965ae go1.25.1.linux-mips64le.tar.gz 28a788798e7329acbbc0ac2caa5e4368b1e5ede646cc24429c991214cfb45c63 go1.25.7.linux-ppc64.tar.gz
9f09defa9bb22ebf2cde76162f40958564e57ce5c2b3649bc063bebcbc9294c1 go1.25.1.linux-mipsle.tar.gz 42124c0edc92464e2b37b2d7fcd3658f0c47ebd6a098732415a522be8cb88e3f go1.25.7.linux-ppc64le.tar.gz
2c76b7d278c1d43ad19d478ad3f0f05e7b782b64b90870701b314fa48b5f43c6 go1.25.1.linux-ppc64.tar.gz 88d59c6893c8425875d6eef8e3434bc2fa2552e5ad4c058c6cd8cd710a0301c8 go1.25.7.linux-riscv64.tar.gz
8b0c8d3ee5b1b5c28b6bd63dc4438792012e01d03b4bf7a61d985c87edab7d1f go1.25.1.linux-ppc64le.tar.gz c6b77facf666dc68195ecab05dbf0ebb4e755b2a8b7734c759880557f1c29b0c go1.25.7.linux-s390x.tar.gz
22fe934a9d0c9c57275716c55b92d46ebd887cec3177c9140705efa9f84ba1e2 go1.25.1.linux-riscv64.tar.gz f14c184d9ade0ee04c7735d4071257b90896ecbde1b32adae84135f055e6399b go1.25.7.netbsd-386.tar.gz
9cfe517ba423f59f3738ca5c3d907c103253cffbbcc2987142f79c5de8c1bf93 go1.25.1.linux-s390x.tar.gz 7e7389e404dca1088c31f0fc07f1dd60891d7182bcd621469c14f7e79eceb3ff go1.25.7.netbsd-amd64.tar.gz
6af8a08353e76205d5b743dd7a3f0126684f96f62be0a31b75daf9837e512c46 go1.25.1.netbsd-386.tar.gz 70388bb3ef2f03dbf1357e9056bd09034a67e018262557354f8cf549766b3f9d go1.25.7.netbsd-arm.tar.gz
e5d534ff362edb1bd8c8e10892b6a027c4c1482454245d1529167676498684c7 go1.25.1.netbsd-amd64.tar.gz 8c1cda9d25bfc9b18d24d5f95fc23949dd3ff99fa408a6cfa40e2cf12b07e362 go1.25.7.netbsd-arm64.tar.gz
88bcf39254fdcea6a199c1c27d787831b652427ce60851ae9e41a3d7eb477f45 go1.25.1.netbsd-arm.tar.gz 42f0d1bfbe39b8401cccb84dd66b30795b97bfc9620dfdc17c5cd4fcf6495cb0 go1.25.7.openbsd-386.tar.gz
d7c2eabe1d04ee47bcaea2816fdd90dbd25d90d4dfa756faa9786c788e4f3a4e go1.25.1.netbsd-arm64.tar.gz e514879c0a28bc32123cd52c4c093de912477fe83f36a6d07517d066ef55391a go1.25.7.openbsd-amd64.tar.gz
14a2845977eb4dde11d929858c437a043467c427db87899935e90cee04a38d72 go1.25.1.openbsd-386.tar.gz 8cd22530695a0218232bf7efea8f162df1697a3106942ac4129b8c3de39ce4ef go1.25.7.openbsd-arm.tar.gz
d27ac54b38a13a09c81e67c82ac70d387037341c85c3399291c73e13e83fdd8c go1.25.1.openbsd-amd64.tar.gz 938720f6ebc0d1c53d7840321d3a31f29fd02496e84a6538f442a9311dc1cc9a go1.25.7.openbsd-arm64.tar.gz
0f4ab5f02500afa4befd51fed1e8b45e4d07ca050f641cc3acc76eaa4027b2c3 go1.25.1.openbsd-arm.tar.gz a4c378b73b98f89a3596c2ef51aabbb28783d9ca29f7e317d8ca07939660ce6f go1.25.7.openbsd-ppc64.tar.gz
d46c3bd156843656f7f3cb0dec27ea51cd926ec3f7b80744bf8156e67c1c812f go1.25.1.openbsd-arm64.tar.gz 937b58734fbeaa8c7941a0e4285e7e84b7885396e8d11c23f9ab1a8ff10ff20e go1.25.7.openbsd-riscv64.tar.gz
c550514c67f22e409be10e40eace761e2e43069f4ef086ae6e60aac736c2b679 go1.25.1.openbsd-ppc64.tar.gz 61a093c8c5244916f25740316386bb9f141545dcf01b06a79d1c78ece488403e go1.25.7.plan9-386.tar.gz
8a09a8714a2556eb13fc1f10b7ce2553fcea4971e3330fc3be0efd24aab45734 go1.25.1.openbsd-riscv64.tar.gz 7fc8f6689c9de8ccb7689d2278035fa83c2d601409101840df6ddfe09ba58699 go1.25.7.plan9-amd64.tar.gz
b0e1fefaf0c7abd71f139a54eee9767944aff5f0bc9d69c968234804884e552f go1.25.1.plan9-386.tar.gz 9661dff8eaeeb62f1c3aadbc5ff189a2e6744e1ec885e32dbcb438f58a34def5 go1.25.7.plan9-arm.tar.gz
e94732c94f149690aa0ab11c26090577211b4a988137cb2c03ec0b54e750402e go1.25.1.plan9-amd64.tar.gz 28ecba0e1d7950c8b29a4a04962dd49c3bf5221f55a44f17d98f369f82859cf4 go1.25.7.solaris-amd64.tar.gz
7eb80e9de1e817d9089a54e8c7c5c8d8ed9e5fb4d4a012fc0f18fc422a484f0c go1.25.1.plan9-arm.tar.gz baa6b488291801642fa620026169e38bec2da2ac187cd3ae2145721cf826bbc3 go1.25.7.windows-386.zip
1261dfad7c4953c0ab90381bc1242dc54e394db7485c59349428d532b2273343 go1.25.1.solaris-amd64.tar.gz c75e5f4ff62d085cc0017be3ad19d5536f46825fa05db06ec468941f847e3228 go1.25.7.windows-amd64.zip
04bc3c078e9e904c4d58d6ac2532a5bdd402bd36a9ff0b5949b3c5e6006a05ee go1.25.1.windows-arm64.zip 807033f85931bc4a589ca8497535dcbeb1f30d506e47fa200f5f04c4a71c3d9f go1.25.7.windows-arm64.zip
# version:golangci 2.4.0 # version:golangci 2.10.1
# https://github.com/golangci/golangci-lint/releases/ # https://github.com/golangci/golangci-lint/releases/
# https://github.com/golangci/golangci-lint/releases/download/v2.4.0/ # https://github.com/golangci/golangci-lint/releases/download/v2.10.1
7904ce63f79db44934939cf7a063086ea0ea98e9b19eba0a9d52ccdd0d21951c golangci-lint-2.4.0-darwin-amd64.tar.gz 66fb0da81b8033b477f97eea420d4b46b230ca172b8bb87c6610109f3772b6b6 golangci-lint-2.10.1-darwin-amd64.tar.gz
cd4dd53fa09b6646baff5fd22b8c64d91db02c21c7496df27992d75d34feec59 golangci-lint-2.4.0-darwin-arm64.tar.gz 03bfadf67e52b441b7ec21305e501c717df93c959836d66c7f97312654acb297 golangci-lint-2.10.1-darwin-arm64.tar.gz
d58f426ebe14cc257e81562b4bf37a488ffb4ffbbb3ec73041eb3b38bb25c0e1 golangci-lint-2.4.0-freebsd-386.tar.gz c9a44658ccc8f7b8dbbd4ae6020ba91c1a5d3987f4d91ced0f7d2bea013e57ca golangci-lint-2.10.1-freebsd-386.tar.gz
6ec4a6177fc6c0dd541fbcb3a7612845266d020d35cc6fa92959220cdf64ca39 golangci-lint-2.4.0-freebsd-amd64.tar.gz a513c5cb4e0f5bd5767001af9d5e97e7868cfc2d9c46739a4df93e713cfb24af golangci-lint-2.10.1-freebsd-amd64.tar.gz
4d473e3e71c01feaa915a0604fb35758b41284fb976cdeac3f842118d9ee7e17 golangci-lint-2.4.0-freebsd-armv6.tar.gz 2ef38eefc4b5cee2febacb75a30579526e5656c16338a921d80e59a8e87d4425 golangci-lint-2.10.1-freebsd-arm64.tar.gz
58727746c6530801a3f9a702a5945556a5eb7e88809222536dd9f9d54cafaeff golangci-lint-2.4.0-freebsd-armv7.tar.gz 8fea6766318b4829e766bbe325f10191d75297dcc44ae35bf374816037878e38 golangci-lint-2.10.1-freebsd-armv6.tar.gz
fbf28c662760e24c32f82f8d16dffdb4a82de7726a52ba1fad94f890c22997ea golangci-lint-2.4.0-illumos-amd64.tar.gz 30b629870574d6254f3e8804e5a74b34f98e1263c9d55465830d739c88b862ed golangci-lint-2.10.1-freebsd-armv7.tar.gz
a15a000a8981ef665e971e0f67e2acda9066a9e37a59344393b7351d8fb49c81 golangci-lint-2.4.0-linux-386.tar.gz c0db839f866ce80b1b6c96167aa101cfe50d9c936f42d942a3c1cbdc1801af68 golangci-lint-2.10.1-illumos-amd64.tar.gz
fae792524c04424c0ac369f5b8076f04b45cf29fc945a370e55d369a8dc11840 golangci-lint-2.4.0-linux-amd64.tar.gz 280eb56636e9175f671cd7b755d7d67f628ae2ed00a164d1e443c43c112034e5 golangci-lint-2.10.1-linux-386.deb
70ac11f55b80ec78fd3a879249cc9255121b8dfd7f7ed4fc46ed137f4abf17e7 golangci-lint-2.4.0-linux-arm64.tar.gz 065a7d99da61dc7dfbfef2e2d7053dd3fa6672598f2747117aa4bb5f45e7df7f golangci-lint-2.10.1-linux-386.rpm
4acdc40e5cebe99e4e7ced358a05b2e71789f409b41cb4f39bbb86ccfa14b1dc golangci-lint-2.4.0-linux-armv6.tar.gz a55918c03bb413b2662287653ab2ae2fef4e37428b247dad6348724adde9d770 golangci-lint-2.10.1-linux-386.tar.gz
2a68749568fa22b4a97cb88dbea655595563c795076536aa6c087f7968784bf3 golangci-lint-2.4.0-linux-armv7.tar.gz 8aa9b3aa14f39745eeb7fc7ff50bcac683e785397d1e4bc9afd2184b12c4ce86 golangci-lint-2.10.1-linux-amd64.deb
9e3369afb023711036dcb0b4f45c9fe2792af962fa1df050c9f6ac101a6c5d73 golangci-lint-2.4.0-linux-loong64.tar.gz 62a111688e9e305032334a2cbc84f4d971b64bb3bffc99d3f80081d57fb25e32 golangci-lint-2.10.1-linux-amd64.rpm
bb9143d6329be2c4dbfffef9564078e7da7d88e7dde6c829b6263d98e072229e golangci-lint-2.4.0-linux-mips64.tar.gz dfa775874cf0561b404a02a8f4481fc69b28091da95aa697259820d429b09c99 golangci-lint-2.10.1-linux-amd64.tar.gz
5ad1765b40d56cd04d4afd805b3ba6f4bfd9b36181da93c31e9b17e483d8608d golangci-lint-2.4.0-linux-mips64le.tar.gz b3f36937e8ea1660739dc0f5c892ea59c9c21ed4e75a91a25957c561f7f79a55 golangci-lint-2.10.1-linux-arm64.deb
918936fb9c0d5ba96bef03cf4348b03938634cfcced49be1e9bb29cb5094fa73 golangci-lint-2.4.0-linux-ppc64le.tar.gz 36d50314d53683b1f1a2a6cedfb5a9468451b481c64ab9e97a8e843ea088074d golangci-lint-2.10.1-linux-arm64.rpm
f7474c638e1fb67ebbdc654b55ca0125377ea0bc88e8fee8d964a4f24eacf828 golangci-lint-2.4.0-linux-riscv64.tar.gz 6652b42ae02915eb2f9cb2a2e0cac99514c8eded8388d88ae3e06e1a52c00de8 golangci-lint-2.10.1-linux-arm64.tar.gz
b617a9543997c8bfceaffa88a75d4e595030c6add69fba800c1e4d8f5fe253dd golangci-lint-2.4.0-linux-s390x.tar.gz a32d8d318e803496812dd3461f250e52ccc7f53c47b95ce404a9cf55778ceb6a golangci-lint-2.10.1-linux-armv6.deb
7db027b03a9ba328f795215b04f594036837bc7dd0dd7cd16776b02a6167981c golangci-lint-2.4.0-netbsd-386.tar.gz 41d065f4c8ea165a1531abea644988ee2e973e4f0b49f9725ed3b979dac45112 golangci-lint-2.10.1-linux-armv6.rpm
52d8f9393f4313df0a62b752c37775e3af0b818e43e8dd28954351542d7c60bc golangci-lint-2.4.0-netbsd-amd64.tar.gz 59159a4df03aabbde69d15c7b7b3df143363cbb41f4bd4b200caffb8e34fb734 golangci-lint-2.10.1-linux-armv6.tar.gz
5c0086027fb5a4af3829e530c8115db4b35d11afe1914322eef528eb8cd38c69 golangci-lint-2.4.0-netbsd-arm64.tar.gz b2e8ec0e050a1e2251dfe1561434999d202f5a3f9fa47ce94378b0fd1662ea5a golangci-lint-2.10.1-linux-armv7.deb
6b779d6ed1aed87cefe195cc11759902b97a76551b593312c6833f2635a3488f golangci-lint-2.4.0-netbsd-armv6.tar.gz 28c9331429a497da27e9c77846063bd0e8275e878ffedb4eb9e9f21d24771cc0 golangci-lint-2.10.1-linux-armv7.rpm
f00d1f4b7ec3468a0f9fffd0d9ea036248b029b7621cbc9a59c449ef94356d09 golangci-lint-2.4.0-netbsd-armv7.tar.gz 818f33e95b273e3769284b25563b51ef6a294e9e25acf140fda5830c075a1a59 golangci-lint-2.10.1-linux-armv7.tar.gz
3ce671b0b42b58e35066493aab75a7e2826c9e079988f1ba5d814a4029faaf87 golangci-lint-2.4.0-windows-386.zip 6b6b85ed4b7c27f51097dd681523000409dde835e86e6e314e87be4bb013e2ab golangci-lint-2.10.1-linux-loong64.deb
003112f7a56746feaabf20b744054bf9acdf900c9e77176383623c4b1d76aaa9 golangci-lint-2.4.0-windows-amd64.zip 94050a0cf06169e2ae44afb307dcaafa7d7c3b38c0c23b5652cf9cb60f0c337f golangci-lint-2.10.1-linux-loong64.rpm
dc0c2092af5d47fc2cd31a1dfe7b4c7e765fab22de98bd21ef2ffcc53ad9f54f golangci-lint-2.4.0-windows-arm64.zip 25820300fccb8c961c1cdcb1f77928040c079e04c43a3a5ceb34b1cb4a1c5c8d golangci-lint-2.10.1-linux-loong64.tar.gz
0263d23e20a260cb1592d35e12a388f99efe2c51b3611fdc66fbd9db1fce664d golangci-lint-2.4.0-windows-armv6.zip 98bf39d10139fdcaa37f94950e9bbb8888660ae468847ae0bf1cb5bf67c1f68b golangci-lint-2.10.1-linux-mips64.deb
9403c03bf648e6313036e0273149d44bad1b9ad53889b6d00e4ccb842ba3c058 golangci-lint-2.4.0-windows-armv7.zip df3ce5f03808dcceaa8b683d1d06e95c885f09b59dc8e15deb840fbe2b3e3299 golangci-lint-2.10.1-linux-mips64.rpm
972508dda523067e6e6a1c8e6609d63bc7c4153819c11b947d439235cf17bac2 golangci-lint-2.10.1-linux-mips64.tar.gz
1d37f2919e183b5bf8b1777ed8c4b163d3b491d0158355a7999d647655cbbeb6 golangci-lint-2.10.1-linux-mips64le.deb
e341d031002cd09a416329ed40f674231051a38544b8f94deb2d1708ce1f4a6f golangci-lint-2.10.1-linux-mips64le.rpm
393560122b9cb5538df0c357d30eb27b6ee563533fbb9b138c8db4fd264002af golangci-lint-2.10.1-linux-mips64le.tar.gz
21ca46b6a96442e8957677a3ca059c6b93674a68a01b1c71f4e5df0ea2e96d19 golangci-lint-2.10.1-linux-ppc64le.deb
57fe0cbca0a9bbdf1547c5e8aa7d278e6896b438d72a541bae6bc62c38b43d1e golangci-lint-2.10.1-linux-ppc64le.rpm
e2883db9fa51584e5e203c64456f29993550a7faadc84e3faccdb48f0669992e golangci-lint-2.10.1-linux-ppc64le.tar.gz
aa6da0e98ab0ba3bb7582e112174c349907d5edfeff90a551dca3c6eecf92fc0 golangci-lint-2.10.1-linux-riscv64.deb
3c68d76cd884a7aad206223a980b9c20bb9ea74b560fa27ed02baf2389189234 golangci-lint-2.10.1-linux-riscv64.rpm
3bca11bfac4197205639cbd4676a5415054e629ac6c12ea10fcbe33ef852d9c3 golangci-lint-2.10.1-linux-riscv64.tar.gz
0c6aed2ce49db2586adbac72c80d871f06feb1caf4c0763a5ca98fec809a8f0b golangci-lint-2.10.1-linux-s390x.deb
16c285adfe1061d69dd8e503be69f87c7202857c6f4add74ac02e3571158fbec golangci-lint-2.10.1-linux-s390x.rpm
21011ad368eb04f024201b832095c6b5f96d0888de194cca5bfe4d9307d6364b golangci-lint-2.10.1-linux-s390x.tar.gz
7b5191e77a70485918712e31ed55159956323e4911bab1b67569c9d86e1b75eb golangci-lint-2.10.1-netbsd-386.tar.gz
07801fd38d293ebad10826f8285525a39ea91ce5ddad77d05bfa90bda9c884a9 golangci-lint-2.10.1-netbsd-amd64.tar.gz
7e7219d71c1bf33b98c328c93dc0560706dd896a1c43c44696e5222fc9d7446e golangci-lint-2.10.1-netbsd-arm64.tar.gz
92fbc90b9eec0e572269b0f5492a2895c426b086a68372fde49b7e4d4020863e golangci-lint-2.10.1-netbsd-armv6.tar.gz
f67b3ae1f47caeefa507a4ebb0c8336958a19011fe48766443212030f75d004b golangci-lint-2.10.1-netbsd-armv7.tar.gz
a40bc091c10cea84eaee1a90b84b65f5e8652113b0a600bb099e4e4d9d7caddb golangci-lint-2.10.1-windows-386.zip
c60c87695e79db8e320f0e5be885059859de52bb5ee5f11be5577828570bc2a3 golangci-lint-2.10.1-windows-amd64.zip
636ab790c8dcea8034aa34aba6031ca3893d68f7eda000460ab534341fadbab1 golangci-lint-2.10.1-windows-arm64.zip
# This is the builder on PPA that will build Go itself (inception-y), don't modify! # This is the builder on PPA that will build Go itself (inception-y), don't modify!
# #

View file

@ -168,8 +168,6 @@ var (
"focal", // 20.04, EOL: 04/2030 "focal", // 20.04, EOL: 04/2030
"jammy", // 22.04, EOL: 04/2032 "jammy", // 22.04, EOL: 04/2032
"noble", // 24.04, EOL: 04/2034 "noble", // 24.04, EOL: 04/2034
"oracular", // 24.10, EOL: 07/2025
"plucky", // 25.04, EOL: 01/2026
} }
// This is where the tests should be unpacked. // This is where the tests should be unpacked.

View file

@ -52,7 +52,7 @@ func (s *Suite) AllTests() []utesting.Test {
{Name: "Ping", Fn: s.TestPing}, {Name: "Ping", Fn: s.TestPing},
{Name: "PingLargeRequestID", Fn: s.TestPingLargeRequestID}, {Name: "PingLargeRequestID", Fn: s.TestPingLargeRequestID},
{Name: "PingMultiIP", Fn: s.TestPingMultiIP}, {Name: "PingMultiIP", Fn: s.TestPingMultiIP},
{Name: "PingHandshakeInterrupted", Fn: s.TestPingHandshakeInterrupted}, {Name: "HandshakeResend", Fn: s.TestHandshakeResend},
{Name: "TalkRequest", Fn: s.TestTalkRequest}, {Name: "TalkRequest", Fn: s.TestTalkRequest},
{Name: "FindnodeZeroDistance", Fn: s.TestFindnodeZeroDistance}, {Name: "FindnodeZeroDistance", Fn: s.TestFindnodeZeroDistance},
{Name: "FindnodeResults", Fn: s.TestFindnodeResults}, {Name: "FindnodeResults", Fn: s.TestFindnodeResults},
@ -158,22 +158,20 @@ the attempt from a different IP.`)
} }
} }
// TestPingHandshakeInterrupted starts a handshake, but doesn't finish it and sends a second ordinary message // TestHandshakeResend starts a handshake, but doesn't finish it and sends a second ordinary message
// packet instead of a handshake message packet. The remote node should respond with // packet instead of a handshake message packet. The remote node should repeat the previous WHOAREYOU
// another WHOAREYOU challenge for the second packet. // challenge for the first PING.
func (s *Suite) TestPingHandshakeInterrupted(t *utesting.T) { func (s *Suite) TestHandshakeResend(t *utesting.T) {
t.Log(`TestPingHandshakeInterrupted starts a handshake, but doesn't finish it and sends a second ordinary message
packet instead of a handshake message packet. The remote node should respond with
another WHOAREYOU challenge for the second packet.`)
conn, l1 := s.listen1(t) conn, l1 := s.listen1(t)
defer conn.close() defer conn.close()
// First PING triggers challenge. // First PING triggers challenge.
ping := &v5wire.Ping{ReqID: conn.nextReqID()} ping := &v5wire.Ping{ReqID: conn.nextReqID()}
conn.write(l1, ping, nil) conn.write(l1, ping, nil)
var challenge1 *v5wire.Whoareyou
switch resp := conn.read(l1).(type) { switch resp := conn.read(l1).(type) {
case *v5wire.Whoareyou: case *v5wire.Whoareyou:
challenge1 = resp
t.Logf("got WHOAREYOU for PING") t.Logf("got WHOAREYOU for PING")
default: default:
t.Fatal("expected WHOAREYOU, got", resp) t.Fatal("expected WHOAREYOU, got", resp)
@ -181,9 +179,16 @@ another WHOAREYOU challenge for the second packet.`)
// Send second PING. // Send second PING.
ping2 := &v5wire.Ping{ReqID: conn.nextReqID()} ping2 := &v5wire.Ping{ReqID: conn.nextReqID()}
switch resp := conn.reqresp(l1, ping2).(type) { conn.write(l1, ping2, nil)
case *v5wire.Pong: switch resp := conn.read(l1).(type) {
checkPong(t, resp, ping2, l1) case *v5wire.Whoareyou:
if resp.Nonce != challenge1.Nonce {
t.Fatalf("wrong nonce %x in WHOAREYOU (want %x)", resp.Nonce[:], challenge1.Nonce[:])
}
if !bytes.Equal(resp.ChallengeData, challenge1.ChallengeData) {
t.Fatalf("wrong ChallengeData in resent WHOAREYOU (want %x)", resp.ChallengeData, challenge1.ChallengeData)
}
resp.Node = conn.remote
default: default:
t.Fatal("expected WHOAREYOU, got", resp) t.Fatal("expected WHOAREYOU, got", resp)
} }

View file

@ -56,6 +56,7 @@ type header struct {
BlobGasUsed *uint64 `json:"blobGasUsed" rlp:"optional"` BlobGasUsed *uint64 `json:"blobGasUsed" rlp:"optional"`
ExcessBlobGas *uint64 `json:"excessBlobGas" rlp:"optional"` ExcessBlobGas *uint64 `json:"excessBlobGas" rlp:"optional"`
ParentBeaconBlockRoot *common.Hash `json:"parentBeaconBlockRoot" rlp:"optional"` ParentBeaconBlockRoot *common.Hash `json:"parentBeaconBlockRoot" rlp:"optional"`
SlotNumber *uint64 `json:"slotNumber" rlp:"optional"`
} }
type headerMarshaling struct { type headerMarshaling struct {
@ -68,6 +69,7 @@ type headerMarshaling struct {
BaseFee *math.HexOrDecimal256 BaseFee *math.HexOrDecimal256
BlobGasUsed *math.HexOrDecimal64 BlobGasUsed *math.HexOrDecimal64
ExcessBlobGas *math.HexOrDecimal64 ExcessBlobGas *math.HexOrDecimal64
SlotNumber *math.HexOrDecimal64
} }
type bbInput struct { type bbInput struct {
@ -136,6 +138,7 @@ func (i *bbInput) ToBlock() *types.Block {
BlobGasUsed: i.Header.BlobGasUsed, BlobGasUsed: i.Header.BlobGasUsed,
ExcessBlobGas: i.Header.ExcessBlobGas, ExcessBlobGas: i.Header.ExcessBlobGas,
ParentBeaconRoot: i.Header.ParentBeaconBlockRoot, ParentBeaconRoot: i.Header.ParentBeaconBlockRoot,
SlotNumber: i.Header.SlotNumber,
} }
// Fill optional values. // Fill optional values.

View file

@ -102,6 +102,7 @@ type stEnv struct {
ParentExcessBlobGas *uint64 `json:"parentExcessBlobGas,omitempty"` ParentExcessBlobGas *uint64 `json:"parentExcessBlobGas,omitempty"`
ParentBlobGasUsed *uint64 `json:"parentBlobGasUsed,omitempty"` ParentBlobGasUsed *uint64 `json:"parentBlobGasUsed,omitempty"`
ParentBeaconBlockRoot *common.Hash `json:"parentBeaconBlockRoot"` ParentBeaconBlockRoot *common.Hash `json:"parentBeaconBlockRoot"`
SlotNumber *uint64 `json:"slotNumber"`
} }
type stEnvMarshaling struct { type stEnvMarshaling struct {
@ -120,6 +121,7 @@ type stEnvMarshaling struct {
ExcessBlobGas *math.HexOrDecimal64 ExcessBlobGas *math.HexOrDecimal64
ParentExcessBlobGas *math.HexOrDecimal64 ParentExcessBlobGas *math.HexOrDecimal64
ParentBlobGasUsed *math.HexOrDecimal64 ParentBlobGasUsed *math.HexOrDecimal64
SlotNumber *math.HexOrDecimal64
} }
type rejectedTx struct { type rejectedTx struct {
@ -195,6 +197,7 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig,
ExcessBlobGas: pre.Env.ParentExcessBlobGas, ExcessBlobGas: pre.Env.ParentExcessBlobGas,
BlobGasUsed: pre.Env.ParentBlobGasUsed, BlobGasUsed: pre.Env.ParentBlobGasUsed,
BaseFee: pre.Env.ParentBaseFee, BaseFee: pre.Env.ParentBaseFee,
SlotNumber: pre.Env.SlotNumber,
} }
header := &types.Header{ header := &types.Header{
Time: pre.Env.Timestamp, Time: pre.Env.Timestamp,
@ -265,6 +268,9 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig,
gaspool.SetGas(prevGas) gaspool.SetGas(prevGas)
continue continue
} }
if receipt.Logs == nil {
receipt.Logs = []*types.Log{}
}
includedTxs = append(includedTxs, tx) includedTxs = append(includedTxs, tx)
if hashError != nil { if hashError != nil {
return nil, nil, nil, NewError(ErrorMissingBlockhash, hashError) return nil, nil, nil, NewError(ErrorMissingBlockhash, hashError)

View file

@ -56,27 +56,35 @@ func (l *fileWritingTracer) Write(p []byte) (n int, err error) {
return n, nil return n, nil
} }
// newFileWriter creates a set of hooks which wraps inner hooks (typically a logger), // newFileWriter creates a tracer which wraps inner hooks (typically a logger),
// and writes the output to a file, one file per transaction. // and writes the output to a file, one file per transaction.
func newFileWriter(baseDir string, innerFn func(out io.Writer) *tracing.Hooks) *tracing.Hooks { func newFileWriter(baseDir string, innerFn func(out io.Writer) *tracing.Hooks) *tracers.Tracer {
t := &fileWritingTracer{ t := &fileWritingTracer{
baseDir: baseDir, baseDir: baseDir,
suffix: "jsonl", suffix: "jsonl",
} }
t.inner = innerFn(t) // instantiate the inner tracer t.inner = innerFn(t) // instantiate the inner tracer
return t.hooks() return &tracers.Tracer{
Hooks: t.hooks(),
GetResult: func() (json.RawMessage, error) { return json.RawMessage("{}"), nil },
Stop: func(err error) {},
}
} }
// newResultWriter creates a set of hooks wraps and invokes an underlying tracer, // newResultWriter creates a tracer that wraps and invokes an underlying tracer,
// and writes the result (getResult-output) to file, one per transaction. // and writes the result (getResult-output) to file, one per transaction.
func newResultWriter(baseDir string, tracer *tracers.Tracer) *tracing.Hooks { func newResultWriter(baseDir string, tracer *tracers.Tracer) *tracers.Tracer {
t := &fileWritingTracer{ t := &fileWritingTracer{
baseDir: baseDir, baseDir: baseDir,
getResult: tracer.GetResult, getResult: tracer.GetResult,
inner: tracer.Hooks, inner: tracer.Hooks,
suffix: "json", suffix: "json",
} }
return t.hooks() return &tracers.Tracer{
Hooks: t.hooks(),
GetResult: func() (json.RawMessage, error) { return json.RawMessage("{}"), nil },
Stop: func(err error) {},
}
} }
// OnTxStart creates a new output-file specific for this transaction, and invokes // OnTxStart creates a new output-file specific for this transaction, and invokes

View file

@ -162,6 +162,11 @@ var (
strings.Join(vm.ActivateableEips(), ", ")), strings.Join(vm.ActivateableEips(), ", ")),
Value: "GrayGlacier", Value: "GrayGlacier",
} }
OpcodeCountFlag = &cli.StringFlag{
Name: "opcode.count",
Usage: "If set, opcode execution counts will be written to this file (relative to output.basedir).",
Value: "",
}
VerbosityFlag = &cli.IntFlag{ VerbosityFlag = &cli.IntFlag{
Name: "verbosity", Name: "verbosity",
Usage: "sets the verbosity level", Usage: "sets the verbosity level",

View file

@ -38,6 +38,7 @@ func (h header) MarshalJSON() ([]byte, error) {
BlobGasUsed *math.HexOrDecimal64 `json:"blobGasUsed" rlp:"optional"` BlobGasUsed *math.HexOrDecimal64 `json:"blobGasUsed" rlp:"optional"`
ExcessBlobGas *math.HexOrDecimal64 `json:"excessBlobGas" rlp:"optional"` ExcessBlobGas *math.HexOrDecimal64 `json:"excessBlobGas" rlp:"optional"`
ParentBeaconBlockRoot *common.Hash `json:"parentBeaconBlockRoot" rlp:"optional"` ParentBeaconBlockRoot *common.Hash `json:"parentBeaconBlockRoot" rlp:"optional"`
SlotNumber *math.HexOrDecimal64 `json:"slotNumber" rlp:"optional"`
} }
var enc header var enc header
enc.ParentHash = h.ParentHash enc.ParentHash = h.ParentHash
@ -60,6 +61,7 @@ func (h header) MarshalJSON() ([]byte, error) {
enc.BlobGasUsed = (*math.HexOrDecimal64)(h.BlobGasUsed) enc.BlobGasUsed = (*math.HexOrDecimal64)(h.BlobGasUsed)
enc.ExcessBlobGas = (*math.HexOrDecimal64)(h.ExcessBlobGas) enc.ExcessBlobGas = (*math.HexOrDecimal64)(h.ExcessBlobGas)
enc.ParentBeaconBlockRoot = h.ParentBeaconBlockRoot enc.ParentBeaconBlockRoot = h.ParentBeaconBlockRoot
enc.SlotNumber = (*math.HexOrDecimal64)(h.SlotNumber)
return json.Marshal(&enc) return json.Marshal(&enc)
} }
@ -86,6 +88,7 @@ func (h *header) UnmarshalJSON(input []byte) error {
BlobGasUsed *math.HexOrDecimal64 `json:"blobGasUsed" rlp:"optional"` BlobGasUsed *math.HexOrDecimal64 `json:"blobGasUsed" rlp:"optional"`
ExcessBlobGas *math.HexOrDecimal64 `json:"excessBlobGas" rlp:"optional"` ExcessBlobGas *math.HexOrDecimal64 `json:"excessBlobGas" rlp:"optional"`
ParentBeaconBlockRoot *common.Hash `json:"parentBeaconBlockRoot" rlp:"optional"` ParentBeaconBlockRoot *common.Hash `json:"parentBeaconBlockRoot" rlp:"optional"`
SlotNumber *math.HexOrDecimal64 `json:"slotNumber" rlp:"optional"`
} }
var dec header var dec header
if err := json.Unmarshal(input, &dec); err != nil { if err := json.Unmarshal(input, &dec); err != nil {
@ -155,5 +158,8 @@ func (h *header) UnmarshalJSON(input []byte) error {
if dec.ParentBeaconBlockRoot != nil { if dec.ParentBeaconBlockRoot != nil {
h.ParentBeaconBlockRoot = dec.ParentBeaconBlockRoot h.ParentBeaconBlockRoot = dec.ParentBeaconBlockRoot
} }
if dec.SlotNumber != nil {
h.SlotNumber = (*uint64)(dec.SlotNumber)
}
return nil return nil
} }

View file

@ -37,6 +37,7 @@ func (s stEnv) MarshalJSON() ([]byte, error) {
ParentExcessBlobGas *math.HexOrDecimal64 `json:"parentExcessBlobGas,omitempty"` ParentExcessBlobGas *math.HexOrDecimal64 `json:"parentExcessBlobGas,omitempty"`
ParentBlobGasUsed *math.HexOrDecimal64 `json:"parentBlobGasUsed,omitempty"` ParentBlobGasUsed *math.HexOrDecimal64 `json:"parentBlobGasUsed,omitempty"`
ParentBeaconBlockRoot *common.Hash `json:"parentBeaconBlockRoot"` ParentBeaconBlockRoot *common.Hash `json:"parentBeaconBlockRoot"`
SlotNumber *math.HexOrDecimal64 `json:"slotNumber"`
} }
var enc stEnv var enc stEnv
enc.Coinbase = common.UnprefixedAddress(s.Coinbase) enc.Coinbase = common.UnprefixedAddress(s.Coinbase)
@ -59,6 +60,7 @@ func (s stEnv) MarshalJSON() ([]byte, error) {
enc.ParentExcessBlobGas = (*math.HexOrDecimal64)(s.ParentExcessBlobGas) enc.ParentExcessBlobGas = (*math.HexOrDecimal64)(s.ParentExcessBlobGas)
enc.ParentBlobGasUsed = (*math.HexOrDecimal64)(s.ParentBlobGasUsed) enc.ParentBlobGasUsed = (*math.HexOrDecimal64)(s.ParentBlobGasUsed)
enc.ParentBeaconBlockRoot = s.ParentBeaconBlockRoot enc.ParentBeaconBlockRoot = s.ParentBeaconBlockRoot
enc.SlotNumber = (*math.HexOrDecimal64)(s.SlotNumber)
return json.Marshal(&enc) return json.Marshal(&enc)
} }
@ -85,6 +87,7 @@ func (s *stEnv) UnmarshalJSON(input []byte) error {
ParentExcessBlobGas *math.HexOrDecimal64 `json:"parentExcessBlobGas,omitempty"` ParentExcessBlobGas *math.HexOrDecimal64 `json:"parentExcessBlobGas,omitempty"`
ParentBlobGasUsed *math.HexOrDecimal64 `json:"parentBlobGasUsed,omitempty"` ParentBlobGasUsed *math.HexOrDecimal64 `json:"parentBlobGasUsed,omitempty"`
ParentBeaconBlockRoot *common.Hash `json:"parentBeaconBlockRoot"` ParentBeaconBlockRoot *common.Hash `json:"parentBeaconBlockRoot"`
SlotNumber *math.HexOrDecimal64 `json:"slotNumber"`
} }
var dec stEnv var dec stEnv
if err := json.Unmarshal(input, &dec); err != nil { if err := json.Unmarshal(input, &dec); err != nil {
@ -154,5 +157,8 @@ func (s *stEnv) UnmarshalJSON(input []byte) error {
if dec.ParentBeaconBlockRoot != nil { if dec.ParentBeaconBlockRoot != nil {
s.ParentBeaconBlockRoot = dec.ParentBeaconBlockRoot s.ParentBeaconBlockRoot = dec.ParentBeaconBlockRoot
} }
if dec.SlotNumber != nil {
s.SlotNumber = (*uint64)(dec.SlotNumber)
}
return nil return nil
} }

View file

@ -37,6 +37,7 @@ import (
"github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/eth/tracers" "github.com/ethereum/go-ethereum/eth/tracers"
"github.com/ethereum/go-ethereum/eth/tracers/logger" "github.com/ethereum/go-ethereum/eth/tracers/logger"
"github.com/ethereum/go-ethereum/eth/tracers/native"
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/tests" "github.com/ethereum/go-ethereum/tests"
@ -167,14 +168,15 @@ func Transition(ctx *cli.Context) error {
} }
// Configure tracer // Configure tracer
var tracer *tracers.Tracer
if ctx.IsSet(TraceTracerFlag.Name) { // Custom tracing if ctx.IsSet(TraceTracerFlag.Name) { // Custom tracing
config := json.RawMessage(ctx.String(TraceTracerConfigFlag.Name)) config := json.RawMessage(ctx.String(TraceTracerConfigFlag.Name))
tracer, err := tracers.DefaultDirectory.New(ctx.String(TraceTracerFlag.Name), innerTracer, err := tracers.DefaultDirectory.New(ctx.String(TraceTracerFlag.Name),
nil, config, chainConfig) nil, config, chainConfig)
if err != nil { if err != nil {
return NewError(ErrorConfig, fmt.Errorf("failed instantiating tracer: %v", err)) return NewError(ErrorConfig, fmt.Errorf("failed instantiating tracer: %v", err))
} }
vmConfig.Tracer = newResultWriter(baseDir, tracer) tracer = newResultWriter(baseDir, innerTracer)
} else if ctx.Bool(TraceFlag.Name) { // JSON opcode tracing } else if ctx.Bool(TraceFlag.Name) { // JSON opcode tracing
logConfig := &logger.Config{ logConfig := &logger.Config{
DisableStack: ctx.Bool(TraceDisableStackFlag.Name), DisableStack: ctx.Bool(TraceDisableStackFlag.Name),
@ -182,20 +184,45 @@ func Transition(ctx *cli.Context) error {
EnableReturnData: ctx.Bool(TraceEnableReturnDataFlag.Name), EnableReturnData: ctx.Bool(TraceEnableReturnDataFlag.Name),
} }
if ctx.Bool(TraceEnableCallFramesFlag.Name) { if ctx.Bool(TraceEnableCallFramesFlag.Name) {
vmConfig.Tracer = newFileWriter(baseDir, func(out io.Writer) *tracing.Hooks { tracer = newFileWriter(baseDir, func(out io.Writer) *tracing.Hooks {
return logger.NewJSONLoggerWithCallFrames(logConfig, out) return logger.NewJSONLoggerWithCallFrames(logConfig, out)
}) })
} else { } else {
vmConfig.Tracer = newFileWriter(baseDir, func(out io.Writer) *tracing.Hooks { tracer = newFileWriter(baseDir, func(out io.Writer) *tracing.Hooks {
return logger.NewJSONLogger(logConfig, out) return logger.NewJSONLogger(logConfig, out)
}) })
} }
} }
// Configure opcode counter
var opcodeTracer *tracers.Tracer
if ctx.IsSet(OpcodeCountFlag.Name) && ctx.String(OpcodeCountFlag.Name) != "" {
opcodeTracer = native.NewOpcodeCounter()
if tracer != nil {
// If we have an existing tracer, multiplex with the opcode tracer
mux, _ := native.NewMuxTracer([]string{"trace", "opcode"}, []*tracers.Tracer{tracer, opcodeTracer})
vmConfig.Tracer = mux.Hooks
} else {
vmConfig.Tracer = opcodeTracer.Hooks
}
} else if tracer != nil {
vmConfig.Tracer = tracer.Hooks
}
// Run the test and aggregate the result // Run the test and aggregate the result
s, result, body, err := prestate.Apply(vmConfig, chainConfig, txIt, ctx.Int64(RewardFlag.Name)) s, result, body, err := prestate.Apply(vmConfig, chainConfig, txIt, ctx.Int64(RewardFlag.Name))
if err != nil { if err != nil {
return err return err
} }
// Write opcode counts if enabled
if opcodeTracer != nil {
fname := ctx.String(OpcodeCountFlag.Name)
result, err := opcodeTracer.GetResult()
if err != nil {
return NewError(ErrorJson, fmt.Errorf("failed getting opcode counts: %v", err))
}
if err := saveFile(baseDir, fname, result); err != nil {
return err
}
}
// Dump the execution result // Dump the execution result
var ( var (
collector = make(Alloc) collector = make(Alloc)

View file

@ -161,6 +161,7 @@ var (
t8ntool.ForknameFlag, t8ntool.ForknameFlag,
t8ntool.ChainIDFlag, t8ntool.ChainIDFlag,
t8ntool.RewardFlag, t8ntool.RewardFlag,
t8ntool.OpcodeCountFlag,
}, },
} }

View file

@ -24,7 +24,7 @@
"status": "0x1", "status": "0x1",
"cumulativeGasUsed": "0x5208", "cumulativeGasUsed": "0x5208",
"logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
"logs": null, "logs": [],
"transactionHash": "0x0557bacce3375c98d806609b8d5043072f0b6a8bae45ae5a67a00d3a1a18d673", "transactionHash": "0x0557bacce3375c98d806609b8d5043072f0b6a8bae45ae5a67a00d3a1a18d673",
"contractAddress": "0x0000000000000000000000000000000000000000", "contractAddress": "0x0000000000000000000000000000000000000000",
"gasUsed": "0x5208", "gasUsed": "0x5208",

View file

@ -12,7 +12,7 @@
"status": "0x0", "status": "0x0",
"cumulativeGasUsed": "0x84d0", "cumulativeGasUsed": "0x84d0",
"logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
"logs": null, "logs": [],
"transactionHash": "0xa98a24882ea90916c6a86da650fbc6b14238e46f0af04a131ce92be897507476", "transactionHash": "0xa98a24882ea90916c6a86da650fbc6b14238e46f0af04a131ce92be897507476",
"contractAddress": "0x0000000000000000000000000000000000000000", "contractAddress": "0x0000000000000000000000000000000000000000",
"gasUsed": "0x84d0", "gasUsed": "0x84d0",
@ -27,7 +27,7 @@
"status": "0x0", "status": "0x0",
"cumulativeGasUsed": "0x109a0", "cumulativeGasUsed": "0x109a0",
"logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
"logs": null, "logs": [],
"transactionHash": "0x36bad80acce7040c45fd32764b5c2b2d2e6f778669fb41791f73f546d56e739a", "transactionHash": "0x36bad80acce7040c45fd32764b5c2b2d2e6f778669fb41791f73f546d56e739a",
"contractAddress": "0x0000000000000000000000000000000000000000", "contractAddress": "0x0000000000000000000000000000000000000000",
"gasUsed": "0x84d0", "gasUsed": "0x84d0",

View file

@ -11,7 +11,7 @@
"status": "0x1", "status": "0x1",
"cumulativeGasUsed": "0x520b", "cumulativeGasUsed": "0x520b",
"logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
"logs": null, "logs": [],
"transactionHash": "0x72fadbef39cd251a437eea619cfeda752271a5faaaa2147df012e112159ffb81", "transactionHash": "0x72fadbef39cd251a437eea619cfeda752271a5faaaa2147df012e112159ffb81",
"contractAddress": "0x0000000000000000000000000000000000000000", "contractAddress": "0x0000000000000000000000000000000000000000",
"gasUsed": "0x520b", "gasUsed": "0x520b",

View file

@ -27,7 +27,7 @@
"status": "0x1", "status": "0x1",
"cumulativeGasUsed": "0xa861", "cumulativeGasUsed": "0xa861",
"logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
"logs": null, "logs": [],
"transactionHash": "0x92ea4a28224d033afb20e0cc2b290d4c7c2d61f6a4800a680e4e19ac962ee941", "transactionHash": "0x92ea4a28224d033afb20e0cc2b290d4c7c2d61f6a4800a680e4e19ac962ee941",
"contractAddress": "0x0000000000000000000000000000000000000000", "contractAddress": "0x0000000000000000000000000000000000000000",
"gasUsed": "0xa861", "gasUsed": "0xa861",
@ -41,7 +41,7 @@
"status": "0x1", "status": "0x1",
"cumulativeGasUsed": "0x10306", "cumulativeGasUsed": "0x10306",
"logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
"logs": null, "logs": [],
"transactionHash": "0x16b1d912f1d664f3f60f4e1b5f296f3c82a64a1a253117b4851d18bc03c4f1da", "transactionHash": "0x16b1d912f1d664f3f60f4e1b5f296f3c82a64a1a253117b4851d18bc03c4f1da",
"contractAddress": "0x0000000000000000000000000000000000000000", "contractAddress": "0x0000000000000000000000000000000000000000",
"gasUsed": "0x5aa5", "gasUsed": "0x5aa5",

View file

@ -23,7 +23,7 @@
"status": "0x1", "status": "0x1",
"cumulativeGasUsed": "0x5208", "cumulativeGasUsed": "0x5208",
"logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
"logs": null, "logs": [],
"transactionHash": "0x92ea4a28224d033afb20e0cc2b290d4c7c2d61f6a4800a680e4e19ac962ee941", "transactionHash": "0x92ea4a28224d033afb20e0cc2b290d4c7c2d61f6a4800a680e4e19ac962ee941",
"contractAddress": "0x0000000000000000000000000000000000000000", "contractAddress": "0x0000000000000000000000000000000000000000",
"gasUsed": "0x5208", "gasUsed": "0x5208",

View file

@ -28,7 +28,7 @@
"status": "0x1", "status": "0x1",
"cumulativeGasUsed": "0xa865", "cumulativeGasUsed": "0xa865",
"logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
"logs": null, "logs": [],
"transactionHash": "0x7508d7139d002a4b3a26a4f12dec0d87cb46075c78bf77a38b569a133b509262", "transactionHash": "0x7508d7139d002a4b3a26a4f12dec0d87cb46075c78bf77a38b569a133b509262",
"contractAddress": "0x0000000000000000000000000000000000000000", "contractAddress": "0x0000000000000000000000000000000000000000",
"gasUsed": "0xa865", "gasUsed": "0xa865",

View file

@ -26,7 +26,7 @@
"status": "0x1", "status": "0x1",
"cumulativeGasUsed": "0x5208", "cumulativeGasUsed": "0x5208",
"logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
"logs": null, "logs": [],
"transactionHash": "0x84f70aba406a55628a0620f26d260f90aeb6ccc55fed6ec2ac13dd4f727032ed", "transactionHash": "0x84f70aba406a55628a0620f26d260f90aeb6ccc55fed6ec2ac13dd4f727032ed",
"contractAddress": "0x0000000000000000000000000000000000000000", "contractAddress": "0x0000000000000000000000000000000000000000",
"gasUsed": "0x5208", "gasUsed": "0x5208",

View file

@ -24,7 +24,7 @@
"status": "0x1", "status": "0x1",
"cumulativeGasUsed": "0x521f", "cumulativeGasUsed": "0x521f",
"logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
"logs": null, "logs": [],
"transactionHash": "0x72fadbef39cd251a437eea619cfeda752271a5faaaa2147df012e112159ffb81", "transactionHash": "0x72fadbef39cd251a437eea619cfeda752271a5faaaa2147df012e112159ffb81",
"contractAddress": "0x0000000000000000000000000000000000000000", "contractAddress": "0x0000000000000000000000000000000000000000",
"gasUsed": "0x521f", "gasUsed": "0x521f",

View file

@ -25,7 +25,7 @@
"status": "0x1", "status": "0x1",
"cumulativeGasUsed": "0x5208", "cumulativeGasUsed": "0x5208",
"logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
"logs": null, "logs": [],
"transactionHash": "0xa98a24882ea90916c6a86da650fbc6b14238e46f0af04a131ce92be897507476", "transactionHash": "0xa98a24882ea90916c6a86da650fbc6b14238e46f0af04a131ce92be897507476",
"contractAddress": "0x0000000000000000000000000000000000000000", "contractAddress": "0x0000000000000000000000000000000000000000",
"gasUsed": "0x5208", "gasUsed": "0x5208",
@ -40,7 +40,7 @@
"status": "0x1", "status": "0x1",
"cumulativeGasUsed": "0xa410", "cumulativeGasUsed": "0xa410",
"logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
"logs": null, "logs": [],
"transactionHash": "0x36bad80acce7040c45fd32764b5c2b2d2e6f778669fb41791f73f546d56e739a", "transactionHash": "0x36bad80acce7040c45fd32764b5c2b2d2e6f778669fb41791f73f546d56e739a",
"contractAddress": "0x0000000000000000000000000000000000000000", "contractAddress": "0x0000000000000000000000000000000000000000",
"gasUsed": "0x5208", "gasUsed": "0x5208",

View file

@ -44,7 +44,7 @@
"root": "0x", "root": "0x",
"status": "0x1", "status": "0x1",
"cumulativeGasUsed": "0x15fa9", "cumulativeGasUsed": "0x15fa9",
"logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","logs": null,"transactionHash": "0x0417aab7c1d8a3989190c3167c132876ce9b8afd99262c5a0f9d06802de3d7ef", "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","logs": [],"transactionHash": "0x0417aab7c1d8a3989190c3167c132876ce9b8afd99262c5a0f9d06802de3d7ef",
"contractAddress": "0x0000000000000000000000000000000000000000", "contractAddress": "0x0000000000000000000000000000000000000000",
"gasUsed": "0x15fa9", "gasUsed": "0x15fa9",
"effectiveGasPrice": null, "effectiveGasPrice": null,

View file

@ -111,6 +111,7 @@ if one is set. Otherwise it prints the genesis from the datadir.`,
utils.MetricsInfluxDBUsernameFlag, utils.MetricsInfluxDBUsernameFlag,
utils.MetricsInfluxDBPasswordFlag, utils.MetricsInfluxDBPasswordFlag,
utils.MetricsInfluxDBTagsFlag, utils.MetricsInfluxDBTagsFlag,
utils.MetricsInfluxDBIntervalFlag,
utils.MetricsInfluxDBTokenFlag, utils.MetricsInfluxDBTokenFlag,
utils.MetricsInfluxDBBucketFlag, utils.MetricsInfluxDBBucketFlag,
utils.MetricsInfluxDBOrganizationFlag, utils.MetricsInfluxDBOrganizationFlag,

View file

@ -377,6 +377,9 @@ func applyMetricConfig(ctx *cli.Context, cfg *gethConfig) {
if ctx.IsSet(utils.MetricsInfluxDBTagsFlag.Name) { if ctx.IsSet(utils.MetricsInfluxDBTagsFlag.Name) {
cfg.Metrics.InfluxDBTags = ctx.String(utils.MetricsInfluxDBTagsFlag.Name) cfg.Metrics.InfluxDBTags = ctx.String(utils.MetricsInfluxDBTagsFlag.Name)
} }
if ctx.IsSet(utils.MetricsInfluxDBIntervalFlag.Name) {
cfg.Metrics.InfluxDBInterval = ctx.Duration(utils.MetricsInfluxDBIntervalFlag.Name)
}
if ctx.IsSet(utils.MetricsEnableInfluxDBV2Flag.Name) { if ctx.IsSet(utils.MetricsEnableInfluxDBV2Flag.Name) {
cfg.Metrics.EnableInfluxDBV2 = ctx.Bool(utils.MetricsEnableInfluxDBV2Flag.Name) cfg.Metrics.EnableInfluxDBV2 = ctx.Bool(utils.MetricsEnableInfluxDBV2Flag.Name)
} }

View file

@ -30,7 +30,7 @@ import (
) )
const ( const (
ipcAPIs = "admin:1.0 debug:1.0 engine:1.0 eth:1.0 miner:1.0 net:1.0 rpc:1.0 txpool:1.0 web3:1.0" ipcAPIs = "admin:1.0 debug:1.0 engine:1.0 eth:1.0 miner:1.0 net:1.0 rpc:1.0 testing:1.0 txpool:1.0 web3:1.0"
httpAPIs = "eth:1.0 net:1.0 rpc:1.0 web3:1.0" httpAPIs = "eth:1.0 net:1.0 rpc:1.0 web3:1.0"
) )

View file

@ -19,6 +19,7 @@ package main
import ( import (
"bytes" "bytes"
"fmt" "fmt"
"math"
"os" "os"
"os/signal" "os/signal"
"path/filepath" "path/filepath"
@ -37,6 +38,7 @@ import (
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/internal/tablewriter"
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/trie" "github.com/ethereum/go-ethereum/trie"
@ -53,6 +55,23 @@ var (
Name: "remove.chain", Name: "remove.chain",
Usage: "If set, selects the state data for removal", Usage: "If set, selects the state data for removal",
} }
inspectTrieTopFlag = &cli.IntFlag{
Name: "top",
Usage: "Print the top N results per ranking category",
Value: 10,
}
inspectTrieDumpPathFlag = &cli.StringFlag{
Name: "dump-path",
Usage: "Path for the trie statistics dump file",
}
inspectTrieSummarizeFlag = &cli.StringFlag{
Name: "summarize",
Usage: "Summarize an existing trie dump file (skip trie traversal)",
}
inspectTrieContractFlag = &cli.StringFlag{
Name: "contract",
Usage: "Inspect only the storage of the given contract address (skips full account trie walk)",
}
removedbCommand = &cli.Command{ removedbCommand = &cli.Command{
Action: removeDB, Action: removeDB,
@ -74,6 +93,7 @@ Remove blockchain and state databases`,
dbCompactCmd, dbCompactCmd,
dbGetCmd, dbGetCmd,
dbDeleteCmd, dbDeleteCmd,
dbInspectTrieCmd,
dbPutCmd, dbPutCmd,
dbGetSlotsCmd, dbGetSlotsCmd,
dbDumpFreezerIndex, dbDumpFreezerIndex,
@ -92,6 +112,22 @@ Remove blockchain and state databases`,
Usage: "Inspect the storage size for each type of data in the database", Usage: "Inspect the storage size for each type of data in the database",
Description: `This commands iterates the entire database. If the optional 'prefix' and 'start' arguments are provided, then the iteration is limited to the given subset of data.`, Description: `This commands iterates the entire database. If the optional 'prefix' and 'start' arguments are provided, then the iteration is limited to the given subset of data.`,
} }
dbInspectTrieCmd = &cli.Command{
Action: inspectTrie,
Name: "inspect-trie",
ArgsUsage: "<blocknum>",
Flags: slices.Concat([]cli.Flag{
utils.ExcludeStorageFlag,
inspectTrieTopFlag,
utils.OutputFileFlag,
inspectTrieDumpPathFlag,
inspectTrieSummarizeFlag,
inspectTrieContractFlag,
}, utils.NetworkFlags, utils.DatabaseFlags),
Usage: "Print detailed trie information about the structure of account trie and storage tries.",
Description: `This commands iterates the entrie trie-backed state. If the 'blocknum' is not specified,
the latest block number will be used by default.`,
}
dbCheckStateContentCmd = &cli.Command{ dbCheckStateContentCmd = &cli.Command{
Action: checkStateContent, Action: checkStateContent,
Name: "check-state-content", Name: "check-state-content",
@ -385,6 +421,88 @@ func checkStateContent(ctx *cli.Context) error {
return nil return nil
} }
func inspectTrie(ctx *cli.Context) error {
topN := ctx.Int(inspectTrieTopFlag.Name)
if topN <= 0 {
return fmt.Errorf("invalid --%s value %d (must be > 0)", inspectTrieTopFlag.Name, topN)
}
config := &trie.InspectConfig{
NoStorage: ctx.Bool(utils.ExcludeStorageFlag.Name),
TopN: topN,
Path: ctx.String(utils.OutputFileFlag.Name),
}
if summarizePath := ctx.String(inspectTrieSummarizeFlag.Name); summarizePath != "" {
if ctx.NArg() > 0 {
return fmt.Errorf("block number argument is not supported with --%s", inspectTrieSummarizeFlag.Name)
}
config.DumpPath = summarizePath
log.Info("Summarizing trie dump", "path", summarizePath, "top", topN)
return trie.Summarize(summarizePath, config)
}
if ctx.NArg() > 1 {
return fmt.Errorf("excessive number of arguments: %v", ctx.Command.ArgsUsage)
}
stack, _ := makeConfigNode(ctx)
db := utils.MakeChainDatabase(ctx, stack, false)
defer stack.Close()
defer db.Close()
var (
trieRoot common.Hash
hash common.Hash
number uint64
)
switch {
case ctx.NArg() == 0 || ctx.Args().Get(0) == "latest":
head := rawdb.ReadHeadHeaderHash(db)
n, ok := rawdb.ReadHeaderNumber(db, head)
if !ok {
return fmt.Errorf("could not load head block hash")
}
number = n
case ctx.Args().Get(0) == "snapshot":
trieRoot = rawdb.ReadSnapshotRoot(db)
number = math.MaxUint64
default:
var err error
number, err = strconv.ParseUint(ctx.Args().Get(0), 10, 64)
if err != nil {
return fmt.Errorf("failed to parse blocknum, Args[0]: %v, err: %v", ctx.Args().Get(0), err)
}
}
if number != math.MaxUint64 {
hash = rawdb.ReadCanonicalHash(db, number)
if hash == (common.Hash{}) {
return fmt.Errorf("canonical hash for block %d not found", number)
}
blockHeader := rawdb.ReadHeader(db, hash, number)
trieRoot = blockHeader.Root
}
if trieRoot == (common.Hash{}) {
log.Error("Empty root hash")
}
config.DumpPath = ctx.String(inspectTrieDumpPathFlag.Name)
if config.DumpPath == "" {
config.DumpPath = stack.ResolvePath("trie-dump.bin")
}
triedb := utils.MakeTrieDatabase(ctx, stack, db, false, true, false)
defer triedb.Close()
if contractAddr := ctx.String(inspectTrieContractFlag.Name); contractAddr != "" {
address := common.HexToAddress(contractAddr)
log.Info("Inspecting contract", "address", address, "root", trieRoot, "block", number)
return trie.InspectContract(triedb, db, trieRoot, address)
}
log.Info("Inspecting trie", "root", trieRoot, "block", number, "dump", config.DumpPath, "top", topN)
return trie.Inspect(triedb, trieRoot, config)
}
func showDBStats(db ethdb.KeyValueStater) { func showDBStats(db ethdb.KeyValueStater) {
stats, err := db.Stat() stats, err := db.Stat()
if err != nil { if err != nil {
@ -759,7 +877,7 @@ func showMetaData(ctx *cli.Context) error {
data = append(data, []string{"headHeader.Root", fmt.Sprintf("%v", h.Root)}) data = append(data, []string{"headHeader.Root", fmt.Sprintf("%v", h.Root)})
data = append(data, []string{"headHeader.Number", fmt.Sprintf("%d (%#x)", h.Number, h.Number)}) data = append(data, []string{"headHeader.Number", fmt.Sprintf("%d (%#x)", h.Number, h.Number)})
} }
table := rawdb.NewTableWriter(os.Stdout) table := tablewriter.NewWriter(os.Stdout)
table.SetHeader([]string{"Field", "Value"}) table.SetHeader([]string{"Field", "Value"})
table.AppendBulk(data) table.AppendBulk(data)
table.Render() table.Render()

View file

@ -216,6 +216,7 @@ var (
utils.MetricsInfluxDBUsernameFlag, utils.MetricsInfluxDBUsernameFlag,
utils.MetricsInfluxDBPasswordFlag, utils.MetricsInfluxDBPasswordFlag,
utils.MetricsInfluxDBTagsFlag, utils.MetricsInfluxDBTagsFlag,
utils.MetricsInfluxDBIntervalFlag,
utils.MetricsEnableInfluxDBV2Flag, utils.MetricsEnableInfluxDBV2Flag,
utils.MetricsInfluxDBTokenFlag, utils.MetricsInfluxDBTokenFlag,
utils.MetricsInfluxDBBucketFlag, utils.MetricsInfluxDBBucketFlag,

View file

@ -218,6 +218,16 @@ var (
Usage: "Max number of elements (0 = no limit)", Usage: "Max number of elements (0 = no limit)",
Value: 0, Value: 0,
} }
TopFlag = &cli.IntFlag{
Name: "top",
Usage: "Print the top N results",
Value: 5,
}
OutputFileFlag = &cli.StringFlag{
Name: "output",
Usage: "Writes the result in json to the output",
Value: "",
}
SnapshotFlag = &cli.BoolFlag{ SnapshotFlag = &cli.BoolFlag{
Name: "snapshot", Name: "snapshot",
@ -1016,6 +1026,13 @@ Please note that --` + MetricsHTTPFlag.Name + ` must be set to start the server.
Category: flags.MetricsCategory, Category: flags.MetricsCategory,
} }
MetricsInfluxDBIntervalFlag = &cli.DurationFlag{
Name: "metrics.influxdb.interval",
Usage: "Interval between metrics reports to InfluxDB (with time unit, e.g. 10s)",
Value: metrics.DefaultConfig.InfluxDBInterval,
Category: flags.MetricsCategory,
}
MetricsEnableInfluxDBV2Flag = &cli.BoolFlag{ MetricsEnableInfluxDBV2Flag = &cli.BoolFlag{
Name: "metrics.influxdbv2", Name: "metrics.influxdbv2",
Usage: "Enable metrics export/push to an external InfluxDB v2 database", Usage: "Enable metrics export/push to an external InfluxDB v2 database",
@ -2246,13 +2263,14 @@ func SetupMetrics(cfg *metrics.Config) {
bucket = cfg.InfluxDBBucket bucket = cfg.InfluxDBBucket
organization = cfg.InfluxDBOrganization organization = cfg.InfluxDBOrganization
tagsMap = SplitTagsFlag(cfg.InfluxDBTags) tagsMap = SplitTagsFlag(cfg.InfluxDBTags)
interval = cfg.InfluxDBInterval
) )
if enableExport { if enableExport {
log.Info("Enabling metrics export to InfluxDB") log.Info("Enabling metrics export to InfluxDB", "interval", interval)
go influxdb.InfluxDBWithTags(metrics.DefaultRegistry, 10*time.Second, endpoint, database, username, password, "geth.", tagsMap) go influxdb.InfluxDBWithTags(metrics.DefaultRegistry, interval, endpoint, database, username, password, "geth.", tagsMap)
} else if enableExportV2 { } else if enableExportV2 {
log.Info("Enabling metrics export to InfluxDB (v2)") log.Info("Enabling metrics export to InfluxDB (v2)", "interval", interval)
go influxdb.InfluxDBV2WithTags(metrics.DefaultRegistry, 10*time.Second, endpoint, token, bucket, organization, "geth.", tagsMap) go influxdb.InfluxDBV2WithTags(metrics.DefaultRegistry, interval, endpoint, token, bucket, organization, "geth.", tagsMap)
} }
// Expvar exporter. // Expvar exporter.

View file

@ -272,6 +272,14 @@ func (beacon *Beacon) verifyHeader(chain consensus.ChainHeaderReader, header, pa
return err return err
} }
} }
amsterdam := chain.Config().IsAmsterdam(header.Number, header.Time)
if amsterdam && header.SlotNumber == nil {
return errors.New("header is missing slotNumber")
}
if !amsterdam && header.SlotNumber != nil {
return fmt.Errorf("invalid slotNumber: have %d, expected nil", *header.SlotNumber)
}
return nil return nil
} }

View file

@ -310,6 +310,8 @@ func (c *Clique) verifyHeader(chain consensus.ChainHeaderReader, header *types.H
return fmt.Errorf("invalid blobGasUsed: have %d, expected nil", *header.BlobGasUsed) return fmt.Errorf("invalid blobGasUsed: have %d, expected nil", *header.BlobGasUsed)
case header.ParentBeaconRoot != nil: case header.ParentBeaconRoot != nil:
return fmt.Errorf("invalid parentBeaconRoot, have %#x, expected nil", *header.ParentBeaconRoot) return fmt.Errorf("invalid parentBeaconRoot, have %#x, expected nil", *header.ParentBeaconRoot)
case header.SlotNumber != nil:
return fmt.Errorf("invalid slotNumber, have %#x, expected nil", *header.SlotNumber)
} }
// All basic checks passed, verify cascading fields // All basic checks passed, verify cascading fields
return c.verifyCascadingFields(chain, header, parents) return c.verifyCascadingFields(chain, header, parents)
@ -694,6 +696,9 @@ func encodeSigHeader(w io.Writer, header *types.Header) {
if header.ParentBeaconRoot != nil { if header.ParentBeaconRoot != nil {
panic("unexpected parent beacon root value in clique") panic("unexpected parent beacon root value in clique")
} }
if header.SlotNumber != nil {
panic("unexpected slot number value in clique")
}
if err := rlp.Encode(w, enc); err != nil { if err := rlp.Encode(w, enc); err != nil {
panic("can't encode: " + err.Error()) panic("can't encode: " + err.Error())
} }

View file

@ -283,6 +283,8 @@ func (ethash *Ethash) verifyHeader(chain consensus.ChainHeaderReader, header, pa
return fmt.Errorf("invalid blobGasUsed: have %d, expected nil", *header.BlobGasUsed) return fmt.Errorf("invalid blobGasUsed: have %d, expected nil", *header.BlobGasUsed)
case header.ParentBeaconRoot != nil: case header.ParentBeaconRoot != nil:
return fmt.Errorf("invalid parentBeaconRoot, have %#x, expected nil", *header.ParentBeaconRoot) return fmt.Errorf("invalid parentBeaconRoot, have %#x, expected nil", *header.ParentBeaconRoot)
case header.SlotNumber != nil:
return fmt.Errorf("invalid slotNumber, have %#x, expected nil", *header.SlotNumber)
} }
// Add some fake checks for tests // Add some fake checks for tests
if ethash.fakeDelay != nil { if ethash.fakeDelay != nil {
@ -559,6 +561,9 @@ func (ethash *Ethash) SealHash(header *types.Header) (hash common.Hash) {
if header.ParentBeaconRoot != nil { if header.ParentBeaconRoot != nil {
panic("parent beacon root set on ethash") panic("parent beacon root set on ethash")
} }
if header.SlotNumber != nil {
panic("slot number set on ethash")
}
rlp.Encode(hasher, enc) rlp.Encode(hasher, enc)
hasher.Sum(hash[:0]) hasher.Sum(hash[:0])
return hash return hash

View file

@ -44,6 +44,7 @@ func NewEVMBlockContext(header *types.Header, chain ChainContext, author *common
baseFee *big.Int baseFee *big.Int
blobBaseFee *big.Int blobBaseFee *big.Int
random *common.Hash random *common.Hash
slotNum uint64
) )
// If we don't have an explicit author (i.e. not mining), extract from the header // If we don't have an explicit author (i.e. not mining), extract from the header
@ -61,6 +62,10 @@ func NewEVMBlockContext(header *types.Header, chain ChainContext, author *common
if header.Difficulty.Sign() == 0 { if header.Difficulty.Sign() == 0 {
random = &header.MixDigest random = &header.MixDigest
} }
if header.SlotNumber != nil {
slotNum = *header.SlotNumber
}
return vm.BlockContext{ return vm.BlockContext{
CanTransfer: CanTransfer, CanTransfer: CanTransfer,
Transfer: Transfer, Transfer: Transfer,
@ -73,6 +78,7 @@ func NewEVMBlockContext(header *types.Header, chain ChainContext, author *common
BlobBaseFee: blobBaseFee, BlobBaseFee: blobBaseFee,
GasLimit: header.GasLimit, GasLimit: header.GasLimit,
Random: random, Random: random,
SlotNum: slotNum,
} }
} }

View file

@ -34,6 +34,7 @@ func (g Genesis) MarshalJSON() ([]byte, error) {
BaseFee *math.HexOrDecimal256 `json:"baseFeePerGas"` BaseFee *math.HexOrDecimal256 `json:"baseFeePerGas"`
ExcessBlobGas *math.HexOrDecimal64 `json:"excessBlobGas"` ExcessBlobGas *math.HexOrDecimal64 `json:"excessBlobGas"`
BlobGasUsed *math.HexOrDecimal64 `json:"blobGasUsed"` BlobGasUsed *math.HexOrDecimal64 `json:"blobGasUsed"`
SlotNumber *uint64 `json:"slotNumber"`
} }
var enc Genesis var enc Genesis
enc.Config = g.Config enc.Config = g.Config
@ -56,6 +57,7 @@ func (g Genesis) MarshalJSON() ([]byte, error) {
enc.BaseFee = (*math.HexOrDecimal256)(g.BaseFee) enc.BaseFee = (*math.HexOrDecimal256)(g.BaseFee)
enc.ExcessBlobGas = (*math.HexOrDecimal64)(g.ExcessBlobGas) enc.ExcessBlobGas = (*math.HexOrDecimal64)(g.ExcessBlobGas)
enc.BlobGasUsed = (*math.HexOrDecimal64)(g.BlobGasUsed) enc.BlobGasUsed = (*math.HexOrDecimal64)(g.BlobGasUsed)
enc.SlotNumber = g.SlotNumber
return json.Marshal(&enc) return json.Marshal(&enc)
} }
@ -77,6 +79,7 @@ func (g *Genesis) UnmarshalJSON(input []byte) error {
BaseFee *math.HexOrDecimal256 `json:"baseFeePerGas"` BaseFee *math.HexOrDecimal256 `json:"baseFeePerGas"`
ExcessBlobGas *math.HexOrDecimal64 `json:"excessBlobGas"` ExcessBlobGas *math.HexOrDecimal64 `json:"excessBlobGas"`
BlobGasUsed *math.HexOrDecimal64 `json:"blobGasUsed"` BlobGasUsed *math.HexOrDecimal64 `json:"blobGasUsed"`
SlotNumber *uint64 `json:"slotNumber"`
} }
var dec Genesis var dec Genesis
if err := json.Unmarshal(input, &dec); err != nil { if err := json.Unmarshal(input, &dec); err != nil {
@ -133,5 +136,8 @@ func (g *Genesis) UnmarshalJSON(input []byte) error {
if dec.BlobGasUsed != nil { if dec.BlobGasUsed != nil {
g.BlobGasUsed = (*uint64)(dec.BlobGasUsed) g.BlobGasUsed = (*uint64)(dec.BlobGasUsed)
} }
if dec.SlotNumber != nil {
g.SlotNumber = dec.SlotNumber
}
return nil return nil
} }

View file

@ -73,6 +73,7 @@ type Genesis struct {
BaseFee *big.Int `json:"baseFeePerGas"` // EIP-1559 BaseFee *big.Int `json:"baseFeePerGas"` // EIP-1559
ExcessBlobGas *uint64 `json:"excessBlobGas"` // EIP-4844 ExcessBlobGas *uint64 `json:"excessBlobGas"` // EIP-4844
BlobGasUsed *uint64 `json:"blobGasUsed"` // EIP-4844 BlobGasUsed *uint64 `json:"blobGasUsed"` // EIP-4844
SlotNumber *uint64 `json:"slotNumber"` // EIP-7843
} }
// copy copies the genesis. // copy copies the genesis.
@ -122,6 +123,7 @@ func ReadGenesis(db ethdb.Database) (*Genesis, error) {
genesis.BaseFee = genesisHeader.BaseFee genesis.BaseFee = genesisHeader.BaseFee
genesis.ExcessBlobGas = genesisHeader.ExcessBlobGas genesis.ExcessBlobGas = genesisHeader.ExcessBlobGas
genesis.BlobGasUsed = genesisHeader.BlobGasUsed genesis.BlobGasUsed = genesisHeader.BlobGasUsed
genesis.SlotNumber = genesisHeader.SlotNumber
return &genesis, nil return &genesis, nil
} }
@ -547,6 +549,12 @@ func (g *Genesis) toBlockWithRoot(root common.Hash) *types.Block {
if conf.IsPrague(num, g.Timestamp) { if conf.IsPrague(num, g.Timestamp) {
head.RequestsHash = &types.EmptyRequestsHash head.RequestsHash = &types.EmptyRequestsHash
} }
if conf.IsAmsterdam(num, g.Timestamp) {
head.SlotNumber = g.SlotNumber
if head.SlotNumber == nil {
head.SlotNumber = new(uint64)
}
}
} }
return types.NewBlock(head, &types.Body{Withdrawals: withdrawals}, nil, trie.NewStackTrie(nil)) return types.NewBlock(head, &types.Body{Withdrawals: withdrawals}, nil, trie.NewStackTrie(nil))
} }

View file

@ -424,13 +424,7 @@ func WriteBodyRLP(db ethdb.KeyValueWriter, hash common.Hash, number uint64, rlp
// HasBody verifies the existence of a block body corresponding to the hash. // HasBody verifies the existence of a block body corresponding to the hash.
func HasBody(db ethdb.Reader, hash common.Hash, number uint64) bool { func HasBody(db ethdb.Reader, hash common.Hash, number uint64) bool {
if isCanon(db, number, hash) { if isCanon(db, number, hash) {
// Block is in ancient store, but bodies can be pruned. return true
// Check if the block number is above the pruning tail.
tail, _ := db.Tail()
if number >= tail {
return true
}
return false
} }
if has, err := db.Has(blockBodyKey(number, hash)); !has || err != nil { if has, err := db.Has(blockBodyKey(number, hash)); !has || err != nil {
return false return false
@ -472,13 +466,7 @@ func DeleteBody(db ethdb.KeyValueWriter, hash common.Hash, number uint64) {
// to a block. // to a block.
func HasReceipts(db ethdb.Reader, hash common.Hash, number uint64) bool { func HasReceipts(db ethdb.Reader, hash common.Hash, number uint64) bool {
if isCanon(db, number, hash) { if isCanon(db, number, hash) {
// Block is in ancient store, but receipts can be pruned. return true
// Check if the block number is above the pruning tail.
tail, _ := db.Tail()
if number >= tail {
return true
}
return false
} }
if has, err := db.Has(blockReceiptsKey(number, hash)); !has || err != nil { if has, err := db.Has(blockReceiptsKey(number, hash)); !has || err != nil {
return false return false

View file

@ -35,6 +35,7 @@ import (
"github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/ethdb/memorydb" "github.com/ethereum/go-ethereum/ethdb/memorydb"
"github.com/ethereum/go-ethereum/internal/tablewriter"
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
"golang.org/x/sync/errgroup" "golang.org/x/sync/errgroup"
) )
@ -663,7 +664,7 @@ func InspectDatabase(db ethdb.Database, keyPrefix, keyStart []byte) error {
total.Add(uint64(ancient.size())) total.Add(uint64(ancient.size()))
} }
table := NewTableWriter(os.Stdout) table := tablewriter.NewWriter(os.Stdout)
table.SetHeader([]string{"Database", "Category", "Size", "Items"}) table.SetHeader([]string{"Database", "Category", "Size", "Items"})
table.SetFooter([]string{"", "Total", common.StorageSize(total.Load()).String(), fmt.Sprintf("%d", count.Load())}) table.SetFooter([]string{"", "Total", common.StorageSize(total.Load()).String(), fmt.Sprintf("%d", count.Load())})
table.AppendBulk(stats) table.AppendBulk(stats)

View file

@ -195,6 +195,19 @@ func (s *stateObject) GetCommittedState(key common.Hash) common.Hash {
// have been handles via pendingStorage above. // have been handles via pendingStorage above.
// 2) we don't have new values, and can deliver empty response back // 2) we don't have new values, and can deliver empty response back
if _, destructed := s.db.stateObjectsDestruct[s.address]; destructed { if _, destructed := s.db.stateObjectsDestruct[s.address]; destructed {
// Invoke the reader regardless and discard the returned value.
// The returned value may not be empty, as it could belong to a
// self-destructed contract.
//
// The read operation is still essential for correctly building
// the block-level access list.
//
// TODO(rjl493456442) the reader interface can be extended with
// Touch, recording the read access without the actual disk load.
_, err := s.db.reader.Storage(s.address, key)
if err != nil {
s.db.setError(err)
}
s.originStorage[key] = common.Hash{} // track the empty slot as origin value s.originStorage[key] = common.Hash{} // track the empty slot as origin value
return common.Hash{} return common.Hash{}
} }

View file

@ -422,6 +422,9 @@ func GenerateBadBlock(parent *types.Block, engine consensus.Engine, txs types.Tr
beaconRoot := common.HexToHash("0xbeac00") beaconRoot := common.HexToHash("0xbeac00")
header.ParentBeaconRoot = &beaconRoot header.ParentBeaconRoot = &beaconRoot
} }
if config.IsAmsterdam(header.Number, header.Time) {
header.SlotNumber = new(uint64)
}
// Assemble and return the final block for sealing // Assemble and return the final block for sealing
body := &types.Body{Transactions: txs} body := &types.Body{Transactions: txs}
if config.IsShanghai(header.Number, header.Time) { if config.IsShanghai(header.Number, header.Time) {

View file

@ -177,9 +177,9 @@ func TransactionToMessage(tx *types.Transaction, s types.Signer, baseFee *big.In
msg := &Message{ msg := &Message{
Nonce: tx.Nonce(), Nonce: tx.Nonce(),
GasLimit: tx.Gas(), GasLimit: tx.Gas(),
GasPrice: new(big.Int).Set(tx.GasPrice()), GasPrice: tx.GasPrice(),
GasFeeCap: new(big.Int).Set(tx.GasFeeCap()), GasFeeCap: tx.GasFeeCap(),
GasTipCap: new(big.Int).Set(tx.GasTipCap()), GasTipCap: tx.GasTipCap(),
To: tx.To(), To: tx.To(),
Value: tx.Value(), Value: tx.Value(),
Data: tx.Data(), Data: tx.Data(),

View file

@ -27,6 +27,7 @@ import (
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/metrics" "github.com/ethereum/go-ethereum/metrics"
"github.com/ethereum/go-ethereum/trie"
) )
var accountTrieLeavesAtDepth [16]*metrics.Counter var accountTrieLeavesAtDepth [16]*metrics.Counter
@ -41,59 +42,68 @@ func init() {
// WitnessStats aggregates statistics for account and storage trie accesses. // WitnessStats aggregates statistics for account and storage trie accesses.
type WitnessStats struct { type WitnessStats struct {
accountTrieLeaves [16]int64 accountTrie *trie.LevelStats
storageTrieLeaves [16]int64 storageTrie *trie.LevelStats
} }
// NewWitnessStats creates a new WitnessStats collector. // NewWitnessStats creates a new WitnessStats collector.
func NewWitnessStats() *WitnessStats { func NewWitnessStats() *WitnessStats {
return &WitnessStats{} return &WitnessStats{
accountTrie: trie.NewLevelStats(),
storageTrie: trie.NewLevelStats(),
}
}
func (s *WitnessStats) init() {
if s.accountTrie == nil {
s.accountTrie = trie.NewLevelStats()
}
if s.storageTrie == nil {
s.storageTrie = trie.NewLevelStats()
}
} }
// Add records trie access depths from the given node paths. // Add records trie access depths from the given node paths.
// If `owner` is the zero hash, accesses are attributed to the account trie; // If `owner` is the zero hash, accesses are attributed to the account trie;
// otherwise, they are attributed to the storage trie of that account. // otherwise, they are attributed to the storage trie of that account.
func (s *WitnessStats) Add(nodes map[string][]byte, owner common.Hash) { func (s *WitnessStats) Add(nodes map[string][]byte, owner common.Hash) {
// Extract paths from the nodes map s.init()
// Extract paths from the nodes map.
paths := slices.Collect(maps.Keys(nodes)) paths := slices.Collect(maps.Keys(nodes))
sort.Strings(paths) sort.Strings(paths)
ownerStat := s.accountTrie
if owner != (common.Hash{}) {
ownerStat = s.storageTrie
}
for i, path := range paths { for i, path := range paths {
// If current path is a prefix of the next path, it's not a leaf. // If current path is a prefix of the next path, it's not a leaf.
// The last path is always a leaf. // The last path is always a leaf.
if i == len(paths)-1 || !strings.HasPrefix(paths[i+1], paths[i]) { if i == len(paths)-1 || !strings.HasPrefix(paths[i+1], paths[i]) {
depth := len(path) ownerStat.AddLeaf(len(path))
if owner == (common.Hash{}) {
if depth >= len(s.accountTrieLeaves) {
depth = len(s.accountTrieLeaves) - 1
}
s.accountTrieLeaves[depth] += 1
} else {
if depth >= len(s.storageTrieLeaves) {
depth = len(s.storageTrieLeaves) - 1
}
s.storageTrieLeaves[depth] += 1
}
} }
} }
} }
// ReportMetrics reports the collected statistics to the global metrics registry. // ReportMetrics reports the collected statistics to the global metrics registry.
func (s *WitnessStats) ReportMetrics(blockNumber uint64) { func (s *WitnessStats) ReportMetrics(blockNumber uint64) {
// Encode the metrics as JSON for easier consumption s.init()
accountLeavesJson, _ := json.Marshal(s.accountTrieLeaves)
storageLeavesJson, _ := json.Marshal(s.storageTrieLeaves)
// Log account trie depth statistics accountTrieLeaves := s.accountTrie.LeafDepths()
log.Info("Account trie depth stats", storageTrieLeaves := s.storageTrie.LeafDepths()
"block", blockNumber,
"leavesAtDepth", string(accountLeavesJson))
log.Info("Storage trie depth stats",
"block", blockNumber,
"leavesAtDepth", string(storageLeavesJson))
for i := 0; i < 16; i++ { // Encode the metrics as JSON for easier consumption.
accountTrieLeavesAtDepth[i].Inc(s.accountTrieLeaves[i]) accountLeavesJSON, _ := json.Marshal(accountTrieLeaves)
storageTrieLeavesAtDepth[i].Inc(s.storageTrieLeaves[i]) storageLeavesJSON, _ := json.Marshal(storageTrieLeaves)
// Log account trie depth statistics.
log.Info("Account trie depth stats", "block", blockNumber, "leavesAtDepth", string(accountLeavesJSON))
log.Info("Storage trie depth stats", "block", blockNumber, "leavesAtDepth", string(storageLeavesJSON))
for i := 0; i < len(accountTrieLeavesAtDepth); i++ {
accountTrieLeavesAtDepth[i].Inc(accountTrieLeaves[i])
storageTrieLeavesAtDepth[i].Inc(storageTrieLeaves[i])
} }
} }

View file

@ -17,18 +17,27 @@
package stateless package stateless
import ( import (
"strings"
"testing" "testing"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
) )
func expectedLeaves(counts map[int]int64) [16]int64 {
var leaves [16]int64
for depth, count := range counts {
leaves[depth] = count
}
return leaves
}
func TestWitnessStatsAdd(t *testing.T) { func TestWitnessStatsAdd(t *testing.T) {
tests := []struct { tests := []struct {
name string name string
nodes map[string][]byte nodes map[string][]byte
owner common.Hash owner common.Hash
expectedAccountLeaves map[int64]int64 expectedAccountLeaves map[int]int64
expectedStorageLeaves map[int64]int64 expectedStorageLeaves map[int]int64
}{ }{
{ {
name: "empty nodes", name: "empty nodes",
@ -41,7 +50,7 @@ func TestWitnessStatsAdd(t *testing.T) {
"": []byte("data"), "": []byte("data"),
}, },
owner: common.Hash{}, owner: common.Hash{},
expectedAccountLeaves: map[int64]int64{0: 1}, expectedAccountLeaves: map[int]int64{0: 1},
}, },
{ {
name: "single account trie leaf", name: "single account trie leaf",
@ -49,7 +58,7 @@ func TestWitnessStatsAdd(t *testing.T) {
"abc": []byte("data"), "abc": []byte("data"),
}, },
owner: common.Hash{}, owner: common.Hash{},
expectedAccountLeaves: map[int64]int64{3: 1}, expectedAccountLeaves: map[int]int64{3: 1},
}, },
{ {
name: "account trie with internal nodes", name: "account trie with internal nodes",
@ -59,7 +68,7 @@ func TestWitnessStatsAdd(t *testing.T) {
"abc": []byte("data3"), "abc": []byte("data3"),
}, },
owner: common.Hash{}, owner: common.Hash{},
expectedAccountLeaves: map[int64]int64{3: 1}, // Only "abc" is a leaf expectedAccountLeaves: map[int]int64{3: 1}, // Only "abc" is a leaf
}, },
{ {
name: "multiple account trie branches", name: "multiple account trie branches",
@ -72,7 +81,7 @@ func TestWitnessStatsAdd(t *testing.T) {
"bcd": []byte("data6"), "bcd": []byte("data6"),
}, },
owner: common.Hash{}, owner: common.Hash{},
expectedAccountLeaves: map[int64]int64{3: 2}, // "abc" (3) + "bcd" (3) expectedAccountLeaves: map[int]int64{3: 2}, // "abc" (3) + "bcd" (3)
}, },
{ {
name: "siblings are all leaves", name: "siblings are all leaves",
@ -82,7 +91,7 @@ func TestWitnessStatsAdd(t *testing.T) {
"ac": []byte("data3"), "ac": []byte("data3"),
}, },
owner: common.Hash{}, owner: common.Hash{},
expectedAccountLeaves: map[int64]int64{2: 3}, expectedAccountLeaves: map[int]int64{2: 3},
}, },
{ {
name: "storage trie leaves", name: "storage trie leaves",
@ -93,7 +102,7 @@ func TestWitnessStatsAdd(t *testing.T) {
"124": []byte("data4"), "124": []byte("data4"),
}, },
owner: common.HexToHash("0x1234"), owner: common.HexToHash("0x1234"),
expectedStorageLeaves: map[int64]int64{3: 2}, // "123" (3) + "124" (3) expectedStorageLeaves: map[int]int64{3: 2}, // "123" (3) + "124" (3)
}, },
{ {
name: "complex trie structure", name: "complex trie structure",
@ -109,7 +118,7 @@ func TestWitnessStatsAdd(t *testing.T) {
"3": []byte("data9"), "3": []byte("data9"),
}, },
owner: common.Hash{}, owner: common.Hash{},
expectedAccountLeaves: map[int64]int64{1: 1, 3: 4}, // "123"(3) + "124"(3) + "234"(3) + "235"(3) + "3"(1) expectedAccountLeaves: map[int]int64{1: 1, 3: 4}, // "123"(3) + "124"(3) + "234"(3) + "235"(3) + "3"(1)
}, },
} }
@ -118,32 +127,59 @@ func TestWitnessStatsAdd(t *testing.T) {
stats := NewWitnessStats() stats := NewWitnessStats()
stats.Add(tt.nodes, tt.owner) stats.Add(tt.nodes, tt.owner)
var expectedAccountTrieLeaves [16]int64 if got, want := stats.accountTrie.LeafDepths(), expectedLeaves(tt.expectedAccountLeaves); got != want {
for depth, count := range tt.expectedAccountLeaves { t.Errorf("account trie leaves = %v, want %v", got, want)
expectedAccountTrieLeaves[depth] = count
} }
var expectedStorageTrieLeaves [16]int64 if got, want := stats.storageTrie.LeafDepths(), expectedLeaves(tt.expectedStorageLeaves); got != want {
for depth, count := range tt.expectedStorageLeaves { t.Errorf("storage trie leaves = %v, want %v", got, want)
expectedStorageTrieLeaves[depth] = count
}
// Check account trie depth
if stats.accountTrieLeaves != expectedAccountTrieLeaves {
t.Errorf("Account trie total depth = %v, want %v", stats.accountTrieLeaves, expectedAccountTrieLeaves)
}
// Check storage trie depth
if stats.storageTrieLeaves != expectedStorageTrieLeaves {
t.Errorf("Storage trie total depth = %v, want %v", stats.storageTrieLeaves, expectedStorageTrieLeaves)
} }
}) })
} }
} }
func TestWitnessStatsStorageTrieAggregation(t *testing.T) {
stats := NewWitnessStats()
ownerA := common.HexToHash("0xa")
ownerB := common.HexToHash("0xb")
stats.Add(map[string][]byte{
"a": []byte("data1"),
"ab": []byte("data2"),
"abc": []byte("data3"),
}, ownerA)
stats.Add(map[string][]byte{
"xy": []byte("data4"),
}, ownerA)
stats.Add(map[string][]byte{
"1": []byte("data5"),
"12": []byte("data6"),
"123": []byte("data7"),
"124": []byte("data8"),
}, ownerB)
if got, want := stats.storageTrie.LeafDepths(), expectedLeaves(map[int]int64{2: 1, 3: 3}); got != want {
t.Errorf("storage leaves = %v, want %v", got, want)
}
if got, want := stats.accountTrie.LeafDepths(), expectedLeaves(nil); got != want {
t.Errorf("account leaves = %v, want %v", got, want)
}
}
func TestWitnessStatsPanicsOnDeepLeaf(t *testing.T) {
stats := NewWitnessStats()
defer func() {
if r := recover(); r == nil {
t.Fatal("expected panic for depth >= 16")
}
}()
stats.Add(map[string][]byte{strings.Repeat("a", 16): []byte("data")}, common.Hash{})
}
func TestWitnessStatsMinMax(t *testing.T) { func TestWitnessStatsMinMax(t *testing.T) {
stats := NewWitnessStats() stats := NewWitnessStats()
// Add some account trie nodes with varying depths // Add some account trie nodes with varying depths.
stats.Add(map[string][]byte{ stats.Add(map[string][]byte{
"a": []byte("data1"), "a": []byte("data1"),
"ab": []byte("data2"), "ab": []byte("data2"),
@ -152,21 +188,21 @@ func TestWitnessStatsMinMax(t *testing.T) {
"abcde": []byte("data5"), "abcde": []byte("data5"),
}, common.Hash{}) }, common.Hash{})
// Only "abcde" is a leaf (depth 5) // Only "abcde" is a leaf (depth 5).
for i, v := range stats.accountTrieLeaves { for i, v := range stats.accountTrie.LeafDepths() {
if v != 0 && i != 5 { if v != 0 && i != 5 {
t.Errorf("leaf found at invalid depth %d", i) t.Errorf("leaf found at invalid depth %d", i)
} }
} }
// Add more leaves with different depths // Add more leaves with different depths.
stats.Add(map[string][]byte{ stats.Add(map[string][]byte{
"x": []byte("data6"), "x": []byte("data6"),
"yz": []byte("data7"), "yz": []byte("data7"),
}, common.Hash{}) }, common.Hash{})
// Now we have leaves at depths 1, 2, and 5 // Now we have leaves at depths 1, 2, and 5.
for i, v := range stats.accountTrieLeaves { for i, v := range stats.accountTrie.LeafDepths() {
if v != 0 && (i != 5 && i != 2 && i != 1) { if v != 0 && (i != 5 && i != 2 && i != 1) {
t.Errorf("leaf found at invalid depth %d", i) t.Errorf("leaf found at invalid depth %d", i)
} }
@ -176,7 +212,7 @@ func TestWitnessStatsMinMax(t *testing.T) {
func TestWitnessStatsAverage(t *testing.T) { func TestWitnessStatsAverage(t *testing.T) {
stats := NewWitnessStats() stats := NewWitnessStats()
// Add nodes that will create leaves at depths 2, 3, and 4 // Add nodes that will create leaves at depths 2, 3, and 4.
stats.Add(map[string][]byte{ stats.Add(map[string][]byte{
"aa": []byte("data1"), "aa": []byte("data1"),
"bb": []byte("data2"), "bb": []byte("data2"),
@ -184,22 +220,22 @@ func TestWitnessStatsAverage(t *testing.T) {
"dddd": []byte("data4"), "dddd": []byte("data4"),
}, common.Hash{}) }, common.Hash{})
// All are leaves: 2 + 2 + 3 + 4 = 11 total, 4 samples // All are leaves: 2 + 2 + 3 + 4 = 11 total, 4 samples.
expectedAvg := int64(11) / int64(4) expectedAvg := int64(11) / int64(4)
var actualAvg, totalSamples int64 var actualAvg, totalSamples int64
for i, c := range stats.accountTrieLeaves { for i, c := range stats.accountTrie.LeafDepths() {
actualAvg += c * int64(i) actualAvg += c * int64(i)
totalSamples += c totalSamples += c
} }
actualAvg = actualAvg / totalSamples actualAvg = actualAvg / totalSamples
if actualAvg != expectedAvg { if actualAvg != expectedAvg {
t.Errorf("Account trie average depth = %d, want %d", actualAvg, expectedAvg) t.Errorf("account trie average depth = %d, want %d", actualAvg, expectedAvg)
} }
} }
func BenchmarkWitnessStatsAdd(b *testing.B) { func BenchmarkWitnessStatsAdd(b *testing.B) {
// Create a realistic trie node structure // Create a realistic trie node structure.
nodes := make(map[string][]byte) nodes := make(map[string][]byte)
for i := 0; i < 100; i++ { for i := 0; i < 100; i++ {
base := string(rune('a' + i%26)) base := string(rune('a' + i%26))

View file

@ -1476,17 +1476,12 @@ func (p *BlobPool) AvailableBlobs(vhashes []common.Hash) int {
// Add inserts a set of blob transactions into the pool if they pass validation (both // Add inserts a set of blob transactions into the pool if they pass validation (both
// consensus validity and pool restrictions). // consensus validity and pool restrictions).
func (p *BlobPool) Add(txs []*types.Transaction, sync bool) []error { func (p *BlobPool) Add(txs []*types.Transaction, sync bool) []error {
var ( errs := make([]error, len(txs))
errs = make([]error, len(txs))
adds = make([]*types.Transaction, 0, len(txs))
)
for i, tx := range txs { for i, tx := range txs {
if errs[i] = p.ValidateTxBasics(tx); errs[i] != nil { if errs[i] = p.ValidateTxBasics(tx); errs[i] != nil {
continue continue
} }
if errs[i] = p.add(tx); errs[i] == nil { errs[i] = p.add(tx)
adds = append(adds, tx.WithoutBlobTxSidecar())
}
} }
return errs return errs
} }

View file

@ -24,13 +24,6 @@ import (
"github.com/holiman/uint256" "github.com/holiman/uint256"
) )
// CodeChange contains the runtime bytecode deployed at an address and the
// transaction index where the deployment took place.
type CodeChange struct {
TxIndex uint16
Code []byte `json:"code,omitempty"`
}
// ConstructionAccountAccess contains post-block account state for mutations as well as // ConstructionAccountAccess contains post-block account state for mutations as well as
// all storage keys that were read during execution. It is used when building block // all storage keys that were read during execution. It is used when building block
// access list during execution. // access list during execution.
@ -55,9 +48,9 @@ type ConstructionAccountAccess struct {
// by tx index. // by tx index.
NonceChanges map[uint16]uint64 `json:"nonceChanges,omitempty"` NonceChanges map[uint16]uint64 `json:"nonceChanges,omitempty"`
// CodeChange is only set for contract accounts which were deployed in // CodeChange contains the post-state contract code of an account keyed
// the block. // by tx index.
CodeChange *CodeChange `json:"codeChange,omitempty"` CodeChange map[uint16][]byte `json:"codeChange,omitempty"`
} }
// NewConstructionAccountAccess initializes the account access object. // NewConstructionAccountAccess initializes the account access object.
@ -67,6 +60,7 @@ func NewConstructionAccountAccess() *ConstructionAccountAccess {
StorageReads: make(map[common.Hash]struct{}), StorageReads: make(map[common.Hash]struct{}),
BalanceChanges: make(map[uint16]*uint256.Int), BalanceChanges: make(map[uint16]*uint256.Int),
NonceChanges: make(map[uint16]uint64), NonceChanges: make(map[uint16]uint64),
CodeChange: make(map[uint16][]byte),
} }
} }
@ -120,10 +114,8 @@ func (b *ConstructionBlockAccessList) CodeChange(address common.Address, txIndex
if _, ok := b.Accounts[address]; !ok { if _, ok := b.Accounts[address]; !ok {
b.Accounts[address] = NewConstructionAccountAccess() b.Accounts[address] = NewConstructionAccountAccess()
} }
b.Accounts[address].CodeChange = &CodeChange{ // TODO(rjl493456442) is it essential to deep-copy the code?
TxIndex: txIndex, b.Accounts[address].CodeChange[txIndex] = bytes.Clone(code)
Code: bytes.Clone(code),
}
} }
// NonceChange records tx post-state nonce of any contract-like accounts whose // NonceChange records tx post-state nonce of any contract-like accounts whose
@ -170,12 +162,11 @@ func (b *ConstructionBlockAccessList) Copy() *ConstructionBlockAccessList {
aaCopy.BalanceChanges = balances aaCopy.BalanceChanges = balances
aaCopy.NonceChanges = maps.Clone(aa.NonceChanges) aaCopy.NonceChanges = maps.Clone(aa.NonceChanges)
if aa.CodeChange != nil { codes := make(map[uint16][]byte, len(aa.CodeChange))
aaCopy.CodeChange = &CodeChange{ for index, code := range aa.CodeChange {
TxIndex: aa.CodeChange.TxIndex, codes[index] = bytes.Clone(code)
Code: bytes.Clone(aa.CodeChange.Code),
}
} }
aaCopy.CodeChange = codes
res.Accounts[addr] = &aaCopy res.Accounts[addr] = &aaCopy
} }
return &res return &res

View file

@ -119,6 +119,13 @@ func (e *encodingSlotWrites) validate() error {
return errors.New("storage write tx indices not in order") return errors.New("storage write tx indices not in order")
} }
// encodingCodeChange contains the runtime bytecode deployed at an address
// and the transaction index where the deployment took place.
type encodingCodeChange struct {
TxIndex uint16 `ssz-size:"2"`
Code []byte `ssz-max:"300000"` // TODO(rjl493456442) shall we put the limit here? The limit will be increased gradually
}
// AccountAccess is the encoding format of ConstructionAccountAccess. // AccountAccess is the encoding format of ConstructionAccountAccess.
type AccountAccess struct { type AccountAccess struct {
Address [20]byte `ssz-size:"20"` // 20-byte Ethereum address Address [20]byte `ssz-size:"20"` // 20-byte Ethereum address
@ -126,7 +133,7 @@ type AccountAccess struct {
StorageReads [][32]byte `ssz-max:"300000"` // Read-only storage keys StorageReads [][32]byte `ssz-max:"300000"` // Read-only storage keys
BalanceChanges []encodingBalanceChange `ssz-max:"300000"` // Balance changes ([tx_index -> post_balance]) BalanceChanges []encodingBalanceChange `ssz-max:"300000"` // Balance changes ([tx_index -> post_balance])
NonceChanges []encodingAccountNonce `ssz-max:"300000"` // Nonce changes ([tx_index -> new_nonce]) NonceChanges []encodingAccountNonce `ssz-max:"300000"` // Nonce changes ([tx_index -> new_nonce])
Code []CodeChange `ssz-max:"1"` // Code changes ([tx_index -> new_code]) CodeChanges []encodingCodeChange `ssz-max:"300000"` // Code changes ([tx_index -> new_code])
} }
// validate converts the account accesses out of encoding format. // validate converts the account accesses out of encoding format.
@ -166,9 +173,16 @@ func (e *AccountAccess) validate() error {
return errors.New("nonce changes not in ascending order by tx index") return errors.New("nonce changes not in ascending order by tx index")
} }
// Convert code change // Check the code changes are sorted in order
if len(e.Code) == 1 { if !slices.IsSortedFunc(e.CodeChanges, func(a, b encodingCodeChange) int {
if len(e.Code[0].Code) > params.MaxCodeSize { return cmp.Compare[uint16](a.TxIndex, b.TxIndex)
}) {
return errors.New("code changes not in ascending order by tx index")
}
for _, change := range e.CodeChanges {
// TODO(rjl493456442): This check should be fork-aware, since the limit may
// differ across forks.
if len(change.Code) > params.MaxCodeSize {
return errors.New("code change contained oversized code") return errors.New("code change contained oversized code")
} }
} }
@ -182,6 +196,8 @@ func (e *AccountAccess) Copy() AccountAccess {
StorageReads: slices.Clone(e.StorageReads), StorageReads: slices.Clone(e.StorageReads),
BalanceChanges: slices.Clone(e.BalanceChanges), BalanceChanges: slices.Clone(e.BalanceChanges),
NonceChanges: slices.Clone(e.NonceChanges), NonceChanges: slices.Clone(e.NonceChanges),
StorageWrites: make([]encodingSlotWrites, 0, len(e.StorageWrites)),
CodeChanges: make([]encodingCodeChange, 0, len(e.CodeChanges)),
} }
for _, storageWrite := range e.StorageWrites { for _, storageWrite := range e.StorageWrites {
res.StorageWrites = append(res.StorageWrites, encodingSlotWrites{ res.StorageWrites = append(res.StorageWrites, encodingSlotWrites{
@ -189,13 +205,11 @@ func (e *AccountAccess) Copy() AccountAccess {
Accesses: slices.Clone(storageWrite.Accesses), Accesses: slices.Clone(storageWrite.Accesses),
}) })
} }
if len(e.Code) == 1 { for _, codeChange := range e.CodeChanges {
res.Code = []CodeChange{ res.CodeChanges = append(res.CodeChanges, encodingCodeChange{
{ TxIndex: codeChange.TxIndex,
e.Code[0].TxIndex, Code: bytes.Clone(codeChange.Code),
bytes.Clone(e.Code[0].Code), })
},
}
} }
return res return res
} }
@ -212,11 +226,11 @@ var _ rlp.Encoder = &ConstructionBlockAccessList{}
func (a *ConstructionAccountAccess) toEncodingObj(addr common.Address) AccountAccess { func (a *ConstructionAccountAccess) toEncodingObj(addr common.Address) AccountAccess {
res := AccountAccess{ res := AccountAccess{
Address: addr, Address: addr,
StorageWrites: make([]encodingSlotWrites, 0), StorageWrites: make([]encodingSlotWrites, 0, len(a.StorageWrites)),
StorageReads: make([][32]byte, 0), StorageReads: make([][32]byte, 0, len(a.StorageReads)),
BalanceChanges: make([]encodingBalanceChange, 0), BalanceChanges: make([]encodingBalanceChange, 0, len(a.BalanceChanges)),
NonceChanges: make([]encodingAccountNonce, 0), NonceChanges: make([]encodingAccountNonce, 0, len(a.NonceChanges)),
Code: nil, CodeChanges: make([]encodingCodeChange, 0, len(a.CodeChange)),
} }
// Convert write slots // Convert write slots
@ -268,13 +282,13 @@ func (a *ConstructionAccountAccess) toEncodingObj(addr common.Address) AccountAc
} }
// Convert code change // Convert code change
if a.CodeChange != nil { codeIndices := slices.Collect(maps.Keys(a.CodeChange))
res.Code = []CodeChange{ slices.SortFunc(codeIndices, cmp.Compare[uint16])
{ for _, idx := range codeIndices {
a.CodeChange.TxIndex, res.CodeChanges = append(res.CodeChanges, encodingCodeChange{
bytes.Clone(a.CodeChange.Code), TxIndex: idx,
}, Code: a.CodeChange[idx],
} })
} }
return res return res
} }
@ -327,9 +341,9 @@ func (e *BlockAccessList) PrettyPrint() string {
printWithIndent(2, fmt.Sprintf("%d: %d", change.TxIdx, change.Nonce)) printWithIndent(2, fmt.Sprintf("%d: %d", change.TxIdx, change.Nonce))
} }
if len(accountDiff.Code) > 0 { printWithIndent(1, "code changes:")
printWithIndent(1, "code:") for _, change := range accountDiff.CodeChanges {
printWithIndent(2, fmt.Sprintf("%d: %x", accountDiff.Code[0].TxIndex, accountDiff.Code[0].Code)) printWithIndent(2, fmt.Sprintf("%d: %x", change.TxIndex, change.Code))
} }
} }
return res.String() return res.String()

View file

@ -49,7 +49,7 @@ func (obj *BlockAccessList) EncodeRLP(_w io.Writer) error {
} }
w.ListEnd(_tmp15) w.ListEnd(_tmp15)
_tmp18 := w.List() _tmp18 := w.List()
for _, _tmp19 := range _tmp2.Code { for _, _tmp19 := range _tmp2.CodeChanges {
_tmp20 := w.List() _tmp20 := w.List()
w.WriteUint64(uint64(_tmp19.TxIndex)) w.WriteUint64(uint64(_tmp19.TxIndex))
w.WriteBytes(_tmp19.Code) w.WriteBytes(_tmp19.Code)
@ -228,13 +228,13 @@ func (obj *BlockAccessList) DecodeRLP(dec *rlp.Stream) error {
return err return err
} }
_tmp2.NonceChanges = _tmp17 _tmp2.NonceChanges = _tmp17
// Code: // CodeChanges:
var _tmp21 []CodeChange var _tmp21 []encodingCodeChange
if _, err := dec.List(); err != nil { if _, err := dec.List(); err != nil {
return err return err
} }
for dec.MoreDataInList() { for dec.MoreDataInList() {
var _tmp22 CodeChange var _tmp22 encodingCodeChange
{ {
if _, err := dec.List(); err != nil { if _, err := dec.List(); err != nil {
return err return err
@ -260,7 +260,7 @@ func (obj *BlockAccessList) DecodeRLP(dec *rlp.Stream) error {
if err := dec.ListEnd(); err != nil { if err := dec.ListEnd(); err != nil {
return err return err
} }
_tmp2.Code = _tmp21 _tmp2.CodeChanges = _tmp21
if err := dec.ListEnd(); err != nil { if err := dec.ListEnd(); err != nil {
return err return err
} }

View file

@ -60,9 +60,8 @@ func makeTestConstructionBAL() *ConstructionBlockAccessList {
1: 2, 1: 2,
2: 6, 2: 6,
}, },
CodeChange: &CodeChange{ CodeChange: map[uint16][]byte{
TxIndex: 0, 0: common.Hex2Bytes("deadbeef"),
Code: common.Hex2Bytes("deadbeef"),
}, },
}, },
common.BytesToAddress([]byte{0xff, 0xff, 0xff}): { common.BytesToAddress([]byte{0xff, 0xff, 0xff}): {
@ -85,6 +84,9 @@ func makeTestConstructionBAL() *ConstructionBlockAccessList {
NonceChanges: map[uint16]uint64{ NonceChanges: map[uint16]uint64{
1: 2, 1: 2,
}, },
CodeChange: map[uint16][]byte{
0: common.Hex2Bytes("deadbeef"),
},
}, },
}, },
} }
@ -179,7 +181,7 @@ func makeTestAccountAccess(sort bool) AccountAccess {
StorageReads: storageReads, StorageReads: storageReads,
BalanceChanges: balances, BalanceChanges: balances,
NonceChanges: nonces, NonceChanges: nonces,
Code: []CodeChange{ CodeChanges: []encodingCodeChange{
{ {
TxIndex: 100, TxIndex: 100,
Code: testrand.Bytes(256), Code: testrand.Bytes(256),

View file

@ -98,6 +98,9 @@ type Header struct {
// RequestsHash was added by EIP-7685 and is ignored in legacy headers. // RequestsHash was added by EIP-7685 and is ignored in legacy headers.
RequestsHash *common.Hash `json:"requestsHash" rlp:"optional"` RequestsHash *common.Hash `json:"requestsHash" rlp:"optional"`
// SlotNumber was added by EIP-7843 and is ignored in legacy headers.
SlotNumber *uint64 `json:"slotNumber" rlp:"optional"`
} }
// field type overrides for gencodec // field type overrides for gencodec
@ -112,6 +115,7 @@ type headerMarshaling struct {
Hash common.Hash `json:"hash"` // adds call to Hash() in MarshalJSON Hash common.Hash `json:"hash"` // adds call to Hash() in MarshalJSON
BlobGasUsed *hexutil.Uint64 BlobGasUsed *hexutil.Uint64
ExcessBlobGas *hexutil.Uint64 ExcessBlobGas *hexutil.Uint64
SlotNumber *hexutil.Uint64
} }
// Hash returns the block hash of the header, which is simply the keccak256 hash of its // Hash returns the block hash of the header, which is simply the keccak256 hash of its
@ -316,6 +320,10 @@ func CopyHeader(h *Header) *Header {
cpy.RequestsHash = new(common.Hash) cpy.RequestsHash = new(common.Hash)
*cpy.RequestsHash = *h.RequestsHash *cpy.RequestsHash = *h.RequestsHash
} }
if h.SlotNumber != nil {
cpy.SlotNumber = new(uint64)
*cpy.SlotNumber = *h.SlotNumber
}
return &cpy return &cpy
} }
@ -416,6 +424,15 @@ func (b *Block) BlobGasUsed() *uint64 {
return blobGasUsed return blobGasUsed
} }
func (b *Block) SlotNumber() *uint64 {
var slotNum *uint64
if b.header.SlotNumber != nil {
slotNum = new(uint64)
*slotNum = *b.header.SlotNumber
}
return slotNum
}
// Size returns the true RLP encoded storage size of the block, either by encoding // Size returns the true RLP encoded storage size of the block, either by encoding
// and returning it, or returning a previously cached value. // and returning it, or returning a previously cached value.
func (b *Block) Size() uint64 { func (b *Block) Size() uint64 {

View file

@ -37,6 +37,7 @@ func (h Header) MarshalJSON() ([]byte, error) {
ExcessBlobGas *hexutil.Uint64 `json:"excessBlobGas" rlp:"optional"` ExcessBlobGas *hexutil.Uint64 `json:"excessBlobGas" rlp:"optional"`
ParentBeaconRoot *common.Hash `json:"parentBeaconBlockRoot" rlp:"optional"` ParentBeaconRoot *common.Hash `json:"parentBeaconBlockRoot" rlp:"optional"`
RequestsHash *common.Hash `json:"requestsHash" rlp:"optional"` RequestsHash *common.Hash `json:"requestsHash" rlp:"optional"`
SlotNumber *hexutil.Uint64 `json:"slotNumber" rlp:"optional"`
Hash common.Hash `json:"hash"` Hash common.Hash `json:"hash"`
} }
var enc Header var enc Header
@ -61,6 +62,7 @@ func (h Header) MarshalJSON() ([]byte, error) {
enc.ExcessBlobGas = (*hexutil.Uint64)(h.ExcessBlobGas) enc.ExcessBlobGas = (*hexutil.Uint64)(h.ExcessBlobGas)
enc.ParentBeaconRoot = h.ParentBeaconRoot enc.ParentBeaconRoot = h.ParentBeaconRoot
enc.RequestsHash = h.RequestsHash enc.RequestsHash = h.RequestsHash
enc.SlotNumber = (*hexutil.Uint64)(h.SlotNumber)
enc.Hash = h.Hash() enc.Hash = h.Hash()
return json.Marshal(&enc) return json.Marshal(&enc)
} }
@ -89,6 +91,7 @@ func (h *Header) UnmarshalJSON(input []byte) error {
ExcessBlobGas *hexutil.Uint64 `json:"excessBlobGas" rlp:"optional"` ExcessBlobGas *hexutil.Uint64 `json:"excessBlobGas" rlp:"optional"`
ParentBeaconRoot *common.Hash `json:"parentBeaconBlockRoot" rlp:"optional"` ParentBeaconRoot *common.Hash `json:"parentBeaconBlockRoot" rlp:"optional"`
RequestsHash *common.Hash `json:"requestsHash" rlp:"optional"` RequestsHash *common.Hash `json:"requestsHash" rlp:"optional"`
SlotNumber *hexutil.Uint64 `json:"slotNumber" rlp:"optional"`
} }
var dec Header var dec Header
if err := json.Unmarshal(input, &dec); err != nil { if err := json.Unmarshal(input, &dec); err != nil {
@ -169,5 +172,8 @@ func (h *Header) UnmarshalJSON(input []byte) error {
if dec.RequestsHash != nil { if dec.RequestsHash != nil {
h.RequestsHash = dec.RequestsHash h.RequestsHash = dec.RequestsHash
} }
if dec.SlotNumber != nil {
h.SlotNumber = (*uint64)(dec.SlotNumber)
}
return nil return nil
} }

View file

@ -43,7 +43,8 @@ func (obj *Header) EncodeRLP(_w io.Writer) error {
_tmp4 := obj.ExcessBlobGas != nil _tmp4 := obj.ExcessBlobGas != nil
_tmp5 := obj.ParentBeaconRoot != nil _tmp5 := obj.ParentBeaconRoot != nil
_tmp6 := obj.RequestsHash != nil _tmp6 := obj.RequestsHash != nil
if _tmp1 || _tmp2 || _tmp3 || _tmp4 || _tmp5 || _tmp6 { _tmp7 := obj.SlotNumber != nil
if _tmp1 || _tmp2 || _tmp3 || _tmp4 || _tmp5 || _tmp6 || _tmp7 {
if obj.BaseFee == nil { if obj.BaseFee == nil {
w.Write(rlp.EmptyString) w.Write(rlp.EmptyString)
} else { } else {
@ -53,41 +54,48 @@ func (obj *Header) EncodeRLP(_w io.Writer) error {
w.WriteBigInt(obj.BaseFee) w.WriteBigInt(obj.BaseFee)
} }
} }
if _tmp2 || _tmp3 || _tmp4 || _tmp5 || _tmp6 { if _tmp2 || _tmp3 || _tmp4 || _tmp5 || _tmp6 || _tmp7 {
if obj.WithdrawalsHash == nil { if obj.WithdrawalsHash == nil {
w.Write([]byte{0x80}) w.Write([]byte{0x80})
} else { } else {
w.WriteBytes(obj.WithdrawalsHash[:]) w.WriteBytes(obj.WithdrawalsHash[:])
} }
} }
if _tmp3 || _tmp4 || _tmp5 || _tmp6 { if _tmp3 || _tmp4 || _tmp5 || _tmp6 || _tmp7 {
if obj.BlobGasUsed == nil { if obj.BlobGasUsed == nil {
w.Write([]byte{0x80}) w.Write([]byte{0x80})
} else { } else {
w.WriteUint64((*obj.BlobGasUsed)) w.WriteUint64((*obj.BlobGasUsed))
} }
} }
if _tmp4 || _tmp5 || _tmp6 { if _tmp4 || _tmp5 || _tmp6 || _tmp7 {
if obj.ExcessBlobGas == nil { if obj.ExcessBlobGas == nil {
w.Write([]byte{0x80}) w.Write([]byte{0x80})
} else { } else {
w.WriteUint64((*obj.ExcessBlobGas)) w.WriteUint64((*obj.ExcessBlobGas))
} }
} }
if _tmp5 || _tmp6 { if _tmp5 || _tmp6 || _tmp7 {
if obj.ParentBeaconRoot == nil { if obj.ParentBeaconRoot == nil {
w.Write([]byte{0x80}) w.Write([]byte{0x80})
} else { } else {
w.WriteBytes(obj.ParentBeaconRoot[:]) w.WriteBytes(obj.ParentBeaconRoot[:])
} }
} }
if _tmp6 { if _tmp6 || _tmp7 {
if obj.RequestsHash == nil { if obj.RequestsHash == nil {
w.Write([]byte{0x80}) w.Write([]byte{0x80})
} else { } else {
w.WriteBytes(obj.RequestsHash[:]) w.WriteBytes(obj.RequestsHash[:])
} }
} }
if _tmp7 {
if obj.SlotNumber == nil {
w.Write([]byte{0x80})
} else {
w.WriteUint64((*obj.SlotNumber))
}
}
w.ListEnd(_tmp0) w.ListEnd(_tmp0)
return w.Flush() return w.Flush()
} }

View file

@ -262,7 +262,7 @@ func ActivePrecompiles(rules params.Rules) []common.Address {
// - the returned bytes, // - the returned bytes,
// - the _remaining_ gas, // - the _remaining_ gas,
// - any error that occurred // - any error that occurred
func RunPrecompiledContract(p PrecompiledContract, input []byte, suppliedGas uint64, logger *tracing.Hooks) (ret []byte, remainingGas uint64, err error) { func RunPrecompiledContract(stateDB StateDB, p PrecompiledContract, address common.Address, input []byte, suppliedGas uint64, logger *tracing.Hooks) (ret []byte, remainingGas uint64, err error) {
gasCost := p.RequiredGas(input) gasCost := p.RequiredGas(input)
if suppliedGas < gasCost { if suppliedGas < gasCost {
return nil, 0, ErrOutOfGas return nil, 0, ErrOutOfGas
@ -271,6 +271,12 @@ func RunPrecompiledContract(p PrecompiledContract, input []byte, suppliedGas uin
logger.OnGasChange(suppliedGas, suppliedGas-gasCost, tracing.GasChangeCallPrecompiledContract) logger.OnGasChange(suppliedGas, suppliedGas-gasCost, tracing.GasChangeCallPrecompiledContract)
} }
suppliedGas -= gasCost suppliedGas -= gasCost
// Touch the precompile for block-level accessList recording once Amsterdam
// fork is activated.
if stateDB != nil {
stateDB.Exist(address)
}
output, err := p.Run(input) output, err := p.Run(input)
return output, suppliedGas, err return output, suppliedGas, err
} }

View file

@ -36,7 +36,7 @@ func FuzzPrecompiledContracts(f *testing.F) {
return return
} }
inWant := string(input) inWant := string(input)
RunPrecompiledContract(p, input, gas, nil) RunPrecompiledContract(nil, p, a, input, gas, nil)
if inHave := string(input); inWant != inHave { if inHave := string(input); inWant != inHave {
t.Errorf("Precompiled %v modified input data", a) t.Errorf("Precompiled %v modified input data", a)
} }

View file

@ -99,7 +99,7 @@ func testPrecompiled(addr string, test precompiledTest, t *testing.T) {
in := common.Hex2Bytes(test.Input) in := common.Hex2Bytes(test.Input)
gas := p.RequiredGas(in) gas := p.RequiredGas(in)
t.Run(fmt.Sprintf("%s-Gas=%d", test.Name, gas), func(t *testing.T) { t.Run(fmt.Sprintf("%s-Gas=%d", test.Name, gas), func(t *testing.T) {
if res, _, err := RunPrecompiledContract(p, in, gas, nil); err != nil { if res, _, err := RunPrecompiledContract(nil, p, common.HexToAddress(addr), in, gas, nil); err != nil {
t.Error(err) t.Error(err)
} else if common.Bytes2Hex(res) != test.Expected { } else if common.Bytes2Hex(res) != test.Expected {
t.Errorf("Expected %v, got %v", test.Expected, common.Bytes2Hex(res)) t.Errorf("Expected %v, got %v", test.Expected, common.Bytes2Hex(res))
@ -121,7 +121,7 @@ func testPrecompiledOOG(addr string, test precompiledTest, t *testing.T) {
gas := test.Gas - 1 gas := test.Gas - 1
t.Run(fmt.Sprintf("%s-Gas=%d", test.Name, gas), func(t *testing.T) { t.Run(fmt.Sprintf("%s-Gas=%d", test.Name, gas), func(t *testing.T) {
_, _, err := RunPrecompiledContract(p, in, gas, nil) _, _, err := RunPrecompiledContract(nil, p, common.HexToAddress(addr), in, gas, nil)
if err.Error() != "out of gas" { if err.Error() != "out of gas" {
t.Errorf("Expected error [out of gas], got [%v]", err) t.Errorf("Expected error [out of gas], got [%v]", err)
} }
@ -138,7 +138,7 @@ func testPrecompiledFailure(addr string, test precompiledFailureTest, t *testing
in := common.Hex2Bytes(test.Input) in := common.Hex2Bytes(test.Input)
gas := p.RequiredGas(in) gas := p.RequiredGas(in)
t.Run(test.Name, func(t *testing.T) { t.Run(test.Name, func(t *testing.T) {
_, _, err := RunPrecompiledContract(p, in, gas, nil) _, _, err := RunPrecompiledContract(nil, p, common.HexToAddress(addr), in, gas, nil)
if err.Error() != test.ExpectedError { if err.Error() != test.ExpectedError {
t.Errorf("Expected error [%v], got [%v]", test.ExpectedError, err) t.Errorf("Expected error [%v], got [%v]", test.ExpectedError, err)
} }
@ -169,7 +169,7 @@ func benchmarkPrecompiled(addr string, test precompiledTest, bench *testing.B) {
start := time.Now() start := time.Now()
for bench.Loop() { for bench.Loop() {
copy(data, in) copy(data, in)
res, _, err = RunPrecompiledContract(p, data, reqGas, nil) res, _, err = RunPrecompiledContract(nil, p, common.HexToAddress(addr), data, reqGas, nil)
} }
elapsed := uint64(time.Since(start)) elapsed := uint64(time.Since(start))
if elapsed < 1 { if elapsed < 1 {

View file

@ -43,6 +43,7 @@ var activators = map[int]func(*JumpTable){
7702: enable7702, 7702: enable7702,
7939: enable7939, 7939: enable7939,
8024: enable8024, 8024: enable8024,
7843: enable7843,
} }
// EnableEIP enables the given EIP on the config. // EnableEIP enables the given EIP on the config.
@ -579,3 +580,19 @@ func enable7702(jt *JumpTable) {
jt[STATICCALL].dynamicGas = gasStaticCallEIP7702 jt[STATICCALL].dynamicGas = gasStaticCallEIP7702
jt[DELEGATECALL].dynamicGas = gasDelegateCallEIP7702 jt[DELEGATECALL].dynamicGas = gasDelegateCallEIP7702
} }
// opSlotNum enables the SLOTNUM opcode
func opSlotNum(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
scope.Stack.push(uint256.NewInt(evm.Context.SlotNum))
return nil, nil
}
// enable7843 enables the SLOTNUM opcode as specified in EIP-7843.
func enable7843(jt *JumpTable) {
jt[SLOTNUM] = &operation{
execute: opSlotNum,
constantGas: GasQuickStep,
minStack: minStack(0, 1),
maxStack: maxStack(0, 1),
}
}

View file

@ -66,6 +66,7 @@ type BlockContext struct {
BaseFee *big.Int // Provides information for BASEFEE (0 if vm runs with NoBaseFee flag and 0 gas price) BaseFee *big.Int // Provides information for BASEFEE (0 if vm runs with NoBaseFee flag and 0 gas price)
BlobBaseFee *big.Int // Provides information for BLOBBASEFEE (0 if vm runs with NoBaseFee flag and 0 blob gas price) BlobBaseFee *big.Int // Provides information for BLOBBASEFEE (0 if vm runs with NoBaseFee flag and 0 blob gas price)
Random *common.Hash // Provides information for PREVRANDAO Random *common.Hash // Provides information for PREVRANDAO
SlotNum uint64 // Provides information for SLOTNUM
} }
// TxContext provides the EVM with information about a transaction. // TxContext provides the EVM with information about a transaction.
@ -144,6 +145,8 @@ func NewEVM(blockCtx BlockContext, statedb StateDB, chainConfig *params.ChainCon
evm.precompiles = activePrecompiledContracts(evm.chainRules) evm.precompiles = activePrecompiledContracts(evm.chainRules)
switch { switch {
case evm.chainRules.IsAmsterdam:
evm.table = &amsterdamInstructionSet
case evm.chainRules.IsOsaka: case evm.chainRules.IsOsaka:
evm.table = &osakaInstructionSet evm.table = &osakaInstructionSet
case evm.chainRules.IsVerkle: case evm.chainRules.IsVerkle:
@ -245,13 +248,14 @@ func (evm *EVM) Call(caller common.Address, addr common.Address, input []byte, g
if evm.depth > int(params.CallCreateDepth) { if evm.depth > int(params.CallCreateDepth) {
return nil, gas, ErrDepth return nil, gas, ErrDepth
} }
// Fail if we're trying to transfer more than the available balance syscall := isSystemCall(caller)
if !value.IsZero() && !evm.Context.CanTransfer(evm.StateDB, caller, value) {
// Fail if we're trying to transfer more than the available balance.
if !syscall && !value.IsZero() && !evm.Context.CanTransfer(evm.StateDB, caller, value) {
return nil, gas, ErrInsufficientBalance return nil, gas, ErrInsufficientBalance
} }
snapshot := evm.StateDB.Snapshot() snapshot := evm.StateDB.Snapshot()
p, isPrecompile := evm.precompile(addr) p, isPrecompile := evm.precompile(addr)
if !evm.StateDB.Exist(addr) { if !evm.StateDB.Exist(addr) {
if !isPrecompile && evm.chainRules.IsEIP4762 && !isSystemCall(caller) { if !isPrecompile && evm.chainRules.IsEIP4762 && !isSystemCall(caller) {
// Add proof of absence to witness // Add proof of absence to witness
@ -275,10 +279,18 @@ func (evm *EVM) Call(caller common.Address, addr common.Address, input []byte, g
} }
evm.StateDB.CreateAccount(addr) evm.StateDB.CreateAccount(addr)
} }
evm.Context.Transfer(evm.StateDB, caller, addr, value) // Perform the value transfer only in non-syscall mode.
// Calling this is required even for zero-value transfers,
// to ensure the state clearing mechanism is applied.
if !syscall {
evm.Context.Transfer(evm.StateDB, caller, addr, value)
}
if isPrecompile { if isPrecompile {
ret, gas, err = RunPrecompiledContract(p, input, gas, evm.Config.Tracer) var stateDB StateDB
if evm.chainRules.IsAmsterdam {
stateDB = evm.StateDB
}
ret, gas, err = RunPrecompiledContract(stateDB, p, addr, input, gas, evm.Config.Tracer)
} else { } else {
// Initialise a new contract and set the code that is to be used by the EVM. // Initialise a new contract and set the code that is to be used by the EVM.
code := evm.resolveCode(addr) code := evm.resolveCode(addr)
@ -302,7 +314,6 @@ func (evm *EVM) Call(caller common.Address, addr common.Address, input []byte, g
if evm.Config.Tracer != nil && evm.Config.Tracer.OnGasChange != nil { if evm.Config.Tracer != nil && evm.Config.Tracer.OnGasChange != nil {
evm.Config.Tracer.OnGasChange(gas, 0, tracing.GasChangeCallFailedExecution) evm.Config.Tracer.OnGasChange(gas, 0, tracing.GasChangeCallFailedExecution)
} }
gas = 0 gas = 0
} }
// TODO: consider clearing up unused snapshots: // TODO: consider clearing up unused snapshots:
@ -342,7 +353,11 @@ func (evm *EVM) CallCode(caller common.Address, addr common.Address, input []byt
// It is allowed to call precompiles, even via delegatecall // It is allowed to call precompiles, even via delegatecall
if p, isPrecompile := evm.precompile(addr); isPrecompile { if p, isPrecompile := evm.precompile(addr); isPrecompile {
ret, gas, err = RunPrecompiledContract(p, input, gas, evm.Config.Tracer) var stateDB StateDB
if evm.chainRules.IsAmsterdam {
stateDB = evm.StateDB
}
ret, gas, err = RunPrecompiledContract(stateDB, p, addr, input, gas, evm.Config.Tracer)
} else { } else {
// Initialise a new contract and set the code that is to be used by the EVM. // Initialise a new contract and set the code that is to be used by the EVM.
// The contract is a scoped environment for this execution context only. // The contract is a scoped environment for this execution context only.
@ -385,7 +400,11 @@ func (evm *EVM) DelegateCall(originCaller common.Address, caller common.Address,
// It is allowed to call precompiles, even via delegatecall // It is allowed to call precompiles, even via delegatecall
if p, isPrecompile := evm.precompile(addr); isPrecompile { if p, isPrecompile := evm.precompile(addr); isPrecompile {
ret, gas, err = RunPrecompiledContract(p, input, gas, evm.Config.Tracer) var stateDB StateDB
if evm.chainRules.IsAmsterdam {
stateDB = evm.StateDB
}
ret, gas, err = RunPrecompiledContract(stateDB, p, addr, input, gas, evm.Config.Tracer)
} else { } else {
// Initialise a new contract and make initialise the delegate values // Initialise a new contract and make initialise the delegate values
// //
@ -437,7 +456,11 @@ func (evm *EVM) StaticCall(caller common.Address, addr common.Address, input []b
evm.StateDB.AddBalance(addr, new(uint256.Int), tracing.BalanceChangeTouchAccount) evm.StateDB.AddBalance(addr, new(uint256.Int), tracing.BalanceChangeTouchAccount)
if p, isPrecompile := evm.precompile(addr); isPrecompile { if p, isPrecompile := evm.precompile(addr); isPrecompile {
ret, gas, err = RunPrecompiledContract(p, input, gas, evm.Config.Tracer) var stateDB StateDB
if evm.chainRules.IsAmsterdam {
stateDB = evm.StateDB
}
ret, gas, err = RunPrecompiledContract(stateDB, p, addr, input, gas, evm.Config.Tracer)
} else { } else {
// Initialise a new contract and set the code that is to be used by the EVM. // Initialise a new contract and set the code that is to be used by the EVM.
// The contract is a scoped environment for this execution context only. // The contract is a scoped environment for this execution context only.

View file

@ -63,6 +63,7 @@ var (
verkleInstructionSet = newVerkleInstructionSet() verkleInstructionSet = newVerkleInstructionSet()
pragueInstructionSet = newPragueInstructionSet() pragueInstructionSet = newPragueInstructionSet()
osakaInstructionSet = newOsakaInstructionSet() osakaInstructionSet = newOsakaInstructionSet()
amsterdamInstructionSet = newAmsterdamInstructionSet()
) )
// JumpTable contains the EVM opcodes supported at a given fork. // JumpTable contains the EVM opcodes supported at a given fork.
@ -92,6 +93,12 @@ func newVerkleInstructionSet() JumpTable {
return validate(instructionSet) return validate(instructionSet)
} }
func newAmsterdamInstructionSet() JumpTable {
instructionSet := newOsakaInstructionSet()
enable7843(&instructionSet) // EIP-7843 (SLOTNUM opcode)
return validate(instructionSet)
}
func newOsakaInstructionSet() JumpTable { func newOsakaInstructionSet() JumpTable {
instructionSet := newPragueInstructionSet() instructionSet := newPragueInstructionSet()
enable7939(&instructionSet) // EIP-7939 (CLZ opcode) enable7939(&instructionSet) // EIP-7939 (CLZ opcode)

View file

@ -105,6 +105,7 @@ const (
BASEFEE OpCode = 0x48 BASEFEE OpCode = 0x48
BLOBHASH OpCode = 0x49 BLOBHASH OpCode = 0x49
BLOBBASEFEE OpCode = 0x4a BLOBBASEFEE OpCode = 0x4a
SLOTNUM OpCode = 0x4b
) )
// 0x50 range - 'storage' and execution. // 0x50 range - 'storage' and execution.
@ -320,6 +321,7 @@ var opCodeToString = [256]string{
BASEFEE: "BASEFEE", BASEFEE: "BASEFEE",
BLOBHASH: "BLOBHASH", BLOBHASH: "BLOBHASH",
BLOBBASEFEE: "BLOBBASEFEE", BLOBBASEFEE: "BLOBBASEFEE",
SLOTNUM: "SLOTNUM",
// 0x50 range - 'storage' and execution. // 0x50 range - 'storage' and execution.
POP: "POP", POP: "POP",
@ -502,6 +504,7 @@ var stringToOp = map[string]OpCode{
"BASEFEE": BASEFEE, "BASEFEE": BASEFEE,
"BLOBHASH": BLOBHASH, "BLOBHASH": BLOBHASH,
"BLOBBASEFEE": BLOBBASEFEE, "BLOBBASEFEE": BLOBBASEFEE,
"SLOTNUM": SLOTNUM,
"DELEGATECALL": DELEGATECALL, "DELEGATECALL": DELEGATECALL,
"STATICCALL": STATICCALL, "STATICCALL": STATICCALL,
"CODESIZE": CODESIZE, "CODESIZE": CODESIZE,

View file

@ -47,9 +47,10 @@ import (
"github.com/ethereum/go-ethereum/rpc" "github.com/ethereum/go-ethereum/rpc"
) )
// Register adds the engine API to the full node. // Register adds the engine API and related APIs to the full node.
func Register(stack *node.Node, backend *eth.Ethereum) error { func Register(stack *node.Node, backend *eth.Ethereum) error {
stack.RegisterAPIs([]rpc.API{ stack.RegisterAPIs([]rpc.API{
newTestingAPI(backend),
{ {
Namespace: "engine", Namespace: "engine",
Service: NewConsensusAPI(backend), Service: NewConsensusAPI(backend),
@ -211,6 +212,28 @@ func (api *ConsensusAPI) ForkchoiceUpdatedV3(update engine.ForkchoiceStateV1, pa
return api.forkchoiceUpdated(update, params, engine.PayloadV3, false) return api.forkchoiceUpdated(update, params, engine.PayloadV3, false)
} }
// ForkchoiceUpdatedV4 is equivalent to V3 with the addition of slot number
// in the payload attributes. It supports only PayloadAttributesV4.
func (api *ConsensusAPI) ForkchoiceUpdatedV4(update engine.ForkchoiceStateV1, params *engine.PayloadAttributes) (engine.ForkChoiceResponse, error) {
if params != nil {
switch {
case params.Withdrawals == nil:
return engine.STATUS_INVALID, attributesErr("missing withdrawals")
case params.BeaconRoot == nil:
return engine.STATUS_INVALID, attributesErr("missing beacon root")
case params.SlotNumber == nil:
return engine.STATUS_INVALID, attributesErr("missing slot number")
case !api.checkFork(params.Timestamp, forks.Amsterdam):
return engine.STATUS_INVALID, unsupportedForkErr("fcuV4 must only be called for amsterdam payloads")
}
}
// TODO(matt): the spec requires that fcu is applied when called on a valid
// hash, even if params are wrong. To do this we need to split up
// forkchoiceUpdate into a function that only updates the head and then a
// function that kicks off block construction.
return api.forkchoiceUpdated(update, params, engine.PayloadV4, false)
}
func (api *ConsensusAPI) forkchoiceUpdated(update engine.ForkchoiceStateV1, payloadAttributes *engine.PayloadAttributes, payloadVersion engine.PayloadVersion, payloadWitness bool) (engine.ForkChoiceResponse, error) { func (api *ConsensusAPI) forkchoiceUpdated(update engine.ForkchoiceStateV1, payloadAttributes *engine.PayloadAttributes, payloadVersion engine.PayloadVersion, payloadWitness bool) (engine.ForkChoiceResponse, error) {
api.forkchoiceLock.Lock() api.forkchoiceLock.Lock()
defer api.forkchoiceLock.Unlock() defer api.forkchoiceLock.Unlock()
@ -344,6 +367,7 @@ func (api *ConsensusAPI) forkchoiceUpdated(update engine.ForkchoiceStateV1, payl
Random: payloadAttributes.Random, Random: payloadAttributes.Random,
Withdrawals: payloadAttributes.Withdrawals, Withdrawals: payloadAttributes.Withdrawals,
BeaconRoot: payloadAttributes.BeaconRoot, BeaconRoot: payloadAttributes.BeaconRoot,
SlotNum: payloadAttributes.SlotNumber,
Version: payloadVersion, Version: payloadVersion,
} }
id := args.Id() id := args.Id()
@ -457,6 +481,18 @@ func (api *ConsensusAPI) GetPayloadV5(payloadID engine.PayloadID) (*engine.Execu
}) })
} }
// GetPayloadV6 returns a cached payload by id. This endpoint should only
// be used after the Amsterdam fork.
func (api *ConsensusAPI) GetPayloadV6(payloadID engine.PayloadID) (*engine.ExecutionPayloadEnvelope, error) {
	// Only V4-versioned payloads built for the Amsterdam fork may be
	// retrieved through this endpoint.
	allowedVersions := []engine.PayloadVersion{engine.PayloadV4}
	allowedForks := []forks.Fork{forks.Amsterdam}
	return api.getPayload(payloadID, false, allowedVersions, allowedForks)
}
// getPayload will retrieve the specified payload and verify it conforms to the // getPayload will retrieve the specified payload and verify it conforms to the
// endpoint's allowed payload versions and forks. // endpoint's allowed payload versions and forks.
// //
@ -699,6 +735,33 @@ func (api *ConsensusAPI) NewPayloadV4(ctx context.Context, params engine.Executa
return api.newPayload(ctx, params, versionedHashes, beaconRoot, requests, false) return api.newPayload(ctx, params, versionedHashes, beaconRoot, requests, false)
} }
// NewPayloadV5 creates an Eth1 block, inserts it in the chain, and returns the status of the chain.
//
// It enforces the Amsterdam-era payload shape: withdrawals, blob-gas fields,
// versioned hashes, beacon root, execution requests and the slot number must
// all be present, and the payload timestamp must fall into the Amsterdam fork.
func (api *ConsensusAPI) NewPayloadV5(ctx context.Context, params engine.ExecutableData, versionedHashes []common.Hash, beaconRoot *common.Hash, executionRequests []hexutil.Bytes) (engine.PayloadStatusV1, error) {
	switch {
	case params.Withdrawals == nil:
		return invalidStatus, paramsErr("nil withdrawals post-shanghai")
	case params.ExcessBlobGas == nil:
		return invalidStatus, paramsErr("nil excessBlobGas post-cancun")
	case params.BlobGasUsed == nil:
		return invalidStatus, paramsErr("nil blobGasUsed post-cancun")
	case versionedHashes == nil:
		return invalidStatus, paramsErr("nil versionedHashes post-cancun")
	case beaconRoot == nil:
		return invalidStatus, paramsErr("nil beaconRoot post-cancun")
	case executionRequests == nil:
		return invalidStatus, paramsErr("nil executionRequests post-prague")
	case params.SlotNumber == nil:
		return invalidStatus, paramsErr("nil slotnumber post-amsterdam")
	case !api.checkFork(params.Timestamp, forks.Amsterdam):
		return invalidStatus, unsupportedForkErr("newPayloadV5 must only be called for amsterdam payloads")
	}
	// Decode and sanity-check the consensus-layer requests before insertion.
	requests := convertRequests(executionRequests)
	if err := validateRequests(requests); err != nil {
		// Use the invalidStatus sentinel for consistency with the
		// parameter checks above.
		return invalidStatus, engine.InvalidParams.With(err)
	}
	return api.newPayload(ctx, params, versionedHashes, beaconRoot, requests, false)
}
func (api *ConsensusAPI) newPayload(ctx context.Context, params engine.ExecutableData, versionedHashes []common.Hash, beaconRoot *common.Hash, requests [][]byte, witness bool) (result engine.PayloadStatusV1, err error) { func (api *ConsensusAPI) newPayload(ctx context.Context, params engine.ExecutableData, versionedHashes []common.Hash, beaconRoot *common.Hash, requests [][]byte, witness bool) (result engine.PayloadStatusV1, err error) {
// The locking here is, strictly, not required. Without these locks, this can happen: // The locking here is, strictly, not required. Without these locks, this can happen:
// //
@ -734,6 +797,10 @@ func (api *ConsensusAPI) newPayload(ctx context.Context, params engine.Executabl
if params.ExcessBlobGas != nil { if params.ExcessBlobGas != nil {
ebg = strconv.Itoa(int(*params.ExcessBlobGas)) ebg = strconv.Itoa(int(*params.ExcessBlobGas))
} }
slotNum := "nil"
if params.SlotNumber != nil {
slotNum = strconv.Itoa(int(*params.SlotNumber))
}
log.Warn("Invalid NewPayload params", log.Warn("Invalid NewPayload params",
"params.Number", params.Number, "params.Number", params.Number,
"params.ParentHash", params.ParentHash, "params.ParentHash", params.ParentHash,
@ -749,6 +816,7 @@ func (api *ConsensusAPI) newPayload(ctx context.Context, params engine.Executabl
"params.BaseFeePerGas", params.BaseFeePerGas, "params.BaseFeePerGas", params.BaseFeePerGas,
"params.BlobGasUsed", bgu, "params.BlobGasUsed", bgu,
"params.ExcessBlobGas", ebg, "params.ExcessBlobGas", ebg,
"params.SlotNumber", slotNum,
"len(params.Transactions)", len(params.Transactions), "len(params.Transactions)", len(params.Transactions),
"len(params.Withdrawals)", len(params.Withdrawals), "len(params.Withdrawals)", len(params.Withdrawals),
"beaconRoot", beaconRoot, "beaconRoot", beaconRoot,

View file

@ -0,0 +1,79 @@
// Copyright 2026 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package catalyst
import (
"errors"
"github.com/ethereum/go-ethereum/beacon/engine"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/eth"
"github.com/ethereum/go-ethereum/miner"
"github.com/ethereum/go-ethereum/rpc"
)
// testingAPI implements the testing_ namespace.
// It's an engine-API adjacent namespace for testing purposes.
type testingAPI struct {
	eth *eth.Ethereum // backing Ethereum service used to access the chain and miner
}
// newTestingAPI constructs the RPC API descriptor exposing the testing_
// namespace backed by the given Ethereum service. The namespace is not
// authenticated, as it is intended for local testing setups only.
func newTestingAPI(backend *eth.Ethereum) rpc.API {
	api := rpc.API{
		Namespace:     "testing",
		Service:       &testingAPI{backend},
		Version:       "1.0",
		Authenticated: false,
	}
	return api
}
// BuildBlockV1 constructs a new payload on top of the current chain head.
//
// The transactions parameter selects the block content:
//   - nil: fill the block from the current transaction pool
//   - non-nil but empty: build an intentionally empty block
//   - non-nil and non-empty: build a block containing exactly these transactions
//
// extraData, when non-nil, is placed into the block's extra-data field.
func (api *testingAPI) BuildBlockV1(parentHash common.Hash, payloadAttributes engine.PayloadAttributes, transactions *[]hexutil.Bytes, extraData *hexutil.Bytes) (*engine.ExecutionPayloadEnvelope, error) {
	if head := api.eth.BlockChain().CurrentBlock(); head.Hash() != parentHash {
		return nil, errors.New("parentHash is not current head")
	}
	// An explicitly provided, empty transaction list requests an empty block.
	buildEmpty := transactions != nil && len(*transactions) == 0

	var decoded []*types.Transaction
	if transactions != nil {
		raw := make([][]byte, len(*transactions))
		for i, enc := range *transactions {
			raw[i] = enc
		}
		var err error
		if decoded, err = engine.DecodeTransactions(raw); err != nil {
			return nil, err
		}
	}
	extra := make([]byte, 0)
	if extraData != nil {
		extra = *extraData
	}
	args := &miner.BuildPayloadArgs{
		Parent:       parentHash,
		Timestamp:    payloadAttributes.Timestamp,
		FeeRecipient: payloadAttributes.SuggestedFeeRecipient,
		Random:       payloadAttributes.Random,
		Withdrawals:  payloadAttributes.Withdrawals,
		BeaconRoot:   payloadAttributes.BeaconRoot,
	}
	return api.eth.Miner().BuildTestingPayload(args, decoded, buildEmpty, extra)
}

View file

@ -0,0 +1,121 @@
// Copyright 2026 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package catalyst
import (
"context"
"math/big"
"testing"
"github.com/ethereum/go-ethereum/beacon/engine"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/params"
)
// TestBuildBlockV1 exercises the testing_buildBlockV1 endpoint against a
// freshly started post-merge chain: building on the current head, rejecting
// a stale parent hash, and the three transaction-selection modes (explicit
// empty list, explicit transaction list, and nil meaning "use the txpool").
func TestBuildBlockV1(t *testing.T) {
	genesis, blocks := generateMergeChain(5, true)
	n, ethservice := startEthService(t, genesis, blocks)
	defer n.Close()

	parent := ethservice.BlockChain().CurrentBlock()
	attrs := engine.PayloadAttributes{
		Timestamp:             parent.Time + 1,
		Random:                crypto.Keccak256Hash([]byte("test")),
		SuggestedFeeRecipient: parent.Coinbase,
		Withdrawals:           nil,
		BeaconRoot:            nil,
	}
	// Pre-sign a single transfer at the current pool nonce; it is reused by
	// the explicit-transactions and txpool subtests below.
	currentNonce, _ := ethservice.APIBackend.GetPoolNonce(context.Background(), testAddr)
	tx, _ := types.SignTx(types.NewTransaction(currentNonce, testAddr, big.NewInt(1), params.TxGas, big.NewInt(params.InitialBaseFee*2), nil), types.LatestSigner(ethservice.BlockChain().Config()), testKey)

	api := &testingAPI{eth: ethservice}

	// Passing nil transactions with an empty txpool should still produce a
	// valid (empty) payload on top of the head.
	t.Run("buildOnCurrentHead", func(t *testing.T) {
		envelope, err := api.BuildBlockV1(parent.Hash(), attrs, nil, nil)
		if err != nil {
			t.Fatalf("BuildBlockV1 failed: %v", err)
		}
		if envelope == nil || envelope.ExecutionPayload == nil {
			t.Fatal("expected non-nil envelope and payload")
		}
		payload := envelope.ExecutionPayload
		if payload.ParentHash != parent.Hash() {
			t.Errorf("parent hash mismatch: got %x want %x", payload.ParentHash, parent.Hash())
		}
		if payload.Number != parent.Number.Uint64()+1 {
			t.Errorf("block number mismatch: got %d want %d", payload.Number, parent.Number.Uint64()+1)
		}
		if payload.Timestamp != attrs.Timestamp {
			t.Errorf("timestamp mismatch: got %d want %d", payload.Timestamp, attrs.Timestamp)
		}
		if payload.FeeRecipient != attrs.SuggestedFeeRecipient {
			t.Errorf("fee recipient mismatch: got %x want %x", payload.FeeRecipient, attrs.SuggestedFeeRecipient)
		}
	})
	// A parent hash that is not the current head must be rejected outright.
	t.Run("wrongParentHash", func(t *testing.T) {
		wrongParent := common.Hash{0x01}
		_, err := api.BuildBlockV1(wrongParent, attrs, nil, nil)
		if err == nil {
			t.Fatal("expected error when parentHash is not current head")
		}
		if err.Error() != "parentHash is not current head" {
			t.Errorf("unexpected error: %v", err)
		}
	})
	// A non-nil but empty transaction list requests an explicitly empty block.
	t.Run("buildEmptyBlock", func(t *testing.T) {
		emptyTxs := []hexutil.Bytes{}
		envelope, err := api.BuildBlockV1(parent.Hash(), attrs, &emptyTxs, nil)
		if err != nil {
			t.Fatalf("BuildBlockV1 with empty txs failed: %v", err)
		}
		if envelope == nil || envelope.ExecutionPayload == nil {
			t.Fatal("expected non-nil envelope and payload")
		}
		if len(envelope.ExecutionPayload.Transactions) != 0 {
			t.Errorf("expected empty block, got %d transactions", len(envelope.ExecutionPayload.Transactions))
		}
	})
	// An explicit transaction list must be included verbatim.
	t.Run("buildBlockWithTransactions", func(t *testing.T) {
		enc, _ := tx.MarshalBinary()
		txs := []hexutil.Bytes{enc}
		envelope, err := api.BuildBlockV1(parent.Hash(), attrs, &txs, nil)
		if err != nil {
			t.Fatalf("BuildBlockV1 with transaction failed: %v", err)
		}
		if len(envelope.ExecutionPayload.Transactions) != 1 {
			t.Errorf("expected 1 transaction, got %d", len(envelope.ExecutionPayload.Transactions))
		}
	})
	// With nil transactions, the payload should be filled from the txpool;
	// this subtest relies on the tx added here and must run after the ones
	// above, which expect an empty pool.
	t.Run("buildBlockWithTransactionsFromTxPool", func(t *testing.T) {
		ethservice.TxPool().Add([]*types.Transaction{tx}, true)
		envelope, err := api.BuildBlockV1(parent.Hash(), attrs, nil, nil)
		if err != nil {
			t.Fatalf("BuildBlockV1 with transaction failed: %v", err)
		}
		if len(envelope.ExecutionPayload.Transactions) != 1 {
			t.Errorf("expected 1 transaction, got %d", len(envelope.ExecutionPayload.Transactions))
		}
	})
}

View file

@ -376,5 +376,6 @@ func RegisterSimulatedBeaconAPIs(stack *node.Node, sim *SimulatedBeacon) {
Service: api, Service: api,
Version: "1.0", Version: "1.0",
}, },
newTestingAPI(sim.eth),
}) })
} }

View file

@ -24,6 +24,7 @@ var (
txAnnounceInMeter = metrics.NewRegisteredMeter("eth/fetcher/transaction/announces/in", nil) txAnnounceInMeter = metrics.NewRegisteredMeter("eth/fetcher/transaction/announces/in", nil)
txAnnounceKnownMeter = metrics.NewRegisteredMeter("eth/fetcher/transaction/announces/known", nil) txAnnounceKnownMeter = metrics.NewRegisteredMeter("eth/fetcher/transaction/announces/known", nil)
txAnnounceUnderpricedMeter = metrics.NewRegisteredMeter("eth/fetcher/transaction/announces/underpriced", nil) txAnnounceUnderpricedMeter = metrics.NewRegisteredMeter("eth/fetcher/transaction/announces/underpriced", nil)
txAnnounceOnchainMeter = metrics.NewRegisteredMeter("eth/fetcher/transaction/announces/onchain", nil)
txAnnounceDOSMeter = metrics.NewRegisteredMeter("eth/fetcher/transaction/announces/dos", nil) txAnnounceDOSMeter = metrics.NewRegisteredMeter("eth/fetcher/transaction/announces/dos", nil)
txBroadcastInMeter = metrics.NewRegisteredMeter("eth/fetcher/transaction/broadcasts/in", nil) txBroadcastInMeter = metrics.NewRegisteredMeter("eth/fetcher/transaction/broadcasts/in", nil)

View file

@ -27,6 +27,7 @@ import (
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/lru" "github.com/ethereum/go-ethereum/common/lru"
"github.com/ethereum/go-ethereum/common/mclock" "github.com/ethereum/go-ethereum/common/mclock"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/txpool" "github.com/ethereum/go-ethereum/core/txpool"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
@ -71,6 +72,11 @@ const (
// addTxsBatchSize it the max number of transactions to add in a single batch from a peer. // addTxsBatchSize it the max number of transactions to add in a single batch from a peer.
addTxsBatchSize = 128 addTxsBatchSize = 128
// txOnChainCacheLimit is number of on-chain transactions to keep in a cache to avoid
// re-fetching them soon after they are mined.
// Approx 1MB for 30 minutes of transactions at 18 tps
txOnChainCacheLimit = 32768
) )
var ( var (
@ -152,6 +158,9 @@ type TxFetcher struct {
txSeq uint64 // Unique transaction sequence number txSeq uint64 // Unique transaction sequence number
underpriced *lru.Cache[common.Hash, time.Time] // Transactions discarded as too cheap (don't re-fetch) underpriced *lru.Cache[common.Hash, time.Time] // Transactions discarded as too cheap (don't re-fetch)
chain *core.BlockChain // Blockchain interface for on-chain checks
txOnChainCache *lru.Cache[common.Hash, struct{}] // Cache to avoid fetching once the tx gets on chain
// Stage 1: Waiting lists for newly discovered transactions that might be // Stage 1: Waiting lists for newly discovered transactions that might be
// broadcast without needing explicit request/reply round trips. // broadcast without needing explicit request/reply round trips.
waitlist map[common.Hash]map[string]struct{} // Transactions waiting for an potential broadcast waitlist map[common.Hash]map[string]struct{} // Transactions waiting for an potential broadcast
@ -184,36 +193,40 @@ type TxFetcher struct {
// NewTxFetcher creates a transaction fetcher to retrieve transaction // NewTxFetcher creates a transaction fetcher to retrieve transaction
// based on hash announcements. // based on hash announcements.
func NewTxFetcher(validateMeta func(common.Hash, byte) error, addTxs func([]*types.Transaction) []error, fetchTxs func(string, []common.Hash) error, dropPeer func(string)) *TxFetcher { // Chain can be nil to disable on-chain checks.
return NewTxFetcherForTests(validateMeta, addTxs, fetchTxs, dropPeer, mclock.System{}, time.Now, nil) func NewTxFetcher(chain *core.BlockChain, validateMeta func(common.Hash, byte) error, addTxs func([]*types.Transaction) []error, fetchTxs func(string, []common.Hash) error, dropPeer func(string)) *TxFetcher {
return NewTxFetcherForTests(chain, validateMeta, addTxs, fetchTxs, dropPeer, mclock.System{}, time.Now, nil)
} }
// NewTxFetcherForTests is a testing method to mock out the realtime clock with // NewTxFetcherForTests is a testing method to mock out the realtime clock with
// a simulated version and the internal randomness with a deterministic one. // a simulated version and the internal randomness with a deterministic one.
// Chain can be nil to disable on-chain checks.
func NewTxFetcherForTests( func NewTxFetcherForTests(
validateMeta func(common.Hash, byte) error, addTxs func([]*types.Transaction) []error, fetchTxs func(string, []common.Hash) error, dropPeer func(string), chain *core.BlockChain, validateMeta func(common.Hash, byte) error, addTxs func([]*types.Transaction) []error, fetchTxs func(string, []common.Hash) error, dropPeer func(string),
clock mclock.Clock, realTime func() time.Time, rand *mrand.Rand) *TxFetcher { clock mclock.Clock, realTime func() time.Time, rand *mrand.Rand) *TxFetcher {
return &TxFetcher{ return &TxFetcher{
notify: make(chan *txAnnounce), notify: make(chan *txAnnounce),
cleanup: make(chan *txDelivery), cleanup: make(chan *txDelivery),
drop: make(chan *txDrop), drop: make(chan *txDrop),
quit: make(chan struct{}), quit: make(chan struct{}),
waitlist: make(map[common.Hash]map[string]struct{}), waitlist: make(map[common.Hash]map[string]struct{}),
waittime: make(map[common.Hash]mclock.AbsTime), waittime: make(map[common.Hash]mclock.AbsTime),
waitslots: make(map[string]map[common.Hash]*txMetadataWithSeq), waitslots: make(map[string]map[common.Hash]*txMetadataWithSeq),
announces: make(map[string]map[common.Hash]*txMetadataWithSeq), announces: make(map[string]map[common.Hash]*txMetadataWithSeq),
announced: make(map[common.Hash]map[string]struct{}), announced: make(map[common.Hash]map[string]struct{}),
fetching: make(map[common.Hash]string), fetching: make(map[common.Hash]string),
requests: make(map[string]*txRequest), requests: make(map[string]*txRequest),
alternates: make(map[common.Hash]map[string]struct{}), alternates: make(map[common.Hash]map[string]struct{}),
underpriced: lru.NewCache[common.Hash, time.Time](maxTxUnderpricedSetSize), underpriced: lru.NewCache[common.Hash, time.Time](maxTxUnderpricedSetSize),
validateMeta: validateMeta, txOnChainCache: lru.NewCache[common.Hash, struct{}](txOnChainCacheLimit),
addTxs: addTxs, chain: chain,
fetchTxs: fetchTxs, validateMeta: validateMeta,
dropPeer: dropPeer, addTxs: addTxs,
clock: clock, fetchTxs: fetchTxs,
realTime: realTime, dropPeer: dropPeer,
rand: rand, clock: clock,
realTime: realTime,
rand: rand,
} }
} }
@ -233,6 +246,7 @@ func (f *TxFetcher) Notify(peer string, types []byte, sizes []uint32, hashes []c
unknownMetas = make([]txMetadata, 0, len(hashes)) unknownMetas = make([]txMetadata, 0, len(hashes))
duplicate int64 duplicate int64
onchain int64
underpriced int64 underpriced int64
) )
for i, hash := range hashes { for i, hash := range hashes {
@ -245,6 +259,12 @@ func (f *TxFetcher) Notify(peer string, types []byte, sizes []uint32, hashes []c
continue continue
} }
// check on chain as well (no need to check limbo separately, as chain checks limbo too)
if _, exist := f.txOnChainCache.Get(hash); exist {
onchain++
continue
}
if f.isKnownUnderpriced(hash) { if f.isKnownUnderpriced(hash) {
underpriced++ underpriced++
continue continue
@ -259,6 +279,7 @@ func (f *TxFetcher) Notify(peer string, types []byte, sizes []uint32, hashes []c
} }
txAnnounceKnownMeter.Mark(duplicate) txAnnounceKnownMeter.Mark(duplicate)
txAnnounceUnderpricedMeter.Mark(underpriced) txAnnounceUnderpricedMeter.Mark(underpriced)
txAnnounceOnchainMeter.Mark(onchain)
// If anything's left to announce, push it into the internal loop // If anything's left to announce, push it into the internal loop
if len(unknownHashes) == 0 { if len(unknownHashes) == 0 {
@ -412,7 +433,18 @@ func (f *TxFetcher) loop() {
waitTrigger = make(chan struct{}, 1) waitTrigger = make(chan struct{}, 1)
timeoutTrigger = make(chan struct{}, 1) timeoutTrigger = make(chan struct{}, 1)
oldHead *types.Header
) )
// Subscribe to chain events to know when transactions are added to chain
var headEventCh chan core.ChainEvent
if f.chain != nil {
headEventCh = make(chan core.ChainEvent, 10)
sub := f.chain.SubscribeChainEvent(headEventCh)
defer sub.Unsubscribe()
}
for { for {
select { select {
case ann := <-f.notify: case ann := <-f.notify:
@ -837,6 +869,21 @@ func (f *TxFetcher) loop() {
f.rescheduleTimeout(timeoutTimer, timeoutTrigger) f.rescheduleTimeout(timeoutTimer, timeoutTrigger)
} }
case ev := <-headEventCh:
// New head(s) added
newHead := ev.Header
if oldHead != nil && newHead.ParentHash != oldHead.Hash() {
// Reorg or setHead detected, clear the cache. We could be smarter here and
// only remove/add the diff, but this is simpler and not being exact here
// only results in a few more fetches.
f.txOnChainCache.Purge()
}
oldHead = newHead
// Add all transactions from the new block to the on-chain cache
for _, tx := range ev.Transactions {
f.txOnChainCache.Add(tx.Hash(), struct{}{})
}
case <-f.quit: case <-f.quit:
return return
} }

View file

@ -91,6 +91,7 @@ type txFetcherTest struct {
// and deterministic randomness. // and deterministic randomness.
func newTestTxFetcher() *TxFetcher { func newTestTxFetcher() *TxFetcher {
return NewTxFetcher( return NewTxFetcher(
nil,
func(common.Hash, byte) error { return nil }, func(common.Hash, byte) error { return nil },
func(txs []*types.Transaction) []error { func(txs []*types.Transaction) []error {
return make([]error, len(txs)) return make([]error, len(txs))
@ -2191,6 +2192,7 @@ func TestTransactionForgotten(t *testing.T) {
} }
fetcher := NewTxFetcherForTests( fetcher := NewTxFetcherForTests(
nil,
func(common.Hash, byte) error { return nil }, func(common.Hash, byte) error { return nil },
func(txs []*types.Transaction) []error { func(txs []*types.Transaction) []error {
errs := make([]error, len(txs)) errs := make([]error, len(txs))

View file

@ -179,7 +179,6 @@ func newHandler(config *handlerConfig) (*handler, error) {
addTxs := func(txs []*types.Transaction) []error { addTxs := func(txs []*types.Transaction) []error {
return h.txpool.Add(txs, false) return h.txpool.Add(txs, false)
} }
validateMeta := func(tx common.Hash, kind byte) error { validateMeta := func(tx common.Hash, kind byte) error {
if h.txpool.Has(tx) { if h.txpool.Has(tx) {
return txpool.ErrAlreadyKnown return txpool.ErrAlreadyKnown
@ -189,8 +188,7 @@ func newHandler(config *handlerConfig) (*handler, error) {
} }
return nil return nil
} }
h.txFetcher = fetcher.NewTxFetcher(h.chain, validateMeta, addTxs, fetchTx, h.removePeer)
h.txFetcher = fetcher.NewTxFetcher(validateMeta, addTxs, fetchTx, h.removePeer)
return h, nil return h, nil
} }

View file

@ -191,16 +191,16 @@ func markError(p *Peer, err error) {
return return
} }
m := meters.get(p.Inbound()) m := meters.get(p.Inbound())
switch errors.Unwrap(err) { switch {
case errNetworkIDMismatch: case errors.Is(err, errNetworkIDMismatch):
m.networkIDMismatch.Mark(1) m.networkIDMismatch.Mark(1)
case errProtocolVersionMismatch: case errors.Is(err, errProtocolVersionMismatch):
m.protocolVersionMismatch.Mark(1) m.protocolVersionMismatch.Mark(1)
case errGenesisMismatch: case errors.Is(err, errGenesisMismatch):
m.genesisMismatch.Mark(1) m.genesisMismatch.Mark(1)
case errForkIDRejected: case errors.Is(err, errForkIDRejected):
m.forkidRejected.Mark(1) m.forkidRejected.Mark(1)
case p2p.DiscReadTimeout: case errors.Is(err, p2p.DiscReadTimeout):
m.timeoutError.Mark(1) m.timeoutError.Mark(1)
default: default:
m.peerError.Mark(1) m.peerError.Mark(1)

View file

@ -1699,9 +1699,13 @@ func (s *Syncer) revertAccountRequest(req *accountRequest) {
} }
close(req.stale) close(req.stale)
// Remove the request from the tracked set // Remove the request from the tracked set and restore the peer to the
// idle pool so it can be reassigned work (skip if peer already left).
s.lock.Lock() s.lock.Lock()
delete(s.accountReqs, req.id) delete(s.accountReqs, req.id)
if _, ok := s.peers[req.peer]; ok {
s.accountIdlers[req.peer] = struct{}{}
}
s.lock.Unlock() s.lock.Unlock()
// If there's a timeout timer still running, abort it and mark the account // If there's a timeout timer still running, abort it and mark the account
@ -1740,9 +1744,13 @@ func (s *Syncer) revertBytecodeRequest(req *bytecodeRequest) {
} }
close(req.stale) close(req.stale)
// Remove the request from the tracked set // Remove the request from the tracked set and restore the peer to the
// idle pool so it can be reassigned work (skip if peer already left).
s.lock.Lock() s.lock.Lock()
delete(s.bytecodeReqs, req.id) delete(s.bytecodeReqs, req.id)
if _, ok := s.peers[req.peer]; ok {
s.bytecodeIdlers[req.peer] = struct{}{}
}
s.lock.Unlock() s.lock.Unlock()
// If there's a timeout timer still running, abort it and mark the code // If there's a timeout timer still running, abort it and mark the code
@ -1781,9 +1789,13 @@ func (s *Syncer) revertStorageRequest(req *storageRequest) {
} }
close(req.stale) close(req.stale)
// Remove the request from the tracked set // Remove the request from the tracked set and restore the peer to the
// idle pool so it can be reassigned work (skip if peer already left).
s.lock.Lock() s.lock.Lock()
delete(s.storageReqs, req.id) delete(s.storageReqs, req.id)
if _, ok := s.peers[req.peer]; ok {
s.storageIdlers[req.peer] = struct{}{}
}
s.lock.Unlock() s.lock.Unlock()
// If there's a timeout timer still running, abort it and mark the storage // If there's a timeout timer still running, abort it and mark the storage
@ -1826,9 +1838,13 @@ func (s *Syncer) revertTrienodeHealRequest(req *trienodeHealRequest) {
} }
close(req.stale) close(req.stale)
// Remove the request from the tracked set // Remove the request from the tracked set and restore the peer to the
// idle pool so it can be reassigned work (skip if peer already left).
s.lock.Lock() s.lock.Lock()
delete(s.trienodeHealReqs, req.id) delete(s.trienodeHealReqs, req.id)
if _, ok := s.peers[req.peer]; ok {
s.trienodeHealIdlers[req.peer] = struct{}{}
}
s.lock.Unlock() s.lock.Unlock()
// If there's a timeout timer still running, abort it and mark the trie node // If there's a timeout timer still running, abort it and mark the trie node
@ -1867,9 +1883,13 @@ func (s *Syncer) revertBytecodeHealRequest(req *bytecodeHealRequest) {
} }
close(req.stale) close(req.stale)
// Remove the request from the tracked set // Remove the request from the tracked set and restore the peer to the
// idle pool so it can be reassigned work (skip if peer already left).
s.lock.Lock() s.lock.Lock()
delete(s.bytecodeHealReqs, req.id) delete(s.bytecodeHealReqs, req.id)
if _, ok := s.peers[req.peer]; ok {
s.bytecodeHealIdlers[req.peer] = struct{}{}
}
s.lock.Unlock() s.lock.Unlock()
// If there's a timeout timer still running, abort it and mark the code // If there's a timeout timer still running, abort it and mark the code

View file

@ -18,6 +18,7 @@ package logger
import ( import (
"maps" "maps"
"slices"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/tracing" "github.com/ethereum/go-ethereum/core/tracing"
@ -88,8 +89,10 @@ func (al accessList) accessList() types.AccessList {
for slot := range slots { for slot := range slots {
tuple.StorageKeys = append(tuple.StorageKeys, slot) tuple.StorageKeys = append(tuple.StorageKeys, slot)
} }
acl = append(acl, tuple) keys := slices.SortedFunc(maps.Keys(slots), common.Hash.Cmp)
acl = append(acl, types.AccessTuple{Address: addr, StorageKeys: keys})
} }
slices.SortFunc(acl, func(a, b types.AccessTuple) int { return a.Address.Cmp(b.Address) })
return acl return acl
} }

View file

@ -28,7 +28,7 @@ import (
) )
func init() { func init() {
tracers.DefaultDirectory.Register("muxTracer", newMuxTracer, false) tracers.DefaultDirectory.Register("muxTracer", newMuxTracerFromConfig, false)
} }
// muxTracer is a go implementation of the Tracer interface which // muxTracer is a go implementation of the Tracer interface which
@ -38,8 +38,8 @@ type muxTracer struct {
tracers []*tracers.Tracer tracers []*tracers.Tracer
} }
// newMuxTracer returns a new mux tracer. // newMuxTracerFromConfig returns a new mux tracer.
func newMuxTracer(ctx *tracers.Context, cfg json.RawMessage, chainConfig *params.ChainConfig) (*tracers.Tracer, error) { func newMuxTracerFromConfig(ctx *tracers.Context, cfg json.RawMessage, chainConfig *params.ChainConfig) (*tracers.Tracer, error) {
var config map[string]json.RawMessage var config map[string]json.RawMessage
if err := json.Unmarshal(cfg, &config); err != nil { if err := json.Unmarshal(cfg, &config); err != nil {
return nil, err return nil, err
@ -54,7 +54,16 @@ func newMuxTracer(ctx *tracers.Context, cfg json.RawMessage, chainConfig *params
objects = append(objects, t) objects = append(objects, t)
names = append(names, k) names = append(names, k)
} }
return NewMuxTracer(names, objects)
}
// NewMuxTracer creates a multiplexing tracer that fans out tracing hooks to
// multiple child tracers. Each hook invocation is forwarded to all children,
// in the order they are provided.
//
// The names parameter associates a label with each tracer, used as keys in
// the aggregated JSON result returned by GetResult.
func NewMuxTracer(names []string, objects []*tracers.Tracer) (*tracers.Tracer, error) {
t := &muxTracer{names: names, tracers: objects} t := &muxTracer{names: names, tracers: objects}
return &tracers.Tracer{ return &tracers.Tracer{
Hooks: &tracing.Hooks{ Hooks: &tracing.Hooks{

View file

@ -0,0 +1,55 @@
// Copyright 2025 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
package native
import (
"encoding/json"
"github.com/ethereum/go-ethereum/core/tracing"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/eth/tracers"
)
// opcodeCounter is a simple tracer that counts how many times each opcode is executed.
type opcodeCounter struct {
	counts [256]uint64 // execution count per opcode byte, indexed by opcode value
}
// NewOpcodeCounter returns a new opcodeCounter tracer.
func NewOpcodeCounter() *tracers.Tracer {
	counter := new(opcodeCounter)
	// Count every executed opcode; all other hooks stay unset.
	hooks := &tracing.Hooks{
		OnOpcode: func(pc uint64, op byte, gas, cost uint64, scope tracing.OpContext, rData []byte, depth int, err error) {
			counter.counts[op]++
		},
	}
	return &tracers.Tracer{
		Hooks:     hooks,
		GetResult: counter.getResult,
		Stop:      func(err error) {},
	}
}
// getResult returns the opcode counts keyed by opcode name.
// Opcodes that were never executed are omitted from the result.
func (c *opcodeCounter) getResult() (json.RawMessage, error) {
	result := map[string]uint64{}
	for op := 0; op < len(c.counts); op++ {
		if n := c.counts[op]; n != 0 {
			result[vm.OpCode(op).String()] = n
		}
	}
	return json.Marshal(result)
}

View file

@ -1093,6 +1093,18 @@ func (b *Block) ExcessBlobGas(ctx context.Context) (*hexutil.Uint64, error) {
return &ret, nil return &ret, nil
} }
// SlotNumber returns the block header's slot number, or nil when the
// header does not carry one.
func (b *Block) SlotNumber(ctx context.Context) (*hexutil.Uint64, error) {
	header, err := b.resolveHeader(ctx)
	switch {
	case err != nil:
		return nil, err
	case header.SlotNumber == nil:
		return nil, nil
	}
	slot := hexutil.Uint64(*header.SlotNumber)
	return &slot, nil
}
// BlockFilterCriteria encapsulates criteria passed to a `logs` accessor inside // BlockFilterCriteria encapsulates criteria passed to a `logs` accessor inside
// a block. // a block.
type BlockFilterCriteria struct { type BlockFilterCriteria struct {

View file

@ -53,6 +53,10 @@ import (
// allowed to produce in order to speed up calculations. // allowed to produce in order to speed up calculations.
const estimateGasErrorRatio = 0.015 const estimateGasErrorRatio = 0.015
// maxGetStorageSlots is the maximum total number of storage slots that can
// be requested in a single eth_getStorageValues call.
const maxGetStorageSlots = 1024
var errBlobTxNotSupported = errors.New("signing blob transactions not supported") var errBlobTxNotSupported = errors.New("signing blob transactions not supported")
var errSubClosed = errors.New("chain subscription closed") var errSubClosed = errors.New("chain subscription closed")
@ -589,6 +593,41 @@ func (api *BlockChainAPI) GetStorageAt(ctx context.Context, address common.Addre
return res[:], state.Error() return res[:], state.Error()
} }
// GetStorageValues returns multiple storage slot values for multiple accounts
// at the given block. The request maps each account address to the list of
// storage keys to read; the total number of keys across all accounts is capped
// at maxGetStorageSlots, and an entirely empty request is rejected.
func (api *BlockChainAPI) GetStorageValues(ctx context.Context, requests map[common.Address][]common.Hash, blockNrOrHash rpc.BlockNumberOrHash) (map[common.Address][]hexutil.Bytes, error) {
	// Enforce the per-call slot budget before touching any state. The check
	// lives inside the loop so oversized requests bail out early.
	slots := 0
	for _, keys := range requests {
		slots += len(keys)
		if slots > maxGetStorageSlots {
			return nil, &clientLimitExceededError{message: fmt.Sprintf("too many slots (max %d)", maxGetStorageSlots)}
		}
	}
	if slots == 0 {
		return nil, &invalidParamsError{message: "empty request"}
	}
	state, _, err := api.b.StateAndHeaderByNumberOrHash(ctx, blockNrOrHash)
	if err != nil || state == nil {
		return nil, err
	}
	// Read every requested slot; unset slots yield the zero hash.
	result := make(map[common.Address][]hexutil.Bytes, len(requests))
	for addr, keys := range requests {
		values := make([]hexutil.Bytes, len(keys))
		for i, key := range keys {
			word := state.GetState(addr, key)
			values[i] = word[:]
		}
		// Surface any database error encountered while reading this account.
		if err := state.Error(); err != nil {
			return nil, err
		}
		result[addr] = values
	}
	return result, nil
}
// GetBlockReceipts returns the block receipts for the given block hash or number or tag. // GetBlockReceipts returns the block receipts for the given block hash or number or tag.
func (api *BlockChainAPI) GetBlockReceipts(ctx context.Context, blockNrOrHash rpc.BlockNumberOrHash) ([]map[string]interface{}, error) { func (api *BlockChainAPI) GetBlockReceipts(ctx context.Context, blockNrOrHash rpc.BlockNumberOrHash) ([]map[string]interface{}, error) {
var ( var (
@ -932,6 +971,9 @@ func RPCMarshalHeader(head *types.Header) map[string]interface{} {
if head.RequestsHash != nil { if head.RequestsHash != nil {
result["requestsHash"] = head.RequestsHash result["requestsHash"] = head.RequestsHash
} }
if head.SlotNumber != nil {
result["slotNumber"] = head.SlotNumber
}
return result return result
} }

View file

@ -4065,3 +4065,91 @@ func TestSendRawTransactionSync_Timeout(t *testing.T) {
t.Fatalf("expected ErrorData=%s, got %v", want, got) t.Fatalf("expected ErrorData=%s, got %v", want, got)
} }
} }
// TestGetStorageValues exercises the eth_getStorageValues handler: the happy
// path across multiple accounts and slots, the zero value returned for a slot
// that was never written, and the error paths for an empty request and for a
// request exceeding maxGetStorageSlots.
func TestGetStorageValues(t *testing.T) {
	t.Parallel()
	var (
		addr1 = common.HexToAddress("0x1111")
		addr2 = common.HexToAddress("0x2222")
		slot0 = common.Hash{}
		slot1 = common.BigToHash(big.NewInt(1))
		slot2 = common.BigToHash(big.NewInt(2))
		val0  = common.BigToHash(big.NewInt(42))
		val1  = common.BigToHash(big.NewInt(100))
		val2  = common.BigToHash(big.NewInt(200))
		// Genesis pre-populates both accounts with known storage contents so
		// the expected values below are fixed.
		genesis = &core.Genesis{
			Config: params.MergedTestChainConfig,
			Alloc: types.GenesisAlloc{
				addr1: {
					Balance: big.NewInt(params.Ether),
					Storage: map[common.Hash]common.Hash{
						slot0: val0,
						slot1: val1,
					},
				},
				addr2: {
					Balance: big.NewInt(params.Ether),
					Storage: map[common.Hash]common.Hash{
						slot2: val2,
					},
				},
			},
		}
	)
	// Single post-merge block on top of genesis; queries run against "latest".
	api := NewBlockChainAPI(newTestBackend(t, 1, genesis, beacon.New(ethash.NewFaker()), func(i int, b *core.BlockGen) {
		b.SetPoS()
	}))
	latest := rpc.BlockNumberOrHashWithNumber(rpc.LatestBlockNumber)
	// Happy path: multiple addresses, multiple slots.
	result, err := api.GetStorageValues(context.Background(), map[common.Address][]common.Hash{
		addr1: {slot0, slot1},
		addr2: {slot2},
	}, latest)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if len(result) != 2 {
		t.Fatalf("expected 2 addresses in result, got %d", len(result))
	}
	if got := common.BytesToHash(result[addr1][0]); got != val0 {
		t.Errorf("addr1 slot0: want %x, got %x", val0, got)
	}
	if got := common.BytesToHash(result[addr1][1]); got != val1 {
		t.Errorf("addr1 slot1: want %x, got %x", val1, got)
	}
	if got := common.BytesToHash(result[addr2][0]); got != val2 {
		t.Errorf("addr2 slot2: want %x, got %x", val2, got)
	}
	// Missing slot returns zero.
	result, err = api.GetStorageValues(context.Background(), map[common.Address][]common.Hash{
		addr1: {common.HexToHash("0xff")},
	}, latest)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if got := common.BytesToHash(result[addr1][0]); got != (common.Hash{}) {
		t.Errorf("missing slot: want zero, got %x", got)
	}
	// Empty request returns error.
	_, err = api.GetStorageValues(context.Background(), map[common.Address][]common.Hash{}, latest)
	if err == nil {
		t.Fatal("expected error for empty request")
	}
	// Exceeding slot limit returns error.
	tooMany := make([]common.Hash, maxGetStorageSlots+1)
	for i := range tooMany {
		tooMany[i] = common.BigToHash(big.NewInt(int64(i)))
	}
	_, err = api.GetStorageValues(context.Background(), map[common.Address][]common.Hash{
		addr1: tooMany,
	}, latest)
	if err == nil {
		t.Fatal("expected error for exceeding slot limit")
	}
}

View file

@ -19,7 +19,9 @@ package override
import ( import (
"errors" "errors"
"fmt" "fmt"
"maps"
"math/big" "math/big"
"slices"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/common/hexutil"
@ -58,9 +60,13 @@ func (diff *StateOverride) Apply(statedb *state.StateDB, precompiles vm.Precompi
if diff == nil { if diff == nil {
return nil return nil
} }
// Iterate in deterministic order so error messages and behavior are stable (e.g. for tests).
addrs := slices.SortedFunc(maps.Keys(*diff), common.Address.Cmp)
// Tracks destinations of precompiles that were moved. // Tracks destinations of precompiles that were moved.
dirtyAddrs := make(map[common.Address]struct{}) dirtyAddrs := make(map[common.Address]struct{})
for addr, account := range *diff { for _, addr := range addrs {
account := (*diff)[addr]
// If a precompile was moved to this address already, it can't be overridden. // If a precompile was moved to this address already, it can't be overridden.
if _, ok := dirtyAddrs[addr]; ok { if _, ok := dirtyAddrs[addr]; ok {
return fmt.Errorf("account %s has already been overridden by a precompile", addr.Hex()) return fmt.Errorf("account %s has already been overridden by a precompile", addr.Hex())

View file

@ -155,6 +155,7 @@ func (args *TransactionArgs) setDefaults(ctx context.Context, b Backend, config
AccessList: args.AccessList, AccessList: args.AccessList,
BlobFeeCap: args.BlobFeeCap, BlobFeeCap: args.BlobFeeCap,
BlobHashes: args.BlobHashes, BlobHashes: args.BlobHashes,
AuthorizationList: args.AuthorizationList,
} }
latestBlockNr := rpc.BlockNumberOrHashWithNumber(rpc.LatestBlockNumber) latestBlockNr := rpc.BlockNumberOrHashWithNumber(rpc.LatestBlockNumber)
estimated, err := DoEstimateGas(ctx, b, callArgs, latestBlockNr, nil, nil, b.RPCGasCap()) estimated, err := DoEstimateGas(ctx, b, callArgs, latestBlockNr, nil, nil, b.RPCGasCap())

View file

@ -16,7 +16,7 @@
// Naive stub implementation for tablewriter // Naive stub implementation for tablewriter
package rawdb package tablewriter
import ( import (
"errors" "errors"
@ -37,7 +37,7 @@ type Table struct {
rows [][]string rows [][]string
} }
func NewTableWriter(w io.Writer) *Table { func NewWriter(w io.Writer) *Table {
return &Table{out: w} return &Table{out: w}
} }
@ -58,12 +58,12 @@ func (t *Table) SetFooter(footer []string) {
t.footer = footer t.footer = footer
} }
// AppendBulk sets all data rows for the table at once, replacing any existing rows. // AppendBulk appends one or more data rows to the table.
// //
// Each row must have the same number of columns as the headers, or validation // Each row must have the same number of columns as the headers, or validation
// will fail during Render(). // will fail during Render().
func (t *Table) AppendBulk(rows [][]string) { func (t *Table) AppendBulk(rows [][]string) {
t.rows = rows t.rows = append(t.rows, rows...)
} }
// Render outputs the complete table to the configured writer. The table is rendered // Render outputs the complete table to the configured writer. The table is rendered

View file

@ -14,7 +14,7 @@
// You should have received a copy of the GNU Lesser General Public License // You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package rawdb package tablewriter
import ( import (
"bytes" "bytes"
@ -24,7 +24,7 @@ import (
func TestTableWriterTinyGo(t *testing.T) { func TestTableWriterTinyGo(t *testing.T) {
var buf bytes.Buffer var buf bytes.Buffer
table := NewTableWriter(&buf) table := NewWriter(&buf)
headers := []string{"Database", "Size", "Items", "Status"} headers := []string{"Database", "Size", "Items", "Status"}
rows := [][]string{ rows := [][]string{
@ -48,7 +48,7 @@ func TestTableWriterValidationErrors(t *testing.T) {
// Test missing headers // Test missing headers
t.Run("MissingHeaders", func(t *testing.T) { t.Run("MissingHeaders", func(t *testing.T) {
var buf bytes.Buffer var buf bytes.Buffer
table := NewTableWriter(&buf) table := NewWriter(&buf)
rows := [][]string{{"x", "y", "z"}} rows := [][]string{{"x", "y", "z"}}
@ -63,7 +63,7 @@ func TestTableWriterValidationErrors(t *testing.T) {
t.Run("NotEnoughRowColumns", func(t *testing.T) { t.Run("NotEnoughRowColumns", func(t *testing.T) {
var buf bytes.Buffer var buf bytes.Buffer
table := NewTableWriter(&buf) table := NewWriter(&buf)
headers := []string{"A", "B", "C"} headers := []string{"A", "B", "C"}
badRows := [][]string{ badRows := [][]string{
@ -82,7 +82,7 @@ func TestTableWriterValidationErrors(t *testing.T) {
t.Run("TooManyRowColumns", func(t *testing.T) { t.Run("TooManyRowColumns", func(t *testing.T) {
var buf bytes.Buffer var buf bytes.Buffer
table := NewTableWriter(&buf) table := NewWriter(&buf)
headers := []string{"A", "B", "C"} headers := []string{"A", "B", "C"}
badRows := [][]string{ badRows := [][]string{
@ -102,7 +102,7 @@ func TestTableWriterValidationErrors(t *testing.T) {
// Test mismatched footer columns // Test mismatched footer columns
t.Run("MismatchedFooterColumns", func(t *testing.T) { t.Run("MismatchedFooterColumns", func(t *testing.T) {
var buf bytes.Buffer var buf bytes.Buffer
table := NewTableWriter(&buf) table := NewWriter(&buf)
headers := []string{"A", "B", "C"} headers := []string{"A", "B", "C"}
rows := [][]string{{"x", "y", "z"}} rows := [][]string{{"x", "y", "z"}}

View file

@ -567,6 +567,12 @@ web3._extend({
params: 3, params: 3,
inputFormatter: [web3._extend.formatters.inputAddressFormatter, null, web3._extend.formatters.inputBlockNumberFormatter] inputFormatter: [web3._extend.formatters.inputAddressFormatter, null, web3._extend.formatters.inputBlockNumberFormatter]
}), }),
new web3._extend.Method({
name: 'getStorageValues',
call: 'eth_getStorageValues',
params: 2,
inputFormatter: [null, web3._extend.formatters.inputBlockNumberFormatter]
}),
new web3._extend.Method({ new web3._extend.Method({
name: 'createAccessList', name: 'createAccessList',
call: 'eth_createAccessList', call: 'eth_createAccessList',

View file

@ -16,18 +16,21 @@
package metrics package metrics
import "time"
// Config contains the configuration for the metric collection. // Config contains the configuration for the metric collection.
type Config struct { type Config struct {
Enabled bool `toml:",omitempty"` Enabled bool `toml:",omitempty"`
EnabledExpensive bool `toml:"-"` EnabledExpensive bool `toml:"-"`
HTTP string `toml:",omitempty"` HTTP string `toml:",omitempty"`
Port int `toml:",omitempty"` Port int `toml:",omitempty"`
EnableInfluxDB bool `toml:",omitempty"` EnableInfluxDB bool `toml:",omitempty"`
InfluxDBEndpoint string `toml:",omitempty"` InfluxDBEndpoint string `toml:",omitempty"`
InfluxDBDatabase string `toml:",omitempty"` InfluxDBDatabase string `toml:",omitempty"`
InfluxDBUsername string `toml:",omitempty"` InfluxDBUsername string `toml:",omitempty"`
InfluxDBPassword string `toml:",omitempty"` InfluxDBPassword string `toml:",omitempty"`
InfluxDBTags string `toml:",omitempty"` InfluxDBTags string `toml:",omitempty"`
InfluxDBInterval time.Duration `toml:",omitempty"`
EnableInfluxDBV2 bool `toml:",omitempty"` EnableInfluxDBV2 bool `toml:",omitempty"`
InfluxDBToken string `toml:",omitempty"` InfluxDBToken string `toml:",omitempty"`
@ -47,6 +50,7 @@ var DefaultConfig = Config{
InfluxDBUsername: "test", InfluxDBUsername: "test",
InfluxDBPassword: "test", InfluxDBPassword: "test",
InfluxDBTags: "host=localhost", InfluxDBTags: "host=localhost",
InfluxDBInterval: 10 * time.Second,
// influxdbv2-specific flags // influxdbv2-specific flags
EnableInfluxDBV2: false, EnableInfluxDBV2: false,

View file

@ -43,6 +43,7 @@ type BuildPayloadArgs struct {
Random common.Hash // The provided randomness value Random common.Hash // The provided randomness value
Withdrawals types.Withdrawals // The provided withdrawals Withdrawals types.Withdrawals // The provided withdrawals
BeaconRoot *common.Hash // The provided beaconRoot (Cancun) BeaconRoot *common.Hash // The provided beaconRoot (Cancun)
SlotNum *uint64 // The provided slotNumber
Version engine.PayloadVersion // Versioning byte for payload id calculation. Version engine.PayloadVersion // Versioning byte for payload id calculation.
} }
@ -57,6 +58,9 @@ func (args *BuildPayloadArgs) Id() engine.PayloadID {
if args.BeaconRoot != nil { if args.BeaconRoot != nil {
hasher.Write(args.BeaconRoot[:]) hasher.Write(args.BeaconRoot[:])
} }
if args.SlotNum != nil {
binary.Write(hasher, binary.BigEndian, args.SlotNum)
}
var out engine.PayloadID var out engine.PayloadID
copy(out[:], hasher.Sum(nil)[:8]) copy(out[:], hasher.Sum(nil)[:8])
out[0] = byte(args.Version) out[0] = byte(args.Version)
@ -218,6 +222,7 @@ func (miner *Miner) buildPayload(args *BuildPayloadArgs, witness bool) (*Payload
random: args.Random, random: args.Random,
withdrawals: args.Withdrawals, withdrawals: args.Withdrawals,
beaconRoot: args.BeaconRoot, beaconRoot: args.BeaconRoot,
slotNum: args.SlotNum,
noTxs: true, noTxs: true,
} }
empty := miner.generateWork(emptyParams, witness) empty := miner.generateWork(emptyParams, witness)
@ -248,6 +253,7 @@ func (miner *Miner) buildPayload(args *BuildPayloadArgs, witness bool) (*Payload
random: args.Random, random: args.Random,
withdrawals: args.Withdrawals, withdrawals: args.Withdrawals,
beaconRoot: args.BeaconRoot, beaconRoot: args.BeaconRoot,
slotNum: args.SlotNum,
noTxs: false, noTxs: false,
} }
@ -273,3 +279,26 @@ func (miner *Miner) buildPayload(args *BuildPayloadArgs, witness bool) (*Payload
}() }()
return payload, nil return payload, nil
} }
// BuildTestingPayload is for testing_buildBlockV*. It creates a block with the exact content given
// by the parameters instead of using the locally available transactions.
//
// The supplied transactions and extra-data override whatever the miner would
// normally select; when empty is true, an empty (transaction-less) block is
// produced. The result is returned as an execution payload envelope with a
// zero block value, since testing payloads are never bid on.
func (miner *Miner) BuildTestingPayload(args *BuildPayloadArgs, transactions []*types.Transaction, empty bool, extraData []byte) (*engine.ExecutionPayloadEnvelope, error) {
	fullParams := &generateParams{
		timestamp:   args.Timestamp,
		forceTime:   true,
		parentHash:  args.Parent,
		coinbase:    args.FeeRecipient,
		random:      args.Random,
		withdrawals: args.Withdrawals,
		beaconRoot:  args.BeaconRoot,
		// Forward the slot number like buildPayload does: prepareWork
		// rejects post-Amsterdam blocks without one (EIP-7843).
		slotNum:           args.SlotNum,
		noTxs:             empty,
		forceOverrides:    true,
		overrideExtraData: extraData,
		overrideTxs:       transactions,
	}
	res := miner.generateWork(fullParams, false)
	if res.err != nil {
		return nil, res.err
	}
	return engine.BlockToExecutableData(res.block, new(big.Int), res.sidecars, res.requests), nil
}

View file

@ -111,7 +111,12 @@ type generateParams struct {
random common.Hash // The randomness generated by beacon chain, empty before the merge random common.Hash // The randomness generated by beacon chain, empty before the merge
withdrawals types.Withdrawals // List of withdrawals to include in block (shanghai field) withdrawals types.Withdrawals // List of withdrawals to include in block (shanghai field)
beaconRoot *common.Hash // The beacon root (cancun field). beaconRoot *common.Hash // The beacon root (cancun field).
slotNum *uint64 // The slot number (amsterdam field).
noTxs bool // Flag whether an empty block without any transaction is expected noTxs bool // Flag whether an empty block without any transaction is expected
forceOverrides bool // Flag whether we should overwrite extraData and transactions
overrideExtraData []byte
overrideTxs []*types.Transaction
} }
// generateWork generates a sealing block based on the given parameters. // generateWork generates a sealing block based on the given parameters.
@ -132,15 +137,30 @@ func (miner *Miner) generateWork(genParam *generateParams, witness bool) *newPay
work.size += uint64(genParam.withdrawals.Size()) work.size += uint64(genParam.withdrawals.Size())
if !genParam.noTxs { if !genParam.noTxs {
interrupt := new(atomic.Int32) // If forceOverrides is true and overrideTxs is not empty, commit the override transactions
timer := time.AfterFunc(miner.config.Recommit, func() { // otherwise, fill the block with the current transactions from the txpool
interrupt.Store(commitInterruptTimeout) if genParam.forceOverrides && len(genParam.overrideTxs) > 0 {
}) if work.gasPool == nil {
defer timer.Stop() work.gasPool = new(core.GasPool).AddGas(work.header.GasLimit)
}
for _, tx := range genParam.overrideTxs {
work.state.SetTxContext(tx.Hash(), work.tcount)
if err := miner.commitTransaction(work, tx); err != nil {
// all passed transactions HAVE to be valid at this point
return &newPayloadResult{err: err}
}
}
} else {
interrupt := new(atomic.Int32)
timer := time.AfterFunc(miner.config.Recommit, func() {
interrupt.Store(commitInterruptTimeout)
})
defer timer.Stop()
err := miner.fillTransactions(interrupt, work) err := miner.fillTransactions(interrupt, work)
if errors.Is(err, errBlockInterruptedByTimeout) { if errors.Is(err, errBlockInterruptedByTimeout) {
log.Warn("Block building is interrupted", "allowance", common.PrettyDuration(miner.config.Recommit)) log.Warn("Block building is interrupted", "allowance", common.PrettyDuration(miner.config.Recommit))
}
} }
} }
body := types.Body{Transactions: work.txs, Withdrawals: genParam.withdrawals} body := types.Body{Transactions: work.txs, Withdrawals: genParam.withdrawals}
@ -224,6 +244,9 @@ func (miner *Miner) prepareWork(genParams *generateParams, witness bool) (*envir
if len(miner.config.ExtraData) != 0 { if len(miner.config.ExtraData) != 0 {
header.Extra = miner.config.ExtraData header.Extra = miner.config.ExtraData
} }
if genParams.forceOverrides {
header.Extra = genParams.overrideExtraData
}
// Set the randomness field from the beacon chain if it's available. // Set the randomness field from the beacon chain if it's available.
if genParams.random != (common.Hash{}) { if genParams.random != (common.Hash{}) {
header.MixDigest = genParams.random header.MixDigest = genParams.random
@ -252,6 +275,13 @@ func (miner *Miner) prepareWork(genParams *generateParams, witness bool) (*envir
header.ExcessBlobGas = &excessBlobGas header.ExcessBlobGas = &excessBlobGas
header.ParentBeaconRoot = genParams.beaconRoot header.ParentBeaconRoot = genParams.beaconRoot
} }
// Apply EIP-7843.
if miner.chainConfig.IsAmsterdam(header.Number, header.Time) {
if genParams.slotNum == nil {
return nil, errors.New("no slot number set post-amsterdam")
}
header.SlotNumber = genParams.slotNum
}
// Could potentially happen if starting to mine in an odd state. // Could potentially happen if starting to mine in an odd state.
// Note genParams.coinbase can be different with header.Coinbase // Note genParams.coinbase can be different with header.Coinbase
// since clique algorithm can modify the coinbase field in header. // since clique algorithm can modify the coinbase field in header.

View file

@ -856,10 +856,10 @@ type testCodecFrame struct {
} }
func (c *testCodec) Encode(toID enode.ID, addr string, p v5wire.Packet, _ *v5wire.Whoareyou) ([]byte, v5wire.Nonce, error) { func (c *testCodec) Encode(toID enode.ID, addr string, p v5wire.Packet, _ *v5wire.Whoareyou) ([]byte, v5wire.Nonce, error) {
// To match the behavior of v5wire.Codec, we return the cached encoding of if wp, ok := p.(*v5wire.Whoareyou); ok && len(wp.ChallengeData) > 0 {
// WHOAREYOU challenges. // To match the behavior of v5wire.Codec, we return the cached encoding of
if wp, ok := p.(*v5wire.Whoareyou); ok && len(wp.Encoded) > 0 { // WHOAREYOU challenges.
return wp.Encoded, wp.Nonce, nil return wp.ChallengeData, wp.Nonce, nil
} }
c.ctr++ c.ctr++
@ -874,7 +874,7 @@ func (c *testCodec) Encode(toID enode.ID, addr string, p v5wire.Packet, _ *v5wir
// Store recently sent challenges. // Store recently sent challenges.
if w, ok := p.(*v5wire.Whoareyou); ok { if w, ok := p.(*v5wire.Whoareyou); ok {
w.Nonce = authTag w.Nonce = authTag
w.Encoded = frame w.ChallengeData = frame
if c.sentChallenges == nil { if c.sentChallenges == nil {
c.sentChallenges = make(map[enode.ID]*v5wire.Whoareyou) c.sentChallenges = make(map[enode.ID]*v5wire.Whoareyou)
} }
@ -911,6 +911,7 @@ func (c *testCodec) decodeFrame(input []byte) (frame testCodecFrame, p v5wire.Pa
case v5wire.WhoareyouPacket: case v5wire.WhoareyouPacket:
dec := new(v5wire.Whoareyou) dec := new(v5wire.Whoareyou)
err = rlp.DecodeBytes(frame.Packet, &dec) err = rlp.DecodeBytes(frame.Packet, &dec)
dec.ChallengeData = bytes.Clone(input)
p = dec p = dec
default: default:
p, err = v5wire.DecodeMessage(frame.Ptype, frame.Packet) p, err = v5wire.DecodeMessage(frame.Ptype, frame.Packet)

View file

@ -190,10 +190,16 @@ func (c *Codec) Encode(id enode.ID, addr string, packet Packet, challenge *Whoar
) )
switch { switch {
case packet.Kind() == WhoareyouPacket: case packet.Kind() == WhoareyouPacket:
// just send the WHOAREYOU packet raw again, rather than the re-encoded challenge data
w := packet.(*Whoareyou) w := packet.(*Whoareyou)
if len(w.Encoded) > 0 { if len(w.ChallengeData) > 0 {
return w.Encoded, w.Nonce, nil // This WHOAREYOU packet was encoded before, so it's a resend.
// The unmasked packet content is stored in w.ChallengeData.
// Just apply the masking again to finish encoding.
c.buf.Reset()
c.buf.Write(w.ChallengeData)
copy(head.IV[:], w.ChallengeData)
enc := applyMasking(id, head.IV, c.buf.Bytes())
return enc, w.Nonce, nil
} }
head, err = c.encodeWhoareyou(id, packet.(*Whoareyou)) head, err = c.encodeWhoareyou(id, packet.(*Whoareyou))
case challenge != nil: case challenge != nil:
@ -228,7 +234,6 @@ func (c *Codec) Encode(id enode.ID, addr string, packet Packet, challenge *Whoar
if err != nil { if err != nil {
return nil, Nonce{}, err return nil, Nonce{}, err
} }
challenge.Encoded = bytes.Clone(enc)
c.sc.storeSentHandshake(id, addr, challenge) c.sc.storeSentHandshake(id, addr, challenge)
return enc, head.Nonce, err return enc, head.Nonce, err
} }
@ -246,14 +251,10 @@ func (c *Codec) Encode(id enode.ID, addr string, packet Packet, challenge *Whoar
// EncodeRaw encodes a packet with the given header. // EncodeRaw encodes a packet with the given header.
func (c *Codec) EncodeRaw(id enode.ID, head Header, msgdata []byte) ([]byte, error) { func (c *Codec) EncodeRaw(id enode.ID, head Header, msgdata []byte) ([]byte, error) {
// header
c.writeHeaders(&head) c.writeHeaders(&head)
applyMasking(id, head.IV, c.buf.Bytes())
// Apply masking. // message data
masked := c.buf.Bytes()[sizeofMaskingIV:]
mask := head.mask(id)
mask.XORKeyStream(masked[:], masked[:])
// Write message data.
c.buf.Write(msgdata) c.buf.Write(msgdata)
return c.buf.Bytes(), nil return c.buf.Bytes(), nil
} }
@ -463,7 +464,7 @@ func (c *Codec) Decode(inputData []byte, addr string) (src enode.ID, n *enode.No
// Unmask the static header. // Unmask the static header.
var head Header var head Header
copy(head.IV[:], input[:sizeofMaskingIV]) copy(head.IV[:], input[:sizeofMaskingIV])
mask := head.mask(c.localnode.ID()) mask := createMask(c.localnode.ID(), head.IV)
staticHeader := input[sizeofMaskingIV:sizeofStaticPacketData] staticHeader := input[sizeofMaskingIV:sizeofStaticPacketData]
mask.XORKeyStream(staticHeader, staticHeader) mask.XORKeyStream(staticHeader, staticHeader)
@ -679,10 +680,17 @@ func (h *StaticHeader) checkValid(packetLen int, protocolID [6]byte) error {
} }
// mask returns a cipher for 'masking' / 'unmasking' packet headers. // mask returns a cipher for 'masking' / 'unmasking' packet headers.
func (h *Header) mask(destID enode.ID) cipher.Stream { func createMask(destID enode.ID, iv [16]byte) cipher.Stream {
block, err := aes.NewCipher(destID[:16]) block, err := aes.NewCipher(destID[:16])
if err != nil { if err != nil {
panic("can't create cipher") panic("can't create cipher")
} }
return cipher.NewCTR(block, h.IV[:]) return cipher.NewCTR(block, iv[:])
}
func applyMasking(destID enode.ID, iv [16]byte, packet []byte) []byte {
masked := packet[sizeofMaskingIV:]
mask := createMask(destID, iv)
mask.XORKeyStream(masked[:], masked[:])
return packet
} }

View file

@ -269,6 +269,35 @@ func TestHandshake_BadHandshakeAttack(t *testing.T) {
net.nodeB.expectDecodeErr(t, errUnexpectedHandshake, findnode) net.nodeB.expectDecodeErr(t, errUnexpectedHandshake, findnode)
} }
// TestEncodeWhoareyouResend verifies that re-encoding a previously sent
// WHOAREYOU challenge reproduces the exact same wire bytes as the first send.
func TestEncodeWhoareyouResend(t *testing.T) {
	t.Parallel()

	net := newHandshakeTest()
	defer net.close()

	// A -> B WHOAREYOU
	challenge := &Whoareyou{
		Nonce:     Nonce{1, 2, 3, 4},
		IDNonce:   testIDnonce,
		RecordSeq: 0,
	}
	firstEnc, _ := net.nodeA.encode(t, net.nodeB, challenge)
	net.nodeB.expectDecode(t, WhoareyouPacket, firstEnc)
	first := bytes.Clone(firstEnc)
	if len(challenge.ChallengeData) == 0 {
		t.Fatal("ChallengeData not assigned by encode")
	}

	// A -> B WHOAREYOU
	// Send the same challenge again. This should produce exactly
	// the same bytes as the first send.
	secondEnc, _ := net.nodeA.encode(t, net.nodeB, challenge)
	if !bytes.Equal(secondEnc, first) {
		t.Fatal("re-encoded challenge not equal to first")
	}
}
// This test checks some malformed packets. // This test checks some malformed packets.
func TestDecodeErrorsV5(t *testing.T) { func TestDecodeErrorsV5(t *testing.T) {
t.Parallel() t.Parallel()

View file

@ -63,19 +63,20 @@ type (
// WHOAREYOU contains the handshake challenge. // WHOAREYOU contains the handshake challenge.
Whoareyou struct { Whoareyou struct {
ChallengeData []byte // Encoded challenge Nonce Nonce // Nonce of request packet
Nonce Nonce // Nonce of request packet IDNonce [16]byte // Identity proof data
IDNonce [16]byte // Identity proof data RecordSeq uint64 // ENR sequence number of recipient
RecordSeq uint64 // ENR sequence number of recipient
// Node is the locally known node record of recipient. // Node is the locally known node record of recipient.
// This must be set by the caller of Encode. // This must be set by the caller of Encode.
Node *enode.Node Node *enode.Node `rlp:"-"`
// ChallengeData stores the unmasked encoding of the whole packet. This is the
// input data for verification. It is assigned by both Encode and Decode
// operations.
ChallengeData []byte `rlp:"-"`
sent mclock.AbsTime // for handshake GC. sent mclock.AbsTime // for handshake GC.
// Encoded is packet raw data for sending out, but should not be include in the RLP encoding.
Encoded []byte `rlp:"-"`
} }
// PING is sent during liveness checks. // PING is sent during liveness checks.

View file

@ -97,6 +97,7 @@ type btHeader struct {
BlobGasUsed *uint64 BlobGasUsed *uint64
ExcessBlobGas *uint64 ExcessBlobGas *uint64
ParentBeaconBlockRoot *common.Hash ParentBeaconBlockRoot *common.Hash
SlotNumber *uint64
} }
type btHeaderMarshaling struct { type btHeaderMarshaling struct {
@ -109,6 +110,7 @@ type btHeaderMarshaling struct {
BaseFeePerGas *math.HexOrDecimal256 BaseFeePerGas *math.HexOrDecimal256
BlobGasUsed *math.HexOrDecimal64 BlobGasUsed *math.HexOrDecimal64
ExcessBlobGas *math.HexOrDecimal64 ExcessBlobGas *math.HexOrDecimal64
SlotNumber *math.HexOrDecimal64
} }
func (t *BlockTest) Run(snapshotter bool, scheme string, witness bool, tracer *tracing.Hooks, postCheck func(error, *core.BlockChain)) (result error) { func (t *BlockTest) Run(snapshotter bool, scheme string, witness bool, tracer *tracing.Hooks, postCheck func(error, *core.BlockChain)) (result error) {
@ -343,6 +345,9 @@ func validateHeader(h *btHeader, h2 *types.Header) error {
if !reflect.DeepEqual(h.ParentBeaconBlockRoot, h2.ParentBeaconRoot) { if !reflect.DeepEqual(h.ParentBeaconBlockRoot, h2.ParentBeaconRoot) {
return fmt.Errorf("parentBeaconBlockRoot: want: %v have: %v", h.ParentBeaconBlockRoot, h2.ParentBeaconRoot) return fmt.Errorf("parentBeaconBlockRoot: want: %v have: %v", h.ParentBeaconBlockRoot, h2.ParentBeaconRoot)
} }
if !reflect.DeepEqual(h.SlotNumber, h2.SlotNumber) {
return fmt.Errorf("slotNumber: want: %v have: %v", h.SlotNumber, h2.SlotNumber)
}
return nil return nil
} }

View file

@ -78,6 +78,7 @@ func fuzz(input []byte) int {
rand := rand.New(rand.NewSource(0x3a29)) // Same used in package tests!!! rand := rand.New(rand.NewSource(0x3a29)) // Same used in package tests!!!
f := fetcher.NewTxFetcherForTests( f := fetcher.NewTxFetcherForTests(
nil,
func(common.Hash, byte) error { return nil }, func(common.Hash, byte) error { return nil },
func(txs []*types.Transaction) []error { func(txs []*types.Transaction) []error {
return make([]error, len(txs)) return make([]error, len(txs))

View file

@ -38,6 +38,7 @@ func (b btHeader) MarshalJSON() ([]byte, error) {
BlobGasUsed *math.HexOrDecimal64 BlobGasUsed *math.HexOrDecimal64
ExcessBlobGas *math.HexOrDecimal64 ExcessBlobGas *math.HexOrDecimal64
ParentBeaconBlockRoot *common.Hash ParentBeaconBlockRoot *common.Hash
SlotNumber *math.HexOrDecimal64
} }
var enc btHeader var enc btHeader
enc.Bloom = b.Bloom enc.Bloom = b.Bloom
@ -61,6 +62,7 @@ func (b btHeader) MarshalJSON() ([]byte, error) {
enc.BlobGasUsed = (*math.HexOrDecimal64)(b.BlobGasUsed) enc.BlobGasUsed = (*math.HexOrDecimal64)(b.BlobGasUsed)
enc.ExcessBlobGas = (*math.HexOrDecimal64)(b.ExcessBlobGas) enc.ExcessBlobGas = (*math.HexOrDecimal64)(b.ExcessBlobGas)
enc.ParentBeaconBlockRoot = b.ParentBeaconBlockRoot enc.ParentBeaconBlockRoot = b.ParentBeaconBlockRoot
enc.SlotNumber = (*math.HexOrDecimal64)(b.SlotNumber)
return json.Marshal(&enc) return json.Marshal(&enc)
} }
@ -88,6 +90,7 @@ func (b *btHeader) UnmarshalJSON(input []byte) error {
BlobGasUsed *math.HexOrDecimal64 BlobGasUsed *math.HexOrDecimal64
ExcessBlobGas *math.HexOrDecimal64 ExcessBlobGas *math.HexOrDecimal64
ParentBeaconBlockRoot *common.Hash ParentBeaconBlockRoot *common.Hash
SlotNumber *math.HexOrDecimal64
} }
var dec btHeader var dec btHeader
if err := json.Unmarshal(input, &dec); err != nil { if err := json.Unmarshal(input, &dec); err != nil {
@ -156,5 +159,8 @@ func (b *btHeader) UnmarshalJSON(input []byte) error {
if dec.ParentBeaconBlockRoot != nil { if dec.ParentBeaconBlockRoot != nil {
b.ParentBeaconBlockRoot = dec.ParentBeaconBlockRoot b.ParentBeaconBlockRoot = dec.ParentBeaconBlockRoot
} }
if dec.SlotNumber != nil {
b.SlotNumber = (*uint64)(dec.SlotNumber)
}
return nil return nil
} }

930
trie/inspect.go Normal file
View file

@ -0,0 +1,930 @@
// Copyright 2025 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package trie
import (
"bufio"
"bytes"
"cmp"
"container/heap"
"encoding/binary"
"encoding/json"
"errors"
"fmt"
"io"
"os"
"slices"
"sort"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/internal/tablewriter"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/triedb/database"
"golang.org/x/sync/errgroup"
"golang.org/x/sync/semaphore"
)
const (
inspectDumpRecordSize = 32 + trieStatLevels*(3*4+8)
inspectDefaultTopN = 10
inspectParallelism = int64(16)
)
// inspector is used by the inner inspect function to coordinate across threads.
// All mutable fields are either atomic or guarded by one of the mutexes, since
// inspect may run concurrently in many goroutines (bounded by sem).
type inspector struct {
	triedb      database.NodeDatabase // backing node database for resolving trie nodes
	root        common.Hash           // state root, used to derive storage trie IDs
	config      *InspectConfig        // normalized inspection options
	accountStat *LevelStats           // per-depth stats for the account trie
	sem         *semaphore.Weighted   // bounds the number of concurrent walker goroutines

	// Pass 1: dump file writer.
	dumpMu   sync.Mutex    // serializes whole-record writes to dumpBuf
	dumpBuf  *bufio.Writer // buffered writer over dumpFile
	dumpFile *os.File      // on-disk dump receiving one fixed-size record per trie

	storageRecordsWritten atomic.Uint64 // progress counter (storage tries only)

	errMu sync.Mutex // guards err
	err   error      // accumulated (joined) errors from any goroutine
}

// InspectConfig is a set of options to control inspection and format the output.
// TopN determines the maximum number of entries retained for each top-list.
// Path controls optional JSON output. DumpPath controls the pass-1 dump location.
type InspectConfig struct {
	NoStorage bool   // if set, storage tries are not descended into
	TopN      int    // size of each top-N ranking (defaulted by normalizeInspectConfig)
	Path      string // optional JSON output path; empty means print tables to stdout
	DumpPath  string // pass-1 dump file location (defaulted by normalizeInspectConfig)
}
// Inspect walks the trie with the given root and records the number and type of
// nodes at each depth. Storage trie stats are streamed to disk in fixed-size
// records, then summarized in a second pass.
func Inspect(triedb database.NodeDatabase, root common.Hash, config *InspectConfig) error {
	trie, err := New(TrieID(root), triedb)
	if err != nil {
		return fmt.Errorf("fail to open trie %s: %w", root, err)
	}
	// Apply defaults (TopN, DumpPath) before touching the dump file.
	config = normalizeInspectConfig(config)
	dumpFile, err := os.OpenFile(config.DumpPath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0o644)
	if err != nil {
		return fmt.Errorf("failed to create trie dump %s: %w", config.DumpPath, err)
	}
	in := inspector{
		triedb:      triedb,
		root:        root,
		config:      config,
		accountStat: NewLevelStats(),
		sem:         semaphore.NewWeighted(inspectParallelism),
		dumpBuf:     bufio.NewWriterSize(dumpFile, 1<<20),
		dumpFile:    dumpFile,
	}
	// Start progress reporter
	start := time.Now()
	done := make(chan struct{})
	go func() {
		ticker := time.NewTicker(8 * time.Second)
		defer ticker.Stop()
		for {
			select {
			case <-ticker.C:
				accountNodes := in.accountStat.TotalNodes()
				storageRecords := in.storageRecordsWritten.Load()
				log.Info("Inspecting trie",
					"accountNodes", accountNodes,
					"storageRecords", storageRecords,
					"elapsed", common.PrettyDuration(time.Since(start)))
			case <-done:
				return
			}
		}
	}()
	// The root node is never resolved through a hashNode during the walk,
	// so its byte size is accounted for separately before descending.
	in.recordRootSize(trie, root, in.accountStat)
	in.inspect(trie, trie.root, 0, []byte{}, in.accountStat)
	// inspect is synchronous: it waits for all spawned goroutines in its
	// subtree before returning, so no additional wait is needed here.
	// Persist account trie stats as the sentinel record.
	in.writeDumpRecord(common.Hash{}, in.accountStat)
	if err := in.closeDump(); err != nil {
		in.setError(err)
	}
	// Stop progress reporter
	close(done)
	if err := in.getError(); err != nil {
		return err
	}
	// Pass 2: aggregate the on-disk dump into the final report.
	return Summarize(config.DumpPath, config)
}
// InspectContract inspects the on-disk footprint of a single contract.
// It reports snapshot storage (slot count + size) and storage trie node
// statistics (node type breakdown and per-depth distribution).
func InspectContract(triedb database.NodeDatabase, db ethdb.Database, stateRoot common.Hash, address common.Address) error {
	// Resolve account from the state trie.
	accountHash := crypto.Keccak256Hash(address.Bytes())
	accountTrie, err := New(TrieID(stateRoot), triedb)
	if err != nil {
		return fmt.Errorf("failed to open account trie: %w", err)
	}
	accountRLP, err := accountTrie.Get(crypto.Keccak256(address.Bytes()))
	if err != nil {
		return fmt.Errorf("failed to read account: %w", err)
	}
	if accountRLP == nil {
		return fmt.Errorf("account not found: %s", address)
	}
	var account types.StateAccount
	if err := rlp.DecodeBytes(accountRLP, &account); err != nil {
		return fmt.Errorf("failed to decode account: %w", err)
	}
	// Without a storage trie there is nothing meaningful to inspect.
	if account.Root == (common.Hash{}) || account.Root == types.EmptyRootHash {
		return fmt.Errorf("account %s has no storage", address)
	}
	// Look up account snapshot.
	accountData := rawdb.ReadAccountSnapshot(db, accountHash)
	// Run trie walk + snap iteration in parallel.
	var (
		snapSlots atomic.Uint64 // atomic: written by goroutine 1, read by reporter
		snapSize  atomic.Uint64
		g         errgroup.Group
		start     = time.Now()
	)
	// Goroutine 1: Snapshot storage iteration.
	g.Go(func() error {
		prefix := append(rawdb.SnapshotStoragePrefix, accountHash.Bytes()...)
		it := db.NewIterator(prefix, nil)
		defer it.Release()
		for it.Next() {
			// Defensive check: stop as soon as the iterator leaves the
			// account's key space.
			if !bytes.HasPrefix(it.Key(), prefix) {
				break
			}
			snapSlots.Add(1)
			snapSize.Add(uint64(len(it.Key()) + len(it.Value())))
		}
		return it.Error()
	})
	// Goroutine 2: Storage trie walk using the existing inspector.
	storageStat := NewLevelStats()
	g.Go(func() error {
		owner := accountHash
		storage, err := New(StorageTrieID(stateRoot, owner, account.Root), triedb)
		if err != nil {
			return fmt.Errorf("failed to open storage trie: %w", err)
		}
		// NoStorage is set so the walker never recurses into further
		// (nonexistent) nested tries; the dump writer goes to io.Discard.
		in := &inspector{
			triedb:      triedb,
			root:        stateRoot,
			config:      &InspectConfig{NoStorage: true},
			accountStat: NewLevelStats(), // unused, but needed by inspector
			sem:         semaphore.NewWeighted(inspectParallelism),
			dumpBuf:     bufio.NewWriter(io.Discard),
		}
		in.recordRootSize(storage, account.Root, storageStat)
		in.inspect(storage, storage.root, 0, []byte{}, storageStat)
		if err := in.getError(); err != nil {
			return err
		}
		return nil
	})
	// Progress reporter.
	done := make(chan struct{})
	go func() {
		ticker := time.NewTicker(8 * time.Second)
		defer ticker.Stop()
		for {
			select {
			case <-ticker.C:
				log.Info("Inspecting contract",
					"snapSlots", snapSlots.Load(),
					"trieNodes", storageStat.TotalNodes(),
					"elapsed", common.PrettyDuration(time.Since(start)))
			case <-done:
				return
			}
		}
	}()
	if err := g.Wait(); err != nil {
		close(done)
		return err
	}
	close(done)
	// Display results.
	fmt.Printf("\n=== Contract Inspection: %s ===\n", address)
	fmt.Printf("Account hash: %s\n\n", accountHash)
	if len(accountData) == 0 {
		fmt.Println("Account snapshot: not found")
	} else {
		fmt.Printf("Account snapshot: %s\n", common.StorageSize(len(accountData)))
	}
	fmt.Printf("Snapshot storage: %d slots (%s)\n",
		snapSlots.Load(), common.StorageSize(snapSize.Load()))
	// Compute trie totals from LevelStats.
	var trieTotal, trieSize uint64
	for i := 0; i < trieStatLevels; i++ {
		short, full, value, size := storageStat.level[i].load()
		trieTotal += short + full + value
		trieSize += size
	}
	fmt.Printf("Storage trie: %d nodes (%s)\n", trieTotal, common.StorageSize(trieSize))
	// Depth distribution table with node type columns.
	fmt.Println("\nStorage Trie Depth Distribution:")
	b := new(strings.Builder)
	table := tablewriter.NewWriter(b)
	table.SetHeader([]string{"Depth", "Short", "Full", "Value", "Nodes", "Size"})
	for i := 0; i < trieStatLevels; i++ {
		short, full, value, size := storageStat.level[i].load()
		total := short + full + value
		// Skip fully-empty levels to keep the table compact.
		if total == 0 && size == 0 {
			continue
		}
		table.AppendBulk([][]string{{
			fmt.Sprint(i),
			fmt.Sprint(short),
			fmt.Sprint(full),
			fmt.Sprint(value),
			fmt.Sprint(total),
			common.StorageSize(size).String(),
		}})
	}
	table.Render()
	fmt.Print(b.String())
	return nil
}
// normalizeInspectConfig fills in defaults for a possibly-nil InspectConfig:
// a positive TopN and a non-empty DumpPath are guaranteed on the result.
func normalizeInspectConfig(config *InspectConfig) *InspectConfig {
	out := config
	if out == nil {
		out = new(InspectConfig)
	}
	if out.TopN <= 0 {
		out.TopN = inspectDefaultTopN
	}
	if out.DumpPath == "" {
		out.DumpPath = "trie-dump.bin"
	}
	return out
}
// recordRootSize accounts the byte size of the trie's root node at depth 0.
// The walk in inspect only measures nodes resolved via hashNode references,
// which never happens for the root itself, hence this dedicated hook.
func (in *inspector) recordRootSize(trie *Trie, root common.Hash, stat *LevelStats) {
	// Empty tries have no root node to account for.
	if root == (common.Hash{}) || root == types.EmptyRootHash {
		return
	}
	// Prefer the blob already captured by the trie's tracer; fall back to
	// resolving the root node from the database.
	blob := trie.prevalueTracer.Get(nil)
	if len(blob) == 0 {
		resolved, err := trie.reader.Node(nil, root)
		if err != nil {
			// Non-fatal: the size is simply not accounted for.
			log.Error("Failed to read trie root for size accounting", "trie", trie.Hash(), "root", root, "err", err)
			return
		}
		blob = resolved
	}
	stat.addSize(0, uint64(len(blob)))
}
// closeDump flushes any buffered dump data and closes the underlying file,
// combining both failures (if any) into a single returned error.
func (in *inspector) closeDump() error {
	var errs []error
	if buf := in.dumpBuf; buf != nil {
		if err := buf.Flush(); err != nil {
			errs = append(errs, fmt.Errorf("failed to flush trie dump %s: %w", in.config.DumpPath, err))
		}
	}
	if f := in.dumpFile; f != nil {
		if err := f.Close(); err != nil {
			errs = append(errs, fmt.Errorf("failed to close trie dump %s: %w", in.config.DumpPath, err))
		}
	}
	return errors.Join(errs...)
}

// setError records err (unless nil), merging it with any previously
// accumulated error. Safe for concurrent use.
func (in *inspector) setError(err error) {
	if err != nil {
		in.errMu.Lock()
		in.err = errors.Join(in.err, err)
		in.errMu.Unlock()
	}
}

// getError returns the error accumulated so far, or nil.
func (in *inspector) getError() error {
	in.errMu.Lock()
	err := in.err
	in.errMu.Unlock()
	return err
}

// hasError reports whether any error has been recorded.
func (in *inspector) hasError() bool {
	return in.getError() != nil
}
// trySpawn attempts to run fn in a new goroutine, bounded by the semaphore.
// If a slot is available the goroutine is started and tracked via wg; the
// caller must call wg.Wait() before reading any state written by fn.
// Returns false (and does not start a goroutine) when no slot is available.
func (in *inspector) trySpawn(wg *sync.WaitGroup, fn func()) bool {
	// Non-blocking acquire: when the pool is saturated the caller runs fn
	// inline instead of queueing, which keeps the walk deadlock-free.
	if !in.sem.TryAcquire(1) {
		return false
	}
	wg.Add(1)
	go func() {
		defer in.sem.Release(1)
		defer wg.Done()
		fn()
	}()
	return true
}
// writeDumpRecord serializes the per-level stats of one trie into a single
// fixed-size record and appends it to the dump file. The zero owner hash is
// reserved for the account trie sentinel record.
//
// Layout: 32-byte owner hash, then for each level three little-endian uint32
// counters (short/full/value) and a uint64 size — matching
// inspectDumpRecordSize and decodeDumpRecord.
func (in *inspector) writeDumpRecord(owner common.Hash, s *LevelStats) {
	// Once any error has been recorded, stop writing so the dump does not
	// grow with possibly-inconsistent data.
	if in.hasError() {
		return
	}
	var buf [inspectDumpRecordSize]byte
	copy(buf[:32], owner[:])
	off := 32
	for i := 0; i < trieStatLevels; i++ {
		// NOTE(review): counts are truncated to uint32 here; assumes fewer
		// than 2^32 nodes of a given type per level — confirm this bound
		// holds for the largest tries being inspected.
		binary.LittleEndian.PutUint32(buf[off:], uint32(s.level[i].short.Load()))
		off += 4
		binary.LittleEndian.PutUint32(buf[off:], uint32(s.level[i].full.Load()))
		off += 4
		binary.LittleEndian.PutUint32(buf[off:], uint32(s.level[i].value.Load()))
		off += 4
		binary.LittleEndian.PutUint64(buf[off:], s.level[i].size.Load())
		off += 8
	}
	// Records must land in the file atomically relative to each other, so
	// the whole buffer is written under the mutex.
	in.dumpMu.Lock()
	_, err := in.dumpBuf.Write(buf[:])
	in.dumpMu.Unlock()
	if err != nil {
		in.setError(fmt.Errorf("failed writing trie dump record: %w", err))
	}
	// Increment counter for storage tries only (not for account trie)
	if owner != (common.Hash{}) {
		in.storageRecordsWritten.Add(1)
	}
}
// inspect walks the trie rooted at n and records node statistics into stat.
// It may spawn goroutines for subtrees, but always waits for them before
// returning — the caller sees a fully-populated stat when inspect returns.
func (in *inspector) inspect(trie *Trie, n node, height uint32, path []byte, stat *LevelStats) {
	if n == nil {
		return
	}
	// wg tracks goroutines spawned by this call so we can wait for them
	// before returning. This guarantees stat is complete when we return,
	// which is critical for storage tries that write their dump record
	// immediately after inspect returns.
	var wg sync.WaitGroup
	// Four types of nodes can be encountered:
	// - short: extend path with key, inspect single value.
	// - full: inspect all 17 children, spin up new threads when possible.
	// - hash: need to resolve node from disk, retry inspect on result.
	// - value: if account, begin inspecting storage trie.
	switch n := (n).(type) {
	case *shortNode:
		nextPath := slices.Concat(path, n.Key)
		in.inspect(trie, n.Val, height+1, nextPath, stat)
	case *fullNode:
		for idx, child := range n.Children {
			if child == nil {
				continue
			}
			// slices.Concat allocates a fresh path per child, so the
			// closure below never aliases a shared buffer.
			childPath := slices.Concat(path, []byte{byte(idx)})
			childNode := child
			if in.trySpawn(&wg, func() {
				in.inspect(trie, childNode, height+1, childPath, stat)
			}) {
				continue
			}
			// No worker slot free: descend inline on this goroutine.
			in.inspect(trie, childNode, height+1, childPath, stat)
		}
	case hashNode:
		blob, err := trie.reader.Node(path, common.BytesToHash(n))
		if err != nil {
			log.Error("Failed to resolve HashNode", "err", err, "trie", trie.Hash(), "height", height+1, "path", path)
			return
		}
		// The resolved node lives at the same height as the hash
		// reference, so the size is attributed to `height`, not height+1.
		stat.addSize(height, uint64(len(blob)))
		resolved := mustDecodeNode(n, blob)
		in.inspect(trie, resolved, height, path, stat)
		// Return early here so this level isn't recorded twice.
		return
	case valueNode:
		// Only fully-terminated paths can hold account values.
		if !hasTerm(path) {
			break
		}
		var account types.StateAccount
		if err := rlp.Decode(bytes.NewReader(n), &account); err != nil {
			// Not an account value.
			break
		}
		if account.Root == (common.Hash{}) || account.Root == types.EmptyRootHash {
			// Account is empty, nothing further to inspect.
			break
		}
		if !in.config.NoStorage {
			owner := common.BytesToHash(hexToCompact(path))
			storage, err := New(StorageTrieID(in.root, owner, account.Root), in.triedb)
			if err != nil {
				log.Error("Failed to open account storage trie", "node", n, "error", err, "height", height, "path", common.Bytes2Hex(path))
				break
			}
			// Each storage trie gets its own LevelStats and writes its
			// dump record as soon as its walk completes.
			storageStat := NewLevelStats()
			run := func() {
				in.recordRootSize(storage, account.Root, storageStat)
				in.inspect(storage, storage.root, 0, []byte{}, storageStat)
				in.writeDumpRecord(owner, storageStat)
			}
			if in.trySpawn(&wg, run) {
				break
			}
			run()
		}
	default:
		panic(fmt.Sprintf("%T: invalid node: %v", n, n))
	}
	// Wait for all goroutines spawned at this level before recording
	// the current node. This ensures the entire subtree is counted
	// before this call returns.
	wg.Wait()
	// Record stats for current height.
	stat.add(n, height)
}
// Summarize performs pass 2 over a trie dump and reports account stats,
// aggregate storage statistics, and top-N rankings.
func Summarize(dumpPath string, config *InspectConfig) error {
	config = normalizeInspectConfig(config)
	// An explicit dumpPath argument wins; otherwise use the configured one.
	if dumpPath == "" {
		dumpPath = config.DumpPath
	}
	if dumpPath == "" {
		return errors.New("missing dump path")
	}
	file, err := os.Open(dumpPath)
	if err != nil {
		return fmt.Errorf("failed to open trie dump %s: %w", dumpPath, err)
	}
	defer file.Close()
	// Sanity-check the file length: it must be a whole number of records.
	if info, err := file.Stat(); err == nil {
		if info.Size()%inspectDumpRecordSize != 0 {
			return fmt.Errorf("invalid trie dump size %d (not a multiple of %d)", info.Size(), inspectDumpRecordSize)
		}
	}
	depthTop := newStorageStatsTopN(config.TopN, compareStorageStatsByDepth)
	totalTop := newStorageStatsTopN(config.TopN, compareStorageStatsByTotal)
	valueTop := newStorageStatsTopN(config.TopN, compareStorageStatsByValue)
	summary := &inspectSummary{}
	reader := bufio.NewReaderSize(file, 1<<20)
	var buf [inspectDumpRecordSize]byte
	for {
		// Stream the dump record-by-record; memory use is constant apart
		// from the top-N heaps.
		_, err := io.ReadFull(reader, buf[:])
		if errors.Is(err, io.EOF) {
			break
		}
		if errors.Is(err, io.ErrUnexpectedEOF) {
			return fmt.Errorf("truncated trie dump %s", dumpPath)
		}
		if err != nil {
			return fmt.Errorf("failed reading trie dump %s: %w", dumpPath, err)
		}
		record := decodeDumpRecord(buf[:])
		snapshot := newStorageStats(record.Owner, record.Levels)
		// The zero owner is the account trie sentinel, kept separate from
		// the storage aggregates.
		if record.Owner == (common.Hash{}) {
			summary.Account = snapshot
			continue
		}
		summary.StorageCount++
		summary.DepthHistogram[snapshot.MaxDepth]++
		for i := 0; i < trieStatLevels; i++ {
			summary.StorageLevels[i].Short += record.Levels[i].Short
			summary.StorageLevels[i].Full += record.Levels[i].Full
			summary.StorageLevels[i].Value += record.Levels[i].Value
			summary.StorageLevels[i].Size += record.Levels[i].Size
		}
		depthTop.TryInsert(snapshot)
		totalTop.TryInsert(snapshot)
		valueTop.TryInsert(snapshot)
	}
	if summary.Account == nil {
		return fmt.Errorf("dump file %s does not contain the account trie sentinel record", dumpPath)
	}
	// Fold the per-level storage aggregates into grand totals.
	for i := 0; i < trieStatLevels; i++ {
		summary.StorageTotals.Short += summary.StorageLevels[i].Short
		summary.StorageTotals.Full += summary.StorageLevels[i].Full
		summary.StorageTotals.Value += summary.StorageLevels[i].Value
		summary.StorageTotals.Size += summary.StorageLevels[i].Size
	}
	summary.TopByDepth = depthTop.Sorted()
	summary.TopByTotalNodes = totalTop.Sorted()
	summary.TopByValueNodes = valueTop.Sorted()
	// Either emit machine-readable JSON or human-readable tables.
	if config.Path != "" {
		return summary.writeJSON(config.Path)
	}
	summary.display()
	return nil
}
// dumpRecord is the decoded form of one fixed-size pass-1 dump entry:
// the owning trie hash followed by per-level node counters.
type dumpRecord struct {
	Owner  common.Hash
	Levels [trieStatLevels]jsonLevel
}

// decodeDumpRecord parses a single inspectDumpRecordSize-byte record as laid
// out by writeDumpRecord: a 32-byte owner hash, then trieStatLevels groups of
// three little-endian uint32 counters (short/full/value) plus a uint64 size.
func decodeDumpRecord(raw []byte) dumpRecord {
	var record dumpRecord
	copy(record.Owner[:], raw[:32])
	for i, off := 0, 32; i < trieStatLevels; i, off = i+1, off+20 {
		record.Levels[i].Short = uint64(binary.LittleEndian.Uint32(raw[off:]))
		record.Levels[i].Full = uint64(binary.LittleEndian.Uint32(raw[off+4:]))
		record.Levels[i].Value = uint64(binary.LittleEndian.Uint32(raw[off+8:]))
		record.Levels[i].Size = binary.LittleEndian.Uint64(raw[off+12:])
	}
	return record
}
// storageStats is the decoded, aggregated view of one dump record, used for
// rankings and JSON output. A zero Owner denotes the account trie sentinel.
type storageStats struct {
	Owner      common.Hash               // owner hash of the trie
	Levels     [trieStatLevels]jsonLevel // raw per-depth counters
	Summary    jsonLevel                 // column sums across all levels
	MaxDepth   int                       // deepest level containing any node
	TotalNodes uint64                    // Summary.Short + Summary.Full + Summary.Value
	TotalSize  uint64                    // Summary.Size
}

// newStorageStats derives the summary fields (column totals, max depth and
// grand totals) from the raw per-level counters.
func newStorageStats(owner common.Hash, levels [trieStatLevels]jsonLevel) *storageStats {
	snapshot := &storageStats{Owner: owner, Levels: levels}
	for i := 0; i < trieStatLevels; i++ {
		level := levels[i]
		// Track the deepest level that actually contains nodes.
		if level.Short != 0 || level.Full != 0 || level.Value != 0 {
			snapshot.MaxDepth = i
		}
		snapshot.Summary.Short += level.Short
		snapshot.Summary.Full += level.Full
		snapshot.Summary.Value += level.Value
		snapshot.Summary.Size += level.Size
	}
	snapshot.TotalNodes = snapshot.Summary.Short + snapshot.Summary.Full + snapshot.Summary.Value
	snapshot.TotalSize = snapshot.Summary.Size
	return snapshot
}

// trimLevels drops trailing all-zero levels so serialized output stays compact.
func trimLevels(levels [trieStatLevels]jsonLevel) []jsonLevel {
	n := len(levels)
	for n > 0 && levels[n-1] == (jsonLevel{}) {
		n--
	}
	return levels[:n]
}

// MarshalJSON renders the snapshot with trailing empty levels trimmed and the
// value-node count surfaced as a dedicated field.
func (s *storageStats) MarshalJSON() ([]byte, error) {
	type jsonStorageSnapshot struct {
		Owner      common.Hash `json:"Owner"`
		MaxDepth   int         `json:"MaxDepth"`
		TotalNodes uint64      `json:"TotalNodes"`
		TotalSize  uint64      `json:"TotalSize"`
		ValueNodes uint64      `json:"ValueNodes"`
		Levels     []jsonLevel `json:"Levels"`
		Summary    jsonLevel   `json:"Summary"`
	}
	return json.Marshal(jsonStorageSnapshot{
		Owner:      s.Owner,
		MaxDepth:   s.MaxDepth,
		TotalNodes: s.TotalNodes,
		TotalSize:  s.TotalSize,
		ValueNodes: s.Summary.Value,
		Levels:     trimLevels(s.Levels),
		Summary:    s.Summary,
	})
}

// toLevelStats converts the snapshot back into a LevelStats so the shared
// table rendering in LevelStats.display can be reused.
func (s *storageStats) toLevelStats() *LevelStats {
	stats := NewLevelStats()
	for i := 0; i < trieStatLevels; i++ {
		stats.level[i].short.Store(s.Levels[i].Short)
		stats.level[i].full.Store(s.Levels[i].Full)
		stats.level[i].value.Store(s.Levels[i].Value)
		stats.level[i].size.Store(s.Levels[i].Size)
	}
	return stats
}
// storageStatsCompare orders two storageStats: negative when a ranks below b,
// zero when equal, positive when a ranks above b.
type storageStatsCompare func(a, b *storageStats) int

// storageStatsTopN retains the top `limit` entries under cmp, backed by a
// min-heap so the weakest retained entry can be evicted in O(log limit).
type storageStatsTopN struct {
	limit int
	cmp   storageStatsCompare
	heap  storageStatsHeap
}

// storageStatsHeap implements container/heap.Interface over storageStats.
type storageStatsHeap struct {
	items []*storageStats
	cmp   storageStatsCompare
}

func (h storageStatsHeap) Len() int { return len(h.items) }
func (h storageStatsHeap) Less(i, j int) bool {
	// Keep the weakest entry at the root (min-heap semantics).
	return h.cmp(h.items[i], h.items[j]) < 0
}
func (h storageStatsHeap) Swap(i, j int) { h.items[i], h.items[j] = h.items[j], h.items[i] }

// Push appends x to the backing slice; invoked only through heap.Push.
func (h *storageStatsHeap) Push(x any) {
	h.items = append(h.items, x.(*storageStats))
}

// Pop removes and returns the last element; invoked only through heap.Pop.
func (h *storageStatsHeap) Pop() any {
	item := h.items[len(h.items)-1]
	h.items = h.items[:len(h.items)-1]
	return item
}

// newStorageStatsTopN creates an empty top-N tracker with the given capacity
// and ordering.
func newStorageStatsTopN(limit int, cmp storageStatsCompare) *storageStatsTopN {
	h := storageStatsHeap{cmp: cmp}
	heap.Init(&h)
	return &storageStatsTopN{limit: limit, cmp: cmp, heap: h}
}

// TryInsert adds item to the set if it outranks the current weakest entry
// (or the set is not yet full); otherwise the item is discarded.
func (t *storageStatsTopN) TryInsert(item *storageStats) {
	if t.limit <= 0 {
		return
	}
	if t.heap.Len() < t.limit {
		heap.Push(&t.heap, item)
		return
	}
	// The heap root is the weakest retained entry; ties are not replaced.
	if t.cmp(item, t.heap.items[0]) <= 0 {
		return
	}
	heap.Pop(&t.heap)
	heap.Push(&t.heap, item)
}

// Sorted returns a copy of the retained entries ordered strongest-first.
func (t *storageStatsTopN) Sorted() []*storageStats {
	out := append([]*storageStats(nil), t.heap.items...)
	sort.Slice(out, func(i, j int) bool { return t.cmp(out[i], out[j]) > 0 })
	return out
}

// compareStorageStatsByDepth ranks primarily by maximum depth, breaking ties
// by total nodes, value nodes and finally owner hash for determinism.
func compareStorageStatsByDepth(a, b *storageStats) int {
	return cmp.Or(
		cmp.Compare(a.MaxDepth, b.MaxDepth),
		cmp.Compare(a.TotalNodes, b.TotalNodes),
		cmp.Compare(a.Summary.Value, b.Summary.Value),
		bytes.Compare(a.Owner[:], b.Owner[:]),
	)
}

// compareStorageStatsByTotal ranks primarily by total node count.
func compareStorageStatsByTotal(a, b *storageStats) int {
	return cmp.Or(
		cmp.Compare(a.TotalNodes, b.TotalNodes),
		cmp.Compare(a.MaxDepth, b.MaxDepth),
		cmp.Compare(a.Summary.Value, b.Summary.Value),
		bytes.Compare(a.Owner[:], b.Owner[:]),
	)
}

// compareStorageStatsByValue ranks primarily by value (slot) count.
func compareStorageStatsByValue(a, b *storageStats) int {
	return cmp.Or(
		cmp.Compare(a.Summary.Value, b.Summary.Value),
		cmp.Compare(a.MaxDepth, b.MaxDepth),
		cmp.Compare(a.TotalNodes, b.TotalNodes),
		bytes.Compare(a.Owner[:], b.Owner[:]),
	)
}
// inspectSummary is the pass-2 aggregate over an entire trie dump: the
// account trie sentinel, storage-trie totals and histograms, and the
// top-N rankings by three different orderings.
type inspectSummary struct {
	Account         *storageStats             // account trie stats (sentinel record)
	StorageCount    uint64                    // number of storage tries seen
	StorageTotals   jsonLevel                 // grand totals across all storage tries
	StorageLevels   [trieStatLevels]jsonLevel // per-depth totals across all storage tries
	DepthHistogram  [trieStatLevels]uint64    // count of storage tries per max depth
	TopByDepth      []*storageStats
	TopByTotalNodes []*storageStats
	TopByValueNodes []*storageStats
}
// display prints the full human-readable report to stdout: the combined depth
// table, the account trie table, storage aggregates, the depth histogram and
// all three top-N rankings.
func (s *inspectSummary) display() {
	s.displayCombinedDepthTable()
	s.Account.toLevelStats().display("Accounts trie")
	fmt.Println("Storage trie aggregate summary")
	fmt.Printf("Total storage tries: %d\n", s.StorageCount)
	totalNodes := s.StorageTotals.Short + s.StorageTotals.Full + s.StorageTotals.Value
	fmt.Printf("Total nodes: %d\n", totalNodes)
	fmt.Printf("Total size: %s\n", common.StorageSize(s.StorageTotals.Size))
	fmt.Printf(" Short nodes: %d\n", s.StorageTotals.Short)
	fmt.Printf(" Full nodes: %d\n", s.StorageTotals.Full)
	fmt.Printf(" Value nodes: %d\n", s.StorageTotals.Value)
	// Histogram of storage tries by their maximum depth.
	b := new(strings.Builder)
	table := tablewriter.NewWriter(b)
	table.SetHeader([]string{"Max Depth", "Storage Tries"})
	for i, count := range s.DepthHistogram {
		table.AppendBulk([][]string{{fmt.Sprint(i), fmt.Sprint(count)}})
	}
	table.Render()
	fmt.Print(b.String())
	fmt.Println()
	s.displayTop("Top storage tries by max depth", s.TopByDepth)
	s.displayTop("Top storage tries by total node count", s.TopByTotalNodes)
	s.displayTop("Top storage tries by value (slot) count", s.TopByValueNodes)
}

// displayCombinedDepthTable prints a side-by-side per-depth comparison of the
// account trie and the aggregated storage tries, skipping empty depths.
func (s *inspectSummary) displayCombinedDepthTable() {
	accountTotal := s.Account.Summary.Short + s.Account.Summary.Full + s.Account.Summary.Value
	storageTotal := s.StorageTotals.Short + s.StorageTotals.Full + s.StorageTotals.Value
	accountTotalSize := s.Account.Summary.Size
	storageTotalSize := s.StorageTotals.Size
	fmt.Println("Trie Depth Distribution")
	fmt.Printf("Account Trie: %d nodes (%s)\n", accountTotal, common.StorageSize(accountTotalSize))
	fmt.Printf("Storage Tries: %d nodes (%s) across %d tries\n", storageTotal, common.StorageSize(storageTotalSize), s.StorageCount)
	b := new(strings.Builder)
	table := tablewriter.NewWriter(b)
	table.SetHeader([]string{"Depth", "Account Nodes", "Account Size", "Storage Nodes", "Storage Size"})
	for i := 0; i < trieStatLevels; i++ {
		accountNodes := s.Account.Levels[i].Short + s.Account.Levels[i].Full + s.Account.Levels[i].Value
		accountSize := s.Account.Levels[i].Size
		storageNodes := s.StorageLevels[i].Short + s.StorageLevels[i].Full + s.StorageLevels[i].Value
		storageSize := s.StorageLevels[i].Size
		// Skip depths that are empty on both sides to keep the table short.
		if accountNodes == 0 && storageNodes == 0 {
			continue
		}
		table.AppendBulk([][]string{{
			fmt.Sprint(i),
			fmt.Sprint(accountNodes),
			common.StorageSize(accountSize).String(),
			fmt.Sprint(storageNodes),
			common.StorageSize(storageSize).String(),
		}})
	}
	table.Render()
	fmt.Print(b.String())
	fmt.Println()
}
// displayTop prints a ranked list of storage tries under the given title:
// one numbered entry per trie followed by its per-level statistics table.
func (s *inspectSummary) displayTop(title string, list []*storageStats) {
	fmt.Println(title)
	if len(list) == 0 {
		fmt.Println("No storage tries found")
		fmt.Println()
		return
	}
	for rank := 0; rank < len(list); rank++ {
		entry := list[rank]
		fmt.Printf("%d: %s\n", rank+1, entry.Owner)
		entry.toLevelStats().display("storage trie")
	}
}
// MarshalJSON renders the summary in the stable JSON shape consumed by the
// tests and external tooling, trimming trailing empty levels from both the
// account and storage per-depth arrays.
func (s *inspectSummary) MarshalJSON() ([]byte, error) {
	type jsonAccountTrie struct {
		Name    string      `json:"Name"`
		Levels  []jsonLevel `json:"Levels"`
		Summary jsonLevel   `json:"Summary"`
	}
	type jsonStorageSummary struct {
		TotalStorageTries uint64                 `json:"TotalStorageTries"`
		Totals            jsonLevel              `json:"Totals"`
		Levels            []jsonLevel            `json:"Levels"`
		DepthHistogram    [trieStatLevels]uint64 `json:"DepthHistogram"`
	}
	type jsonInspectSummary struct {
		AccountTrie     jsonAccountTrie    `json:"AccountTrie"`
		StorageSummary  jsonStorageSummary `json:"StorageSummary"`
		TopByDepth      []*storageStats    `json:"TopByDepth"`
		TopByTotalNodes []*storageStats    `json:"TopByTotalNodes"`
		TopByValueNodes []*storageStats    `json:"TopByValueNodes"`
	}
	return json.Marshal(jsonInspectSummary{
		AccountTrie: jsonAccountTrie{
			Name:    "account trie",
			Levels:  trimLevels(s.Account.Levels),
			Summary: s.Account.Summary,
		},
		StorageSummary: jsonStorageSummary{
			TotalStorageTries: s.StorageCount,
			Totals:            s.StorageTotals,
			Levels:            trimLevels(s.StorageLevels),
			DepthHistogram:    s.DepthHistogram,
		},
		TopByDepth:      s.TopByDepth,
		TopByTotalNodes: s.TopByTotalNodes,
		TopByValueNodes: s.TopByValueNodes,
	})
}
// writeJSON serializes the summary as indented JSON into the file at path,
// truncating any existing content.
//
// The close error is propagated (joined with any encode error) instead of
// being dropped by a deferred Close: on a write path, failures surfaced only
// at close time would otherwise be silently lost.
func (s *inspectSummary) writeJSON(path string) error {
	file, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0o644)
	if err != nil {
		return err
	}
	enc := json.NewEncoder(file)
	enc.SetIndent("", " ")
	err = enc.Encode(s)
	if cerr := file.Close(); cerr != nil {
		err = errors.Join(err, cerr)
	}
	return err
}
// display will print a table displaying the trie's node statistics.
// Each non-empty level gets a row; the footer carries column totals.
func (s *LevelStats) display(title string) {
	// Shorten title if too long.
	if len(title) > 32 {
		title = title[0:8] + "..." + title[len(title)-8:]
	}
	b := new(strings.Builder)
	table := tablewriter.NewWriter(b)
	table.SetHeader([]string{title, "Level", "Short Nodes", "Full Node", "Value Node"})
	// Accumulate column totals while emitting rows.
	stat := &stat{}
	for i := range s.level {
		if s.level[i].empty() {
			continue
		}
		short, full, value, _ := s.level[i].load()
		table.AppendBulk([][]string{{"-", fmt.Sprint(i), fmt.Sprint(short), fmt.Sprint(full), fmt.Sprint(value)}})
		stat.add(&s.level[i])
	}
	short, full, value, _ := stat.load()
	table.SetFooter([]string{"Total", "", fmt.Sprint(short), fmt.Sprint(full), fmt.Sprint(value)})
	table.Render()
	fmt.Print(b.String())
	fmt.Println("Max depth", s.MaxDepth())
	fmt.Println()
}

// jsonLevel holds the node counters for one trie depth level in a
// serialization-friendly (non-atomic) form.
type jsonLevel struct {
	Short uint64 // number of short (extension/leaf) nodes
	Full  uint64 // number of full (branch) nodes
	Value uint64 // number of value nodes
	Size  uint64 // total encoded byte size at this level
}

256
trie/inspect_test.go Normal file
View file

@ -0,0 +1,256 @@
// Copyright 2025 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package trie
import (
"encoding/json"
"math/rand"
"os"
"path/filepath"
"reflect"
"testing"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/trie/trienode"
"github.com/holiman/uint256"
)
// TestInspect inspects a randomly generated account trie. It's useful for
// quickly verifying changes to the results display.
// It also cross-checks that pass 2 (Summarize) over the dump produces the
// exact same JSON summary as the full Inspect run, and that totals are
// internally consistent with the per-level breakdowns.
func TestInspect(t *testing.T) {
	db := newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme)
	trie, err := NewStateTrie(TrieID(types.EmptyRootHash), db)
	if err != nil {
		t.Fatalf("failed to create state trie: %v", err)
	}
	// Create a realistic looking account trie with storage.
	addresses, accounts := makeAccountsWithStorage(db, 11, true)
	for i := 0; i < len(addresses); i++ {
		trie.MustUpdate(crypto.Keccak256(addresses[i][:]), accounts[i])
	}
	// Insert the accounts into the trie and hash it
	root, nodes := trie.Commit(true)
	db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes))
	db.Commit(root)
	tempDir := t.TempDir()
	dumpPath := filepath.Join(tempDir, "trie-dump.bin")
	// Full run: pass 1 (walk + dump) followed by pass 2 (summary JSON).
	if err := Inspect(db, root, &InspectConfig{
		TopN:     1,
		DumpPath: dumpPath,
		Path:     filepath.Join(tempDir, "trie-summary.json"),
	}); err != nil {
		t.Fatalf("inspect failed: %v", err)
	}
	// Re-run only pass 2 over the same dump; results must match exactly.
	reanalysisPath := filepath.Join(tempDir, "trie-summary-reanalysis.json")
	if err := Summarize(dumpPath, &InspectConfig{
		TopN: 1,
		Path: reanalysisPath,
	}); err != nil {
		t.Fatalf("summarize failed: %v", err)
	}
	inspectSummaryPath := filepath.Join(tempDir, "trie-summary.json")
	inspectOut := loadInspectJSON(t, inspectSummaryPath)
	reanalysisOut := loadInspectJSON(t, reanalysisPath)
	if len(inspectOut.StorageSummary.Levels) == 0 {
		t.Fatal("expected StorageSummary.Levels to be populated")
	}
	if inspectOut.AccountTrie.Summary.Size == 0 {
		t.Fatal("expected account trie size summary to be populated")
	}
	if inspectOut.StorageSummary.Totals.Size == 0 {
		t.Fatal("expected storage trie size summary to be populated")
	}
	if !reflect.DeepEqual(inspectOut.AccountTrie, reanalysisOut.AccountTrie) {
		t.Fatal("account trie summary mismatch between inspect and summarize")
	}
	if !reflect.DeepEqual(inspectOut.StorageSummary, reanalysisOut.StorageSummary) {
		t.Fatal("storage summary mismatch between inspect and summarize")
	}
	// Column totals must equal the sum of the per-level rows.
	assertStorageTotalsMatchLevels(t, inspectOut)
	assertStorageTotalsMatchLevels(t, reanalysisOut)
	assertAccountTotalsMatchLevels(t, inspectOut.AccountTrie)
	assertAccountTotalsMatchLevels(t, reanalysisOut.AccountTrie)
	// Every storage trie must be counted exactly once in the histogram.
	var histogramTotal uint64
	for _, count := range inspectOut.StorageSummary.DepthHistogram {
		histogramTotal += count
	}
	if histogramTotal != inspectOut.StorageSummary.TotalStorageTries {
		t.Fatalf("depth histogram total %d does not match total storage tries %d", histogramTotal, inspectOut.StorageSummary.TotalStorageTries)
	}
}
// inspectJSONOutput mirrors the JSON shape emitted by inspectSummary.MarshalJSON
// for decoding in tests.
type inspectJSONOutput struct {
	// Reuse storageStats for AccountTrie JSON to avoid introducing a parallel
	// account summary test type. AccountTrie JSON includes Levels+Summary,
	// which map directly; other storageStats fields remain zero-values.
	AccountTrie    storageStats `json:"AccountTrie"`
	StorageSummary struct {
		TotalStorageTries uint64                 `json:"TotalStorageTries"`
		Totals            jsonLevel              `json:"Totals"`
		Levels            []jsonLevel            `json:"Levels"`
		DepthHistogram    [trieStatLevels]uint64 `json:"DepthHistogram"`
	} `json:"StorageSummary"`
}

// loadInspectJSON reads and decodes a summary JSON file, failing the test on
// any I/O or decode error.
func loadInspectJSON(t *testing.T, path string) inspectJSONOutput {
	t.Helper()
	raw, err := os.ReadFile(path)
	if err != nil {
		t.Fatalf("failed to read %s: %v", path, err)
	}
	var out inspectJSONOutput
	if err := json.Unmarshal(raw, &out); err != nil {
		t.Fatalf("failed to decode %s: %v", path, err)
	}
	return out
}

// assertStorageTotalsMatchLevels checks that the storage Totals column equals
// the sum of the per-level rows.
func assertStorageTotalsMatchLevels(t *testing.T, out inspectJSONOutput) {
	t.Helper()
	var fromLevels jsonLevel
	for _, level := range out.StorageSummary.Levels {
		fromLevels.Short += level.Short
		fromLevels.Full += level.Full
		fromLevels.Value += level.Value
		fromLevels.Size += level.Size
	}
	if fromLevels.Short != out.StorageSummary.Totals.Short || fromLevels.Full != out.StorageSummary.Totals.Full || fromLevels.Value != out.StorageSummary.Totals.Value || fromLevels.Size != out.StorageSummary.Totals.Size {
		t.Fatalf("storage totals mismatch: levels=%+v totals=%+v", fromLevels, out.StorageSummary.Totals)
	}
}

// assertAccountTotalsMatchLevels checks that the account trie Summary equals
// the sum of its per-level rows.
func assertAccountTotalsMatchLevels(t *testing.T, account storageStats) {
	t.Helper()
	var fromLevels jsonLevel
	for _, level := range account.Levels {
		fromLevels.Short += level.Short
		fromLevels.Full += level.Full
		fromLevels.Value += level.Value
		fromLevels.Size += level.Size
	}
	if fromLevels.Short != account.Summary.Short || fromLevels.Full != account.Summary.Full || fromLevels.Value != account.Summary.Value || fromLevels.Size != account.Summary.Size {
		t.Fatalf("account totals mismatch: levels=%+v totals=%+v", fromLevels, account.Summary)
	}
}
// TestInspectContract tests the InspectContract function on a single account
// with storage and snapshot data.
func TestInspectContract(t *testing.T) {
	diskdb := rawdb.NewMemoryDatabase()
	db := newTestDatabase(diskdb, rawdb.HashScheme)
	// Create a contract address and its storage trie.
	address := common.HexToAddress("0x1234567890abcdef1234567890abcdef12345678")
	accountHash := crypto.Keccak256Hash(address.Bytes())
	// Build a storage trie with some entries.
	storageTrie := NewEmpty(db)
	storageSlots := make(map[common.Hash][]byte)
	for i := 0; i < 10; i++ {
		k := crypto.Keccak256Hash([]byte{byte(i)})
		v := []byte{byte(i + 1)}
		storageTrie.MustUpdate(k.Bytes(), v)
		storageSlots[k] = v
	}
	storageRoot, storageNodes := storageTrie.Commit(true)
	db.Update(storageRoot, types.EmptyRootHash, trienode.NewWithNodeSet(storageNodes))
	db.Commit(storageRoot)
	// Build the account trie with the contract account.
	account := types.StateAccount{
		Nonce:    1,
		Balance:  uint256.NewInt(1000),
		Root:     storageRoot,
		CodeHash: crypto.Keccak256(nil),
	}
	accountRLP, err := rlp.EncodeToBytes(&account)
	if err != nil {
		t.Fatalf("failed to encode account: %v", err)
	}
	accountTrie := NewEmpty(db)
	accountTrie.MustUpdate(crypto.Keccak256(address.Bytes()), accountRLP)
	stateRoot, accountNodes := accountTrie.Commit(true)
	db.Update(stateRoot, types.EmptyRootHash, trienode.NewWithNodeSet(accountNodes))
	db.Commit(stateRoot)
	// Write snapshot data for the account and its storage slots.
	rawdb.WriteAccountSnapshot(diskdb, accountHash, accountRLP)
	for k, v := range storageSlots {
		rawdb.WriteStorageSnapshot(diskdb, accountHash, k, v)
	}
	// InspectContract should succeed without error.
	if err := InspectContract(db, diskdb, stateRoot, address); err != nil {
		t.Fatalf("InspectContract failed: %v", err)
	}
}
// makeAccountsWithStorage generates `size` deterministic pseudo-random
// accounts (address + RLP-encoded StateAccount). When storage is true, each
// account additionally gets a committed storage trie with 1-256 random slots
// and the account's Root points at it; otherwise Root is the empty root.
func makeAccountsWithStorage(db *testDb, size int, storage bool) (addresses [][20]byte, accounts [][]byte) {
	// Make the random benchmark deterministic
	random := rand.New(rand.NewSource(0))
	addresses = make([][20]byte, size)
	for i := 0; i < len(addresses); i++ {
		data := make([]byte, 20)
		random.Read(data)
		copy(addresses[i][:], data)
	}
	accounts = make([][]byte, len(addresses))
	for i := 0; i < len(accounts); i++ {
		var (
			nonce = uint64(random.Int63())
			root  = types.EmptyRootHash
			code  = crypto.Keccak256(nil)
		)
		if storage {
			// Populate and commit a storage trie for this account.
			trie := NewEmpty(db)
			for range random.Uint32()%256 + 1 { // non-zero
				k, v := make([]byte, 32), make([]byte, 32)
				random.Read(k)
				random.Read(v)
				trie.MustUpdate(k, v)
			}
			var nodes *trienode.NodeSet
			root, nodes = trie.Commit(true)
			db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes))
			db.Commit(root)
		}
		numBytes := random.Uint32() % 33 // [0, 32] bytes
		balanceBytes := make([]byte, numBytes)
		random.Read(balanceBytes)
		balance := new(uint256.Int).SetBytes(balanceBytes)
		data, _ := rlp.EncodeToBytes(&types.StateAccount{
			Nonce:    nonce,
			Balance:  balance,
			Root:     root,
			CodeHash: code,
		})
		accounts[i] = data
	}
	return addresses, accounts
}

123
trie/levelstats.go Normal file
View file

@ -0,0 +1,123 @@
// Copyright 2025 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package trie
import (
"fmt"
"sync/atomic"
)
// trieStatLevels is the number of trie depth levels tracked by LevelStats.
const trieStatLevels = 16

// LevelStats tracks the type and count of trie nodes at each level in a trie.
//
// Note: theoretically it is possible to have up to 64 trie levels, but
// LevelStats supports exactly 16 levels and panics on deeper paths.
type LevelStats struct {
	level [trieStatLevels]stat // per-depth counters, index == trie depth
}
// NewLevelStats creates an empty trie statistics collector.
func NewLevelStats() *LevelStats {
	return new(LevelStats)
}
// MaxDepth iterates each level and finds the deepest level with at least one
// trie node.
func (s *LevelStats) MaxDepth() int {
	// Scan from the deepest level downwards and stop at the first level
	// holding any node; level 0 is reported when all levels are empty.
	for i := trieStatLevels - 1; i >= 0; i-- {
		lvl := &s.level[i]
		if lvl.short.Load()|lvl.full.Load()|lvl.value.Load() != 0 {
			return i
		}
	}
	return 0
}
// TotalNodes returns the total number of nodes across all levels and types.
func (s *LevelStats) TotalNodes() uint64 {
	total := uint64(0)
	for i := range s.level {
		short, full, value, _ := s.level[i].load()
		total += short + full + value
	}
	return total
}
// add increases the node count by one for the specified node type and depth.
func (s *LevelStats) add(n node, depth uint32) {
	lvl := &s.level[depth]
	switch n.(type) {
	case *shortNode:
		lvl.short.Add(1)
	case *fullNode:
		lvl.full.Add(1)
	case valueNode:
		lvl.value.Add(1)
	default:
		panic(fmt.Sprintf("%T: invalid node: %v", n, n))
	}
}
// addSize increases the raw byte-size tally at the specified depth.
func (s *LevelStats) addSize(depth uint32, size uint64) {
	lvl := &s.level[depth]
	lvl.size.Add(size)
}
// AddLeaf records a leaf depth. Witness collection reuses the value-node bucket
// for leaf accounting. It panics if the depth is outside [0, 15].
func (s *LevelStats) AddLeaf(depth int) {
	// Out-of-range depths panic via the array bounds check.
	lvl := &s.level[depth]
	lvl.value.Add(1)
}
// LeafDepths returns leaf counts grouped by depth.
func (s *LevelStats) LeafDepths() [trieStatLevels]int64 {
	var out [trieStatLevels]int64
	for i := 0; i < trieStatLevels; i++ {
		out[i] = int64(s.level[i].value.Load())
	}
	return out
}
// stat is a specific level's count of each node type.
//
// The counters are atomic so they can be updated concurrently.
type stat struct {
	short atomic.Uint64 // number of short nodes at this level
	full  atomic.Uint64 // number of full nodes at this level
	value atomic.Uint64 // number of value nodes (also reused for leaf accounting, see AddLeaf)
	size  atomic.Uint64 // accumulated raw byte size at this level
}
// empty reports whether the level holds no trie nodes of any type and has
// no accumulated size.
func (s *stat) empty() bool {
	// Collapse the former if/return-true/return-false form into a single
	// boolean expression.
	return s.full.Load() == 0 && s.short.Load() == 0 && s.value.Load() == 0 && s.size.Load() == 0
}
// load is a helper that loads each node type's value.
func (s *stat) load() (uint64, uint64, uint64, uint64) {
	short := s.short.Load()
	full := s.full.Load()
	value := s.value.Load()
	size := s.size.Load()
	return short, full, value, size
}
// add is a helper that adds two level's stats together.
func (s *stat) add(other *stat) *stat {
	short, full, value, size := other.load()
	s.short.Add(short)
	s.full.Add(full)
	s.value.Add(value)
	s.size.Add(size)
	return s
}

37
trie/levelstats_test.go Normal file
View file

@ -0,0 +1,37 @@
// Copyright 2025 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package trie
import "testing"
// TestLevelStatsAddLeafDepthBounds checks that a leaf recorded at the deepest
// supported level (15) is reflected in LeafDepths.
func TestLevelStatsAddLeafDepthBounds(t *testing.T) {
	stats := NewLevelStats()
	stats.AddLeaf(trieStatLevels - 1)
	depths := stats.LeafDepths()
	if got := depths[trieStatLevels-1]; got != 1 {
		t.Fatalf("leaf count at depth 15 = %d, want 1", got)
	}
}
// TestLevelStatsAddLeafPanicsOnDepth16 checks that recording a leaf past the
// last supported level triggers a panic.
func TestLevelStatsAddLeafPanicsOnDepth16(t *testing.T) {
	defer func() {
		if recover() == nil {
			t.Fatal("expected panic for depth >= 16")
		}
	}()
	NewLevelStats().AddLeaf(trieStatLevels)
}

View file

@ -454,7 +454,7 @@ func hasRightElement(node node, key []byte) bool {
// //
// The firstKey is paired with firstProof, not necessarily the same as keys[0] // The firstKey is paired with firstProof, not necessarily the same as keys[0]
// (unless firstProof is an existent proof). Similarly, lastKey and lastProof // (unless firstProof is an existent proof). Similarly, lastKey and lastProof
// are paired. // are paired. The firstKey should be less than or equal to all keys in the list.
// //
// Expect the normal case, this function can also be used to verify the following // Expect the normal case, this function can also be used to verify the following
// range proofs: // range proofs:
@ -520,9 +520,14 @@ func VerifyRangeProof(rootHash common.Hash, firstKey []byte, keys [][]byte, valu
} }
return false, nil return false, nil
} }
var lastKey = keys[len(keys)-1] // Short circuit if the key of first element is greater than firstKey.
// A nil firstKey slice is equivalent to an empty slice.
if bytes.Compare(firstKey, keys[0]) > 0 {
return false, errors.New("unexpected key-value pairs preceding the requested range")
}
// Special case, there is only one element and two edge keys are same. // Special case, there is only one element and two edge keys are same.
// In this case, we can't construct two edge paths. So handle it here. // In this case, we can't construct two edge paths. So handle it here.
var lastKey = keys[len(keys)-1]
if len(keys) == 1 && bytes.Equal(firstKey, lastKey) { if len(keys) == 1 && bytes.Equal(firstKey, lastKey) {
root, val, err := proofToPath(rootHash, nil, firstKey, proof, false) root, val, err := proofToPath(rootHash, nil, firstKey, proof, false)
if err != nil { if err != nil {
@ -577,7 +582,9 @@ func VerifyRangeProof(rootHash common.Hash, firstKey []byte, keys [][]byte, valu
tr.root = nil tr.root = nil
} }
for index, key := range keys { for index, key := range keys {
tr.Update(key, values[index]) if err := tr.Update(key, values[index]); err != nil {
return false, err
}
} }
if tr.Hash() != rootHash { if tr.Hash() != rootHash {
return false, fmt.Errorf("invalid proof, want hash %x, got %x", rootHash, tr.Hash()) return false, fmt.Errorf("invalid proof, want hash %x, got %x", rootHash, tr.Hash())

View file

@ -17,8 +17,8 @@
package version package version
const ( const (
Major = 1 // Major version component of the current release Major = 1 // Major version component of the current release
Minor = 17 // Minor version component of the current release Minor = 17 // Minor version component of the current release
Patch = 0 // Patch version component of the current release Patch = 1 // Patch version component of the current release
Meta = "stable" // Version metadata to append to the version string Meta = "unstable" // Version metadata to append to the version string
) )