Merge branch 'master' into release/1.16

This commit is contained in:
Felix Lange 2025-10-16 09:59:30 +02:00
commit 737ffd1bf0
111 changed files with 21313 additions and 11798 deletions

View file

@ -10,7 +10,7 @@ on:
jobs: jobs:
lint: lint:
name: Lint name: Lint
runs-on: self-hosted-ghr runs-on: [self-hosted-ghr, size-s-x64]
steps: steps:
- uses: actions/checkout@v4 - uses: actions/checkout@v4
with: with:
@ -37,7 +37,7 @@ jobs:
test: test:
name: Test name: Test
needs: lint needs: lint
runs-on: self-hosted-ghr runs-on: [self-hosted-ghr, size-l-x64]
strategy: strategy:
matrix: matrix:
go: go:
@ -55,4 +55,4 @@ jobs:
cache: false cache: false
- name: Run tests - name: Run tests
run: go run build/ci.go test run: go run build/ci.go test -p 8

View file

@ -485,13 +485,13 @@ var bindTests = []struct {
contract Defaulter { contract Defaulter {
address public caller; address public caller;
function() { fallback() external payable {
caller = msg.sender; caller = msg.sender;
} }
} }
`, `,
[]string{`6060604052606a8060106000396000f360606040523615601d5760e060020a6000350463fc9c8d3981146040575b605e6000805473ffffffffffffffffffffffffffffffffffffffff191633179055565b606060005473ffffffffffffffffffffffffffffffffffffffff1681565b005b6060908152602090f3`}, []string{`608060405234801561000f575f80fd5b5061013d8061001d5f395ff3fe608060405260043610610021575f3560e01c8063fc9c8d391461006257610022565b5b335f806101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff1602179055005b34801561006d575f80fd5b5061007661008c565b60405161008391906100ee565b60405180910390f35b5f8054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b5f73ffffffffffffffffffffffffffffffffffffffff82169050919050565b5f6100d8826100af565b9050919050565b6100e8816100ce565b82525050565b5f6020820190506101015f8301846100df565b9291505056fea26469706673582212201e9273ecfb1f534644c77f09a25c21baaba81cf1c444ebc071e12a225a23c72964736f6c63430008140033`},
[]string{`[{"constant":true,"inputs":[],"name":"caller","outputs":[{"name":"","type":"address"}],"type":"function"}]`}, []string{`[{"stateMutability":"payable","type":"fallback"},{"inputs":[],"name":"caller","outputs":[{"internalType":"address","name":"","type":"address"}],"stateMutability":"view","type":"function"}]`},
` `
"math/big" "math/big"

View file

@ -277,8 +277,10 @@ func (c *BoundContract) RawCreationTransact(opts *TransactOpts, calldata []byte)
// Transfer initiates a plain transaction to move funds to the contract, calling // Transfer initiates a plain transaction to move funds to the contract, calling
// its default method if one is available. // its default method if one is available.
func (c *BoundContract) Transfer(opts *TransactOpts) (*types.Transaction, error) { func (c *BoundContract) Transfer(opts *TransactOpts) (*types.Transaction, error) {
// todo(rjl493456442) check the payable fallback or receive is defined // Check if payable fallback or receive is defined
// or not, reject invalid transaction at the first place if !c.abi.HasReceive() && !(c.abi.HasFallback() && c.abi.Fallback.IsPayable()) {
return nil, fmt.Errorf("contract does not have a payable fallback or receive function")
}
return c.transact(opts, &c.address, nil) return c.transact(opts, &c.address, nil)
} }

View file

@ -2,7 +2,6 @@ clone_depth: 5
version: "{branch}.{build}" version: "{branch}.{build}"
image: image:
- Ubuntu
- Visual Studio 2019 - Visual Studio 2019
environment: environment:
@ -17,25 +16,6 @@ install:
- go version - go version
for: for:
# Linux has its own script without -arch and -cc.
# The linux builder also runs lint.
- matrix:
only:
- image: Ubuntu
build_script:
- go run build/ci.go lint
- go run build/ci.go check_generate
- go run build/ci.go check_baddeps
- go run build/ci.go install -dlgo
test_script:
- go run build/ci.go test -dlgo -short
# linux/386 is disabled.
- matrix:
exclude:
- image: Ubuntu
GETH_ARCH: 386
# Windows builds for amd64 + 386. # Windows builds for amd64 + 386.
- matrix: - matrix:
only: only:
@ -56,4 +36,4 @@ for:
- go run build/ci.go archive -arch %GETH_ARCH% -type zip -signer WINDOWS_SIGNING_KEY -upload gethstore/builds - go run build/ci.go archive -arch %GETH_ARCH% -type zip -signer WINDOWS_SIGNING_KEY -upload gethstore/builds
- go run build/ci.go nsis -arch %GETH_ARCH% -signer WINDOWS_SIGNING_KEY -upload gethstore/builds - go run build/ci.go nsis -arch %GETH_ARCH% -signer WINDOWS_SIGNING_KEY -upload gethstore/builds
test_script: test_script:
- go run build/ci.go test -dlgo -arch %GETH_ARCH% -cc %GETH_CC% -short - go run build/ci.go test -dlgo -arch %GETH_ARCH% -cc %GETH_CC% -short -skip-spectests

View file

@ -57,6 +57,7 @@ import (
"time" "time"
"github.com/cespare/cp" "github.com/cespare/cp"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto/signify" "github.com/ethereum/go-ethereum/crypto/signify"
"github.com/ethereum/go-ethereum/internal/build" "github.com/ethereum/go-ethereum/internal/build"
"github.com/ethereum/go-ethereum/internal/download" "github.com/ethereum/go-ethereum/internal/download"
@ -148,7 +149,7 @@ func executablePath(name string) string {
func main() { func main() {
log.SetFlags(log.Lshortfile) log.SetFlags(log.Lshortfile)
if !build.FileExist(filepath.Join("build", "ci.go")) { if !common.FileExist(filepath.Join("build", "ci.go")) {
log.Fatal("this script must be run from the root of the repository") log.Fatal("this script must be run from the root of the repository")
} }
if len(os.Args) < 2 { if len(os.Args) < 2 {
@ -280,20 +281,26 @@ func buildFlags(env build.Environment, staticLinking bool, buildTags []string) (
func doTest(cmdline []string) { func doTest(cmdline []string) {
var ( var (
dlgo = flag.Bool("dlgo", false, "Download Go and build with it") dlgo = flag.Bool("dlgo", false, "Download Go and build with it")
arch = flag.String("arch", "", "Run tests for given architecture") arch = flag.String("arch", "", "Run tests for given architecture")
cc = flag.String("cc", "", "Sets C compiler binary") cc = flag.String("cc", "", "Sets C compiler binary")
coverage = flag.Bool("coverage", false, "Whether to record code coverage") coverage = flag.Bool("coverage", false, "Whether to record code coverage")
verbose = flag.Bool("v", false, "Whether to log verbosely") verbose = flag.Bool("v", false, "Whether to log verbosely")
race = flag.Bool("race", false, "Execute the race detector") race = flag.Bool("race", false, "Execute the race detector")
short = flag.Bool("short", false, "Pass the 'short'-flag to go test") short = flag.Bool("short", false, "Pass the 'short'-flag to go test")
cachedir = flag.String("cachedir", "./build/cache", "directory for caching downloads") cachedir = flag.String("cachedir", "./build/cache", "directory for caching downloads")
skipspectests = flag.Bool("skip-spectests", false, "Skip downloading execution-spec-tests fixtures")
threads = flag.Int("p", 1, "Number of CPU threads to use for testing")
) )
flag.CommandLine.Parse(cmdline) flag.CommandLine.Parse(cmdline)
// Get test fixtures. // Load checksums file (needed for both spec tests and dlgo)
csdb := download.MustLoadChecksums("build/checksums.txt") csdb := download.MustLoadChecksums("build/checksums.txt")
downloadSpecTestFixtures(csdb, *cachedir)
// Get test fixtures.
if !*skipspectests {
downloadSpecTestFixtures(csdb, *cachedir)
}
// Configure the toolchain. // Configure the toolchain.
tc := build.GoToolchain{GOARCH: *arch, CC: *cc} tc := build.GoToolchain{GOARCH: *arch, CC: *cc}
@ -314,7 +321,7 @@ func doTest(cmdline []string) {
// Test a single package at a time. CI builders are slow // Test a single package at a time. CI builders are slow
// and some tests run into timeouts under load. // and some tests run into timeouts under load.
gotest.Args = append(gotest.Args, "-p", "1") gotest.Args = append(gotest.Args, "-p", fmt.Sprintf("%d", *threads))
if *coverage { if *coverage {
gotest.Args = append(gotest.Args, "-covermode=atomic", "-cover") gotest.Args = append(gotest.Args, "-covermode=atomic", "-cover")
} }
@ -895,7 +902,7 @@ func ppaUpload(workdir, ppa, sshUser string, files []string) {
var idfile string var idfile string
if sshkey := getenvBase64("PPA_SSH_KEY"); len(sshkey) > 0 { if sshkey := getenvBase64("PPA_SSH_KEY"); len(sshkey) > 0 {
idfile = filepath.Join(workdir, "sshkey") idfile = filepath.Join(workdir, "sshkey")
if !build.FileExist(idfile) { if !common.FileExist(idfile) {
os.WriteFile(idfile, sshkey, 0600) os.WriteFile(idfile, sshkey, 0600)
} }
} }

View file

@ -1,9 +1,10 @@
#!/bin/sh #!/bin/sh
hivechain generate \ hivechain generate \
--pos \
--fork-interval 6 \ --fork-interval 6 \
--tx-interval 1 \ --tx-interval 1 \
--length 500 \ --length 600 \
--outdir testdata \ --outdir testdata \
--lastfork cancun \ --lastfork prague \
--outputs accounts,genesis,chain,headstate,txinfo,headblock,headfcu,newpayload,forkenv --outputs accounts,genesis,chain,headstate,txinfo,headblock,headfcu,newpayload,forkenv

View file

@ -86,3 +86,9 @@ func protoOffset(proto Proto) uint64 {
panic("unhandled protocol") panic("unhandled protocol")
} }
} }
// msgTypePtr is the constraint for protocol message types.
type msgTypePtr[U any] interface {
*U
Kind() byte
}

View file

@ -86,9 +86,9 @@ func (s *Suite) TestSnapGetAccountRange(t *utesting.T) {
root: root, root: root,
startingHash: zero, startingHash: zero,
limitHash: ffHash, limitHash: ffHash,
expAccounts: 86, expAccounts: 67,
expFirst: firstKey, expFirst: firstKey,
expLast: common.HexToHash("0x445cb5c1278fdce2f9cbdb681bdd76c52f8e50e41dbd9e220242a69ba99ac099"), expLast: common.HexToHash("0x622e662246601dd04f996289ce8b85e86db7bb15bb17f86487ec9d543ddb6f9a"),
desc: "In this test, we request the entire state range, but limit the response to 4000 bytes.", desc: "In this test, we request the entire state range, but limit the response to 4000 bytes.",
}, },
{ {
@ -96,9 +96,9 @@ func (s *Suite) TestSnapGetAccountRange(t *utesting.T) {
root: root, root: root,
startingHash: zero, startingHash: zero,
limitHash: ffHash, limitHash: ffHash,
expAccounts: 65, expAccounts: 49,
expFirst: firstKey, expFirst: firstKey,
expLast: common.HexToHash("0x2e6fe1362b3e388184fd7bf08e99e74170b26361624ffd1c5f646da7067b58b6"), expLast: common.HexToHash("0x445cb5c1278fdce2f9cbdb681bdd76c52f8e50e41dbd9e220242a69ba99ac099"),
desc: "In this test, we request the entire state range, but limit the response to 3000 bytes.", desc: "In this test, we request the entire state range, but limit the response to 3000 bytes.",
}, },
{ {
@ -106,9 +106,9 @@ func (s *Suite) TestSnapGetAccountRange(t *utesting.T) {
root: root, root: root,
startingHash: zero, startingHash: zero,
limitHash: ffHash, limitHash: ffHash,
expAccounts: 44, expAccounts: 34,
expFirst: firstKey, expFirst: firstKey,
expLast: common.HexToHash("0x1c3f74249a4892081ba0634a819aec9ed25f34c7653f5719b9098487e65ab595"), expLast: common.HexToHash("0x2ef46ebd2073cecde499c2e8df028ad79a26d57bfaa812c4c6f7eb4c9617b913"),
desc: "In this test, we request the entire state range, but limit the response to 2000 bytes.", desc: "In this test, we request the entire state range, but limit the response to 2000 bytes.",
}, },
{ {
@ -177,9 +177,9 @@ The server should return the first available account.`,
root: root, root: root,
startingHash: firstKey, startingHash: firstKey,
limitHash: ffHash, limitHash: ffHash,
expAccounts: 86, expAccounts: 67,
expFirst: firstKey, expFirst: firstKey,
expLast: common.HexToHash("0x445cb5c1278fdce2f9cbdb681bdd76c52f8e50e41dbd9e220242a69ba99ac099"), expLast: common.HexToHash("0x622e662246601dd04f996289ce8b85e86db7bb15bb17f86487ec9d543ddb6f9a"),
desc: `In this test, startingHash is exactly the first available account key. desc: `In this test, startingHash is exactly the first available account key.
The server should return the first available account of the state as the first item.`, The server should return the first available account of the state as the first item.`,
}, },
@ -188,9 +188,9 @@ The server should return the first available account of the state as the first i
root: root, root: root,
startingHash: hashAdd(firstKey, 1), startingHash: hashAdd(firstKey, 1),
limitHash: ffHash, limitHash: ffHash,
expAccounts: 86, expAccounts: 67,
expFirst: secondKey, expFirst: secondKey,
expLast: common.HexToHash("0x4615e5f5df5b25349a00ad313c6cd0436b6c08ee5826e33a018661997f85ebaa"), expLast: common.HexToHash("0x66192e4c757fba1cdc776e6737008f42d50370d3cd801db3624274283bf7cd63"),
desc: `In this test, startingHash is after the first available key. desc: `In this test, startingHash is after the first available key.
The server should return the second account of the state as the first item.`, The server should return the second account of the state as the first item.`,
}, },
@ -226,9 +226,9 @@ server to return no data because genesis is older than 127 blocks.`,
root: s.chain.RootAt(int(s.chain.Head().Number().Uint64()) - 127), root: s.chain.RootAt(int(s.chain.Head().Number().Uint64()) - 127),
startingHash: zero, startingHash: zero,
limitHash: ffHash, limitHash: ffHash,
expAccounts: 84, expAccounts: 66,
expFirst: firstKey, expFirst: firstKey,
expLast: common.HexToHash("0x580aa878e2f92d113a12c0a3ce3c21972b03dbe80786858d49a72097e2c491a3"), expLast: common.HexToHash("0x729953a43ed6c913df957172680a17e5735143ad767bda8f58ac84ec62fbec5e"),
desc: `This test requests data at a state root that is 127 blocks old. desc: `This test requests data at a state root that is 127 blocks old.
We expect the server to have this state available.`, We expect the server to have this state available.`,
}, },
@ -657,8 +657,8 @@ The server should reject the request.`,
// It's a bit unfortunate these are hard-coded, but the result depends on // It's a bit unfortunate these are hard-coded, but the result depends on
// a lot of aspects of the state trie and can't be guessed in a simple // a lot of aspects of the state trie and can't be guessed in a simple
// way. So you'll have to update this when the test chain is changed. // way. So you'll have to update this when the test chain is changed.
common.HexToHash("0x3e963a69401a70224cbfb8c0cc2249b019041a538675d71ccf80c9328d114e2e"), common.HexToHash("0x5bdc0d6057b35642a16d27223ea5454e5a17a400e28f7328971a5f2a87773b76"),
common.HexToHash("0xd0670d09cdfbf3c6320eb3e92c47c57baa6c226551a2d488c05581091e6b1689"), common.HexToHash("0x0a76c9812ca90ffed8ee4d191e683f93386b6e50cfe3679c0760d27510aa7fc5"),
empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, empty,
empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, empty,
empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, empty,
@ -678,8 +678,8 @@ The server should reject the request.`,
// be updated when the test chain is changed. // be updated when the test chain is changed.
expHashes: []common.Hash{ expHashes: []common.Hash{
empty, empty,
common.HexToHash("0xd0670d09cdfbf3c6320eb3e92c47c57baa6c226551a2d488c05581091e6b1689"), common.HexToHash("0x0a76c9812ca90ffed8ee4d191e683f93386b6e50cfe3679c0760d27510aa7fc5"),
common.HexToHash("0x3e963a69401a70224cbfb8c0cc2249b019041a538675d71ccf80c9328d114e2e"), common.HexToHash("0x5bdc0d6057b35642a16d27223ea5454e5a17a400e28f7328971a5f2a87773b76"),
}, },
}, },

View file

@ -196,6 +196,7 @@ to check if the node disconnects after receiving multiple invalid requests.`)
func (s *Suite) TestSimultaneousRequests(t *utesting.T) { func (s *Suite) TestSimultaneousRequests(t *utesting.T) {
t.Log(`This test requests blocks headers from the node, performing two requests t.Log(`This test requests blocks headers from the node, performing two requests
concurrently, with different request IDs.`) concurrently, with different request IDs.`)
conn, err := s.dialAndPeer(nil) conn, err := s.dialAndPeer(nil)
if err != nil { if err != nil {
t.Fatalf("peering failed: %v", err) t.Fatalf("peering failed: %v", err)
@ -235,37 +236,36 @@ concurrently, with different request IDs.`)
} }
// Wait for responses. // Wait for responses.
headers1 := new(eth.BlockHeadersPacket) // Note they can arrive in either order.
if err := conn.ReadMsg(ethProto, eth.BlockHeadersMsg, &headers1); err != nil { resp, err := collectResponses(conn, 2, func(msg *eth.BlockHeadersPacket) uint64 {
t.Fatalf("error reading block headers msg: %v", err) if msg.RequestId != 111 && msg.RequestId != 222 {
} t.Fatalf("response with unknown request ID: %v", msg.RequestId)
if got, want := headers1.RequestId, req1.RequestId; got != want { }
t.Fatalf("unexpected request id in response: got %d, want %d", got, want) return msg.RequestId
} })
headers2 := new(eth.BlockHeadersPacket) if err != nil {
if err := conn.ReadMsg(ethProto, eth.BlockHeadersMsg, &headers2); err != nil { t.Fatal(err)
t.Fatalf("error reading block headers msg: %v", err)
}
if got, want := headers2.RequestId, req2.RequestId; got != want {
t.Fatalf("unexpected request id in response: got %d, want %d", got, want)
} }
// Check received headers for accuracy. // Check if headers match.
resp1 := resp[111]
if expected, err := s.chain.GetHeaders(req1); err != nil { if expected, err := s.chain.GetHeaders(req1); err != nil {
t.Fatalf("failed to get expected headers for request 1: %v", err) t.Fatalf("failed to get expected headers for request 1: %v", err)
} else if !headersMatch(expected, headers1.BlockHeadersRequest) { } else if !headersMatch(expected, resp1.BlockHeadersRequest) {
t.Fatalf("header mismatch: \nexpected %v \ngot %v", expected, headers1) t.Fatalf("header mismatch for request ID %v: \nexpected %v \ngot %v", 111, expected, resp1)
} }
resp2 := resp[222]
if expected, err := s.chain.GetHeaders(req2); err != nil { if expected, err := s.chain.GetHeaders(req2); err != nil {
t.Fatalf("failed to get expected headers for request 2: %v", err) t.Fatalf("failed to get expected headers for request 2: %v", err)
} else if !headersMatch(expected, headers2.BlockHeadersRequest) { } else if !headersMatch(expected, resp2.BlockHeadersRequest) {
t.Fatalf("header mismatch: \nexpected %v \ngot %v", expected, headers2) t.Fatalf("header mismatch for request ID %v: \nexpected %v \ngot %v", 222, expected, resp2)
} }
} }
func (s *Suite) TestSameRequestID(t *utesting.T) { func (s *Suite) TestSameRequestID(t *utesting.T) {
t.Log(`This test requests block headers, performing two concurrent requests with the t.Log(`This test requests block headers, performing two concurrent requests with the
same request ID. The node should handle the request by responding to both requests.`) same request ID. The node should handle the request by responding to both requests.`)
conn, err := s.dialAndPeer(nil) conn, err := s.dialAndPeer(nil)
if err != nil { if err != nil {
t.Fatalf("peering failed: %v", err) t.Fatalf("peering failed: %v", err)
@ -289,7 +289,7 @@ same request ID. The node should handle the request by responding to both reques
Origin: eth.HashOrNumber{ Origin: eth.HashOrNumber{
Number: 33, Number: 33,
}, },
Amount: 2, Amount: 3,
}, },
} }
@ -301,35 +301,52 @@ same request ID. The node should handle the request by responding to both reques
t.Fatalf("failed to write to connection: %v", err) t.Fatalf("failed to write to connection: %v", err)
} }
// Wait for the responses. // Wait for the responses. They can arrive in either order, and we can't tell them
headers1 := new(eth.BlockHeadersPacket) // apart by their request ID, so use the number of headers instead.
if err := conn.ReadMsg(ethProto, eth.BlockHeadersMsg, &headers1); err != nil { resp, err := collectResponses(conn, 2, func(msg *eth.BlockHeadersPacket) uint64 {
t.Fatalf("error reading from connection: %v", err) id := uint64(len(msg.BlockHeadersRequest))
} if id != 2 && id != 3 {
if got, want := headers1.RequestId, request1.RequestId; got != want { t.Fatalf("invalid number of headers in response: %d", id)
t.Fatalf("unexpected request id: got %d, want %d", got, want) }
} return id
headers2 := new(eth.BlockHeadersPacket) })
if err := conn.ReadMsg(ethProto, eth.BlockHeadersMsg, &headers2); err != nil { if err != nil {
t.Fatalf("error reading from connection: %v", err) t.Fatal(err)
}
if got, want := headers2.RequestId, request2.RequestId; got != want {
t.Fatalf("unexpected request id: got %d, want %d", got, want)
} }
// Check if headers match. // Check if headers match.
resp1 := resp[2]
if expected, err := s.chain.GetHeaders(request1); err != nil { if expected, err := s.chain.GetHeaders(request1); err != nil {
t.Fatalf("failed to get expected block headers: %v", err) t.Fatalf("failed to get expected headers for request 1: %v", err)
} else if !headersMatch(expected, headers1.BlockHeadersRequest) { } else if !headersMatch(expected, resp1.BlockHeadersRequest) {
t.Fatalf("header mismatch: \nexpected %v \ngot %v", expected, headers1) t.Fatalf("headers mismatch: \nexpected %v \ngot %v", expected, resp1)
} }
resp2 := resp[3]
if expected, err := s.chain.GetHeaders(request2); err != nil { if expected, err := s.chain.GetHeaders(request2); err != nil {
t.Fatalf("failed to get expected block headers: %v", err) t.Fatalf("failed to get expected headers for request 2: %v", err)
} else if !headersMatch(expected, headers2.BlockHeadersRequest) { } else if !headersMatch(expected, resp2.BlockHeadersRequest) {
t.Fatalf("header mismatch: \nexpected %v \ngot %v", expected, headers2) t.Fatalf("headers mismatch: \nexpected %v \ngot %v", expected, resp2)
} }
} }
// collectResponses waits for n messages of type T on the given connection.
// The messsages are collected according to the 'identity' function.
func collectResponses[T any, P msgTypePtr[T]](conn *Conn, n int, identity func(P) uint64) (map[uint64]P, error) {
resp := make(map[uint64]P, n)
for range n {
r := new(T)
if err := conn.ReadMsg(ethProto, eth.BlockHeadersMsg, r); err != nil {
return resp, fmt.Errorf("read error: %v", err)
}
id := identity(r)
if resp[id] != nil {
return resp, fmt.Errorf("duplicate response %v", r)
}
resp[id] = r
}
return resp, nil
}
func (s *Suite) TestZeroRequestID(t *utesting.T) { func (s *Suite) TestZeroRequestID(t *utesting.T) {
t.Log(`This test sends a GetBlockHeaders message with a request-id of zero, t.Log(`This test sends a GetBlockHeaders message with a request-id of zero,
and expects a response.`) and expects a response.`)
@ -887,7 +904,7 @@ func (s *Suite) makeBlobTxs(count, blobs int, discriminator byte) (txs types.Tra
from, nonce := s.chain.GetSender(5) from, nonce := s.chain.GetSender(5)
for i := 0; i < count; i++ { for i := 0; i < count; i++ {
// Make blob data, max of 2 blobs per tx. // Make blob data, max of 2 blobs per tx.
blobdata := make([]byte, blobs%3) blobdata := make([]byte, min(blobs, 2))
for i := range blobdata { for i := range blobdata {
blobdata[i] = discriminator blobdata[i] = discriminator
blobs -= 1 blobs -= 1

Binary file not shown.

View file

@ -1,20 +1,27 @@
{ {
"HIVE_CANCUN_TIMESTAMP": "840", "HIVE_CANCUN_BLOB_BASE_FEE_UPDATE_FRACTION": "3338477",
"HIVE_CANCUN_BLOB_MAX": "6",
"HIVE_CANCUN_BLOB_TARGET": "3",
"HIVE_CANCUN_TIMESTAMP": "60",
"HIVE_CHAIN_ID": "3503995874084926", "HIVE_CHAIN_ID": "3503995874084926",
"HIVE_FORK_ARROW_GLACIER": "60", "HIVE_FORK_ARROW_GLACIER": "0",
"HIVE_FORK_BERLIN": "48", "HIVE_FORK_BERLIN": "0",
"HIVE_FORK_BYZANTIUM": "18", "HIVE_FORK_BYZANTIUM": "0",
"HIVE_FORK_CONSTANTINOPLE": "24", "HIVE_FORK_CONSTANTINOPLE": "0",
"HIVE_FORK_GRAY_GLACIER": "66", "HIVE_FORK_GRAY_GLACIER": "0",
"HIVE_FORK_HOMESTEAD": "0", "HIVE_FORK_HOMESTEAD": "0",
"HIVE_FORK_ISTANBUL": "36", "HIVE_FORK_ISTANBUL": "0",
"HIVE_FORK_LONDON": "54", "HIVE_FORK_LONDON": "0",
"HIVE_FORK_MUIR_GLACIER": "42", "HIVE_FORK_MUIR_GLACIER": "0",
"HIVE_FORK_PETERSBURG": "30", "HIVE_FORK_PETERSBURG": "0",
"HIVE_FORK_SPURIOUS": "12", "HIVE_FORK_SPURIOUS": "0",
"HIVE_FORK_TANGERINE": "6", "HIVE_FORK_TANGERINE": "0",
"HIVE_MERGE_BLOCK_ID": "72", "HIVE_MERGE_BLOCK_ID": "0",
"HIVE_NETWORK_ID": "3503995874084926", "HIVE_NETWORK_ID": "3503995874084926",
"HIVE_SHANGHAI_TIMESTAMP": "780", "HIVE_PRAGUE_BLOB_BASE_FEE_UPDATE_FRACTION": "5007716",
"HIVE_TERMINAL_TOTAL_DIFFICULTY": "9454784" "HIVE_PRAGUE_BLOB_MAX": "9",
"HIVE_PRAGUE_BLOB_TARGET": "6",
"HIVE_PRAGUE_TIMESTAMP": "120",
"HIVE_SHANGHAI_TIMESTAMP": "0",
"HIVE_TERMINAL_TOTAL_DIFFICULTY": "131072"
} }

View file

@ -2,28 +2,35 @@
"config": { "config": {
"chainId": 3503995874084926, "chainId": 3503995874084926,
"homesteadBlock": 0, "homesteadBlock": 0,
"eip150Block": 6, "eip150Block": 0,
"eip155Block": 12, "eip155Block": 0,
"eip158Block": 12, "eip158Block": 0,
"byzantiumBlock": 18, "byzantiumBlock": 0,
"constantinopleBlock": 24, "constantinopleBlock": 0,
"petersburgBlock": 30, "petersburgBlock": 0,
"istanbulBlock": 36, "istanbulBlock": 0,
"muirGlacierBlock": 42, "muirGlacierBlock": 0,
"berlinBlock": 48, "berlinBlock": 0,
"londonBlock": 54, "londonBlock": 0,
"arrowGlacierBlock": 60, "arrowGlacierBlock": 0,
"grayGlacierBlock": 66, "grayGlacierBlock": 0,
"mergeNetsplitBlock": 72, "mergeNetsplitBlock": 0,
"shanghaiTime": 780, "shanghaiTime": 0,
"cancunTime": 840, "cancunTime": 60,
"terminalTotalDifficulty": 9454784, "pragueTime": 120,
"terminalTotalDifficulty": 131072,
"depositContractAddress": "0x0000000000000000000000000000000000000000",
"ethash": {}, "ethash": {},
"blobSchedule": { "blobSchedule": {
"cancun": { "cancun": {
"target": 3, "target": 3,
"max": 6, "max": 6,
"baseFeeUpdateFraction": 3338477 "baseFeeUpdateFraction": 3338477
},
"prague": {
"target": 6,
"max": 9,
"baseFeeUpdateFraction": 5007716
} }
} }
}, },
@ -35,6 +42,18 @@
"mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000", "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
"coinbase": "0x0000000000000000000000000000000000000000", "coinbase": "0x0000000000000000000000000000000000000000",
"alloc": { "alloc": {
"00000961ef480eb55e80d19ad83579a64c007002": {
"code": "0x3373fffffffffffffffffffffffffffffffffffffffe1460cb5760115f54807fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff146101f457600182026001905f5b5f82111560685781019083028483029004916001019190604d565b909390049250505036603814608857366101f457346101f4575f5260205ff35b34106101f457600154600101600155600354806003026004013381556001015f35815560010160203590553360601b5f5260385f601437604c5fa0600101600355005b6003546002548082038060101160df575060105b5f5b8181146101835782810160030260040181604c02815460601b8152601401816001015481526020019060020154807fffffffffffffffffffffffffffffffff00000000000000000000000000000000168252906010019060401c908160381c81600701538160301c81600601538160281c81600501538160201c81600401538160181c81600301538160101c81600201538160081c81600101535360010160e1565b910180921461019557906002556101a0565b90505f6002555f6003555b5f54807fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff14156101cd57505f5b6001546002828201116101e25750505f6101e8565b01600290035b5f555f600155604c025ff35b5f5ffd",
"balance": "0x1"
},
"0000bbddc7ce488642fb579f8b00f3a590007251": {
"code": "0x3373fffffffffffffffffffffffffffffffffffffffe1460d35760115f54807fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1461019a57600182026001905f5b5f82111560685781019083028483029004916001019190604d565b9093900492505050366060146088573661019a573461019a575f5260205ff35b341061019a57600154600101600155600354806004026004013381556001015f358155600101602035815560010160403590553360601b5f5260605f60143760745fa0600101600355005b6003546002548082038060021160e7575060025b5f5b8181146101295782810160040260040181607402815460601b815260140181600101548152602001816002015481526020019060030154905260010160e9565b910180921461013b5790600255610146565b90505f6002555f6003555b5f54807fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff141561017357505f5b6001546001828201116101885750505f61018e565b01600190035b5f555f6001556074025ff35b5f5ffd",
"balance": "0x1"
},
"0000f90827f1c53a10cb7a02335b175320002935": {
"code": "0x3373fffffffffffffffffffffffffffffffffffffffe14604657602036036042575f35600143038111604257611fff81430311604257611fff9006545f5260205ff35b5f5ffd5b5f35611fff60014303065500",
"balance": "0x1"
},
"000f3df6d732807ef1319fb7b8bb8522d0beac02": { "000f3df6d732807ef1319fb7b8bb8522d0beac02": {
"code": "0x3373fffffffffffffffffffffffffffffffffffffffe14604d57602036146024575f5ffd5b5f35801560495762001fff810690815414603c575f5ffd5b62001fff01545f5260205ff35b5f5ffd5b62001fff42064281555f359062001fff015500", "code": "0x3373fffffffffffffffffffffffffffffffffffffffe14604d57602036146024575f5ffd5b5f35801560495762001fff810690815414603c575f5ffd5b62001fff01545f5260205ff35b5f5ffd5b62001fff42064281555f359062001fff015500",
"balance": "0x2a" "balance": "0x2a"
@ -81,6 +100,10 @@
"7435ed30a8b4aeb0877cef0c6e8cffe834eb865f": { "7435ed30a8b4aeb0877cef0c6e8cffe834eb865f": {
"balance": "0xc097ce7bc90715b34b9f1000000000" "balance": "0xc097ce7bc90715b34b9f1000000000"
}, },
"7dcd17433742f4c0ca53122ab541d0ba67fc27df": {
"code": "0x3680600080376000206000548082558060010160005560005263656d697460206000a2",
"balance": "0x0"
},
"83c7e323d189f18725ac510004fdc2941f8c4a78": { "83c7e323d189f18725ac510004fdc2941f8c4a78": {
"balance": "0xc097ce7bc90715b34b9f1000000000" "balance": "0xc097ce7bc90715b34b9f1000000000"
}, },
@ -112,7 +135,7 @@
"number": "0x0", "number": "0x0",
"gasUsed": "0x0", "gasUsed": "0x0",
"parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000", "parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
"baseFeePerGas": null, "baseFeePerGas": "0x3b9aca00",
"excessBlobGas": null, "excessBlobGas": null,
"blobGasUsed": null "blobGasUsed": null
} }

View file

@ -1,16 +1,16 @@
{ {
"parentHash": "0x96a73007443980c5e0985dfbb45279aa496dadea16918ad42c65c0bf8122ec39", "parentHash": "0x65151b101682b54cd08ba226f640c14c86176865ff9bfc57e0147dadaeac34bb",
"sha3Uncles": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347", "sha3Uncles": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
"miner": "0x0000000000000000000000000000000000000000", "miner": "0x0000000000000000000000000000000000000000",
"stateRoot": "0xea4c1f4d9fa8664c22574c5b2f948a78c4b1a753cebc1861e7fb5b1aa21c5a94", "stateRoot": "0xce423ebc60fc7764a43f09f1fe3ae61eef25e3eb8d09b1108f7e7eb77dfff5e6",
"transactionsRoot": "0xecda39025fc4c609ce778d75eed0aa53b65ce1e3d1373b34bad8578cc31e5b48", "transactionsRoot": "0x7ec1ae3989efa75d7bcc766e5e2443afa8a89a5fda42ebba90050e7e702980f7",
"receiptsRoot": "0x056b23fbba480696b65fe5a59b8f2148a1299103c4f57df839233af2cf4ca2d2", "receiptsRoot": "0xfe160832b1ca85f38c6674cb0aae3a24693bc49be56e2ecdf3698b71a794de86",
"logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
"difficulty": "0x0", "difficulty": "0x0",
"number": "0x1f4", "number": "0x258",
"gasLimit": "0x47e7c40", "gasLimit": "0x23f3e20",
"gasUsed": "0x5208", "gasUsed": "0x19d36",
"timestamp": "0x1388", "timestamp": "0x1770",
"extraData": "0x", "extraData": "0x",
"mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000", "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
"nonce": "0x0000000000000000", "nonce": "0x0000000000000000",
@ -18,6 +18,7 @@
"withdrawalsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", "withdrawalsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
"blobGasUsed": "0x0", "blobGasUsed": "0x0",
"excessBlobGas": "0x0", "excessBlobGas": "0x0",
"parentBeaconBlockRoot": "0xf653da50cdff4733f13f7a5e338290e883bdf04adf3f112709728063ea965d6c", "parentBeaconBlockRoot": "0xf5003fc8f92358e790a114bce93ce1d9c283c85e1787f8d7d56714d3489b49e6",
"hash": "0x36a166f0dcd160fc5e5c61c9a7c2d7f236d9175bf27f43aaa2150e291f092ef7" "requestsHash": "0xe3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
"hash": "0xce8d86ba17a2ec303155f0e264c58a4b8f94ce3436274cf1924f91acdb7502d0"
} }

View file

@ -1,12 +1,12 @@
{ {
"jsonrpc": "2.0", "jsonrpc": "2.0",
"id": "fcu500", "id": "fcu600",
"method": "engine_forkchoiceUpdatedV3", "method": "engine_forkchoiceUpdatedV3",
"params": [ "params": [
{ {
"headBlockHash": "0x36a166f0dcd160fc5e5c61c9a7c2d7f236d9175bf27f43aaa2150e291f092ef7", "headBlockHash": "0xce8d86ba17a2ec303155f0e264c58a4b8f94ce3436274cf1924f91acdb7502d0",
"safeBlockHash": "0x36a166f0dcd160fc5e5c61c9a7c2d7f236d9175bf27f43aaa2150e291f092ef7", "safeBlockHash": "0xce8d86ba17a2ec303155f0e264c58a4b8f94ce3436274cf1924f91acdb7502d0",
"finalizedBlockHash": "0x36a166f0dcd160fc5e5c61c9a7c2d7f236d9175bf27f43aaa2150e291f092ef7" "finalizedBlockHash": "0xce8d86ba17a2ec303155f0e264c58a4b8f94ce3436274cf1924f91acdb7502d0"
}, },
null null
] ]

File diff suppressed because it is too large Load diff

File diff suppressed because it is too large Load diff

File diff suppressed because it is too large Load diff

View file

@ -374,7 +374,10 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig,
func MakePreState(db ethdb.Database, accounts types.GenesisAlloc) *state.StateDB { func MakePreState(db ethdb.Database, accounts types.GenesisAlloc) *state.StateDB {
tdb := triedb.NewDatabase(db, &triedb.Config{Preimages: true}) tdb := triedb.NewDatabase(db, &triedb.Config{Preimages: true})
sdb := state.NewDatabase(tdb, nil) sdb := state.NewDatabase(tdb, nil)
statedb, _ := state.New(types.EmptyRootHash, sdb) statedb, err := state.New(types.EmptyRootHash, sdb)
if err != nil {
panic(fmt.Errorf("failed to create initial state: %v", err))
}
for addr, a := range accounts { for addr, a := range accounts {
statedb.SetCode(addr, a.Code, tracing.CodeChangeUnspecified) statedb.SetCode(addr, a.Code, tracing.CodeChangeUnspecified)
statedb.SetNonce(addr, a.Nonce, tracing.NonceChangeGenesis) statedb.SetNonce(addr, a.Nonce, tracing.NonceChangeGenesis)
@ -384,8 +387,14 @@ func MakePreState(db ethdb.Database, accounts types.GenesisAlloc) *state.StateDB
} }
} }
// Commit and re-open to start with a clean state. // Commit and re-open to start with a clean state.
root, _ := statedb.Commit(0, false, false) root, err := statedb.Commit(0, false, false)
statedb, _ = state.New(root, sdb) if err != nil {
panic(fmt.Errorf("failed to commit initial state: %v", err))
}
statedb, err = state.New(root, sdb)
if err != nil {
panic(fmt.Errorf("failed to reopen state after commit: %v", err))
}
return statedb return statedb
} }

View file

@ -9,7 +9,7 @@ require (
require ( require (
github.com/StackExchange/wmi v1.2.1 // indirect github.com/StackExchange/wmi v1.2.1 // indirect
github.com/VictoriaMetrics/fastcache v1.12.2 // indirect github.com/VictoriaMetrics/fastcache v1.13.0 // indirect
github.com/bits-and-blooms/bitset v1.20.0 // indirect github.com/bits-and-blooms/bitset v1.20.0 // indirect
github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/consensys/gnark-crypto v0.18.0 // indirect github.com/consensys/gnark-crypto v0.18.0 // indirect
@ -24,7 +24,7 @@ require (
github.com/ferranbt/fastssz v0.1.4 // indirect github.com/ferranbt/fastssz v0.1.4 // indirect
github.com/go-ole/go-ole v1.3.0 // indirect github.com/go-ole/go-ole v1.3.0 // indirect
github.com/gofrs/flock v0.12.1 // indirect github.com/gofrs/flock v0.12.1 // indirect
github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb // indirect github.com/golang/snappy v1.0.0 // indirect
github.com/holiman/bloomfilter/v2 v2.0.3 // indirect github.com/holiman/bloomfilter/v2 v2.0.3 // indirect
github.com/holiman/uint256 v1.3.2 // indirect github.com/holiman/uint256 v1.3.2 // indirect
github.com/klauspost/cpuid/v2 v2.0.9 // indirect github.com/klauspost/cpuid/v2 v2.0.9 // indirect

View file

@ -2,15 +2,14 @@ github.com/DataDog/zstd v1.4.5 h1:EndNeuB0l9syBZhut0wns3gV1hL8zX8LIu6ZiVHWLIQ=
github.com/DataDog/zstd v1.4.5/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo= github.com/DataDog/zstd v1.4.5/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo=
github.com/StackExchange/wmi v1.2.1 h1:VIkavFPXSjcnS+O8yTq7NI32k0R5Aj+v39y29VYDOSA= github.com/StackExchange/wmi v1.2.1 h1:VIkavFPXSjcnS+O8yTq7NI32k0R5Aj+v39y29VYDOSA=
github.com/StackExchange/wmi v1.2.1/go.mod h1:rcmrprowKIVzvc+NUiLncP2uuArMWLCbu9SBzvHz7e8= github.com/StackExchange/wmi v1.2.1/go.mod h1:rcmrprowKIVzvc+NUiLncP2uuArMWLCbu9SBzvHz7e8=
github.com/VictoriaMetrics/fastcache v1.12.2 h1:N0y9ASrJ0F6h0QaC3o6uJb3NIZ9VKLjCM7NQbSmF7WI= github.com/VictoriaMetrics/fastcache v1.13.0 h1:AW4mheMR5Vd9FkAPUv+NH6Nhw+fmbTMGMsNAoA/+4G0=
github.com/VictoriaMetrics/fastcache v1.12.2/go.mod h1:AmC+Nzz1+3G2eCPapF6UcsnkThDcMsQicp4xDukwJYI= github.com/VictoriaMetrics/fastcache v1.13.0/go.mod h1:hHXhl4DA2fTL2HTZDJFXWgW0LNjo6B+4aj2Wmng3TjU=
github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156 h1:eMwmnE/GDgah4HI848JfFxHt+iPb26b4zyfspmqY0/8= github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156 h1:eMwmnE/GDgah4HI848JfFxHt+iPb26b4zyfspmqY0/8=
github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156/go.mod h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2ucNfQESPXM= github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156/go.mod h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2ucNfQESPXM=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/bits-and-blooms/bitset v1.20.0 h1:2F+rfL86jE2d/bmw7OhqUg2Sj/1rURkBn3MdfoPyRVU= github.com/bits-and-blooms/bitset v1.20.0 h1:2F+rfL86jE2d/bmw7OhqUg2Sj/1rURkBn3MdfoPyRVU=
github.com/bits-and-blooms/bitset v1.20.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8= github.com/bits-and-blooms/bitset v1.20.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8=
github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cockroachdb/errors v1.11.3 h1:5bA+k2Y6r+oz/6Z/RFlNeVCesGARKuC6YymtcDrbC/I= github.com/cockroachdb/errors v1.11.3 h1:5bA+k2Y6r+oz/6Z/RFlNeVCesGARKuC6YymtcDrbC/I=
@ -31,7 +30,6 @@ github.com/crate-crypto/go-eth-kzg v1.4.0 h1:WzDGjHk4gFg6YzV0rJOAsTK4z3Qkz5jd4RE
github.com/crate-crypto/go-eth-kzg v1.4.0/go.mod h1:J9/u5sWfznSObptgfa92Jq8rTswn6ahQWEuiLHOjCUI= github.com/crate-crypto/go-eth-kzg v1.4.0/go.mod h1:J9/u5sWfznSObptgfa92Jq8rTswn6ahQWEuiLHOjCUI=
github.com/crate-crypto/go-ipa v0.0.0-20240724233137-53bbb0ceb27a h1:W8mUrRp6NOVl3J+MYp5kPMoUZPp7aOYHtaua31lwRHg= github.com/crate-crypto/go-ipa v0.0.0-20240724233137-53bbb0ceb27a h1:W8mUrRp6NOVl3J+MYp5kPMoUZPp7aOYHtaua31lwRHg=
github.com/crate-crypto/go-ipa v0.0.0-20240724233137-53bbb0ceb27a/go.mod h1:sTwzHBvIzm2RfVCGNEBZgRyjwK40bVoun3ZnGOCafNM= github.com/crate-crypto/go-ipa v0.0.0-20240724233137-53bbb0ceb27a/go.mod h1:sTwzHBvIzm2RfVCGNEBZgRyjwK40bVoun3ZnGOCafNM=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/deckarep/golang-set/v2 v2.6.0 h1:XfcQbWM1LlMB8BsJ8N9vW5ehnnPVIw0je80NsVHagjM= github.com/deckarep/golang-set/v2 v2.6.0 h1:XfcQbWM1LlMB8BsJ8N9vW5ehnnPVIw0je80NsVHagjM=
@ -61,9 +59,8 @@ github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v1.0.0 h1:Oy607GVXHs7RtbggtPBnr2RmDArIsAefDwvrdWvRhGs=
github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb h1:PBC98N2aIaM3XXiurYmW7fx4GZkL8feAMVq7nEjURHk= github.com/golang/snappy v1.0.0/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/holiman/bloomfilter/v2 v2.0.3 h1:73e0e/V0tCydx14a0SCYS/EWCxgwLZ18CZcZKVu0fao= github.com/holiman/bloomfilter/v2 v2.0.3 h1:73e0e/V0tCydx14a0SCYS/EWCxgwLZ18CZcZKVu0fao=
github.com/holiman/bloomfilter/v2 v2.0.3/go.mod h1:zpoh+gs7qcpqrHr3dB55AMiJwo0iURXE7ZOP9L9hSkA= github.com/holiman/bloomfilter/v2 v2.0.3/go.mod h1:zpoh+gs7qcpqrHr3dB55AMiJwo0iURXE7ZOP9L9hSkA=
github.com/holiman/uint256 v1.3.2 h1:a9EgMPSC1AAaj1SZL5zIQD3WbwTuHrMGOerLjGmM/TA= github.com/holiman/uint256 v1.3.2 h1:a9EgMPSC1AAaj1SZL5zIQD3WbwTuHrMGOerLjGmM/TA=
@ -112,8 +109,6 @@ github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU
github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
github.com/shirou/gopsutil v3.21.4-0.20210419000835-c7a38de76ee5+incompatible h1:Bn1aCHHRnjv4Bl16T8rcaFjYSrGrIZvpiGO6P3Q4GpU= github.com/shirou/gopsutil v3.21.4-0.20210419000835-c7a38de76ee5+incompatible h1:Bn1aCHHRnjv4Bl16T8rcaFjYSrGrIZvpiGO6P3Q4GpU=
github.com/shirou/gopsutil v3.21.4-0.20210419000835-c7a38de76ee5+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= github.com/shirou/gopsutil v3.21.4-0.20210419000835-c7a38de76ee5+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/supranational/blst v0.3.16-0.20250831170142-f48500c1fdbe h1:nbdqkIGOGfUAD54q1s2YBcBz/WcsxCO9HUQ4aGV5hUw= github.com/supranational/blst v0.3.16-0.20250831170142-f48500c1fdbe h1:nbdqkIGOGfUAD54q1s2YBcBz/WcsxCO9HUQ4aGV5hUw=
@ -134,7 +129,6 @@ golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.36.0 h1:KVRy2GtZBrk1cBYA7MKu5bEZFxQk4NIDV6RLVcC8o0k= golang.org/x/sys v0.36.0 h1:KVRy2GtZBrk1cBYA7MKu5bEZFxQk4NIDV6RLVcC8o0k=
golang.org/x/sys v0.36.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= golang.org/x/sys v0.36.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY= golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY=

View file

@ -182,13 +182,14 @@ func (s *filterTestSuite) loadQueries() error {
// filterQuery is a single query for testing. // filterQuery is a single query for testing.
type filterQuery struct { type filterQuery struct {
FromBlock int64 `json:"fromBlock"` FromBlock int64 `json:"fromBlock"`
ToBlock int64 `json:"toBlock"` ToBlock int64 `json:"toBlock"`
Address []common.Address `json:"address"` lastBlockHash common.Hash
Topics [][]common.Hash `json:"topics"` Address []common.Address `json:"address"`
ResultHash *common.Hash `json:"resultHash,omitempty"` Topics [][]common.Hash `json:"topics"`
results []types.Log ResultHash *common.Hash `json:"resultHash,omitempty"`
Err error `json:"error,omitempty"` results []types.Log
Err error `json:"error,omitempty"`
} }
func (fq *filterQuery) isWildcard() bool { func (fq *filterQuery) isWildcard() bool {

View file

@ -0,0 +1,337 @@
// Copyright 2025 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
package main
import (
"context"
"fmt"
"math/big"
"reflect"
"slices"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/lru"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/rpc"
"github.com/urfave/cli/v2"
)
// maxFilterRangeForTestFuzz caps the block range of queries generated by the
// filter fuzzer (kept small so queries stay near the live chain head).
const maxFilterRangeForTestFuzz = 300

var (
	// filterFuzzCommand runs the live filter fuzzer: it generates random
	// eth_getLogs queries and cross-checks the results against logs
	// reconstructed from block receipts.
	filterFuzzCommand = &cli.Command{
		Name:      "filterfuzz",
		Usage:     "Generates queries and compares results against matches derived from receipts",
		ArgsUsage: "<RPC endpoint URL>",
		Action:    filterFuzzCmd,
		Flags:     []cli.Flag{},
	}
)
// filterFuzzCmd is the main function of the filter fuzzer. It loops forever
// (until the CLI context is cancelled): on each iteration it obtains a query —
// either a retried mismatching query, a seed query on a freshly observed head
// block, or a new random query — runs it via eth_getLogs, re-derives the
// expected logs from block receipts, and reports any mismatch. Successful
// queries are stored via f.storeQuery.
func filterFuzzCmd(ctx *cli.Context) error {
	f := newFilterTestGen(ctx, maxFilterRangeForTestFuzz)
	var lastHead *types.Header
	// Cache of recently seen headers, used to walk parent links cheaply when
	// searching for a common ancestor after a reorg.
	headerCache := lru.NewCache[common.Hash, *types.Header](200)
	// commonAncestor walks both chains backwards until the two headers meet.
	// It gives up (returns nil) when either input is nil, when the heads are
	// more than 100 blocks apart, or when a needed ancestor cannot be found.
	commonAncestor := func(oldPtr, newPtr *types.Header) *types.Header {
		if oldPtr == nil || newPtr == nil {
			return nil
		}
		if newPtr.Number.Uint64() > oldPtr.Number.Uint64()+100 || oldPtr.Number.Uint64() > newPtr.Number.Uint64()+100 {
			return nil
		}
		for oldPtr.Hash() != newPtr.Hash() {
			// Step the higher (or equal) side back one block per iteration.
			if newPtr.Number.Uint64() >= oldPtr.Number.Uint64() {
				if parent, _ := headerCache.Get(newPtr.ParentHash); parent != nil {
					newPtr = parent
				} else {
					// Not cached; fetch the parent over RPC and cache it.
					newPtr, _ = getHeaderByHash(f.client, newPtr.ParentHash)
					if newPtr == nil {
						return nil
					}
					headerCache.Add(newPtr.Hash(), newPtr)
				}
			}
			if oldPtr.Number.Uint64() > newPtr.Number.Uint64() {
				// NOTE(review): the old side is only looked up in the cache,
				// with no RPC fallback — presumably old headers are expected
				// to be cached already; confirm this asymmetry is intended.
				oldPtr, _ = headerCache.Get(oldPtr.ParentHash)
				if oldPtr == nil {
					return nil
				}
			}
		}
		return newPtr
	}
	// fetchHead polls the latest header. The second return value is true only
	// when the head changed since the last call; in that case f.blockLimit is
	// advanced and a summary of reorged/missed blocks is printed.
	fetchHead := func() (*types.Header, bool) {
		currentHead, err := getLatestHeader(f.client)
		if err != nil {
			fmt.Println("Could not fetch head block", err)
			return nil, false
		}
		headerCache.Add(currentHead.Hash(), currentHead)
		if lastHead != nil && currentHead.Hash() == lastHead.Hash() {
			return currentHead, false
		}
		f.blockLimit = currentHead.Number.Int64()
		ca := commonAncestor(lastHead, currentHead)
		fmt.Print("*** New head ", f.blockLimit)
		if ca == nil {
			fmt.Println(" <no common ancestor>")
		} else {
			// Blocks rolled back from the previous head.
			if reorged := lastHead.Number.Uint64() - ca.Number.Uint64(); reorged > 0 {
				fmt.Print(" reorged ", reorged)
			}
			// Blocks added beyond the ancestor that we never saw as heads.
			if missed := currentHead.Number.Uint64() - ca.Number.Uint64() - 1; missed > 0 {
				fmt.Print(" missed ", missed)
			}
			fmt.Println()
		}
		lastHead = currentHead
		return currentHead, true
	}
	// tryExtendQuery repeatedly widens the query's block range while the
	// extended query still succeeds and stays within maxFilterResultSize.
	// It returns the best query found, or nil if the query became useless
	// (empty results after a reorg) or errored.
	tryExtendQuery := func(query *filterQuery) *filterQuery {
		for {
			extQuery := f.extendRange(query)
			if extQuery == nil {
				return query
			}
			extQuery.checkLastBlockHash(f.client)
			extQuery.run(f.client, nil)
			if extQuery.Err == nil && len(extQuery.results) == 0 {
				// query is useless now due to major reorg; abandon and continue
				fmt.Println("Zero length results")
				return nil
			}
			if extQuery.Err != nil {
				extQuery.printError()
				return nil
			}
			if len(extQuery.results) > maxFilterResultSize {
				return query
			}
			query = extQuery
		}
	}
	var (
		// Last mismatching query and its retry schedule: it is retried after
		// mmRetry further iterations, with the gap doubling each retry.
		mmQuery              *filterQuery
		mmRetry, mmNextRetry int
	)
mainLoop:
	for {
		// Exit promptly when the command is interrupted.
		select {
		case <-ctx.Done():
			return nil
		default:
		}
		var query *filterQuery
		if mmQuery != nil {
			if mmRetry == 0 {
				// Time to retry the mismatching query; back off exponentially.
				query = mmQuery
				mmRetry = mmNextRetry
				mmNextRetry *= 2
				query.checkLastBlockHash(f.client)
				query.run(f.client, nil)
				if query.Err != nil {
					query.printError()
					continue
				}
				fmt.Println("Retrying query from:", query.FromBlock, "to:", query.ToBlock, "results:", len(query.results))
			} else {
				mmRetry--
			}
		}
		if query == nil {
			// No retry due this round; generate a fresh query instead.
			currentHead, isNewHead := fetchHead()
			if currentHead == nil {
				// Head fetch failed; wait a second (or exit) before retrying.
				select {
				case <-ctx.Done():
					return nil
				case <-time.After(time.Second):
				}
				continue mainLoop
			}
			if isNewHead {
				query = f.newHeadSeedQuery(currentHead.Number.Int64())
			} else {
				query = f.newQuery()
			}
			query.checkLastBlockHash(f.client)
			query.run(f.client, nil)
			if query.Err != nil {
				query.printError()
				continue
			}
			fmt.Println("New query from:", query.FromBlock, "to:", query.ToBlock, "results:", len(query.results))
			if len(query.results) == 0 || len(query.results) > maxFilterResultSize {
				continue mainLoop
			}
			if query = tryExtendQuery(query); query == nil {
				continue mainLoop
			}
		}
		// If the chain moved under us, the getLogs results may be stale.
		if !query.checkLastBlockHash(f.client) {
			fmt.Println("Reorg during search")
			continue mainLoop
		}
		// now we have a new query; check results
		results, err := query.getResultsFromReceipts(f.client)
		if err != nil {
			fmt.Println("Could not fetch results from receipts", err)
			continue mainLoop
		}
		// Re-check after fetching receipts: a reorg in between would make the
		// two result sets incomparable.
		if !query.checkLastBlockHash(f.client) {
			fmt.Println("Reorg during search")
			continue mainLoop
		}
		if !reflect.DeepEqual(query.results, results) {
			fmt.Println("Results mismatch from:", query.FromBlock, "to:", query.ToBlock, "addresses:", query.Address, "topics:", query.Topics)
			resShared, resGetLogs, resReceipts := compareResults(query.results, results)
			fmt.Println(" shared:", len(resShared))
			fmt.Println(" only from getLogs:", len(resGetLogs), resGetLogs)
			fmt.Println(" only from receipts:", len(resReceipts), resReceipts)
			// Schedule this query for retries unless it is already scheduled.
			if mmQuery != query {
				mmQuery = query
				mmRetry = 0
				mmNextRetry = 1
			}
			continue mainLoop
		}
		fmt.Println("Successful query from:", query.FromBlock, "to:", query.ToBlock, "results:", len(query.results))
		f.storeQuery(query)
	}
}
// compareResults diffs two log lists, returning the logs common to both and
// the logs appearing only in a or only in b. Both inputs are assumed to be in
// block/log order; when the heads differ, the function probes forward in both
// lists to resynchronize on the next matching element.
func compareResults(a, b []types.Log) (shared, onlya, onlyb []types.Log) {
	for len(a) > 0 && len(b) > 0 {
		if reflect.DeepEqual(a[0], b[0]) {
			// Heads agree: consume one element from each side.
			shared = append(shared, a[0])
			a, b = a[1:], b[1:]
			continue
		}
		// Heads disagree: scan ahead symmetrically until one side's head is
		// located in the other, attributing the skipped prefix accordingly.
		for skip := 1; ; skip++ {
			if skip >= len(a) { // b[0] not found in a
				onlyb = append(onlyb, b[0])
				b = b[1:]
				break
			}
			if skip >= len(b) { // a[0] not found in b
				onlya = append(onlya, a[0])
				a = a[1:]
				break
			}
			if reflect.DeepEqual(b[0], a[skip]) { // a[:skip] not found in b
				onlya = append(onlya, a[:skip]...)
				a = a[skip:]
				break
			}
			if reflect.DeepEqual(a[0], b[skip]) { // b[:skip] not found in a
				onlyb = append(onlyb, b[:skip]...)
				b = b[skip:]
				break
			}
		}
	}
	// Whatever remains on either side is unique to that side.
	onlya = append(onlya, a...)
	onlyb = append(onlyb, b...)
	return shared, onlya, onlyb
}
// getLatestHeader fetches the current chain head header over RPC, bounded by
// a five second timeout.
func getLatestHeader(client *client) (*types.Header, error) {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	latest := big.NewInt(int64(rpc.LatestBlockNumber))
	return client.Eth.HeaderByNumber(ctx, latest)
}
// getHeaderByHash fetches the header with the given hash over RPC, bounded by
// a five second timeout.
func getHeaderByHash(client *client, hash common.Hash) (*types.Header, error) {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	header, err := client.Eth.HeaderByHash(ctx, hash)
	return header, err
}
// newHeadSeedQuery creates a query that gets all logs from the latest head.
func (s *filterTestGen) newHeadSeedQuery(head int64) *filterQuery {
	// A single-block range covering only the head; no address/topic filters,
	// so every log in that block matches.
	q := &filterQuery{FromBlock: head, ToBlock: head}
	return q
}
// checkLastBlockHash fetches the canonical header at the query's ToBlock
// height and reports whether its hash still matches the hash recorded on the
// query. The recorded hash is refreshed on every call (and zeroed when the
// fetch fails), so the first call on a fresh query returns false and merely
// seeds the hash; subsequent calls returning false indicate a reorg or fetch
// failure.
func (fq *filterQuery) checkLastBlockHash(client *client) bool {
	ctx, cancel := context.WithTimeout(context.Background(), time.Second*30)
	defer cancel()
	header, err := client.Eth.HeaderByNumber(ctx, big.NewInt(fq.ToBlock))
	if err != nil {
		// Typo fixed: "Cound" -> "Could".
		fmt.Println("Could not fetch last block hash of query number:", fq.ToBlock, "error:", err)
		fq.lastBlockHash = common.Hash{}
		return false
	}
	hash := header.Hash()
	if fq.lastBlockHash == hash {
		return true
	}
	fq.lastBlockHash = hash
	return false
}
// filterLog reports whether the given log matches the query's address and
// topic filters, mirroring the eth_getLogs matching rules.
func (fq *filterQuery) filterLog(log *types.Log) bool {
	// With a non-empty address list, the log's emitter must be listed.
	if len(fq.Address) > 0 && !slices.Contains(fq.Address, log.Address) {
		return false
	}
	// A log with fewer topics than there are filter positions cannot match.
	if len(log.Topics) < len(fq.Topics) {
		return false
	}
	for pos, alternatives := range fq.Topics {
		// An empty alternative set is a wildcard for this position.
		if len(alternatives) == 0 {
			continue
		}
		if !slices.Contains(alternatives, log.Topics[pos]) {
			return false
		}
	}
	return true
}
// getResultsFromReceipts re-derives the query's expected result set by
// fetching the receipts of every block in the query range and applying the
// query's filters locally. This provides an independent reference to compare
// against the server's eth_getLogs answer.
func (fq *filterQuery) getResultsFromReceipts(client *client) ([]types.Log, error) {
	// One shared timeout covers the whole range scan.
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()
	var matches []types.Log
	for num := fq.FromBlock; num <= fq.ToBlock; num++ {
		receipts, err := client.Eth.BlockReceipts(ctx, rpc.BlockNumberOrHashWithNumber(rpc.BlockNumber(num)))
		if err != nil {
			return nil, err
		}
		for _, rcpt := range receipts {
			for _, entry := range rcpt.Logs {
				if fq.filterLog(entry) {
					matches = append(matches, *entry)
				}
			}
		}
	}
	return matches, nil
}

View file

@ -32,6 +32,17 @@ import (
"github.com/urfave/cli/v2" "github.com/urfave/cli/v2"
) )
const (
// Parameter of the random filter query generator.
maxFilterRangeForTestGen = 100000000000
maxFilterResultSize = 1000
filterBuckets = 10
maxFilterBucketSize = 100
filterSeedChance = 10
filterMergeChance = 45
filterExtendChance = 50
)
var ( var (
filterGenerateCommand = &cli.Command{ filterGenerateCommand = &cli.Command{
Name: "filtergen", Name: "filtergen",
@ -58,7 +69,7 @@ var (
// filterGenCmd is the main function of the filter tests generator. // filterGenCmd is the main function of the filter tests generator.
func filterGenCmd(ctx *cli.Context) error { func filterGenCmd(ctx *cli.Context) error {
f := newFilterTestGen(ctx) f := newFilterTestGen(ctx, maxFilterRangeForTestGen)
lastWrite := time.Now() lastWrite := time.Now()
for { for {
select { select {
@ -67,7 +78,7 @@ func filterGenCmd(ctx *cli.Context) error {
default: default:
} }
f.updateFinalizedBlock() f.setLimitToFinalizedBlock()
query := f.newQuery() query := f.newQuery()
query.run(f.client, nil) query.run(f.client, nil)
if query.Err != nil { if query.Err != nil {
@ -75,7 +86,7 @@ func filterGenCmd(ctx *cli.Context) error {
exit("filter query failed") exit("filter query failed")
} }
if len(query.results) > 0 && len(query.results) <= maxFilterResultSize { if len(query.results) > 0 && len(query.results) <= maxFilterResultSize {
for { for rand.Intn(100) < filterExtendChance {
extQuery := f.extendRange(query) extQuery := f.extendRange(query)
if extQuery == nil { if extQuery == nil {
break break
@ -108,39 +119,32 @@ func filterGenCmd(ctx *cli.Context) error {
// filterTestGen is the filter query test generator. // filterTestGen is the filter query test generator.
type filterTestGen struct { type filterTestGen struct {
client *client client *client
queryFile string queryFile string
maxFilterRange int64
finalizedBlock int64 blockLimit int64
queries [filterBuckets][]*filterQuery queries [filterBuckets][]*filterQuery
} }
func newFilterTestGen(ctx *cli.Context) *filterTestGen { func newFilterTestGen(ctx *cli.Context, maxFilterRange int64) *filterTestGen {
return &filterTestGen{ return &filterTestGen{
client: makeClient(ctx), client: makeClient(ctx),
queryFile: ctx.String(filterQueryFileFlag.Name), queryFile: ctx.String(filterQueryFileFlag.Name),
maxFilterRange: maxFilterRange,
} }
} }
func (s *filterTestGen) updateFinalizedBlock() { func (s *filterTestGen) setLimitToFinalizedBlock() {
s.finalizedBlock = mustGetFinalizedBlock(s.client) s.blockLimit = mustGetFinalizedBlock(s.client)
} }
const (
// Parameter of the random filter query generator.
maxFilterRange = 10000000
maxFilterResultSize = 300
filterBuckets = 10
maxFilterBucketSize = 100
filterSeedChance = 10
filterMergeChance = 45
)
// storeQuery adds a filter query to the output file. // storeQuery adds a filter query to the output file.
func (s *filterTestGen) storeQuery(query *filterQuery) { func (s *filterTestGen) storeQuery(query *filterQuery) {
query.ResultHash = new(common.Hash) query.ResultHash = new(common.Hash)
*query.ResultHash = query.calculateHash() *query.ResultHash = query.calculateHash()
logRatio := math.Log(float64(len(query.results))*float64(s.finalizedBlock)/float64(query.ToBlock+1-query.FromBlock)) / math.Log(float64(s.finalizedBlock)*maxFilterResultSize) maxFilterRange := min(s.maxFilterRange, s.blockLimit)
logRatio := math.Log(float64(len(query.results))*float64(maxFilterRange)/float64(query.ToBlock+1-query.FromBlock)) / math.Log(float64(maxFilterRange)*maxFilterResultSize)
bucket := int(math.Floor(logRatio * filterBuckets)) bucket := int(math.Floor(logRatio * filterBuckets))
if bucket >= filterBuckets { if bucket >= filterBuckets {
bucket = filterBuckets - 1 bucket = filterBuckets - 1
@ -160,13 +164,13 @@ func (s *filterTestGen) storeQuery(query *filterQuery) {
func (s *filterTestGen) extendRange(q *filterQuery) *filterQuery { func (s *filterTestGen) extendRange(q *filterQuery) *filterQuery {
rangeLen := q.ToBlock + 1 - q.FromBlock rangeLen := q.ToBlock + 1 - q.FromBlock
extLen := rand.Int63n(rangeLen) + 1 extLen := rand.Int63n(rangeLen) + 1
if rangeLen+extLen > s.finalizedBlock { if rangeLen+extLen > min(s.maxFilterRange, s.blockLimit) {
return nil return nil
} }
extBefore := min(rand.Int63n(extLen+1), q.FromBlock) extBefore := min(rand.Int63n(extLen+1), q.FromBlock)
extAfter := extLen - extBefore extAfter := extLen - extBefore
if q.ToBlock+extAfter > s.finalizedBlock { if q.ToBlock+extAfter > s.blockLimit {
d := q.ToBlock + extAfter - s.finalizedBlock d := q.ToBlock + extAfter - s.blockLimit
extAfter -= d extAfter -= d
if extBefore+d <= q.FromBlock { if extBefore+d <= q.FromBlock {
extBefore += d extBefore += d
@ -203,7 +207,7 @@ func (s *filterTestGen) newQuery() *filterQuery {
// newSeedQuery creates a query that gets all logs in a random non-finalized block. // newSeedQuery creates a query that gets all logs in a random non-finalized block.
func (s *filterTestGen) newSeedQuery() *filterQuery { func (s *filterTestGen) newSeedQuery() *filterQuery {
block := rand.Int63n(s.finalizedBlock + 1) block := rand.Int63n(s.blockLimit + 1)
return &filterQuery{ return &filterQuery{
FromBlock: block, FromBlock: block,
ToBlock: block, ToBlock: block,
@ -358,6 +362,7 @@ func (s *filterTestGen) writeQueries() {
func mustGetFinalizedBlock(client *client) int64 { func mustGetFinalizedBlock(client *client) int64 {
ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) ctx, cancel := context.WithTimeout(context.Background(), time.Second*5)
defer cancel() defer cancel()
header, err := client.Eth.HeaderByNumber(ctx, big.NewInt(int64(rpc.FinalizedBlockNumber))) header, err := client.Eth.HeaderByNumber(ctx, big.NewInt(int64(rpc.FinalizedBlockNumber)))
if err != nil { if err != nil {
exit(fmt.Errorf("could not fetch finalized header (error: %v)", err)) exit(fmt.Errorf("could not fetch finalized header (error: %v)", err))

View file

@ -49,6 +49,7 @@ func init() {
filterGenerateCommand, filterGenerateCommand,
traceGenerateCommand, traceGenerateCommand,
filterPerfCommand, filterPerfCommand,
filterFuzzCommand,
} }
} }

View file

@ -394,7 +394,7 @@ func NewBlockChain(db ethdb.Database, genesis *Genesis, engine consensus.Engine,
bc.statedb = state.NewDatabase(bc.triedb, nil) bc.statedb = state.NewDatabase(bc.triedb, nil)
bc.validator = NewBlockValidator(chainConfig, bc) bc.validator = NewBlockValidator(chainConfig, bc)
bc.prefetcher = newStatePrefetcher(chainConfig, bc.hc) bc.prefetcher = newStatePrefetcher(chainConfig, bc.hc)
bc.processor = NewStateProcessor(chainConfig, bc.hc) bc.processor = NewStateProcessor(bc.hc)
genesisHeader := bc.GetHeaderByNumber(0) genesisHeader := bc.GetHeaderByNumber(0)
if genesisHeader == nil { if genesisHeader == nil {
@ -1690,7 +1690,12 @@ func (bc *BlockChain) writeBlockAndSetHead(block *types.Block, receipts []*types
// Set new head. // Set new head.
bc.writeHeadBlock(block) bc.writeHeadBlock(block)
bc.chainFeed.Send(ChainEvent{Header: block.Header()}) bc.chainFeed.Send(ChainEvent{
Header: block.Header(),
Receipts: receipts,
Transactions: block.Transactions(),
})
if len(logs) > 0 { if len(logs) > 0 {
bc.logsFeed.Send(logs) bc.logsFeed.Send(logs)
} }
@ -2342,6 +2347,13 @@ func (bc *BlockChain) recoverAncestors(block *types.Block, makeWitness bool) (co
// collectLogs collects the logs that were generated or removed during the // collectLogs collects the logs that were generated or removed during the
// processing of a block. These logs are later announced as deleted or reborn. // processing of a block. These logs are later announced as deleted or reborn.
func (bc *BlockChain) collectLogs(b *types.Block, removed bool) []*types.Log { func (bc *BlockChain) collectLogs(b *types.Block, removed bool) []*types.Log {
_, logs := bc.collectReceiptsAndLogs(b, removed)
return logs
}
// collectReceiptsAndLogs retrieves receipts from the database and returns both receipts and logs.
// This avoids duplicate database reads when both are needed.
func (bc *BlockChain) collectReceiptsAndLogs(b *types.Block, removed bool) ([]*types.Receipt, []*types.Log) {
var blobGasPrice *big.Int var blobGasPrice *big.Int
if b.ExcessBlobGas() != nil { if b.ExcessBlobGas() != nil {
blobGasPrice = eip4844.CalcBlobFee(bc.chainConfig, b.Header()) blobGasPrice = eip4844.CalcBlobFee(bc.chainConfig, b.Header())
@ -2359,7 +2371,7 @@ func (bc *BlockChain) collectLogs(b *types.Block, removed bool) []*types.Log {
logs = append(logs, log) logs = append(logs, log)
} }
} }
return logs return receipts, logs
} }
// reorg takes two blocks, an old chain and a new chain and will reconstruct the // reorg takes two blocks, an old chain and a new chain and will reconstruct the
@ -2588,8 +2600,14 @@ func (bc *BlockChain) SetCanonical(head *types.Block) (common.Hash, error) {
bc.writeHeadBlock(head) bc.writeHeadBlock(head)
// Emit events // Emit events
logs := bc.collectLogs(head, false) receipts, logs := bc.collectReceiptsAndLogs(head, false)
bc.chainFeed.Send(ChainEvent{Header: head.Header()})
bc.chainFeed.Send(ChainEvent{
Header: head.Header(),
Receipts: receipts,
Transactions: head.Transactions(),
})
if len(logs) > 0 { if len(logs) > 0 {
bc.logsFeed.Send(logs) bc.logsFeed.Send(logs)
} }

View file

@ -27,7 +27,9 @@ type NewTxsEvent struct{ Txs []*types.Transaction }
type RemovedLogsEvent struct{ Logs []*types.Log } type RemovedLogsEvent struct{ Logs []*types.Log }
type ChainEvent struct { type ChainEvent struct {
Header *types.Header Header *types.Header
Receipts []*types.Receipt
Transactions []*types.Transaction
} }
type ChainHeadEvent struct { type ChainHeadEvent struct {

View file

@ -25,21 +25,16 @@ import (
"github.com/ethereum/go-ethereum/core/tracing" "github.com/ethereum/go-ethereum/core/tracing"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/params"
"github.com/holiman/uint256" "github.com/holiman/uint256"
) )
// ChainContext supports retrieving headers and consensus parameters from the // ChainContext supports retrieving headers and consensus parameters from the
// current blockchain to be used during transaction processing. // current blockchain to be used during transaction processing.
type ChainContext interface { type ChainContext interface {
consensus.ChainHeaderReader
// Engine retrieves the chain's consensus engine. // Engine retrieves the chain's consensus engine.
Engine() consensus.Engine Engine() consensus.Engine
// GetHeader returns the header corresponding to the hash/number argument pair.
GetHeader(common.Hash, uint64) *types.Header
// Config returns the chain's configuration.
Config() *params.ChainConfig
} }
// NewEVMBlockContext creates a new context for use in the EVM. // NewEVMBlockContext creates a new context for use in the EVM.

View file

@ -664,15 +664,6 @@ func DeleteReceipts(db ethdb.KeyValueWriter, hash common.Hash, number uint64) {
} }
} }
// storedReceiptRLP is the storage encoding of a receipt.
// Re-definition in core/types/receipt.go.
// TODO: Re-use the existing definition.
type storedReceiptRLP struct {
PostStateOrStatus []byte
CumulativeGasUsed uint64
Logs []*types.Log
}
// ReceiptLogs is a barebone version of ReceiptForStorage which only keeps // ReceiptLogs is a barebone version of ReceiptForStorage which only keeps
// the list of logs. When decoding a stored receipt into this object we // the list of logs. When decoding a stored receipt into this object we
// avoid creating the bloom filter. // avoid creating the bloom filter.
@ -682,11 +673,11 @@ type receiptLogs struct {
// DecodeRLP implements rlp.Decoder. // DecodeRLP implements rlp.Decoder.
func (r *receiptLogs) DecodeRLP(s *rlp.Stream) error { func (r *receiptLogs) DecodeRLP(s *rlp.Stream) error {
var stored storedReceiptRLP var rs types.ReceiptForStorage
if err := s.Decode(&stored); err != nil { if err := rs.DecodeRLP(s); err != nil {
return err return err
} }
r.Logs = stored.Logs r.Logs = rs.Logs
return nil return nil
} }

View file

@ -46,6 +46,27 @@ func DeleteStateHistoryIndexMetadata(db ethdb.KeyValueWriter) {
} }
} }
// ReadTrienodeHistoryIndexMetadata retrieves the metadata of trienode history index.
// A nil slice is returned if the metadata is absent.
func ReadTrienodeHistoryIndexMetadata(db ethdb.KeyValueReader) []byte {
	blob, _ := db.Get(headTrienodeHistoryIndexKey)
	return blob
}
// WriteTrienodeHistoryIndexMetadata stores the metadata of trienode history index
// into database. Failures are fatal, consistent with the other rawdb writers.
func WriteTrienodeHistoryIndexMetadata(db ethdb.KeyValueWriter, blob []byte) {
	err := db.Put(headTrienodeHistoryIndexKey, blob)
	if err != nil {
		log.Crit("Failed to store the metadata of trienode history index", "err", err)
	}
}
// DeleteTrienodeHistoryIndexMetadata removes the metadata of trienode history index.
// Failures are fatal, consistent with the other rawdb writers.
func DeleteTrienodeHistoryIndexMetadata(db ethdb.KeyValueWriter) {
	err := db.Delete(headTrienodeHistoryIndexKey)
	if err != nil {
		log.Crit("Failed to delete the metadata of trienode history index", "err", err)
	}
}
// ReadAccountHistoryIndex retrieves the account history index with the provided // ReadAccountHistoryIndex retrieves the account history index with the provided
// account address. // account address.
func ReadAccountHistoryIndex(db ethdb.KeyValueReader, addressHash common.Hash) []byte { func ReadAccountHistoryIndex(db ethdb.KeyValueReader, addressHash common.Hash) []byte {
@ -95,6 +116,30 @@ func DeleteStorageHistoryIndex(db ethdb.KeyValueWriter, addressHash common.Hash,
} }
} }
// ReadTrienodeHistoryIndex retrieves the trienode history index with the provided
// account address and storage key hash. A nil slice is returned if the index is
// missing or empty.
func ReadTrienodeHistoryIndex(db ethdb.KeyValueReader, addressHash common.Hash, path []byte) []byte {
	blob, err := db.Get(trienodeHistoryIndexKey(addressHash, path))
	if err == nil && len(blob) > 0 {
		return blob
	}
	return nil
}
// WriteTrienodeHistoryIndex writes the provided trienode history index into database.
// Failures are fatal, consistent with the other rawdb writers.
func WriteTrienodeHistoryIndex(db ethdb.KeyValueWriter, addressHash common.Hash, path []byte, data []byte) {
	key := trienodeHistoryIndexKey(addressHash, path)
	if err := db.Put(key, data); err != nil {
		log.Crit("Failed to store trienode history index", "err", err)
	}
}
// DeleteTrienodeHistoryIndex deletes the specified trienode index from the database.
// Failures are fatal, consistent with the other rawdb writers.
func DeleteTrienodeHistoryIndex(db ethdb.KeyValueWriter, addressHash common.Hash, path []byte) {
	key := trienodeHistoryIndexKey(addressHash, path)
	if err := db.Delete(key); err != nil {
		log.Crit("Failed to delete trienode history index", "err", err)
	}
}
// ReadAccountHistoryIndexBlock retrieves the index block with the provided // ReadAccountHistoryIndexBlock retrieves the index block with the provided
// account address along with the block id. // account address along with the block id.
func ReadAccountHistoryIndexBlock(db ethdb.KeyValueReader, addressHash common.Hash, blockID uint32) []byte { func ReadAccountHistoryIndexBlock(db ethdb.KeyValueReader, addressHash common.Hash, blockID uint32) []byte {
@ -143,6 +188,30 @@ func DeleteStorageHistoryIndexBlock(db ethdb.KeyValueWriter, addressHash common.
} }
} }
// ReadTrienodeHistoryIndexBlock retrieves the index block with the provided state
// identifier along with the block id. A nil slice is returned if the block is
// missing or empty.
func ReadTrienodeHistoryIndexBlock(db ethdb.KeyValueReader, addressHash common.Hash, path []byte, blockID uint32) []byte {
	blob, err := db.Get(trienodeHistoryIndexBlockKey(addressHash, path, blockID))
	if err == nil && len(blob) > 0 {
		return blob
	}
	return nil
}
// WriteTrienodeHistoryIndexBlock writes the provided index block into database.
// Failures are fatal, consistent with the other rawdb writers.
func WriteTrienodeHistoryIndexBlock(db ethdb.KeyValueWriter, addressHash common.Hash, path []byte, id uint32, data []byte) {
	key := trienodeHistoryIndexBlockKey(addressHash, path, id)
	if err := db.Put(key, data); err != nil {
		log.Crit("Failed to store trienode index block", "err", err)
	}
}
// DeleteTrienodeHistoryIndexBlock deletes the specified index block from the database.
// Failures are fatal, consistent with the other rawdb writers.
func DeleteTrienodeHistoryIndexBlock(db ethdb.KeyValueWriter, addressHash common.Hash, path []byte, id uint32) {
	key := trienodeHistoryIndexBlockKey(addressHash, path, id)
	if err := db.Delete(key); err != nil {
		log.Crit("Failed to delete trienode index block", "err", err)
	}
}
// increaseKey increase the input key by one bit. Return nil if the entire // increaseKey increase the input key by one bit. Return nil if the entire
// addition operation overflows. // addition operation overflows.
func increaseKey(key []byte) []byte { func increaseKey(key []byte) []byte {
@ -155,14 +224,26 @@ func increaseKey(key []byte) []byte {
return nil return nil
} }
// DeleteStateHistoryIndex completely removes all history indexing data, including // DeleteStateHistoryIndexes completely removes all history indexing data, including
// indexes for accounts and storages. // indexes for accounts and storages.
// func DeleteStateHistoryIndexes(db ethdb.KeyValueRangeDeleter) {
// Note, this method assumes the storage space with prefix `StateHistoryIndexPrefix` DeleteHistoryByRange(db, StateHistoryAccountMetadataPrefix)
// is exclusively occupied by the history indexing data! DeleteHistoryByRange(db, StateHistoryStorageMetadataPrefix)
func DeleteStateHistoryIndex(db ethdb.KeyValueRangeDeleter) { DeleteHistoryByRange(db, StateHistoryAccountBlockPrefix)
start := StateHistoryIndexPrefix DeleteHistoryByRange(db, StateHistoryStorageBlockPrefix)
limit := increaseKey(bytes.Clone(StateHistoryIndexPrefix)) }
// DeleteTrienodeHistoryIndexes completely removes all trienode history indexing data.
func DeleteTrienodeHistoryIndexes(db ethdb.KeyValueRangeDeleter) {
DeleteHistoryByRange(db, TrienodeHistoryMetadataPrefix)
DeleteHistoryByRange(db, TrienodeHistoryBlockPrefix)
}
// DeleteHistoryByRange completely removes all database entries with the specific prefix.
// Note, this method assumes the space with the given prefix is exclusively occupied!
func DeleteHistoryByRange(db ethdb.KeyValueRangeDeleter, prefix []byte) {
start := prefix
limit := increaseKey(bytes.Clone(prefix))
// Try to remove the data in the range by a loop, as the leveldb // Try to remove the data in the range by a loop, as the leveldb
// doesn't support the native range deletion. // doesn't support the native range deletion.

View file

@ -170,9 +170,11 @@ func ReadStateHistoryMetaList(db ethdb.AncientReaderOp, start uint64, count uint
return db.AncientRange(stateHistoryMeta, start-1, count, 0) return db.AncientRange(stateHistoryMeta, start-1, count, 0)
} }
// ReadStateAccountIndex retrieves the state root corresponding to the specified // ReadStateAccountIndex retrieves the account index blob for the specified
// state history. Compute the position of state history in freezer by minus one // state history. The index contains fixed-size entries with offsets and lengths
// since the id of first state history starts from one(zero for initial state). // into the concatenated account data table. Compute the position of state
// history in freezer by minus one since the id of first state history starts
// from one (zero for initial state).
func ReadStateAccountIndex(db ethdb.AncientReaderOp, id uint64) []byte { func ReadStateAccountIndex(db ethdb.AncientReaderOp, id uint64) []byte {
blob, err := db.Ancient(stateHistoryAccountIndex, id-1) blob, err := db.Ancient(stateHistoryAccountIndex, id-1)
if err != nil { if err != nil {
@ -181,37 +183,30 @@ func ReadStateAccountIndex(db ethdb.AncientReaderOp, id uint64) []byte {
return blob return blob
} }
// ReadStateStorageIndex retrieves the state root corresponding to the specified // ReadStateStorageIndex retrieves the storage index blob for the specified
// state history. Compute the position of state history in freezer by minus one // state history. The index contains fixed-size entries that locate storage slot
// since the id of first state history starts from one(zero for initial state). // data in the concatenated storage data table. Compute the position of state
func ReadStateStorageIndex(db ethdb.AncientReaderOp, id uint64) []byte { // history in freezer by minus one since the id of first state history starts
blob, err := db.Ancient(stateHistoryStorageIndex, id-1) // from one (zero for initial state).
if err != nil { func ReadStateStorageIndex(db ethdb.AncientReaderOp, id uint64, offset, length int) ([]byte, error) {
return nil return db.AncientBytes(stateHistoryStorageIndex, id-1, uint64(offset), uint64(length))
}
return blob
} }
// ReadStateAccountHistory retrieves the state root corresponding to the specified // ReadStateAccountHistory retrieves the concatenated account data blob for the
// state history. Compute the position of state history in freezer by minus one // specified state history. Offsets and lengths are resolved via the account
// since the id of first state history starts from one(zero for initial state). // index. Compute the position of state history in freezer by minus one since
func ReadStateAccountHistory(db ethdb.AncientReaderOp, id uint64) []byte { // the id of first state history starts from one (zero for initial state).
blob, err := db.Ancient(stateHistoryAccountData, id-1) func ReadStateAccountHistory(db ethdb.AncientReaderOp, id uint64, offset, length int) ([]byte, error) {
if err != nil { return db.AncientBytes(stateHistoryAccountData, id-1, uint64(offset), uint64(length))
return nil
}
return blob
} }
// ReadStateStorageHistory retrieves the state root corresponding to the specified // ReadStateStorageHistory retrieves the concatenated storage slot data blob for
// state history. Compute the position of state history in freezer by minus one // the specified state history. Locations are resolved via the account and
// since the id of first state history starts from one(zero for initial state). // storage indexes. Compute the position of state history in freezer by minus
func ReadStateStorageHistory(db ethdb.AncientReaderOp, id uint64) []byte { // one since the id of first state history starts from one (zero for initial
blob, err := db.Ancient(stateHistoryStorageData, id-1) // state).
if err != nil { func ReadStateStorageHistory(db ethdb.AncientReaderOp, id uint64, offset, length int) ([]byte, error) {
return nil return db.AncientBytes(stateHistoryStorageData, id-1, uint64(offset), uint64(length))
}
return blob
} }
// ReadStateHistory retrieves the state history from database with provided id. // ReadStateHistory retrieves the state history from database with provided id.
@ -292,3 +287,76 @@ func WriteStateHistory(db ethdb.AncientWriter, id uint64, meta []byte, accountIn
}) })
return err return err
} }
// ReadTrienodeHistory retrieves the trienode history corresponding to the specified id.
// It returns the header, key and value sections in that order. Compute the position
// of trienode history in freezer by minus one since the id of first trienode history
// starts from one (zero for initial state).
func ReadTrienodeHistory(db ethdb.AncientReaderOp, id uint64) ([]byte, []byte, []byte, error) {
	var sections [3][]byte
	for i, table := range []string{
		trienodeHistoryHeaderTable,
		trienodeHistoryKeySectionTable,
		trienodeHistoryValueSectionTable,
	} {
		blob, err := db.Ancient(table, id-1)
		if err != nil {
			return nil, nil, nil, err
		}
		sections[i] = blob
	}
	return sections[0], sections[1], sections[2], nil
}
// ReadTrienodeHistoryHeader retrieves the header section of trienode history.
// The freezer position is id-1 because history IDs start at one.
func ReadTrienodeHistoryHeader(db ethdb.AncientReaderOp, id uint64) ([]byte, error) {
	blob, err := db.Ancient(trienodeHistoryHeaderTable, id-1)
	return blob, err
}
// ReadTrienodeHistoryKeySection retrieves the key section of trienode history.
// The freezer position is id-1 because history IDs start at one.
func ReadTrienodeHistoryKeySection(db ethdb.AncientReaderOp, id uint64) ([]byte, error) {
	blob, err := db.Ancient(trienodeHistoryKeySectionTable, id-1)
	return blob, err
}
// ReadTrienodeHistoryValueSection retrieves the value section of trienode history.
// The freezer position is id-1 because history IDs start at one.
func ReadTrienodeHistoryValueSection(db ethdb.AncientReaderOp, id uint64) ([]byte, error) {
	blob, err := db.Ancient(trienodeHistoryValueSectionTable, id-1)
	return blob, err
}
// ReadTrienodeHistoryList retrieves a list of trienode histories covering the
// specified range. It returns the header, key and value sections in that order.
// Compute the position of trienode history in freezer by minus one since the id
// of first trienode history starts from one (zero for initial state).
func ReadTrienodeHistoryList(db ethdb.AncientReaderOp, start uint64, count uint64) ([][]byte, [][]byte, [][]byte, error) {
	var sections [3][][]byte
	for i, table := range []string{
		trienodeHistoryHeaderTable,
		trienodeHistoryKeySectionTable,
		trienodeHistoryValueSectionTable,
	} {
		blobs, err := db.AncientRange(table, start-1, count, 0)
		if err != nil {
			return nil, nil, nil, err
		}
		sections[i] = blobs
	}
	// All three tables must yield the same number of entries.
	if len(sections[0]) != len(sections[1]) || len(sections[0]) != len(sections[2]) {
		return nil, nil, nil, errors.New("trienode history is corrupted")
	}
	return sections[0], sections[1], sections[2], nil
}
// WriteTrienodeHistory writes the provided trienode history to database.
// Compute the position of trienode history in freezer by minus one since
// the id of first trienode history starts from one (zero for initial state).
func WriteTrienodeHistory(db ethdb.AncientWriter, id uint64, header []byte, keySection []byte, valueSection []byte) error {
	_, err := db.ModifyAncients(func(op ethdb.AncientWriteOp) error {
		// Append the three sections in a fixed order; any failure aborts
		// the whole modification.
		for _, entry := range []struct {
			table string
			data  []byte
		}{
			{trienodeHistoryHeaderTable, header},
			{trienodeHistoryKeySectionTable, keySection},
			{trienodeHistoryValueSectionTable, valueSection},
		} {
			if err := op.AppendRaw(entry.table, id-1, entry.data); err != nil {
				return err
			}
		}
		return nil
	})
	return err
}

View file

@ -75,15 +75,38 @@ var stateFreezerTableConfigs = map[string]freezerTableConfig{
stateHistoryStorageData: {noSnappy: false, prunable: true}, stateHistoryStorageData: {noSnappy: false, prunable: true},
} }
// Table names of the trienode history freezer.
const (
	trienodeHistoryHeaderTable       = "trienode.header"
	trienodeHistoryKeySectionTable   = "trienode.key"
	trienodeHistoryValueSectionTable = "trienode.value"
)
// trienodeFreezerTableConfigs configures the settings for tables in the trienode freezer.
var trienodeFreezerTableConfigs = map[string]freezerTableConfig{
	// The header is kept compressed; presumably it is always read whole — confirm against readers.
	trienodeHistoryHeaderTable: {noSnappy: false, prunable: true},
	// Disable snappy compression to allow efficient partial read.
	trienodeHistoryKeySectionTable: {noSnappy: true, prunable: true},
	// Disable snappy compression to allow efficient partial read.
	trienodeHistoryValueSectionTable: {noSnappy: true, prunable: true},
}
// The list of identifiers of ancient stores. // The list of identifiers of ancient stores.
var ( var (
ChainFreezerName = "chain" // the folder name of chain segment ancient store. ChainFreezerName = "chain" // the folder name of chain segment ancient store.
MerkleStateFreezerName = "state" // the folder name of state history ancient store. MerkleStateFreezerName = "state" // the folder name of state history ancient store.
VerkleStateFreezerName = "state_verkle" // the folder name of state history ancient store. VerkleStateFreezerName = "state_verkle" // the folder name of state history ancient store.
MerkleTrienodeFreezerName = "trienode" // the folder name of trienode history ancient store.
VerkleTrienodeFreezerName = "trienode_verkle" // the folder name of trienode history ancient store.
) )
// freezers the collections of all builtin freezers. // freezers the collections of all builtin freezers.
var freezers = []string{ChainFreezerName, MerkleStateFreezerName, VerkleStateFreezerName} var freezers = []string{
ChainFreezerName,
MerkleStateFreezerName, VerkleStateFreezerName,
MerkleTrienodeFreezerName, VerkleTrienodeFreezerName,
}
// NewStateFreezer initializes the ancient store for state history. // NewStateFreezer initializes the ancient store for state history.
// //
@ -103,3 +126,22 @@ func NewStateFreezer(ancientDir string, verkle bool, readOnly bool) (ethdb.Reset
} }
return newResettableFreezer(name, "eth/db/state", readOnly, stateHistoryTableSize, stateFreezerTableConfigs) return newResettableFreezer(name, "eth/db/state", readOnly, stateHistoryTableSize, stateFreezerTableConfigs)
} }
// NewTrienodeFreezer initializes the ancient store for trienode history.
//
//   - if the empty directory is given, initializes the pure in-memory
//     trienode freezer (e.g. dev mode).
//   - if non-empty directory is given, initializes the regular file-based
//     trienode freezer.
func NewTrienodeFreezer(ancientDir string, verkle bool, readOnly bool) (ethdb.ResettableAncientStore, error) {
	// Without a data directory, fall back to a purely in-memory store.
	if ancientDir == "" {
		return NewMemoryFreezer(readOnly, trienodeFreezerTableConfigs), nil
	}
	name := filepath.Join(ancientDir, MerkleTrienodeFreezerName)
	if verkle {
		name = filepath.Join(ancientDir, VerkleTrienodeFreezerName)
	}
	return newResettableFreezer(name, "eth/db/trienode", readOnly, stateHistoryTableSize, trienodeFreezerTableConfigs)
}

View file

@ -403,6 +403,10 @@ func (f *chainFreezer) AncientRange(kind string, start, count, maxBytes uint64)
return f.ancients.AncientRange(kind, start, count, maxBytes) return f.ancients.AncientRange(kind, start, count, maxBytes)
} }
// AncientBytes retrieves the value segment of the element specified by the id
// and value offsets, delegating to the underlying ancient store.
func (f *chainFreezer) AncientBytes(kind string, id, offset, length uint64) ([]byte, error) {
	return f.ancients.AncientBytes(kind, id, offset, length)
}
func (f *chainFreezer) ModifyAncients(fn func(ethdb.AncientWriteOp) error) (int64, error) { func (f *chainFreezer) ModifyAncients(fn func(ethdb.AncientWriteOp) error) (int64, error) {
return f.ancients.ModifyAncients(fn) return f.ancients.ModifyAncients(fn)
} }

View file

@ -100,6 +100,12 @@ func (db *nofreezedb) AncientRange(kind string, start, max, maxByteSize uint64)
return nil, errNotSupported return nil, errNotSupported
} }
// AncientBytes retrieves the value segment of the element specified by the id
// and value offsets. It always returns errNotSupported, as there is no backing
// freezer behind nofreezedb.
func (db *nofreezedb) AncientBytes(kind string, id, offset, length uint64) ([]byte, error) {
	return nil, errNotSupported
}
// Ancients returns an error as we don't have a backing chain freezer. // Ancients returns an error as we don't have a backing chain freezer.
func (db *nofreezedb) Ancients() (uint64, error) { func (db *nofreezedb) Ancients() (uint64, error) {
return 0, errNotSupported return 0, errNotSupported

View file

@ -76,8 +76,9 @@ type Freezer struct {
// NewFreezer creates a freezer instance for maintaining immutable ordered // NewFreezer creates a freezer instance for maintaining immutable ordered
// data according to the given parameters. // data according to the given parameters.
// //
// The 'tables' argument defines the data tables. If the value of a map // The 'tables' argument defines the freezer tables and their configuration.
// entry is true, snappy compression is disabled for the table. // Each value is a freezerTableConfig specifying whether snappy compression is
// disabled (noSnappy) and whether the table is prunable (prunable).
func NewFreezer(datadir string, namespace string, readonly bool, maxTableSize uint32, tables map[string]freezerTableConfig) (*Freezer, error) { func NewFreezer(datadir string, namespace string, readonly bool, maxTableSize uint32, tables map[string]freezerTableConfig) (*Freezer, error) {
// Create the initial freezer object // Create the initial freezer object
var ( var (
@ -201,6 +202,15 @@ func (f *Freezer) AncientRange(kind string, start, count, maxBytes uint64) ([][]
return nil, errUnknownTable return nil, errUnknownTable
} }
// AncientBytes retrieves the value segment of the element specified by the id
// and value offsets. errUnknownTable is returned if the kind is not recognized.
func (f *Freezer) AncientBytes(kind string, id, offset, length uint64) ([]byte, error) {
	table := f.tables[kind]
	if table == nil {
		return nil, errUnknownTable
	}
	return table.RetrieveBytes(id, offset, length)
}
// Ancients returns the length of the frozen items. // Ancients returns the length of the frozen items.
func (f *Freezer) Ancients() (uint64, error) { func (f *Freezer) Ancients() (uint64, error) {
return f.frozen.Load(), nil return f.frozen.Load(), nil

View file

@ -412,3 +412,28 @@ func (f *MemoryFreezer) Reset() error {
func (f *MemoryFreezer) AncientDatadir() (string, error) { func (f *MemoryFreezer) AncientDatadir() (string, error) {
return "", nil return "", nil
} }
// AncientBytes retrieves the value segment of the element specified by the id
// and value offsets. errUnknownTable is returned for an unrecognized kind and
// errOutOfBounds if the id has no stored entry; a range exceeding the entry
// size yields a descriptive error.
func (f *MemoryFreezer) AncientBytes(kind string, id, offset, length uint64) ([]byte, error) {
	f.lock.RLock()
	defer f.lock.RUnlock()

	table := f.tables[kind]
	if table == nil {
		return nil, errUnknownTable
	}
	entries, err := table.retrieve(id, 1, 0)
	if err != nil {
		return nil, err
	}
	if len(entries) == 0 {
		return nil, errOutOfBounds
	}
	data := entries[0]
	// Validate the requested range without computing offset+length, which can
	// wrap around uint64 for a huge length and slip past the check (the slice
	// expression below would then panic instead of returning an error).
	if offset > uint64(len(data)) || length > uint64(len(data))-offset {
		return nil, fmt.Errorf("requested range out of bounds: item size %d, offset %d, length %d", len(data), offset, length)
	}
	return data[offset : offset+length], nil
}

View file

@ -126,6 +126,15 @@ func (f *resettableFreezer) AncientRange(kind string, start, count, maxBytes uin
return f.freezer.AncientRange(kind, start, count, maxBytes) return f.freezer.AncientRange(kind, start, count, maxBytes)
} }
// AncientBytes retrieves the value segment of the element specified by the id
// and value offsets.
func (f *resettableFreezer) AncientBytes(kind string, id, offset, length uint64) ([]byte, error) {
	// NOTE(review): the read lock mirrors the other accessors on this type;
	// presumably it prevents the inner freezer from being swapped (Reset)
	// mid-call — confirm against the Reset implementation.
	f.lock.RLock()
	defer f.lock.RUnlock()

	return f.freezer.AncientBytes(kind, id, offset, length)
}
// Ancients returns the length of the frozen items. // Ancients returns the length of the frozen items.
func (f *resettableFreezer) Ancients() (uint64, error) { func (f *resettableFreezer) Ancients() (uint64, error) {
f.lock.RLock() f.lock.RLock()

View file

@ -100,7 +100,7 @@ type freezerTable struct {
// should never be lower than itemOffset. // should never be lower than itemOffset.
itemHidden atomic.Uint64 itemHidden atomic.Uint64
config freezerTableConfig // if true, disables snappy compression. Note: does not work retroactively config freezerTableConfig // table configuration (compression, prunability). Note: compression flag does not apply retroactively to existing files
readonly bool readonly bool
maxFileSize uint32 // Max file size for data-files maxFileSize uint32 // Max file size for data-files
name string name string
@ -1107,6 +1107,71 @@ func (t *freezerTable) retrieveItems(start, count, maxBytes uint64) ([]byte, []i
return output, sizes, nil return output, sizes, nil
} }
// RetrieveBytes retrieves the value segment of the element specified by the id
// and value offsets. For uncompressed tables the read is a true partial read;
// for snappy-compressed tables the whole item is read and decompressed before
// slicing. Returns errClosed if the table is closed and errOutOfBounds if the
// item id is outside the retained range.
func (t *freezerTable) RetrieveBytes(item, offset, length uint64) ([]byte, error) {
	t.lock.RLock()
	defer t.lock.RUnlock()

	if t.index == nil || t.head == nil || t.metadata.file == nil {
		return nil, errClosed
	}
	items, hidden := t.items.Load(), t.itemHidden.Load()
	if items <= item || hidden > item {
		return nil, errOutOfBounds
	}
	// Retrieves the index entries for the specified ID and its immediate successor
	indices, err := t.getIndices(item, 1)
	if err != nil {
		return nil, err
	}
	index0, index1 := indices[0], indices[1]
	itemStart, itemLimit, fileId := index0.bounds(index1)
	itemSize := itemLimit - itemStart

	dataFile, exist := t.files[fileId]
	if !exist {
		return nil, fmt.Errorf("missing data file %d", fileId)
	}
	// Perform the partial read if no-compression was enabled upon
	if t.config.noSnappy {
		// Validate the range without computing offset+length, which can wrap
		// around uint64 for a huge length and slip past the check, leading to
		// a panic at the slice/ReadAt below instead of a clean error.
		if offset > uint64(itemSize) || length > uint64(itemSize)-offset {
			return nil, fmt.Errorf("requested range out of bounds: item size %d, offset %d, length %d", itemSize, offset, length)
		}
		itemStart += uint32(offset)
		buf := make([]byte, length)
		_, err = dataFile.ReadAt(buf, int64(itemStart))
		if err != nil {
			return nil, err
		}
		t.readMeter.Mark(int64(length))
		return buf, nil
	}
	// If compressed, read the full item, decompress, then slice.
	// Unfortunately, in this case, there is no performance gain
	// by performing the partial read at all.
	buf := make([]byte, itemSize)
	_, err = dataFile.ReadAt(buf, int64(itemStart))
	if err != nil {
		return nil, err
	}
	t.readMeter.Mark(int64(itemSize))

	data, err := snappy.Decode(nil, buf)
	if err != nil {
		return nil, err
	}
	// Same overflow-safe range validation as the uncompressed path.
	if offset > uint64(len(data)) || length > uint64(len(data))-offset {
		return nil, fmt.Errorf("requested range out of bounds: item size %d, offset %d, length %d", len(data), offset, length)
	}
	return data[offset : offset+length], nil
}
// size returns the total data size in the freezer table. // size returns the total data size in the freezer table.
func (t *freezerTable) size() (uint64, error) { func (t *freezerTable) size() (uint64, error) {
t.lock.RLock() t.lock.RLock()

View file

@ -1571,3 +1571,65 @@ func TestTailTruncationCrash(t *testing.T) {
t.Fatalf("Unexpected index flush offset, want: %d, got: %d", 26*indexEntrySize, f.metadata.flushOffset) t.Fatalf("Unexpected index flush offset, want: %d, got: %d", 26*indexEntrySize, f.metadata.flushOffset)
} }
} }
// TestFreezerAncientBytes exercises partial reads (RetrieveBytes) against full
// reads (Retrieve) for both compressed and uncompressed tables.
func TestFreezerAncientBytes(t *testing.T) {
	t.Parallel()

	types := []struct {
		name   string
		config freezerTableConfig
	}{
		{"uncompressed", freezerTableConfig{noSnappy: true}},
		{"compressed", freezerTableConfig{noSnappy: false}},
	}
	for _, typ := range types {
		t.Run(typ.name, func(t *testing.T) {
			f, err := newTable(os.TempDir(), fmt.Sprintf("ancientbytes-%s-%d", typ.name, rand.Uint64()), metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge(), 1000, typ.config, false)
			if err != nil {
				t.Fatal(err)
			}
			defer f.Close()

			// Fill the table with ten 100-byte entries.
			for i := 0; i < 10; i++ {
				batch := f.newBatch()
				require.NoError(t, batch.AppendRaw(uint64(i), getChunk(100, i)))
				require.NoError(t, batch.commit())
			}
			// Verify various subranges against the fully retrieved entry.
			for i := 0; i < 10; i++ {
				full, err := f.Retrieve(uint64(i))
				require.NoError(t, err)

				for _, c := range []struct {
					name           string
					offset, length uint64
				}{
					{"full read", 0, uint64(len(full))},
					{"empty read", 0, 0},
					{"middle slice", 10, 50},
					{"single byte", 99, 1},
				} {
					got, err := f.RetrieveBytes(uint64(i), c.offset, c.length)
					require.NoError(t, err)
					if !bytes.Equal(got, full[c.offset:c.offset+c.length]) {
						t.Fatalf("%s mismatch for entry %d", c.name, i)
					}
				}
				// Reads past the end of the entry must fail.
				if _, err := f.RetrieveBytes(uint64(i), 100, 1); err == nil {
					t.Fatalf("expected error for out-of-bounds read for entry %d", i)
				}
			}
		})
	}
}

View file

@ -80,6 +80,10 @@ var (
// been indexed. // been indexed.
headStateHistoryIndexKey = []byte("LastStateHistoryIndex") headStateHistoryIndexKey = []byte("LastStateHistoryIndex")
// headTrienodeHistoryIndexKey tracks the ID of the latest state history that has
// been indexed.
headTrienodeHistoryIndexKey = []byte("LastTrienodeHistoryIndex")
// txIndexTailKey tracks the oldest block whose transactions have been indexed. // txIndexTailKey tracks the oldest block whose transactions have been indexed.
txIndexTailKey = []byte("TransactionIndexTail") txIndexTailKey = []byte("TransactionIndexTail")
@ -125,8 +129,10 @@ var (
StateHistoryIndexPrefix = []byte("m") // The global prefix of state history index data StateHistoryIndexPrefix = []byte("m") // The global prefix of state history index data
StateHistoryAccountMetadataPrefix = []byte("ma") // StateHistoryAccountMetadataPrefix + account address hash => account metadata StateHistoryAccountMetadataPrefix = []byte("ma") // StateHistoryAccountMetadataPrefix + account address hash => account metadata
StateHistoryStorageMetadataPrefix = []byte("ms") // StateHistoryStorageMetadataPrefix + account address hash + storage slot hash => slot metadata StateHistoryStorageMetadataPrefix = []byte("ms") // StateHistoryStorageMetadataPrefix + account address hash + storage slot hash => slot metadata
TrienodeHistoryMetadataPrefix = []byte("mt") // TrienodeHistoryMetadataPrefix + account address hash + trienode path => trienode metadata
StateHistoryAccountBlockPrefix = []byte("mba") // StateHistoryAccountBlockPrefix + account address hash + blockID => account block StateHistoryAccountBlockPrefix = []byte("mba") // StateHistoryAccountBlockPrefix + account address hash + blockID => account block
StateHistoryStorageBlockPrefix = []byte("mbs") // StateHistoryStorageBlockPrefix + account address hash + storage slot hash + blockID => slot block StateHistoryStorageBlockPrefix = []byte("mbs") // StateHistoryStorageBlockPrefix + account address hash + storage slot hash + blockID => slot block
TrienodeHistoryBlockPrefix = []byte("mbt") // TrienodeHistoryBlockPrefix + account address hash + trienode path + blockID => trienode block
// VerklePrefix is the database prefix for Verkle trie data, which includes: // VerklePrefix is the database prefix for Verkle trie data, which includes:
// (a) Trie nodes // (a) Trie nodes
@ -395,27 +401,34 @@ func storageHistoryIndexKey(addressHash common.Hash, storageHash common.Hash) []
return out return out
} }
// trienodeHistoryIndexKey = TrienodeHistoryMetadataPrefix + addressHash + trienode path
func trienodeHistoryIndexKey(addressHash common.Hash, path []byte) []byte {
	key := make([]byte, 0, len(TrienodeHistoryMetadataPrefix)+common.HashLength+len(path))
	key = append(key, TrienodeHistoryMetadataPrefix...)
	key = append(key, addressHash.Bytes()...)
	key = append(key, path...)
	return key
}
// accountHistoryIndexBlockKey = StateHistoryAccountBlockPrefix + addressHash + blockID // accountHistoryIndexBlockKey = StateHistoryAccountBlockPrefix + addressHash + blockID
func accountHistoryIndexBlockKey(addressHash common.Hash, blockID uint32) []byte { func accountHistoryIndexBlockKey(addressHash common.Hash, blockID uint32) []byte {
var buf4 [4]byte
binary.BigEndian.PutUint32(buf4[:], blockID)
totalLen := len(StateHistoryAccountBlockPrefix) + common.HashLength + 4 totalLen := len(StateHistoryAccountBlockPrefix) + common.HashLength + 4
out := make([]byte, totalLen) out := make([]byte, totalLen)
off := 0 off := 0
off += copy(out[off:], StateHistoryAccountBlockPrefix) off += copy(out[off:], StateHistoryAccountBlockPrefix)
off += copy(out[off:], addressHash.Bytes()) off += copy(out[off:], addressHash.Bytes())
copy(out[off:], buf4[:]) binary.BigEndian.PutUint32(out[off:], blockID)
return out return out
} }
// storageHistoryIndexBlockKey = StateHistoryStorageBlockPrefix + addressHash + storageHash + blockID // storageHistoryIndexBlockKey = StateHistoryStorageBlockPrefix + addressHash + storageHash + blockID
func storageHistoryIndexBlockKey(addressHash common.Hash, storageHash common.Hash, blockID uint32) []byte { func storageHistoryIndexBlockKey(addressHash common.Hash, storageHash common.Hash, blockID uint32) []byte {
var buf4 [4]byte
binary.BigEndian.PutUint32(buf4[:], blockID)
totalLen := len(StateHistoryStorageBlockPrefix) + 2*common.HashLength + 4 totalLen := len(StateHistoryStorageBlockPrefix) + 2*common.HashLength + 4
out := make([]byte, totalLen) out := make([]byte, totalLen)
@ -423,7 +436,21 @@ func storageHistoryIndexBlockKey(addressHash common.Hash, storageHash common.Has
off += copy(out[off:], StateHistoryStorageBlockPrefix) off += copy(out[off:], StateHistoryStorageBlockPrefix)
off += copy(out[off:], addressHash.Bytes()) off += copy(out[off:], addressHash.Bytes())
off += copy(out[off:], storageHash.Bytes()) off += copy(out[off:], storageHash.Bytes())
copy(out[off:], buf4[:]) binary.BigEndian.PutUint32(out[off:], blockID)
return out
}
// trienodeHistoryIndexBlockKey = TrienodeHistoryBlockPrefix + addressHash + trienode path + blockID
func trienodeHistoryIndexBlockKey(addressHash common.Hash, path []byte, blockID uint32) []byte {
totalLen := len(TrienodeHistoryBlockPrefix) + common.HashLength + len(path) + 4
out := make([]byte, totalLen)
off := 0
off += copy(out[off:], TrienodeHistoryBlockPrefix)
off += copy(out[off:], addressHash.Bytes())
off += copy(out[off:], path)
binary.BigEndian.PutUint32(out[off:], blockID)
return out return out
} }

View file

@ -62,6 +62,12 @@ func (t *table) AncientRange(kind string, start, count, maxBytes uint64) ([][]by
return t.db.AncientRange(kind, start, count, maxBytes) return t.db.AncientRange(kind, start, count, maxBytes)
} }
// AncientBytes is a noop passthrough that just forwards the request to the underlying
// database.
func (t *table) AncientBytes(kind string, id, offset, length uint64) ([]byte, error) {
return t.db.AncientBytes(kind, id, offset, length)
}
// Ancients is a noop passthrough that just forwards the request to the underlying // Ancients is a noop passthrough that just forwards the request to the underlying
// database. // database.
func (t *table) Ancients() (uint64, error) { func (t *table) Ancients() (uint64, error) {

View file

@ -131,7 +131,7 @@ func TestMessageCallGas(t *testing.T) {
} }
gas = ae.CodeHashGas(testAddr, false, math.MaxUint64, false) gas = ae.CodeHashGas(testAddr, false, math.MaxUint64, false)
if gas != params.WitnessChunkReadCost { if gas != params.WitnessChunkReadCost {
t.Fatalf("incorrect gas computed, got %d, want %d", gas, 0) t.Fatalf("incorrect gas computed, got %d, want %d", gas, params.WitnessChunkReadCost)
} }
// Check warm read cost // Check warm read cost

View file

@ -35,18 +35,21 @@ import (
// //
// StateProcessor implements Processor. // StateProcessor implements Processor.
type StateProcessor struct { type StateProcessor struct {
config *params.ChainConfig // Chain configuration options chain ChainContext // Chain context interface
chain *HeaderChain // Canonical header chain
} }
// NewStateProcessor initialises a new StateProcessor. // NewStateProcessor initialises a new StateProcessor.
func NewStateProcessor(config *params.ChainConfig, chain *HeaderChain) *StateProcessor { func NewStateProcessor(chain ChainContext) *StateProcessor {
return &StateProcessor{ return &StateProcessor{
config: config, chain: chain,
chain: chain,
} }
} }
// chainConfig returns the chain configuration.
func (p *StateProcessor) chainConfig() *params.ChainConfig {
return p.chain.Config()
}
// Process processes the state changes according to the Ethereum rules by running // Process processes the state changes according to the Ethereum rules by running
// the transaction messages using the statedb and applying any rewards to both // the transaction messages using the statedb and applying any rewards to both
// the processor (coinbase) and any included uncles. // the processor (coinbase) and any included uncles.
@ -56,6 +59,7 @@ func NewStateProcessor(config *params.ChainConfig, chain *HeaderChain) *StatePro
// transactions failed to execute due to insufficient gas it will return an error. // transactions failed to execute due to insufficient gas it will return an error.
func (p *StateProcessor) Process(block *types.Block, statedb *state.StateDB, cfg vm.Config) (*ProcessResult, error) { func (p *StateProcessor) Process(block *types.Block, statedb *state.StateDB, cfg vm.Config) (*ProcessResult, error) {
var ( var (
config = p.chainConfig()
receipts types.Receipts receipts types.Receipts
usedGas = new(uint64) usedGas = new(uint64)
header = block.Header() header = block.Header()
@ -66,12 +70,12 @@ func (p *StateProcessor) Process(block *types.Block, statedb *state.StateDB, cfg
) )
// Mutate the block and state according to any hard-fork specs // Mutate the block and state according to any hard-fork specs
if p.config.DAOForkSupport && p.config.DAOForkBlock != nil && p.config.DAOForkBlock.Cmp(block.Number()) == 0 { if config.DAOForkSupport && config.DAOForkBlock != nil && config.DAOForkBlock.Cmp(block.Number()) == 0 {
misc.ApplyDAOHardFork(statedb) misc.ApplyDAOHardFork(statedb)
} }
var ( var (
context vm.BlockContext context vm.BlockContext
signer = types.MakeSigner(p.config, header.Number, header.Time) signer = types.MakeSigner(config, header.Number, header.Time)
) )
// Apply pre-execution system calls. // Apply pre-execution system calls.
@ -80,12 +84,12 @@ func (p *StateProcessor) Process(block *types.Block, statedb *state.StateDB, cfg
tracingStateDB = state.NewHookedState(statedb, hooks) tracingStateDB = state.NewHookedState(statedb, hooks)
} }
context = NewEVMBlockContext(header, p.chain, nil) context = NewEVMBlockContext(header, p.chain, nil)
evm := vm.NewEVM(context, tracingStateDB, p.config, cfg) evm := vm.NewEVM(context, tracingStateDB, config, cfg)
if beaconRoot := block.BeaconRoot(); beaconRoot != nil { if beaconRoot := block.BeaconRoot(); beaconRoot != nil {
ProcessBeaconBlockRoot(*beaconRoot, evm) ProcessBeaconBlockRoot(*beaconRoot, evm)
} }
if p.config.IsPrague(block.Number(), block.Time()) || p.config.IsVerkle(block.Number(), block.Time()) { if config.IsPrague(block.Number(), block.Time()) || config.IsVerkle(block.Number(), block.Time()) {
ProcessParentBlockHash(block.ParentHash(), evm) ProcessParentBlockHash(block.ParentHash(), evm)
} }
@ -106,10 +110,10 @@ func (p *StateProcessor) Process(block *types.Block, statedb *state.StateDB, cfg
} }
// Read requests if Prague is enabled. // Read requests if Prague is enabled.
var requests [][]byte var requests [][]byte
if p.config.IsPrague(block.Number(), block.Time()) { if config.IsPrague(block.Number(), block.Time()) {
requests = [][]byte{} requests = [][]byte{}
// EIP-6110 // EIP-6110
if err := ParseDepositLogs(&requests, allLogs, p.config); err != nil { if err := ParseDepositLogs(&requests, allLogs, config); err != nil {
return nil, fmt.Errorf("failed to parse deposit logs: %w", err) return nil, fmt.Errorf("failed to parse deposit logs: %w", err)
} }
// EIP-7002 // EIP-7002
@ -123,7 +127,7 @@ func (p *StateProcessor) Process(block *types.Block, statedb *state.StateDB, cfg
} }
// Finalize the block, applying any consensus engine specific extras (e.g. block rewards) // Finalize the block, applying any consensus engine specific extras (e.g. block rewards)
p.chain.engine.Finalize(p.chain, header, tracingStateDB, block.Body()) p.chain.Engine().Finalize(p.chain, header, tracingStateDB, block.Body())
return &ProcessResult{ return &ProcessResult{
Receipts: receipts, Receipts: receipts,

View file

@ -62,7 +62,7 @@ func ExecuteStateless(config *params.ChainConfig, vmconfig vm.Config, block *typ
headerCache: lru.NewCache[common.Hash, *types.Header](256), headerCache: lru.NewCache[common.Hash, *types.Header](256),
engine: beacon.New(ethash.NewFaker()), engine: beacon.New(ethash.NewFaker()),
} }
processor := NewStateProcessor(config, chain) processor := NewStateProcessor(chain)
validator := NewBlockValidator(config, nil) // No chain, we only validate the state, not the block validator := NewBlockValidator(config, nil) // No chain, we only validate the state, not the block
// Run the stateless blocks processing and self-validate certain fields // Run the stateless blocks processing and self-validate certain fields

View file

@ -23,7 +23,6 @@ import (
"math" "math"
"math/big" "math/big"
"slices" "slices"
"sort"
"sync" "sync"
"sync/atomic" "sync/atomic"
"time" "time"
@ -238,11 +237,10 @@ type LegacyPool struct {
pendingNonces *noncer // Pending state tracking virtual nonces pendingNonces *noncer // Pending state tracking virtual nonces
reserver txpool.Reserver // Address reserver to ensure exclusivity across subpools reserver txpool.Reserver // Address reserver to ensure exclusivity across subpools
pending map[common.Address]*list // All currently processable transactions pending map[common.Address]*list // All currently processable transactions
queue map[common.Address]*list // Queued but non-processable transactions queue *queue
beats map[common.Address]time.Time // Last heartbeat from each known account all *lookup // All transactions to allow lookups
all *lookup // All transactions to allow lookups priced *pricedList // All transactions sorted by price
priced *pricedList // All transactions sorted by price
reqResetCh chan *txpoolResetRequest reqResetCh chan *txpoolResetRequest
reqPromoteCh chan *accountSet reqPromoteCh chan *accountSet
@ -266,14 +264,14 @@ func New(config Config, chain BlockChain) *LegacyPool {
config = (&config).sanitize() config = (&config).sanitize()
// Create the transaction pool with its initial settings // Create the transaction pool with its initial settings
signer := types.LatestSigner(chain.Config())
pool := &LegacyPool{ pool := &LegacyPool{
config: config, config: config,
chain: chain, chain: chain,
chainconfig: chain.Config(), chainconfig: chain.Config(),
signer: types.LatestSigner(chain.Config()), signer: signer,
pending: make(map[common.Address]*list), pending: make(map[common.Address]*list),
queue: make(map[common.Address]*list), queue: newQueue(config, signer),
beats: make(map[common.Address]time.Time),
all: newLookup(), all: newLookup(),
reqResetCh: make(chan *txpoolResetRequest), reqResetCh: make(chan *txpoolResetRequest),
reqPromoteCh: make(chan *accountSet), reqPromoteCh: make(chan *accountSet),
@ -369,15 +367,8 @@ func (pool *LegacyPool) loop() {
// Handle inactive account transaction eviction // Handle inactive account transaction eviction
case <-evict.C: case <-evict.C:
pool.mu.Lock() pool.mu.Lock()
for addr := range pool.queue { for _, hash := range pool.queue.evictList() {
// Any old enough should be removed pool.removeTx(hash, true, true)
if time.Since(pool.beats[addr]) > pool.config.Lifetime {
list := pool.queue[addr].Flatten()
for _, tx := range list {
pool.removeTx(tx.Hash(), true, true)
}
queuedEvictionMeter.Mark(int64(len(list)))
}
} }
pool.mu.Unlock() pool.mu.Unlock()
} }
@ -459,11 +450,7 @@ func (pool *LegacyPool) stats() (int, int) {
for _, list := range pool.pending { for _, list := range pool.pending {
pending += list.Len() pending += list.Len()
} }
queued := 0 return pending, pool.queue.stats()
for _, list := range pool.queue {
queued += list.Len()
}
return pending, queued
} }
// Content retrieves the data content of the transaction pool, returning all the // Content retrieves the data content of the transaction pool, returning all the
@ -476,10 +463,7 @@ func (pool *LegacyPool) Content() (map[common.Address][]*types.Transaction, map[
for addr, list := range pool.pending { for addr, list := range pool.pending {
pending[addr] = list.Flatten() pending[addr] = list.Flatten()
} }
queued := make(map[common.Address][]*types.Transaction, len(pool.queue)) queued := pool.queue.content()
for addr, list := range pool.queue {
queued[addr] = list.Flatten()
}
return pending, queued return pending, queued
} }
@ -493,10 +477,7 @@ func (pool *LegacyPool) ContentFrom(addr common.Address) ([]*types.Transaction,
if list, ok := pool.pending[addr]; ok { if list, ok := pool.pending[addr]; ok {
pending = list.Flatten() pending = list.Flatten()
} }
var queued []*types.Transaction queued := pool.queue.contentFrom(addr)
if list, ok := pool.queue[addr]; ok {
queued = list.Flatten()
}
return pending, queued return pending, queued
} }
@ -644,7 +625,7 @@ func (pool *LegacyPool) validateAuth(tx *types.Transaction) error {
if pending := pool.pending[auth]; pending != nil { if pending := pool.pending[auth]; pending != nil {
count += pending.Len() count += pending.Len()
} }
if queue := pool.queue[auth]; queue != nil { if queue, ok := pool.queue.get(auth); ok {
count += queue.Len() count += queue.Len()
} }
if count > 1 { if count > 1 {
@ -691,7 +672,7 @@ func (pool *LegacyPool) add(tx *types.Transaction) (replaced bool, err error) {
// only by this subpool until all transactions are evicted // only by this subpool until all transactions are evicted
var ( var (
_, hasPending = pool.pending[from] _, hasPending = pool.pending[from]
_, hasQueued = pool.queue[from] _, hasQueued = pool.queue.get(from)
) )
if !hasPending && !hasQueued { if !hasPending && !hasQueued {
if err := pool.reserver.Hold(from); err != nil { if err := pool.reserver.Hold(from); err != nil {
@ -790,7 +771,7 @@ func (pool *LegacyPool) add(tx *types.Transaction) (replaced bool, err error) {
log.Trace("Pooled new executable transaction", "hash", hash, "from", from, "to", tx.To()) log.Trace("Pooled new executable transaction", "hash", hash, "from", from, "to", tx.To())
// Successful promotion, bump the heartbeat // Successful promotion, bump the heartbeat
pool.beats[from] = time.Now() pool.queue.bump(from)
return old != nil, nil return old != nil, nil
} }
// New transaction isn't replacing a pending one, push into queue // New transaction isn't replacing a pending one, push into queue
@ -815,7 +796,7 @@ func (pool *LegacyPool) isGapped(from common.Address, tx *types.Transaction) boo
} }
// The transaction has a nonce gap with pending list, it's only considered // The transaction has a nonce gap with pending list, it's only considered
// as executable if transactions in queue can fill up the nonce gap. // as executable if transactions in queue can fill up the nonce gap.
queue, ok := pool.queue[from] queue, ok := pool.queue.get(from)
if !ok { if !ok {
return true return true
} }
@ -831,25 +812,12 @@ func (pool *LegacyPool) isGapped(from common.Address, tx *types.Transaction) boo
// //
// Note, this method assumes the pool lock is held! // Note, this method assumes the pool lock is held!
func (pool *LegacyPool) enqueueTx(hash common.Hash, tx *types.Transaction, addAll bool) (bool, error) { func (pool *LegacyPool) enqueueTx(hash common.Hash, tx *types.Transaction, addAll bool) (bool, error) {
// Try to insert the transaction into the future queue replaced, err := pool.queue.add(tx)
from, _ := types.Sender(pool.signer, tx) // already validated if err != nil {
if pool.queue[from] == nil { return false, err
pool.queue[from] = newList(false)
} }
inserted, old := pool.queue[from].Add(tx, pool.config.PriceBump) if replaced != nil {
if !inserted { pool.removeTx(*replaced, true, true)
// An older transaction was better, discard this
queuedDiscardMeter.Mark(1)
return false, txpool.ErrReplaceUnderpriced
}
// Discard any previous transaction and mark this
if old != nil {
pool.all.Remove(old.Hash())
pool.priced.Removed(1)
queuedReplaceMeter.Mark(1)
} else {
// Nothing was replaced, bump the queued counter
queuedGauge.Inc(1)
} }
// If the transaction isn't in lookup set but it's expected to be there, // If the transaction isn't in lookup set but it's expected to be there,
// show the error log. // show the error log.
@ -860,11 +828,7 @@ func (pool *LegacyPool) enqueueTx(hash common.Hash, tx *types.Transaction, addAl
pool.all.Add(tx) pool.all.Add(tx)
pool.priced.Put(tx) pool.priced.Put(tx)
} }
// If we never record the heartbeat, do it right now. return replaced != nil, nil
if _, exist := pool.beats[from]; !exist {
pool.beats[from] = time.Now()
}
return old != nil, nil
} }
// promoteTx adds a transaction to the pending (processable) list of transactions // promoteTx adds a transaction to the pending (processable) list of transactions
@ -899,7 +863,7 @@ func (pool *LegacyPool) promoteTx(addr common.Address, hash common.Hash, tx *typ
pool.pendingNonces.set(addr, tx.Nonce()+1) pool.pendingNonces.set(addr, tx.Nonce()+1)
// Successful promotion, bump the heartbeat // Successful promotion, bump the heartbeat
pool.beats[addr] = time.Now() pool.queue.bump(addr)
return true return true
} }
@ -984,17 +948,24 @@ func (pool *LegacyPool) Add(txs []*types.Transaction, sync bool) []error {
// addTxsLocked attempts to queue a batch of transactions if they are valid. // addTxsLocked attempts to queue a batch of transactions if they are valid.
// The transaction pool lock must be held. // The transaction pool lock must be held.
// Returns the error for each tx, and the set of accounts that might became promotable.
func (pool *LegacyPool) addTxsLocked(txs []*types.Transaction) ([]error, *accountSet) { func (pool *LegacyPool) addTxsLocked(txs []*types.Transaction) ([]error, *accountSet) {
dirty := newAccountSet(pool.signer) var (
errs := make([]error, len(txs)) dirty = newAccountSet(pool.signer)
errs = make([]error, len(txs))
valid int64
)
for i, tx := range txs { for i, tx := range txs {
replaced, err := pool.add(tx) replaced, err := pool.add(tx)
errs[i] = err errs[i] = err
if err == nil && !replaced { if err == nil {
dirty.addTx(tx) if !replaced {
dirty.addTx(tx)
}
valid++
} }
} }
validTxMeter.Mark(int64(len(dirty.accounts))) validTxMeter.Mark(valid)
return errs, dirty return errs, dirty
} }
@ -1012,7 +983,7 @@ func (pool *LegacyPool) Status(hash common.Hash) txpool.TxStatus {
if txList := pool.pending[from]; txList != nil && txList.txs.items[tx.Nonce()] != nil { if txList := pool.pending[from]; txList != nil && txList.txs.items[tx.Nonce()] != nil {
return txpool.TxStatusPending return txpool.TxStatusPending
} else if txList := pool.queue[from]; txList != nil && txList.txs.items[tx.Nonce()] != nil { } else if txList, ok := pool.queue.get(from); ok && txList.txs.items[tx.Nonce()] != nil {
return txpool.TxStatusQueued return txpool.TxStatusQueued
} }
return txpool.TxStatusUnknown return txpool.TxStatusUnknown
@ -1089,7 +1060,7 @@ func (pool *LegacyPool) removeTx(hash common.Hash, outofbound bool, unreserve bo
defer func() { defer func() {
var ( var (
_, hasPending = pool.pending[addr] _, hasPending = pool.pending[addr]
_, hasQueued = pool.queue[addr] _, hasQueued = pool.queue.get(addr)
) )
if !hasPending && !hasQueued { if !hasPending && !hasQueued {
pool.reserver.Release(addr) pool.reserver.Release(addr)
@ -1121,16 +1092,7 @@ func (pool *LegacyPool) removeTx(hash common.Hash, outofbound bool, unreserve bo
} }
} }
// Transaction is in the future queue // Transaction is in the future queue
if future := pool.queue[addr]; future != nil { pool.queue.remove(addr, tx)
if removed, _ := future.Remove(tx); removed {
// Reduce the queued counter
queuedGauge.Dec(1)
}
if future.Empty() {
delete(pool.queue, addr)
delete(pool.beats, addr)
}
}
return 0 return 0
} }
@ -1278,10 +1240,7 @@ func (pool *LegacyPool) runReorg(done chan struct{}, reset *txpoolResetRequest,
} }
} }
// Reset needs promote for all addresses // Reset needs promote for all addresses
promoteAddrs = make([]common.Address, 0, len(pool.queue)) promoteAddrs = pool.queue.addresses()
for addr := range pool.queue {
promoteAddrs = append(promoteAddrs, addr)
}
} }
// Check for pending transactions for every account that sent new ones // Check for pending transactions for every account that sent new ones
promoted := pool.promoteExecutables(promoteAddrs) promoted := pool.promoteExecutables(promoteAddrs)
@ -1435,60 +1394,29 @@ func (pool *LegacyPool) reset(oldHead, newHead *types.Header) {
// future queue to the set of pending transactions. During this process, all // future queue to the set of pending transactions. During this process, all
// invalidated transactions (low nonce, low balance) are deleted. // invalidated transactions (low nonce, low balance) are deleted.
func (pool *LegacyPool) promoteExecutables(accounts []common.Address) []*types.Transaction { func (pool *LegacyPool) promoteExecutables(accounts []common.Address) []*types.Transaction {
// Track the promoted transactions to broadcast them at once
var promoted []*types.Transaction
// Iterate over all accounts and promote any executable transactions
gasLimit := pool.currentHead.Load().GasLimit gasLimit := pool.currentHead.Load().GasLimit
for _, addr := range accounts { promotable, dropped, removedAddresses := pool.queue.promoteExecutables(accounts, gasLimit, pool.currentState, pool.pendingNonces)
list := pool.queue[addr]
if list == nil {
continue // Just in case someone calls with a non existing account
}
// Drop all transactions that are deemed too old (low nonce)
forwards := list.Forward(pool.currentState.GetNonce(addr))
for _, tx := range forwards {
pool.all.Remove(tx.Hash())
}
log.Trace("Removed old queued transactions", "count", len(forwards))
// Drop all transactions that are too costly (low balance or out of gas)
drops, _ := list.Filter(pool.currentState.GetBalance(addr), gasLimit)
for _, tx := range drops {
pool.all.Remove(tx.Hash())
}
log.Trace("Removed unpayable queued transactions", "count", len(drops))
queuedNofundsMeter.Mark(int64(len(drops)))
// Gather all executable transactions and promote them // promote all promotable transactions
readies := list.Ready(pool.pendingNonces.get(addr)) promoted := make([]*types.Transaction, 0, len(promotable))
for _, tx := range readies { for _, tx := range promotable {
hash := tx.Hash() from, _ := pool.signer.Sender(tx)
if pool.promoteTx(addr, hash, tx) { if pool.promoteTx(from, tx.Hash(), tx) {
promoted = append(promoted, tx) promoted = append(promoted, tx)
}
} }
log.Trace("Promoted queued transactions", "count", len(promoted)) }
queuedGauge.Dec(int64(len(readies)))
// Drop all transactions over the allowed limit // remove all removable transactions
var caps = list.Cap(int(pool.config.AccountQueue)) for _, hash := range dropped {
for _, tx := range caps { pool.all.Remove(hash)
hash := tx.Hash() }
pool.all.Remove(hash) pool.priced.Removed(len(dropped))
log.Trace("Removed cap-exceeding queued transaction", "hash", hash)
}
queuedRateLimitMeter.Mark(int64(len(caps)))
// Mark all the items dropped as removed
pool.priced.Removed(len(forwards) + len(drops) + len(caps))
queuedGauge.Dec(int64(len(forwards) + len(drops) + len(caps)))
// Delete the entire queue entry if it became empty. // release all accounts that have no more transactions in the pool
if list.Empty() { for _, addr := range removedAddresses {
delete(pool.queue, addr) _, hasPending := pool.pending[addr]
delete(pool.beats, addr) if !hasPending {
if _, ok := pool.pending[addr]; !ok { pool.reserver.Release(addr)
pool.reserver.Release(addr)
}
} }
} }
return promoted return promoted
@ -1578,43 +1506,18 @@ func (pool *LegacyPool) truncatePending() {
// truncateQueue drops the oldest transactions in the queue if the pool is above the global queue limit. // truncateQueue drops the oldest transactions in the queue if the pool is above the global queue limit.
func (pool *LegacyPool) truncateQueue() { func (pool *LegacyPool) truncateQueue() {
queued := uint64(0) removed, removedAddresses := pool.queue.truncate()
for _, list := range pool.queue {
queued += uint64(list.Len()) // Remove all removable transactions from the lookup and global price list
} for _, hash := range removed {
if queued <= pool.config.GlobalQueue { pool.all.Remove(hash)
return
} }
pool.priced.Removed(len(removed))
// Sort all accounts with queued transactions by heartbeat for _, addr := range removedAddresses {
addresses := make(addressesByHeartbeat, 0, len(pool.queue)) _, hasPending := pool.pending[addr]
for addr := range pool.queue { if !hasPending {
addresses = append(addresses, addressByHeartbeat{addr, pool.beats[addr]}) pool.reserver.Release(addr)
}
sort.Sort(sort.Reverse(addresses))
// Drop transactions until the total is below the limit
for drop := queued - pool.config.GlobalQueue; drop > 0 && len(addresses) > 0; {
addr := addresses[len(addresses)-1]
list := pool.queue[addr.address]
addresses = addresses[:len(addresses)-1]
// Drop all transactions if they are less than the overflow
if size := uint64(list.Len()); size <= drop {
for _, tx := range list.Flatten() {
pool.removeTx(tx.Hash(), true, true)
}
drop -= size
queuedRateLimitMeter.Mark(int64(size))
continue
}
// Otherwise drop only last few transactions
txs := list.Flatten()
for i := len(txs) - 1; i >= 0 && drop > 0; i-- {
pool.removeTx(txs[i].Hash(), true, true)
drop--
queuedRateLimitMeter.Mark(1)
} }
} }
} }
@ -1672,25 +1575,13 @@ func (pool *LegacyPool) demoteUnexecutables() {
// Delete the entire pending entry if it became empty. // Delete the entire pending entry if it became empty.
if list.Empty() { if list.Empty() {
delete(pool.pending, addr) delete(pool.pending, addr)
if _, ok := pool.queue[addr]; !ok { if _, ok := pool.queue.get(addr); !ok {
pool.reserver.Release(addr) pool.reserver.Release(addr)
} }
} }
} }
} }
// addressByHeartbeat is an account address tagged with its last activity timestamp.
type addressByHeartbeat struct {
address common.Address
heartbeat time.Time
}
type addressesByHeartbeat []addressByHeartbeat
func (a addressesByHeartbeat) Len() int { return len(a) }
func (a addressesByHeartbeat) Less(i, j int) bool { return a[i].heartbeat.Before(a[j].heartbeat) }
func (a addressesByHeartbeat) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
// accountSet is simply a set of addresses to check for existence, and a signer // accountSet is simply a set of addresses to check for existence, and a signer
// capable of deriving addresses from transactions. // capable of deriving addresses from transactions.
type accountSet struct { type accountSet struct {
@ -1931,17 +1822,17 @@ func (pool *LegacyPool) Clear() {
// acquire the subpool lock until the transaction addition is completed. // acquire the subpool lock until the transaction addition is completed.
for addr := range pool.pending { for addr := range pool.pending {
if _, ok := pool.queue[addr]; !ok { if _, ok := pool.queue.get(addr); !ok {
pool.reserver.Release(addr) pool.reserver.Release(addr)
} }
} }
for addr := range pool.queue { for _, addr := range pool.queue.addresses() {
pool.reserver.Release(addr) pool.reserver.Release(addr)
} }
pool.all.Clear() pool.all.Clear()
pool.priced.Reheap() pool.priced.Reheap()
pool.pending = make(map[common.Address]*list) pool.pending = make(map[common.Address]*list)
pool.queue = make(map[common.Address]*list) pool.queue = newQueue(pool.config, pool.signer)
pool.pendingNonces = newNoncer(pool.currentState) pool.pendingNonces = newNoncer(pool.currentState)
} }

View file

@ -466,8 +466,8 @@ func TestQueue(t *testing.T) {
if _, ok := pool.pending[from].txs.items[tx.Nonce()]; ok { if _, ok := pool.pending[from].txs.items[tx.Nonce()]; ok {
t.Error("expected transaction to be in tx pool") t.Error("expected transaction to be in tx pool")
} }
if len(pool.queue) > 0 { if len(pool.queue.queued) > 0 {
t.Error("expected transaction queue to be empty. is", len(pool.queue)) t.Error("expected transaction queue to be empty. is", len(pool.queue.queued))
} }
} }
@ -492,8 +492,8 @@ func TestQueue2(t *testing.T) {
if len(pool.pending) != 1 { if len(pool.pending) != 1 {
t.Error("expected pending length to be 1, got", len(pool.pending)) t.Error("expected pending length to be 1, got", len(pool.pending))
} }
if pool.queue[from].Len() != 2 { if list, _ := pool.queue.get(from); list.Len() != 2 {
t.Error("expected len(queue) == 2, got", pool.queue[from].Len()) t.Error("expected len(queue) == 2, got", list.Len())
} }
} }
@ -639,8 +639,8 @@ func TestMissingNonce(t *testing.T) {
if len(pool.pending) != 0 { if len(pool.pending) != 0 {
t.Error("expected 0 pending transactions, got", len(pool.pending)) t.Error("expected 0 pending transactions, got", len(pool.pending))
} }
if pool.queue[addr].Len() != 1 { if list, _ := pool.queue.get(addr); list.Len() != 1 {
t.Error("expected 1 queued transaction, got", pool.queue[addr].Len()) t.Error("expected 1 queued transaction, got", list.Len())
} }
if pool.all.Count() != 1 { if pool.all.Count() != 1 {
t.Error("expected 1 total transactions, got", pool.all.Count()) t.Error("expected 1 total transactions, got", pool.all.Count())
@ -712,8 +712,8 @@ func TestDropping(t *testing.T) {
if pool.pending[account].Len() != 3 { if pool.pending[account].Len() != 3 {
t.Errorf("pending transaction mismatch: have %d, want %d", pool.pending[account].Len(), 3) t.Errorf("pending transaction mismatch: have %d, want %d", pool.pending[account].Len(), 3)
} }
if pool.queue[account].Len() != 3 { if list, _ := pool.queue.get(account); list.Len() != 3 {
t.Errorf("queued transaction mismatch: have %d, want %d", pool.queue[account].Len(), 3) t.Errorf("queued transaction mismatch: have %d, want %d", list.Len(), 3)
} }
if pool.all.Count() != 6 { if pool.all.Count() != 6 {
t.Errorf("total transaction mismatch: have %d, want %d", pool.all.Count(), 6) t.Errorf("total transaction mismatch: have %d, want %d", pool.all.Count(), 6)
@ -722,8 +722,8 @@ func TestDropping(t *testing.T) {
if pool.pending[account].Len() != 3 { if pool.pending[account].Len() != 3 {
t.Errorf("pending transaction mismatch: have %d, want %d", pool.pending[account].Len(), 3) t.Errorf("pending transaction mismatch: have %d, want %d", pool.pending[account].Len(), 3)
} }
if pool.queue[account].Len() != 3 { if list, _ := pool.queue.get(account); list.Len() != 3 {
t.Errorf("queued transaction mismatch: have %d, want %d", pool.queue[account].Len(), 3) t.Errorf("queued transaction mismatch: have %d, want %d", list.Len(), 3)
} }
if pool.all.Count() != 6 { if pool.all.Count() != 6 {
t.Errorf("total transaction mismatch: have %d, want %d", pool.all.Count(), 6) t.Errorf("total transaction mismatch: have %d, want %d", pool.all.Count(), 6)
@ -741,13 +741,14 @@ func TestDropping(t *testing.T) {
if _, ok := pool.pending[account].txs.items[tx2.Nonce()]; ok { if _, ok := pool.pending[account].txs.items[tx2.Nonce()]; ok {
t.Errorf("out-of-fund pending transaction present: %v", tx1) t.Errorf("out-of-fund pending transaction present: %v", tx1)
} }
if _, ok := pool.queue[account].txs.items[tx10.Nonce()]; !ok { list, _ := pool.queue.get(account)
if _, ok := list.txs.items[tx10.Nonce()]; !ok {
t.Errorf("funded queued transaction missing: %v", tx10) t.Errorf("funded queued transaction missing: %v", tx10)
} }
if _, ok := pool.queue[account].txs.items[tx11.Nonce()]; !ok { if _, ok := list.txs.items[tx11.Nonce()]; !ok {
t.Errorf("funded queued transaction missing: %v", tx10) t.Errorf("funded queued transaction missing: %v", tx10)
} }
if _, ok := pool.queue[account].txs.items[tx12.Nonce()]; ok { if _, ok := list.txs.items[tx12.Nonce()]; ok {
t.Errorf("out-of-fund queued transaction present: %v", tx11) t.Errorf("out-of-fund queued transaction present: %v", tx11)
} }
if pool.all.Count() != 4 { if pool.all.Count() != 4 {
@ -763,10 +764,11 @@ func TestDropping(t *testing.T) {
if _, ok := pool.pending[account].txs.items[tx1.Nonce()]; ok { if _, ok := pool.pending[account].txs.items[tx1.Nonce()]; ok {
t.Errorf("over-gased pending transaction present: %v", tx1) t.Errorf("over-gased pending transaction present: %v", tx1)
} }
if _, ok := pool.queue[account].txs.items[tx10.Nonce()]; !ok { list, _ = pool.queue.get(account)
if _, ok := list.txs.items[tx10.Nonce()]; !ok {
t.Errorf("funded queued transaction missing: %v", tx10) t.Errorf("funded queued transaction missing: %v", tx10)
} }
if _, ok := pool.queue[account].txs.items[tx11.Nonce()]; ok { if _, ok := list.txs.items[tx11.Nonce()]; ok {
t.Errorf("over-gased queued transaction present: %v", tx11) t.Errorf("over-gased queued transaction present: %v", tx11)
} }
if pool.all.Count() != 2 { if pool.all.Count() != 2 {
@ -820,8 +822,8 @@ func TestPostponing(t *testing.T) {
if pending := pool.pending[accs[0]].Len() + pool.pending[accs[1]].Len(); pending != len(txs) { if pending := pool.pending[accs[0]].Len() + pool.pending[accs[1]].Len(); pending != len(txs) {
t.Errorf("pending transaction mismatch: have %d, want %d", pending, len(txs)) t.Errorf("pending transaction mismatch: have %d, want %d", pending, len(txs))
} }
if len(pool.queue) != 0 { if len(pool.queue.addresses()) != 0 {
t.Errorf("queued accounts mismatch: have %d, want %d", len(pool.queue), 0) t.Errorf("queued accounts mismatch: have %d, want %d", len(pool.queue.addresses()), 0)
} }
if pool.all.Count() != len(txs) { if pool.all.Count() != len(txs) {
t.Errorf("total transaction mismatch: have %d, want %d", pool.all.Count(), len(txs)) t.Errorf("total transaction mismatch: have %d, want %d", pool.all.Count(), len(txs))
@ -830,8 +832,8 @@ func TestPostponing(t *testing.T) {
if pending := pool.pending[accs[0]].Len() + pool.pending[accs[1]].Len(); pending != len(txs) { if pending := pool.pending[accs[0]].Len() + pool.pending[accs[1]].Len(); pending != len(txs) {
t.Errorf("pending transaction mismatch: have %d, want %d", pending, len(txs)) t.Errorf("pending transaction mismatch: have %d, want %d", pending, len(txs))
} }
if len(pool.queue) != 0 { if len(pool.queue.addresses()) != 0 {
t.Errorf("queued accounts mismatch: have %d, want %d", len(pool.queue), 0) t.Errorf("queued accounts mismatch: have %d, want %d", len(pool.queue.addresses()), 0)
} }
if pool.all.Count() != len(txs) { if pool.all.Count() != len(txs) {
t.Errorf("total transaction mismatch: have %d, want %d", pool.all.Count(), len(txs)) t.Errorf("total transaction mismatch: have %d, want %d", pool.all.Count(), len(txs))
@ -847,7 +849,8 @@ func TestPostponing(t *testing.T) {
if _, ok := pool.pending[accs[0]].txs.items[txs[0].Nonce()]; !ok { if _, ok := pool.pending[accs[0]].txs.items[txs[0].Nonce()]; !ok {
t.Errorf("tx %d: valid and funded transaction missing from pending pool: %v", 0, txs[0]) t.Errorf("tx %d: valid and funded transaction missing from pending pool: %v", 0, txs[0])
} }
if _, ok := pool.queue[accs[0]].txs.items[txs[0].Nonce()]; ok { list, _ := pool.queue.get(accs[0])
if _, ok := list.txs.items[txs[0].Nonce()]; ok {
t.Errorf("tx %d: valid and funded transaction present in future queue: %v", 0, txs[0]) t.Errorf("tx %d: valid and funded transaction present in future queue: %v", 0, txs[0])
} }
for i, tx := range txs[1:100] { for i, tx := range txs[1:100] {
@ -855,14 +858,14 @@ func TestPostponing(t *testing.T) {
if _, ok := pool.pending[accs[0]].txs.items[tx.Nonce()]; ok { if _, ok := pool.pending[accs[0]].txs.items[tx.Nonce()]; ok {
t.Errorf("tx %d: valid but future transaction present in pending pool: %v", i+1, tx) t.Errorf("tx %d: valid but future transaction present in pending pool: %v", i+1, tx)
} }
if _, ok := pool.queue[accs[0]].txs.items[tx.Nonce()]; !ok { if _, ok := list.txs.items[tx.Nonce()]; !ok {
t.Errorf("tx %d: valid but future transaction missing from future queue: %v", i+1, tx) t.Errorf("tx %d: valid but future transaction missing from future queue: %v", i+1, tx)
} }
} else { } else {
if _, ok := pool.pending[accs[0]].txs.items[tx.Nonce()]; ok { if _, ok := pool.pending[accs[0]].txs.items[tx.Nonce()]; ok {
t.Errorf("tx %d: out-of-fund transaction present in pending pool: %v", i+1, tx) t.Errorf("tx %d: out-of-fund transaction present in pending pool: %v", i+1, tx)
} }
if _, ok := pool.queue[accs[0]].txs.items[tx.Nonce()]; ok { if _, ok := list.txs.items[tx.Nonce()]; ok {
t.Errorf("tx %d: out-of-fund transaction present in future queue: %v", i+1, tx) t.Errorf("tx %d: out-of-fund transaction present in future queue: %v", i+1, tx)
} }
} }
@ -872,13 +875,14 @@ func TestPostponing(t *testing.T) {
if pool.pending[accs[1]] != nil { if pool.pending[accs[1]] != nil {
t.Errorf("invalidated account still has pending transactions") t.Errorf("invalidated account still has pending transactions")
} }
list, _ = pool.queue.get(accs[1])
for i, tx := range txs[100:] { for i, tx := range txs[100:] {
if i%2 == 1 { if i%2 == 1 {
if _, ok := pool.queue[accs[1]].txs.items[tx.Nonce()]; !ok { if _, ok := list.txs.items[tx.Nonce()]; !ok {
t.Errorf("tx %d: valid but future transaction missing from future queue: %v", 100+i, tx) t.Errorf("tx %d: valid but future transaction missing from future queue: %v", 100+i, tx)
} }
} else { } else {
if _, ok := pool.queue[accs[1]].txs.items[tx.Nonce()]; ok { if _, ok := list.txs.items[tx.Nonce()]; ok {
t.Errorf("tx %d: out-of-fund transaction present in future queue: %v", 100+i, tx) t.Errorf("tx %d: out-of-fund transaction present in future queue: %v", 100+i, tx)
} }
} }
@ -963,13 +967,14 @@ func TestQueueAccountLimiting(t *testing.T) {
if len(pool.pending) != 0 { if len(pool.pending) != 0 {
t.Errorf("tx %d: pending pool size mismatch: have %d, want %d", i, len(pool.pending), 0) t.Errorf("tx %d: pending pool size mismatch: have %d, want %d", i, len(pool.pending), 0)
} }
list, _ := pool.queue.get(account)
if i <= testTxPoolConfig.AccountQueue { if i <= testTxPoolConfig.AccountQueue {
if pool.queue[account].Len() != int(i) { if list.Len() != int(i) {
t.Errorf("tx %d: queue size mismatch: have %d, want %d", i, pool.queue[account].Len(), i) t.Errorf("tx %d: queue size mismatch: have %d, want %d", i, list.Len(), i)
} }
} else { } else {
if pool.queue[account].Len() != int(testTxPoolConfig.AccountQueue) { if list.Len() != int(testTxPoolConfig.AccountQueue) {
t.Errorf("tx %d: queue limit mismatch: have %d, want %d", i, pool.queue[account].Len(), testTxPoolConfig.AccountQueue) t.Errorf("tx %d: queue limit mismatch: have %d, want %d", i, list.Len(), testTxPoolConfig.AccountQueue)
} }
} }
} }
@ -1020,7 +1025,7 @@ func TestQueueGlobalLimiting(t *testing.T) {
pool.addRemotesSync(txs) pool.addRemotesSync(txs)
queued := 0 queued := 0
for addr, list := range pool.queue { for addr, list := range pool.queue.queued {
if list.Len() > int(config.AccountQueue) { if list.Len() > int(config.AccountQueue) {
t.Errorf("addr %x: queued accounts overflown allowance: %d > %d", addr, list.Len(), config.AccountQueue) t.Errorf("addr %x: queued accounts overflown allowance: %d > %d", addr, list.Len(), config.AccountQueue)
} }
@ -1179,8 +1184,8 @@ func TestPendingLimiting(t *testing.T) {
if pool.pending[account].Len() != int(i)+1 { if pool.pending[account].Len() != int(i)+1 {
t.Errorf("tx %d: pending pool size mismatch: have %d, want %d", i, pool.pending[account].Len(), i+1) t.Errorf("tx %d: pending pool size mismatch: have %d, want %d", i, pool.pending[account].Len(), i+1)
} }
if len(pool.queue) != 0 { if len(pool.queue.addresses()) != 0 {
t.Errorf("tx %d: queue size mismatch: have %d, want %d", i, pool.queue[account].Len(), 0) t.Errorf("tx %d: queue size mismatch: have %d, want %d", i, len(pool.queue.addresses()), 0)
} }
} }
if pool.all.Count() != int(testTxPoolConfig.AccountQueue+5) { if pool.all.Count() != int(testTxPoolConfig.AccountQueue+5) {

View file

@ -0,0 +1,275 @@
// Copyright 2025 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package legacypool
import (
"sort"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/txpool"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/log"
)
// queue manages nonce-gapped transactions that have been validated but are
// not yet processable.
type queue struct {
	config Config       // Pool configuration limits (account/global queue caps, lifetime, price bump)
	signer types.Signer // Signer used to recover the sender of queued transactions
	queued map[common.Address]*list // Queued but non-processable transactions
	beats map[common.Address]time.Time // Last heartbeat from each known account
}
// newQueue constructs an empty queue using the given pool configuration and
// transaction signer.
func newQueue(config Config, signer types.Signer) *queue {
	q := &queue{
		config: config,
		signer: signer,
	}
	q.queued = make(map[common.Address]*list)
	q.beats = make(map[common.Address]time.Time)
	return q
}
// evictList returns the hashes of all queued transactions belonging to
// accounts whose last heartbeat is older than the configured lifetime.
// The number of evictable transactions is reported on queuedEvictionMeter.
func (q *queue) evictList() []common.Hash {
	var stale []common.Hash
	for addr, txs := range q.queued {
		if time.Since(q.beats[addr]) <= q.config.Lifetime {
			continue
		}
		for _, tx := range txs.Flatten() {
			stale = append(stale, tx.Hash())
		}
	}
	queuedEvictionMeter.Mark(int64(len(stale)))
	return stale
}
// stats returns the total number of queued transactions across all accounts.
func (q *queue) stats() int {
	total := 0
	for _, txs := range q.queued {
		total += txs.Len()
	}
	return total
}
// content returns a snapshot of all queued transactions, grouped by sender
// account and sorted by nonce.
func (q *queue) content() map[common.Address][]*types.Transaction {
	snapshot := make(map[common.Address][]*types.Transaction, len(q.queued))
	for addr, txs := range q.queued {
		snapshot[addr] = txs.Flatten()
	}
	return snapshot
}
// contentFrom returns the queued transactions of a single account, sorted by
// nonce, or nil if the account has nothing queued.
func (q *queue) contentFrom(addr common.Address) []*types.Transaction {
	txs, ok := q.get(addr)
	if !ok {
		return nil
	}
	return txs.Flatten()
}
// get returns the queued transaction list of the given account, along with a
// flag reporting whether the account has anything queued at all.
func (q *queue) get(addr common.Address) (*list, bool) {
	if txs, ok := q.queued[addr]; ok {
		return txs, true
	}
	return nil, false
}
// bump records fresh activity for the given account by resetting its
// heartbeat to the current time, postponing eviction of its queued
// transactions (see evictList).
func (q *queue) bump(addr common.Address) {
	q.beats[addr] = time.Now()
}
// addresses returns every account that currently has queued transactions.
// The order of the returned slice is unspecified (map iteration order).
func (q *queue) addresses() []common.Address {
	out := make([]common.Address, 0, len(q.queued))
	for addr := range q.queued {
		out = append(out, addr)
	}
	return out
}
// remove deletes the given transaction from the account's queue, updating the
// queued gauge and dropping the account entirely if its queue becomes empty.
// If a different transaction occupies the same nonce slot, the call is a no-op.
func (q *queue) remove(addr common.Address, tx *types.Transaction) {
	future := q.queued[addr]
	if future == nil {
		return
	}
	// Edge case: a different transaction with the same nonce is queued for
	// this account; leave it untouched.
	if queued := future.txs.Get(tx.Nonce()); queued != nil && queued.Hash() != tx.Hash() {
		return
	}
	if dropped, _ := future.Remove(tx); dropped {
		// Reduce the queued counter
		queuedGauge.Dec(1)
	}
	if future.Empty() {
		delete(q.queued, addr)
		delete(q.beats, addr)
	}
}
// add inserts a transaction into the future queue of its sender. It returns
// the hash of any replaced transaction, or txpool.ErrReplaceUnderpriced if a
// queued transaction with the same nonce could not be displaced.
func (q *queue) add(tx *types.Transaction) (*common.Hash, error) {
	// Resolve the sender; the signature was already validated upstream.
	from, _ := types.Sender(q.signer, tx)

	txs := q.queued[from]
	if txs == nil {
		txs = newList(false)
		q.queued[from] = txs
	}
	inserted, old := txs.Add(tx, q.config.PriceBump)
	if !inserted {
		// An older transaction was better, discard this one.
		queuedDiscardMeter.Mark(1)
		return nil, txpool.ErrReplaceUnderpriced
	}
	// Start the heartbeat clock for accounts seen for the first time.
	if _, exist := q.beats[from]; !exist {
		q.beats[from] = time.Now()
	}
	if old == nil {
		// Nothing was replaced, bump the queued counter.
		queuedGauge.Inc(1)
		return nil, nil
	}
	// A transaction was replaced; report it and hand back its hash.
	queuedReplaceMeter.Mark(1)
	replaced := old.Hash()
	return &replaced, nil
}
// promoteExecutables iterates over all accounts with queued transactions, selecting
// for promotion any that are now executable. It also drops any transactions that are
// deemed too old (nonce too low) or too costly (insufficient funds or over gas limit).
//
// Returns three lists:
//   - all transactions that were removed from the queue and selected for promotion;
//   - all other transactions that were removed from the queue and dropped;
//   - the list of addresses removed.
func (q *queue) promoteExecutables(accounts []common.Address, gasLimit uint64, currentState *state.StateDB, nonces *noncer) ([]*types.Transaction, []common.Hash, []common.Address) {
	// Track the promotable transactions to broadcast them at once
	var (
		promotable       []*types.Transaction
		dropped          []common.Hash
		removedAddresses []common.Address
	)
	// Iterate over all accounts and promote any executable transactions
	for _, addr := range accounts {
		list := q.queued[addr]
		if list == nil {
			continue // Just in case someone calls with a non existing account
		}
		// Drop all transactions that are deemed too old (low nonce)
		forwards := list.Forward(currentState.GetNonce(addr))
		for _, tx := range forwards {
			dropped = append(dropped, tx.Hash())
		}
		log.Trace("Removing old queued transactions", "count", len(forwards))
		// Drop all transactions that are too costly (low balance or out of gas)
		drops, _ := list.Filter(currentState.GetBalance(addr), gasLimit)
		for _, tx := range drops {
			dropped = append(dropped, tx.Hash())
		}
		log.Trace("Removing unpayable queued transactions", "count", len(drops))
		queuedNofundsMeter.Mark(int64(len(drops)))
		// Gather all executable transactions and promote them
		readies := list.Ready(nonces.get(addr))
		promotable = append(promotable, readies...)
		// Log the per-account promotion count, not the running total across
		// all accounts.
		log.Trace("Promoting queued transactions", "count", len(readies))
		queuedGauge.Dec(int64(len(readies)))
		// Drop all transactions over the allowed limit
		var caps = list.Cap(int(q.config.AccountQueue))
		for _, tx := range caps {
			hash := tx.Hash()
			dropped = append(dropped, hash)
			log.Trace("Removing cap-exceeding queued transaction", "hash", hash)
		}
		queuedRateLimitMeter.Mark(int64(len(caps)))
		// Delete the entire queue entry if it became empty.
		if list.Empty() {
			delete(q.queued, addr)
			delete(q.beats, addr)
			removedAddresses = append(removedAddresses, addr)
		}
	}
	// Every dropped transaction (forwards, drops and caps) has left the queue,
	// so decrement the gauge once for all of them.
	queuedGauge.Dec(int64(len(dropped)))
	return promotable, dropped, removedAddresses
}
// truncate drops the oldest transactions from the queue until the total
// number is below the configured limit. Returns the hashes of all dropped
// transactions and the addresses of accounts that became empty due to
// the truncation.
func (q *queue) truncate() ([]common.Hash, []common.Address) {
	// Tally the current queue size; nothing to do while under the global cap.
	queued := uint64(0)
	for _, list := range q.queued {
		queued += uint64(list.Len())
	}
	if queued <= q.config.GlobalQueue {
		return nil, nil
	}
	// Sort all accounts with queued transactions by heartbeat
	addresses := make(addressesByHeartbeat, 0, len(q.queued))
	for addr := range q.queued {
		addresses = append(addresses, addressByHeartbeat{addr, q.beats[addr]})
	}
	// Reverse order puts the least recently active account at the tail, so it
	// is popped and truncated first below.
	sort.Sort(sort.Reverse(addresses))
	// Drop transactions until the total is below the limit
	var (
		removed          = make([]common.Hash, 0)
		removedAddresses = make([]common.Address, 0)
	)
	for drop := queued - q.config.GlobalQueue; drop > 0 && len(addresses) > 0; {
		// Pop the staleest remaining account off the tail.
		addr := addresses[len(addresses)-1]
		list := q.queued[addr.address]
		addresses = addresses[:len(addresses)-1]
		// Drop all transactions if they are less than the overflow
		if size := uint64(list.Len()); size <= drop {
			for _, tx := range list.Flatten() {
				q.remove(addr.address, tx)
				removed = append(removed, tx.Hash())
			}
			drop -= size
			queuedRateLimitMeter.Mark(int64(size))
			removedAddresses = append(removedAddresses, addr.address)
			continue
		}
		// Otherwise drop only last few transactions
		// (highest nonces go first so no gap is opened at the front).
		txs := list.Flatten()
		for i := len(txs) - 1; i >= 0 && drop > 0; i-- {
			q.remove(addr.address, txs[i])
			removed = append(removed, txs[i].Hash())
			drop--
			queuedRateLimitMeter.Mark(1)
		}
	}
	// No need to clear empty accounts, remove already does that
	return removed, removedAddresses
}
// addressByHeartbeat is an account address tagged with its last activity timestamp.
type addressByHeartbeat struct {
	address   common.Address
	heartbeat time.Time
}

// addressesByHeartbeat implements sort.Interface, ordering accounts from the
// least recently active to the most recently active heartbeat.
type addressesByHeartbeat []addressByHeartbeat

func (a addressesByHeartbeat) Len() int           { return len(a) }
func (a addressesByHeartbeat) Less(i, j int) bool { return a[i].heartbeat.Before(a[j].heartbeat) }
func (a addressesByHeartbeat) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }

View file

@ -65,7 +65,6 @@ type BlockChain interface {
type TxPool struct { type TxPool struct {
subpools []SubPool // List of subpools for specialized transaction handling subpools []SubPool // List of subpools for specialized transaction handling
chain BlockChain chain BlockChain
signer types.Signer
stateLock sync.RWMutex // The lock for protecting state instance stateLock sync.RWMutex // The lock for protecting state instance
state *state.StateDB // Current state at the blockchain head state *state.StateDB // Current state at the blockchain head
@ -98,7 +97,6 @@ func New(gasTip uint64, chain BlockChain, subpools []SubPool) (*TxPool, error) {
pool := &TxPool{ pool := &TxPool{
subpools: subpools, subpools: subpools,
chain: chain, chain: chain,
signer: types.LatestSigner(chain.Config()),
state: statedb, state: statedb,
quit: make(chan chan error), quit: make(chan chan error),
term: make(chan struct{}), term: make(chan struct{}),

View file

@ -240,7 +240,7 @@ type extblock struct {
// //
// The receipt's bloom must already calculated for the block's bloom to be // The receipt's bloom must already calculated for the block's bloom to be
// correctly calculated. // correctly calculated.
func NewBlock(header *Header, body *Body, receipts []*Receipt, hasher TrieHasher) *Block { func NewBlock(header *Header, body *Body, receipts []*Receipt, hasher ListHasher) *Block {
if body == nil { if body == nil {
body = &Body{} body = &Body{}
} }

View file

@ -21,6 +21,7 @@ import (
"fmt" "fmt"
"math/big" "math/big"
"github.com/ethereum/go-ethereum/common/bitutil"
"github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto"
) )
@ -125,9 +126,7 @@ func MergeBloom(receipts Receipts) Bloom {
for _, receipt := range receipts { for _, receipt := range receipts {
if len(receipt.Logs) != 0 { if len(receipt.Logs) != 0 {
bl := receipt.Bloom.Bytes() bl := receipt.Bloom.Bytes()
for i := range bin { bitutil.ORBytes(bin[:], bin[:], bl)
bin[i] |= bl[i]
}
} }
} }
return bin return bin

View file

@ -27,7 +27,7 @@ import (
"github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/rlp"
) )
// hasherPool holds LegacyKeccak256 hashers for rlpHash. // hasherPool holds LegacyKeccak256 buffer for rlpHash.
var hasherPool = sync.Pool{ var hasherPool = sync.Pool{
New: func() interface{} { return crypto.NewKeccakState() }, New: func() interface{} { return crypto.NewKeccakState() },
} }
@ -75,11 +75,17 @@ func prefixedRlpHash(prefix byte, x interface{}) (h common.Hash) {
return h return h
} }
// TrieHasher is the tool used to calculate the hash of derivable list. // ListHasher defines the interface for computing the hash of a derivable list.
// This is internal, do not use. type ListHasher interface {
type TrieHasher interface { // Reset clears the internal state of the hasher, preparing it for reuse.
Reset() Reset()
Update([]byte, []byte) error
// Update inserts the given key-value pair into the hasher.
// The implementation must copy the provided slices, allowing the caller
// to safely modify them after the call returns.
Update(key []byte, value []byte) error
// Hash computes and returns the final hash of all inserted key-value pairs.
Hash() common.Hash Hash() common.Hash
} }
@ -91,19 +97,20 @@ type DerivableList interface {
EncodeIndex(int, *bytes.Buffer) EncodeIndex(int, *bytes.Buffer)
} }
// encodeForDerive encodes the element in the list at the position i into the buffer.
func encodeForDerive(list DerivableList, i int, buf *bytes.Buffer) []byte { func encodeForDerive(list DerivableList, i int, buf *bytes.Buffer) []byte {
buf.Reset() buf.Reset()
list.EncodeIndex(i, buf) list.EncodeIndex(i, buf)
// It's really unfortunate that we need to perform this copy. return buf.Bytes()
// StackTrie holds onto the values until Hash is called, so the values
// written to it must not alias.
return common.CopyBytes(buf.Bytes())
} }
// DeriveSha creates the tree hashes of transactions, receipts, and withdrawals in a block header. // DeriveSha creates the tree hashes of transactions, receipts, and withdrawals in a block header.
func DeriveSha(list DerivableList, hasher TrieHasher) common.Hash { func DeriveSha(list DerivableList, hasher ListHasher) common.Hash {
hasher.Reset() hasher.Reset()
// Allocate a buffer for value encoding. As the hasher is claimed that all
// supplied key value pairs will be copied by hasher and safe to reuse the
// encoding buffer.
valueBuf := encodeBufferPool.Get().(*bytes.Buffer) valueBuf := encodeBufferPool.Get().(*bytes.Buffer)
defer encodeBufferPool.Put(valueBuf) defer encodeBufferPool.Put(valueBuf)

View file

@ -26,12 +26,10 @@ import (
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/trie" "github.com/ethereum/go-ethereum/trie"
"github.com/ethereum/go-ethereum/triedb"
) )
func TestDeriveSha(t *testing.T) { func TestDeriveSha(t *testing.T) {
@ -40,7 +38,7 @@ func TestDeriveSha(t *testing.T) {
t.Fatal(err) t.Fatal(err)
} }
for len(txs) < 1000 { for len(txs) < 1000 {
exp := types.DeriveSha(txs, trie.NewEmpty(triedb.NewDatabase(rawdb.NewMemoryDatabase(), nil))) exp := types.DeriveSha(txs, trie.NewListHasher())
got := types.DeriveSha(txs, trie.NewStackTrie(nil)) got := types.DeriveSha(txs, trie.NewStackTrie(nil))
if !bytes.Equal(got[:], exp[:]) { if !bytes.Equal(got[:], exp[:]) {
t.Fatalf("%d txs: got %x exp %x", len(txs), got, exp) t.Fatalf("%d txs: got %x exp %x", len(txs), got, exp)
@ -76,30 +74,45 @@ func TestEIP2718DeriveSha(t *testing.T) {
} }
} }
// goos: darwin
// goarch: arm64
// pkg: github.com/ethereum/go-ethereum/core/types
// cpu: Apple M1 Pro
// BenchmarkDeriveSha200
// BenchmarkDeriveSha200/std_trie
// BenchmarkDeriveSha200/std_trie-8 6754 174074 ns/op 80054 B/op 1926 allocs/op
// BenchmarkDeriveSha200/stack_trie
// BenchmarkDeriveSha200/stack_trie-8 7296 162675 ns/op 745 B/op 19 allocs/op
func BenchmarkDeriveSha200(b *testing.B) { func BenchmarkDeriveSha200(b *testing.B) {
txs, err := genTxs(200) txs, err := genTxs(200)
if err != nil { if err != nil {
b.Fatal(err) b.Fatal(err)
} }
var exp common.Hash want := types.DeriveSha(txs, trie.NewListHasher())
var got common.Hash
b.Run("std_trie", func(b *testing.B) { b.Run("std_trie", func(b *testing.B) {
b.ReportAllocs() b.ReportAllocs()
var have common.Hash
for b.Loop() { for b.Loop() {
exp = types.DeriveSha(txs, trie.NewEmpty(triedb.NewDatabase(rawdb.NewMemoryDatabase(), nil))) have = types.DeriveSha(txs, trie.NewListHasher())
}
if have != want {
b.Errorf("have %x want %x", have, want)
} }
}) })
st := trie.NewStackTrie(nil)
b.Run("stack_trie", func(b *testing.B) { b.Run("stack_trie", func(b *testing.B) {
b.ResetTimer()
b.ReportAllocs() b.ReportAllocs()
var have common.Hash
for b.Loop() { for b.Loop() {
got = types.DeriveSha(txs, trie.NewStackTrie(nil)) st.Reset()
have = types.DeriveSha(txs, st)
}
if have != want {
b.Errorf("have %x want %x", have, want)
} }
}) })
if got != exp {
b.Errorf("got %x exp %x", got, exp)
}
} }
func TestFuzzDeriveSha(t *testing.T) { func TestFuzzDeriveSha(t *testing.T) {
@ -107,7 +120,7 @@ func TestFuzzDeriveSha(t *testing.T) {
rndSeed := mrand.Int() rndSeed := mrand.Int()
for i := 0; i < 10; i++ { for i := 0; i < 10; i++ {
seed := rndSeed + i seed := rndSeed + i
exp := types.DeriveSha(newDummy(i), trie.NewEmpty(triedb.NewDatabase(rawdb.NewMemoryDatabase(), nil))) exp := types.DeriveSha(newDummy(i), trie.NewListHasher())
got := types.DeriveSha(newDummy(i), trie.NewStackTrie(nil)) got := types.DeriveSha(newDummy(i), trie.NewStackTrie(nil))
if !bytes.Equal(got[:], exp[:]) { if !bytes.Equal(got[:], exp[:]) {
printList(t, newDummy(seed)) printList(t, newDummy(seed))
@ -135,7 +148,7 @@ func TestDerivableList(t *testing.T) {
}, },
} }
for i, tc := range tcs[1:] { for i, tc := range tcs[1:] {
exp := types.DeriveSha(flatList(tc), trie.NewEmpty(triedb.NewDatabase(rawdb.NewMemoryDatabase(), nil))) exp := types.DeriveSha(flatList(tc), trie.NewListHasher())
got := types.DeriveSha(flatList(tc), trie.NewStackTrie(nil)) got := types.DeriveSha(flatList(tc), trie.NewStackTrie(nil))
if !bytes.Equal(got[:], exp[:]) { if !bytes.Equal(got[:], exp[:]) {
t.Fatalf("case %d: got %x exp %x", i, got, exp) t.Fatalf("case %d: got %x exp %x", i, got, exp)

View file

@ -312,6 +312,18 @@ func (d *dummyChain) Config() *params.ChainConfig {
return nil return nil
} }
// CurrentHeader satisfies the chain-reader interface for the test dummy;
// it has no notion of a head and always returns nil.
func (d *dummyChain) CurrentHeader() *types.Header {
	return nil
}
// GetHeaderByNumber looks a header up by number only, delegating to
// GetHeader with a zero hash.
func (d *dummyChain) GetHeaderByNumber(n uint64) *types.Header {
	return d.GetHeader(common.Hash{}, n)
}
// GetHeaderByHash satisfies the chain-reader interface for the test dummy;
// it never resolves headers by hash and always returns nil.
func (d *dummyChain) GetHeaderByHash(h common.Hash) *types.Header {
	return nil
}
// TestBlockhash tests the blockhash operation. It's a bit special, since it internally // TestBlockhash tests the blockhash operation. It's a bit special, since it internally
// requires access to a chain reader. // requires access to a chain reader.
func TestBlockhash(t *testing.T) { func TestBlockhash(t *testing.T) {

View file

@ -47,7 +47,6 @@ import (
// Register adds the engine API to the full node. // Register adds the engine API to the full node.
func Register(stack *node.Node, backend *eth.Ethereum) error { func Register(stack *node.Node, backend *eth.Ethereum) error {
log.Warn("Engine API enabled", "protocol", "eth")
stack.RegisterAPIs([]rpc.API{ stack.RegisterAPIs([]rpc.API{
{ {
Namespace: "engine", Namespace: "engine",
@ -492,6 +491,12 @@ func (api *ConsensusAPI) getPayload(payloadID engine.PayloadID, full bool) (*eng
// Client software MAY return an array of all null entries if syncing or otherwise // Client software MAY return an array of all null entries if syncing or otherwise
// unable to serve blob pool data. // unable to serve blob pool data.
func (api *ConsensusAPI) GetBlobsV1(hashes []common.Hash) ([]*engine.BlobAndProofV1, error) { func (api *ConsensusAPI) GetBlobsV1(hashes []common.Hash) ([]*engine.BlobAndProofV1, error) {
// Reject the request if Osaka has been activated.
// follow https://github.com/ethereum/execution-apis/blob/main/src/engine/osaka.md#cancun-api
head := api.eth.BlockChain().CurrentHeader()
if !api.checkFork(head.Time, forks.Cancun, forks.Prague) {
return nil, unsupportedForkErr("engine_getBlobsV1 is only available at Cancun/Prague fork")
}
if len(hashes) > 128 { if len(hashes) > 128 {
return nil, engine.TooLargeRequest.With(fmt.Errorf("requested blob count too large: %v", len(hashes))) return nil, engine.TooLargeRequest.With(fmt.Errorf("requested blob count too large: %v", len(hashes)))
} }
@ -532,9 +537,6 @@ func (api *ConsensusAPI) GetBlobsV1(hashes []common.Hash) ([]*engine.BlobAndProo
// - if the request is [A_versioned_hash_for_blob_with_blob_proof], the response // - if the request is [A_versioned_hash_for_blob_with_blob_proof], the response
// MUST be null as well. // MUST be null as well.
// //
// Note, geth internally make the conversion from old version to new one, so the
// data will be returned normally.
//
// Client software MUST support request sizes of at least 128 blob versioned // Client software MUST support request sizes of at least 128 blob versioned
// hashes. The client MUST return -38004: Too large request error if the number // hashes. The client MUST return -38004: Too large request error if the number
// of requested blobs is too large. // of requested blobs is too large.
@ -542,6 +544,10 @@ func (api *ConsensusAPI) GetBlobsV1(hashes []common.Hash) ([]*engine.BlobAndProo
// Client software MUST return null if syncing or otherwise unable to serve // Client software MUST return null if syncing or otherwise unable to serve
// blob pool data. // blob pool data.
func (api *ConsensusAPI) GetBlobsV2(hashes []common.Hash) ([]*engine.BlobAndProofV2, error) { func (api *ConsensusAPI) GetBlobsV2(hashes []common.Hash) ([]*engine.BlobAndProofV2, error) {
head := api.eth.BlockChain().CurrentHeader()
if api.config().LatestFork(head.Time) < forks.Osaka {
return nil, unsupportedForkErr("engine_getBlobsV2 is not available before Osaka fork")
}
if len(hashes) > 128 { if len(hashes) > 128 {
return nil, engine.TooLargeRequest.With(fmt.Errorf("requested blob count too large: %v", len(hashes))) return nil, engine.TooLargeRequest.With(fmt.Errorf("requested blob count too large: %v", len(hashes)))
} }

View file

@ -1991,6 +1991,31 @@ func TestGetBlobsV1(t *testing.T) {
} }
} }
// TestGetBlobsV1AfterOsakaFork checks that engine_getBlobsV1 is rejected with
// the -38005 "Unsupported fork" error once the Osaka fork is active.
func TestGetBlobsV1AfterOsakaFork(t *testing.T) {
	genesis := &core.Genesis{
		Config: params.MergedTestChainConfig,
		Alloc: types.GenesisAlloc{testAddr: {Balance: testBalance}},
		Difficulty: common.Big0,
		Timestamp: 1, // Timestamp > 0 to ensure Osaka fork is active
	}
	n, ethServ := startEthService(t, genesis, nil)
	defer n.Close()
	var engineErr *engine.EngineAPIError
	api := newConsensusAPIWithoutHeartbeat(ethServ)
	// Request an arbitrary blob hash; the fork gate must reject the call
	// before any pool lookup happens.
	_, err := api.GetBlobsV1([]common.Hash{testrand.Hash()})
	if !errors.As(err, &engineErr) {
		t.Fatalf("Unexpected error: %T", err)
	} else {
		if engineErr.ErrorCode() != -38005 {
			t.Fatalf("Expected error code -38005, got %d", engineErr.ErrorCode())
		}
		if engineErr.Error() != "Unsupported fork" {
			t.Fatalf("Expected error message 'Unsupported fork', got '%s'", engineErr.Error())
		}
	}
}
func TestGetBlobsV2(t *testing.T) { func TestGetBlobsV2(t *testing.T) {
n, api := newGetBlobEnv(t, 1) n, api := newGetBlobEnv(t, 1)
defer n.Close() defer n.Close()

View file

@ -100,7 +100,7 @@ type SimulatedBeacon struct {
func payloadVersion(config *params.ChainConfig, time uint64) engine.PayloadVersion { func payloadVersion(config *params.ChainConfig, time uint64) engine.PayloadVersion {
switch config.LatestFork(time) { switch config.LatestFork(time) {
case forks.Prague, forks.Cancun: case forks.BPO5, forks.BPO4, forks.BPO3, forks.BPO2, forks.BPO1, forks.Osaka, forks.Prague, forks.Cancun:
return engine.PayloadV3 return engine.PayloadV3
case forks.Paris, forks.Shanghai: case forks.Paris, forks.Shanghai:
return engine.PayloadV2 return engine.PayloadV2

View file

@ -43,6 +43,7 @@ var (
errPendingLogsUnsupported = errors.New("pending logs are not supported") errPendingLogsUnsupported = errors.New("pending logs are not supported")
errExceedMaxTopics = errors.New("exceed max topics") errExceedMaxTopics = errors.New("exceed max topics")
errExceedLogQueryLimit = errors.New("exceed max addresses or topics per search position") errExceedLogQueryLimit = errors.New("exceed max addresses or topics per search position")
errExceedMaxTxHashes = errors.New("exceed max number of transaction hashes allowed per transactionReceipts subscription")
) )
const ( const (
@ -50,6 +51,8 @@ const (
maxTopics = 4 maxTopics = 4
// The maximum number of allowed topics within a topic criteria // The maximum number of allowed topics within a topic criteria
maxSubTopics = 1000 maxSubTopics = 1000
// The maximum number of transaction hash criteria allowed in a single subscription
maxTxHashes = 200
) )
// filter is a helper struct that holds meta information over the filter type // filter is a helper struct that holds meta information over the filter type
@ -140,6 +143,7 @@ func (api *FilterAPI) NewPendingTransactionFilter(fullTx *bool) rpc.ID {
api.filtersMu.Unlock() api.filtersMu.Unlock()
go func() { go func() {
defer pendingTxSub.Unsubscribe()
for { for {
select { select {
case pTx := <-pendingTxs: case pTx := <-pendingTxs:
@ -214,6 +218,7 @@ func (api *FilterAPI) NewBlockFilter() rpc.ID {
api.filtersMu.Unlock() api.filtersMu.Unlock()
go func() { go func() {
defer headerSub.Unsubscribe()
for { for {
select { select {
case h := <-headers: case h := <-headers:
@ -295,6 +300,83 @@ func (api *FilterAPI) Logs(ctx context.Context, crit FilterCriteria) (*rpc.Subsc
return rpcSub, nil return rpcSub, nil
} }
// TransactionReceiptsQuery defines criteria for transaction receipts subscription.
// Same as ethereum.TransactionReceiptsQuery but with UnmarshalJSON() method.
type TransactionReceiptsQuery ethereum.TransactionReceiptsQuery

// UnmarshalJSON decodes the JSON-encoded subscription criteria into args.
func (args *TransactionReceiptsQuery) UnmarshalJSON(data []byte) error {
	var decoded struct {
		TransactionHashes []common.Hash `json:"transactionHashes"`
	}
	if err := json.Unmarshal(data, &decoded); err != nil {
		return err
	}
	args.TransactionHashes = decoded.TransactionHashes
	return nil
}
// TransactionReceipts creates a subscription that fires transaction receipts when transactions are included in blocks.
//
// A nil filter (or an empty hash list) delivers the receipts of every
// transaction in each new block; otherwise only receipts whose hashes appear
// in filter.TransactionHashes are delivered. Supplying more than maxTxHashes
// hashes is rejected with errExceedMaxTxHashes. Receipts for one block are
// sent as a single batched notification.
func (api *FilterAPI) TransactionReceipts(ctx context.Context, filter *TransactionReceiptsQuery) (*rpc.Subscription, error) {
	notifier, supported := rpc.NotifierFromContext(ctx)
	if !supported {
		return &rpc.Subscription{}, rpc.ErrNotificationsUnsupported
	}
	// Validate transaction hashes limit
	if filter != nil && len(filter.TransactionHashes) > maxTxHashes {
		return nil, errExceedMaxTxHashes
	}
	var (
		rpcSub          = notifier.CreateSubscription()
		matchedReceipts = make(chan []*ReceiptWithTx)
		txHashes        []common.Hash
	)
	if filter != nil {
		txHashes = filter.TransactionHashes
	}
	receiptsSub := api.events.SubscribeTransactionReceipts(txHashes, matchedReceipts)
	go func() {
		// Unsubscribe from the event system when the RPC subscription dies,
		// so the event loop stops routing receipts to matchedReceipts.
		defer receiptsSub.Unsubscribe()
		// Signer passed through to MarshalReceipt (presumably used to derive
		// the sender field — see ethapi.MarshalReceipt for details).
		signer := types.LatestSigner(api.sys.backend.ChainConfig())
		for {
			select {
			case receiptsWithTxs := <-matchedReceipts:
				if len(receiptsWithTxs) > 0 {
					// Convert to the same format as eth_getTransactionReceipt
					marshaledReceipts := make([]map[string]interface{}, len(receiptsWithTxs))
					for i, receiptWithTx := range receiptsWithTxs {
						marshaledReceipts[i] = ethapi.MarshalReceipt(
							receiptWithTx.Receipt,
							receiptWithTx.Receipt.BlockHash,
							receiptWithTx.Receipt.BlockNumber.Uint64(),
							signer,
							receiptWithTx.Transaction,
							int(receiptWithTx.Receipt.TransactionIndex),
						)
					}
					// Send a batch of tx receipts in one notification
					notifier.Notify(rpcSub.ID, marshaledReceipts)
				}
			case <-rpcSub.Err():
				// Client disconnected or unsubscribed.
				return
			}
		}
	}()
	return rpcSub, nil
}
// FilterCriteria represents a request to create a new filter. // FilterCriteria represents a request to create a new filter.
// Same as ethereum.FilterQuery but with UnmarshalJSON() method. // Same as ethereum.FilterQuery but with UnmarshalJSON() method.
type FilterCriteria ethereum.FilterQuery type FilterCriteria ethereum.FilterQuery
@ -322,6 +404,7 @@ func (api *FilterAPI) NewFilter(crit FilterCriteria) (rpc.ID, error) {
api.filtersMu.Unlock() api.filtersMu.Unlock()
go func() { go func() {
defer logsSub.Unsubscribe()
for { for {
select { select {
case l := <-logs: case l := <-logs:

View file

@ -25,6 +25,7 @@ import (
"time" "time"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/filtermaps" "github.com/ethereum/go-ethereum/core/filtermaps"
"github.com/ethereum/go-ethereum/core/history" "github.com/ethereum/go-ethereum/core/history"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
@ -551,3 +552,70 @@ func bloomFilter(bloom types.Bloom, addresses []common.Address, topics [][]commo
} }
return true return true
} }
// ReceiptWithTx contains a receipt and its corresponding transaction
type ReceiptWithTx struct {
	Receipt     *types.Receipt     // receipt produced for Transaction
	Transaction *types.Transaction // transaction the receipt belongs to
}
// filterReceipts returns the receipts matching the given criteria
// In addition to returning receipts, it also returns the corresponding transactions.
// This is because receipts only contain low-level data, while user-facing data
// may require additional information from the Transaction.
func filterReceipts(txHashes []common.Hash, ev core.ChainEvent) []*ReceiptWithTx {
	receipts, txs := ev.Receipts, ev.Transactions
	if len(receipts) != len(txs) {
		log.Warn("Receipts and transactions length mismatch", "receipts", len(receipts), "transactions", len(txs))
		return nil
	}
	var matched []*ReceiptWithTx
	switch len(txHashes) {
	case 0:
		// No filter: every receipt is paired with its transaction.
		matched = make([]*ReceiptWithTx, len(receipts))
		for i := range receipts {
			matched[i] = &ReceiptWithTx{
				Receipt:     receipts[i],
				Transaction: txs[i],
			}
		}
	case 1:
		// Common single-hash case: stop scanning at the first match.
		want := txHashes[0]
		for i := range receipts {
			if receipts[i].TxHash == want {
				matched = append(matched, &ReceiptWithTx{
					Receipt:     receipts[i],
					Transaction: txs[i],
				})
				break
			}
		}
	default:
		// Multiple hashes: build a lookup set, bail out early once every
		// requested receipt has been found.
		wanted := make(map[common.Hash]struct{}, len(txHashes))
		for _, h := range txHashes {
			wanted[h] = struct{}{}
		}
		for i := range receipts {
			if _, ok := wanted[receipts[i].TxHash]; ok {
				matched = append(matched, &ReceiptWithTx{
					Receipt:     receipts[i],
					Transaction: txs[i],
				})
				if len(matched) == len(txHashes) {
					break
				}
			}
		}
	}
	return matched
}

View file

@ -158,6 +158,8 @@ const (
PendingTransactionsSubscription PendingTransactionsSubscription
// BlocksSubscription queries hashes for blocks that are imported // BlocksSubscription queries hashes for blocks that are imported
BlocksSubscription BlocksSubscription
// TransactionReceiptsSubscription queries for transaction receipts when transactions are included in blocks
TransactionReceiptsSubscription
// LastIndexSubscription keeps track of the last index // LastIndexSubscription keeps track of the last index
LastIndexSubscription LastIndexSubscription
) )
@ -182,6 +184,8 @@ type subscription struct {
logs chan []*types.Log logs chan []*types.Log
txs chan []*types.Transaction txs chan []*types.Transaction
headers chan *types.Header headers chan *types.Header
receipts chan []*ReceiptWithTx
txHashes []common.Hash // contains transaction hashes for transactionReceipts subscription filtering
installed chan struct{} // closed when the filter is installed installed chan struct{} // closed when the filter is installed
err chan error // closed when the filter is uninstalled err chan error // closed when the filter is uninstalled
} }
@ -268,6 +272,7 @@ func (sub *Subscription) Unsubscribe() {
case <-sub.f.logs: case <-sub.f.logs:
case <-sub.f.txs: case <-sub.f.txs:
case <-sub.f.headers: case <-sub.f.headers:
case <-sub.f.receipts:
} }
} }
@ -353,6 +358,7 @@ func (es *EventSystem) subscribeLogs(crit ethereum.FilterQuery, logs chan []*typ
logs: logs, logs: logs,
txs: make(chan []*types.Transaction), txs: make(chan []*types.Transaction),
headers: make(chan *types.Header), headers: make(chan *types.Header),
receipts: make(chan []*ReceiptWithTx),
installed: make(chan struct{}), installed: make(chan struct{}),
err: make(chan error), err: make(chan error),
} }
@ -369,6 +375,7 @@ func (es *EventSystem) SubscribeNewHeads(headers chan *types.Header) *Subscripti
logs: make(chan []*types.Log), logs: make(chan []*types.Log),
txs: make(chan []*types.Transaction), txs: make(chan []*types.Transaction),
headers: headers, headers: headers,
receipts: make(chan []*ReceiptWithTx),
installed: make(chan struct{}), installed: make(chan struct{}),
err: make(chan error), err: make(chan error),
} }
@ -385,6 +392,26 @@ func (es *EventSystem) SubscribePendingTxs(txs chan []*types.Transaction) *Subsc
logs: make(chan []*types.Log), logs: make(chan []*types.Log),
txs: txs, txs: txs,
headers: make(chan *types.Header), headers: make(chan *types.Header),
receipts: make(chan []*ReceiptWithTx),
installed: make(chan struct{}),
err: make(chan error),
}
return es.subscribe(sub)
}
// SubscribeTransactionReceipts creates a subscription that writes transaction receipts for
// transactions when they are included in blocks. If txHashes is provided, only receipts
// for those specific transaction hashes will be delivered.
func (es *EventSystem) SubscribeTransactionReceipts(txHashes []common.Hash, receipts chan []*ReceiptWithTx) *Subscription {
sub := &subscription{
id: rpc.NewID(),
typ: TransactionReceiptsSubscription,
created: time.Now(),
logs: make(chan []*types.Log),
txs: make(chan []*types.Transaction),
headers: make(chan *types.Header),
receipts: receipts,
txHashes: txHashes,
installed: make(chan struct{}), installed: make(chan struct{}),
err: make(chan error), err: make(chan error),
} }
@ -415,6 +442,14 @@ func (es *EventSystem) handleChainEvent(filters filterIndex, ev core.ChainEvent)
for _, f := range filters[BlocksSubscription] { for _, f := range filters[BlocksSubscription] {
f.headers <- ev.Header f.headers <- ev.Header
} }
// Handle transaction receipts subscriptions when a new block is added
for _, f := range filters[TransactionReceiptsSubscription] {
matchedReceipts := filterReceipts(f.txHashes, ev)
if len(matchedReceipts) > 0 {
f.receipts <- matchedReceipts
}
}
} }
// eventLoop (un)installs filters and processes mux events. // eventLoop (un)installs filters and processes mux events.

View file

@ -31,6 +31,7 @@ import (
"github.com/ethereum/go-ethereum/core/filtermaps" "github.com/ethereum/go-ethereum/core/filtermaps"
"github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/event" "github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/internal/ethapi" "github.com/ethereum/go-ethereum/internal/ethapi"
@ -781,3 +782,143 @@ func TestPendingTxFilterDeadlock(t *testing.T) {
} }
} }
} }
// TestTransactionReceiptsSubscription tests the transaction receipts subscription functionality
//
// It mines one block containing txNum transfers, feeds the resulting
// core.ChainEvent through the test backend's chain feed, and checks that
// subscriptions with no filter, a single-hash filter, and a multi-hash filter
// each receive exactly the expected receipts.
func TestTransactionReceiptsSubscription(t *testing.T) {
	t.Parallel()
	const txNum = 5
	// Setup test environment
	var (
		db           = rawdb.NewMemoryDatabase()
		backend, sys = newTestFilterSystem(db, Config{})
		api          = NewFilterAPI(sys)
		key1, _      = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
		addr1        = crypto.PubkeyToAddress(key1.PublicKey)
		signer       = types.NewLondonSigner(big.NewInt(1))
		genesis      = &core.Genesis{
			Alloc:   types.GenesisAlloc{addr1: {Balance: big.NewInt(1000000000000000000)}}, // 1 ETH
			Config:  params.TestChainConfig,
			BaseFee: big.NewInt(params.InitialBaseFee),
		}
		_, chain, _ = core.GenerateChainWithGenesis(genesis, ethash.NewFaker(), 1, func(i int, gen *core.BlockGen) {
			// Add transactions to the block
			for j := 0; j < txNum; j++ {
				toAddr := common.HexToAddress("0xb794f5ea0ba39494ce83a213fffba74279579268")
				tx, _ := types.SignTx(types.NewTx(&types.LegacyTx{
					Nonce:    uint64(j),
					GasPrice: gen.BaseFee(),
					Gas:      21000,
					To:       &toAddr,
					Value:    big.NewInt(1000),
					Data:     nil,
				}), signer, key1)
				gen.AddTx(tx)
			}
		})
	)
	// Insert the blocks into the chain
	blockchain, err := core.NewBlockChain(db, genesis, ethash.NewFaker(), nil)
	if err != nil {
		t.Fatalf("failed to create tester chain: %v", err)
	}
	if n, err := blockchain.InsertChain(chain); err != nil {
		t.Fatalf("block %d: failed to insert into chain: %v", n, err)
	}
	// Prepare test data. The chain event is sent manually below (rather than
	// relying on blockchain notifications) so the test controls exactly when
	// receipts arrive at the subscription.
	receipts := blockchain.GetReceiptsByHash(chain[0].Hash())
	if receipts == nil {
		t.Fatalf("failed to get receipts")
	}
	chainEvent := core.ChainEvent{
		Header:       chain[0].Header(),
		Receipts:     receipts,
		Transactions: chain[0].Transactions(),
	}
	txHashes := make([]common.Hash, txNum)
	for i := 0; i < txNum; i++ {
		txHashes[i] = chain[0].Transactions()[i].Hash()
	}
	// NOTE(review): expectError is declared but never asserted on below —
	// either wire it up or drop the field.
	testCases := []struct {
		name                    string
		filterTxHashes          []common.Hash
		expectedReceiptTxHashes []common.Hash
		expectError             bool
	}{
		{
			name:                    "no filter - should return all receipts",
			filterTxHashes:          nil,
			expectedReceiptTxHashes: txHashes,
			expectError:             false,
		},
		{
			name:                    "single tx hash filter",
			filterTxHashes:          []common.Hash{txHashes[0]},
			expectedReceiptTxHashes: []common.Hash{txHashes[0]},
			expectError:             false,
		},
		{
			name:                    "multiple tx hashes filter",
			filterTxHashes:          []common.Hash{txHashes[0], txHashes[1], txHashes[2]},
			expectedReceiptTxHashes: []common.Hash{txHashes[0], txHashes[1], txHashes[2]},
			expectError:             false,
		},
	}
	// Run test cases
	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			receiptsChan := make(chan []*ReceiptWithTx)
			sub := api.events.SubscribeTransactionReceipts(tc.filterTxHashes, receiptsChan)
			// Send chain event
			backend.chainFeed.Send(chainEvent)
			// Wait for receipts; batches may arrive split across several
			// channel reads, so accumulate until the expected count is reached.
			timeout := time.After(1 * time.Second)
			var receivedReceipts []*types.Receipt
			for {
				select {
				case receiptsWithTx := <-receiptsChan:
					for _, receiptWithTx := range receiptsWithTx {
						receivedReceipts = append(receivedReceipts, receiptWithTx.Receipt)
					}
				case <-timeout:
					t.Fatalf("timeout waiting for receipts")
				}
				if len(receivedReceipts) >= len(tc.expectedReceiptTxHashes) {
					break
				}
			}
			// Verify receipt count
			if len(receivedReceipts) != len(tc.expectedReceiptTxHashes) {
				t.Errorf("Expected %d receipts, got %d", len(tc.expectedReceiptTxHashes), len(receivedReceipts))
			}
			// Verify specific transaction hashes are present
			if tc.expectedReceiptTxHashes != nil {
				receivedHashes := make(map[common.Hash]bool)
				for _, receipt := range receivedReceipts {
					receivedHashes[receipt.TxHash] = true
				}
				for _, expectedHash := range tc.expectedReceiptTxHashes {
					if !receivedHashes[expectedHash] {
						t.Errorf("Expected receipt for tx %x not found", expectedHash)
					}
				}
			}
			// Cleanup
			sub.Unsubscribe()
			<-sub.Err()
		})
	}
}

View file

@ -181,8 +181,7 @@ func newHandler(config *handlerConfig) (*handler, error) {
} else { } else {
head := h.chain.CurrentBlock() head := h.chain.CurrentBlock()
if head.Number.Uint64() > 0 && h.chain.HasState(head.Root) { if head.Number.Uint64() > 0 && h.chain.HasState(head.Root) {
// Print warning log if database is not empty to run snap sync. log.Info("Switch sync mode from snap sync to full sync", "reason", "snap sync complete")
log.Warn("Switch sync mode from snap sync to full sync", "reason", "snap sync complete")
} else { } else {
// If snap sync was requested and our database is empty, grant it // If snap sync was requested and our database is empty, grant it
h.snapSync.Store(true) h.snapSync.Store(true)

View file

@ -494,12 +494,19 @@ func handleTransactions(backend Backend, msg Decoder, peer *Peer) error {
if err := msg.Decode(&txs); err != nil { if err := msg.Decode(&txs); err != nil {
return err return err
} }
// Duplicate transactions are not allowed
seen := make(map[common.Hash]struct{})
for i, tx := range txs { for i, tx := range txs {
// Validate and mark the remote transaction // Validate and mark the remote transaction
if tx == nil { if tx == nil {
return fmt.Errorf("Transactions: transaction %d is nil", i) return fmt.Errorf("Transactions: transaction %d is nil", i)
} }
peer.markTransaction(tx.Hash()) hash := tx.Hash()
if _, exists := seen[hash]; exists {
return fmt.Errorf("Transactions: multiple copies of the same hash %v", hash)
}
seen[hash] = struct{}{}
peer.markTransaction(hash)
} }
return backend.Handle(peer, &txs) return backend.Handle(peer, &txs)
} }
@ -514,12 +521,19 @@ func handlePooledTransactions(backend Backend, msg Decoder, peer *Peer) error {
if err := msg.Decode(&txs); err != nil { if err := msg.Decode(&txs); err != nil {
return err return err
} }
// Duplicate transactions are not allowed
seen := make(map[common.Hash]struct{})
for i, tx := range txs.PooledTransactionsResponse { for i, tx := range txs.PooledTransactionsResponse {
// Validate and mark the remote transaction // Validate and mark the remote transaction
if tx == nil { if tx == nil {
return fmt.Errorf("PooledTransactions: transaction %d is nil", i) return fmt.Errorf("PooledTransactions: transaction %d is nil", i)
} }
peer.markTransaction(tx.Hash()) hash := tx.Hash()
if _, exists := seen[hash]; exists {
return fmt.Errorf("PooledTransactions: multiple copies of the same hash %v", hash)
}
seen[hash] = struct{}{}
peer.markTransaction(hash)
} }
requestTracker.Fulfil(peer.id, peer.version, PooledTransactionsMsg, txs.RequestId) requestTracker.Fulfil(peer.id, peer.version, PooledTransactionsMsg, txs.RequestId)

View file

@ -22,7 +22,6 @@ import (
"time" "time"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/forkid" "github.com/ethereum/go-ethereum/core/forkid"
"github.com/ethereum/go-ethereum/metrics" "github.com/ethereum/go-ethereum/metrics"
"github.com/ethereum/go-ethereum/p2p" "github.com/ethereum/go-ethereum/p2p"
@ -36,7 +35,7 @@ const (
// Handshake executes the eth protocol handshake, negotiating version number, // Handshake executes the eth protocol handshake, negotiating version number,
// network IDs, difficulties, head and genesis blocks. // network IDs, difficulties, head and genesis blocks.
func (p *Peer) Handshake(networkID uint64, chain *core.BlockChain, rangeMsg BlockRangeUpdatePacket) error { func (p *Peer) Handshake(networkID uint64, chain forkid.Blockchain, rangeMsg BlockRangeUpdatePacket) error {
switch p.version { switch p.version {
case ETH69: case ETH69:
return p.handshake69(networkID, chain, rangeMsg) return p.handshake69(networkID, chain, rangeMsg)
@ -47,10 +46,10 @@ func (p *Peer) Handshake(networkID uint64, chain *core.BlockChain, rangeMsg Bloc
} }
} }
func (p *Peer) handshake68(networkID uint64, chain *core.BlockChain) error { func (p *Peer) handshake68(networkID uint64, chain forkid.Blockchain) error {
var ( var (
genesis = chain.Genesis() genesis = chain.Genesis()
latest = chain.CurrentBlock() latest = chain.CurrentHeader()
forkID = forkid.NewID(chain.Config(), genesis, latest.Number.Uint64(), latest.Time) forkID = forkid.NewID(chain.Config(), genesis, latest.Number.Uint64(), latest.Time)
forkFilter = forkid.NewFilter(chain) forkFilter = forkid.NewFilter(chain)
) )
@ -92,10 +91,10 @@ func (p *Peer) readStatus68(networkID uint64, status *StatusPacket68, genesis co
return nil return nil
} }
func (p *Peer) handshake69(networkID uint64, chain *core.BlockChain, rangeMsg BlockRangeUpdatePacket) error { func (p *Peer) handshake69(networkID uint64, chain forkid.Blockchain, rangeMsg BlockRangeUpdatePacket) error {
var ( var (
genesis = chain.Genesis() genesis = chain.Genesis()
latest = chain.CurrentBlock() latest = chain.CurrentHeader()
forkID = forkid.NewID(chain.Config(), genesis, latest.Number.Uint64(), latest.Time) forkID = forkid.NewID(chain.Config(), genesis, latest.Number.Uint64(), latest.Time)
forkFilter = forkid.NewFilter(chain) forkFilter = forkid.NewFilter(chain)
) )

View file

@ -74,8 +74,11 @@ func (r *hashRange) End() common.Hash {
// incHash returns the next hash, in lexicographical order (a.k.a plus one) // incHash returns the next hash, in lexicographical order (a.k.a plus one)
func incHash(h common.Hash) common.Hash { func incHash(h common.Hash) common.Hash {
var a uint256.Int for i := len(h) - 1; i >= 0; i-- {
a.SetBytes32(h[:]) h[i]++
a.AddUint64(&a, 1) if h[i] != 0 {
return common.Hash(a.Bytes32()) break
}
}
return h
} }

View file

@ -80,6 +80,7 @@ type StateReleaseFunc func()
type Backend interface { type Backend interface {
HeaderByHash(ctx context.Context, hash common.Hash) (*types.Header, error) HeaderByHash(ctx context.Context, hash common.Hash) (*types.Header, error)
HeaderByNumber(ctx context.Context, number rpc.BlockNumber) (*types.Header, error) HeaderByNumber(ctx context.Context, number rpc.BlockNumber) (*types.Header, error)
CurrentHeader() *types.Header
BlockByHash(ctx context.Context, hash common.Hash) (*types.Block, error) BlockByHash(ctx context.Context, hash common.Hash) (*types.Block, error)
BlockByNumber(ctx context.Context, number rpc.BlockNumber) (*types.Block, error) BlockByNumber(ctx context.Context, number rpc.BlockNumber) (*types.Block, error)
GetCanonicalTransaction(txHash common.Hash) (bool, *types.Transaction, common.Hash, uint64, uint64) GetCanonicalTransaction(txHash common.Hash) (bool, *types.Transaction, common.Hash, uint64, uint64)

View file

@ -142,6 +142,10 @@ func (b *testBackend) ChainDb() ethdb.Database {
return b.chaindb return b.chaindb
} }
func (b *testBackend) CurrentHeader() *types.Header {
return b.chain.CurrentHeader()
}
// teardown releases the associated resources. // teardown releases the associated resources.
func (b *testBackend) teardown() { func (b *testBackend) teardown() {
b.chain.Stop() b.chain.Stop()

View file

@ -0,0 +1,86 @@
package native
// Copyright 2021 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
import (
"encoding/json"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/core/tracing"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/eth/tracers"
"github.com/ethereum/go-ethereum/eth/tracers/internal"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/params"
)
// init registers the keccak256 preimage tracer in the default tracer directory
// under the name "keccak256PreimageTracer". The trailing false flag's meaning
// is defined by Register — confirm against tracers.DefaultDirectory.Register.
func init() {
	tracers.DefaultDirectory.Register("keccak256PreimageTracer", newKeccak256PreimageTracer, false)
}
// keccak256PreimageTracer is a native tracer that collects preimages of all KECCAK256 operations.
// This tracer is particularly useful for analyzing smart contract execution patterns,
// especially when debugging storage access in Solidity mappings and dynamic arrays.
type keccak256PreimageTracer struct {
	// computedHashes maps each observed keccak256 hash to the byte preimage
	// that produced it; repeated identical preimages overwrite the same entry.
	computedHashes map[common.Hash]hexutil.Bytes
}
// newKeccak256PreimageTracer returns a new keccak256PreimageTracer instance.
// The cfg and chainConfig arguments are accepted for the directory's
// constructor signature but are not used by this tracer.
func newKeccak256PreimageTracer(ctx *tracers.Context, cfg json.RawMessage, chainConfig *params.ChainConfig) (*tracers.Tracer, error) {
	tracer := &keccak256PreimageTracer{
		computedHashes: map[common.Hash]hexutil.Bytes{},
	}
	return &tracers.Tracer{
		Hooks:     &tracing.Hooks{OnOpcode: tracer.OnOpcode},
		GetResult: tracer.GetResult,
	}, nil
}
// OnOpcode implements the tracing hook. It inspects only KECCAK256 opcodes,
// copies the hashed memory region (zero-padded past the end of memory by
// GetMemoryCopyPadded) and records it keyed by its keccak256 hash.
//
// Parameters follow the tracing.Hooks OnOpcode contract; all except op and
// scope are ignored here.
func (t *keccak256PreimageTracer) OnOpcode(pc uint64, op byte, gas, cost uint64, scope tracing.OpContext, rData []byte, depth int, err error) {
	if op != byte(vm.KECCAK256) {
		return
	}
	sd := scope.StackData()
	// The stack can legitimately be too shallow here — the EVM itself will
	// fail the operation — so skip recording instead of panicking.
	if len(sd) < 2 {
		return
	}
	dataOffset := internal.StackBack(sd, 0).Uint64()
	dataLength := internal.StackBack(sd, 1).Uint64()
	preimage, copyErr := internal.GetMemoryCopyPadded(scope.MemoryData(), int64(dataOffset), int64(dataLength))
	if copyErr != nil {
		log.Warn("keccak256PreimageTracer: failed to copy keccak preimage from memory", "err", copyErr)
		return
	}
	// Keccak256Hash yields a common.Hash directly, avoiding the intermediate
	// []byte result and manual conversion.
	t.computedHashes[crypto.Keccak256Hash(preimage)] = hexutil.Bytes(preimage)
}
// GetResult returns the collected keccak256 preimages as a JSON object mapping hashes to preimages.
func (t *keccak256PreimageTracer) GetResult() (json.RawMessage, error) {
	encoded, err := json.Marshal(t.computedHashes)
	if err != nil {
		return nil, err
	}
	return json.RawMessage(encoded), nil
}

View file

@ -0,0 +1,442 @@
// Copyright 2021 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package native_test
import (
"encoding/json"
"testing"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/core/tracing"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/eth/tracers"
"github.com/ethereum/go-ethereum/params"
"github.com/holiman/uint256"
"github.com/stretchr/testify/require"
)
// mockOpContext implements tracing.OpContext for testing
type mockOpContext struct {
	memory []byte        // contents returned by MemoryData
	stack  []uint256.Int // contents returned by StackData (tests place the stack top at the end of the slice)
}

// Ensure mockOpContext implements tracing.OpContext
var _ tracing.OpContext = (*mockOpContext)(nil)

// MemoryData returns the mock memory contents.
func (m *mockOpContext) MemoryData() []byte {
	return m.memory
}

// StackData returns the mock stack contents.
func (m *mockOpContext) StackData() []uint256.Int {
	return m.stack
}

// Address returns a zero address; unused by the tracer under test.
func (m *mockOpContext) Address() common.Address {
	return common.Address{}
}

// Caller returns a zero address; unused by the tracer under test.
func (m *mockOpContext) Caller() common.Address {
	return common.Address{}
}

// CallValue returns zero; unused by the tracer under test.
func (m *mockOpContext) CallValue() *uint256.Int {
	return uint256.NewInt(0)
}

// CallInput returns empty input; unused by the tracer under test.
func (m *mockOpContext) CallInput() []byte {
	return []byte{}
}

// ContractCode returns empty code; unused by the tracer under test.
func (m *mockOpContext) ContractCode() []byte {
	return []byte{}
}
// TestKeccak256PreimageTracerCreation checks that the tracer can be built
// from the default directory and exposes the expected hook and result callback.
func TestKeccak256PreimageTracerCreation(t *testing.T) {
	tr, err := tracers.DefaultDirectory.New("keccak256PreimageTracer", &tracers.Context{}, nil, params.MainnetChainConfig)
	require.NoError(t, err)
	require.NotNil(t, tr)
	hooks := tr.Hooks
	require.NotNil(t, hooks)
	require.NotNil(t, hooks.OnOpcode)
	require.NotNil(t, tr.GetResult)
}
// TestKeccak256PreimageTracerInitialResult verifies a freshly created tracer
// reports an empty preimage map before any opcode has been observed.
func TestKeccak256PreimageTracerInitialResult(t *testing.T) {
	tr, err := tracers.DefaultDirectory.New("keccak256PreimageTracer", &tracers.Context{}, nil, params.MainnetChainConfig)
	require.NoError(t, err)

	raw, err := tr.GetResult()
	require.NoError(t, err)

	var preimages map[common.Hash]hexutil.Bytes
	require.NoError(t, json.Unmarshal(raw, &preimages))
	require.Empty(t, preimages)
}
// TestKeccak256PreimageTracerSingleKeccak feeds a single KECCAK256 opcode
// through the tracer and checks that the hash/preimage pair is recorded.
func TestKeccak256PreimageTracerSingleKeccak(t *testing.T) {
	tr, err := tracers.DefaultDirectory.New("keccak256PreimageTracer", &tracers.Context{}, nil, params.MainnetChainConfig)
	require.NoError(t, err)

	// "hello world" placed at offset 0 of a 32-byte memory area.
	input := []byte("hello world")
	mem := make([]byte, 32)
	copy(mem, input)
	scope := &mockOpContext{
		memory: mem,
		// Back(0) of the stack is the offset, Back(1) the length.
		stack: []uint256.Int{
			*uint256.NewInt(uint64(len(input))), // length
			*uint256.NewInt(0),                  // offset
		},
	}
	tr.OnOpcode(0, byte(vm.KECCAK256), 0, 0, scope, nil, 0, nil)

	raw, err := tr.GetResult()
	require.NoError(t, err)
	var preimages map[common.Hash]hexutil.Bytes
	require.NoError(t, json.Unmarshal(raw, &preimages))

	want := crypto.Keccak256Hash(input)
	require.Len(t, preimages, 1)
	require.Contains(t, preimages, want)
	require.Equal(t, hexutil.Bytes(input), preimages[want])
}
// TestKeccak256PreimageTracerMultipleKeccak verifies that hashes of several
// distinct inputs accumulate in the tracer's result map.
//
// NOTE(review): all subtests share one tracer instance, so each t.Run adds to
// the same map and only the final aggregate result is verified afterwards.
func TestKeccak256PreimageTracerMultipleKeccak(t *testing.T) {
	tracer, err := tracers.DefaultDirectory.New("keccak256PreimageTracer", &tracers.Context{}, nil, params.MainnetChainConfig)
	require.NoError(t, err)
	testCases := []struct {
		name string
		data []byte
	}{
		{"empty", []byte{}},
		{"hello", []byte("hello")},
		{"world", []byte("world")},
		{"long_data", make([]byte, 100)},
	}
	// Initialize long_data with some pattern
	for i := range testCases[3].data {
		testCases[3].data[i] = byte(i % 256)
	}
	expectedHashes := make(map[common.Hash]hexutil.Bytes)
	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			// Memory is at least one byte so the zero-length case still has a
			// valid backing buffer.
			memory := make([]byte, max(len(tc.data), 1))
			copy(memory, tc.data)
			stack := []uint256.Int{
				*uint256.NewInt(uint64(len(tc.data))), // length
				*uint256.NewInt(0),                    // offset
			}
			mockScope := &mockOpContext{
				memory: memory,
				stack:  stack,
			}
			// Call OnOpcode with KECCAK256
			tracer.OnOpcode(0, byte(vm.KECCAK256), 0, 0, mockScope, nil, 0, nil)
			expectedHash := crypto.Keccak256Hash(tc.data)
			expectedHashes[expectedHash] = hexutil.Bytes(tc.data)
		})
	}
	// Get final result
	result, err := tracer.GetResult()
	require.NoError(t, err)
	var hashes map[common.Hash]hexutil.Bytes
	err = json.Unmarshal(result, &hashes)
	require.NoError(t, err)
	require.Equal(t, expectedHashes, hashes)
}
// TestKeccak256PreimageTracerNonKeccakOpcodes drives a series of unrelated
// opcodes through the hook and checks that nothing is recorded.
func TestKeccak256PreimageTracerNonKeccakOpcodes(t *testing.T) {
	tr, err := tracers.DefaultDirectory.New("keccak256PreimageTracer", &tracers.Context{}, nil, params.MainnetChainConfig)
	require.NoError(t, err)

	payload := []byte("should not be recorded")
	mem := make([]byte, 32)
	copy(mem, payload)
	scope := &mockOpContext{
		memory: mem,
		stack: []uint256.Int{
			*uint256.NewInt(uint64(len(payload))),
			*uint256.NewInt(0),
		},
	}
	// None of these opcodes should trigger preimage collection.
	for _, op := range []vm.OpCode{
		vm.ADD, vm.MUL, vm.SUB, vm.DIV, vm.SDIV, vm.MOD, vm.SMOD,
		vm.ADDMOD, vm.MULMOD, vm.EXP, vm.SIGNEXTEND, vm.SLOAD,
		vm.SSTORE, vm.MLOAD, vm.MSTORE, vm.CALL, vm.RETURN,
	} {
		tr.OnOpcode(0, byte(op), 0, 0, scope, nil, 0, nil)
	}

	raw, err := tr.GetResult()
	require.NoError(t, err)
	var preimages map[common.Hash]hexutil.Bytes
	require.NoError(t, json.Unmarshal(raw, &preimages))
	require.Empty(t, preimages)
}
// TestKeccak256PreimageTracerMemoryOffset hashes data that does not start at
// memory offset zero and checks the recorded preimage excludes the leading bytes.
func TestKeccak256PreimageTracerMemoryOffset(t *testing.T) {
	tr, err := tracers.DefaultDirectory.New("keccak256PreimageTracer", &tracers.Context{}, nil, params.MainnetChainConfig)
	require.NoError(t, err)

	// Memory layout: [prefix][target][10 trailing zero bytes].
	prefix := []byte("prefix_data_")
	target := []byte("target_data")
	mem := make([]byte, len(prefix)+len(target)+10)
	copy(mem, prefix)
	copy(mem[len(prefix):], target)

	scope := &mockOpContext{
		memory: mem,
		stack: []uint256.Int{
			*uint256.NewInt(uint64(len(target))), // length
			*uint256.NewInt(uint64(len(prefix))), // offset
		},
	}
	tr.OnOpcode(0, byte(vm.KECCAK256), 0, 0, scope, nil, 0, nil)

	raw, err := tr.GetResult()
	require.NoError(t, err)
	var preimages map[common.Hash]hexutil.Bytes
	require.NoError(t, json.Unmarshal(raw, &preimages))

	// The recorded preimage must be the target bytes, not the prefix.
	want := crypto.Keccak256Hash(target)
	require.Len(t, preimages, 1)
	require.Contains(t, preimages, want)
	require.Equal(t, hexutil.Bytes(target), preimages[want])
}
// TestKeccak256PreimageTracerMemoryPadding requests more bytes than memory
// holds and checks the recorded preimage is zero-padded to the requested length.
func TestKeccak256PreimageTracerMemoryPadding(t *testing.T) {
	tr, err := tracers.DefaultDirectory.New("keccak256PreimageTracer", &tracers.Context{}, nil, params.MainnetChainConfig)
	require.NoError(t, err)

	short := []byte("short")
	mem := make([]byte, len(short))
	copy(mem, short)

	// Ask for 5 bytes beyond the end of memory; the copy helper zero-pads.
	wantLen := len(short) + 5
	scope := &mockOpContext{
		memory: mem,
		stack: []uint256.Int{
			*uint256.NewInt(uint64(wantLen)), // length > memory size
			*uint256.NewInt(0),               // offset
		},
	}
	tr.OnOpcode(0, byte(vm.KECCAK256), 0, 0, scope, nil, 0, nil)

	raw, err := tr.GetResult()
	require.NoError(t, err)
	var preimages map[common.Hash]hexutil.Bytes
	require.NoError(t, json.Unmarshal(raw, &preimages))

	// Expected preimage: the short data followed by zero padding.
	padded := make([]byte, wantLen)
	copy(padded, short)
	want := crypto.Keccak256Hash(padded)
	require.Len(t, preimages, 1)
	require.Contains(t, preimages, want)
	require.Equal(t, hexutil.Bytes(padded), preimages[want])
}
// TestKeccak256PreimageTracerDuplicateHashes verifies that hashing identical
// data several times collapses into a single map entry in the result.
func TestKeccak256PreimageTracerDuplicateHashes(t *testing.T) {
	tracer, err := tracers.DefaultDirectory.New("keccak256PreimageTracer", &tracers.Context{}, nil, params.MainnetChainConfig)
	require.NoError(t, err)

	payload := []byte("duplicate_test")
	mem := make([]byte, len(payload))
	copy(mem, payload)
	scope := &mockOpContext{
		memory: mem,
		stack: []uint256.Int{
			*uint256.NewInt(uint64(len(payload))),
			*uint256.NewInt(0),
		},
	}

	// Replay the same KECCAK256 event three times; later events overwrite
	// the earlier identical entry.
	for n := 0; n < 3; n++ {
		tracer.OnOpcode(0, byte(vm.KECCAK256), 0, 0, scope, nil, 0, nil)
	}

	out, err := tracer.GetResult()
	require.NoError(t, err)
	var hashes map[common.Hash]hexutil.Bytes
	require.NoError(t, json.Unmarshal(out, &hashes))

	wantHash := crypto.Keccak256Hash(payload)
	require.Len(t, hashes, 1)
	require.Contains(t, hashes, wantHash)
	require.Equal(t, hexutil.Bytes(payload), hashes[wantHash])
}
// TestKeccak256PreimageTracerWithExecutionError checks that a preimage is
// still recorded even when the opcode event carries an execution error.
func TestKeccak256PreimageTracerWithExecutionError(t *testing.T) {
	tracer, err := tracers.DefaultDirectory.New("keccak256PreimageTracer", &tracers.Context{}, nil, params.MainnetChainConfig)
	require.NoError(t, err)

	payload := []byte("error_test")
	mem := make([]byte, len(payload))
	copy(mem, payload)
	scope := &mockOpContext{
		memory: mem,
		stack: []uint256.Int{
			*uint256.NewInt(uint64(len(payload))),
			*uint256.NewInt(0),
		},
	}

	// Deliver the KECCAK256 event together with an out-of-gas error.
	tracer.OnOpcode(0, byte(vm.KECCAK256), 0, 0, scope, nil, 0, vm.ErrOutOfGas)

	// The hash must be captured despite the error.
	out, err := tracer.GetResult()
	require.NoError(t, err)
	var hashes map[common.Hash]hexutil.Bytes
	require.NoError(t, json.Unmarshal(out, &hashes))

	wantHash := crypto.Keccak256Hash(payload)
	require.Len(t, hashes, 1)
	require.Contains(t, hashes, wantHash)
	require.Equal(t, hexutil.Bytes(payload), hashes[wantHash])
}
// TestKeccak256PreimageTracerInsufficientStack ensures the tracer tolerates a
// malformed stack (fewer than the two items KECCAK256 needs) without panicking.
func TestKeccak256PreimageTracerInsufficientStack(t *testing.T) {
	tracer, err := tracers.DefaultDirectory.New("keccak256PreimageTracer", &tracers.Context{}, nil, params.MainnetChainConfig)
	require.NoError(t, err)

	payload := []byte("test")
	mem := make([]byte, len(payload))
	copy(mem, payload)
	// A single stack item: the offset is present, the length is missing.
	scope := &mockOpContext{
		memory: mem,
		stack: []uint256.Int{
			*uint256.NewInt(0),
		},
	}

	// The call must return without crashing despite the short stack.
	tracer.OnOpcode(0, byte(vm.KECCAK256), 0, 0, scope, nil, 0, nil)
}
// TestKeccak256PreimageTracerLargeData exercises the tracer with a 1 KiB
// preimage to confirm larger inputs are captured intact.
func TestKeccak256PreimageTracerLargeData(t *testing.T) {
	tracer, err := tracers.DefaultDirectory.New("keccak256PreimageTracer", &tracers.Context{}, nil, params.MainnetChainConfig)
	require.NoError(t, err)

	// 1 KiB of repeating byte values 0x00..0xff.
	payload := make([]byte, 1024)
	for i := range payload {
		payload[i] = byte(i)
	}
	mem := make([]byte, len(payload))
	copy(mem, payload)
	scope := &mockOpContext{
		memory: mem,
		stack: []uint256.Int{
			*uint256.NewInt(uint64(len(payload))),
			*uint256.NewInt(0),
		},
	}

	tracer.OnOpcode(0, byte(vm.KECCAK256), 0, 0, scope, nil, 0, nil)

	out, err := tracer.GetResult()
	require.NoError(t, err)
	var hashes map[common.Hash]hexutil.Bytes
	require.NoError(t, json.Unmarshal(out, &hashes))

	wantHash := crypto.Keccak256Hash(payload)
	require.Len(t, hashes, 1)
	require.Contains(t, hashes, wantHash)
	require.Equal(t, hexutil.Bytes(payload), hashes[wantHash])
}

View file

@ -350,6 +350,15 @@ func (ec *Client) TransactionReceipt(ctx context.Context, txHash common.Hash) (*
return r, err return r, err
} }
// SubscribeTransactionReceipts subscribes to notifications about transaction receipts.
func (ec *Client) SubscribeTransactionReceipts(ctx context.Context, q *ethereum.TransactionReceiptsQuery, ch chan<- []*types.Receipt) (ethereum.Subscription, error) {
sub, err := ec.c.EthSubscribe(ctx, ch, "transactionReceipts", q)
if err != nil {
return nil, err
}
return sub, nil
}
// SyncProgress retrieves the current progress of the sync algorithm. If there's // SyncProgress retrieves the current progress of the sync algorithm. If there's
// no sync currently running, it returns nil. // no sync currently running, it returns nil.
func (ec *Client) SyncProgress(ctx context.Context) (*ethereum.SyncProgress, error) { func (ec *Client) SyncProgress(ctx context.Context) (*ethereum.SyncProgress, error) {

View file

@ -121,6 +121,10 @@ type AncientReaderOp interface {
// - if maxBytes is not specified, 'count' items will be returned if they are present // - if maxBytes is not specified, 'count' items will be returned if they are present
AncientRange(kind string, start, count, maxBytes uint64) ([][]byte, error) AncientRange(kind string, start, count, maxBytes uint64) ([][]byte, error)
// AncientBytes retrieves the value segment of the element specified by the id
// and value offsets.
AncientBytes(kind string, id, offset, length uint64) ([]byte, error)
// Ancients returns the ancient item numbers in the ancient store. // Ancients returns the ancient item numbers in the ancient store.
Ancients() (uint64, error) Ancients() (uint64, error)

View file

@ -140,6 +140,10 @@ func (db *Database) Close() error {
return nil return nil
} }
func (db *Database) AncientBytes(kind string, id, offset, length uint64) ([]byte, error) {
panic("not supported")
}
func New(client *rpc.Client) ethdb.Database { func New(client *rpc.Client) ethdb.Database {
if client == nil { if client == nil {
return nil return nil

4
go.mod
View file

@ -5,7 +5,7 @@ go 1.24.0
require ( require (
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.2.0 github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.2.0
github.com/Microsoft/go-winio v0.6.2 github.com/Microsoft/go-winio v0.6.2
github.com/VictoriaMetrics/fastcache v1.12.2 github.com/VictoriaMetrics/fastcache v1.13.0
github.com/aws/aws-sdk-go-v2 v1.21.2 github.com/aws/aws-sdk-go-v2 v1.21.2
github.com/aws/aws-sdk-go-v2/config v1.18.45 github.com/aws/aws-sdk-go-v2/config v1.18.45
github.com/aws/aws-sdk-go-v2/credentials v1.13.43 github.com/aws/aws-sdk-go-v2/credentials v1.13.43
@ -31,7 +31,7 @@ require (
github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff
github.com/gofrs/flock v0.12.1 github.com/gofrs/flock v0.12.1
github.com/golang-jwt/jwt/v4 v4.5.2 github.com/golang-jwt/jwt/v4 v4.5.2
github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb github.com/golang/snappy v1.0.0
github.com/google/gofuzz v1.2.0 github.com/google/gofuzz v1.2.0
github.com/google/uuid v1.3.0 github.com/google/uuid v1.3.0
github.com/gorilla/websocket v1.4.2 github.com/gorilla/websocket v1.4.2

10
go.sum
View file

@ -16,8 +16,8 @@ github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERo
github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU=
github.com/StackExchange/wmi v1.2.1 h1:VIkavFPXSjcnS+O8yTq7NI32k0R5Aj+v39y29VYDOSA= github.com/StackExchange/wmi v1.2.1 h1:VIkavFPXSjcnS+O8yTq7NI32k0R5Aj+v39y29VYDOSA=
github.com/StackExchange/wmi v1.2.1/go.mod h1:rcmrprowKIVzvc+NUiLncP2uuArMWLCbu9SBzvHz7e8= github.com/StackExchange/wmi v1.2.1/go.mod h1:rcmrprowKIVzvc+NUiLncP2uuArMWLCbu9SBzvHz7e8=
github.com/VictoriaMetrics/fastcache v1.12.2 h1:N0y9ASrJ0F6h0QaC3o6uJb3NIZ9VKLjCM7NQbSmF7WI= github.com/VictoriaMetrics/fastcache v1.13.0 h1:AW4mheMR5Vd9FkAPUv+NH6Nhw+fmbTMGMsNAoA/+4G0=
github.com/VictoriaMetrics/fastcache v1.12.2/go.mod h1:AmC+Nzz1+3G2eCPapF6UcsnkThDcMsQicp4xDukwJYI= github.com/VictoriaMetrics/fastcache v1.13.0/go.mod h1:hHXhl4DA2fTL2HTZDJFXWgW0LNjo6B+4aj2Wmng3TjU=
github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156 h1:eMwmnE/GDgah4HI848JfFxHt+iPb26b4zyfspmqY0/8= github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156 h1:eMwmnE/GDgah4HI848JfFxHt+iPb26b4zyfspmqY0/8=
github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156/go.mod h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2ucNfQESPXM= github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156/go.mod h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2ucNfQESPXM=
github.com/aws/aws-sdk-go-v2 v1.21.2 h1:+LXZ0sgo8quN9UOKXXzAWRT3FWd4NxeXWOZom9pE7GA= github.com/aws/aws-sdk-go-v2 v1.21.2 h1:+LXZ0sgo8quN9UOKXXzAWRT3FWd4NxeXWOZom9pE7GA=
@ -52,7 +52,6 @@ github.com/bits-and-blooms/bitset v1.20.0 h1:2F+rfL86jE2d/bmw7OhqUg2Sj/1rURkBn3M
github.com/bits-and-blooms/bitset v1.20.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8= github.com/bits-and-blooms/bitset v1.20.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8=
github.com/cespare/cp v0.1.0 h1:SE+dxFebS7Iik5LK0tsi1k9ZCxEaFX4AjQmoyA+1dJk= github.com/cespare/cp v0.1.0 h1:SE+dxFebS7Iik5LK0tsi1k9ZCxEaFX4AjQmoyA+1dJk=
github.com/cespare/cp v0.1.0/go.mod h1:SOGHArjBr4JWaSDEVpWpo/hNg6RoKrls6Oh40hiwW+s= github.com/cespare/cp v0.1.0/go.mod h1:SOGHArjBr4JWaSDEVpWpo/hNg6RoKrls6Oh40hiwW+s=
github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/chzyer/logex v1.2.0/go.mod h1:9+9sk7u7pGNWYMkh0hdiL++6OeibzJccyQU4p4MedaY= github.com/chzyer/logex v1.2.0/go.mod h1:9+9sk7u7pGNWYMkh0hdiL++6OeibzJccyQU4p4MedaY=
@ -165,8 +164,8 @@ github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb h1:PBC98N2aIaM3XXiurYmW7fx4GZkL8feAMVq7nEjURHk= github.com/golang/snappy v1.0.0 h1:Oy607GVXHs7RtbggtPBnr2RmDArIsAefDwvrdWvRhGs=
github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v1.0.0/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golangci/lint-1 v0.0.0-20181222135242-d2cdd8c08219/go.mod h1:/X8TswGSh1pIozq4ZwCfxS0WA5JGXguxk94ar/4c87Y= github.com/golangci/lint-1 v0.0.0-20181222135242-d2cdd8c08219/go.mod h1:/X8TswGSh1pIozq4ZwCfxS0WA5JGXguxk94ar/4c87Y=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
@ -450,7 +449,6 @@ golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.36.0 h1:KVRy2GtZBrk1cBYA7MKu5bEZFxQk4NIDV6RLVcC8o0k= golang.org/x/sys v0.36.0 h1:KVRy2GtZBrk1cBYA7MKu5bEZFxQk4NIDV6RLVcC8o0k=
golang.org/x/sys v0.36.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= golang.org/x/sys v0.36.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=

View file

@ -62,6 +62,13 @@ type ChainReader interface {
SubscribeNewHead(ctx context.Context, ch chan<- *types.Header) (Subscription, error) SubscribeNewHead(ctx context.Context, ch chan<- *types.Header) (Subscription, error)
} }
// TransactionReceiptsQuery defines criteria for transaction receipts subscription.
// If TransactionHashes is empty, receipts for all transactions included in new blocks will be delivered.
// Otherwise, only receipts for the specified transactions will be delivered.
type TransactionReceiptsQuery struct {
TransactionHashes []common.Hash
}
// TransactionReader provides access to past transactions and their receipts. // TransactionReader provides access to past transactions and their receipts.
// Implementations may impose arbitrary restrictions on the transactions and receipts that // Implementations may impose arbitrary restrictions on the transactions and receipts that
// can be retrieved. Historic transactions may not be available. // can be retrieved. Historic transactions may not be available.
@ -81,6 +88,11 @@ type TransactionReader interface {
// transaction may not be included in the current canonical chain even if a receipt // transaction may not be included in the current canonical chain even if a receipt
// exists. // exists.
TransactionReceipt(ctx context.Context, txHash common.Hash) (*types.Receipt, error) TransactionReceipt(ctx context.Context, txHash common.Hash) (*types.Receipt, error)
// SubscribeTransactionReceipts subscribes to notifications about transaction receipts.
// The receipts are delivered in batches when transactions are included in blocks.
// If q is nil or has empty TransactionHashes, all receipts from new blocks will be delivered.
// Otherwise, only receipts for the specified transaction hashes will be delivered.
SubscribeTransactionReceipts(ctx context.Context, q *TransactionReceiptsQuery, ch chan<- []*types.Receipt) (Subscription, error)
} }
// ChainStateReader wraps access to the state trie of the canonical blockchain. Note that // ChainStateReader wraps access to the state trie of the canonical blockchain. Note that

View file

@ -23,6 +23,7 @@
package blocktest package blocktest
import ( import (
"bytes"
"hash" "hash"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
@ -48,8 +49,8 @@ func (h *testHasher) Reset() {
// Update updates the hash state with the given key and value. // Update updates the hash state with the given key and value.
func (h *testHasher) Update(key, val []byte) error { func (h *testHasher) Update(key, val []byte) error {
h.hasher.Write(key) h.hasher.Write(bytes.Clone(key))
h.hasher.Write(val) h.hasher.Write(bytes.Clone(val))
return nil return nil
} }

View file

@ -25,15 +25,6 @@ import (
"strings" "strings"
) )
// FileExist checks if a file exists at path.
func FileExist(path string) bool {
_, err := os.Stat(path)
if err != nil && os.IsNotExist(err) {
return false
}
return true
}
// HashFolder iterates all files under the given directory, computing the hash // HashFolder iterates all files under the given directory, computing the hash
// of each. // of each.
func HashFolder(folder string, exlude []string) (map[string][32]byte, error) { func HashFolder(folder string, exlude []string) (map[string][32]byte, error) {

View file

@ -449,21 +449,21 @@ func decodeHash(s string) (h common.Hash, inputLength int, err error) {
if (len(s) & 1) > 0 { if (len(s) & 1) > 0 {
s = "0" + s s = "0" + s
} }
if len(s) > 64 {
return common.Hash{}, len(s) / 2, errors.New("hex string too long, want at most 32 bytes")
}
b, err := hex.DecodeString(s) b, err := hex.DecodeString(s)
if err != nil { if err != nil {
return common.Hash{}, 0, errors.New("hex string invalid") return common.Hash{}, 0, errors.New("hex string invalid")
} }
if len(b) > 32 {
return common.Hash{}, len(b), errors.New("hex string too long, want at most 32 bytes")
}
return common.BytesToHash(b), len(b), nil return common.BytesToHash(b), len(b), nil
} }
// GetHeaderByNumber returns the requested canonical block header. // GetHeaderByNumber returns the requested canonical block header.
// - When blockNr is -1 the chain pending header is returned. // - When number is -1 the chain pending header is returned.
// - When blockNr is -2 the chain latest header is returned. // - When number is -2 the chain latest header is returned.
// - When blockNr is -3 the chain finalized header is returned. // - When number is -3 the chain finalized header is returned.
// - When blockNr is -4 the chain safe header is returned. // - When number is -4 the chain safe header is returned.
func (api *BlockChainAPI) GetHeaderByNumber(ctx context.Context, number rpc.BlockNumber) (map[string]interface{}, error) { func (api *BlockChainAPI) GetHeaderByNumber(ctx context.Context, number rpc.BlockNumber) (map[string]interface{}, error) {
header, err := api.b.HeaderByNumber(ctx, number) header, err := api.b.HeaderByNumber(ctx, number)
if header != nil && err == nil { if header != nil && err == nil {
@ -489,10 +489,10 @@ func (api *BlockChainAPI) GetHeaderByHash(ctx context.Context, hash common.Hash)
} }
// GetBlockByNumber returns the requested canonical block. // GetBlockByNumber returns the requested canonical block.
// - When blockNr is -1 the chain pending block is returned. // - When number is -1 the chain pending block is returned.
// - When blockNr is -2 the chain latest block is returned. // - When number is -2 the chain latest block is returned.
// - When blockNr is -3 the chain finalized block is returned. // - When number is -3 the chain finalized block is returned.
// - When blockNr is -4 the chain safe block is returned. // - When number is -4 the chain safe block is returned.
// - When fullTx is true all transactions in the block are returned, otherwise // - When fullTx is true all transactions in the block are returned, otherwise
// only the transaction hash is returned. // only the transaction hash is returned.
func (api *BlockChainAPI) GetBlockByNumber(ctx context.Context, number rpc.BlockNumber, fullTx bool) (map[string]interface{}, error) { func (api *BlockChainAPI) GetBlockByNumber(ctx context.Context, number rpc.BlockNumber, fullTx bool) (map[string]interface{}, error) {
@ -627,7 +627,7 @@ func (api *BlockChainAPI) GetBlockReceipts(ctx context.Context, blockNrOrHash rp
result := make([]map[string]interface{}, len(receipts)) result := make([]map[string]interface{}, len(receipts))
for i, receipt := range receipts { for i, receipt := range receipts {
result[i] = marshalReceipt(receipt, block.Hash(), block.NumberU64(), signer, txs[i], i) result[i] = MarshalReceipt(receipt, block.Hash(), block.NumberU64(), signer, txs[i], i)
} }
return result, nil return result, nil
} }
@ -636,6 +636,8 @@ func (api *BlockChainAPI) GetBlockReceipts(ctx context.Context, blockNrOrHash rp
type ChainContextBackend interface { type ChainContextBackend interface {
Engine() consensus.Engine Engine() consensus.Engine
HeaderByNumber(context.Context, rpc.BlockNumber) (*types.Header, error) HeaderByNumber(context.Context, rpc.BlockNumber) (*types.Header, error)
HeaderByHash(context.Context, common.Hash) (*types.Header, error)
CurrentHeader() *types.Header
ChainConfig() *params.ChainConfig ChainConfig() *params.ChainConfig
} }
@ -669,6 +671,20 @@ func (context *ChainContext) Config() *params.ChainConfig {
return context.b.ChainConfig() return context.b.ChainConfig()
} }
func (context *ChainContext) CurrentHeader() *types.Header {
return context.b.CurrentHeader()
}
func (context *ChainContext) GetHeaderByNumber(number uint64) *types.Header {
header, _ := context.b.HeaderByNumber(context.ctx, rpc.BlockNumber(number))
return header
}
func (context *ChainContext) GetHeaderByHash(hash common.Hash) *types.Header {
header, _ := context.b.HeaderByHash(context.ctx, hash)
return header
}
func doCall(ctx context.Context, b Backend, args TransactionArgs, state *state.StateDB, header *types.Header, overrides *override.StateOverride, blockOverrides *override.BlockOverrides, timeout time.Duration, globalGasCap uint64) (*core.ExecutionResult, error) { func doCall(ctx context.Context, b Backend, args TransactionArgs, state *state.StateDB, header *types.Header, overrides *override.StateOverride, blockOverrides *override.BlockOverrides, timeout time.Duration, globalGasCap uint64) (*core.ExecutionResult, error) {
blockCtx := core.NewEVMBlockContext(header, NewChainContext(ctx, b), nil) blockCtx := core.NewEVMBlockContext(header, NewChainContext(ctx, b), nil)
if blockOverrides != nil { if blockOverrides != nil {
@ -1472,11 +1488,11 @@ func (api *TransactionAPI) GetTransactionReceipt(ctx context.Context, hash commo
return nil, err return nil, err
} }
// Derive the sender. // Derive the sender.
return marshalReceipt(receipt, blockHash, blockNumber, api.signer, tx, int(index)), nil return MarshalReceipt(receipt, blockHash, blockNumber, api.signer, tx, int(index)), nil
} }
// marshalReceipt marshals a transaction receipt into a JSON object. // MarshalReceipt marshals a transaction receipt into a JSON object.
func marshalReceipt(receipt *types.Receipt, blockHash common.Hash, blockNumber uint64, signer types.Signer, tx *types.Transaction, txIndex int) map[string]interface{} { func MarshalReceipt(receipt *types.Receipt, blockHash common.Hash, blockNumber uint64, signer types.Signer, tx *types.Transaction, txIndex int) map[string]interface{} {
from, _ := types.Sender(signer, tx) from, _ := types.Sender(signer, tx)
fields := map[string]interface{}{ fields := map[string]interface{}{
@ -1603,16 +1619,9 @@ func (api *TransactionAPI) SendTransaction(ctx context.Context, args Transaction
// processing (signing + broadcast). // processing (signing + broadcast).
func (api *TransactionAPI) FillTransaction(ctx context.Context, args TransactionArgs) (*SignTransactionResult, error) { func (api *TransactionAPI) FillTransaction(ctx context.Context, args TransactionArgs) (*SignTransactionResult, error) {
// Set some sanity defaults and terminate on failure // Set some sanity defaults and terminate on failure
sidecarVersion := types.BlobSidecarVersion0
if len(args.Blobs) > 0 {
h := api.b.CurrentHeader()
if api.b.ChainConfig().IsOsaka(h.Number, h.Time) {
sidecarVersion = types.BlobSidecarVersion1
}
}
config := sidecarConfig{ config := sidecarConfig{
blobSidecarAllowed: true, blobSidecarAllowed: true,
blobSidecarVersion: sidecarVersion, blobSidecarVersion: api.currentBlobSidecarVersion(),
} }
if err := args.setDefaults(ctx, api.b, config); err != nil { if err := args.setDefaults(ctx, api.b, config); err != nil {
return nil, err return nil, err
@ -1626,6 +1635,14 @@ func (api *TransactionAPI) FillTransaction(ctx context.Context, args Transaction
return &SignTransactionResult{data, tx}, nil return &SignTransactionResult{data, tx}, nil
} }
func (api *TransactionAPI) currentBlobSidecarVersion() byte {
h := api.b.CurrentHeader()
if api.b.ChainConfig().IsOsaka(h.Number, h.Time) {
return types.BlobSidecarVersion1
}
return types.BlobSidecarVersion0
}
// SendRawTransaction will add the signed transaction to the transaction pool. // SendRawTransaction will add the signed transaction to the transaction pool.
// The sender is responsible for signing the transaction and using the correct nonce. // The sender is responsible for signing the transaction and using the correct nonce.
func (api *TransactionAPI) SendRawTransaction(ctx context.Context, input hexutil.Bytes) (common.Hash, error) { func (api *TransactionAPI) SendRawTransaction(ctx context.Context, input hexutil.Bytes) (common.Hash, error) {
@ -1633,6 +1650,19 @@ func (api *TransactionAPI) SendRawTransaction(ctx context.Context, input hexutil
if err := tx.UnmarshalBinary(input); err != nil { if err := tx.UnmarshalBinary(input); err != nil {
return common.Hash{}, err return common.Hash{}, err
} }
// Convert legacy blob transaction proofs.
// TODO: remove in go-ethereum v1.17.x
if sc := tx.BlobTxSidecar(); sc != nil {
exp := api.currentBlobSidecarVersion()
if sc.Version == types.BlobSidecarVersion0 && exp == types.BlobSidecarVersion1 {
if err := sc.ToV1(); err != nil {
return common.Hash{}, fmt.Errorf("blob sidecar conversion failed: %v", err)
}
tx = tx.WithBlobTxSidecar(sc)
}
}
return SubmitTransaction(ctx, api.b, tx) return SubmitTransaction(ctx, api.b, tx)
} }

View file

@ -1327,10 +1327,11 @@ func TestSimulateV1(t *testing.T) {
validation = true validation = true
) )
type log struct { type log struct {
Address common.Address `json:"address"` Address common.Address `json:"address"`
Topics []common.Hash `json:"topics"` Topics []common.Hash `json:"topics"`
Data hexutil.Bytes `json:"data"` Data hexutil.Bytes `json:"data"`
BlockNumber hexutil.Uint64 `json:"blockNumber"` BlockNumber hexutil.Uint64 `json:"blockNumber"`
BlockTimestamp hexutil.Uint64 `json:"blockTimestamp"`
// Skip txHash // Skip txHash
//TxHash common.Hash `json:"transactionHash" gencodec:"required"` //TxHash common.Hash `json:"transactionHash" gencodec:"required"`
TxIndex hexutil.Uint `json:"transactionIndex"` TxIndex hexutil.Uint `json:"transactionIndex"`
@ -1677,10 +1678,11 @@ func TestSimulateV1(t *testing.T) {
Calls: []callRes{{ Calls: []callRes{{
ReturnValue: "0x", ReturnValue: "0x",
Logs: []log{{ Logs: []log{{
Address: randomAccounts[2].addr, Address: randomAccounts[2].addr,
Topics: []common.Hash{common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")}, Topics: []common.Hash{common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")},
BlockNumber: hexutil.Uint64(11), BlockNumber: hexutil.Uint64(11),
Data: hexutil.Bytes{}, BlockTimestamp: hexutil.Uint64(0x70),
Data: hexutil.Bytes{},
}}, }},
GasUsed: "0x5508", GasUsed: "0x5508",
Status: "0x1", Status: "0x1",
@ -1853,8 +1855,9 @@ func TestSimulateV1(t *testing.T) {
addressToHash(accounts[0].addr), addressToHash(accounts[0].addr),
addressToHash(randomAccounts[0].addr), addressToHash(randomAccounts[0].addr),
}, },
Data: hexutil.Bytes(common.BigToHash(big.NewInt(50)).Bytes()), Data: hexutil.Bytes(common.BigToHash(big.NewInt(50)).Bytes()),
BlockNumber: hexutil.Uint64(11), BlockNumber: hexutil.Uint64(11),
BlockTimestamp: hexutil.Uint64(0x70),
}, { }, {
Address: transferAddress, Address: transferAddress,
Topics: []common.Hash{ Topics: []common.Hash{
@ -1862,9 +1865,10 @@ func TestSimulateV1(t *testing.T) {
addressToHash(randomAccounts[0].addr), addressToHash(randomAccounts[0].addr),
addressToHash(fixedAccount.addr), addressToHash(fixedAccount.addr),
}, },
Data: hexutil.Bytes(common.BigToHash(big.NewInt(100)).Bytes()), Data: hexutil.Bytes(common.BigToHash(big.NewInt(100)).Bytes()),
BlockNumber: hexutil.Uint64(11), BlockNumber: hexutil.Uint64(11),
Index: hexutil.Uint(1), BlockTimestamp: hexutil.Uint64(0x70),
Index: hexutil.Uint(1),
}}, }},
Status: "0x1", Status: "0x1",
}}, }},
@ -3746,8 +3750,8 @@ func TestCreateAccessListWithStateOverrides(t *testing.T) {
if err != nil { if err != nil {
t.Fatalf("Failed to create access list: %v", err) t.Fatalf("Failed to create access list: %v", err)
} }
if err != nil || result == nil { if result == nil {
t.Fatalf("Failed to create access list: %v", err) t.Fatalf("Failed to create access list: result is nil")
} }
require.NotNil(t, result.Accesslist) require.NotNil(t, result.Accesslist)

View file

@ -53,15 +53,17 @@ type tracer struct {
count int count int
traceTransfers bool traceTransfers bool
blockNumber uint64 blockNumber uint64
blockTimestamp uint64
blockHash common.Hash blockHash common.Hash
txHash common.Hash txHash common.Hash
txIdx uint txIdx uint
} }
func newTracer(traceTransfers bool, blockNumber uint64, blockHash, txHash common.Hash, txIndex uint) *tracer { func newTracer(traceTransfers bool, blockNumber uint64, blockTimestamp uint64, blockHash, txHash common.Hash, txIndex uint) *tracer {
return &tracer{ return &tracer{
traceTransfers: traceTransfers, traceTransfers: traceTransfers,
blockNumber: blockNumber, blockNumber: blockNumber,
blockTimestamp: blockTimestamp,
blockHash: blockHash, blockHash: blockHash,
txHash: txHash, txHash: txHash,
txIdx: txIndex, txIdx: txIndex,
@ -115,14 +117,15 @@ func (t *tracer) onLog(log *types.Log) {
func (t *tracer) captureLog(address common.Address, topics []common.Hash, data []byte) { func (t *tracer) captureLog(address common.Address, topics []common.Hash, data []byte) {
t.logs[len(t.logs)-1] = append(t.logs[len(t.logs)-1], &types.Log{ t.logs[len(t.logs)-1] = append(t.logs[len(t.logs)-1], &types.Log{
Address: address, Address: address,
Topics: topics, Topics: topics,
Data: data, Data: data,
BlockNumber: t.blockNumber, BlockNumber: t.blockNumber,
BlockHash: t.blockHash, BlockTimestamp: t.blockTimestamp,
TxHash: t.txHash, BlockHash: t.blockHash,
TxIndex: t.txIdx, TxHash: t.txHash,
Index: uint(t.count), TxIndex: t.txIdx,
Index: uint(t.count),
}) })
t.count++ t.count++
} }

View file

@ -244,7 +244,7 @@ func (sim *simulator) processBlock(ctx context.Context, block *simBlock, header,
callResults = make([]simCallResult, len(block.Calls)) callResults = make([]simCallResult, len(block.Calls))
receipts = make([]*types.Receipt, len(block.Calls)) receipts = make([]*types.Receipt, len(block.Calls))
// Block hash will be repaired after execution. // Block hash will be repaired after execution.
tracer = newTracer(sim.traceTransfers, blockContext.BlockNumber.Uint64(), common.Hash{}, common.Hash{}, 0) tracer = newTracer(sim.traceTransfers, blockContext.BlockNumber.Uint64(), blockContext.Time, common.Hash{}, common.Hash{}, 0)
vmConfig = &vm.Config{ vmConfig = &vm.Config{
NoBaseFee: !sim.validate, NoBaseFee: !sim.validate,
Tracer: tracer.Hooks(), Tracer: tracer.Hooks(),
@ -541,3 +541,23 @@ func (b *simBackend) HeaderByNumber(ctx context.Context, number rpc.BlockNumber)
func (b *simBackend) ChainConfig() *params.ChainConfig { func (b *simBackend) ChainConfig() *params.ChainConfig {
return b.b.ChainConfig() return b.b.ChainConfig()
} }
func (b *simBackend) HeaderByHash(ctx context.Context, hash common.Hash) (*types.Header, error) {
if b.base.Hash() == hash {
return b.base, nil
}
if header, err := b.b.HeaderByHash(ctx, hash); err == nil {
return header, nil
}
// Check simulated headers
for _, header := range b.headers {
if header.Hash() == hash {
return header, nil
}
}
return nil, errors.New("header not found")
}
func (b *simBackend) CurrentHeader() *types.Header {
return b.b.CurrentHeader()
}

View file

@ -500,7 +500,7 @@ func (w *gzipResponseWriter) init() {
hdr := w.resp.Header() hdr := w.resp.Header()
length := hdr.Get("content-length") length := hdr.Get("content-length")
if len(length) > 0 { if len(length) > 0 {
if n, err := strconv.ParseUint(length, 10, 64); err != nil { if n, err := strconv.ParseUint(length, 10, 64); err == nil {
w.hasLength = true w.hasLength = true
w.contentLength = n w.contentLength = n
} }

View file

@ -153,6 +153,7 @@ type lookupIterator struct {
cancel func() cancel func()
lookup *lookup lookup *lookup
tabRefreshing <-chan struct{} tabRefreshing <-chan struct{}
lastLookup time.Time
} }
type lookupFunc func(ctx context.Context) *lookup type lookupFunc func(ctx context.Context) *lookup
@ -185,6 +186,9 @@ func (it *lookupIterator) Next() bool {
return false return false
} }
if it.lookup == nil { if it.lookup == nil {
// Ensure enough time has passed between lookup creations.
it.slowdown()
it.lookup = it.nextLookup(it.ctx) it.lookup = it.nextLookup(it.ctx)
if it.lookup.empty() { if it.lookup.empty() {
// If the lookup is empty right after creation, it means the local table // If the lookup is empty right after creation, it means the local table
@ -235,6 +239,25 @@ func (it *lookupIterator) lookupFailed(tab *Table, timeout time.Duration) {
tab.waitForNodes(tout, 1) tab.waitForNodes(tout, 1)
} }
// slowdown applies a delay between creating lookups. This exists to prevent hot-spinning
// in some test environments where lookups don't yield any results.
func (it *lookupIterator) slowdown() {
const minInterval = 1 * time.Second
now := time.Now()
diff := now.Sub(it.lastLookup)
it.lastLookup = now
if diff > minInterval {
return
}
wait := time.NewTimer(diff)
defer wait.Stop()
select {
case <-wait.C:
case <-it.ctx.Done():
}
}
// Close ends the iterator. // Close ends the iterator.
func (it *lookupIterator) Close() { func (it *lookupIterator) Close() {
it.cancel() it.cancel()

View file

@ -54,9 +54,8 @@ const (
bucketIPLimit, bucketSubnet = 2, 24 // at most 2 addresses from the same /24 bucketIPLimit, bucketSubnet = 2, 24 // at most 2 addresses from the same /24
tableIPLimit, tableSubnet = 10, 24 tableIPLimit, tableSubnet = 10, 24
seedMinTableTime = 5 * time.Minute seedCount = 30
seedCount = 30 seedMaxAge = 5 * 24 * time.Hour
seedMaxAge = 5 * 24 * time.Hour
) )
// Table is the 'node table', a Kademlia-like index of neighbor nodes. The table keeps // Table is the 'node table', a Kademlia-like index of neighbor nodes. The table keeps

View file

@ -575,6 +575,13 @@ func startLocalhostV4(t *testing.T, cfg Config) *UDPv4 {
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
// Wait for bootstrap to complete.
select {
case <-udp.tab.initDone:
case <-time.After(5 * time.Second):
t.Fatalf("timed out waiting for table initialization")
}
return udp return udp
} }

View file

@ -174,11 +174,12 @@ type AsyncFilterFunc func(context.Context, *Node) *Node
// AsyncFilter creates an iterator which checks nodes in parallel. // AsyncFilter creates an iterator which checks nodes in parallel.
// The 'check' function is called on multiple goroutines to filter each node // The 'check' function is called on multiple goroutines to filter each node
// from the upstream iterator. When check returns nil, the node will be skipped. // from the upstream iterator. When check returns nil, the node will be skipped.
// It can also return a new node to be returned by the iterator instead of the . // It can also return a new node to be returned by the iterator instead of the
// original one.
func AsyncFilter(it Iterator, check AsyncFilterFunc, workers int) Iterator { func AsyncFilter(it Iterator, check AsyncFilterFunc, workers int) Iterator {
f := &asyncFilterIter{ f := &asyncFilterIter{
it: ensureSourceIter(it), it: ensureSourceIter(it),
slots: make(chan struct{}, workers+1), slots: make(chan struct{}, workers+1), // extra 1 slot to make sure all the goroutines can be completed
passed: make(chan iteratorItem), passed: make(chan iteratorItem),
} }
for range cap(f.slots) { for range cap(f.slots) {
@ -193,6 +194,9 @@ func AsyncFilter(it Iterator, check AsyncFilterFunc, workers int) Iterator {
return return
case <-f.slots: case <-f.slots:
} }
defer func() {
f.slots <- struct{}{} // the iterator has ended
}()
// read from the iterator and start checking nodes in parallel // read from the iterator and start checking nodes in parallel
// when a node is checked, it will be sent to the passed channel // when a node is checked, it will be sent to the passed channel
// and the slot will be released // and the slot will be released
@ -201,7 +205,11 @@ func AsyncFilter(it Iterator, check AsyncFilterFunc, workers int) Iterator {
nodeSource := f.it.NodeSource() nodeSource := f.it.NodeSource()
// check the node async, in a separate goroutine // check the node async, in a separate goroutine
<-f.slots select {
case <-ctx.Done():
return
case <-f.slots:
}
go func() { go func() {
if nn := check(ctx, node); nn != nil { if nn := check(ctx, node); nn != nil {
item := iteratorItem{nn, nodeSource} item := iteratorItem{nn, nodeSource}
@ -213,8 +221,6 @@ func AsyncFilter(it Iterator, check AsyncFilterFunc, workers int) Iterator {
f.slots <- struct{}{} f.slots <- struct{}{}
}() }()
} }
// the iterator has ended
f.slots <- struct{}{}
}() }()
return f return f

View file

@ -45,7 +45,7 @@ func TestReadNodesCycle(t *testing.T) {
nodes := ReadNodes(iter, 10) nodes := ReadNodes(iter, 10)
checkNodes(t, nodes, 3) checkNodes(t, nodes, 3)
if iter.count != 10 { if iter.count != 10 {
t.Fatalf("%d calls to Next, want %d", iter.count, 100) t.Fatalf("%d calls to Next, want %d", iter.count, 10)
} }
} }

View file

@ -19,6 +19,7 @@ package enode
import ( import (
"crypto/ecdsa" "crypto/ecdsa"
"encoding/base64" "encoding/base64"
"encoding/binary"
"encoding/hex" "encoding/hex"
"errors" "errors"
"fmt" "fmt"
@ -358,9 +359,10 @@ func ParseID(in string) (ID, error) {
// Returns -1 if a is closer to target, 1 if b is closer to target // Returns -1 if a is closer to target, 1 if b is closer to target
// and 0 if they are equal. // and 0 if they are equal.
func DistCmp(target, a, b ID) int { func DistCmp(target, a, b ID) int {
for i := range target { for i := 0; i < len(target); i += 8 {
da := a[i] ^ target[i] tn := binary.BigEndian.Uint64(target[i : i+8])
db := b[i] ^ target[i] da := tn ^ binary.BigEndian.Uint64(a[i:i+8])
db := tn ^ binary.BigEndian.Uint64(b[i:i+8])
if da > db { if da > db {
return 1 return 1
} else if da < db { } else if da < db {
@ -373,12 +375,14 @@ func DistCmp(target, a, b ID) int {
// LogDist returns the logarithmic distance between a and b, log2(a ^ b). // LogDist returns the logarithmic distance between a and b, log2(a ^ b).
func LogDist(a, b ID) int { func LogDist(a, b ID) int {
lz := 0 lz := 0
for i := range a { for i := 0; i < len(a); i += 8 {
x := a[i] ^ b[i] ai := binary.BigEndian.Uint64(a[i : i+8])
bi := binary.BigEndian.Uint64(b[i : i+8])
x := ai ^ bi
if x == 0 { if x == 0 {
lz += 8 lz += 64
} else { } else {
lz += bits.LeadingZeros8(x) lz += bits.LeadingZeros64(x)
break break
} }
} }

View file

@ -368,6 +368,16 @@ func TestID_distcmpEqual(t *testing.T) {
} }
} }
func BenchmarkDistCmp(b *testing.B) {
base := ID{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}
aID := ID{15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0}
bID := ID{15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 1}
b.ResetTimer()
for i := 0; i < b.N; i++ {
_ = DistCmp(base, aID, bID)
}
}
func TestID_logdist(t *testing.T) { func TestID_logdist(t *testing.T) {
logdistBig := func(a, b ID) int { logdistBig := func(a, b ID) int {
abig, bbig := new(big.Int).SetBytes(a[:]), new(big.Int).SetBytes(b[:]) abig, bbig := new(big.Int).SetBytes(a[:]), new(big.Int).SetBytes(b[:])
@ -378,6 +388,28 @@ func TestID_logdist(t *testing.T) {
} }
} }
func makeIDs() (ID, ID) {
var a, b ID
size := len(a)
// last byte differs
for i := 0; i < size-1; i++ {
a[i] = 0xAA
b[i] = 0xAA
}
a[size-1] = 0xAA
b[size-1] = 0xAB
return a, b
}
// Benchmark LogDist
func BenchmarkLogDist(b *testing.B) {
aID, bID := makeIDs() // 256-bit ID
b.ResetTimer()
for i := 0; i < b.N; i++ {
_ = LogDist(aID, bID)
}
}
// The random tests is likely to miss the case where a and b are equal, // The random tests is likely to miss the case where a and b are equal,
// this test checks it explicitly. // this test checks it explicitly.
func TestID_logdistEqual(t *testing.T) { func TestID_logdistEqual(t *testing.T) {

View file

@ -377,25 +377,25 @@ var (
Max: 9, Max: 9,
UpdateFraction: 5007716, UpdateFraction: 5007716,
} }
// DefaultBPO1BlobConfig is the default blob configuration for the Osaka fork. // DefaultBPO1BlobConfig is the default blob configuration for the BPO1 fork.
DefaultBPO1BlobConfig = &BlobConfig{ DefaultBPO1BlobConfig = &BlobConfig{
Target: 10, Target: 10,
Max: 15, Max: 15,
UpdateFraction: 8346193, UpdateFraction: 8346193,
} }
// DefaultBPO1BlobConfig is the default blob configuration for the Osaka fork. // DefaultBPO2BlobConfig is the default blob configuration for the BPO2 fork.
DefaultBPO2BlobConfig = &BlobConfig{ DefaultBPO2BlobConfig = &BlobConfig{
Target: 14, Target: 14,
Max: 21, Max: 21,
UpdateFraction: 11684671, UpdateFraction: 11684671,
} }
// DefaultBPO1BlobConfig is the default blob configuration for the Osaka fork. // DefaultBPO3BlobConfig is the default blob configuration for the BPO3 fork.
DefaultBPO3BlobConfig = &BlobConfig{ DefaultBPO3BlobConfig = &BlobConfig{
Target: 21, Target: 21,
Max: 32, Max: 32,
UpdateFraction: 20609697, UpdateFraction: 20609697,
} }
// DefaultBPO1BlobConfig is the default blob configuration for the Osaka fork. // DefaultBPO4BlobConfig is the default blob configuration for the BPO4 fork.
DefaultBPO4BlobConfig = &BlobConfig{ DefaultBPO4BlobConfig = &BlobConfig{
Target: 14, Target: 14,
Max: 21, Max: 21,
@ -448,16 +448,17 @@ type ChainConfig struct {
// Fork scheduling was switched from blocks to timestamps here // Fork scheduling was switched from blocks to timestamps here
ShanghaiTime *uint64 `json:"shanghaiTime,omitempty"` // Shanghai switch time (nil = no fork, 0 = already on shanghai) ShanghaiTime *uint64 `json:"shanghaiTime,omitempty"` // Shanghai switch time (nil = no fork, 0 = already on shanghai)
CancunTime *uint64 `json:"cancunTime,omitempty"` // Cancun switch time (nil = no fork, 0 = already on cancun) CancunTime *uint64 `json:"cancunTime,omitempty"` // Cancun switch time (nil = no fork, 0 = already on cancun)
PragueTime *uint64 `json:"pragueTime,omitempty"` // Prague switch time (nil = no fork, 0 = already on prague) PragueTime *uint64 `json:"pragueTime,omitempty"` // Prague switch time (nil = no fork, 0 = already on prague)
OsakaTime *uint64 `json:"osakaTime,omitempty"` // Osaka switch time (nil = no fork, 0 = already on osaka) OsakaTime *uint64 `json:"osakaTime,omitempty"` // Osaka switch time (nil = no fork, 0 = already on osaka)
VerkleTime *uint64 `json:"verkleTime,omitempty"` // Verkle switch time (nil = no fork, 0 = already on verkle) BPO1Time *uint64 `json:"bpo1Time,omitempty"` // BPO1 switch time (nil = no fork, 0 = already on bpo1)
BPO1Time *uint64 `json:"bpo1Time,omitempty"` // BPO1 switch time (nil = no fork, 0 = already on bpo1) BPO2Time *uint64 `json:"bpo2Time,omitempty"` // BPO2 switch time (nil = no fork, 0 = already on bpo2)
BPO2Time *uint64 `json:"bpo2Time,omitempty"` // BPO2 switch time (nil = no fork, 0 = already on bpo2) BPO3Time *uint64 `json:"bpo3Time,omitempty"` // BPO3 switch time (nil = no fork, 0 = already on bpo3)
BPO3Time *uint64 `json:"bpo3Time,omitempty"` // BPO3 switch time (nil = no fork, 0 = already on bpo3) BPO4Time *uint64 `json:"bpo4Time,omitempty"` // BPO4 switch time (nil = no fork, 0 = already on bpo4)
BPO4Time *uint64 `json:"bpo4Time,omitempty"` // BPO4 switch time (nil = no fork, 0 = already on bpo4) BPO5Time *uint64 `json:"bpo5Time,omitempty"` // BPO5 switch time (nil = no fork, 0 = already on bpo5)
BPO5Time *uint64 `json:"bpo5Time,omitempty"` // BPO5 switch time (nil = no fork, 0 = already on bpo5) AmsterdamTime *uint64 `json:"amsterdamTime,omitempty"` // Amsterdam switch time (nil = no fork, 0 = already on amsterdam)
VerkleTime *uint64 `json:"verkleTime,omitempty"` // Verkle switch time (nil = no fork, 0 = already on verkle)
// TerminalTotalDifficulty is the amount of total difficulty reached by // TerminalTotalDifficulty is the amount of total difficulty reached by
// the network that triggers the consensus upgrade. // the network that triggers the consensus upgrade.
@ -503,6 +504,96 @@ func (c CliqueConfig) String() string {
return fmt.Sprintf("clique(period: %d, epoch: %d)", c.Period, c.Epoch) return fmt.Sprintf("clique(period: %d, epoch: %d)", c.Period, c.Epoch)
} }
// String implements the fmt.Stringer interface, returning a string representation
// of ChainConfig.
func (c *ChainConfig) String() string {
result := fmt.Sprintf("ChainConfig{ChainID: %v", c.ChainID)
// Add block-based forks
if c.HomesteadBlock != nil {
result += fmt.Sprintf(", HomesteadBlock: %v", c.HomesteadBlock)
}
if c.DAOForkBlock != nil {
result += fmt.Sprintf(", DAOForkBlock: %v", c.DAOForkBlock)
}
if c.EIP150Block != nil {
result += fmt.Sprintf(", EIP150Block: %v", c.EIP150Block)
}
if c.EIP155Block != nil {
result += fmt.Sprintf(", EIP155Block: %v", c.EIP155Block)
}
if c.EIP158Block != nil {
result += fmt.Sprintf(", EIP158Block: %v", c.EIP158Block)
}
if c.ByzantiumBlock != nil {
result += fmt.Sprintf(", ByzantiumBlock: %v", c.ByzantiumBlock)
}
if c.ConstantinopleBlock != nil {
result += fmt.Sprintf(", ConstantinopleBlock: %v", c.ConstantinopleBlock)
}
if c.PetersburgBlock != nil {
result += fmt.Sprintf(", PetersburgBlock: %v", c.PetersburgBlock)
}
if c.IstanbulBlock != nil {
result += fmt.Sprintf(", IstanbulBlock: %v", c.IstanbulBlock)
}
if c.MuirGlacierBlock != nil {
result += fmt.Sprintf(", MuirGlacierBlock: %v", c.MuirGlacierBlock)
}
if c.BerlinBlock != nil {
result += fmt.Sprintf(", BerlinBlock: %v", c.BerlinBlock)
}
if c.LondonBlock != nil {
result += fmt.Sprintf(", LondonBlock: %v", c.LondonBlock)
}
if c.ArrowGlacierBlock != nil {
result += fmt.Sprintf(", ArrowGlacierBlock: %v", c.ArrowGlacierBlock)
}
if c.GrayGlacierBlock != nil {
result += fmt.Sprintf(", GrayGlacierBlock: %v", c.GrayGlacierBlock)
}
if c.MergeNetsplitBlock != nil {
result += fmt.Sprintf(", MergeNetsplitBlock: %v", c.MergeNetsplitBlock)
}
// Add timestamp-based forks
if c.ShanghaiTime != nil {
result += fmt.Sprintf(", ShanghaiTime: %v", *c.ShanghaiTime)
}
if c.CancunTime != nil {
result += fmt.Sprintf(", CancunTime: %v", *c.CancunTime)
}
if c.PragueTime != nil {
result += fmt.Sprintf(", PragueTime: %v", *c.PragueTime)
}
if c.OsakaTime != nil {
result += fmt.Sprintf(", OsakaTime: %v", *c.OsakaTime)
}
if c.BPO1Time != nil {
result += fmt.Sprintf(", BPO1Time: %v", *c.BPO1Time)
}
if c.BPO2Time != nil {
result += fmt.Sprintf(", BPO2Time: %v", *c.BPO2Time)
}
if c.BPO3Time != nil {
result += fmt.Sprintf(", BPO3Time: %v", *c.BPO3Time)
}
if c.BPO4Time != nil {
result += fmt.Sprintf(", BPO4Time: %v", *c.BPO4Time)
}
if c.BPO5Time != nil {
result += fmt.Sprintf(", BPO5Time: %v", *c.BPO5Time)
}
if c.AmsterdamTime != nil {
result += fmt.Sprintf(", AmsterdamTime: %v", *c.AmsterdamTime)
}
if c.VerkleTime != nil {
result += fmt.Sprintf(", VerkleTime: %v", *c.VerkleTime)
}
result += "}"
return result
}
// Description returns a human-readable description of ChainConfig. // Description returns a human-readable description of ChainConfig.
func (c *ChainConfig) Description() string { func (c *ChainConfig) Description() string {
var banner string var banner string
@ -527,34 +618,32 @@ func (c *ChainConfig) Description() string {
// makes sense for mainnet should be optional at printing to avoid bloating // makes sense for mainnet should be optional at printing to avoid bloating
// the output for testnets and private networks. // the output for testnets and private networks.
banner += "Pre-Merge hard forks (block based):\n" banner += "Pre-Merge hard forks (block based):\n"
banner += fmt.Sprintf(" - Homestead: #%-8v (https://ethereum.github.io/execution-specs/src/ethereum/forks/homestead/__init__.py.html)\n", c.HomesteadBlock) banner += fmt.Sprintf(" - Homestead: #%-8v\n", c.HomesteadBlock)
if c.DAOForkBlock != nil { if c.DAOForkBlock != nil {
banner += fmt.Sprintf(" - DAO Fork: #%-8v (https://ethereum.github.io/execution-specs/src/ethereum/forks/dao_fork/__init__.py.html)\n", c.DAOForkBlock) banner += fmt.Sprintf(" - DAO Fork: #%-8v\n", c.DAOForkBlock)
} }
banner += fmt.Sprintf(" - Tangerine Whistle (EIP 150): #%-8v (https://ethereum.github.io/execution-specs/src/ethereum/forks/tangerine_whistle/__init__.py.html)\n", c.EIP150Block) banner += fmt.Sprintf(" - Tangerine Whistle (EIP 150): #%-8v\n", c.EIP150Block)
banner += fmt.Sprintf(" - Spurious Dragon/1 (EIP 155): #%-8v (https://ethereum.github.io/execution-specs/src/ethereum/forks/spurious_dragon/__init__.py.html)\n", c.EIP155Block) banner += fmt.Sprintf(" - Spurious Dragon/1 (EIP 155): #%-8v\n", c.EIP155Block)
banner += fmt.Sprintf(" - Spurious Dragon/2 (EIP 158): #%-8v (https://ethereum.github.io/execution-specs/src/ethereum/forks/spurious_dragon/__init__.py.html)\n", c.EIP155Block) banner += fmt.Sprintf(" - Spurious Dragon/2 (EIP 158): #%-8v\n", c.EIP158Block)
banner += fmt.Sprintf(" - Byzantium: #%-8v (https://ethereum.github.io/execution-specs/src/ethereum/forks/byzantium/__init__.py.html)\n", c.ByzantiumBlock) banner += fmt.Sprintf(" - Byzantium: #%-8v\n", c.ByzantiumBlock)
banner += fmt.Sprintf(" - Constantinople: #%-8v (https://ethereum.github.io/execution-specs/src/ethereum/forks/constantinople/__init__.py.html)\n", c.ConstantinopleBlock) banner += fmt.Sprintf(" - Constantinople: #%-8v\n", c.ConstantinopleBlock)
banner += fmt.Sprintf(" - Petersburg: #%-8v (https://ethereum.github.io/execution-specs/src/ethereum/forks/constantinople/__init__.py.html)\n", c.PetersburgBlock) banner += fmt.Sprintf(" - Petersburg: #%-8v\n", c.PetersburgBlock)
banner += fmt.Sprintf(" - Istanbul: #%-8v (https://ethereum.github.io/execution-specs/src/ethereum/forks/istanbul/__init__.py.html)\n", c.IstanbulBlock) banner += fmt.Sprintf(" - Istanbul: #%-8v\n", c.IstanbulBlock)
if c.MuirGlacierBlock != nil { if c.MuirGlacierBlock != nil {
banner += fmt.Sprintf(" - Muir Glacier: #%-8v (https://ethereum.github.io/execution-specs/src/ethereum/forks/muir_glacier/__init__.py.html)\n", c.MuirGlacierBlock) banner += fmt.Sprintf(" - Muir Glacier: #%-8v\n", c.MuirGlacierBlock)
} }
banner += fmt.Sprintf(" - Berlin: #%-8v (https://ethereum.github.io/execution-specs/src/ethereum/forks/berlin/__init__.py.html)\n", c.BerlinBlock) banner += fmt.Sprintf(" - Berlin: #%-8v\n", c.BerlinBlock)
banner += fmt.Sprintf(" - London: #%-8v (https://ethereum.github.io/execution-specs/src/ethereum/forks/london/__init__.py.html)\n", c.LondonBlock) banner += fmt.Sprintf(" - London: #%-8v\n", c.LondonBlock)
if c.ArrowGlacierBlock != nil { if c.ArrowGlacierBlock != nil {
banner += fmt.Sprintf(" - Arrow Glacier: #%-8v (https://ethereum.github.io/execution-specs/src/ethereum/forks/arrow_glacier/__init__.py.html)\n", c.ArrowGlacierBlock) banner += fmt.Sprintf(" - Arrow Glacier: #%-8v\n", c.ArrowGlacierBlock)
} }
if c.GrayGlacierBlock != nil { if c.GrayGlacierBlock != nil {
banner += fmt.Sprintf(" - Gray Glacier: #%-8v (https://ethereum.github.io/execution-specs/src/ethereum/forks/gray_glacier/__init__.py.html)\n", c.GrayGlacierBlock) banner += fmt.Sprintf(" - Gray Glacier: #%-8v\n", c.GrayGlacierBlock)
} }
banner += "\n" banner += "\n"
// Add a special section for the merge as it's non-obvious // Add a special section for the merge as it's non-obvious
banner += "Merge configured:\n" banner += "Merge configured:\n"
banner += " - Hard-fork specification: https://ethereum.github.io/execution-specs/src/ethereum/forks/paris/__init__.py.html\n"
banner += " - Network known to be merged\n"
banner += fmt.Sprintf(" - Total terminal difficulty: %v\n", c.TerminalTotalDifficulty) banner += fmt.Sprintf(" - Total terminal difficulty: %v\n", c.TerminalTotalDifficulty)
if c.MergeNetsplitBlock != nil { if c.MergeNetsplitBlock != nil {
banner += fmt.Sprintf(" - Merge netsplit block: #%-8v\n", c.MergeNetsplitBlock) banner += fmt.Sprintf(" - Merge netsplit block: #%-8v\n", c.MergeNetsplitBlock)
@ -564,35 +653,39 @@ func (c *ChainConfig) Description() string {
// Create a list of forks post-merge // Create a list of forks post-merge
banner += "Post-Merge hard forks (timestamp based):\n" banner += "Post-Merge hard forks (timestamp based):\n"
if c.ShanghaiTime != nil { if c.ShanghaiTime != nil {
banner += fmt.Sprintf(" - Shanghai: @%-10v (https://ethereum.github.io/execution-specs/src/ethereum/forks/shanghai/__init__.py.html)\n", *c.ShanghaiTime) banner += fmt.Sprintf(" - Shanghai: @%-10v\n", *c.ShanghaiTime)
} }
if c.CancunTime != nil { if c.CancunTime != nil {
banner += fmt.Sprintf(" - Cancun: @%-10v (https://ethereum.github.io/execution-specs/src/ethereum/forks/cancun/__init__.py.html)\n", *c.CancunTime) banner += fmt.Sprintf(" - Cancun: @%-10v blob: (%s)\n", *c.CancunTime, c.BlobScheduleConfig.Cancun)
} }
if c.PragueTime != nil { if c.PragueTime != nil {
banner += fmt.Sprintf(" - Prague: @%-10v (https://ethereum.github.io/execution-specs/src/ethereum/forks/prague/__init__.py.html)\n", *c.PragueTime) banner += fmt.Sprintf(" - Prague: @%-10v blob: (%s)\n", *c.PragueTime, c.BlobScheduleConfig.Prague)
} }
if c.OsakaTime != nil { if c.OsakaTime != nil {
banner += fmt.Sprintf(" - Osaka: @%-10v (https://ethereum.github.io/execution-specs/src/ethereum/forks/osaka/__init__.py.html)\n", *c.OsakaTime) banner += fmt.Sprintf(" - Osaka: @%-10v blob: (%s)\n", *c.OsakaTime, c.BlobScheduleConfig.Osaka)
}
if c.VerkleTime != nil {
banner += fmt.Sprintf(" - Verkle: @%-10v\n", *c.VerkleTime)
} }
if c.BPO1Time != nil { if c.BPO1Time != nil {
banner += fmt.Sprintf(" - BPO1: @%-10v\n", *c.BPO1Time) banner += fmt.Sprintf(" - BPO1: @%-10v blob: (%s)\n", *c.BPO1Time, c.BlobScheduleConfig.BPO1)
} }
if c.BPO2Time != nil { if c.BPO2Time != nil {
banner += fmt.Sprintf(" - BPO2: @%-10v\n", *c.BPO2Time) banner += fmt.Sprintf(" - BPO2: @%-10v blob: (%s)\n", *c.BPO2Time, c.BlobScheduleConfig.BPO2)
} }
if c.BPO3Time != nil { if c.BPO3Time != nil {
banner += fmt.Sprintf(" - BPO3: @%-10v\n", *c.BPO3Time) banner += fmt.Sprintf(" - BPO3: @%-10v blob: (%s)\n", *c.BPO3Time, c.BlobScheduleConfig.BPO3)
} }
if c.BPO4Time != nil { if c.BPO4Time != nil {
banner += fmt.Sprintf(" - BPO4: @%-10v\n", *c.BPO4Time) banner += fmt.Sprintf(" - BPO4: @%-10v blob: (%s)\n", *c.BPO4Time, c.BlobScheduleConfig.BPO4)
} }
if c.BPO5Time != nil { if c.BPO5Time != nil {
banner += fmt.Sprintf(" - BPO5: @%-10v\n", *c.BPO5Time) banner += fmt.Sprintf(" - BPO5: @%-10v blob: (%s)\n", *c.BPO5Time, c.BlobScheduleConfig.BPO5)
} }
if c.AmsterdamTime != nil {
banner += fmt.Sprintf(" - Amsterdam: @%-10v blob: (%s)\n", *c.AmsterdamTime, c.BlobScheduleConfig.Amsterdam)
}
if c.VerkleTime != nil {
banner += fmt.Sprintf(" - Verkle: @%-10v blob: (%s)\n", *c.VerkleTime, c.BlobScheduleConfig.Verkle)
}
banner += fmt.Sprintf("\nAll fork specifications can be found at https://ethereum.github.io/execution-specs/src/ethereum/forks/\n")
return banner return banner
} }
@ -603,17 +696,26 @@ type BlobConfig struct {
UpdateFraction uint64 `json:"baseFeeUpdateFraction"` UpdateFraction uint64 `json:"baseFeeUpdateFraction"`
} }
// String implement fmt.Stringer, returning string format blob config.
func (bc *BlobConfig) String() string {
if bc == nil {
return "nil"
}
return fmt.Sprintf("target: %d, max: %d, fraction: %d", bc.Target, bc.Max, bc.UpdateFraction)
}
// BlobScheduleConfig determines target and max number of blobs allow per fork. // BlobScheduleConfig determines target and max number of blobs allow per fork.
type BlobScheduleConfig struct { type BlobScheduleConfig struct {
Cancun *BlobConfig `json:"cancun,omitempty"` Cancun *BlobConfig `json:"cancun,omitempty"`
Prague *BlobConfig `json:"prague,omitempty"` Prague *BlobConfig `json:"prague,omitempty"`
Osaka *BlobConfig `json:"osaka,omitempty"` Osaka *BlobConfig `json:"osaka,omitempty"`
Verkle *BlobConfig `json:"verkle,omitempty"` Verkle *BlobConfig `json:"verkle,omitempty"`
BPO1 *BlobConfig `json:"bpo1,omitempty"` BPO1 *BlobConfig `json:"bpo1,omitempty"`
BPO2 *BlobConfig `json:"bpo2,omitempty"` BPO2 *BlobConfig `json:"bpo2,omitempty"`
BPO3 *BlobConfig `json:"bpo3,omitempty"` BPO3 *BlobConfig `json:"bpo3,omitempty"`
BPO4 *BlobConfig `json:"bpo4,omitempty"` BPO4 *BlobConfig `json:"bpo4,omitempty"`
BPO5 *BlobConfig `json:"bpo5,omitempty"` BPO5 *BlobConfig `json:"bpo5,omitempty"`
Amsterdam *BlobConfig `json:"amsterdam,omitempty"`
} }
// IsHomestead returns whether num is either equal to the homestead block or greater. // IsHomestead returns whether num is either equal to the homestead block or greater.
@ -726,11 +828,6 @@ func (c *ChainConfig) IsOsaka(num *big.Int, time uint64) bool {
return c.IsLondon(num) && isTimestampForked(c.OsakaTime, time) return c.IsLondon(num) && isTimestampForked(c.OsakaTime, time)
} }
// IsVerkle returns whether time is either equal to the Verkle fork time or greater.
func (c *ChainConfig) IsVerkle(num *big.Int, time uint64) bool {
return c.IsLondon(num) && isTimestampForked(c.VerkleTime, time)
}
// IsBPO1 returns whether time is either equal to the BPO1 fork time or greater. // IsBPO1 returns whether time is either equal to the BPO1 fork time or greater.
func (c *ChainConfig) IsBPO1(num *big.Int, time uint64) bool { func (c *ChainConfig) IsBPO1(num *big.Int, time uint64) bool {
return c.IsLondon(num) && isTimestampForked(c.BPO1Time, time) return c.IsLondon(num) && isTimestampForked(c.BPO1Time, time)
@ -756,6 +853,16 @@ func (c *ChainConfig) IsBPO5(num *big.Int, time uint64) bool {
return c.IsLondon(num) && isTimestampForked(c.BPO5Time, time) return c.IsLondon(num) && isTimestampForked(c.BPO5Time, time)
} }
// IsAmsterdam returns whether time is either equal to the Amsterdam fork time or greater.
func (c *ChainConfig) IsAmsterdam(num *big.Int, time uint64) bool {
return c.IsLondon(num) && isTimestampForked(c.AmsterdamTime, time)
}
// IsVerkle returns whether time is either equal to the Verkle fork time or greater.
func (c *ChainConfig) IsVerkle(num *big.Int, time uint64) bool {
return c.IsLondon(num) && isTimestampForked(c.VerkleTime, time)
}
// IsVerkleGenesis checks whether the verkle fork is activated at the genesis block. // IsVerkleGenesis checks whether the verkle fork is activated at the genesis block.
// //
// Verkle mode is considered enabled if the verkle fork time is configured, // Verkle mode is considered enabled if the verkle fork time is configured,
@ -836,6 +943,7 @@ func (c *ChainConfig) CheckConfigForkOrder() error {
{name: "bpo3", timestamp: c.BPO3Time, optional: true}, {name: "bpo3", timestamp: c.BPO3Time, optional: true},
{name: "bpo4", timestamp: c.BPO4Time, optional: true}, {name: "bpo4", timestamp: c.BPO4Time, optional: true},
{name: "bpo5", timestamp: c.BPO5Time, optional: true}, {name: "bpo5", timestamp: c.BPO5Time, optional: true},
{name: "amsterdam", timestamp: c.AmsterdamTime, optional: true},
} { } {
if lastFork.name != "" { if lastFork.name != "" {
switch { switch {
@ -890,6 +998,7 @@ func (c *ChainConfig) CheckConfigForkOrder() error {
{name: "bpo3", timestamp: c.BPO3Time, config: bsc.BPO3}, {name: "bpo3", timestamp: c.BPO3Time, config: bsc.BPO3},
{name: "bpo4", timestamp: c.BPO4Time, config: bsc.BPO4}, {name: "bpo4", timestamp: c.BPO4Time, config: bsc.BPO4},
{name: "bpo5", timestamp: c.BPO5Time, config: bsc.BPO5}, {name: "bpo5", timestamp: c.BPO5Time, config: bsc.BPO5},
{name: "amsterdam", timestamp: c.AmsterdamTime, config: bsc.Amsterdam},
} { } {
if cur.config != nil { if cur.config != nil {
if err := cur.config.validate(); err != nil { if err := cur.config.validate(); err != nil {
@ -1005,6 +1114,9 @@ func (c *ChainConfig) checkCompatible(newcfg *ChainConfig, headNumber *big.Int,
if isForkTimestampIncompatible(c.BPO5Time, newcfg.BPO5Time, headTimestamp) { if isForkTimestampIncompatible(c.BPO5Time, newcfg.BPO5Time, headTimestamp) {
return newTimestampCompatError("BPO5 fork timestamp", c.BPO5Time, newcfg.BPO5Time) return newTimestampCompatError("BPO5 fork timestamp", c.BPO5Time, newcfg.BPO5Time)
} }
if isForkTimestampIncompatible(c.AmsterdamTime, newcfg.AmsterdamTime, headTimestamp) {
return newTimestampCompatError("Amsterdam fork timestamp", c.AmsterdamTime, newcfg.AmsterdamTime)
}
return nil return nil
} }
@ -1024,6 +1136,8 @@ func (c *ChainConfig) LatestFork(time uint64) forks.Fork {
london := c.LondonBlock london := c.LondonBlock
switch { switch {
case c.IsAmsterdam(london, time):
return forks.Amsterdam
case c.IsBPO5(london, time): case c.IsBPO5(london, time):
return forks.BPO5 return forks.BPO5
case c.IsBPO4(london, time): case c.IsBPO4(london, time):
@ -1259,7 +1373,7 @@ type Rules struct {
IsByzantium, IsConstantinople, IsPetersburg, IsIstanbul bool IsByzantium, IsConstantinople, IsPetersburg, IsIstanbul bool
IsBerlin, IsLondon bool IsBerlin, IsLondon bool
IsMerge, IsShanghai, IsCancun, IsPrague, IsOsaka bool IsMerge, IsShanghai, IsCancun, IsPrague, IsOsaka bool
IsVerkle bool IsAmsterdam, IsVerkle bool
} }
// Rules ensures c's ChainID is not nil. // Rules ensures c's ChainID is not nil.
@ -1289,6 +1403,7 @@ func (c *ChainConfig) Rules(num *big.Int, isMerge bool, timestamp uint64) Rules
IsCancun: isMerge && c.IsCancun(num, timestamp), IsCancun: isMerge && c.IsCancun(num, timestamp),
IsPrague: isMerge && c.IsPrague(num, timestamp), IsPrague: isMerge && c.IsPrague(num, timestamp),
IsOsaka: isMerge && c.IsOsaka(num, timestamp), IsOsaka: isMerge && c.IsOsaka(num, timestamp),
IsAmsterdam: isMerge && c.IsAmsterdam(num, timestamp),
IsVerkle: isVerkle, IsVerkle: isVerkle,
IsEIP4762: isVerkle, IsEIP4762: isVerkle,
} }

View file

@ -45,6 +45,7 @@ const (
BPO3 BPO3
BPO4 BPO4
BPO5 BPO5
Amsterdam
) )
// String implements fmt.Stringer. // String implements fmt.Stringer.
@ -82,4 +83,5 @@ var forkToString = map[Fork]string{
BPO3: "BPO3", BPO3: "BPO3",
BPO4: "BPO4", BPO4: "BPO4",
BPO5: "BPO5", BPO5: "BPO5",
Amsterdam: "Amsterdam",
} }

View file

@ -273,7 +273,8 @@ func TestServerWebsocketReadLimit(t *testing.T) {
} }
} else if !errors.Is(err, websocket.ErrReadLimit) && } else if !errors.Is(err, websocket.ErrReadLimit) &&
!strings.Contains(strings.ToLower(err.Error()), "1009") && !strings.Contains(strings.ToLower(err.Error()), "1009") &&
!strings.Contains(strings.ToLower(err.Error()), "message too big") { !strings.Contains(strings.ToLower(err.Error()), "message too big") &&
!strings.Contains(strings.ToLower(err.Error()), "connection reset by peer") {
// Not the error we expect from exceeding the message size limit. // Not the error we expect from exceeding the message size limit.
t.Fatalf("unexpected error for read limit violation: %v", err) t.Fatalf("unexpected error for read limit violation: %v", err)
} }

View file

@ -18,7 +18,6 @@ package core_test
import ( import (
"bytes" "bytes"
"context"
"fmt" "fmt"
"math/big" "math/big"
"os" "os"
@ -97,12 +96,12 @@ func (ui *headlessUi) ApproveNewAccount(request *core.NewAccountRequest) (core.N
} }
func (ui *headlessUi) ShowError(message string) { func (ui *headlessUi) ShowError(message string) {
//stdout is used by communication // stdout is used by communication
fmt.Fprintln(os.Stderr, message) fmt.Fprintln(os.Stderr, message)
} }
func (ui *headlessUi) ShowInfo(message string) { func (ui *headlessUi) ShowInfo(message string) {
//stdout is used by communication // stdout is used by communication
fmt.Fprintln(os.Stderr, message) fmt.Fprintln(os.Stderr, message)
} }
@ -128,7 +127,7 @@ func setup(t *testing.T) (*core.SignerAPI, *headlessUi) {
func createAccount(ui *headlessUi, api *core.SignerAPI, t *testing.T) { func createAccount(ui *headlessUi, api *core.SignerAPI, t *testing.T) {
ui.approveCh <- "Y" ui.approveCh <- "Y"
ui.inputCh <- "a_long_password" ui.inputCh <- "a_long_password"
_, err := api.New(context.Background()) _, err := api.New(t.Context())
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -143,7 +142,7 @@ func failCreateAccountWithPassword(ui *headlessUi, api *core.SignerAPI, password
ui.inputCh <- password ui.inputCh <- password
ui.inputCh <- password ui.inputCh <- password
addr, err := api.New(context.Background()) addr, err := api.New(t.Context())
if err == nil { if err == nil {
t.Fatal("Should have returned an error") t.Fatal("Should have returned an error")
} }
@ -154,7 +153,7 @@ func failCreateAccountWithPassword(ui *headlessUi, api *core.SignerAPI, password
func failCreateAccount(ui *headlessUi, api *core.SignerAPI, t *testing.T) { func failCreateAccount(ui *headlessUi, api *core.SignerAPI, t *testing.T) {
ui.approveCh <- "N" ui.approveCh <- "N"
addr, err := api.New(context.Background()) addr, err := api.New(t.Context())
if err != core.ErrRequestDenied { if err != core.ErrRequestDenied {
t.Fatal(err) t.Fatal(err)
} }
@ -165,7 +164,7 @@ func failCreateAccount(ui *headlessUi, api *core.SignerAPI, t *testing.T) {
func list(ui *headlessUi, api *core.SignerAPI, t *testing.T) ([]common.Address, error) { func list(ui *headlessUi, api *core.SignerAPI, t *testing.T) ([]common.Address, error) {
ui.approveCh <- "A" ui.approveCh <- "A"
return api.List(context.Background()) return api.List(t.Context())
} }
func TestNewAcc(t *testing.T) { func TestNewAcc(t *testing.T) {
@ -199,7 +198,7 @@ func TestNewAcc(t *testing.T) {
// Testing listing: // Testing listing:
// Listing one Account // Listing one Account
control.approveCh <- "1" control.approveCh <- "1"
list, err := api.List(context.Background()) list, err := api.List(t.Context())
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -208,7 +207,7 @@ func TestNewAcc(t *testing.T) {
} }
// Listing denied // Listing denied
control.approveCh <- "Nope" control.approveCh <- "Nope"
list, err = api.List(context.Background()) list, err = api.List(t.Context())
if len(list) != 0 { if len(list) != 0 {
t.Fatalf("List should be empty") t.Fatalf("List should be empty")
} }
@ -246,7 +245,7 @@ func TestSignTx(t *testing.T) {
api, control := setup(t) api, control := setup(t)
createAccount(control, api, t) createAccount(control, api, t)
control.approveCh <- "A" control.approveCh <- "A"
list, err = api.List(context.Background()) list, err = api.List(t.Context())
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -260,15 +259,15 @@ func TestSignTx(t *testing.T) {
control.approveCh <- "Y" control.approveCh <- "Y"
control.inputCh <- "wrongpassword" control.inputCh <- "wrongpassword"
res, err = api.SignTransaction(context.Background(), tx, &methodSig) res, err = api.SignTransaction(t.Context(), tx, &methodSig)
if res != nil { if res != nil {
t.Errorf("Expected nil-response, got %v", res) t.Errorf("Expected nil-response, got %v", res)
} }
if err != keystore.ErrDecrypt { if err != keystore.ErrDecrypt {
t.Errorf("Expected ErrLocked! %v", err) t.Errorf("Expected ErrDecrypt! %v", err)
} }
control.approveCh <- "No way" control.approveCh <- "No way"
res, err = api.SignTransaction(context.Background(), tx, &methodSig) res, err = api.SignTransaction(t.Context(), tx, &methodSig)
if res != nil { if res != nil {
t.Errorf("Expected nil-response, got %v", res) t.Errorf("Expected nil-response, got %v", res)
} }
@ -278,22 +277,21 @@ func TestSignTx(t *testing.T) {
// Sign with correct password // Sign with correct password
control.approveCh <- "Y" control.approveCh <- "Y"
control.inputCh <- "a_long_password" control.inputCh <- "a_long_password"
res, err = api.SignTransaction(context.Background(), tx, &methodSig) res, err = api.SignTransaction(t.Context(), tx, &methodSig)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
parsedTx := &types.Transaction{} parsedTx := &types.Transaction{}
rlp.DecodeBytes(res.Raw, parsedTx) rlp.DecodeBytes(res.Raw, parsedTx)
//The tx should NOT be modified by the UI // The tx should NOT be modified by the UI
if parsedTx.Value().Cmp(tx.Value.ToInt()) != 0 { if parsedTx.Value().Cmp(tx.Value.ToInt()) != 0 {
t.Errorf("Expected value to be unchanged, expected %v got %v", tx.Value, parsedTx.Value()) t.Errorf("Expected value to be unchanged, expected %v got %v", tx.Value, parsedTx.Value())
} }
control.approveCh <- "Y" control.approveCh <- "Y"
control.inputCh <- "a_long_password" control.inputCh <- "a_long_password"
res2, err = api.SignTransaction(context.Background(), tx, &methodSig) res2, err = api.SignTransaction(t.Context(), tx, &methodSig)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -301,20 +299,20 @@ func TestSignTx(t *testing.T) {
t.Error("Expected tx to be unmodified by UI") t.Error("Expected tx to be unmodified by UI")
} }
//The tx is modified by the UI // The tx is modified by the UI
control.approveCh <- "M" control.approveCh <- "M"
control.inputCh <- "a_long_password" control.inputCh <- "a_long_password"
res2, err = api.SignTransaction(context.Background(), tx, &methodSig) res2, err = api.SignTransaction(t.Context(), tx, &methodSig)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
parsedTx2 := &types.Transaction{} parsedTx2 := &types.Transaction{}
rlp.DecodeBytes(res.Raw, parsedTx2) rlp.DecodeBytes(res2.Raw, parsedTx2)
//The tx should be modified by the UI // The tx should be modified by the UI
if parsedTx2.Value().Cmp(tx.Value.ToInt()) != 0 { if parsedTx2.Value().Cmp(tx.Value.ToInt()) == 0 {
t.Errorf("Expected value to be unchanged, got %v", parsedTx.Value()) t.Errorf("Expected value to be changed, got %v", parsedTx2.Value())
} }
if bytes.Equal(res.Raw, res2.Raw) { if bytes.Equal(res.Raw, res2.Raw) {
t.Error("Expected tx to be modified by UI") t.Error("Expected tx to be modified by UI")

View file

@ -202,7 +202,7 @@ func TestSignData(t *testing.T) {
t.Errorf("Expected nil-data, got %x", signature) t.Errorf("Expected nil-data, got %x", signature)
} }
if err != keystore.ErrDecrypt { if err != keystore.ErrDecrypt {
t.Errorf("Expected ErrLocked! '%v'", err) t.Errorf("Expected ErrDecrypt! '%v'", err)
} }
control.approveCh <- "No way" control.approveCh <- "No way"
signature, err = api.SignData(context.Background(), apitypes.TextPlain.Mime, a, hexutil.Encode([]byte("EHLO world"))) signature, err = api.SignData(context.Background(), apitypes.TextPlain.Mime, a, hexutil.Encode([]byte("EHLO world")))

View file

@ -559,3 +559,6 @@ type dummyChain struct {
func (d *dummyChain) Engine() consensus.Engine { return nil } func (d *dummyChain) Engine() consensus.Engine { return nil }
func (d *dummyChain) GetHeader(h common.Hash, n uint64) *types.Header { return nil } func (d *dummyChain) GetHeader(h common.Hash, n uint64) *types.Header { return nil }
func (d *dummyChain) Config() *params.ChainConfig { return d.config } func (d *dummyChain) Config() *params.ChainConfig { return d.config }
func (d *dummyChain) CurrentHeader() *types.Header { return nil }
func (d *dummyChain) GetHeaderByNumber(n uint64) *types.Header { return nil }
func (d *dummyChain) GetHeaderByHash(h common.Hash) *types.Header { return nil }

View file

@ -32,8 +32,8 @@ func newBytesPool(sliceCap, nitems int) *bytesPool {
} }
} }
// Get returns a slice. Safe for concurrent use. // get returns a slice. Safe for concurrent use.
func (bp *bytesPool) Get() []byte { func (bp *bytesPool) get() []byte {
select { select {
case b := <-bp.c: case b := <-bp.c:
return b return b
@ -42,18 +42,18 @@ func (bp *bytesPool) Get() []byte {
} }
} }
// GetWithSize returns a slice with specified byte slice size. // getWithSize returns a slice with specified byte slice size.
func (bp *bytesPool) GetWithSize(s int) []byte { func (bp *bytesPool) getWithSize(s int) []byte {
b := bp.Get() b := bp.get()
if cap(b) < s { if cap(b) < s {
return make([]byte, s) return make([]byte, s)
} }
return b[:s] return b[:s]
} }
// Put returns a slice to the pool. Safe for concurrent use. This method // put returns a slice to the pool. Safe for concurrent use. This method
// will ignore slices that are too small or too large (>3x the cap) // will ignore slices that are too small or too large (>3x the cap)
func (bp *bytesPool) Put(b []byte) { func (bp *bytesPool) put(b []byte) {
if c := cap(b); c < bp.w || c > 3*bp.w { if c := cap(b); c < bp.w || c > 3*bp.w {
return return
} }
@ -62,3 +62,40 @@ func (bp *bytesPool) Put(b []byte) {
default: default:
} }
} }
// unsafeBytesPool is a pool for byte slices. It is not safe for concurrent use.
type unsafeBytesPool struct {
items [][]byte
w int
}
// newUnsafeBytesPool creates a new unsafeBytesPool. The sliceCap sets the
// capacity of newly allocated slices, and the nitems determines how many
// items the pool will hold, at maximum.
func newUnsafeBytesPool(sliceCap, nitems int) *unsafeBytesPool {
return &unsafeBytesPool{
items: make([][]byte, 0, nitems),
w: sliceCap,
}
}
// Get returns a slice with pre-allocated space.
func (bp *unsafeBytesPool) get() []byte {
if len(bp.items) > 0 {
last := bp.items[len(bp.items)-1]
bp.items = bp.items[:len(bp.items)-1]
return last
}
return make([]byte, 0, bp.w)
}
// put returns a slice to the pool. This method will ignore slices that are
// too small or too large (>3x the cap)
func (bp *unsafeBytesPool) put(b []byte) {
if c := cap(b); c < bp.w || c > 3*bp.w {
return
}
if len(bp.items) < cap(bp.items) {
bp.items = append(bp.items, b)
}
}

56
trie/list_hasher.go Normal file
View file

@ -0,0 +1,56 @@
// Copyright 2025 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package trie
import (
"bytes"
"github.com/ethereum/go-ethereum/common"
)
// ListHasher is a wrapper of the Merkle-Patricia-Trie, which implements
// types.ListHasher. Compared to a Trie instance, the Update method of this
// type always deep-copies its input slices.
//
// This implementation is very inefficient in terms of memory allocation,
// compared with StackTrie. It exists only for correctness comparison purposes.
type ListHasher struct {
tr *Trie
}
// NewListHasher initializes the list hasher.
func NewListHasher() *ListHasher {
return &ListHasher{
tr: NewEmpty(nil),
}
}
// Reset clears the internal state prepares the ListHasher for reuse.
func (h *ListHasher) Reset() {
h.tr.reset()
}
// Update inserts a key-value pair into the trie.
func (h *ListHasher) Update(key []byte, value []byte) error {
key, value = bytes.Clone(key), bytes.Clone(value)
return h.tr.Update(key, value)
}
// Hash computes the root hash of all inserted key-value pairs.
func (h *ListHasher) Hash() common.Hash {
return h.tr.Hash()
}

View file

@ -28,7 +28,7 @@ import (
var ( var (
stPool = sync.Pool{New: func() any { return new(stNode) }} stPool = sync.Pool{New: func() any { return new(stNode) }}
bPool = newBytesPool(32, 100) bPool = newBytesPool(32, 100)
_ = types.TrieHasher((*StackTrie)(nil)) _ = types.ListHasher((*StackTrie)(nil))
) )
// OnTrieNode is a callback method invoked when a trie node is committed // OnTrieNode is a callback method invoked when a trie node is committed
@ -50,6 +50,7 @@ type StackTrie struct {
onTrieNode OnTrieNode onTrieNode OnTrieNode
kBuf []byte // buf space used for hex-key during insertions kBuf []byte // buf space used for hex-key during insertions
pBuf []byte // buf space used for path during insertions pBuf []byte // buf space used for path during insertions
vPool *unsafeBytesPool
} }
// NewStackTrie allocates and initializes an empty trie. The committed nodes // NewStackTrie allocates and initializes an empty trie. The committed nodes
@ -61,6 +62,7 @@ func NewStackTrie(onTrieNode OnTrieNode) *StackTrie {
onTrieNode: onTrieNode, onTrieNode: onTrieNode,
kBuf: make([]byte, 64), kBuf: make([]byte, 64),
pBuf: make([]byte, 64), pBuf: make([]byte, 64),
vPool: newUnsafeBytesPool(300, 20),
} }
} }
@ -74,6 +76,9 @@ func (t *StackTrie) grow(key []byte) {
} }
// Update inserts a (key, value) pair into the stack trie. // Update inserts a (key, value) pair into the stack trie.
//
// Note the supplied key value pair is copied and managed internally,
// they are safe to be modified after this method returns.
func (t *StackTrie) Update(key, value []byte) error { func (t *StackTrie) Update(key, value []byte) error {
if len(value) == 0 { if len(value) == 0 {
return errors.New("trying to insert empty (deletion)") return errors.New("trying to insert empty (deletion)")
@ -88,7 +93,14 @@ func (t *StackTrie) Update(key, value []byte) error {
} else { } else {
t.last = append(t.last[:0], k...) // reuse key slice t.last = append(t.last[:0], k...) // reuse key slice
} }
t.insert(t.root, k, value, t.pBuf[:0]) vBuf := t.vPool.get()
if cap(vBuf) < len(value) {
vBuf = common.CopyBytes(value)
} else {
vBuf = vBuf[:len(value)]
copy(vBuf, value)
}
t.insert(t.root, k, vBuf, t.pBuf[:0])
return nil return nil
} }
@ -108,14 +120,16 @@ func (t *StackTrie) TrieKey(key []byte) []byte {
// stNode represents a node within a StackTrie // stNode represents a node within a StackTrie
type stNode struct { type stNode struct {
typ uint8 // node type (as in branch, ext, leaf) typ uint8 // node type (as in branch, ext, leaf)
key []byte // key chunk covered by this (leaf|ext) node key []byte // exclusive owned key chunk covered by this (leaf|ext) node
val []byte // value contained by this node if it's a leaf val []byte // exclusive owned value contained by this node (leaf: value; hash: hash)
children [16]*stNode // list of children (for branch and exts) children [16]*stNode // list of children (for branch and ext)
} }
// newLeaf constructs a leaf node with provided node key and value. The key // newLeaf constructs a leaf node with provided node key and value.
// will be deep-copied in the function and safe to modify afterwards, but //
// value is not. // The key is deep-copied within the function, so it can be safely modified
// afterwards. The value is retained directly without copying, as it is
// exclusively owned by the stackTrie.
func newLeaf(key, val []byte) *stNode { func newLeaf(key, val []byte) *stNode {
st := stPool.Get().(*stNode) st := stPool.Get().(*stNode)
st.typ = leafNode st.typ = leafNode
@ -146,9 +160,9 @@ const (
func (n *stNode) reset() *stNode { func (n *stNode) reset() *stNode {
if n.typ == hashedNode { if n.typ == hashedNode {
// On hashnodes, we 'own' the val: it is guaranteed to be not held // On hashnodes, we 'own' the val: it is guaranteed to be not held
// by external caller. Hence, when we arrive here, we can put it back // by external caller. Hence, when we arrive here, we can put it
// into the pool // back into the pool
bPool.Put(n.val) bPool.put(n.val)
} }
n.key = n.key[:0] n.key = n.key[:0]
n.val = nil n.val = nil
@ -172,11 +186,6 @@ func (n *stNode) getDiffIndex(key []byte) int {
} }
// Helper function to that inserts a (key, value) pair into the trie. // Helper function to that inserts a (key, value) pair into the trie.
//
// - The key is not retained by this method, but always copied if needed.
// - The value is retained by this method, as long as the leaf that it represents
// remains unhashed. However: it is never modified.
// - The path is not retained by this method.
func (t *StackTrie) insert(st *stNode, key, value []byte, path []byte) { func (t *StackTrie) insert(st *stNode, key, value []byte, path []byte) {
switch st.typ { switch st.typ {
case branchNode: /* Branch */ case branchNode: /* Branch */
@ -235,16 +244,14 @@ func (t *StackTrie) insert(st *stNode, key, value []byte, path []byte) {
} }
var p *stNode var p *stNode
if diffidx == 0 { if diffidx == 0 {
// the break is on the first byte, so // the break is on the first byte, so the current node
// the current node is converted into // is converted into a branch node.
// a branch node.
st.children[0] = nil st.children[0] = nil
p = st
st.typ = branchNode st.typ = branchNode
p = st
} else { } else {
// the common prefix is at least one byte // the common prefix is at least one byte long, insert
// long, insert a new intermediate branch // a new intermediate branch node.
// node.
st.children[0] = stPool.Get().(*stNode) st.children[0] = stPool.Get().(*stNode)
st.children[0].typ = branchNode st.children[0].typ = branchNode
p = st.children[0] p = st.children[0]
@ -280,8 +287,8 @@ func (t *StackTrie) insert(st *stNode, key, value []byte, path []byte) {
if diffidx == 0 { if diffidx == 0 {
// Convert current leaf into a branch // Convert current leaf into a branch
st.typ = branchNode st.typ = branchNode
p = st
st.children[0] = nil st.children[0] = nil
p = st
} else { } else {
// Convert current node into an ext, // Convert current node into an ext,
// and insert a child branch node. // and insert a child branch node.
@ -307,9 +314,7 @@ func (t *StackTrie) insert(st *stNode, key, value []byte, path []byte) {
st.val = nil st.val = nil
case emptyNode: /* Empty */ case emptyNode: /* Empty */
st.typ = leafNode *st = *newLeaf(key, value)
st.key = append(st.key, key...) // deep-copy the key as it's volatile
st.val = value
case hashedNode: case hashedNode:
panic("trying to insert into hash") panic("trying to insert into hash")
@ -393,18 +398,23 @@ func (t *StackTrie) hash(st *stNode, path []byte) {
st.typ = hashedNode st.typ = hashedNode
st.key = st.key[:0] st.key = st.key[:0]
st.val = nil // Release reference to potentially externally held slice. // Release reference to value slice which is exclusively owned
// by stackTrie itself.
if cap(st.val) > 0 && t.vPool != nil {
t.vPool.put(st.val)
}
st.val = nil
// Skip committing the non-root node if the size is smaller than 32 bytes // Skip committing the non-root node if the size is smaller than 32 bytes
// as tiny nodes are always embedded in their parent except root node. // as tiny nodes are always embedded in their parent except root node.
if len(blob) < 32 && len(path) > 0 { if len(blob) < 32 && len(path) > 0 {
st.val = bPool.GetWithSize(len(blob)) st.val = bPool.getWithSize(len(blob))
copy(st.val, blob) copy(st.val, blob)
return return
} }
// Write the hash to the 'val'. We allocate a new val here to not mutate // Write the hash to the 'val'. We allocate a new val here to not mutate
// input values. // input values.
st.val = bPool.GetWithSize(32) st.val = bPool.getWithSize(32)
t.h.hashDataTo(st.val, blob) t.h.hashDataTo(st.val, blob)
// Invoke the callback it's provided. Notably, the path and blob slices are // Invoke the callback it's provided. Notably, the path and blob slices are

View file

@ -19,6 +19,7 @@ package trie
import ( import (
"errors" "errors"
"fmt" "fmt"
"slices"
"sync" "sync"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
@ -553,7 +554,7 @@ func (s *Sync) children(req *nodeRequest, object node) ([]*nodeRequest, error) {
} }
children = []childNode{{ children = []childNode{{
node: node.Val, node: node.Val,
path: append(append([]byte(nil), req.path...), key...), path: slices.Concat(req.path, key),
}} }}
// Mark all internal nodes between shortNode and its **in disk** // Mark all internal nodes between shortNode and its **in disk**
// child as invalid. This is essential in the case of path mode // child as invalid. This is essential in the case of path mode
@ -595,7 +596,7 @@ func (s *Sync) children(req *nodeRequest, object node) ([]*nodeRequest, error) {
if node.Children[i] != nil { if node.Children[i] != nil {
children = append(children, childNode{ children = append(children, childNode{
node: node.Children[i], node: node.Children[i],
path: append(append([]byte(nil), req.path...), byte(i)), path: append(slices.Clone(req.path), byte(i)),
}) })
} }
} }

View file

@ -784,8 +784,8 @@ func (t *Trie) Witness() map[string][]byte {
return t.prevalueTracer.Values() return t.prevalueTracer.Values()
} }
// Reset drops the referenced root node and cleans all internal state. // reset drops the referenced root node and cleans all internal state.
func (t *Trie) Reset() { func (t *Trie) reset() {
t.root = nil t.root = nil
t.owner = common.Hash{} t.owner = common.Hash{}
t.unhashed = 0 t.unhashed = 0

View file

@ -326,7 +326,7 @@ func TestReplication(t *testing.T) {
updateString(trie2, val.k, val.v) updateString(trie2, val.k, val.v)
} }
if trie2.Hash() != hash { if trie2.Hash() != hash {
t.Errorf("root failure. expected %x got %x", hash, hash) t.Errorf("root failure. expected %x got %x", hash, trie2.Hash())
} }
} }

View file

@ -66,7 +66,7 @@ func TestVerkleTreeReadWrite(t *testing.T) {
} }
for key, val := range storages[addr] { for key, val := range storages[addr] {
if err := tr.UpdateStorage(addr, key.Bytes(), val); err != nil { if err := tr.UpdateStorage(addr, key.Bytes(), val); err != nil {
t.Fatalf("Failed to update account, %v", err) t.Fatalf("Failed to update storage, %v", err)
} }
} }
} }
@ -107,7 +107,7 @@ func TestVerkleRollBack(t *testing.T) {
} }
for key, val := range storages[addr] { for key, val := range storages[addr] {
if err := tr.UpdateStorage(addr, key.Bytes(), val); err != nil { if err := tr.UpdateStorage(addr, key.Bytes(), val); err != nil {
t.Fatalf("Failed to update account, %v", err) t.Fatalf("Failed to update storage, %v", err)
} }
} }
hash := crypto.Keccak256Hash(code) hash := crypto.Keccak256Hash(code)

Some files were not shown because too many files have changed in this diff Show more