From 01d0ce0bf156e52a62699b977288aecfa0366833 Mon Sep 17 00:00:00 2001 From: rjl493456442 Date: Tue, 30 Sep 2025 10:05:43 +0800 Subject: [PATCH 001/277] params: add blob config information in the banner (#32771) Extend the chain banner with blob config information. Co-authored-by: Felix Lange --- params/config.go | 61 +++++++++++++++++++++++++++--------------------- 1 file changed, 34 insertions(+), 27 deletions(-) diff --git a/params/config.go b/params/config.go index b441d60661..4e885cbdd4 100644 --- a/params/config.go +++ b/params/config.go @@ -618,34 +618,32 @@ func (c *ChainConfig) Description() string { // makes sense for mainnet should be optional at printing to avoid bloating // the output for testnets and private networks. banner += "Pre-Merge hard forks (block based):\n" - banner += fmt.Sprintf(" - Homestead: #%-8v (https://ethereum.github.io/execution-specs/src/ethereum/forks/homestead/__init__.py.html)\n", c.HomesteadBlock) + banner += fmt.Sprintf(" - Homestead: #%-8v\n", c.HomesteadBlock) if c.DAOForkBlock != nil { - banner += fmt.Sprintf(" - DAO Fork: #%-8v (https://ethereum.github.io/execution-specs/src/ethereum/forks/dao_fork/__init__.py.html)\n", c.DAOForkBlock) + banner += fmt.Sprintf(" - DAO Fork: #%-8v\n", c.DAOForkBlock) } - banner += fmt.Sprintf(" - Tangerine Whistle (EIP 150): #%-8v (https://ethereum.github.io/execution-specs/src/ethereum/forks/tangerine_whistle/__init__.py.html)\n", c.EIP150Block) - banner += fmt.Sprintf(" - Spurious Dragon/1 (EIP 155): #%-8v (https://ethereum.github.io/execution-specs/src/ethereum/forks/spurious_dragon/__init__.py.html)\n", c.EIP155Block) - banner += fmt.Sprintf(" - Spurious Dragon/2 (EIP 158): #%-8v (https://ethereum.github.io/execution-specs/src/ethereum/forks/spurious_dragon/__init__.py.html)\n", c.EIP155Block) - banner += fmt.Sprintf(" - Byzantium: #%-8v (https://ethereum.github.io/execution-specs/src/ethereum/forks/byzantium/__init__.py.html)\n", c.ByzantiumBlock) - banner += fmt.Sprintf(" - 
Constantinople: #%-8v (https://ethereum.github.io/execution-specs/src/ethereum/forks/constantinople/__init__.py.html)\n", c.ConstantinopleBlock) - banner += fmt.Sprintf(" - Petersburg: #%-8v (https://ethereum.github.io/execution-specs/src/ethereum/forks/constantinople/__init__.py.html)\n", c.PetersburgBlock) - banner += fmt.Sprintf(" - Istanbul: #%-8v (https://ethereum.github.io/execution-specs/src/ethereum/forks/istanbul/__init__.py.html)\n", c.IstanbulBlock) + banner += fmt.Sprintf(" - Tangerine Whistle (EIP 150): #%-8v\n", c.EIP150Block) + banner += fmt.Sprintf(" - Spurious Dragon/1 (EIP 155): #%-8v\n", c.EIP155Block) + banner += fmt.Sprintf(" - Spurious Dragon/2 (EIP 158): #%-8v\n", c.EIP155Block) + banner += fmt.Sprintf(" - Byzantium: #%-8v\n", c.ByzantiumBlock) + banner += fmt.Sprintf(" - Constantinople: #%-8v\n", c.ConstantinopleBlock) + banner += fmt.Sprintf(" - Petersburg: #%-8v\n", c.PetersburgBlock) + banner += fmt.Sprintf(" - Istanbul: #%-8v\n", c.IstanbulBlock) if c.MuirGlacierBlock != nil { - banner += fmt.Sprintf(" - Muir Glacier: #%-8v (https://ethereum.github.io/execution-specs/src/ethereum/forks/muir_glacier/__init__.py.html)\n", c.MuirGlacierBlock) + banner += fmt.Sprintf(" - Muir Glacier: #%-8v\n", c.MuirGlacierBlock) } - banner += fmt.Sprintf(" - Berlin: #%-8v (https://ethereum.github.io/execution-specs/src/ethereum/forks/berlin/__init__.py.html)\n", c.BerlinBlock) - banner += fmt.Sprintf(" - London: #%-8v (https://ethereum.github.io/execution-specs/src/ethereum/forks/london/__init__.py.html)\n", c.LondonBlock) + banner += fmt.Sprintf(" - Berlin: #%-8v\n", c.BerlinBlock) + banner += fmt.Sprintf(" - London: #%-8v\n", c.LondonBlock) if c.ArrowGlacierBlock != nil { - banner += fmt.Sprintf(" - Arrow Glacier: #%-8v (https://ethereum.github.io/execution-specs/src/ethereum/forks/arrow_glacier/__init__.py.html)\n", c.ArrowGlacierBlock) + banner += fmt.Sprintf(" - Arrow Glacier: #%-8v\n", c.ArrowGlacierBlock) } if c.GrayGlacierBlock != nil { - banner += 
fmt.Sprintf(" - Gray Glacier: #%-8v (https://ethereum.github.io/execution-specs/src/ethereum/forks/gray_glacier/__init__.py.html)\n", c.GrayGlacierBlock) + banner += fmt.Sprintf(" - Gray Glacier: #%-8v\n", c.GrayGlacierBlock) } banner += "\n" // Add a special section for the merge as it's non-obvious banner += "Merge configured:\n" - banner += " - Hard-fork specification: https://ethereum.github.io/execution-specs/src/ethereum/forks/paris/__init__.py.html\n" - banner += " - Network known to be merged\n" banner += fmt.Sprintf(" - Total terminal difficulty: %v\n", c.TerminalTotalDifficulty) if c.MergeNetsplitBlock != nil { banner += fmt.Sprintf(" - Merge netsplit block: #%-8v\n", c.MergeNetsplitBlock) @@ -655,38 +653,39 @@ func (c *ChainConfig) Description() string { // Create a list of forks post-merge banner += "Post-Merge hard forks (timestamp based):\n" if c.ShanghaiTime != nil { - banner += fmt.Sprintf(" - Shanghai: @%-10v (https://ethereum.github.io/execution-specs/src/ethereum/forks/shanghai/__init__.py.html)\n", *c.ShanghaiTime) + banner += fmt.Sprintf(" - Shanghai: @%-10v\n", *c.ShanghaiTime) } if c.CancunTime != nil { - banner += fmt.Sprintf(" - Cancun: @%-10v (https://ethereum.github.io/execution-specs/src/ethereum/forks/cancun/__init__.py.html)\n", *c.CancunTime) + banner += fmt.Sprintf(" - Cancun: @%-10v blob: (%s)\n", *c.CancunTime, c.BlobScheduleConfig.Cancun) } if c.PragueTime != nil { - banner += fmt.Sprintf(" - Prague: @%-10v (https://ethereum.github.io/execution-specs/src/ethereum/forks/prague/__init__.py.html)\n", *c.PragueTime) + banner += fmt.Sprintf(" - Prague: @%-10v blob: (%s)\n", *c.PragueTime, c.BlobScheduleConfig.Prague) } if c.OsakaTime != nil { - banner += fmt.Sprintf(" - Osaka: @%-10v (https://ethereum.github.io/execution-specs/src/ethereum/forks/osaka/__init__.py.html)\n", *c.OsakaTime) + banner += fmt.Sprintf(" - Osaka: @%-10v blob: (%s)\n", *c.OsakaTime, c.BlobScheduleConfig.Osaka) } if c.BPO1Time != nil { - banner += fmt.Sprintf(" - 
BPO1: @%-10v\n", *c.BPO1Time) + banner += fmt.Sprintf(" - BPO1: @%-10v blob: (%s)\n", *c.BPO1Time, c.BlobScheduleConfig.BPO1) } if c.BPO2Time != nil { - banner += fmt.Sprintf(" - BPO2: @%-10v\n", *c.BPO2Time) + banner += fmt.Sprintf(" - BPO2: @%-10v blob: (%s)\n", *c.BPO2Time, c.BlobScheduleConfig.BPO2) } if c.BPO3Time != nil { - banner += fmt.Sprintf(" - BPO3: @%-10v\n", *c.BPO3Time) + banner += fmt.Sprintf(" - BPO3: @%-10v blob: (%s)\n", *c.BPO3Time, c.BlobScheduleConfig.BPO3) } if c.BPO4Time != nil { - banner += fmt.Sprintf(" - BPO4: @%-10v\n", *c.BPO4Time) + banner += fmt.Sprintf(" - BPO4: @%-10v blob: (%s)\n", *c.BPO4Time, c.BlobScheduleConfig.BPO4) } if c.BPO5Time != nil { - banner += fmt.Sprintf(" - BPO5: @%-10v\n", *c.BPO5Time) + banner += fmt.Sprintf(" - BPO5: @%-10v blob: (%s)\n", *c.BPO5Time, c.BlobScheduleConfig.BPO5) } if c.AmsterdamTime != nil { - banner += fmt.Sprintf(" - Amsterdam: @%-10v\n", *c.AmsterdamTime) + banner += fmt.Sprintf(" - Amsterdam: @%-10v blob: (%s)\n", *c.AmsterdamTime, c.BlobScheduleConfig.Amsterdam) } if c.VerkleTime != nil { - banner += fmt.Sprintf(" - Verkle: @%-10v\n", *c.VerkleTime) + banner += fmt.Sprintf(" - Verkle: @%-10v blob: (%s)\n", *c.VerkleTime, c.BlobScheduleConfig.Verkle) } + banner += fmt.Sprintf("\nAll fork specifications can be found at https://ethereum.github.io/execution-specs/src/ethereum/forks/\n") return banner } @@ -697,6 +696,14 @@ type BlobConfig struct { UpdateFraction uint64 `json:"baseFeeUpdateFraction"` } +// String implement fmt.Stringer, returning string format blob config. +func (bc *BlobConfig) String() string { + if bc == nil { + return "nil" + } + return fmt.Sprintf("target: %d, max: %d, fraction: %d", bc.Target, bc.Max, bc.UpdateFraction) +} + // BlobScheduleConfig determines target and max number of blobs allow per fork. 
type BlobScheduleConfig struct { Cancun *BlobConfig `json:"cancun,omitempty"` From c1e9d78f1f8339b8dafed994530fb6fd231c48b0 Mon Sep 17 00:00:00 2001 From: MozirDmitriy Date: Tue, 30 Sep 2025 05:07:54 +0300 Subject: [PATCH 002/277] core/txpool: remove unused signer field from TxPool (#32787) The TxPool.signer field was never read and each subpool (legacy/blob) maintains its own signer instance. This field remained after txpool refactoring into subpools and is dead code. Removing it reduces confusion and simplifies the constructor. --- core/txpool/txpool.go | 2 -- 1 file changed, 2 deletions(-) diff --git a/core/txpool/txpool.go b/core/txpool/txpool.go index b5470cd7fc..437861efca 100644 --- a/core/txpool/txpool.go +++ b/core/txpool/txpool.go @@ -65,7 +65,6 @@ type BlockChain interface { type TxPool struct { subpools []SubPool // List of subpools for specialized transaction handling chain BlockChain - signer types.Signer stateLock sync.RWMutex // The lock for protecting state instance state *state.StateDB // Current state at the blockchain head @@ -98,7 +97,6 @@ func New(gasTip uint64, chain BlockChain, subpools []SubPool) (*TxPool, error) { pool := &TxPool{ subpools: subpools, chain: chain, - signer: types.LatestSigner(chain.Config()), state: statedb, quit: make(chan chan error), term: make(chan struct{}), From 2037c53e7a0104b33b996ef4c705a1928df207a4 Mon Sep 17 00:00:00 2001 From: Galoretka Date: Tue, 30 Sep 2025 05:11:09 +0300 Subject: [PATCH 003/277] core/state: correct expected value in TestMessageCallGas (#32780) --- core/state/access_events_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/state/access_events_test.go b/core/state/access_events_test.go index 5e1fee767c..e80859a0b4 100644 --- a/core/state/access_events_test.go +++ b/core/state/access_events_test.go @@ -131,7 +131,7 @@ func TestMessageCallGas(t *testing.T) { } gas = ae.CodeHashGas(testAddr, false, math.MaxUint64, false) if gas != params.WitnessChunkReadCost { - 
t.Fatalf("incorrect gas computed, got %d, want %d", gas, 0) + t.Fatalf("incorrect gas computed, got %d, want %d", gas, params.WitnessChunkReadCost) } // Check warm read cost From 6f8e28b4aab10c07fffef02823810a1dfaa06617 Mon Sep 17 00:00:00 2001 From: Guillaume Ballet <3272758+gballet@users.noreply.github.com> Date: Tue, 30 Sep 2025 12:50:20 +0200 Subject: [PATCH 004/277] go.mod, cmd/keeper/go.mod: upgrade victoria metrics dependency (#32720) This is required for geth to compile to WASM. --- cmd/keeper/go.mod | 4 ++-- cmd/keeper/go.sum | 14 ++++---------- go.mod | 4 ++-- go.sum | 10 ++++------ 4 files changed, 12 insertions(+), 20 deletions(-) diff --git a/cmd/keeper/go.mod b/cmd/keeper/go.mod index d1649da43f..16094d16b1 100644 --- a/cmd/keeper/go.mod +++ b/cmd/keeper/go.mod @@ -9,7 +9,7 @@ require ( require ( github.com/StackExchange/wmi v1.2.1 // indirect - github.com/VictoriaMetrics/fastcache v1.12.2 // indirect + github.com/VictoriaMetrics/fastcache v1.13.0 // indirect github.com/bits-and-blooms/bitset v1.20.0 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/consensys/gnark-crypto v0.18.0 // indirect @@ -24,7 +24,7 @@ require ( github.com/ferranbt/fastssz v0.1.4 // indirect github.com/go-ole/go-ole v1.3.0 // indirect github.com/gofrs/flock v0.12.1 // indirect - github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb // indirect + github.com/golang/snappy v1.0.0 // indirect github.com/holiman/bloomfilter/v2 v2.0.3 // indirect github.com/holiman/uint256 v1.3.2 // indirect github.com/klauspost/cpuid/v2 v2.0.9 // indirect diff --git a/cmd/keeper/go.sum b/cmd/keeper/go.sum index e3bc204ba8..3eaef469dc 100644 --- a/cmd/keeper/go.sum +++ b/cmd/keeper/go.sum @@ -2,15 +2,14 @@ github.com/DataDog/zstd v1.4.5 h1:EndNeuB0l9syBZhut0wns3gV1hL8zX8LIu6ZiVHWLIQ= github.com/DataDog/zstd v1.4.5/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo= github.com/StackExchange/wmi v1.2.1 h1:VIkavFPXSjcnS+O8yTq7NI32k0R5Aj+v39y29VYDOSA= 
github.com/StackExchange/wmi v1.2.1/go.mod h1:rcmrprowKIVzvc+NUiLncP2uuArMWLCbu9SBzvHz7e8= -github.com/VictoriaMetrics/fastcache v1.12.2 h1:N0y9ASrJ0F6h0QaC3o6uJb3NIZ9VKLjCM7NQbSmF7WI= -github.com/VictoriaMetrics/fastcache v1.12.2/go.mod h1:AmC+Nzz1+3G2eCPapF6UcsnkThDcMsQicp4xDukwJYI= +github.com/VictoriaMetrics/fastcache v1.13.0 h1:AW4mheMR5Vd9FkAPUv+NH6Nhw+fmbTMGMsNAoA/+4G0= +github.com/VictoriaMetrics/fastcache v1.13.0/go.mod h1:hHXhl4DA2fTL2HTZDJFXWgW0LNjo6B+4aj2Wmng3TjU= github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156 h1:eMwmnE/GDgah4HI848JfFxHt+iPb26b4zyfspmqY0/8= github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156/go.mod h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2ucNfQESPXM= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bits-and-blooms/bitset v1.20.0 h1:2F+rfL86jE2d/bmw7OhqUg2Sj/1rURkBn3MdfoPyRVU= github.com/bits-and-blooms/bitset v1.20.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8= -github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cockroachdb/errors v1.11.3 h1:5bA+k2Y6r+oz/6Z/RFlNeVCesGARKuC6YymtcDrbC/I= @@ -31,7 +30,6 @@ github.com/crate-crypto/go-eth-kzg v1.4.0 h1:WzDGjHk4gFg6YzV0rJOAsTK4z3Qkz5jd4RE github.com/crate-crypto/go-eth-kzg v1.4.0/go.mod h1:J9/u5sWfznSObptgfa92Jq8rTswn6ahQWEuiLHOjCUI= github.com/crate-crypto/go-ipa v0.0.0-20240724233137-53bbb0ceb27a h1:W8mUrRp6NOVl3J+MYp5kPMoUZPp7aOYHtaua31lwRHg= github.com/crate-crypto/go-ipa v0.0.0-20240724233137-53bbb0ceb27a/go.mod h1:sTwzHBvIzm2RfVCGNEBZgRyjwK40bVoun3ZnGOCafNM= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 
h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/deckarep/golang-set/v2 v2.6.0 h1:XfcQbWM1LlMB8BsJ8N9vW5ehnnPVIw0je80NsVHagjM= @@ -61,9 +59,8 @@ github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= -github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb h1:PBC98N2aIaM3XXiurYmW7fx4GZkL8feAMVq7nEjURHk= -github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v1.0.0 h1:Oy607GVXHs7RtbggtPBnr2RmDArIsAefDwvrdWvRhGs= +github.com/golang/snappy v1.0.0/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/holiman/bloomfilter/v2 v2.0.3 h1:73e0e/V0tCydx14a0SCYS/EWCxgwLZ18CZcZKVu0fao= github.com/holiman/bloomfilter/v2 v2.0.3/go.mod h1:zpoh+gs7qcpqrHr3dB55AMiJwo0iURXE7ZOP9L9hSkA= github.com/holiman/uint256 v1.3.2 h1:a9EgMPSC1AAaj1SZL5zIQD3WbwTuHrMGOerLjGmM/TA= @@ -112,8 +109,6 @@ github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= github.com/shirou/gopsutil v3.21.4-0.20210419000835-c7a38de76ee5+incompatible h1:Bn1aCHHRnjv4Bl16T8rcaFjYSrGrIZvpiGO6P3Q4GpU= github.com/shirou/gopsutil v3.21.4-0.20210419000835-c7a38de76ee5+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify 
v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/supranational/blst v0.3.16-0.20250831170142-f48500c1fdbe h1:nbdqkIGOGfUAD54q1s2YBcBz/WcsxCO9HUQ4aGV5hUw= @@ -134,7 +129,6 @@ golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.36.0 h1:KVRy2GtZBrk1cBYA7MKu5bEZFxQk4NIDV6RLVcC8o0k= golang.org/x/sys v0.36.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY= diff --git a/go.mod b/go.mod index 03cdf3bb2d..c91cc81d21 100644 --- a/go.mod +++ b/go.mod @@ -5,7 +5,7 @@ go 1.24.0 require ( github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.2.0 github.com/Microsoft/go-winio v0.6.2 - github.com/VictoriaMetrics/fastcache v1.12.2 + github.com/VictoriaMetrics/fastcache v1.13.0 github.com/aws/aws-sdk-go-v2 v1.21.2 github.com/aws/aws-sdk-go-v2/config v1.18.45 github.com/aws/aws-sdk-go-v2/credentials v1.13.43 @@ -31,7 +31,7 @@ require ( github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff github.com/gofrs/flock v0.12.1 github.com/golang-jwt/jwt/v4 v4.5.2 - github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb + github.com/golang/snappy v1.0.0 github.com/google/gofuzz v1.2.0 github.com/google/uuid v1.3.0 github.com/gorilla/websocket v1.4.2 diff --git a/go.sum b/go.sum index 764cfdb668..779bcde846 100644 --- a/go.sum +++ b/go.sum @@ -16,8 +16,8 @@ github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERo github.com/Microsoft/go-winio v0.6.2/go.mod 
h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= github.com/StackExchange/wmi v1.2.1 h1:VIkavFPXSjcnS+O8yTq7NI32k0R5Aj+v39y29VYDOSA= github.com/StackExchange/wmi v1.2.1/go.mod h1:rcmrprowKIVzvc+NUiLncP2uuArMWLCbu9SBzvHz7e8= -github.com/VictoriaMetrics/fastcache v1.12.2 h1:N0y9ASrJ0F6h0QaC3o6uJb3NIZ9VKLjCM7NQbSmF7WI= -github.com/VictoriaMetrics/fastcache v1.12.2/go.mod h1:AmC+Nzz1+3G2eCPapF6UcsnkThDcMsQicp4xDukwJYI= +github.com/VictoriaMetrics/fastcache v1.13.0 h1:AW4mheMR5Vd9FkAPUv+NH6Nhw+fmbTMGMsNAoA/+4G0= +github.com/VictoriaMetrics/fastcache v1.13.0/go.mod h1:hHXhl4DA2fTL2HTZDJFXWgW0LNjo6B+4aj2Wmng3TjU= github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156 h1:eMwmnE/GDgah4HI848JfFxHt+iPb26b4zyfspmqY0/8= github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156/go.mod h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2ucNfQESPXM= github.com/aws/aws-sdk-go-v2 v1.21.2 h1:+LXZ0sgo8quN9UOKXXzAWRT3FWd4NxeXWOZom9pE7GA= @@ -52,7 +52,6 @@ github.com/bits-and-blooms/bitset v1.20.0 h1:2F+rfL86jE2d/bmw7OhqUg2Sj/1rURkBn3M github.com/bits-and-blooms/bitset v1.20.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8= github.com/cespare/cp v0.1.0 h1:SE+dxFebS7Iik5LK0tsi1k9ZCxEaFX4AjQmoyA+1dJk= github.com/cespare/cp v0.1.0/go.mod h1:SOGHArjBr4JWaSDEVpWpo/hNg6RoKrls6Oh40hiwW+s= -github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chzyer/logex v1.2.0/go.mod h1:9+9sk7u7pGNWYMkh0hdiL++6OeibzJccyQU4p4MedaY= @@ -165,8 +164,8 @@ github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= 
-github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb h1:PBC98N2aIaM3XXiurYmW7fx4GZkL8feAMVq7nEjURHk= -github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v1.0.0 h1:Oy607GVXHs7RtbggtPBnr2RmDArIsAefDwvrdWvRhGs= +github.com/golang/snappy v1.0.0/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golangci/lint-1 v0.0.0-20181222135242-d2cdd8c08219/go.mod h1:/X8TswGSh1pIozq4ZwCfxS0WA5JGXguxk94ar/4c87Y= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= @@ -450,7 +449,6 @@ golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.36.0 h1:KVRy2GtZBrk1cBYA7MKu5bEZFxQk4NIDV6RLVcC8o0k= golang.org/x/sys v0.36.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= From 9986270fbf78d4c3ccf03c602168f3427fc96cd4 Mon Sep 17 00:00:00 2001 From: Yuan-Yao Sung Date: Tue, 30 Sep 2025 19:30:10 +0800 Subject: [PATCH 005/277] eth/catalyst: extend payloadVersion support to osaka/post-osaka forks (#32800) This PR updates the `payloadVersion` function in `simulated_beacon.go` to handle additional following forks used during development and testing phases after Osaka. This change ensures that the simulated beacon correctly resolves the payload version for these forks, enabling consistent and valid execution payload handling during local testing or simulation. 
--- eth/catalyst/simulated_beacon.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/eth/catalyst/simulated_beacon.go b/eth/catalyst/simulated_beacon.go index 0642d6a1ad..c10990c233 100644 --- a/eth/catalyst/simulated_beacon.go +++ b/eth/catalyst/simulated_beacon.go @@ -100,7 +100,7 @@ type SimulatedBeacon struct { func payloadVersion(config *params.ChainConfig, time uint64) engine.PayloadVersion { switch config.LatestFork(time) { - case forks.Prague, forks.Cancun: + case forks.BPO5, forks.BPO4, forks.BPO3, forks.BPO2, forks.BPO1, forks.Osaka, forks.Prague, forks.Cancun: return engine.PayloadV3 case forks.Paris, forks.Shanghai: return engine.PayloadV2 From f9756bb8857706e64fdaf709a092ac1acc7b45e9 Mon Sep 17 00:00:00 2001 From: Zach Brown Date: Tue, 30 Sep 2025 19:30:47 +0800 Subject: [PATCH 006/277] p2p: fix error message in test (#32804) --- p2p/enode/iter_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/p2p/enode/iter_test.go b/p2p/enode/iter_test.go index 577f9c2825..922e1cde19 100644 --- a/p2p/enode/iter_test.go +++ b/p2p/enode/iter_test.go @@ -45,7 +45,7 @@ func TestReadNodesCycle(t *testing.T) { nodes := ReadNodes(iter, 10) checkNodes(t, nodes, 3) if iter.count != 10 { - t.Fatalf("%d calls to Next, want %d", iter.count, 100) + t.Fatalf("%d calls to Next, want %d", iter.count, 10) } } From bb00d26bbe6bdee45d8d0fe231a326e345d53a68 Mon Sep 17 00:00:00 2001 From: Zach Brown Date: Tue, 30 Sep 2025 19:31:42 +0800 Subject: [PATCH 007/277] signer/core: fix error message in test (#32807) --- signer/core/api_test.go | 2 +- signer/core/signed_data_test.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/signer/core/api_test.go b/signer/core/api_test.go index 0e16a1b7fd..ed4fdc5096 100644 --- a/signer/core/api_test.go +++ b/signer/core/api_test.go @@ -264,7 +264,7 @@ func TestSignTx(t *testing.T) { t.Errorf("Expected nil-response, got %v", res) } if err != keystore.ErrDecrypt { - t.Errorf("Expected ErrLocked! 
%v", err) + t.Errorf("Expected ErrDecrypt! %v", err) } control.approveCh <- "No way" res, err = api.SignTransaction(t.Context(), tx, &methodSig) diff --git a/signer/core/signed_data_test.go b/signer/core/signed_data_test.go index 001f6b6838..8455aaf9c5 100644 --- a/signer/core/signed_data_test.go +++ b/signer/core/signed_data_test.go @@ -202,7 +202,7 @@ func TestSignData(t *testing.T) { t.Errorf("Expected nil-data, got %x", signature) } if err != keystore.ErrDecrypt { - t.Errorf("Expected ErrLocked! '%v'", err) + t.Errorf("Expected ErrDecrypt! '%v'", err) } control.approveCh <- "No way" signature, err = api.SignData(context.Background(), apitypes.TextPlain.Mime, a, hexutil.Encode([]byte("EHLO world"))) From 1487a8577d1566497e161a04f8cee3204d4b3d36 Mon Sep 17 00:00:00 2001 From: Zach Brown Date: Tue, 30 Sep 2025 19:33:36 +0800 Subject: [PATCH 008/277] params: fix banner message (#32796) --- params/config.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/params/config.go b/params/config.go index 4e885cbdd4..e796d75535 100644 --- a/params/config.go +++ b/params/config.go @@ -624,7 +624,7 @@ func (c *ChainConfig) Description() string { } banner += fmt.Sprintf(" - Tangerine Whistle (EIP 150): #%-8v\n", c.EIP150Block) banner += fmt.Sprintf(" - Spurious Dragon/1 (EIP 155): #%-8v\n", c.EIP155Block) - banner += fmt.Sprintf(" - Spurious Dragon/2 (EIP 158): #%-8v\n", c.EIP155Block) + banner += fmt.Sprintf(" - Spurious Dragon/2 (EIP 158): #%-8v\n", c.EIP158Block) banner += fmt.Sprintf(" - Byzantium: #%-8v\n", c.ByzantiumBlock) banner += fmt.Sprintf(" - Constantinople: #%-8v\n", c.ConstantinopleBlock) banner += fmt.Sprintf(" - Petersburg: #%-8v\n", c.PetersburgBlock) From 057667151b1940e7403a97bc9fcbc207cd5b5045 Mon Sep 17 00:00:00 2001 From: Martin HS Date: Wed, 1 Oct 2025 10:05:49 +0200 Subject: [PATCH 009/277] core/types, trie: reduce allocations in derivesha (#30747) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit 
Alternative to #30746, potential follow-up to #30743 . This PR makes the stacktrie always copy incoming value buffers, and reuse them internally. Improvement in #30743: ``` goos: linux goarch: amd64 pkg: github.com/ethereum/go-ethereum/core/types cpu: 12th Gen Intel(R) Core(TM) i7-1270P │ derivesha.1 │ derivesha.2 │ │ sec/op │ sec/op vs base │ DeriveSha200/stack_trie-8 477.8µ ± 2% 430.0µ ± 12% -10.00% (p=0.000 n=10) │ derivesha.1 │ derivesha.2 │ │ B/op │ B/op vs base │ DeriveSha200/stack_trie-8 45.17Ki ± 0% 25.65Ki ± 0% -43.21% (p=0.000 n=10) │ derivesha.1 │ derivesha.2 │ │ allocs/op │ allocs/op vs base │ DeriveSha200/stack_trie-8 1259.0 ± 0% 232.0 ± 0% -81.57% (p=0.000 n=10) ``` This PR further enhances that: ``` goos: linux goarch: amd64 pkg: github.com/ethereum/go-ethereum/core/types cpu: 12th Gen Intel(R) Core(TM) i7-1270P │ derivesha.2 │ derivesha.3 │ │ sec/op │ sec/op vs base │ DeriveSha200/stack_trie-8 430.0µ ± 12% 423.6µ ± 13% ~ (p=0.739 n=10) │ derivesha.2 │ derivesha.3 │ │ B/op │ B/op vs base │ DeriveSha200/stack_trie-8 25.654Ki ± 0% 4.960Ki ± 0% -80.67% (p=0.000 n=10) │ derivesha.2 │ derivesha.3 │ │ allocs/op │ allocs/op vs base │ DeriveSha200/stack_trie-8 232.00 ± 0% 37.00 ± 0% -84.05% (p=0.000 n=10) ``` So the total derivesha-improvement over *both PRS* is: ``` goos: linux goarch: amd64 pkg: github.com/ethereum/go-ethereum/core/types cpu: 12th Gen Intel(R) Core(TM) i7-1270P │ derivesha.1 │ derivesha.3 │ │ sec/op │ sec/op vs base │ DeriveSha200/stack_trie-8 477.8µ ± 2% 423.6µ ± 13% -11.33% (p=0.015 n=10) │ derivesha.1 │ derivesha.3 │ │ B/op │ B/op vs base │ DeriveSha200/stack_trie-8 45.171Ki ± 0% 4.960Ki ± 0% -89.02% (p=0.000 n=10) │ derivesha.1 │ derivesha.3 │ │ allocs/op │ allocs/op vs base │ DeriveSha200/stack_trie-8 1259.00 ± 0% 37.00 ± 0% -97.06% (p=0.000 n=10) ``` Since this PR always copies the incoming value, it adds a little bit of a penalty on the previous insert-benchmark, which copied nothing (always passed the same empty slice as input) : 
``` goos: linux goarch: amd64 pkg: github.com/ethereum/go-ethereum/trie cpu: 12th Gen Intel(R) Core(TM) i7-1270P │ stacktrie.7 │ stacktrie.10 │ │ sec/op │ sec/op vs base │ Insert100K-8 88.21m ± 34% 92.37m ± 31% ~ (p=0.280 n=10) │ stacktrie.7 │ stacktrie.10 │ │ B/op │ B/op vs base │ Insert100K-8 3.424Ki ± 3% 4.581Ki ± 3% +33.80% (p=0.000 n=10) │ stacktrie.7 │ stacktrie.10 │ │ allocs/op │ allocs/op vs base │ Insert100K-8 22.00 ± 5% 26.00 ± 4% +18.18% (p=0.000 n=10) ``` --------- Co-authored-by: Gary Rong Co-authored-by: Felix Lange --- core/types/block.go | 2 +- core/types/hashing.go | 27 ++++++++----- core/types/hashing_test.go | 39 ++++++++++++------ internal/blocktest/test_hash.go | 5 ++- trie/bytepool.go | 51 ++++++++++++++++++++---- trie/list_hasher.go | 56 ++++++++++++++++++++++++++ trie/stacktrie.go | 70 +++++++++++++++++++-------------- trie/trie.go | 4 +- 8 files changed, 189 insertions(+), 65 deletions(-) create mode 100644 trie/list_hasher.go diff --git a/core/types/block.go b/core/types/block.go index da9614793a..b5b6468a13 100644 --- a/core/types/block.go +++ b/core/types/block.go @@ -240,7 +240,7 @@ type extblock struct { // // The receipt's bloom must already calculated for the block's bloom to be // correctly calculated. -func NewBlock(header *Header, body *Body, receipts []*Receipt, hasher TrieHasher) *Block { +func NewBlock(header *Header, body *Body, receipts []*Receipt, hasher ListHasher) *Block { if body == nil { body = &Body{} } diff --git a/core/types/hashing.go b/core/types/hashing.go index 3cc22d50d1..98fe64e15a 100644 --- a/core/types/hashing.go +++ b/core/types/hashing.go @@ -27,7 +27,7 @@ import ( "github.com/ethereum/go-ethereum/rlp" ) -// hasherPool holds LegacyKeccak256 hashers for rlpHash. +// hasherPool holds LegacyKeccak256 buffer for rlpHash. 
var hasherPool = sync.Pool{ New: func() interface{} { return crypto.NewKeccakState() }, } @@ -75,11 +75,17 @@ func prefixedRlpHash(prefix byte, x interface{}) (h common.Hash) { return h } -// TrieHasher is the tool used to calculate the hash of derivable list. -// This is internal, do not use. -type TrieHasher interface { +// ListHasher defines the interface for computing the hash of a derivable list. +type ListHasher interface { + // Reset clears the internal state of the hasher, preparing it for reuse. Reset() - Update([]byte, []byte) error + + // Update inserts the given key-value pair into the hasher. + // The implementation must copy the provided slices, allowing the caller + // to safely modify them after the call returns. + Update(key []byte, value []byte) error + + // Hash computes and returns the final hash of all inserted key-value pairs. Hash() common.Hash } @@ -91,19 +97,20 @@ type DerivableList interface { EncodeIndex(int, *bytes.Buffer) } +// encodeForDerive encodes the element in the list at the position i into the buffer. func encodeForDerive(list DerivableList, i int, buf *bytes.Buffer) []byte { buf.Reset() list.EncodeIndex(i, buf) - // It's really unfortunate that we need to perform this copy. - // StackTrie holds onto the values until Hash is called, so the values - // written to it must not alias. - return common.CopyBytes(buf.Bytes()) + return buf.Bytes() } // DeriveSha creates the tree hashes of transactions, receipts, and withdrawals in a block header. -func DeriveSha(list DerivableList, hasher TrieHasher) common.Hash { +func DeriveSha(list DerivableList, hasher ListHasher) common.Hash { hasher.Reset() + // Allocate a buffer for value encoding. As the hasher is claimed that all + // supplied key value pairs will be copied by hasher and safe to reuse the + // encoding buffer. 
valueBuf := encodeBufferPool.Get().(*bytes.Buffer) defer encodeBufferPool.Put(valueBuf) diff --git a/core/types/hashing_test.go b/core/types/hashing_test.go index 54adbc73e8..a7153bf09a 100644 --- a/core/types/hashing_test.go +++ b/core/types/hashing_test.go @@ -26,12 +26,10 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" - "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/trie" - "github.com/ethereum/go-ethereum/triedb" ) func TestDeriveSha(t *testing.T) { @@ -40,7 +38,7 @@ func TestDeriveSha(t *testing.T) { t.Fatal(err) } for len(txs) < 1000 { - exp := types.DeriveSha(txs, trie.NewEmpty(triedb.NewDatabase(rawdb.NewMemoryDatabase(), nil))) + exp := types.DeriveSha(txs, trie.NewListHasher()) got := types.DeriveSha(txs, trie.NewStackTrie(nil)) if !bytes.Equal(got[:], exp[:]) { t.Fatalf("%d txs: got %x exp %x", len(txs), got, exp) @@ -76,30 +74,45 @@ func TestEIP2718DeriveSha(t *testing.T) { } } +// goos: darwin +// goarch: arm64 +// pkg: github.com/ethereum/go-ethereum/core/types +// cpu: Apple M1 Pro +// BenchmarkDeriveSha200 +// BenchmarkDeriveSha200/std_trie +// BenchmarkDeriveSha200/std_trie-8 6754 174074 ns/op 80054 B/op 1926 allocs/op +// BenchmarkDeriveSha200/stack_trie +// BenchmarkDeriveSha200/stack_trie-8 7296 162675 ns/op 745 B/op 19 allocs/op func BenchmarkDeriveSha200(b *testing.B) { txs, err := genTxs(200) if err != nil { b.Fatal(err) } - var exp common.Hash - var got common.Hash + want := types.DeriveSha(txs, trie.NewListHasher()) + b.Run("std_trie", func(b *testing.B) { b.ReportAllocs() + var have common.Hash for b.Loop() { - exp = types.DeriveSha(txs, trie.NewEmpty(triedb.NewDatabase(rawdb.NewMemoryDatabase(), nil))) + have = types.DeriveSha(txs, trie.NewListHasher()) + } + if have != want { + b.Errorf("have %x want %x", have, want) } }) + st := 
trie.NewStackTrie(nil) b.Run("stack_trie", func(b *testing.B) { - b.ResetTimer() b.ReportAllocs() + var have common.Hash for b.Loop() { - got = types.DeriveSha(txs, trie.NewStackTrie(nil)) + st.Reset() + have = types.DeriveSha(txs, st) + } + if have != want { + b.Errorf("have %x want %x", have, want) } }) - if got != exp { - b.Errorf("got %x exp %x", got, exp) - } } func TestFuzzDeriveSha(t *testing.T) { @@ -107,7 +120,7 @@ func TestFuzzDeriveSha(t *testing.T) { rndSeed := mrand.Int() for i := 0; i < 10; i++ { seed := rndSeed + i - exp := types.DeriveSha(newDummy(i), trie.NewEmpty(triedb.NewDatabase(rawdb.NewMemoryDatabase(), nil))) + exp := types.DeriveSha(newDummy(i), trie.NewListHasher()) got := types.DeriveSha(newDummy(i), trie.NewStackTrie(nil)) if !bytes.Equal(got[:], exp[:]) { printList(t, newDummy(seed)) @@ -135,7 +148,7 @@ func TestDerivableList(t *testing.T) { }, } for i, tc := range tcs[1:] { - exp := types.DeriveSha(flatList(tc), trie.NewEmpty(triedb.NewDatabase(rawdb.NewMemoryDatabase(), nil))) + exp := types.DeriveSha(flatList(tc), trie.NewListHasher()) got := types.DeriveSha(flatList(tc), trie.NewStackTrie(nil)) if !bytes.Equal(got[:], exp[:]) { t.Fatalf("case %d: got %x exp %x", i, got, exp) diff --git a/internal/blocktest/test_hash.go b/internal/blocktest/test_hash.go index 4d2b077e89..b3e7098e2b 100644 --- a/internal/blocktest/test_hash.go +++ b/internal/blocktest/test_hash.go @@ -23,6 +23,7 @@ package blocktest import ( + "bytes" "hash" "github.com/ethereum/go-ethereum/common" @@ -48,8 +49,8 @@ func (h *testHasher) Reset() { // Update updates the hash state with the given key and value. 
func (h *testHasher) Update(key, val []byte) error { - h.hasher.Write(key) - h.hasher.Write(val) + h.hasher.Write(bytes.Clone(key)) + h.hasher.Write(bytes.Clone(val)) return nil } diff --git a/trie/bytepool.go b/trie/bytepool.go index 4f9c5672fd..31be7ae749 100644 --- a/trie/bytepool.go +++ b/trie/bytepool.go @@ -32,8 +32,8 @@ func newBytesPool(sliceCap, nitems int) *bytesPool { } } -// Get returns a slice. Safe for concurrent use. -func (bp *bytesPool) Get() []byte { +// get returns a slice. Safe for concurrent use. +func (bp *bytesPool) get() []byte { select { case b := <-bp.c: return b @@ -42,18 +42,18 @@ func (bp *bytesPool) Get() []byte { } } -// GetWithSize returns a slice with specified byte slice size. -func (bp *bytesPool) GetWithSize(s int) []byte { - b := bp.Get() +// getWithSize returns a slice with specified byte slice size. +func (bp *bytesPool) getWithSize(s int) []byte { + b := bp.get() if cap(b) < s { return make([]byte, s) } return b[:s] } -// Put returns a slice to the pool. Safe for concurrent use. This method +// put returns a slice to the pool. Safe for concurrent use. This method // will ignore slices that are too small or too large (>3x the cap) -func (bp *bytesPool) Put(b []byte) { +func (bp *bytesPool) put(b []byte) { if c := cap(b); c < bp.w || c > 3*bp.w { return } @@ -62,3 +62,40 @@ func (bp *bytesPool) Put(b []byte) { default: } } + +// unsafeBytesPool is a pool for byte slices. It is not safe for concurrent use. +type unsafeBytesPool struct { + items [][]byte + w int +} + +// newUnsafeBytesPool creates a new unsafeBytesPool. The sliceCap sets the +// capacity of newly allocated slices, and the nitems determines how many +// items the pool will hold, at maximum. +func newUnsafeBytesPool(sliceCap, nitems int) *unsafeBytesPool { + return &unsafeBytesPool{ + items: make([][]byte, 0, nitems), + w: sliceCap, + } +} + +// Get returns a slice with pre-allocated space. 
+func (bp *unsafeBytesPool) get() []byte { + if len(bp.items) > 0 { + last := bp.items[len(bp.items)-1] + bp.items = bp.items[:len(bp.items)-1] + return last + } + return make([]byte, 0, bp.w) +} + +// put returns a slice to the pool. This method will ignore slices that are +// too small or too large (>3x the cap) +func (bp *unsafeBytesPool) put(b []byte) { + if c := cap(b); c < bp.w || c > 3*bp.w { + return + } + if len(bp.items) < cap(bp.items) { + bp.items = append(bp.items, b) + } +} diff --git a/trie/list_hasher.go b/trie/list_hasher.go new file mode 100644 index 0000000000..8f334f9901 --- /dev/null +++ b/trie/list_hasher.go @@ -0,0 +1,56 @@ +// Copyright 2025 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package trie + +import ( + "bytes" + + "github.com/ethereum/go-ethereum/common" +) + +// ListHasher is a wrapper of the Merkle-Patricia-Trie, which implements +// types.ListHasher. Compared to a Trie instance, the Update method of this +// type always deep-copies its input slices. +// +// This implementation is very inefficient in terms of memory allocation, +// compared with StackTrie. It exists only for correctness comparison purposes. +type ListHasher struct { + tr *Trie +} + +// NewListHasher initializes the list hasher. 
+func NewListHasher() *ListHasher { + return &ListHasher{ + tr: NewEmpty(nil), + } +} + +// Reset clears the internal state prepares the ListHasher for reuse. +func (h *ListHasher) Reset() { + h.tr.reset() +} + +// Update inserts a key-value pair into the trie. +func (h *ListHasher) Update(key []byte, value []byte) error { + key, value = bytes.Clone(key), bytes.Clone(value) + return h.tr.Update(key, value) +} + +// Hash computes the root hash of all inserted key-value pairs. +func (h *ListHasher) Hash() common.Hash { + return h.tr.Hash() +} diff --git a/trie/stacktrie.go b/trie/stacktrie.go index 2b7366c3c5..18fe1eea78 100644 --- a/trie/stacktrie.go +++ b/trie/stacktrie.go @@ -28,7 +28,7 @@ import ( var ( stPool = sync.Pool{New: func() any { return new(stNode) }} bPool = newBytesPool(32, 100) - _ = types.TrieHasher((*StackTrie)(nil)) + _ = types.ListHasher((*StackTrie)(nil)) ) // OnTrieNode is a callback method invoked when a trie node is committed @@ -50,6 +50,7 @@ type StackTrie struct { onTrieNode OnTrieNode kBuf []byte // buf space used for hex-key during insertions pBuf []byte // buf space used for path during insertions + vPool *unsafeBytesPool } // NewStackTrie allocates and initializes an empty trie. The committed nodes @@ -61,6 +62,7 @@ func NewStackTrie(onTrieNode OnTrieNode) *StackTrie { onTrieNode: onTrieNode, kBuf: make([]byte, 64), pBuf: make([]byte, 64), + vPool: newUnsafeBytesPool(300, 20), } } @@ -74,6 +76,9 @@ func (t *StackTrie) grow(key []byte) { } // Update inserts a (key, value) pair into the stack trie. +// +// Note the supplied key value pair is copied and managed internally, +// they are safe to be modified after this method returns. func (t *StackTrie) Update(key, value []byte) error { if len(value) == 0 { return errors.New("trying to insert empty (deletion)") @@ -88,7 +93,14 @@ func (t *StackTrie) Update(key, value []byte) error { } else { t.last = append(t.last[:0], k...) 
// reuse key slice } - t.insert(t.root, k, value, t.pBuf[:0]) + vBuf := t.vPool.get() + if cap(vBuf) < len(value) { + vBuf = common.CopyBytes(value) + } else { + vBuf = vBuf[:len(value)] + copy(vBuf, value) + } + t.insert(t.root, k, vBuf, t.pBuf[:0]) return nil } @@ -108,14 +120,16 @@ func (t *StackTrie) TrieKey(key []byte) []byte { // stNode represents a node within a StackTrie type stNode struct { typ uint8 // node type (as in branch, ext, leaf) - key []byte // key chunk covered by this (leaf|ext) node - val []byte // value contained by this node if it's a leaf - children [16]*stNode // list of children (for branch and exts) + key []byte // exclusive owned key chunk covered by this (leaf|ext) node + val []byte // exclusive owned value contained by this node (leaf: value; hash: hash) + children [16]*stNode // list of children (for branch and ext) } -// newLeaf constructs a leaf node with provided node key and value. The key -// will be deep-copied in the function and safe to modify afterwards, but -// value is not. +// newLeaf constructs a leaf node with provided node key and value. +// +// The key is deep-copied within the function, so it can be safely modified +// afterwards. The value is retained directly without copying, as it is +// exclusively owned by the stackTrie. func newLeaf(key, val []byte) *stNode { st := stPool.Get().(*stNode) st.typ = leafNode @@ -146,9 +160,9 @@ const ( func (n *stNode) reset() *stNode { if n.typ == hashedNode { // On hashnodes, we 'own' the val: it is guaranteed to be not held - // by external caller. Hence, when we arrive here, we can put it back - // into the pool - bPool.Put(n.val) + // by external caller. Hence, when we arrive here, we can put it + // back into the pool + bPool.put(n.val) } n.key = n.key[:0] n.val = nil @@ -172,11 +186,6 @@ func (n *stNode) getDiffIndex(key []byte) int { } // Helper function to that inserts a (key, value) pair into the trie. 
-// -// - The key is not retained by this method, but always copied if needed. -// - The value is retained by this method, as long as the leaf that it represents -// remains unhashed. However: it is never modified. -// - The path is not retained by this method. func (t *StackTrie) insert(st *stNode, key, value []byte, path []byte) { switch st.typ { case branchNode: /* Branch */ @@ -235,16 +244,14 @@ func (t *StackTrie) insert(st *stNode, key, value []byte, path []byte) { } var p *stNode if diffidx == 0 { - // the break is on the first byte, so - // the current node is converted into - // a branch node. + // the break is on the first byte, so the current node + // is converted into a branch node. st.children[0] = nil - p = st st.typ = branchNode + p = st } else { - // the common prefix is at least one byte - // long, insert a new intermediate branch - // node. + // the common prefix is at least one byte long, insert + // a new intermediate branch node. st.children[0] = stPool.Get().(*stNode) st.children[0].typ = branchNode p = st.children[0] @@ -280,8 +287,8 @@ func (t *StackTrie) insert(st *stNode, key, value []byte, path []byte) { if diffidx == 0 { // Convert current leaf into a branch st.typ = branchNode - p = st st.children[0] = nil + p = st } else { // Convert current node into an ext, // and insert a child branch node. @@ -307,9 +314,7 @@ func (t *StackTrie) insert(st *stNode, key, value []byte, path []byte) { st.val = nil case emptyNode: /* Empty */ - st.typ = leafNode - st.key = append(st.key, key...) // deep-copy the key as it's volatile - st.val = value + *st = *newLeaf(key, value) case hashedNode: panic("trying to insert into hash") @@ -393,18 +398,23 @@ func (t *StackTrie) hash(st *stNode, path []byte) { st.typ = hashedNode st.key = st.key[:0] - st.val = nil // Release reference to potentially externally held slice. + // Release reference to value slice which is exclusively owned + // by stackTrie itself. 
+ if cap(st.val) > 0 && t.vPool != nil { + t.vPool.put(st.val) + } + st.val = nil // Skip committing the non-root node if the size is smaller than 32 bytes // as tiny nodes are always embedded in their parent except root node. if len(blob) < 32 && len(path) > 0 { - st.val = bPool.GetWithSize(len(blob)) + st.val = bPool.getWithSize(len(blob)) copy(st.val, blob) return } // Write the hash to the 'val'. We allocate a new val here to not mutate // input values. - st.val = bPool.GetWithSize(32) + st.val = bPool.getWithSize(32) t.h.hashDataTo(st.val, blob) // Invoke the callback it's provided. Notably, the path and blob slices are diff --git a/trie/trie.go b/trie/trie.go index 36cc732ee8..1ef2c2f1a6 100644 --- a/trie/trie.go +++ b/trie/trie.go @@ -784,8 +784,8 @@ func (t *Trie) Witness() map[string][]byte { return t.prevalueTracer.Values() } -// Reset drops the referenced root node and cleans all internal state. -func (t *Trie) Reset() { +// reset drops the referenced root node and cleans all internal state. +func (t *Trie) reset() { t.root = nil t.owner = common.Hash{} t.unhashed = 0 From f0dc47aae393abe0bb7b084345daf9108ad8897a Mon Sep 17 00:00:00 2001 From: zzzckck <152148891+zzzckck@users.noreply.github.com> Date: Thu, 2 Oct 2025 18:43:31 +0800 Subject: [PATCH 010/277] p2p/enode: fix discovery AyncFilter deadlock on shutdown (#32572) Description: We found a occasionally node hang issue on BSC, I think Geth may also have the issue, so pick the fix patch here. The fix on BSC repo: https://github.com/bnb-chain/bsc/pull/3347 When the hang occurs, there are two routines stuck. - routine 1: AsyncFilter(...) 
On node start, it will run part of the DiscoveryV4 protocol, which could take considerable time, here is its hang callstack: ``` goroutine 9711 [chan receive]: // this routine was stuck on read channel: `<-f.slots` github.com/ethereum/go-ethereum/p2p/enode.AsyncFilter.func1() github.com/ethereum/go-ethereum/p2p/enode/iter.go:206 +0x125 created by github.com/ethereum/go-ethereum/p2p/enode.AsyncFilter in goroutine 1 github.com/ethereum/go-ethereum/p2p/enode/iter.go:192 +0x205 ``` - Routine 2: Node Stop It is the main routine to shutdown the process, but it got stuck when it tries to shutdown the discovery components, as it tries to drain the channel of `<-f.slots`, but the extra 1 slot will never have chance to be resumed. ``` goroutine 11796 [chan receive]: github.com/ethereum/go-ethereum/p2p/enode.(*asyncFilterIter).Close.func1() github.com/ethereum/go-ethereum/p2p/enode/iter.go:248 +0x5c sync.(*Once).doSlow(0xc032a97cb8?, 0xc032a97d18?) sync/once.go:78 +0xab sync.(*Once).Do(...) sync/once.go:69 github.com/ethereum/go-ethereum/p2p/enode.(*asyncFilterIter).Close(0xc092ff8d00?) github.com/ethereum/go-ethereum/p2p/enode/iter.go:244 +0x36 github.com/ethereum/go-ethereum/p2p/enode.(*bufferIter).Close.func1() github.com/ethereum/go-ethereum/p2p/enode/iter.go:299 +0x24 sync.(*Once).doSlow(0x11a175f?, 0x2bfe63e?) sync/once.go:78 +0xab sync.(*Once).Do(...) sync/once.go:69 github.com/ethereum/go-ethereum/p2p/enode.(*bufferIter).Close(0x30?) 
github.com/ethereum/go-ethereum/p2p/enode/iter.go:298 +0x36 github.com/ethereum/go-ethereum/p2p/enode.(*FairMix).Close(0xc0004bfea0) github.com/ethereum/go-ethereum/p2p/enode/iter.go:379 +0xb7 github.com/ethereum/go-ethereum/eth.(*Ethereum).Stop(0xc000997b00) github.com/ethereum/go-ethereum/eth/backend.go:960 +0x4a github.com/ethereum/go-ethereum/node.(*Node).stopServices(0xc0001362a0, {0xc012e16330, 0x1, 0xc000111410?}) github.com/ethereum/go-ethereum/node/node.go:333 +0xb3 github.com/ethereum/go-ethereum/node.(*Node).Close(0xc0001362a0) github.com/ethereum/go-ethereum/node/node.go:263 +0x167 created by github.com/ethereum/go-ethereum/cmd/utils.StartNode.func1.1 in goroutine 9729 github.com/ethereum/go-ethereum/cmd/utils/cmd.go:101 +0x78 ``` The rootcause of the hang is caused by the extra 1 slot, which was designed to make sure the routines in `AsyncFilter(...)` can be finished. This PR fixes it by making sure the extra 1 shot can always be resumed when node shutdown. --- p2p/enode/iter.go | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/p2p/enode/iter.go b/p2p/enode/iter.go index 4890321f49..265d8648de 100644 --- a/p2p/enode/iter.go +++ b/p2p/enode/iter.go @@ -178,7 +178,7 @@ type AsyncFilterFunc func(context.Context, *Node) *Node func AsyncFilter(it Iterator, check AsyncFilterFunc, workers int) Iterator { f := &asyncFilterIter{ it: ensureSourceIter(it), - slots: make(chan struct{}, workers+1), + slots: make(chan struct{}, workers+1), // extra 1 slot to make sure all the goroutines can be completed passed: make(chan iteratorItem), } for range cap(f.slots) { @@ -193,6 +193,9 @@ func AsyncFilter(it Iterator, check AsyncFilterFunc, workers int) Iterator { return case <-f.slots: } + defer func() { + f.slots <- struct{}{} // the iterator has ended + }() // read from the iterator and start checking nodes in parallel // when a node is checked, it will be sent to the passed channel // and the slot will be released @@ -201,7 +204,11 @@ func 
AsyncFilter(it Iterator, check AsyncFilterFunc, workers int) Iterator { nodeSource := f.it.NodeSource() // check the node async, in a separate goroutine - <-f.slots + select { + case <-ctx.Done(): + return + case <-f.slots: + } go func() { if nn := check(ctx, node); nn != nil { item := iteratorItem{nn, nodeSource} @@ -213,8 +220,6 @@ func AsyncFilter(it Iterator, check AsyncFilterFunc, workers int) Iterator { f.slots <- struct{}{} }() } - // the iterator has ended - f.slots <- struct{}{} }() return f From fc8c8c1314a0fafc56297332729c2c00372e837e Mon Sep 17 00:00:00 2001 From: hero5512 Date: Thu, 2 Oct 2025 08:34:06 -0400 Subject: [PATCH 011/277] core: refactor StateProcessor to accept ChainContext interface (#32739) This pr implements https://github.com/ethereum/go-ethereum/issues/32733 to make StateProcessor more customisable. ## Compatibility notes This introduces a breaking change to users using geth EVM as a library. The `NewStateProcessor` function now takes one parameter which has the chainConfig embedded instead of 2 parameters. 
--- core/blockchain.go | 2 +- core/evm.go | 9 ++------- core/state_processor.go | 28 ++++++++++++++++------------ core/stateless.go | 2 +- core/vm/runtime/runtime_test.go | 12 ++++++++++++ eth/tracers/api.go | 1 + eth/tracers/api_test.go | 4 ++++ internal/ethapi/api.go | 16 ++++++++++++++++ internal/ethapi/simulate.go | 20 ++++++++++++++++++++ tests/state_test_util.go | 3 +++ 10 files changed, 76 insertions(+), 21 deletions(-) diff --git a/core/blockchain.go b/core/blockchain.go index 30f3da3004..71eb4c45a2 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -394,7 +394,7 @@ func NewBlockChain(db ethdb.Database, genesis *Genesis, engine consensus.Engine, bc.statedb = state.NewDatabase(bc.triedb, nil) bc.validator = NewBlockValidator(chainConfig, bc) bc.prefetcher = newStatePrefetcher(chainConfig, bc.hc) - bc.processor = NewStateProcessor(chainConfig, bc.hc) + bc.processor = NewStateProcessor(bc.hc) genesisHeader := bc.GetHeaderByNumber(0) if genesisHeader == nil { diff --git a/core/evm.go b/core/evm.go index 41b4e6ac58..18d940fdd2 100644 --- a/core/evm.go +++ b/core/evm.go @@ -25,21 +25,16 @@ import ( "github.com/ethereum/go-ethereum/core/tracing" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/vm" - "github.com/ethereum/go-ethereum/params" "github.com/holiman/uint256" ) // ChainContext supports retrieving headers and consensus parameters from the // current blockchain to be used during transaction processing. type ChainContext interface { + consensus.ChainHeaderReader + // Engine retrieves the chain's consensus engine. Engine() consensus.Engine - - // GetHeader returns the header corresponding to the hash/number argument pair. - GetHeader(common.Hash, uint64) *types.Header - - // Config returns the chain's configuration. - Config() *params.ChainConfig } // NewEVMBlockContext creates a new context for use in the EVM. 
diff --git a/core/state_processor.go b/core/state_processor.go index 4a5e69ca6e..b66046f501 100644 --- a/core/state_processor.go +++ b/core/state_processor.go @@ -35,18 +35,21 @@ import ( // // StateProcessor implements Processor. type StateProcessor struct { - config *params.ChainConfig // Chain configuration options - chain *HeaderChain // Canonical header chain + chain ChainContext // Chain context interface } // NewStateProcessor initialises a new StateProcessor. -func NewStateProcessor(config *params.ChainConfig, chain *HeaderChain) *StateProcessor { +func NewStateProcessor(chain ChainContext) *StateProcessor { return &StateProcessor{ - config: config, - chain: chain, + chain: chain, } } +// chainConfig returns the chain configuration. +func (p *StateProcessor) chainConfig() *params.ChainConfig { + return p.chain.Config() +} + // Process processes the state changes according to the Ethereum rules by running // the transaction messages using the statedb and applying any rewards to both // the processor (coinbase) and any included uncles. @@ -56,6 +59,7 @@ func NewStateProcessor(config *params.ChainConfig, chain *HeaderChain) *StatePro // transactions failed to execute due to insufficient gas it will return an error. 
func (p *StateProcessor) Process(block *types.Block, statedb *state.StateDB, cfg vm.Config) (*ProcessResult, error) { var ( + config = p.chainConfig() receipts types.Receipts usedGas = new(uint64) header = block.Header() @@ -66,12 +70,12 @@ func (p *StateProcessor) Process(block *types.Block, statedb *state.StateDB, cfg ) // Mutate the block and state according to any hard-fork specs - if p.config.DAOForkSupport && p.config.DAOForkBlock != nil && p.config.DAOForkBlock.Cmp(block.Number()) == 0 { + if config.DAOForkSupport && config.DAOForkBlock != nil && config.DAOForkBlock.Cmp(block.Number()) == 0 { misc.ApplyDAOHardFork(statedb) } var ( context vm.BlockContext - signer = types.MakeSigner(p.config, header.Number, header.Time) + signer = types.MakeSigner(config, header.Number, header.Time) ) // Apply pre-execution system calls. @@ -80,12 +84,12 @@ func (p *StateProcessor) Process(block *types.Block, statedb *state.StateDB, cfg tracingStateDB = state.NewHookedState(statedb, hooks) } context = NewEVMBlockContext(header, p.chain, nil) - evm := vm.NewEVM(context, tracingStateDB, p.config, cfg) + evm := vm.NewEVM(context, tracingStateDB, config, cfg) if beaconRoot := block.BeaconRoot(); beaconRoot != nil { ProcessBeaconBlockRoot(*beaconRoot, evm) } - if p.config.IsPrague(block.Number(), block.Time()) || p.config.IsVerkle(block.Number(), block.Time()) { + if config.IsPrague(block.Number(), block.Time()) || config.IsVerkle(block.Number(), block.Time()) { ProcessParentBlockHash(block.ParentHash(), evm) } @@ -106,10 +110,10 @@ func (p *StateProcessor) Process(block *types.Block, statedb *state.StateDB, cfg } // Read requests if Prague is enabled. 
var requests [][]byte - if p.config.IsPrague(block.Number(), block.Time()) { + if config.IsPrague(block.Number(), block.Time()) { requests = [][]byte{} // EIP-6110 - if err := ParseDepositLogs(&requests, allLogs, p.config); err != nil { + if err := ParseDepositLogs(&requests, allLogs, config); err != nil { return nil, fmt.Errorf("failed to parse deposit logs: %w", err) } // EIP-7002 @@ -123,7 +127,7 @@ func (p *StateProcessor) Process(block *types.Block, statedb *state.StateDB, cfg } // Finalize the block, applying any consensus engine specific extras (e.g. block rewards) - p.chain.engine.Finalize(p.chain, header, tracingStateDB, block.Body()) + p.chain.Engine().Finalize(p.chain, header, tracingStateDB, block.Body()) return &ProcessResult{ Receipts: receipts, diff --git a/core/stateless.go b/core/stateless.go index d21a62b4a5..b20c909da6 100644 --- a/core/stateless.go +++ b/core/stateless.go @@ -62,7 +62,7 @@ func ExecuteStateless(config *params.ChainConfig, vmconfig vm.Config, block *typ headerCache: lru.NewCache[common.Hash, *types.Header](256), engine: beacon.New(ethash.NewFaker()), } - processor := NewStateProcessor(config, chain) + processor := NewStateProcessor(chain) validator := NewBlockValidator(config, nil) // No chain, we only validate the state, not the block // Run the stateless blocks processing and self-validate certain fields diff --git a/core/vm/runtime/runtime_test.go b/core/vm/runtime/runtime_test.go index ddd32df039..a001d81623 100644 --- a/core/vm/runtime/runtime_test.go +++ b/core/vm/runtime/runtime_test.go @@ -312,6 +312,18 @@ func (d *dummyChain) Config() *params.ChainConfig { return nil } +func (d *dummyChain) CurrentHeader() *types.Header { + return nil +} + +func (d *dummyChain) GetHeaderByNumber(n uint64) *types.Header { + return d.GetHeader(common.Hash{}, n) +} + +func (d *dummyChain) GetHeaderByHash(h common.Hash) *types.Header { + return nil +} + // TestBlockhash tests the blockhash operation. 
It's a bit special, since it internally // requires access to a chain reader. func TestBlockhash(t *testing.T) { diff --git a/eth/tracers/api.go b/eth/tracers/api.go index a05b7a7a4a..aebeb48463 100644 --- a/eth/tracers/api.go +++ b/eth/tracers/api.go @@ -80,6 +80,7 @@ type StateReleaseFunc func() type Backend interface { HeaderByHash(ctx context.Context, hash common.Hash) (*types.Header, error) HeaderByNumber(ctx context.Context, number rpc.BlockNumber) (*types.Header, error) + CurrentHeader() *types.Header BlockByHash(ctx context.Context, hash common.Hash) (*types.Block, error) BlockByNumber(ctx context.Context, number rpc.BlockNumber) (*types.Block, error) GetCanonicalTransaction(txHash common.Hash) (bool, *types.Transaction, common.Hash, uint64, uint64) diff --git a/eth/tracers/api_test.go b/eth/tracers/api_test.go index 39c39ff05d..4173d2a791 100644 --- a/eth/tracers/api_test.go +++ b/eth/tracers/api_test.go @@ -142,6 +142,10 @@ func (b *testBackend) ChainDb() ethdb.Database { return b.chaindb } +func (b *testBackend) CurrentHeader() *types.Header { + return b.chain.CurrentHeader() +} + // teardown releases the associated resources. 
func (b *testBackend) teardown() { b.chain.Stop() diff --git a/internal/ethapi/api.go b/internal/ethapi/api.go index 2432bb70b8..c3f267027c 100644 --- a/internal/ethapi/api.go +++ b/internal/ethapi/api.go @@ -636,6 +636,8 @@ func (api *BlockChainAPI) GetBlockReceipts(ctx context.Context, blockNrOrHash rp type ChainContextBackend interface { Engine() consensus.Engine HeaderByNumber(context.Context, rpc.BlockNumber) (*types.Header, error) + HeaderByHash(context.Context, common.Hash) (*types.Header, error) + CurrentHeader() *types.Header ChainConfig() *params.ChainConfig } @@ -669,6 +671,20 @@ func (context *ChainContext) Config() *params.ChainConfig { return context.b.ChainConfig() } +func (context *ChainContext) CurrentHeader() *types.Header { + return context.b.CurrentHeader() +} + +func (context *ChainContext) GetHeaderByNumber(number uint64) *types.Header { + header, _ := context.b.HeaderByNumber(context.ctx, rpc.BlockNumber(number)) + return header +} + +func (context *ChainContext) GetHeaderByHash(hash common.Hash) *types.Header { + header, _ := context.b.HeaderByHash(context.ctx, hash) + return header +} + func doCall(ctx context.Context, b Backend, args TransactionArgs, state *state.StateDB, header *types.Header, overrides *override.StateOverride, blockOverrides *override.BlockOverrides, timeout time.Duration, globalGasCap uint64) (*core.ExecutionResult, error) { blockCtx := core.NewEVMBlockContext(header, NewChainContext(ctx, b), nil) if blockOverrides != nil { diff --git a/internal/ethapi/simulate.go b/internal/ethapi/simulate.go index 75b5c5ffa8..2bda69b315 100644 --- a/internal/ethapi/simulate.go +++ b/internal/ethapi/simulate.go @@ -541,3 +541,23 @@ func (b *simBackend) HeaderByNumber(ctx context.Context, number rpc.BlockNumber) func (b *simBackend) ChainConfig() *params.ChainConfig { return b.b.ChainConfig() } + +func (b *simBackend) HeaderByHash(ctx context.Context, hash common.Hash) (*types.Header, error) { + if b.base.Hash() == hash { + return 
b.base, nil + } + if header, err := b.b.HeaderByHash(ctx, hash); err == nil { + return header, nil + } + // Check simulated headers + for _, header := range b.headers { + if header.Hash() == hash { + return header, nil + } + } + return nil, errors.New("header not found") +} + +func (b *simBackend) CurrentHeader() *types.Header { + return b.b.CurrentHeader() +} diff --git a/tests/state_test_util.go b/tests/state_test_util.go index b8d3c4fb92..1d6cc8db70 100644 --- a/tests/state_test_util.go +++ b/tests/state_test_util.go @@ -559,3 +559,6 @@ type dummyChain struct { func (d *dummyChain) Engine() consensus.Engine { return nil } func (d *dummyChain) GetHeader(h common.Hash, n uint64) *types.Header { return nil } func (d *dummyChain) Config() *params.ChainConfig { return d.config } +func (d *dummyChain) CurrentHeader() *types.Header { return nil } +func (d *dummyChain) GetHeaderByNumber(n uint64) *types.Header { return nil } +func (d *dummyChain) GetHeaderByHash(h common.Hash) *types.Header { return nil } From 4927e89647a0d27f284472aa563890035d3662db Mon Sep 17 00:00:00 2001 From: Csaba Kiraly Date: Thu, 2 Oct 2025 17:27:35 +0200 Subject: [PATCH 012/277] p2p/enode: fix asyncfilter comment (#32823) just finisher the sentence Signed-off-by: Csaba Kiraly --- p2p/enode/iter.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/p2p/enode/iter.go b/p2p/enode/iter.go index 265d8648de..54c2fc7258 100644 --- a/p2p/enode/iter.go +++ b/p2p/enode/iter.go @@ -174,7 +174,8 @@ type AsyncFilterFunc func(context.Context, *Node) *Node // AsyncFilter creates an iterator which checks nodes in parallel. // The 'check' function is called on multiple goroutines to filter each node // from the upstream iterator. When check returns nil, the node will be skipped. -// It can also return a new node to be returned by the iterator instead of the . +// It can also return a new node to be returned by the iterator instead of the +// original one. 
func AsyncFilter(it Iterator, check AsyncFilterFunc, workers int) Iterator { f := &asyncFilterIter{ it: ensureSourceIter(it), From 1e4b39ed122f475ac3f776ae66c8d065e845a84e Mon Sep 17 00:00:00 2001 From: hero5512 Date: Thu, 2 Oct 2025 11:32:20 -0400 Subject: [PATCH 013/277] trie: cleaner array concatenation (#32756) It uses the slices.Concat and slices.Clone methods available now in Go. --- trie/sync.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/trie/sync.go b/trie/sync.go index 8d0ce6901c..404d67f154 100644 --- a/trie/sync.go +++ b/trie/sync.go @@ -19,6 +19,7 @@ package trie import ( "errors" "fmt" + "slices" "sync" "github.com/ethereum/go-ethereum/common" @@ -553,7 +554,7 @@ func (s *Sync) children(req *nodeRequest, object node) ([]*nodeRequest, error) { } children = []childNode{{ node: node.Val, - path: append(append([]byte(nil), req.path...), key...), + path: slices.Concat(req.path, key), }} // Mark all internal nodes between shortNode and its **in disk** // child as invalid. This is essential in the case of path mode @@ -595,7 +596,7 @@ func (s *Sync) children(req *nodeRequest, object node) ([]*nodeRequest, error) { if node.Children[i] != nil { children = append(children, childNode{ node: node.Children[i], - path: append(append([]byte(nil), req.path...), byte(i)), + path: append(slices.Clone(req.path), byte(i)), }) } } From 477ee5873ba9fde828c7964fcb9f881799c9d6c2 Mon Sep 17 00:00:00 2001 From: Nikita Mescheryakov Date: Mon, 6 Oct 2025 21:19:25 +0500 Subject: [PATCH 014/277] internal/ethapi: add timestamp to logs in eth_simulate (#32831) Adds blockTimestamp to the logs in response of eth_simulateV1. 
--------- Co-authored-by: Sina Mahmoodi --- internal/ethapi/api_test.go | 30 +++++++++++++++++------------- internal/ethapi/logtracer.go | 21 ++++++++++++--------- internal/ethapi/simulate.go | 2 +- 3 files changed, 30 insertions(+), 23 deletions(-) diff --git a/internal/ethapi/api_test.go b/internal/ethapi/api_test.go index 2e0b1c3bc0..d3278c04e7 100644 --- a/internal/ethapi/api_test.go +++ b/internal/ethapi/api_test.go @@ -1327,10 +1327,11 @@ func TestSimulateV1(t *testing.T) { validation = true ) type log struct { - Address common.Address `json:"address"` - Topics []common.Hash `json:"topics"` - Data hexutil.Bytes `json:"data"` - BlockNumber hexutil.Uint64 `json:"blockNumber"` + Address common.Address `json:"address"` + Topics []common.Hash `json:"topics"` + Data hexutil.Bytes `json:"data"` + BlockNumber hexutil.Uint64 `json:"blockNumber"` + BlockTimestamp hexutil.Uint64 `json:"blockTimestamp"` // Skip txHash //TxHash common.Hash `json:"transactionHash" gencodec:"required"` TxIndex hexutil.Uint `json:"transactionIndex"` @@ -1677,10 +1678,11 @@ func TestSimulateV1(t *testing.T) { Calls: []callRes{{ ReturnValue: "0x", Logs: []log{{ - Address: randomAccounts[2].addr, - Topics: []common.Hash{common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")}, - BlockNumber: hexutil.Uint64(11), - Data: hexutil.Bytes{}, + Address: randomAccounts[2].addr, + Topics: []common.Hash{common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")}, + BlockNumber: hexutil.Uint64(11), + BlockTimestamp: hexutil.Uint64(0x70), + Data: hexutil.Bytes{}, }}, GasUsed: "0x5508", Status: "0x1", @@ -1853,8 +1855,9 @@ func TestSimulateV1(t *testing.T) { addressToHash(accounts[0].addr), addressToHash(randomAccounts[0].addr), }, - Data: hexutil.Bytes(common.BigToHash(big.NewInt(50)).Bytes()), - BlockNumber: hexutil.Uint64(11), + Data: hexutil.Bytes(common.BigToHash(big.NewInt(50)).Bytes()), + BlockNumber: hexutil.Uint64(11), + BlockTimestamp: 
hexutil.Uint64(0x70), }, { Address: transferAddress, Topics: []common.Hash{ @@ -1862,9 +1865,10 @@ func TestSimulateV1(t *testing.T) { addressToHash(randomAccounts[0].addr), addressToHash(fixedAccount.addr), }, - Data: hexutil.Bytes(common.BigToHash(big.NewInt(100)).Bytes()), - BlockNumber: hexutil.Uint64(11), - Index: hexutil.Uint(1), + Data: hexutil.Bytes(common.BigToHash(big.NewInt(100)).Bytes()), + BlockNumber: hexutil.Uint64(11), + BlockTimestamp: hexutil.Uint64(0x70), + Index: hexutil.Uint(1), }}, Status: "0x1", }}, diff --git a/internal/ethapi/logtracer.go b/internal/ethapi/logtracer.go index 456aa93736..54d2d653ea 100644 --- a/internal/ethapi/logtracer.go +++ b/internal/ethapi/logtracer.go @@ -53,15 +53,17 @@ type tracer struct { count int traceTransfers bool blockNumber uint64 + blockTimestamp uint64 blockHash common.Hash txHash common.Hash txIdx uint } -func newTracer(traceTransfers bool, blockNumber uint64, blockHash, txHash common.Hash, txIndex uint) *tracer { +func newTracer(traceTransfers bool, blockNumber uint64, blockTimestamp uint64, blockHash, txHash common.Hash, txIndex uint) *tracer { return &tracer{ traceTransfers: traceTransfers, blockNumber: blockNumber, + blockTimestamp: blockTimestamp, blockHash: blockHash, txHash: txHash, txIdx: txIndex, @@ -115,14 +117,15 @@ func (t *tracer) onLog(log *types.Log) { func (t *tracer) captureLog(address common.Address, topics []common.Hash, data []byte) { t.logs[len(t.logs)-1] = append(t.logs[len(t.logs)-1], &types.Log{ - Address: address, - Topics: topics, - Data: data, - BlockNumber: t.blockNumber, - BlockHash: t.blockHash, - TxHash: t.txHash, - TxIndex: t.txIdx, - Index: uint(t.count), + Address: address, + Topics: topics, + Data: data, + BlockNumber: t.blockNumber, + BlockTimestamp: t.blockTimestamp, + BlockHash: t.blockHash, + TxHash: t.txHash, + TxIndex: t.txIdx, + Index: uint(t.count), }) t.count++ } diff --git a/internal/ethapi/simulate.go b/internal/ethapi/simulate.go index 2bda69b315..0d1a59b371 
100644 --- a/internal/ethapi/simulate.go +++ b/internal/ethapi/simulate.go @@ -244,7 +244,7 @@ func (sim *simulator) processBlock(ctx context.Context, block *simBlock, header, callResults = make([]simCallResult, len(block.Calls)) receipts = make([]*types.Receipt, len(block.Calls)) // Block hash will be repaired after execution. - tracer = newTracer(sim.traceTransfers, blockContext.BlockNumber.Uint64(), common.Hash{}, common.Hash{}, 0) + tracer = newTracer(sim.traceTransfers, blockContext.BlockNumber.Uint64(), blockContext.Time, common.Hash{}, common.Hash{}, 0) vmConfig = &vm.Config{ NoBaseFee: !sim.validate, Tracer: tracer.Hooks(), From ee309827c680d4efc8f9cebf82017bdbec6be051 Mon Sep 17 00:00:00 2001 From: Sina M <1591639+s1na@users.noreply.github.com> Date: Tue, 7 Oct 2025 11:24:30 +0200 Subject: [PATCH 015/277] build: faster gh actions workflow, no ubuntu on appveyor (#32829) This PR does a few things: - Sets the gh actions runner sizes for lint (s) and test (l) workflows - Runs the tests on gh actions in parallel - Skips fetching the spec tests when unnecessary (on windows in appveyor) - Removes ubuntu appveyor runner since it's essentially duplicate of the gh action workflow now The gh test seems to go down from ~35min to ~13min. 
--- .github/workflows/go.yml | 6 +++--- appveyor.yml | 22 +--------------------- build/ci.go | 28 +++++++++++++++++----------- 3 files changed, 21 insertions(+), 35 deletions(-) diff --git a/.github/workflows/go.yml b/.github/workflows/go.yml index cc8ea36d74..b8cf7f75e0 100644 --- a/.github/workflows/go.yml +++ b/.github/workflows/go.yml @@ -10,7 +10,7 @@ on: jobs: lint: name: Lint - runs-on: self-hosted-ghr + runs-on: [self-hosted-ghr, size-s-x64] steps: - uses: actions/checkout@v4 with: @@ -37,7 +37,7 @@ jobs: test: name: Test needs: lint - runs-on: self-hosted-ghr + runs-on: [self-hosted-ghr, size-l-x64] strategy: matrix: go: @@ -55,4 +55,4 @@ jobs: cache: false - name: Run tests - run: go run build/ci.go test + run: go run build/ci.go test -p 8 diff --git a/appveyor.yml b/appveyor.yml index ae1c74c18e..8dce7f30a2 100644 --- a/appveyor.yml +++ b/appveyor.yml @@ -2,7 +2,6 @@ clone_depth: 5 version: "{branch}.{build}" image: - - Ubuntu - Visual Studio 2019 environment: @@ -17,25 +16,6 @@ install: - go version for: - # Linux has its own script without -arch and -cc. - # The linux builder also runs lint. - - matrix: - only: - - image: Ubuntu - build_script: - - go run build/ci.go lint - - go run build/ci.go check_generate - - go run build/ci.go check_baddeps - - go run build/ci.go install -dlgo - test_script: - - go run build/ci.go test -dlgo -short - - # linux/386 is disabled. - - matrix: - exclude: - - image: Ubuntu - GETH_ARCH: 386 - # Windows builds for amd64 + 386. 
- matrix: only: @@ -56,4 +36,4 @@ for: - go run build/ci.go archive -arch %GETH_ARCH% -type zip -signer WINDOWS_SIGNING_KEY -upload gethstore/builds - go run build/ci.go nsis -arch %GETH_ARCH% -signer WINDOWS_SIGNING_KEY -upload gethstore/builds test_script: - - go run build/ci.go test -dlgo -arch %GETH_ARCH% -cc %GETH_CC% -short + - go run build/ci.go test -dlgo -arch %GETH_ARCH% -cc %GETH_CC% -short -skip-spectests diff --git a/build/ci.go b/build/ci.go index da867a1516..905f6e4072 100644 --- a/build/ci.go +++ b/build/ci.go @@ -281,20 +281,26 @@ func buildFlags(env build.Environment, staticLinking bool, buildTags []string) ( func doTest(cmdline []string) { var ( - dlgo = flag.Bool("dlgo", false, "Download Go and build with it") - arch = flag.String("arch", "", "Run tests for given architecture") - cc = flag.String("cc", "", "Sets C compiler binary") - coverage = flag.Bool("coverage", false, "Whether to record code coverage") - verbose = flag.Bool("v", false, "Whether to log verbosely") - race = flag.Bool("race", false, "Execute the race detector") - short = flag.Bool("short", false, "Pass the 'short'-flag to go test") - cachedir = flag.String("cachedir", "./build/cache", "directory for caching downloads") + dlgo = flag.Bool("dlgo", false, "Download Go and build with it") + arch = flag.String("arch", "", "Run tests for given architecture") + cc = flag.String("cc", "", "Sets C compiler binary") + coverage = flag.Bool("coverage", false, "Whether to record code coverage") + verbose = flag.Bool("v", false, "Whether to log verbosely") + race = flag.Bool("race", false, "Execute the race detector") + short = flag.Bool("short", false, "Pass the 'short'-flag to go test") + cachedir = flag.String("cachedir", "./build/cache", "directory for caching downloads") + skipspectests = flag.Bool("skip-spectests", false, "Skip downloading execution-spec-tests fixtures") + threads = flag.Int("p", 1, "Number of CPU threads to use for testing") ) flag.CommandLine.Parse(cmdline) - // Get 
test fixtures. + // Load checksums file (needed for both spec tests and dlgo) csdb := download.MustLoadChecksums("build/checksums.txt") - downloadSpecTestFixtures(csdb, *cachedir) + + // Get test fixtures. + if !*skipspectests { + downloadSpecTestFixtures(csdb, *cachedir) + } // Configure the toolchain. tc := build.GoToolchain{GOARCH: *arch, CC: *cc} @@ -315,7 +321,7 @@ func doTest(cmdline []string) { // Test a single package at a time. CI builders are slow // and some tests run into timeouts under load. - gotest.Args = append(gotest.Args, "-p", "1") + gotest.Args = append(gotest.Args, "-p", fmt.Sprintf("%d", *threads)) if *coverage { gotest.Args = append(gotest.Args, "-covermode=atomic", "-cover") } From d67037a981cbb6355b657d30429af3c325921364 Mon Sep 17 00:00:00 2001 From: Felix Lange Date: Wed, 8 Oct 2025 11:14:27 +0200 Subject: [PATCH 016/277] cmd/devp2p/internal/ethtest: update to PoS-only test chain (#32850) --- cmd/devp2p/internal/ethtest/mkchain.sh | 5 +- cmd/devp2p/internal/ethtest/snap.go | 32 +- .../internal/ethtest/testdata/chain.rlp | Bin 341951 -> 451888 bytes .../internal/ethtest/testdata/forkenv.json | 37 +- .../internal/ethtest/testdata/genesis.json | 59 +- .../internal/ethtest/testdata/headblock.json | 21 +- .../internal/ethtest/testdata/headfcu.json | 8 +- .../internal/ethtest/testdata/headstate.json | 6492 +++---- .../internal/ethtest/testdata/newpayload.json | 15735 +++++++++++----- .../internal/ethtest/testdata/txinfo.json | 5390 +++--- 10 files changed, 16749 insertions(+), 11030 deletions(-) diff --git a/cmd/devp2p/internal/ethtest/mkchain.sh b/cmd/devp2p/internal/ethtest/mkchain.sh index b9253e8ca7..fab630d977 100644 --- a/cmd/devp2p/internal/ethtest/mkchain.sh +++ b/cmd/devp2p/internal/ethtest/mkchain.sh @@ -1,9 +1,10 @@ #!/bin/sh hivechain generate \ + --pos \ --fork-interval 6 \ --tx-interval 1 \ - --length 500 \ + --length 600 \ --outdir testdata \ - --lastfork cancun \ + --lastfork prague \ --outputs 
accounts,genesis,chain,headstate,txinfo,headblock,headfcu,newpayload,forkenv diff --git a/cmd/devp2p/internal/ethtest/snap.go b/cmd/devp2p/internal/ethtest/snap.go index 9c1efa0e8e..f4fce0931f 100644 --- a/cmd/devp2p/internal/ethtest/snap.go +++ b/cmd/devp2p/internal/ethtest/snap.go @@ -86,9 +86,9 @@ func (s *Suite) TestSnapGetAccountRange(t *utesting.T) { root: root, startingHash: zero, limitHash: ffHash, - expAccounts: 86, + expAccounts: 67, expFirst: firstKey, - expLast: common.HexToHash("0x445cb5c1278fdce2f9cbdb681bdd76c52f8e50e41dbd9e220242a69ba99ac099"), + expLast: common.HexToHash("0x622e662246601dd04f996289ce8b85e86db7bb15bb17f86487ec9d543ddb6f9a"), desc: "In this test, we request the entire state range, but limit the response to 4000 bytes.", }, { @@ -96,9 +96,9 @@ func (s *Suite) TestSnapGetAccountRange(t *utesting.T) { root: root, startingHash: zero, limitHash: ffHash, - expAccounts: 65, + expAccounts: 49, expFirst: firstKey, - expLast: common.HexToHash("0x2e6fe1362b3e388184fd7bf08e99e74170b26361624ffd1c5f646da7067b58b6"), + expLast: common.HexToHash("0x445cb5c1278fdce2f9cbdb681bdd76c52f8e50e41dbd9e220242a69ba99ac099"), desc: "In this test, we request the entire state range, but limit the response to 3000 bytes.", }, { @@ -106,9 +106,9 @@ func (s *Suite) TestSnapGetAccountRange(t *utesting.T) { root: root, startingHash: zero, limitHash: ffHash, - expAccounts: 44, + expAccounts: 34, expFirst: firstKey, - expLast: common.HexToHash("0x1c3f74249a4892081ba0634a819aec9ed25f34c7653f5719b9098487e65ab595"), + expLast: common.HexToHash("0x2ef46ebd2073cecde499c2e8df028ad79a26d57bfaa812c4c6f7eb4c9617b913"), desc: "In this test, we request the entire state range, but limit the response to 2000 bytes.", }, { @@ -177,9 +177,9 @@ The server should return the first available account.`, root: root, startingHash: firstKey, limitHash: ffHash, - expAccounts: 86, + expAccounts: 67, expFirst: firstKey, - expLast: 
common.HexToHash("0x445cb5c1278fdce2f9cbdb681bdd76c52f8e50e41dbd9e220242a69ba99ac099"), + expLast: common.HexToHash("0x622e662246601dd04f996289ce8b85e86db7bb15bb17f86487ec9d543ddb6f9a"), desc: `In this test, startingHash is exactly the first available account key. The server should return the first available account of the state as the first item.`, }, @@ -188,9 +188,9 @@ The server should return the first available account of the state as the first i root: root, startingHash: hashAdd(firstKey, 1), limitHash: ffHash, - expAccounts: 86, + expAccounts: 67, expFirst: secondKey, - expLast: common.HexToHash("0x4615e5f5df5b25349a00ad313c6cd0436b6c08ee5826e33a018661997f85ebaa"), + expLast: common.HexToHash("0x66192e4c757fba1cdc776e6737008f42d50370d3cd801db3624274283bf7cd63"), desc: `In this test, startingHash is after the first available key. The server should return the second account of the state as the first item.`, }, @@ -226,9 +226,9 @@ server to return no data because genesis is older than 127 blocks.`, root: s.chain.RootAt(int(s.chain.Head().Number().Uint64()) - 127), startingHash: zero, limitHash: ffHash, - expAccounts: 84, + expAccounts: 66, expFirst: firstKey, - expLast: common.HexToHash("0x580aa878e2f92d113a12c0a3ce3c21972b03dbe80786858d49a72097e2c491a3"), + expLast: common.HexToHash("0x729953a43ed6c913df957172680a17e5735143ad767bda8f58ac84ec62fbec5e"), desc: `This test requests data at a state root that is 127 blocks old. We expect the server to have this state available.`, }, @@ -657,8 +657,8 @@ The server should reject the request.`, // It's a bit unfortunate these are hard-coded, but the result depends on // a lot of aspects of the state trie and can't be guessed in a simple // way. So you'll have to update this when the test chain is changed. 
- common.HexToHash("0x3e963a69401a70224cbfb8c0cc2249b019041a538675d71ccf80c9328d114e2e"), - common.HexToHash("0xd0670d09cdfbf3c6320eb3e92c47c57baa6c226551a2d488c05581091e6b1689"), + common.HexToHash("0x5bdc0d6057b35642a16d27223ea5454e5a17a400e28f7328971a5f2a87773b76"), + common.HexToHash("0x0a76c9812ca90ffed8ee4d191e683f93386b6e50cfe3679c0760d27510aa7fc5"), empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, @@ -678,8 +678,8 @@ The server should reject the request.`, // be updated when the test chain is changed. expHashes: []common.Hash{ empty, - common.HexToHash("0xd0670d09cdfbf3c6320eb3e92c47c57baa6c226551a2d488c05581091e6b1689"), - common.HexToHash("0x3e963a69401a70224cbfb8c0cc2249b019041a538675d71ccf80c9328d114e2e"), + common.HexToHash("0x0a76c9812ca90ffed8ee4d191e683f93386b6e50cfe3679c0760d27510aa7fc5"), + common.HexToHash("0x5bdc0d6057b35642a16d27223ea5454e5a17a400e28f7328971a5f2a87773b76"), }, }, diff --git a/cmd/devp2p/internal/ethtest/testdata/chain.rlp b/cmd/devp2p/internal/ethtest/testdata/chain.rlp index 2964c02bb1fb7f695fe6eb9c1c113f9db0b7c97b..7d4f4b3efe5d204312966e56040e759ce53b6600 100644 GIT binary patch literal 451888 zcmeFa1y~ea`2Wq)-7T>w-Q6HcBZ7jUl$3ykfV7CPl(Yg862c&g(j9^#pfpH|lqiw{ zQW8tQyI?>1EbzSV?|T2gzk6NIoH^&7nPvTa?>TeMcW2j-i`S5;fH8k;d9^3;IJ&ZT zHWN-sBSZV47|A}jU0z_^F+zWXLkG~doSq!2H*nANFSaOY>89=?mOl~|>_jr3a&k+i z`;KB1(r;csH1^bWA)s#(O1t$!)GLMkK{phT{VYg3@ezqlpox_LU`xPS?HDW9b%~c( z&I|Aw&{g7lBt(dQ{Y^U|X6mJx0wDGwbHNA$c_(Hmb>zp3$I=|tPl%HN=R1xT-xY6T zflY|7dALPef zj_rWo`i8S4W3w&D(9Jl+av0I;;7~N84p@F{`8O7ljzya%dUI z4;C8j3sU!lfot>=xWY!rq((>2Uv+h`v$fzdHMX(2Wb9<@Xz6Zdgrs3aY=pn1XM`kV zG*4r^10pq2Jb&fqKzIb2KAGE%rledQMq)v-9bq058k}DfS zHv#i&0B7YVTC`4H(@hs>2E1=JYH;u#*KvkGMEUgK+iE`ccV?tRzqd{C+HCen)u@PYsisfOB|A 
z{Pq*NnH<3<_uDG_y}Ij6?`}WvZisijG)ij#wEm?V34-#4ZrN$NgV4H5{L3IBI%IJ+PtR1G6WBcv@oFU%L$fZv6E zt%HYy;o;auj2d3fj%E&EGY1Ddhp+8FuZ`q1jMQMq{g(A5Ns2kbg$xBEp(F~7m>wy; z_(I{hw}r^YO>QPnWlf<1h7i15Z13*4}{(vpi4ny4EL#vv%a*!_)p_ zxG;EY&&4HcD|$QEzYeh^@{Ylr5t$Gj$sh7j)54N7c~x{sVQ8p|-&2Yq58N`3jY_#C z)mz8myj~iTs6CIlUL6(^<^dcLe7iR3_J}cuK>P1MONo~WW47F_pDjbKK8hN;`BxZ75Y#WixF^hd5MdnMVGltp zi~y)ViXSXA)ED%W1_H^Eg)@s}1lBN;(KC`ULIN9sA!m(1pm2@dPKhx{#+%Rx=lmIC z5R#G1*BqV*u!)M!;p4fA8IXQA{qi~10Q;Ggc|gQ{s)_nyiFHeK z`Kn*nn-A1;heTDFJj+jCODy}{prl{)i?O!ICM{EMmiuQh-VWQ%A(O-dWY$klL#^<| z&sCOFWTLnDBw|OQz4yb8tsy(dqSIr31`vxk8NFwE1B0HZ9=m8OzUj(GaN*Vf2TiQl zFh&pF<>P-s6?0GJsHk9g0RmtA=H>I7I#?BK=WiW$2y7b+`}f-y>bFD7{I(ql2^Ilc zAj1}4-qDx1ug52ZH-ljd2oe%HZ1tNLY-jlXNQApfSOL6S0pAn;fPzGNR{RIf*q3l;uf8i9Calt9N#w>({Mp);+Ue0S!t=6BFdS zX*X(!5p3>hor=(xC#tw$Hn^y{t3|C|GWP?77+9= zxwOkx@*sp#cZ)6}BoqPxd=YLD(eEmh(?V})YlOEdk+e(@8$Q%tvpED30h zw%N0^6TR}MD3Tl>$jbKFzOGc3e1uRlr5#LCNw{@LUh(iFchY%L<=iLh&VCH`wVxuh zRo|IiMoOu~N>2A#?*L?`sJ);}InS>rFhQ5=?&k9WXP;3YVKSp~mcJGzap4S*DP@tU zlf%)2g9V|JYI-bNa*Q@`RqjLgMv#%$N&eEzU0%Otr+@MZ`DfL?)X40%ea%~60>NL_ z|8*7qF9-OV9Dfb@<*oeMytfU(*h{F-vlI@3D}@&m@qTdSgd{)p6pLuc)W##_TMw~g zs#Gb)a-C;8z1G~*Nq`zlxRT;Vs0^$^UH*aswy2(^+AC^AWJ0^71s%#A=Ap1zm>?xVUBw(cQ~;mIA{zm5L&oEjwF~%H(g~skr2VR zfU1i>@RCdo*-pKF$d{;FhtZCE=a2&F_GCWkJ03<{@fZV>`Ti;g`pC0L4R`G~CrKZm zuPZPX0%%ezoY`+1)}=E1y-FK~o?IxnSO4U#za9U>{JaeN#4#{&5`wuWkfy{l2N4Kqgpl`s1mbKfpc~+}k}<$p9jaFs7}<~ZpttD4?AR*-A%GDH z*8 zI+mMgUd`x<1&Q9_&y5eM59MHm(=(E`8sY0&`jO4SH}!a1Uzz(1XHs6ROSQ$3<^WXb zOWv>w=)~i`2$O#|i+P1b$f=BeTaez0k)xB-4AmBJVp-3ke=04OP|DXI#g^d`EXj$c zMpsR9$wcR51qblr2Ytx=1}MmH>i%3Mv)lBeK42B#AG+te?T1_(NFR^`oCJdPRUf(s z(FZZo&{c%`@P&OC5a1M1NEcn_%h@v8#*v7mxS%*$fojMN~a6=pxi%34UM^!ls9@0a#mw`u^W(`=+w3hn|<}EDV4BFYsbIv4dkL%w!}H? 
z(xE6*5QQlG-r*_yQX{^0siG)N+;p6XAR~dLMt7|G16QQu{SpG%`Xkn-KGPmjN?OJ> zAuBg{h(G7OlTtEkF(+0`M}5Dd{+P3Oat>mQ9sz{5KP+&MP7N|8_(`piVf9q`N1)c}#wecF19FCb zUGE)=+ytR8hw#E~f83sBZsG7JbS=PY*oM>bXOY>%GFNFro}S~l$m@k&dRJ{n=*`)f zs0_7F)?wCUp*T=_Cvy7&@eNf4w8 z0{CThGMQ>|=J^TTQm*gyuHTaDtWd0MUhOt`LLJnp4FHzXC(cBd5`-8VTV|$_j+^EU zmcK66JL!bcFidS6}A9w zP)f=jlH0Dg9s(5#$dZ(47dwUITIti8qn6Zqu;y%d+{4U2shqN zKvI92TM{ON4MQ3V8EWbA8fqAx+Z|aq{6290S{t8_o}G(=sK3GT6noH>&o=_8!h>GW z3t{35+?;8i0vu_*HSkJoaNcZllq~1Ib2Zk~pt>#<9--L1DTWs?XHxbW725e#R=A+u z_u*5R_gjAn1X2Yxg&Kd2jymBrOn;%|etVFQwUGl)Wk=^g%v;!{NK6M6IbTYUH0K-^ zb=)xsjp8HoOF|ybX4oNqDgG(t@c{8-4A;<6f@%QCcmq|li zqX#HbZ=Nn?s3@l-Ukp`g`3!Zy`|^DDP8|v)z|z*m z&f4tKw;^8c4~6}wTjhH+({%+x8poW@B;MoHyJN9_fBb1^yuap4fQtShK}t$K#-%q$ zMi;KXh`d>G-{ab+GoKy@8&Y|)Qr?8$@xbPh4S{2CUVdBSi5;2UX;XG!OxZSiwN2#`LV zSh+Rm#%w*H@ZzB?G_NcN8JhW3h4{#Ruv63fA}i0sM2lrSr-Wa85z-OgD|9VQ70Dji zPa?hvsZ8_19a5Y9Dg#MX zha#BvK653&J)gU^BHPVnZawRZwwQqI<4Q5zxx}&n zUJG8%6k9OXvcpR&p{Ngvu!}6NlD`OERAOG=d?zj;E9U zafmy7IfP17K>U!xU=fcTd^1*Z?+KvqMobfYhokU$yR>Fea(y#S1BpbB6maF11OW(% z5r_Zykp`;uI>iBdfy>*f;YDx6*IzMrm8%0?X<2Lc<=%t{_|mQ!C~}@)o$`N<5l?$! 
zguA`8lTkJ4t1$j2N&NrWxghvoJAmGy2hjzVEOx_wbWtse+$|E;oM>!^Ycz%bHhjFM zzs>k_B4G6vKf^*%A`0P!6neGH4g|INstW=?*qIyQJw(3=eOAHSWmd0J(#6G2Ub$hk z)S|H4L!>mOgt%>IIDU zQ@6&^SRqzqI_&PTiix7w#JyE;ush&j&v+WTX9H|Rs(M;xH{XxEm=hO!`%&xjwU#N_ zxjR{zPY(&Ur(Ji+=h$sgNn#qzhfhknru=0|a+_ygpyN-5=<&F50iIFBwEi0RUMHj9 z!9l9R6)HYz>!CweGjT1TgGMAajtxMwKDJS=gdS63IQB5%HiJ?MTLj9>j*xaX5ImK;Ms3ch zbsaI-Hc%@^ftdgnpzEh%M zh^a!D1r_z09_eM5PZ514!~Ej0fXs;vHOkNO4o6iP<{2ke1+rwvA8{kgHjT1rz%DFj^9@CU9+K(JBpPCQ)s!le zNQeCMS9`ad2NN+U{H9%;`&PE*WcL;8%BZ}13uSDd_DCcp6s8fY#^-Q)af!BO^LnmfIRZ713brzk4Yj@?|174Ai zN|E*??L({DX&KYx%*_=(u_4H!)s;gYO)=GWVeo7_0`EbDcF~gt#<$+o!)`Cwax%NQ zm=q92Wd7VfU@)YJZJzAhA$e!z-7%AxAx&^UPlq-eMK0z(ZRZ~gdXT#?o&&Cv7Ptv8 zQjzlxG;On5uQ*C%TCa8JsI59Ljz-9Di&q^VG38FF2` zXj83u*@)*U@dx~z;NN9i2+Vc3hlvkt1^y>7yMy~OFeexiZ1Lj@wj2DG@%!Nak2jF8 z+V2hD_d-SbI&=JW4R3=_BK$J8|E(De5BT-e;BU?F#$Qk52Y+9`98U%w2u4DJ5byOM zHqE??4uU(Ho%OzM83io~S0`L+V=DRQ2bXNx`nk8mYz)LJwh3GY9?5zcWFy8MW_}~~ zgPo545au@kSVP`YxVtka5jIF8axNBX_@US|Ft^#%y4IA)E-4`27VzM2%cxChx-8{| zg_>x=^LsEVb`)Qc8m8*%7k|tX3TijbKQrDTpPRCTE7B}vkPgte{B(5fssmDD* zL-!|TBw)V1##L6!V6*(TMsMYGdHx!)=wI_29OZw1K}f!2^)6fQ%7b8zT0^e4pR7L4 ze!Y)bNU&6c;tKsjH|ypL`_z1wmuY+)M&vcGp|2GXV-B;&k@&$*r;o@U$G^}c{`^cz zKJqLZ9mxf1Jev=n-kqfdrB2V*zKU)2O$4|$EMn%-wG?hXa2lF4Z+x3QS~x3kcj5JR z*JHD)yX)~VqETevc{|+DDNV#PJ6xpxRybxwX<)r7{eV}TgcA6}sj;^o8*>jybmLX~ zHjLM*fs87p$_5h!R6y;R)FeRgsU*cR46Vvx6i6=Nl^_vP_R#+&D^!d>H*@D%Zh!RZ-P<-SgC{-2L-7|zBR($vE^q$;0OW@< z;-buM$d9=iRsr6v{Nv)Mbo<9BSi{#TS;)Ty{kcu%k4+HLy}aF&=-qH2pxK*I_5<|J z2a68k%c8g2PgjtZ;q zc$74r|7Y(*vsQ01_j~(xiX!HKD@G1|H0+K<*&kCXJE33@%Zw;K1PF|MT{p<`K>S@> z1b1Wo%#lV#)0uf}e1hx6-ND{Z-Q@SAFb_8eyDSQ=~&mNK3=#mW{-7 zT-|GHU)*-j}zj>^!_vHE_<%xUERn$W7lU_Bc z&0#P4tjEj&Nw@G??1xrTa}@YG(u7_y2a_JtpgPj&XqTauI{dac|1c7Q{E5arOyLsx zp_c*vQ}i$OzxGysY5pdmU+e$#HiYb(ggOr*A(V%0clINpV=b3L6MH!^8I*iVKiopT zVq0tpUL&~VcqA{us_pDK7Q_Q#c3nQcpOvNb7G^H0$M9rpEw#E=W%{@_6DFcuYo0PaOtz) z@w{R}e3uvL;p@J%5M&}3(L5d*hqaUNdzc~k<*KlLsq^GH;zVW!rHke#(3?29Fa=y3 
z?l+A3X`HgfYd@^mro;k2H(Z!qV9mJDKjmCPcj`0Q(qNO^Gv&fnlv~>#iGYd=;|n>l zQ|Ruba{-(2_SdlG0<)@)C3aUq9hw9N99n`7?$WYgn0YhPH&9zvHI^XiJHFYCw6pyBP=cWY$i;wR~uc+IB) zwTHtLv6%sxS3l&gXRDvq%*- z-VI9;gvJMnHQ5A;m_*7a)e0Q)Y7&)#dcP&;rMB>;fJZy$XuCp4A$SkpOeknH6_g0b zuww%%_x)%^PzOgpUaHbPx%I~Md7r7j$Q*x?KjYfDwo^yB|Jw9^MEUn+2<0B&8kpYm z2Ljwd?el&BpQ|NMua?ERVt~qP1}2Yu036lWp0~UcWlFC>^3dE_5i#ID8~G?<4^wea zTY^t)k&YV4j!WT74lZhK2-en(<&Z1n{%Ie20rU^OYC+y*}MJ&bs;p@ z0Df|3gEilXp2S>G`6VOtUOydvTON*@tcv;fha`Kv;4Jwzr0&F84idNMET+F@oNpU+ zQH$)F{^_d$x!Tk~i+Y8H<8#@jsdv^qdDA504~;XxgCp%y;%=(FD60*=fBgr)r}@WP z2-P>T*AGNC;}OgKkS*D`VpzRo$uh05vC?s>QDxq3`}Ueoy5F-73Np@)bIFLw{@H&3 zd%KKBtl(g)5RhuH9(h&!Ed0gbI61pc^z)DJcKrt#5lfj^HQ^grxc5y%8?V>r#&VqG zS$CRuey?Okl|ZTuTycp+6)9Vch7UkC6ToA5LZMJ>kvMS`N^Me85dH9v2oGQa^6&nvO z953FUA#d8D#nUkZNlMCIE7+udfYZwAbL<^^(N&iOHEIK)QagwK1~pWPfF``~4=hJ8 zeBn@gtRWi!FHVlBQzrC`VBNhsJ~Lpb=l=kU?cPkU73P?>+R0A#M~8%Z_0ro=8JzYu zyitV*ZWA|Cu7p_IxN^T#*UB&z&^R~8jPN5x7=WOo{d)}lchy3U>_QFk0l|g}5&da_ zUjzI=sr9@7w3*EG!w}`;-bh)#`e%Zj=LM0bKCPIz2E8Tp@hYIMhxM~9_#6V;?60FR zIfY(R{P6bSWD%YG_%YsgJjlmN8EJi0gh<032q3hmUX;spN}g8L5Y61$v{rA)n={g< zVGe}ZHDn{e8Ly9roafEG_X~;-9;QX!0xp@(tA1?hff^9scDUmF@{ss-wNP7GE4ud) zoWBfdRq#r8Lncj9Dra*UukJ~0Bm;DhE2)7^{@dMeZo%$~pgk*q@)P?17iI%eAcJxd&>1=KUPD6zG zQnvgGv#ZLr`uu1>^+pyr%Qx|KQx26Aa8y)7>OvdIzM=SEfxqq$|9qLPtkj$3h%Dyo6Ed;o$o_xFJWC-ewuy!jo>97pxt5n zG71hc*Oe#{;zvSDefeOeJki3?$)@nGeswAJt9$ttYZnLbpP|@m`7$^Gw+DQyfDdE9 zD~!I?zzg8rt=%DxFLPDjciL_Gx!2jcxin(KaeZ>ZfVS&01j3{aTqo_-RYtJ(P0 zNM@-#EbIvzCkf%`am2jCJP0&?pc83shw>oY!_ABOe7jov9i3xyjJ}3!sP?qp@p)`M z);M&FBIv3+z;3L5{#cJ>Ndjx>dRk_v7Q`c7JW{NeEN(RPJ=4kyIXLF+@Vp-Vns;Sf zP0z^Tik-|_^hPsQ-1)YQMHhqmn||G!P0c!o#N1NQ%Xg}Y?X-UxroN0MotZ{qa=24( zUbyFI6Az)rOKX6QSYME}0BvoKLP#WOj7LJuElf?8-je0y=>cK>-ZNZ(JqR$45xgLD zU-EjFtq+LvK%7%WE$oN$QS8L6gv?2kZl5MxjH(Bi<|jIA(9OZksG+tNlDg^hqU2qgo|W3Y4pAr*E}JLu8gjWyiC5`Q@{R!!M-k11_7>V(&5f zFm~Sciac>VvWmzw$1z*ng8tR z64^WPWB?X|o-zah4nH@`>~8*kNP4&Fhn)HIp8MDe|5MEOjQytwc>OQ$>W5GW{a)s7 
zO7sB?q|tFf3fnkITzG=Tub+jR)x7WAx%NJf~4VN-WV<;+yb-4kIpJykO%pA#e2-j3{!=fT%&F~ z?WKR_q3$6S^qiAZu+-{EwTX~kz>r578JQ$@#6!Et-jlV&57&*K6#yLFSJM3-I4`ac zg^nIu78gRAgZQ?JlmsPdMc?!9n2H35(o-#J9=>wCcq}Hwe`y}q>&zkn4jZT?QSX#M zQFM&YK@}wP1A4y{kzt(^`0E!|!Ml|R7YM`ts33%gMi2l2e^-#qK@{}n?W_G<68v8E zpLg+mgw^k!?-u(AFXd)vjQt(j)Z2`rXX7=LHTPlT=_`8!f_8uXW)i*co40NMBA{YM zUxNH6hI9W}Kqm~RA>k*=Ghg;Gnj9M?AoPuqtfX5R8VDhiR&M8>cm)E)rCLVj#0tph zRoy&3XB*KTxjJeVNwTK2U`~4*jj53v&`QOM#s`<9XBkN22SM~ny=_g&#BWHixXO68 zw&P3c!3AUqw;+h2aHB$OqMsY-XA&q2o-0(Iw5EWBoC3w$)m*B*bsp!C0zw08If`pm zt!`Jyr}sXfW|!(rOm}d5aX}dkcV%?FeF3_~s-Yp^WNzoA+eaZ8Q%=wGYT26oL zH`#h{rIthy$b1mnB6UXO+%2QNBmVUmi*mBcIb`9&ru5NUZ*C^jhawgbb%(Lk z@TwC!%W@16{-pM8AUPg=fCk}JXbpr^wv7Va_$neW$oJ_o#m$~g=SpR#{-yz!0`=t#Zr}{OxSClENiU*IA3XuOzABdKoS5^wtbFOGUHN^o($fzYO~DoN5{J~9HXjG^OpMTNg%wAC z^hkiy)pD~c*YoCw7Aw913yv*8fb0cZftavLQLI0QNHqEsrTI>hxEB=-)l7KVQRL9f zM1bVk$rb6lF;eIJqPqkH0exS5_i1vghYLj3<{&vM1^+*2?W>0^?U6>j_zr#=CTjy+(ESV(1H@7)?njOviK`*6_E?Ma)WQ&*J_KC z%We?71mZvsW$$L|c~U)ILMSw-8`Oot_n`&VAGL)Xx^*4ud}DyI?la-g;B(u~VZrc$Y_F_IYzXZFMeEFLAY)U&n#~s3m%855y%Ab_llZE(I-9$bY)6ge@JO&Rt5XA``6LGy! 
z>`tA>Z0#^R9Zg}ckwlj`$W4aBk&I34YX(#aOO1x#Ac=cVbnO|)%T?R5(0YglmlFHg zV5zh!TKLXi1HvCr|DhUkY>#gZOdrt2gYX^Rz_TB|S95Vf91=UOyD>8*RQu+~40KUG zb6|TtiK(~bmxx3B95LUpXD>mELt?!PhlHnV=IIY0#93S$mlij(moNi-Unv(@KN zxCpiGi#SH25ydjoiMxXg2m3WVare&@YMIS7Y)9y`nv2|DxuhuU{gEKP7#Y2MpO{Yg4b&cL&}Z3c4*k z=Qn9_d%5wYD&HaEqEI1rVu~1Y$Oiv|b zjAn|GvU1(BztL_r{t9+(demw&VzBMYY(Y@#uN|agzMe(r;9URV0a+kG9fMDIx{oQQ z10pPSvN<+4bUwpHj+p%OxgfSuyv&&g0PnHqJ1g}bi?=4Uu zL#X-DxkN?Mso<~u;14kWPz_=F&b7lqxVHVEwjZt&YlHJ2H{;(GIx|l1tTIL0y&PPp z#O{5i7X8AIGu}7}Vy>;BmW;l(AJpM6*FT9TSKC5%zq6nyVpHGmVNqA{{s;3xHYTH< zw@ITCcc?C2{X?8*>Q5K1w;4^dFRw~9aFw!t0+b#+tjHO9h;%ApY~kYd%&epF?hD>6 zR~5R_?>?G*Kk5xfdiUuE_m46WYHOWzBiT@63-pA}P*Vn6FFFx%Jo^uhryPxW-3q ziD+YB`fuLKyeN+%F=UxZQAHu8B{k3Hh7f4D_W&6!YNF$~`~AnFovJ!TuqUEtrF@{- zX@PkUX5dq?RvjKpL*Tyo_*`i0W$FO8Fb0c=z7Z)ddD(oE&o2-CJ8PE1vWcgq%*gcX z;@qvm?p^vIz91Io#rD~}akD}-$_Wq9_z)>5d0srMjprd*5rE@Utv5?a!fm%gr62Ib z{oW?=U+)28@jzI1>D%)j+&lW zk7)<>J6Zr0S!{C|N|Ev;y5a$!SF2fL%8TuI_$_s?OEQizl($Fa4oQzbk=v&j=%p{M>OcFrRRwTKnyOzVd95~I?D;&`D^5&j zITbPOH=x&I;rlX!bR=e8Fd@>lSjKK+@`E>7;jo69zUh>&<5 zPI~5JQL%xQGHQAX;_Ho9`3?!Wxl^aGI*114-c`}yUed=cdKOPaj^m(Ci4vc$hhQo( z1E*g$?(Do?mpkGp+3QQ+!9hrnD-WsA-&u`m^AxK~I{Mdlu!sM@Eg@`Qays015PuNL zPf+CUhw@{T%eCV-HITA{@Ug=RXxy-mTF>Bcv*pqh0)0iGm>k5EUxu2q_@1E1`*+s? 
z0O)(>ac_xMoxNI=D?0#w#8AI16R?q;xnigw_I#J^I;--BS!h; zi$ek}lqukGd?dB!(HN6NZ7qG04U?D8*)3T?HEN~Tw>5O{0iTFjUYch2yg#i1(XC+= zCN`2S8hE{=x6?bXKx>dZ=m9uzyz*^wFLQhw)wjqcH6Pl#S&^^s+9-bPZ9I{v+IBrq`fpIn4|9QGcXwAAFJB?j4IsIOQW*#)-fsV>^`q0DXHEE|=LOq|Q%!He|Z zBHJ^a``%dae$SrkKqeq3>1uQTL{zm%54qp_K%w0Ek>VtIgVv=a34blWjGB z_8`3`!%`@pyN&RyhJG>zCxs!SN0>oW9_L~saBJgOxc%ro>qI)R1P8tShD4NfOhVF{ z-G(YEF0;&{9Kbq0ls8b-EVz*@l(gzF1wzZ2ejvJE?MR74^>w+T{zfN7*l; z_=4U)Rx-OazXXH-5q7wWZwi8Z+5ewfINX6D9N!g`a}Wjbjqj_MUnvlC4nNh#lC`C^ zZM@FX+;*U%_0-q|%JRMUBQ*|qj}R;9XV(FnFW)$g{;aJspDov zADru-*fieCDxi!c7S77n62j@X0mt$6CYTLw;ZGk@KpK*|q*a^Tk9psf>0r3noDS!n z*5=c%hOnorMmC~n<^xNSrIWU)Q5P~j+gTV^MTS3&#*Uc^n={>N+D?l-$u0?0GXU3^ z^w#IP0y>0+q#xoD^N=dpZqqULMG|sN^q;8SpMdsBrp#{KKLqsCO~yVC_0O=c0@^#t zKihw-`FW-DgC%F2!i=pBQ4k2635Yt4vVu*hqz>AY*ze!k6=&^J7jHbHHn z)vg`zf?zM93T;N*Il|QV;swYsFY%mk)t!6hnrf48-@QZ@hTp&Jx(=$g=t#2lRIeg` za7#CHgtnPO5_(?98R`q4p^T2H>VX_mYwyxKOIuTloAX)_~QJ!pAl(N5DP2fDxFA!!Rx2Kh3~!Mi0lS z@+@;JH7X7@Nmu)5`{^gi=I=oN6I%NN;|Ht`|8jeN_-hp8_h*D~?P(3}I;c5_)|e$P zAk-RcNXp~=>l)2RYlJP2PS{4pSE={UfFDw3+BJ`+NrlM9>u(|y8r*fj;ma(k^G9tV zhhD&b791}9rt2ryfrq42e9HMnme{k2?1QCOMN;grWBpEMJCGC-v5mwrCjy#&xHFG^Z8E4#{_ueud<{cG~!5O8lI&J-y49NEibba1HzyGrH+r>X=LcZVQYn zP6flR#^Gw3UOXxvHZ7;D3D%@tmBcYd^{B#S{<;o+&-9PA5biy`;jV+0gYeDZhP5BQ zPp3%vUVNa;JWKLfb#k-&N!OrRCAJU|S)0sB!TJr@j|n0C3hmFX15S`HgBvz{Ld&;) zl)xH8x2ueOaw-(u}H8YV|ad;)&_b17wJ8Fv+ot zHj)7P5~p;7?n|%Qsfkn1i$k?KNSYWVt_1TvDezmW6-J+hgT3oIXciErX}TL^5n5Wi z9j`tj-^bQ+b#i4)TEC0X2OsqA^C7|3Ov0m5zh%8)bJD;oK=U1*^9J!z{H=^MhKV_k zSbld=AeTTdi~cg?$j5g!N7%u4Qo|f1NVjTj!3y4kX?DWB;(uKSKO+45GKA+l*sx3N z?}NVA27WF2(yEB>2khEeCtnB{tH^NbxvHT(-An()>#LnF6|aM_Yo!_R@5&(t`)Ahy z7wp6maISyrTTD!7I36&kB%ZT7_4b zF~(K%)A6>W6C+oi+sMz>J(vwLB8m|85CGwy;eY!RQ|!7i+#0H_8{G1ccUF zQppMlqd%)T#e(`}m%8K^m$8~dRZ><)>+m#$bpN^zzEb$-HH7y&*Y6L)HC4*aez+#6 zCywT~a-V$fYGYh!@%dALJs&Of7UdqbREohQM1*(nM3zS@gUt^6>1GGs3$$_wKT(7M2>RH62H}5C3xsbMX_)I^ z>L5Uq+hzb)rU=wBH1JCn-x908COyf`b9}tRyY<;we=p#c3xVMSSP$Egq=?U;)A{C_ 
z40Q2%@akoz-o?}GA8Jkq8Z_g+4Lh@p5NNpT0OgNQJlJ&|6n_|J4r)@&r@U4k=kc2V zC1EDkya<)ISC>>>q*NR>${{-nOnjORp(njwC*&b>B~46t(D-CatqG52w+>`!qnCue z1BBNYRK}+Xx#`5!Z)VPgU!P6HcpM4Zvfv-b#<0Ab@Yi*)50OClcj?=69V{ILKoa0J zLV3dgWD!%8$T*h%^uc8N(jr6K13#IfQfZDH$D7rvSuHkT9fSbFT?ahABtXx9aUB4F zkzd6-IY;E39tULC(@`gmF44tQhgs6JVo zeT@_@XQs<^RoQ$cHQ~v2*L9#xg!AzB82=;kz~$!StHjBkF~Z#$EAg_5tH^OBi_}LC ziF?CMPWwmhQsWiX9jFZNiS?zg(j@eBuL*h5J!^|~zUmKzbBDf8?k;{#VPk2jCRm50 zjvtG3Mv1%=OgV(=GkM$(5cgS6U7c)TQjkTt;7(^*RNmzVQo7?2paA91sW77V`nO~Y z9|e@z;|09CLjAJ!b$)-ZZEp$rwgDe;Kt{s+VE~W#TiYKB;4k%G*GNb|4O{$hsNIoC zcq4q03${juz}7#$AiEnfKUVFP!AM9-kZ;Ak0umAg6ATg9>p#F<2ipgsorw3uerWIR zM;Z|)nx$O1&XjGq?9@6q*p!NIA^VJw8LBnyuL)dClBR1m|hIsDb`vc zO=wwl6YMjo(T?kn{H$=J>8-k13cyWSeEfBHzGdLBS*AeDU3G!z;F&Y%GI7lS znlVOQogy6aUDpA(rQ{i+vJ#G(l-N~TG`D(w}QGS6 z_;h}KgUDUy{<;pn(){N&MDR;ahq(@rF%LpH0WQscC?7xTVxVL+|I&_%n7DyT+QY~G zmO;oIUC%3xVXgx{UjkfOM6Ltr2NA}NPh7<$@3~w&db668$DfT~!gGGS zB&?rx;aCg`z+Y%#B6bV8K{$+HJgMH=uslki#L5$$SLH$*#}N~U-5*@oeOi3Py$jYu z{Yx97akjerQz`1s;zu&``%UWFGr&SfdImQS33R_|r`t*Q_D}}Jt9B<&-ODIK=F9%L zncvNkQ*rCm!z&5^FD|8Bl7#bl?3Nl&E+4t7z|Wz`+(}{#jmHvaI?pd20Tdc{tQcer z9kOMmt%DyOIrDh~Ti>hl$)#p(nsvjjHqU>{+Asznf9Bvvd)SvKncek2rhN8Vf2sb( zN%rLk|FZfzj0VfAUxx7J>Cz`dV0!A*DMN4aC&vhdGh(g>!wyb zV?VB~y-gRY^1oEPf|%!@T?hOiUp%h#e;Ja_;vSP_a4OaK&(l3~%{SMZ24)1xT<-?P zUd(E`EtifV?x)3I>_uOQPu)NX0t|ERN)V*6n6U=>-^;f~>l3JPL48nt$0Yf}dUeu- z6ftm)YTI7BW2?mMX?jfcHMy(w0&(U7Dlt4RR|PI#_vLv6yCm$n4r2Y7reDhoKkv9E z-*(=?rk$ro4^yI1faAJqaMTM|lZrzs2$OKMg@Am9;?rud4DPM%(K`*M^>f^BjD#+0 zO|ackp8~uxE2X!GFP9~T%#%ILa8F(lw> zV%^(TW2?e5O6tYn32WO6v=^T-{TBu80t@~DTbbSRFH+eH_@V3a!w&l@e%bZ^I|T`U zR}jrX6oeB*yB`IeFBO5boJ>nKcX?%xDSuDzKDy|=MqUEynP;vqpCG%gBUaGQt^)yI zoFKY?5s(IG^9H5|Uc!GCP`Id|s#qS8#q-A><1aCFooZsN@tCHfyH#$VseAis;ur{^ zB;3ZNzPC1{(Z-SS={SkaTk=nKg$+g-SC@`F!m{e91#FVgoaw-JpF@(Ujs_K+g2r&U z%K4tn|7=iO#5X;N4xb6#a~(i#wVn|@5g@a9iX~e0Ew{3Ub`r#-Orf~7Q!sxj*ERZ( z0{Wmv$-q72S|nr{!YMu28?h<{_CDJo!|<#u>gAf;${3)*Ry6hcjOsZ`9h65+xmy~k 
z$bPd+oF;M{qLFw+%S4+1%iVP4QjEprcigD9sgo68x_frl2|>5HglcD!AL9o8>HF7T z$%cF_>}xHvd!z9?0Uhi(zdhOidjk69%|JxH3yAq30>Y-;Mkt^?*8zj4Kj{?jf-LQe zRwTO#-ggf4F*MV~?Zsfrj-ufl1H|8WTKmOyAPDlsrri0XicmtwIsAn|F=zj~>%hGO&^rJipyU=h(+tJTE!Ex$Z_A+$Jm(<%8%~PbpQe41}|7j+}Mg`VHBPV=E!8W z$*HA3CXqpexg2;-d0L$lu)(8^vhr|qi9Sn)#!B-l(X41Z(?{(ghmmhw14z)Q7JmP- z>pGAnsZ@bt)d;_!8hG8`aSIQHz!=|CHEWIlT41$onx}9`t=-al|C!uqjbbxtIchN) zjJXaiF735_HjdiDMcia5I00Nbre^3?Ld*R8?xm{4&H6^73mce={HN$vuTdxA;;^y- z!A2Eyi~`AL$?72Q%rT43pCM@Y%qQg-oRvwg>oES&ZSP_Q6Ui^HFEx-~s{eg+Z>QgB z?cev?S8L?Hq7D%R5#7@o%yodwdl0Q*_S7KM8f-|4wA7g>k%_;=D z>ux%y+(IwOo^t9I1QVXE*i>h9r~@u%7^*(G%f>emeT@*G;B}TNw(Y2uJJ!&|HW@)U z-fT3S?_JkHQ>V)Lqnf&vFR?-x4sy}n2c+>#tgkXqvr0gGVtY*A+iET!z=h@jAC$nvix zn9{tx|JQYZ7tLkSArjAo z+vBzXMF;e;s3()gX|mO^^>YOxcFxGcZK?o?qp>I}{VWvQ%2mv&K=8d3RvBl?iQWRc z`zi}(Ogn_&VDGvP$j1doZ`ZMyyt-ZO!fCxF`0{FZ{EFA8`Fv?AW`_iu=e61i zvrzY|g5B2SlVt3IT-TPPQzV=*0q01Ni!+Rb|udkUh zP`FlnAc^2n%J}O#*az6+-@%qU2-xVg`}(EN8$_J`#i5sKUAojBJ*xHzxJ&6D#Pd9w z2;U^TL%ZobH)61Vb{&ZLqSxxfx&GZRitlnw*?rzd<4TBjreoJk99!>Y%nm7O$-oD? 
zx|5tbc*>lf0PZtQ1`fR#>$==C#~nkwl4_ieYG0~(bCGQ25$OJ@^aVKAyRHMVh5D7V z!?CE8TfrAGZZ=4Ji^#pcm~J)^&OrPWc_Dn}kX&z!VdWJv(M%8aX_9R`EuoJP*HA$2 z(nl#?v@fwBye$q`Y@dBP|B|?#^2uaH8`;q?!SZ(hqY9o9)d4DHRw=a}z$MPkrvvv{ z=L+Mjm&Lr9@Psj0qmPzhoO?M^<=-d?u|sh8jF^i5^CKV<-?>&h2-j#cv-?TdtQ)g~ zmy-HA)3*C93WZaeRnEpz=>7foIG2LRl=(s=zd zq!F#d(8ws&ZZI;QmHK+gWAG)^nx!$5m1l6IcU=dsTGq%f3zpX?S=NS&p$M&V`q##j zX;yN@4T$E6Rj9@t5@|j6J7?+)+fLP=-0AAnqcD?Xvi?@qQ@Cb+MTe%}eX)zz7&Un^$Z!afAGQ5WM zuKE~y|OBB1mY6qE+(P#OVgL?jiE5J5Uc{=17l;`68r`1}6P z`TdUf9PXVvGxzT5zTTgioq6w_-#_c1tRY4UpCk9WqWnypDs!jfjOK=*ssDDjDxs4I zVd>h`G3i@amtD=MlsG{HhaxLIcQWpJOm(H%=PPUcp&HCGGqy{BQQ4@Z(qhU9y?5T( zgbx|Xi2X~(mG2G+2zt-HX?-b|^Y>W?ume6w2udH+b%0=W6abMz?!lBd7{FKa^#g9F z7t)GdrybTvr5^{|GCtuXNHIE(+&(^65kCYIK#1#rKL9D@{-3xGQUE?`bB5ye4D3@& zT$NVwF{h{N{Cw+gi3CT-C=zQ0rQ`z=8(k8_M7Asrvz>2lR&DU^wP;_F` z12L!43$QMQbF{pE-ap06!=|dH@Mfg^34ob`*}a@0xO$3iGU7Ki2kG+ z*iP_c_Q64Rr2W}1@a_LK#PRh;`*nXPqx#8fK)eP2Y*bX=ZU%YRRNz0*{vr>M)cy^j z{~W6Q#~Ofhzi!~0>=*A}1l zb0S>N^uZPS$FVXB*7O3m0X4%?j24nuhF#1L4^_PvYM9SIx?%cx)H!^zlCedY1^VR= z>N>!|-ym+eqcl)ljGm{HUCb^*@!U_y!;;rU_8CRZ#f0PjE{{jwi#~6Xlk#-{qYJ5m z^#dB(>njpjj+xbVLS9d)p5+2NNi?rcj@%saNz`a>8za5Bg%_%iG)grc_T?d;XZIt8 zzpjI?kpJcY68Rd_A+7_LBT??*a~R6+Y@{I|d$3Z!dizNx;)C;8*;8L;mV6zpKqZgd zsw5?L*fAaKI=B#k=-~^?b-;N-Zja_O9${Njua6bBg1+`GE`IpY_R}?y)E;DPZaBa- z**9b#8zdd8CvcjtKJL{{c!YJ^?p>TzVqZhBse=qHIBM@d>wuLrw|rI4t6a@h#!_O8 zSwJF83lK2hLcP1hyQj-qlzhxiN+xBD_-w-MW)11HpZd?JvBYCaXQT3dVqK(U4H@JDCzYrI6Wm{22D>@EnQNk3bS(U2h*!Zklw9=z< z*;pdvk&Ou7piI}yjNYRm7+qx0Z$S8Oktj6lhd6v7NJ0v!pfwJ&I~c2>^&|g%)wd}8 z@BE{lgGBeEHpF!R?oz(?1XMh2MJ zX60FnUOL3Lz8_r&g75)|nD2ixByHt%Wnm&#SLe?sO?AG2SQ?tHv$F!Xhh|;xmxb+| zM2a6}BFTw_pD7vff(QK2c7XhZ9%Y$0%-yn}L_;wh@WR548M3BQH7I>yi`M`j@g8nC zC(2Xk*vqkQy$b|$S!Ywv_*D=73R z3PQ-tIgEnZJL8(hr@LY(U0B^bhu)4qeM;r0lf*Pjk=PX&tTF@pciNjjx(tC(N#6PPDB?!_2e`%{OplB@)QJ6pq?eP z6MY7LvY8|E7l||x-ri;e;Wb4I>tVZk|Hmd~fM$Nly)!4T39On_;~Qq#E1G)G;hJ~# zsIM|KmCTWJw8m=1R5FRO$lpzoX=A|(o~ zb4URl;(k9=`Kf?F(60Y}z2k}rB>r7M(MJ-HS~N@n?YjUdEGl 
zanu_~sQBz%n;16~wjJLMvsbnCFV}%Ed;o%4%r8}h7^ZN3M2I?Uq~VWMH2q4`jpR0a z7CI&+?m!=W`n9VCrykeE@?F7Dny|5{r971{ zTVzwF)G=KR2mr2NMkMbR>H}?|B;*>ON~M}_vy5%b*pJE=D%1KYraM5sq(fZ?xui)+ zdzZJW@DQrSMBQqg%Uy#LI;eHr&m=zUWtGMxI;PgfqXgCjI;btq^Il!_L0G8&a#GBP zKyqVaWg(WH4VeiGc*|nAm|W(-k7_dZth2nWXyJ;j$IFVH4W<~XlRlZ<{Qwrp#fng? zpwmh3e1S1vG#<|cfm@ee@np8rwQ?)9yleZvKx?A=Qu6dL4N2O}V1R*Q{%_Kz8y+ZW|*8y7|HNXCCPLFNj1`FJ`0a?Kj4X5YP?i;pL+Oe<4+13D* zR)vEP(O-Rd{%(9DG&)yQWMCt*P=gpieG`{oSzh!C!Z*})0B6#A-Vw`ZHJ5jW@{Z*f z#*i|b3BqpI*tccHl`aiSEAED|Oky&=9^ zzznJpAM!26vs685_h;&*$Vt3H5T4ZMamLcZ<-_lm-~a16IDq~yk|4=_z9FuI$4B8C zzU3Xvwgk-gI8JCWP2plUGYUu|h?+lE$uNlQ4Q`p}kcxrq4W~!$u=)Pcbs!2K0N*nH zo7q`-0R0@5>{=9nQs;jEjG&_Z`+)HlDkXVu9SW|8$-Eau_yFC?s(bz~<(!_v-H|Q7 z+s)KXUx&x)`>yn58kWgM(|j8QY^dv?=1#^vz7OUaNV^`q7(#Ro;kp-TflC3}3_I`L zvF==1JSNx~m#v+6$h_S=k|S}gLeU0NxkIouPEw$LG&hsEr;&vY@CxF-k4$j8S4iF1 zT3N1frhe|xXSWxhhz5i$2U_c79R9ivenInrY|zE;U>6<O0p}N8(y9_b^=3qN_IZQj#$rD9H!|qLQ``> zXtAaQcEAR^4#2-t)XRgw%)%Gx{7idxna-kLwS$m(Q8Tg-{U6ss3h+{O4&7*dA!yuA zG%12;xbLyHS`B>z&iuPMPESjk0(`)xm}^fMqyMVwOrO`YPcbKO_Xy_Op3UdlU#sOG z#}E&JAPsdL?4HufpAntSqNs`;%b)m&52||AK=5r?Nqpph$yV2Tex;n&UmN zPF9h@E5KMduJ&lMg>yy%^GzK6`3 zp)mY$XZlu)UC~yQD3d;zKto&y$jH$qSJD;qpX~(J*?n}PCiiO%lgwcKw!(snr}HIp z(acmSyS{lNT~sBL;^ZquYP41aqa!|XQ@UKoq>qB()^^Mw8hmP!SmP}N8`L@{O11J) zc9-%cwk!0QT@rv6589^~@^g#fAF<2STIwf>1FzKh@LCvN5O|}El+}v%*L46}D4@$w z`u1H1T}J{CD+H#z!2q&nM+-GcH}P*9QG`Wg`98ZbBI$R1@s=a_MSW^mTcj9$)^j#|I)DXtFlBY0M0K9_ zxgtgM9${Q`J-?|ct@qwnvq`FR6v83F_U#seE})nwYCXTjc1N&LNNMzD`coO~F~UBb zo1+VavZKILRl;N9K6UP$0vd)kzU|qlqW;9IzN%ae>XB^f1tn_>zL%HGgaIS20>8)h zYGXx_z>17*uvD=TY5yiW;ri1y3Ielrj}oAwZA&z#!Jq7nuhd;t%;#@>^$@prTplB~ zh9==${*a@2AlOpv&n`hHj-V?jut8ASA34~cO%naa&VMGC{`GKzZ}lL@?_+vY2hyV8 z5-RZDf47s+EUF(1{xu-^k5l5?@P#PURIq;$=)k4meDH?$_5DK6(BQsZ1T6!*4y5<{ z4-nVE;1Ou|pF4LL*hl*k?|NPCGrjL2#bIKjA(m&Y4|Y<+)wl z={mL;p6vnj!JeU?W9t=LnonhsPR`b?paXr|0JRLjHH-eOnW)rKinv zbOsz3^kV^KLq3YGG)r!O_qq32#*cMyE&NQkM-$B9akyAZ)t@~$5V3#uiGzNY`O7>} 
zXvx2#E7iZMe~;kb3q-#sf0_Dm2>O@R4mJ_LsdS(SB(oo}A&!HEqtIL=cNm%}dEPWx zoRIL2klxG9gj*4ty@q<{snbjq77=oy(?thL*by5%U5fC1L3$~8|04NcPV^vzamYO1 zlfUV|{KpeLCt`0BuV{ zlY;HN2*gr0rB`G^3mdF~si$4iL!|8i7nyOQbVaw?RnBY8*3WJ}^L!t0y`RL4?&3hh z6~7cq3y6LMA^Hh&lQJ)!DD6N*kq(p+)FJYfjjU{TBr&SiwHmIR9y{*SA#vB19ED7& ziMM0S8JU^xUvDp8T9gUMz15VVyA*itqYOX>Vi^F^?ki8ZkEL*kKYnC*o?r>v^UFi( zb#wXQqTB)??3#Jo3BI&)f`QVH^3l6QHA1{M_29o`*pUtD_C#ml+`r+D4Hso5ivBxd){T$60=pBYQ zy3qafLLw3#NyEDb52s-3XP~SfezW)6QmdEzlV1HsJ_7Ao^Y&9FU4JYev8!5Bi?(Of z@KYpZrMxZe5-yTp08Itj#RYufYM)}5LBJW-d-M-^uI*|)xnX$O=UIVv%K;&b(g)!RL-E*?fL%`TPt^QE28j`Rund2vk3-`I9`*Px20C z{qN)>_m6xK5so6CPzIRt**6=wN+LC|A)R{o==LxJMci}SHh5`AA{9F|>tx5>kJg%= zuobjdwu`vA{Y^oa;r&C;{Zc}RVHet~)B{mN#r{}AbPosDp0?IMXETjt9z-9+d13T6 z-9hlueTsf~H^%-*_rqEZo`D;o;Q@Tj3YL@`09pfITJU?Z*@qQ211G)=c4XM#0YZvY znyC3%1{!EeW5}N>uT&+0NKcXvR&vlj4Cl;8ihz7=8G^J^23aY9VbLSTS%GZB3=BKz zSJ~qOS_7wgD@fj#Y>jC}9#d!|PK-D2ToP<6ep!cyeb0vgt^SS!71#3eWeHRUCLb3M zpi83trYyDJ3L)-IghIR8!dINEZ64dsJr+71ZM(zr!GQSgC4)z}#pe@aFarzjs_F%6 zN~Ap?wqne_s6SI|Iq@3k_^FK*TzV(A*Yb z3Jp9A)lYN}54YY=LuNJ5qBy`)<*iX$Wk@$(vqn)hT!*?ZI&77>ZaPc*yDj}vSqNb) zT8OljL_Cx~7!73R@-V2bhqAou-AP!*ypoUiwB3;_DI^k8Uf)_n;(Rwi=O{Lha!_eKn z&d0-*F999~T+1U(Ed_2|x+Q)&7Om-`+6S_?84IMa=>|{7B79%){tCRm`RH$_V>=q6 z=txzcytLmc9nRjleYUWE;%=-=^7QB0mzj%Kq|JfLJ8zy+GPcOAoxGH#D%m?=I#VyV zG5MH7O~Y_B{Z(T-1nXW1)^_t&;XAuz?iVlM8+^fvr0Z^ZMD7zmd1pA~Ix|7cg7Pu3 zepB>&*38G-DYd{C8w{^FI_{%+AE^bjy~259gw6X^n}ZKsyHiFmijPx(cp{M z)pvMHJ$_=nkL;aRCguOy4E{s$e=`Z|50RJqS~E_P%xsne#n0G#U)ZTQ3)tH)HTL9HnJ!& ztpm5D8#s#>#A}f5_qf04j6;-H9l?Kfn;}wC>KaqoCG#+F&)V9B-Io450v?_arVMqr z8TS`k4<MVQ5Rw#V+g+Xu7e2dF!%tO zkGg)h>6Lq-jOTV^XFq8vr&jhjkG^xK_6Z7d9Uh09r838auQ`s60b_4b%j&Z5Lx@{ zFo3NVkO~w&sKWqJ_$d4dlfV==m_L^BL5Ez9WbOnL{U&xYJ5QA1yYOdI9SrklK7uX` z={$$Y-)7KefSkWD>7V%va_!{F`S6$ly1OqJ9$=m0;4MH4a7kmfuMuT#c)5W61mIuh zkczFqoX&7Op%BIR4mr3^H>yNg4~L?jV^rb!1?b|CZy{ZWEU>LRbF#zai=zbYryv%x zA=do&Rpa-@#|oA+EB(~I9254A_zOsI((Gq%f|ov%7Q;^3TjP{2&+1Ow5|Y1OxGWkD 
zm^rRkxa@8^GN5iN#tsCL@IFkFrUS*>R%^|VBAtA(0g%!fRZ(B`p46Rl)R|psKRdm^ z8x>yO-q44{(}OQ2y74X2f}r2^MZcl}f<7%k6guFC1otiXhj}J&CfaXD(y8FSZU@Z> zXyD8K+2H>@(+>xNmO7Nl}A$9a>bu71`ams zV1I$UKabI|`~_dwwuMQqn4>C^OGj!Vuv?C=8TXM|PSM??y-;g-V!j6;VyD+sAwU_s z#34F*2K@nP6Eg`XC=`Q<&dS;-FP<(D(wTtz3u@2R_4WKNZ%Yb<6wV*g8%tMSpEXDGfq+AWYaH^ zow1)QC&gw(cNIENV2MsK470dr_pX%#wm1=8)T+@udS}jU78w+J(~XiWd#>p6q1$?$ zb}2)k2jjf&FVHg-W3qJ3<3X@}!Rc0*w=S=YM?`&xg&T!@V@RI;s`@c;25u|ZO|cHn z%3B+!IZFyj)xMJCe>z##uvLWSA9ISj0`ML6m}vFnZZ?$(S*SOqAn7 zZwzWRF*|V1d5Z^CK4db<-MYTma(WZ7B|2(GKTFME>Mm9>ugUFi5&P?hElTy{rw;n@ z6vmk-wESN|mI}s2hb{g#?)*c$aoCH0R}XY`KVpOZ1&BIFq4`|lVQ3C%#e)xcX%|$| zcU{gkrD3OKR2kL%d6n>UlPy!hGH~|==4z)u`U@1{{m(u9%^37@z}`n>o3rQtcyN{$h~*c3|UXc{IvWPqt)^z}fAz&jiLGfO=!@{e3RHmuDq19vh#Xeo&vi ztp7MST8*%~_EuYsoEV_hnJr|zaYw5JCuhugtr{iCpgFSkmB4*(3f$dmT`d5YWI-Y&b^pV2Nmk>Ff$}MDa+~*lAN9+U>QFf_*SSW+1Q{A;~ ztz7B`5bwS{sSEm0gxh-pm_8qAAI01rZH%7cVWT+`nDj{wpOu(h!Qc$1%U&F72H;Ke z^giafG2+mimr{l383}xrn-nzf7^{HFhTx~!TmB(=;42? zAINL$zYz4*{(J{g|E?d?qv(eYbQt{<=xVAqmxksik4BvhyzJ#+z!TxW+guFXF%y3u zDk*-5A;2H~1xo&Oe!r1Vgm!QvE^hdrKMn+LqZLt*?hWPb`o!nh!=VzSXHd6<9Eny= z=IhM`MN!Pd1Ig&V>L*;D^Ba1-8j&QFO~&c;TbW)f6OM9`rGI7eJRVS~34To&P3hcN za|3kUPdlvTI@ei0qpWx2msS89fnj8be4zdUbPMgo*^mU$=@SkVIdo-%aStrtbQ6j( zQs~O0Ub>a)a?Iw3yPR4iM8&?Xh@aYMFw_}t?V(p$vd9zvM`#s&j&(-%z;nSnB6^qF z@^Bs#2kFtCvED37@ID!SA(>w~KP&}<%mIj2R4!0ZrA_H_8g}vIJca8wPi})lR9a{; ztgy-$q{{jq`TU3@=*QFFoWX<3M4{#Xr{x2=GkE*f{Oq6oZZG<~!vB_mG``En?kMu1 zT!ks0eSbj?KC+f{Pszi*7o~o2w>Kh#b8^o2U&27X_V9ueJVWC$>=x)pe}OW*Kjqpl zC4?9@%;>W$Kg^l^$1RW-U3ys5L`irip}-95Uc$S%igHIjc^sKDwuHCuHoiJ@SbqWW zbXEh+#q`r}>6%a12r`TZ&XA7CnzYr!WnP(75KBS>Jl;ueUpA% zBMp_r6d7CMO<)=q!n8dFtRtP-yGMv;(8-bWrdx^PM3ww3C|!PGpf;mp!bJeX3CMIGBj; zi|#XO1oeqe4s7DjeT!Ja@gQ_V{ROda;(UlH#o7a={1eeVdwmnRTq*wJ!KqBm{# zM0Gf3GxF%IR(g*%PPprQU9HD18F!RVwLSvr8FjNw0n1h^dMBVdD5wk2;Xv=f^}y_A zg=cD8j_z`;Byik4*G+hql>+szzu+LpKTm_S_UVTB3j&WqH;MOQ=zdNqP(dP(*4L!C zs6X-Ma`m7Ui@E=@>vFp#nL)dPP&RD3fAklq!uyl>K&G$cMgwK+tHxrK&KC_w&nT35 
zDzOegXJYM*$xl2brQCSR%N%}fWg=?nBL|nG8W#6Go;z|{liY0p0_9up*hRtU95{QdKK?}l*(zdX%{?T8c=1*t`p?Uz(jN-D7MJ-4ruQ9#dPcZnS zupE@?B=+f()baRP(hS`NPN&{$3CmbMa2zdFeyqoYXG-RU2{@BrIG}L$iIK^WdyYPU zSEOhv>TbjmBl|S45Jk2-52YIFFG$ZsTrduDYY`JP4J}{q8NQ5hxoV@}>}aiL&@=AB zV5DPG{fdpgk3wx5w?E?%d1r6>7E(PA&V*^t9-Crb&!d2d(}0sqvt!pYeof3vGSxek z2zy4iA3F9vsqLH|dmL*K(y{W_UvQA$pQk}O->JTT6sqwKwUm>E-&}9?)JxS=WwH#G z@#LjD6UEliFvR<9{ffCSitLG5>Q@MA2b3hS%u^R#Y3YqpNc||Fj|qeN z3!XKs1!azgM44#a|G2`c^&0Qurg0@R|BZZV%g@W%`8CG`IstEQ+E6>dFRj-wkIe|L z#uo$5&}b>Yx;c$a(-@V}4rZ8uSpi1h`sWDzzm*Hpg#r!s7o;8qXIx9bHus|Y5W=X& znJk{s>sKAsC&{^Scu$21YP_n|M;W`|3+`FV2F##8Vr*p8@Q&8R;%+8)aANA#B>eo) z^hxGN1(xUYFmZ8fjMP68dY4cTVERg^x;Kd@jr;wQPxr3(LajdgGmH@v~dJ%}H+!Pd(O` ztNsFzbp(ZdxR=w9<{=@5IZ?2|f_hfQ09_(+MQ*GYCuu?YufG7cP(XT6`1bt;*+=0I zTMwqV!Tg=Q-HaN!h|%R7eNRLA`p$4vUd6Qv`0Y|7ZRJ zck4VE+o;JF{y?uW6YBz5*O1}>{MK-rIQKM0fpkTvTw9QQ8bu_mMZTS>KQYh zd4{*e2dB+-8(}H9+Yr(rehcvzyuf(xnvD(D!6@d7l5(QRD~%~l-6kEK#<7VO;J%M@ zTqoD%vZ%^iqSjaHcQk#|TlQM7wA1e>&?9|ZuXd4hAEccFq`Mz2hVD*wSVW8%@5Yh_ zd=#>}HD6u8N+O8qE8<=f0YnGqtd!^8y38J7DvrUBGp!{bUhE@=L{diJwh?D}oAzL& z#r+D#*YP><$UAu2{@eCX=0DF4fX?XuGk5e)g6i892aeDY#BVhSpan=E@Xg`D1&}h( z*Sc)rqv+T4_t*KB@%;#WU;k8u`~KdblLMe{ue5)`k7xhfgz8%gq`%*FfcOhaj>0-t z%wbsfQB@Ehi+g2i5LAlU=qdQX`I%zr6JwFHtHyY8>m09)V6zVP7ijon#U8_7kOEBY zt)9h-w(l%;MGxeByTcBf*~X>dr21aCMX%ejJ@HXg?5wL$+h)MhVKRSAvp;G z<+g~7se7T*j-BU%Ac#Z#1;V)7IBxDu=aWl#1whX&Fj!=XzPzu9y=rZAIlTCiPt-9x z6T?-u8OoPWT|c|JBZyZi#xHrg5n(_LRWQhQ`aCPsJvhJ*eKxxA`Ys|Olg>){jO3ZP z%OQz2QiXN~9h69x-71cM{RQ7Z{`C=L@HL)8`~@{fVH^|bFpR%STwBvbtWfuy;kL7+ ze;umW+IEJXlKat`Z`;kZVwXcakOu58(DcVd{=L5-1sIBqJFAL=%Mo%#E}xONODAG% zX-)N{=FGgnsRD`Gs0hIKOF@O$uG_jLAbmBz{H1SzoTaAWO5Tg~iucOu{Gar|IPd!l z;%8=*Tg&JOMo^8KoK2KP*jkX{QBHYXxWbP0xo<3q@t8Qb$fU+0l*B*QsM8QkqaLrF z61J7=j(muOXw)ceev1ZvHGU4Tdta5vi*+z!i~jc`2mLqeH#wrvGH7`IUW@8` z@@J`k)&5x(Xddq2CwP8&kDnKS4EG~8#9z>Q6q?Zw^%DuTQyqPV&rW_C&5O9S@`Sv^ zT7_}roUEkb3%EoL&g$ptup{=5{sJv{fAoysj6o~T*BNn1QO*7F?2e+eD(`~%Mgg;~ 
zO|j1Kz*+}h>W4ZqM)3^QWQ(}fItWZ(U)=JrLC|NDpw;5=RVIm1{gZ@XJ&YSK9==}N<;Jm^%qzvOy=sB6{cN! zf77hW!img!Ln}b@0t$n9Yn#_Z2H|m^UvT3r{Flp8oAL2`dn2^;cz*GObUKyZO~kng zL_KLvr|5w9!JOQUg|8;)2Qz}>!q|i>#;qLXXS|FqE z`sq80e$X5b(;C#ior?5OU)aT4+R|l+wU>Iq^O}W$!UcVlwhN$DnstM%pCA1N+Wu%x zzmZQ!0_er?$yarMEFTy0wH;!%FK;K~DoIeva|!t6uM3pd4&%*gCy5YU>SzZ6m4(Ej zZ8y~KlCa%Cyzoq2ob&Z!TGp+QzV^CQ1Hn#%3&7>3J;#*=ga`d8b9SA^=iGQAM1v_@ z!nS?dX;MbMs6y6_-}e_>rx{#DD3|ij)6k>_xdv25y^C=T;ZN>*`wSZ=Q!DJoF`FM6 z{*I|n_Ubx*_x8lcQzLG%UM1JOp+I_9if2$Tf*TjS+C}7(ThF=Ix3y$B3v~e^iCHqm z5R|@7!Y6HbFQ*L3`T;ltBp2%7t+|lslYzB%+#rM?xs>`ital42w;WcPEZ`3|KmWoC zatiuM9yCjIzu?<$TziWQ3eV5PZQRIVa3R6D&{(>Y8ER6b$ zMV#Ra_}b2)(=RXzxMxw_MLJ`t)6x(GQ;}gS=tqBn4!l3=^iX)W* z^0fu(FEBY3oS0&ip|Xvos&!vM)0)aGQmPpsb?7dUGwz3G&kD=)gO8H9 zLAw}s_Q~pRC$R=y@zQL=05KXOuFB`2)ygh3-#xPHD%Z$3ChHo_8#%E9X!KL-_(_3HY3(#wK8vaV)WMEp!7QS z#752#UyL;J^@Py4}Bg`B611Tpn$E-Ehnd+RdG@ zzy1Q)A_AH0(+%+#EFXn#N}l?Cx{o+6<{9x_81rWv-&#Wx;WCv&av~k4bgsQ@)!X+|JAoJDc!j*Fv(y z+HyYT!g-wSz8OhD!t6rTyk$ zmMOmDgo`T)|2*M_yx};c)cOwZIbYra6lkcw0PlQaNGcA-x{#dKR<@o_TBOJL)lX@! 
z440XrZsy>fCq5?7OY|8O@s(-{Cet;UpD@b!`uWNfr-c0NIvO>RhV4;?Vg850tN^2L z4I795`)h#Apg@ECk#LWKGh!#8j@GHvzw%s$^Yrx0tl#@SF4V$q?Zu+(Gx4ovAJ?q4 z!9DBl6f*>Cnp_+MYMn1vh|q(Z5Z%^Po3ELizQH=c*q>Yp6K9CO0PXkBF9>_J3gGpv z5g4dhA(ei%y&@OgO(mEg)pgF%;i1SFgU~VIOCxzWkaK|yIY4*{>0Xp4UOHhn;Sd^L z!lDuuJNG;HBg*l*{LA#(U$O4B%sd!b(sJUvX#kTyh`+$l zA5r~J`~@k%1Dp&Z*^YspwCeL3Ru^7o20yFe^#0g?+A#Jl!z3b>BcOo(V9{TpiSL~F z&5$SBc5DM9poHYBxevvFc(ND}(rQo-SpS%?N9vusj?8`1zhfiB3t5@ft%a9)xZ1*z6m@&S_F4}n3XtUNqgp=e6^kRMqL2d>`0wj`Km*^K>OcQJQE-KW8^~h6 z>j3r_Ac23Ncd+PZ>6_m&{SiJMhV_l{Ti$tV+kM!zpDdVsB2Cr&&aTswVU%WJ=eXDkn4w>NZ5ScwS-WJ_T4a z~L6%?RIoMx-#Cjyg zZy$znR~{0{$EzcBN>kL~oY&mScq>G*qKMJ$g4HILKHPc84V!VWzu=lbLgerL1ppw; z{H#}9!`MOp>g@*llcJ$7P|UjXCWfotTHxRIG^*qV2#EOJ%yemFqdb@)o~m89zCvUD zz8A>{Nu-dgq%6sY2#oW-zkpG{8&R^~uVxZ^#Q$K&Y5e%B((ZkTgk5fMT+rBYq zKZO38gnyNYLeIa2?5~AXUyr{y{KBvG-`cRoongGre{c9L2V}J$vBCZVB)%ijEPNQ6 zC)zlh^RGswFRR+^^b3@?1mA;?bV$jPAjNkd!e~J8bpWBoUo2fSoF=!s^*4k$`83KH_g!BIJKqEXY(J6ScM-5!7 zPIxnVLbRJA5v6NWD#h>MWxKf|ol{v6fySun}31pC`UO5z7i%bPyvH^|9&1WHh;YO#L95zN@|LELgQ7 zicyI?=rM89gh=S(V0Yb$2-tR>(N8C{!o3 zL)1PCbak2W_-BkQbgJBEe`lV0P9%*@{BlR9wP=RsgsH|2uixtDYZv4jXfThW&>Bb6 z5BQP;hREN%#J+xh-97u~(t};Be=>iQ09k+4PozJR_)+u&f2bkA7k9TtPbR+*neZgO zb=AI~>+)=I`{Zc+&DIVP!;3+L;NfZsK&IOqnqsdy>|A|L4U3zY9Ap)!qm z?ufibt=SZ~BJ{5N#f}Q7PIOaaKK4qcAIdT1!}X;vUUdCQSF3b{9R~76Lgb1N9-nI) z4{eE2-un?};{sF;NzBFg3F>UP`kZgLypW$2EwqyaqGe;h8dpvUzLX2BU!Jt=^}5&Y zt~-`RA7P>yY5n#xqAnpyyDoa(1@0;A|6k;D2poQ#XJ)hFv$>bq7A6*dFwe&&AM6kObON@5e)JcZ!u!Kl z{8B=QVY@d((fD6`nEkPYPIMZP2HDb>g``Si@`Xnbu8m=rT=A3?5%AD$^}Lb4d{}?M zUGna?wB@Hodyyu#IIj-JBWX-B<*k%PGDq-6>My@z0o46Tw`->{Bf_^xc9c%Pe)A#= z&x(!twYRIQi^5#XL?+~G3)ElW5nN8LSy{NrnRmxHfw{YaNnYRWy{oQUjX+r`?!~7Q z#}r!D(?*u7@-`HwbQ2cIZRHGl8RvyS8Gf}=E&e!Mb0*YL-bA9!5jBM{zLTFuA+8W{ubEF3&%C5r2IjGf2{Vx|H z`AR<3PZaXC#N08?n-Q7DI02iY-K{9wKZ=<9c+ z^wfIkHG69$)7!+d&`+t$Ul@X|GO)kkx<7p2FO`K5)`?f1$tS5N^Mk*@ooY~{`EjKX z+1*?cvaAPja;ndt+_f^hw5p7z^Dwjg2B2?Ren)X${N$;7!xL$bv>S=fSdx?02BOj^W<*twnnR|8SU?ak&`$vC) 
z8MqBmg-l<~KQ`7*kG`ok@w8pjvJzF(TC)ieJQE8Ke3WAT*xY;)rSv5azV|rNOCk%b z5Ja^L#gZ3$R9ik{YXY7i+1c&eW+<|87j!d*RNHUOt4cc?Vuk7(PgmQtXF-ED)L-C5 zS*Y9MO;vescxHf$UAhn>mIOy?wbY(LO=>Vnc%%22ST`*c+o%RcS#~lki^zIhq{n!2 z1-+_=|kJ(sfPgb>X*w2;@FfTa}q`~uN6m#xnr}|w?5ce#qye}ItO+C^%ua_ z9LWAV))q&?`cTWwZWPv#dl{+SY|%7#*qzHY912qw$O;_iBG3lm!zqzW_ERH_Q2Dv|Q>-*( zcUx9eKwpko9QLF0<%(@I-)L+}hh< z^`#U%=%42o)DPoN-1gi9m4vdtOjcQ^B0AltxY`s;Yizc#(Ohvp7oaSt@_0a zE@B-V#EFrTyT=4Nl%fS_G2Y3O@g=^#or4X$dwCN?j8YSEueQ{jbB5RxX6pyD!r$Hp z9LNz0G}vF@a|E3ICIN@wW+j4nyc9R|DIAi{L@a{aOu|H_x8+Wrv%u$6s(KCXSYzD( z;Opo^Y-h|dmm2Wu#3*hExpcj_oYb9&o|3$X&PbRvL+l0U$kEf?A|9!a`FdnEi{OKS z><8ng_FjS?UoZoiaaFtQYgXCk$IgM5tJA8CIdL9CwL#w1P7hxxH1QUh>hoi=_lRGj zhk0W6iJMpmB2}^*=6$yh{g(3*@nh)IoE>{@*Z|hJUoV2fFq7>kCoe1^)|B{Nia>_z z_z&|23!((GDmnhz3l0%XouKUP+Y4?U1-}nU z_2$oKctN}&lic@USFXyBfd*%;N6cax@jyfHYzi-2nn;JTL?P9n{CEk zt`(jT(88x7^Muz*}!_B;x3=G@sdl5yB)m(R7xQF5Ud&UN%jfU`t&FA7JremlK(qFD{~52Kf>;bdaDmWb5M=*-$Pju8 z$%1Sk=ly;I#9R<{6w>n#L%M}GicF14%_^^*_eQ;?KUV7%)>xGhV#8>%Ra6{h$3v`3 z@44x2(tOy8F#G@RCY%0T{(frHTAlsWQ!LWz{n$pr7RZ zI!hE<^sj(Q1|yksh#~nS#0pOA&NK+3rqURWYvYR48v;pbm-UvC2@x3%BKVx5}1A+#VNPm}rrtHsS7Ux8iG;lWP;5p2+ zD73~;)C2R%Lsa7OKGl%6%tJ!CRX!K6W7L8#_ZO3?9NH;FBThKt8b=0 z17a?pE+%&{(~wRLosB`K`Q&lVN89R^d_h~asjGIJYA&!*fD-(~DKnDJL;p%^uDTD= z+wEQK`Q&`o=YGL0?rk3b|1>;5RR+~hbAOrnFX?>yGV#m$-wQ;)Cx4a#xqp{V#Sx@q z0aH5rhJu$APihFO1&r-XQ-T_6g~e-$D4Xvg5eThZYDlLZTUCIqp53w?#Ldlb>al~j z{H1&l!+1SARH=L` zOH*y=8Rx9$1>hRz92Pc4sW*kS707kRrY*Bb7aFT(0Y(Ixled$f4nBJQfmrs7PFfIJ zNd9emBOgK^_nH>j^FolXDz72!Q;lqa&7hj(=WW+_5k2VV{AReOPi57WkoPF5ykILa z*gK}k2teBmvE1=#?F|;sef%dVd{$g#qXl-Xpx1o(5KrJrz#3c1VTa|g~p zj5zSA0QzvN;!LrFx98aG#uShv@Rod<;jAx*wiG+yLWm zMFxM|_UT{~MfEeV59A$IE&mePfjj(R66CQjGKig^;Rqtrg())dpi_P-vypCBlZyPg zs+Y;nqaP_c+Ay0DV8(wD8g?MsxHk%0Va}V5_Ikfm6+#$=DPFIU`CXv{RssMhTF=(5 z(03-f+T-z8i=X`=U79SNZBT+gvSn(+@gwI) z;yB{+rLd+6S_s))5VGOKdJHPUFTz*0&?-Nb%gn9y5fQ03VNQq|@X+x5Fn?S_H1bFG zHemM_F84uD)D zIC!$uKQJMj8!7(pqY7YS5Axh68)7BsI0D(qhao$vY^s&Yh*kd$*=>!kzU@5E$xxLK 
zXIWz}be?@+emm(94-d2{vqIQ3|JHUmz^g!JseblsVyKOv)%F}_W0%ixx z1W2}G4o*(-l?LfAM#r%Y0EXeD373kJ{M8_z-bH)TJwrlTZe@U_FEqYJ3|+KR-g(89Cv# zrz_(qXGVYG9?x8-yXdVf%n5+yGkHZ0bsxTO%{WTa3ZVwW^uQ2?BF8fE92)w#9CzVg z8^Ixv_WF+Wz!8wXd>EwH&s>N&&pu|A@x(5;zNjxLv9kKJ_B4BbJrah5AJHil*umPm z%o2Xn>>JXK(h#Dtn3aGyjD^#VwKU=HG|U3T)SS_v76Ojc-Oo0m8_Su=nqdZP!zI*p0kK9rC+b*MDl9HBaQy?t%==%#Xz5@`uo_B;EVNl?*`IpdXHYpn)`BnHTl%;yOkmZ`Q_%n z7J@?%?fsqT@goo|dKjWVgu9$Wtfni2<9)sN|FL%#U{Njc-=_scKq=`4MFgazM37Dq zknWZ)5y@So8>A%;As}gh(k+Mz2$D*Pf~1smy}OIaMOV@P`@HY}aqn~Y%=~7~*}a#~ zcV^Dacjt^F5CguvzUY(P2I#e6n5xbO*rxD_Hd{4u6oz7q|4vV2>>Rd0|AG=K@yE78 zF4W-r-`EF4BVYB3a$N0;P6?|M!8^+L`hA$3@6}1Y6J0nB>WlRk0Rx&!3?<9(V=^vY z83JH8o^<)~s_w$`8TW4%NT-t=Pk_SgyoRI>EMG)a!Haw?hhX*YFc#N?l41jtXT7%^ z#L?R&I|>XUPaF{DrfgS6tvq`XN$W7@{*Hd~F8v-6VjAWwU4;7$sYA=vaQ}D0t?<*a zG<*>LH`D-mz+i^h2c~}mWnLh;qyb@lB4xq4iJ7)d=VhbqZG$^&5=k+o*G4D`mhqY( zBWYz0uUMVv%l>7=pv3KC)~VVJ0*ANFMSAsn@`Wbr>OR6n8R{OuM2!*=`alY1dM1=Q zr9y_7$nHwVA2|eZ4~#)4>7~Zbgo*h9)cFc;nV)vh63S!MmYsN0Wj&0m_wvm2)Ir?? zG4Hb(5D0i26D3F>ww*915Ev{)le7B=O5 zvF@F@HNt8cw5_r9Anl)f0KPato-p)w-2=fpH7A(bD;6KCIiGE?3 z)aMne@6i^^N+U{H+U$Fj?1LcA5$rU?Q+nH`;vb%YY)io~2;GqMvy(RCA zp7J1|^PGQmF^|thoI)4Hu|u&Nd696iQfx4Bw~d9&F9<`4fE{_P6Vr`S&p zSU>o=!I#N;&^IUlXQls}GDvc_>YhrFEadyaUY|S*p95dAutMa|=fiTa|1K(65>{OZ z0Tls~75Wr`96(^u?uY?m9zaF-4bYi?1$2Db(;d3)RLIXeOhc}`L=wA8!#vVaJ=yJd zlf=`+Uy>C*=q{_yPNxrG9RPr3a4!#IwK!*`JVgr|Lo|O7OCtq)%V?D z#mHLbd|jRoH1W`D6XzZqj$2$lAm0_aNl5+G_5#{^N@jLrmzHLsauSAS6G{m z5pw||hop@L$rXZ*F^<0L?|GZdU~BdMgcc2*FB|0xDuB87pLO6X$3IGez~E0UImA1F ziuoIWGyV$T_R%X{E?g4#B9_!IElElwmM7>HxuyqAh4?f)pLVGF zhe1CAVBfe9Ec&@Y{ZxhO(-F?$`}TI9%71ZgS@OHfgTP+9Z8gL?fQtVcDAWE5%E_U0 z6nHD<@>bVePB#kK4@#WP<7GTm6zAIdZ21uHbNEBz)t-_rq}7hk^A*mBbU&GBj(>`h zQGG^W;E$JYR9L}IibWDZweL9TusSFZu1RYg!@VI;jYQ8Vcq*<8auZA`g6VK!>=1(I zC4S(F)rn!nnzY%di)n$A8OXMr2$8q|TeQiGE8b-~mv|W-+iV_RVTlxJrndx)dU2S!Qge|+Sg5+mGhWb>t)!8()16ir z=MQFasg^1PIvf(nF*hwYHkbsOUU{=6)<49YWp5K#uIuH_I$BwF_x~C3e8=SMgB%hIlA{MLe5V`CHT9Q%PZO%}6TEk`U9GsNEP8ufye?7I|wo 
zVNnXdANbxmaGmNW+Ii3_sMy;64(*S%RwA%s?zmtV|U&;L5cjzEqqBL})%+2ubx>RxR{*-LpobQ!OWhB zHbbP*gv>wE4rmwOPlw$M+;<^Zjh}QpR6oANSF(Rl=a04XgX=`Uc3Wg`C;)J#h!rGs76CpJa7BN1BFQ4zN11^Z@72<(-=3LADxv7enWS6y-YDp+ba>pXoR6Y6@<9`C{59(U2snL{h9`#U zdYj|s5%Rbxj|Up&(d97&Jk9TTJ5|9LLbHILYiFL8^aFHsiOC9I2?yDW)a1{GYv>89 z^DdW{RLW&SZ&iAsJrt!A-gERMWIWOL9!|oJ+wbM%A1)z&G&PYqMuT|09VhRADzj{) z4Hjz(~C*NgZ3=LaXdUa5Fi75$g z2PwwH>2es?NVgiJ{6{MLge2%k zXzm*qf<=F#vi;5i?HAl921jMM`<(J^2=r~{yEF*wv#TX?9{-3~=N*J~ZwH-H4kIQ-6eek?P^C9iZp)!OvaN=B0GA(ly+vjmwsEXz3Hr!#z~Qdo1{?yrF#d+RB2fz3D-aO!jNCaJ@`r#e~!7);$k8+miK-YsX z-3rd$BaigOa(Hu>adFc$*;>x+DZqAB*p7Xrz!2L5$AZ2{XvQA$}Mw;BIFTuX0V~FpDE617Wtuhz`N4@>>~wvWGhz?;ILhz&#+-Z1{-$#{{@q zw54S0y;?6m^KRyyOyFr-ruu?`{2Pk-CMq43#Q5Cf~JerRhk8FzLO%xSfW+C3A zxf;DhZ-sy6fqgOlE(ikqePvqwH<-r%6{c-REF8tGZ6?K7P*(9}hU4!RVl$>p4b8iE zwyd3?IF<#U>F?bG?gUVzdy&m3Bt6V)K}v;>Gh4s{zSDwoFh5K(udN|MVAt}P{AJpu zSR>U&Y?riqS~;CI;3w5bEBow#ldYvU@Cg=759hs3t0v#TMLQfYl<;}@n-`-nG zbx@?AL6M%9^esHc-HC>tw>`Cj@zC8gLcGK{YRdDRokt!1vkL+TM7lMRQvL|5m#pW@ z)jk(@@q5N|L7vG1m1-3`5}~8v)*-V)26@CNhW50s5NDo1nqu`%E?l41+I%A! z0QY|<+zN2U)BFDU|Ca?p-~brSATLyn-(VRRIJTG*k@P{#$KO4kA>@hS#9bCWY7v!1 zR`%m*G(pQ-hascs;=`x#XUinq2cE`w4N(*}n#Ub&eq2fNJ|?$n!8KLB2rkP||G;6? 
zsHV~cLisHumz(nvXDypvxtTVW4@3L|@1e_1Qw)A7Px_Dq-%~}nv|8NSuHtUE7c5~6 zR+MGfu^=U~Js^7v(k^v}Gwqd{R$jMIQWl5ts~owlyfv@tm;an4i;EQn&~=+Y^V2g| z&JXHCyM!$5s;L!VV{Kj;{b2>!NO{wb`=5URzED8mKp1;40|BbhZ{UXwSK1K#YNfct zsU;>-sgF@!kZ+L-KO^vdQ|~-4O>4QkLE!x-190(!8VEdb{>(stZ7->!ymLpUg}dDp z=ShTWAF+a8Ms%okS}m%<^vsP~;95+8_;EV4C=~C8@EF856kVW^D>j@rn}~EP2Z+xw z--NcZu+0Z5{B_BDtn@@@))W97C0uHi~W!*Kwgj(2$uNrh7JkszHo(b_ZEIGf*ge)I{+ho@(e)IyZZru zIrmc@a)8cPK%Zc+-|Nd4`nC}CH3PjI2pqIKW`G(9tbYUaBfkQ=uU>L{qzWO@JowHH zE(dDCM~6Jdl8kXZ?_4zYn7MrJLNG0{oLn^NH zv=lG=65kE!5~?niYxedI(Tsr03lcIXab&B*cZIJ9;1Ke^=w3!$(-C+oVkTRsnBo)n z&q44N<{za%;NVY9In+Vm{2PR0{R-ijcA2m1_1-uE%f`+gFNY(Q$&3}~UfkmbWIqh4 zGOIp@-;_f(9{_`oz_Sos$I*E?oNZN%g+9Jog^;mR#wpxx^FX{m)^}(XMCG{y!n`EUL~O6qNNK4ic8L0vcw>sVK11YS$qiCk!W6cq-g+Qp zqTy1#_Pmz-1^IU&mFW4SD!7NSPOnUpt6W-Iv%XycWFONcGC+0Fnocr# z+oZ0($(vzSJI;dI&^yXq1CB~g;}~fbel&aIDo3s6WLS^GEv|d`jpwky?GHhGH|Y+j zo>HTSPbRcH)-NIX;d`84p?iOrElf`}S1jv3z9z4uJ3a9JJngxjQZto{ngErx;Vnl? zVCvE(!X4ZrJ*AJE-CSP-Nd{%Ap%ptW2+H;`sHwbbD<@BIG+yJ!o;#*1c#ZZW3Pe5M z!~Q*mpig6s-6%Yhj1a8aZ?4dM|A9Zw{ST@q^s9P;e?vW(zoMQ~4Q7)?>n~&1@N7Ut zu_udVbC`?V(C%K*$n|N9!!!7W4cQ?E9K^2%9Po|9KM~K)aXu+T&7R^v7SHLA&m-ep z9`U26OGqe)8F~TvU6g6Mk=`ID)^qXaPkMuZ*UrFbQY$U%B|<$J`I*I;B_ARth0AO$ zSTl(Pj#W|ez@-21FtKNk7D57!)d*<)goi^Oq+7-l)yeAM&bn0VnkAdYZb;Xs+fBv(uJi8`>P0s`o7Mt#bLL^N7_r2I9Na47sjn^a3WX z@JCYHC7Z~rYo85lU~fds9|{OLm`{%BbZ9_-&L0bC{iz9`^^1#&HSe?I$clKBb@ip0 zS1*QNGfu*3MG$Mr`c)SJLt%!Mz}=vDfk*_bbD{5L%qx!N8jeX1Jwv@LB&Sjm4_v^Z znnxQHeK_KS+o`EfV^&*3#FRJrUM15ziUbwlLbu)^@qt{qfn(+m=K<;IWH^tx7MG~UgP8NXj~^aEtOL`kAW$ zUA#)k`S{VgL~hb6@y++UG85XDKS&Vtqk?_oLNLqxJ~D&Xx$S$kJ;DFMJ}P`yW>6Oa z@EbBihbuG4#8XrZUZv>8jIfh()-WL%Jtg2f{ki*81D?$ZT!MR*ql}!!9Ey2 z)D==NsQz)Ar`8;-WBM|qKb$AOlU!DEIU<&he^#rb5i=`cutGHU42vF8ih^7lK0wD= z6AGSv^I>D2beNtuiz;-OncG zrFs&unJF@Han)M4IVK(~ACT<^t&)<==aMw#LKIg;@9HfbUQbQEc0u4+Qlj!Cw&~j` zU=n4E?n0S`B7UnRt$`}-*Sr9m4mu;wxAm?wh zeIn^K$%ku9-3TJn^Dh3(+;{wON!CbeuSH85j8Pri#D?F0Kz4mcS~dUDfA}I|Ko_fi 
zun7SO2v@@8d1F4odcoJLy{@Te@%V9Z$5=m}vb#lbpCCeic$Fh>I{K5S1+~4WR~jI zUAzKNX!$?8cUp!1jpcF{B7uT?=szF9J{0c>fxwYpp)UH{P*-OJp{}Q<44|hem>l`%O8FiNeYe%iebkh2PIz z;EWxyP2tes@VziR%QZ&|i1G+)k*uq;-XLxYlX11=dBrKEJ;&XAk{pd6|0O?hER<=; zvtYFVAiekaYtBe0|$9_C3JT&Qw^ic)) zOkY_w1^dH5hO-fYUZIb9(gbr1n6D=$$Adfl5~RyHL)nX_T3MfQyPfV**1oPYbc zAmS-pl9?Em&HlITAGhyzb4lR2v3jwdf!@0AItjGx%Z5udi<6_eG(ANUEin^F0wvbz zShX9ay-Gj~Slb80xpMsW;nACznXl@e$g43Nbz?<63QAo@x&@Y`)9lcq41qhSgInQ; zx32sA-T#+{K;S4C&JZWTliz~*wJ|e2=ODt!!>Z)Chh+9_Qpdt!NKD`CH_M-#>J@ydb*7Zr-(k{FA8v_2}QzxKp50XfQ7oRwF@$50(*|Z zgX|;Sj)AYrLg?dOsfm&`irq8rvzj2)yF@e2TsY^{dqDPHt6mR@2&n?lHf{q}69~B` ziHBbuD!49}wyB&+fN;J9TUB4TtMu-YZ-lNcw3&rj3E0`RZcFhaxVov6dE7S6w^hK+ zceG=mIz>$8yi=5X`=Hyr2fQPSZpvpGkj$5W5M(L?1qDI$ljq>m1wYSY zKuZ7n+6_{{3Sbp?1F$^g4R(o7*YW-ogw+6j%0LcZzc&>$hywX%?+bS~uzLW3V|GUk zP%A zLXkrLSqb*C{ogzY9Q&y)hgu27{x;#ynQG3tFzgUEhd*BS1kUBYiZ&vN3;EE*(y`4` zvk>qL%db!?K@j@Stpq7R=r!RwbtMKy+Plk;U|S)RrZ-^-tW{;E#`hj`^LiejeZ;a+zosi`))D! zyMiwn5IAnPwT4;==KeOz7*Ad1A)f9=RdHQ}DYqKX3{^Pw? 
z_gD#n5g{$NFA(M4LL_qb!pEAYO2?Rrg2@f%g^Ji8`V61k~tnOW%t zhzt|(pC|z=3yd2*j=bQVKBqQmbKKI~JxjPLd4%XZnsIbwnokpShGf@D@LuJEkxRZp zup))BVQ!OSbAAY4`)JLkDb z1qvf|O$Z5Xfa89L>iI_W%7$Nd)U8Q1P>Zfm_BS)%dN{_ld<*CGxkEr`r1A&Ja<0(W zk6JhM&Nz{gh%eLGX~?8Lz&h<)LEHni68!81`dQ(B%7kE%UsTV3&i)km@5p4g0Hz+$ zkIwpqIMom8fWYxz)wBM$)q`c6=oBl8aZ!(hrB32{l1Ep;8FE#gSGW5oUw(AU`jrjF z_E-r*ekPvG(W7k};>~h@ES}Z7scP92;vFBY7(%ZxW(Y0RumD`uLoHunejhPUp-yJ_q}D#g0xMXG0=i68hj zZv50;d(e^mvi8HIyh9G7fGqoj+~6Y<5r;fZ{v^3EtFnGH)=HJAv}Jv_l$GtE|2rM;aWNfj*=AOXIO!s`r=K=L;S9_(^8FpYgM+ z{L)wb<^mw_tzDTxtORJ-e^q9XnJ0SOReEzI90{(GDjIqI`te$Ml@+xc1ansmaYkfE z)*0Yy3~D6^`&q|iYL9qy!r!tDs;e)z;#kSrSQO;=l{9W4g>p$G1SbW|CA}p3=Z{XW zIWS83uL0=O`W)t1rk&4yOmn-%a;9{1Bo?A`d;)>+*g3I4YLvqs^e&wr}?vR zJ~NA|R(@j;)0I0#C+ogYbC0Ewu>qg`fNXP`=bXE9j~t>Qeb!1|`ev zz03(Kc7#9)|E)N*aVxA-t(V+fN5(H45Ne)gNeSG?qSD0Y&ew_tg}yLmU$=E|I+D)I ziTsok)hz|6i|Ku+NcT*8WKBmJ*>ha-WO&)k4sTSFMM{s~V-79;e^!DYko-*;1Wx)2 zHO=3KnrvEz7Ki`&&dO6_H)*bCn$bJgd|nt;ai5@7w|i8$;07P+JywDU7}NVbfE%_a zL3{ZOa=`6jy${1^UHk=Zo;{5Lhlw)q7>}WoSjay1g>1)HFQ7v*WxQa$*^Z93lkTaC zEQ09Mgy8GiX@%*`@{$0`G|WnHPc3)#U1CaR8yNXY3K%)T>xGX!h1QO#$KaC&{tt%^ z`uoBW9VMY#n{9DNPeo**s*7CLrpD{#rRg>U*^|U|$>y5?xDlSorIv_j(cOkg4f*j? zuNK7C$_C#mo~>vY;3>T5|IbSB4ZJ@{g22gNnP&dmOdo%jK^r0x83Ag_WGJu+_R9{j z=QkmcT3E4JB4W6~6$hVbsFffR2J(%jx3uj6^M3Ssmk?04>Y^xvrJcWdrh!$SmCSap z1%i51;QA;vNB!HgM()n->+0Oi1>j_76ET475z|Bg<%h~p(rZs3+|R z2&@d4G0&KwIKwt2kkwT{w4%#3za`X1-&0g|sEhD=$I?A|H5#Xf9MI^^+q^j>K_pBzbXs@r@(Lqd7*Lt z7R+)cHF(5Y;*&MLxXIh2ua_?Ofm@ zaviB|aw=_IDiE}M9X+Q*MvrJ6n^MIw_$X>JVdQ}9O=6ZM8c0AD5` zFaTo@W+gxq`Yrra#wm$NpI^@0xgj9Tp{R}YHZ(1=+7{;9Ivj(G-zwBfa1(MvS3w2>6^MLZVAT`av&0I9 zEV3f%BMO0W^$x2l=p!QN2`zrPy%SpP*zAmcGJo=d2t`k37rli+fJo`O3mXR)XYgvG zzYlK( zqGY1wo}zvp8+Aj(ao(JctJ_Q$4!YqSo;0C|4Y!ZZOzW7kgxBD6qKFb)(ELnzhD^}a z4|jc0fqZGRu!3P{?rJ$Tma2$q?NJ3*UY9zgEaRIVxn! 
zuXogH$aH{`G+Qf9&OCFvj(YiXGYf(s0hwv1f*hbP5bXWiHz8=|+o^95GywbA!){{t z9R;$I|8*^Ycgui1a~aTz-&TR<_nB@$`SN9;UkvtwY*GM%euvL_NU#P0muk20`*&!S zuLa+n080veErR^Ff>U=#4G=2#jUR){ z$9MX<-cfZSwGfzhtptV%VPef|)G3T@az*v$MqY{!1!D9!+2-kqE+cw#pda+f)C26B zK6b|LoJRBnY7M?SzIcZmT%}Ou7-5><5p<5GkT;mt z-5k)tIKDQgCyx$*|-=hk|^zcM5S zg2wl0ZK2`+zJy?*J=LlHTkpH}cE95NVxK0Op+!Rbl^-|;0;lh`)(|TJn$F*5S*lC= zlEfgTBKl*Ddr^f}M#UJsp%>}3sLotEGeNyli2|SHJywF4pDgAe1z*8R8_4LkdGN=J zIXe*J%wzf&0@JO$AEyaj=2Ae4I;Ta)u-^@Fq^sVS??42aK5m3cgd9H0TcS6|9a8>O zNrCyaT?K$&kPI$&%Za+=`ee%()#pxfyv__MpeB<#U{`Qi>CKf>BMDTeKTy5^ z)}J>qIj9qBuril4N*l{5@hrtK_r#OkMMIq6XKbt@_ZN!M<$@sU`E~%a7eUb8z)$l+u+m>q z&rdJ2f8jUk`CMV|i0O0uKS!$nM7{}tz!_iFWA?Yz1IAh2EG#|CC$~63a_^~8TC`U3 zh5W!Zqt%mdL$NbHP{CKv9xFlY&sJ;%()dSpnL!SJEFPby=Y~ioHZUg#Tfi7Gyov<@%zh_1c$`&Bw`(Ka1PE1cPN4I0s~(e*Q=`I88o$4)3P?@U zMiVU0NVei+-KNE(9PDd+44P>iV&eC7QkKzogFXUy*GeFPYa&51&zLS_(7Y&nzWTmJ zS`zJfaF^M~cB+@!3atlqu|3Whm~-bN)~513^N?|!;L&}Zgwh*Xx|+y#(L;d>$<08E z#+jqKJRuGb@=%XQqm4hZ9J zdR;Sm<~?}b>*>Yd$N%YiK;K~Xsq6VJ^0SN(tnmMycz*U>eozMbZyW@^{Z%~ne^orY zR)Smh7#2aMOpQBA29IsD(L}8XZ$9pLbw`@*szxR^JGL`?{p_(4#QmiAK@M)5JDef8 zI$ZI`0y2*uWwf*gY64`}7o9e$`yDK}k_q&(9E@u(wk$uZR{B*dfu!UUi8rq~)>YFT z=$0l=J*nvtAk_&fR=hToP#H|j^$gH;y;yC`9!Z27SGQfIa`ODAOuYC7+t^V0TAj0#Hsk=gec?nC8&Z>GPT zA_PnQmdtif`ErH*6R>;eefp^VYyWN(1kT)*8PrPP_E%*FnR!|c+PUUlOkTW@uqnOG zQ%SniGJWzX(66YZshDgP|L6jIjX|vh@jvUBP6o81tL^C0e`6&`0jytgM2u5uxzM?K zBIM1gZIhPx}SAX!eFv5m3EYqaQC%ZeR{>Vh#X zF$Hw|wQD5+DOR&Xep|+8R4)6NS>E@ORjGsAt+^cu&IAsRTUEHj2V~plr9D~S*v8TT z@i7hY2h{_LkA+atpC)yo9Q7l9it#Z6*cAFO6GLy}{F2@^_?iu-M|6-QS%P@jBR7`RSdT7g%`O=O z9v?Q(946Gb)X#_W!sgH_*F;_Zm@^TEg7jf}+UHqZ;!vnzR)YRz_A{j8L*0W8mZ(Jq zCL;w|tj_csv?paso^Rhj{_&u@5>!-8DI6ki?``JA@Na7@7+}J48|+Y{A5~@uSFOgR zJl0>Js?_3Q)M5A*4$$sgn8wK=x_SqXjx>g=yjhyQJ;UyM_4 zbh*g!unZUmX;D~x*Z^~#7S4%%rT7|ek!_eQ7CzK_tOU1yZjV_zq`fAxU>!02V%6ic zR+$o~M$U(9^Mv=5s+8xW#K#{4M&9wQPj4?R6i zWG^x2B0Mh>$77z)j#Y>tU!kTs9ScM#p33gk#WM6$x;OT0MV^4&&d`+XPA(RWk@WK! 
zo0al^R)Sw)`p#FT6aF^SB5OG|nP+)(r&SvCBpns`8edgEM=w8dCi)Q5)XA42+whr& zS_u+iAX91%auyXf`p$DVr9ZQ-cUs}Prv8nU;HmZG%E~~wSIt=&><`EHg>OTIF3utaNMfjEwN(O(I58)FGUx5Qofq|z0M3M&M+&1t}d^W zQ)r1O>l^-!E7Rl7(gC<=Y3&g*)8&V(o04-GrT##EJLbCpp}$-6`* z+C3No$TRX&^>6UH>GScjoG_JYRTd~Y;qOeZq=-SInf9!e6xz(ftOP0d4A9bbgJhC| z*;KJ?af>U=g{6vfnBSeg(~T8bJbBPx>WP@XwzP%X;q6Ek*8L(Q zJih;7Fgq|7`ZVO;%$r~lq3sJd`{<2%c#I_UkBXi3Tfyp3vPZ@N$k!P#^*Fe%-Wx;= z7OT?|mB2iIM*U74^%-yKh1!p!fv{%w(**n1IfpMx??MPvkbi_e^FjqnP<_eoT_-~S zrsVT;48GQe%!Yqc4!hK+w@*$2(QjkWH0aa6&@=)PH2bM0EbyNjKPxB7!Hiyr>u`O0JG6%SaO{3Nrzeq zk`H1f008APcs4ApL768#8EYAZ1;VIrrB{h*q22>L&}7Kpy|o0$(x)&}v6w#&+6Ga- zT9Z$IE{*@r^}-TX>+*vC1B9UwDBv(Ffm|H+h04GOs-o4mG;W7g1li5@YTmdNJ~E2x zO=r$?X7zw=IRc5TT6z)<8YBL68;8lxp_f87+FE3_4!4^(PakI9UIp&jP3DMTUp;nX zHe88)d+rHPGcezF(e5(q{8n*H7xCslE5Uz(-Wvgd^FOuaP%A;*-zJ<|d*x~_cNo#k z`3tE2`c8r8%jm~@*@46UmlJOv3JR-$PdL;{kOJ3AV5_lhYcb9w>zSZ#G@O{spm{<^ z#0v?`Jsmjzegn%T4LBd^`BD?^+3( zKO((w%v0wZnM(*N5ifmr#jqYVyPi9|CKz}!oM)GEK$unJj0=LESS&E#)9<&8$W(rG z&Wf3a(pK`3XFaJo5ZVP?t>R^N3mm}bNro-==;NCzfcUvmOs6?( zLHI21u@V43SoGg5CQBLT1#xAtB33Oq_)mA#u0{-oL^(1q^t03-_+XhdDL_N>_sm%SxOp(yL|= zD<2mmsvJ-~w5M2bs52$-H%70XF5nE~22EGkEv%6Y}%K8KY+St@KY8Go^*k@tP5b zH#5Td#U~2r3%tLQ5Q0_uxq9|o=hs#AANB0{oIMx*^(+Wn_*FgKe_K6%3hX`d2Cugq zI`yw9zqITH`L~Z|J?4^G91gd1Og1?SUp;%Q1gSq;v5Dd1=f7cQ`23H><8c2z+c7EA z2L$3xBipYZ=i&w`c`SE}8Bo5z*ivI8a{>{V#aE&}f;?|bQP$bkM(gX_)S!@^CK-Yw zdiv4iNn+Zk!1Z7P7PBb9^bB^3hxG}e@hVNd9!e3@RaS3Sc?*sf=0n8;xh>WOxL@V@ z)b||6=(9jkvs`+N8dbntm?aa-DMe}XfYb+{`GDdXH_HU~DtQR9GuY5bknf5h|cWf8y4K+>Q+ z`JZJ$;JaVNGyGS@vuh==p(&tW!I42uOS|WJ7PFP|6hXUZQ=OPI9cN$|6UELi*pByD z3DSPj`ydC?BfXg$P_|?IV*yoeo#OCZ#7-utxrtYO-Adpp(RGpG92BvS*Ec!_w8*W0 z%}M|Q-r*4z#0y1~7kaS7b~v<>5b}`e6&|WLta?+B`6ThiA|QMG`elv5hCxo0DEeYn zD|uy?$1dbES&D6#lfGT?4|$-sD=;g;%uBRUq`Bd^9z-NWHyjSe)9Re%D<-pKI<0ng z=Ps((9Z+Un!L3udVQa|2GsXzM7NJAymBNUgYw`G8M?UmWnjC%t@R8ER^QZN48V{?hN6)8{y>j zAn^aR%n<(X$ZW5SzLVW|y5Fsd^34e#@V#A`L9GOne^q9XiO+e;A?8w_R!94OH;W6* 
zd_fCC<&7m41gYx=ZOLWh%Wd%cD5#Yn{bwDM{d(rju5x9SZ>$6Wu)rljob0$zcJ=Yl z!V!H5=WH+QBbll)z9nO=wVDIhd;vQBMR$p;?vN7U_xE2;C&wqCY+Rn+0v=Ech>>s} zCx-2~0o#>;a)8jcVc24!Y~h51#7uOC+T|P>WdqT1L6=Ect1ff<1F~(WzFK6ulVM`< zP^b#0tk%}PJPYvdP$#pa%~!Ct>%0tbkvt6u8fwuAPx5KSJ(k7;x|KUsS#(m-;%>Z)6x zP{>R_e75&k2{LxA1pBX6L97JSZ2aN^=7j}QQ>ijYPAd-RLwA`zXa}#h2fJ9{gy~WN zL_Ii1HXF^BNQGHiii9W2?lAMAE=vtFMlzkvRvW9LfHv4LD?w(I@LVA2ESI|t=Z53E z50g7MQ_i0HZ_ux=uP|T~6dZK7rhNDOfxw1{I)UPgaj2!i>l8I&nu{E%kDuLLPI2!} z+y?AAYO$_0Z1lf1C9*%AS=j;}?6bzHT+=NJjCqvwfDz@NmEZ>?e-j3QAAE&+`)@;S zH$YH@G~2h>)hCDDS3HJEr7Lo)$h^vc!7tgz$;_VuKGb`x1h;>(0t;c95jDhc&RF

9J?uR9fRSN| z-w8#l)?e5@25Ke9{D~*#)6Zp70E?2=d1lPUWvRpO!pkPjw!WW@LPDdjGRIme3T{C+N#p-X=P1Dvsy7(%K_Yx8mXPA}1$LNMm2;Jfqq7jdT zfo|UUta7|EuS61!p4sl!R~I#B4v2FHa$Xu5B94R8I&WCKLkppx$7>c7^N-Qd=kZuE zD+9UUItAcXfHS840RjK3!XR)l3}=wnp<}-VbDUC^hvG6DMk9@f=R!u*@}R9Vv|D$IP_oW8Q)4|loY$6Bx6sM4;~yk)1cH=C%87$f)SYeVx_Zs=(}n@e;AXA(MzmmBqTUu;?zy zw+002X%w!tdXhWK)5Fo426`-5|EvV?WdZ^}gs}&+5*#A?E&P7LN>EsQB<8*{n`Hk@ z6iovzx7r}17jKZSTE5N4Mlnn3_40y0K7j5@ko8Aaf)wC!xvZ0kBc@?V%+htV$x9}_ z>2q%ta1qDba~LNH{D|fOqXJ=bo}#Qf4tHP7*l}Gmx@J0@ax5k+$i1gxVSWz(Ewq`1 zSqYAHW~vky_4Yl!OptXYkp5zpG_OPWZ9CHypR|{H^t_n|MBVUYp3p5b1%jw63?ggw z{!)SNZyHFtom1KiWzOc!E-(S)8pC+kI^Lt56;V{cKjK;(a#gOJvW{gUJXPOsMCpVh z5chD-;EGQUR+|)er!)N-mbjn`llg0*_L1QaDx%K&Z|^p4beI8os@V(C(Fypr?HMf}XwmO+L@q zqaX-DE1*E9EMX~VZtn|nH4ymG?x+D`B{;?o$fYR;?ftb_Qu3)LQGN3VX`Sfy z`78E*K+=@l4O@S(Sxn`qtw>*Cz3K59f5#UiCwku@q{-A8oBXp9>?QiYc@VhdQ(F$P z5*%Xt+l1GdAWrCW-aHx*-^UOnM0DsKT}+g0OZ6RcZP)80R>$Yz6ArZ!+<|K)5GP>q zJU{b>(OQDPG-o}FC2?I()D?%VB8Gg(8z)1P95B1{taj%E_|YlFzS{CDUP3+9I3y|# z?dbJ`;`$C$VIag8nb`m>8p)`1oVAE$@!fLpeu;yjd@S^|pm4hS=2 zczJP|@P)v;_q>IyRq3ncbju$i_ogb*Pmy}Niir0C2yCZgu*_Y;{ng)j}OT$c3J>p~h_$Qo#!wSv#`9xFl4PZo2Kfcoi1w% zcQJ`unVf6Uw!CHyxIEep9G4d~UVJ?!9z(`ReMMxgZ|c@*0ZCP|d)pIpj8OH!tOR*j zethTRFRalq7Mw*$9sxaIa1P5xE-RBOe}2ZnW?|=m>bc76(KED_aI|6O*@t^$yi?cM zG~1o~6^=Z8I30Ap5Ct7@B*dK}yZcrg>r5u!^7YpFc+%c+_k5SplgP|GV>FkZ12io{ z*4|FS;qqJBfwdp~rWk08HUo6Klix`#Xmq*TT>KwX&li%ZzNCJT_(oShr~wQAu6jWG zU+ll11_GCTRgdW3R?mwdN41tD#c*Zh<&P7uYBkz}CMj=7s#Qx)7jRemT>OQ78~0cV za(}jBV~k!x>1Q-`_K(FQ?xd0Hll8%0%DQFb;>{MVxBA6*Hbl~l$gfpb#72T}5dlt8 zPnKntx>%z0fQ%^Y6B4RtRAR1PO?W$=eqJD*z*!$4;3;h(6F#Hvct`lQv~`$z^-W@G z$!o7i?3kKUpY>V5j*AJq+1~w`ak{%$FfE5<>GxHbq}{#88ke^orYR)V3}q%>z~{?cJL zZctRe5oY0Id<7yrN`MHwU^u6qCluIY^Tpw>?Eb`PAFng4s!`6yHd~~tm4(6c7PQ607r)AJ;A$f~0I(j?9UFiKd%t|1| zzszCdY8b|y7O(ike52Thf#8%u71fjxN7b-|oI&;hWd_<}=QYwEC+YH}sAx%NDbi3d z2^AydXLwH?Sks?hH4DgDOMP^D-ggUYx_A3&A#WjylE!rlqqP$`B_td33iM!r&aLYN z1Erax@8L|-BxAENSAT4YGbzFRM_!WFZkS$v`Bi57f(qI<`rjZGEF=Vb-y?wCB3Q|P 
z-XM3wzx>)GvtK^%XBAMb51Zco_YquwP}5+XYKH17jO)1dc|x@L7|3O z2{@0@u5o+gc$#x;IlhtCsS-iBPtcaX;u2=&S(z(T>p^;+-l5{{7 zk4Zn$gPtwa}5v|MKK{>Cbv|HIx}z(uvh|KD_%(jrKy2nr~TNJ&d~ql6$R z(ntuq2uOo8NF2IDK#-C~x&%e(P8ASo@y{+Q%2n3O<@bM{=l8g;*WEMoojGUs%I7mP zXXd?gn4Uba)F9_Y#fUyd#FkQ8aPG-ECIBwvc0Yl|pr)0xh-%O3-hZ(;R+d!p?Ay-R zS72^NyZFB-0c_8Kz!iI%wmE7|SL>6-hgQE$ez)*guVb;j?X#TlsnY5Pg`o503W1vh zny@tuK?!o9LjHB*{C($b!mA7eNMGux)DtX85+^b*Z@$qiUjO_q9;b`_Hezl|HNZ!b zQu@wUR=K#H3|B14nf{i;Rjcf2J5T1To*&+j`M5yj3`GeB3Jv;*j*B@vQLylp>RfjS zD%-l@`C#z=D{`B+^&j7e9a7G^C42^~F_On9u+ObTGx*U=ajZQJ(${+2Wqr1wFdkeD zlN5ki;cpYYv&K~kmGcfturmgRC4Y}^6;J_#7R1Y5DE16@uX7+=M)gmipx6>DyJUNd z1AC2;RaouA&efC&UrCd5yGmR)V#xz{_!PF~FQe`alAyAnM4PbdKZj9_V&J-#&NA>)~i>1tXRuu<(G=YtvCje7Z9!}G?46@ZHHs>tIr z=&zLD@hmowV~NBSzuD&HU+zOP(~Z9HIu$ZQxQi0dJ`pDDvM_%aEc4oDFv(5h`OxiN zDTDRXYYWDLcO59M9a8E{)7Y<1-736b{k~I=OKU3tHEcK=pbWD&<+m0l#jPj;YU4a} z3g}bb&MfN-Eu<}Cu%)0@E@P?Ztt|PBfXbeo12%N1Xi2YINfTu$tBTX!k>QX>%aU3q z3F6CNM{Jh%8{cbYA@k0=QwiVJ;&%eyC!@d4M+p8?F`%7=`n}~Qe#k|3LOZV?<|THX zOu!F~?SI=l5sq@-l*j(6Vmkp4Gzh&g{I}|e(AW>(`G;#li}t@!ehu({J%Yg1yQ2mO zN)UY1N+%eoA!}T3u+0u^B2}IIn6s7A!mO`wAM?3)Vi)Rp^c%314nYa>4u=xVdc7Dg zkf41qV@pGW^N3rlf?h1@n5IPSJR;4bW&`da;5v?4U_8=juEPCnJn5717Vn?g8ls(- zes=wXH(xU-UJxR1C`!3`OgOG1e~9LXB)m+!eJYieFsYc z<-XwK@8FC|6V>8W7-o0#oSbYqK>&?dOSLw$o^M+1M)GDst|v zLBRowIS4+6TXZ>s6DxxMc`@fwxa4NB;xJh8tO~B=><6lZTX<(eaMXK|;AxnQY$&I8 zen3NqOGK7v_ta40JCQz( zYmJ3B`l8T?6yGndF+FgCxCe?7pn9d<$qUj)N$q?UskX}S3M0U$e4;Dk3DwPlQ>z4F z4u^D4E>5ZQO{C4!IQVjdCYdH5(xxWcgtT%B>7T-HPAgsP0lbQc2zu)@kIAWj?g-je zUpvEEA5{_^HD)|i&74;CL=X`2phO>jKb|p;G{o*MDs~b%ufEbKx1nkXYuaD5WL#)( ztn*8ufqohNU63DIa9H<%es|g3_&?n9!(bi7 zW~1h(LH4$1nP_#-%9bsoJx9D=g5Bb|1lv9PP=coit=KSVy${GD=Dj?@h3m}%0+GLo)me#Kq9 zaqL7454G3HY(P5`XY37a$4!&o<2O9-Kayc4v2q56ZzN47X|_qJXq7|E14Ric`5LN%H5bM7_tChA(eY$~o{IDg zB3rk4sCJL_t@l84QVlN;Yw2{lIwzbp+)Q2pvHjSMlu?9gLd)VCq4C;)=c!JH7)r8w zERfU1NFwq}u9(louS5yO^*vTa<9jGo_WvUD{6g6LKg9N#=LhTjQ0WhnKVB1B9R#l5 zGf&x(nr9a!Na20vDBNgr{8VHvxH6<-JuzxqokQlLX{7N@^t%e@{$SR8A4*VoK<|U- 
zLsziA#O_p5@t+OE1I8QXBU2o_?)Jqq8oQHr<<3e~J=Fv65S4 z9T!TdA*;!S*D3bIpI}j9PE0ce=HrRJdcFPfkbTq? z{i_-+YMKfs0sp5S^8x6tR}?gB&|D=Kk-c)0E?%iBB2oqF%3F`KeFQd=4( z>l5XrACvno+3AIz06M3r>gKZozv$RwbE1(hUVHe|E>mi;E7aAqh%J z;I5geb;C<^c{d+rQ$QoWV)I6-M1&~Ncu}qUqUsrZ?s!+GxFE7JnWYt6={bnDp(sIl z4G(}5r+;#>iH_(ICvC)I%WnI?sUBt?0VG8AsfVWzsqO4gS$HK$72F~82qB>u33t*` zbO9bRLX566OZ<60=*a-7iGcFc;iWo}4>|_ulVwsq$it;Yujt%JyWzpdRnwIJq6Dzj z4gxpqY8!$QbR4y|=gILU#akzlOh-cu>HUx0ysVG7Y0d%$r$*P81+EUJ!q)aal;GJe zO7N@S3qnx>UB?EO#4AF`4{EH*czU!xiruz6d&NE!)rTl~Zcy$c8K9eM-0aDYCU0O+ z_*9PibH&x;BUhJl=r0jo>rh#ctsR1h8j2DKr3b?g_qh5X*6}=S_FxbHm>|N6LP@yk+Pa_`55?pNP6~PteZblGbvX* zyh6$~koE4ETQ zv8e=ZLS)r!j5XK1yx`^XtnJ${>Zhex_89J3UgbAWh5;W{vn9`OYf?N#nz~&XOmndt ze|W3J1y7}_;x@@$LKOPHD8Zj-x@k|-<43LOmZ6>krt{_JGLEa<;>4|b;uU?CzV4;0 zm&tQU7j~SoCfJ&WpadmQAzQJ%V@@r|&VA_k0n60q%QWkpyUq^}IM8eKCO~A$YoMMZ zCU7v*o6~eF#f#1tvC<56Uu07DkN_-ERTU*>(#c;5v5@yCrrrOT)Z&3V<#UxA^w3sW z2pc-?fT9HTCp|2}7{avZNC|~jDY5w2UDHP78=Q4bZo8!`=SWZ=vWfObT2J(|Y>bfi zKfg$PyF33SLNq0cZlIj=qT1|G&LY0fdNnYVQge=alA22N znQ>y_A+`5(vS10xpzWAV9nSXLgCMHL+G%wDu+>#O@7wrSUR`(r5XxOPj-T)H)6PxM zk&{3%IGd3bi-ocAz=5ZTTIjN5;9rygwl_fF7O3`iQG&I9CBHvF3078hI_>fj=0n@XLxKik59pV~?lgCN%NAWGu0gpbbEusT} z2q=`{0?@h-U7zqvj3V>8RYD+{!}sRM+cm!I}8UG0|c z>{IaVh4M84neK*E<_GNt{&6BSyc

REClOl)IIGpaOOSU*o%(ovM%%c3*qn6q4VI zgLaGIc4qqNc9LH|e&4+R*E&1IR_pGlVFx8ZLpW-sJ5DM@HC!mR#Pp$ha>39tP-nF> z@pg>=?Qr9_p=_tvXJ9KGf)bP+4khsNtGT&Q0f#>x^x-%>cW6^YCMqH3LS)^H?@5ukR2-0&Mq=cGrq5x@kTkurfe0Lm5>P(4Ta3Esp1sxo z)JFP>qu5sG6O%^oP^STde8ovGUb{mIyncm27vFIXgCL8HZ_qzg(mnxf7jO?_&W(Z< z#XpAVF|egn;qYQ*NZ=ZqpS9cui{LPCqrh0Ukc-Ck7spvi9m@Zr1m9%*`y&Y4_O&hF zK?%^Xj#}ZQ4~(Re#y#1KZq!Y9Fg$-cexBQDJ1{&D%#pZpC>-{8 zJw)*pff)0pcmo@H6M6in1U;o$Zbb>(SyT_LuI0;@ZUDMtI^pbXd11@Fq}mds({y*{MpQY&RMH4#5dAvFMn`q_e6xfRufX467qHguYe=F(!MVP0=MtB);lNx8qrZ}xlTVlf{#H+ z@AP{^0TE^!_<@IWR_JGp!zM$L zwW@#-99e*!feq6=$pRKja5^X;o4xF2xtyx_QxM>B3=65U-hfHW%&ordEtR&}&`Fjj z$JJky<>qLKJsv;=6f-e)gc5*dU;`PxG zP+4fPj_&TbUH4=(vjkA9x*2&w>*SQHrEdqlpyR5l1(~)Dfu5ofDB{aaYNss#F+iz! zJ|f0qk?fUPsU5xL9iGZaR^CsiWqr6NZj0!%|H?f-s0;LUnD9gBfFwV(Y~PTA@_-Y5 zT?G1NIN%>q{(nRc6##)d_S{2x)b5Fj|Dw_pJzr#qts`F6Wb`>l{8~ys3YC=K1MjWX zco%8d?%9VDR2;No<8ITxz}An>{-4crm8R@{tMn?>9SxP@f#dTFoPzI^d!5IxpsQWI z%kM8#3lE@l3oWloYtLfwrl*{~Ml4>fi*tjSYWpI|ru{#PhC4&z*P4G z`P&Nv5{G?$>RHT~(eqZd@w&ySJ*CxePBcAD=sJ7WNV%@hTLn=)Zvtp8Se-c~k>DC~ zry88Ynsv&u6hD-6RKGO69dLKle__rsObWl3i{?s!;MkWJw7pP>XGATv5tre*GOarm8kV{F=nYPpq>NpQt7 zzQRc+oX*l1(ZJP|TTJg=J!}V4Kb_9Myx}t?lX`{5{ql9s;`G)e`!7V0+Z8BEpinVN zChBk&RKa*6PJ`4OBdt^puGOJ1%BLn3oGqK7c*s8LbEb@uMjPT|!nt-fjlty3@@AUP zr;^FFuWJQ9ciuFZ00QZ4<0K@jkaNv0Om=k7@(0#*pREa#-150V!i`Jpj|?=>$z_(7 z(X906aZg+HjSCd=dc;2rd$A&g>QCnkm;MH}%?=jrPaffi*8M322S)`7{(R#Mt@dx< z{s(Q=wQI8-lmKmKKP6aS%;Q@HRG5>>I}@J*)=00T;hw3V`{ZhwlBLh*G1I!O760yZ zQmEoFF5I^gussGr391h2m~Iyi$=gV7m;HniI4H%i6j^1rMrJP1nci#{)(E*X+;zoZ z*c0=v*Q$#&4sajaNV$Sr=X8FJo8jVk1B2n`=cneCJ+7R%c*@N^D0>;AZ7510$y_g% zKzcFEgA&)ma%IqJjB3%&1U^38Kin1L*cj^KA+@b~E^)G^M0Wie63$#XvOG%97dP<^ z_{&$>qKz<@RmpUL(y$0s_0uQljw_r$tw^?chW=$gGAb>)hju~&&EyK|+Fz95n-G70 z1c6`fYI_GIK;u1XZO`D%Az^-GRGtyKuP1IBArPVR?*5Vrqoa5e?&UrsTT5u`(8W3lU3Sf0AB(nW5O%_FC~mO6L$;wOyaI|Cw^|#+ z<=~HdHLwNTEz>D+5<58`A;>HjQGhW|s55U}NGiO%zJ4=Cjn^3Zgjgs_upD6Rg>aKJ zQssnO%r-IR`|Tp%nfu(7m%M7aD^O{1`wl7U*yQ>dW-<@AON8T^R_?>>Mzy3gX;+!q 
zu45Tm)vRJv0SqxkURE0{wluYJEoE?m(dULbU+f}3(1dDfM6Nq2I|f^)+uO1I?POno41QSUBlkwz&J6YJUzUy$prg?w+ zT&nl2u4Ae9&gmAlGC~?^C`y1q5k-FD%_%7r;fQS)(XeZC^o2h63U~$*5Fg?UB0Gy8 zQq%U$4&Iz*1-5rpQYoJbWIrCnmmw#$$Uq1loiwh;F=Pj7yi1%m(0gu@;#uii4>jYp zV!Ohfqp=Y7^GnOaEbj05ixPZO?(dHvaL=Bm<&Rp^ymyhX8%-FILr+T64WZsmR@`|y z1r8(gr!fsi8@u zaxyyd>1)vsG1Ia)9+Ng3HPPtXpdVW8>-GV7u29^oO6;Sy1}o)gw>?-rrkVlPTSqZ5 zAQp~uf5=n^kuww}=#4b&dAybU!A|f4UGs4~K~?Hg(r26IpcvjU7U_3uUSzo!HQeg%~?$OTRF zUx|4^)Dz!+E-Q~dU7!TlMuwRCYUg8QCX`?Sn$J0yS1yAABeT0&5Q^sMb+L2uPfY>E+Z2+523oC**oc9w0k zHGyHN&83L2d_1}8gn4uFtdtFFbu@XG4IQg|oRfc10@xk_fnP(l2So|cZvHFz{Q*j_ z+@Yi9K3+`ks%Y4it&86D*zvaBm#D#yLnJ8I3Bh_FV7H}^2OQM>CzJpH&TR>x)2I@n zzCK52g=lZMGBVq^2=}Fbnr-<4<%X=MIdFyd1baeOi+)*12YVHZnrAsrFBroNeftr% zNF*lbH7rOo3q=VcIUb2^ARjL>e1k(DNlEK-n^cJ0InXs17j$8|8%q)CkW#<$Zu{}W z^Ix2}E&KHe^F2gj(XYog7SH-UeB<#kv+q1Vz-$+3uRMos^wycEq+s*2P{(POCl(}~ z724@2NE8BC*8w56yVM2)&kd9`u7!-AbXxY**QeOo?97GjGU^*te}B1OHnY2n`Ji8^ zg#UYx9}XRwhAizvC*k*szSn_+Bl#iyJqg-LeOtlWH_`FE&JPKG1O(`o1xWlm!EbcJ z*YbZ%|3km~w~PO95y<2T%J)x&Ong#)Ed_ylcSj8nl)&t$m97~t$NwNOPklLKh7o>g+NUN5jwYIjxhbEav=n3rTp5aEhv)qfP7aijQh( z8#5a^-Gos|fSrr1;*i3bpz9eC=6wa9yNMKgxL0Sd>80sE;vA2-bQdCUC`u68hES%C z!d$xAUB!yK;+@c3$IVaf^A4URf}`uTemKq{1(xi#MS7bW;X{-_l`Jl!|G|)xQE7luXzPB*b0ZB1TSEs z1kERwFOXjJR|7JAdEQM_8XAi{4x%bSe*Ks$>aLy5)e?X%HPseA^8(r_Bw_WJ;>u_Q zYpV7N!r|=Xc$)SV9>%LXV%|jw1l}iz7s@LzfS(DVw))LX!kojP_wUwBL3gV!1}*O9(T$>g}OPeC|Eyd=|yK~2t@wWd?so4>yvNspsR+Cl~C6Mpryp!=k=w*xZ7Jy4WjzHhq+ z2Yvc^THa+WWRsXxhS#*8@q`0Dw`{+OIm3VA-XY!d@+CYiYMKD4REe~4csuh)n~mmK zIYC`7-lXfd3{OX21u%6pH6p?=juk8|1TTZucot?iyxM1zKVH^kfe&?#sRSg^N(~4^ zIB;^^tPK6dj7p@<)HfD_Ib}5&t1$1rjBGnf_aN{?-Eg3LpfwKlb|miE_uU|01Pui4 z+jEcKQM*TqFW+ZLCP(N~zB8?xf;sn6deoJOfh-9HY4KK+QqaGDz(M0dD>iL(rw!%@ zG#dZ2dBE45)?R%UjlO`ZG%stzII+d%!8>ss`E(ma%ttO?^(1(p<^|ft9#zL6xl!Th zZu5;rT53-!qV9%nhDbfyE1@WF086+8QvCY&1mPpgbB{kFBerY{cw>w8m&0d7y%h9d zGJu!|x+}rP)es^xQ88qkL=EJ~EH*r`ag1plah;BNY1zx9YPJR$4!CDzAP4?!Wok_Lk`Gsk-yjUif+#An5ReK_Tm7sP@*i|4O? 
zb{0~N0Zvp_?f9!`=@ollYCIxR-W}_ zXEA~oCGdjX`|LvrnhxlF5PclE{g%CX=4Sr0fksgm=vqc}B~HD#t-NR_c@`--uxg3G zNdD0)l5td)FJym)62Jp;)#$yeuI?%JBo`UzebGv-yOXjy8^ohRAH=1Emq(`nxH*r% z)Xk;Gnx0^NK=n4gte4d;o!3&EOHoihSeBVjoVvxJcNN9A0ql!)itfLEz$wl+p?1R2AUl zO_NdMkp?3}Y;Qdgs!_!p8tW5@^C%$VGut*wr?u|^$Q~l=nI0RTuNS2R5InNqTF-~q zs$BO{PsmU;!>rLcvU0J4F<(3bp2Ew-LYkeC;lJI_=AHYoovw({U$1t@|1@ zj}!uo&Ci|n$rcxQSJjI%a?Dh@G4TZigx*C7B<@aEqF_>fe!x!4GVLT>h#V|C z|ArcMqV!t%VfTWEZ>Au9bZgAps!v}2v!k?ITum)&tMm-9jzHjLx{LP(0L3p?-=7S) z)R~z2@d?_yqPJPId4eayW;1jLU47vAZoK}B68u)&-wQzC!Ch@bP=d^()^@tOl6wV{ z(U2zPo#(=7;=aRaPvhSQ(r8$rn}fqS0X*3K$3B#xWfvuo8g#v8UN#gW$5hZCJS|h` zAjjCdgA%|4EEUO)!Y?kJin3HC4G6uD+Ngl8C`5|q&3~E1LfN^l4qzgj^ZKwdbxyM< z(fwv_pV{3JM}(e$?)aEWfgHo^)nJIIp(ugs@)}97U$J?4NsXv`~W%@abeT@zbL_X?fy0a0uSwpy6~t)9nk^rhAYSWxgLMh z(Qoj0+SMTPc4p0D*`1G+lYrnr2RvC0EVkd+~naBCfpdy~`I~ z8V3hnI5sh2Id5}$IRw@q&h%a#eL_FXL!Wz7CiFZ z84cnS=+pi2Nyl;9egIVrmI zYQU3l!qDx=p?5)O2wy5#(w?rLTrgozuWLV~oHtK0Qp>W;+Hv5d#@?ihVw%Kl>eQ@k zVZss0{luw=bq6LX0J8#&G4=Pq`Tybs2s{Fn^A1YT^smJ19!yGs-mw10zxCo9#|v2j zr}8YZu+dSni5`Muco!%WcSh8yNF-iJtY@&$PN#W@T^ZKThJQJM_2iiTdC5(Kit3>v zm|}*Y1o+6oSm)({2#ojSmQ~cgBdSIZ1eC1NzM%xGFY1JiKb9ajN_KYfm}LgcUb8Fm zQ^pgn7ti(noIXZ%NbSi_qq_CpsPMX7|6HrPqA^Iwr)=r|dcylmfH8>odhTbSuC|S| z8FMa+sHEw*Nh^zl#h2)!P2OwqVx>hZxhdJJe^CP1rU8LRq1xL;3A+E4{QdwXs8f6q z7}zLVHlrQ$$(-Ft=F2lt5_eQHMa6)~Bs-jbeVFot?n==9pHPBW;7$4Cgfc1Lu;cAA zs@+kqPf|n^VF^CSao4A#Yd)811P1QCqx0$W!7QwMsNjtpABs`e_gujuxjO+%KvC)J z;~O-PQ3Dhuc;q#eY2e%-)6k69pKm8a{oYDXqo&piUC7x*UejChu+I;6MoG`bYZuWE zjPOWEhq?3veWxmo0sM#y&+@2H9M(q|+fB4w`_*!r7d7g^_rFjp8 z^j@H`Ap$Ga_s+`CeG1FW$uh3M(M5=#3(K1~lCzDdf1J5|-~HFWg24O!L;ha~Wp8GJ zAF?u!wzDV?UA{*{656Rv2{{9r-K%}$O(|wUcqi2`VRUb{nNxC z0{{1t?~gjzjZl6s*d-jGS`me^z1vWKNf#??QyFbO=h&aX6G9d;u4`ljYR?=&|;YyTc;a7|~r@sCAzm zqf3#sws;V64^Spj`S@n|`4!gmghDjOVr1=XMR?FGn9Y0S#hdmMm!}~DhoS^Uj!QCF zr}7n7D;z;1Aenv|I$MbopKyvoB1KZS<&pvp*_Nj#5`|$Z@E98jz6ipWJ%@zgY?HdC zJK*riiOdWy6nh&;Ld!?F=~<42)jfrMI@{F>)tRF#48I3oY^sYv-`%U=FG{ed=ijA3 
z;PJ0*IRqt`K5B&v)!-4(Uoz&T&X{|2LxArQJeum&3uhZiG(lKV7Xq`BVJjSh5_G~q z2>?L&)}*x{rFE8cIT3a*0;AF;LiEqigFar*FK6omBk^MZihw~#%{lRxPfF4*j$#3v zTvZCk8dyDt#M9or=37Spup{POlpz12eR)}Bd9`vPby5wB3RA=vce#a-!1Dtd5gI}y z>W9U}y|nS^iiweo1dm-b^!g%kwjP<4mMOCE(($o;|9GV&KY%t8Bb{i0ev|VPJ0t0* z&E=UvYAsc7Vlw13yD4|G0zsfi{oI#Ime2=^Yfc1X%1ulGwNbH`o9n3sZ&Zqjdu_vg z8x-^Z`cDn|b@(??erSAOvrGBU%Fx#QpW%Zee>Ty4@HO_ALVFYtcw)D;hM)vXN3G>- zK5?h<@*B|xcwsMxWn`N&PZ@vGxLD9UD3fzitDkBbwwCvy1YHL#<{#JX)q&J_!A5XcJ^24Uc$!lEIuF^A>PrSv5|0B`oKnww1$`E&mBF%96knilQkOd8aDoxQ}Z= z+yg}kxKv3nd$otI5x0Xp$oUIhh~l{~l=KCD`E*w0CZ5?m+abGxi!D!OlcFOkvf_iC z#l%kPJc-bIPM>tQlw&1t{p}6HHGp2i@X0ldX$|Bj3*x#Y5AGK-^BNYzuJKx z8vfBelwU_b`|2Mi3)4NI!(4Ue91wVN&plg5?Vd{yUt>5t>-eQv3%$w%E5T!OMeIY}V_7d++i~T(cRAalO^>TCak=!xE^6c(i+CVxd=Mr zw=8;QHbXVW&7vd#YgqeUu0n>mt@EWf825c5D$f+w=P8lG_ej4#?BiC;+NPri2- z0sB}hOX}QbqxUXX@n;Wdo+}Gy^KUE1u|4xBx@(r3I?N(4lx zbqLZKH&w~2wFO_F(YA5_)IQY|tm}Rc0YipSBX8C#> z?e9eB9@2IP`9AHbqcP7{+44h=KbmJJKnV^0>TRzF+PUF?`GRSlgFfn@tAoH(-_3)L za-`o1y$rD z=+ugcOYa^|ayxxsXOE8Si*46Qyp<(y@9izUY?z| zI%6Gds#4{XX;0w*gos_x^d%#dXe%GKg#%_k$qR9{71Ea699d${0BDsUUy$&?;`sOpji-jde>$X&k=kr$;`59D zUE<1>F0`)uy~PC9?ucgYMVL7{F2$B%6z+$`zvg=ibPD}gqO%3V$^{RvU6xJacwFEa5oNOz<8Jry9Jo)n z$$t5f>@-?1+a^wys2H78nGp%^6hDiIUWA`Yk4v= zdbfRNssYsFH_@JQrxFgJYF@3zU0!?pI0G?|Z?uxn)#CbW$PLI-({@pU)&=5Di8!n( ztE!KyJz;ZbI`VIAgJzg|*JR)>!A0l}9a7X}EUSZ97u4K3bEA#KMc=rZ;nIqcuDGS0 z%xw{VFG?E+nA~n885vnu8qbg|!OC|e?yd8@kKRi?1jateBsFDh^cN-gS-`)_fxxqS zqNY7+QIC00lnAF;vL$!Rws1_XF@U?ms*Wkga2A$e>Evc>nZp+KK9t}!bOrWT3s+E- zpy=8V7*wG!xIsQb?3bZE#)W2-gQcq@`KE3L74ecD3BY^J_bfu}IupsWy2dz)A$t*J zIqr|5>|4Ha-YG>v=B5x$Ls5cW4R#_U4E_!&{mJ*(WZ0XFp*E~dF1ZQb({A`l%3G6% z)HGX0J=Fu(TeR`CL0i-#25T*tC+^&?qG%*USAJG#o)!YQ-J0?5SHRUGfB2LtR;Pd{ zUEyw>XDfJmWSz+QRjO9QUzFfiD*ibJ0?+Mfn(?SLO-=l~z{lJs+Izhzz1q!%Ly75? zPxruw!aGPx6b`d-Hn24fK?!=HLe^2b0AO!QafrW}ywEn*NaCjM4t(F0003_3@g zoQc8ftv!zA;*^>^YB-M9U&|Ma)l8MXdO8zOTF&7qP;a?cH^4Z9Ab)O_M}ENSF5-D$ zWjgRZSy4I{M9xr@AiHQlE43tneolgAz1NNatOGO(d@4hm2hVfpmY8! 
zF}rv_WMCw5( zS}(WRF*Q?oyrs%*q4KyKMhR2Q5R`xbIk*?mh(gTCxC*3@QGWwN@PA%%BzX8jyY`9V>F zH~$GG002WpSMm0&L1Ff~2 zL+Xx&-S{D;?!?-@A$;6MiQkBk^LAS^+xc@`=vtDf$Z38L+jwM)-vW_0Q(IP;8KuH; zCwx9}ziE9)YMw{$ynR;mDnW6;ne8Y5-37sE&FZ1p$7Q~tu2?gvU|OZTD`y#(&W*uQ z&MTh?-)(03A%fVQrUOBeJ1@!~C)_}g$j%EoE5Q#SDIU@A8!>hD>W>boYD8cY#z*-7ipDfD{}Wk zy7nOjUKUYS)x#JodUKS;==A-@dQtEb4K*4n^Li`+!v0ajZXmtRV5vG?SLkl>nL(uZ zQ5&$KcFk%1S45cV`m^Weg{l9d1P}=y`~rbLd~M5jPy%%2qgHsfW_ttWagW@(@RNPl zM@$+nWd^>hJ0@N+j=P-322^pwRyYJD=!1z8RG-)3Lcw>5|9~F+<~-U^Gl{f*>03?E zW2Z;RvoBFo1ObJSS&RAbTR{&4^4^%>={RupnF#WHG(#1#8qsqr(9zs!t#?rZ*R@k4 zRf;c!stw(%ZI$?$N)k?M$h z?-MLXIW!6}IW@=A04vR>3|&TiMnn^E3}+J_<55Iaet2->#+fy3sFB1c4WJTk9Q^0R8$=Yx!16TohpxGV26J`1o}*$J=x`PHZ~Gj!RF6KP%U& zV}!xh@;;QH|A56D1fMtgtam=+TyOi&i#bC50TFM=P_MZ%Bfzs;3`xzbJZitLcsE75 zB_|C@!g}YoeSQx@1eSHsFA}e66PMoB3K+irMoUlY(*)X(b+&Yz76Kr_k;>qo+f(h` z8CpH`Nw3)R>T-4Pb35li`&i1_b>2dVd!Q&m6NBJL*r4iVGXecqePS`Sd{bOfoC97e z^3Tma(J3~m9@0IbK^u3-;F$Rya(i`(o*z`O(^MYlF%KUY*Ni5u8kf?Vf!o!N5T)Hi8K5ai6zvwEwet`0GhI z{q%yCDW{Qz$-2Gq>dgl4%gqoae3{7P34KPobMGBePje&PgQJW@sRp@37yHba43rHX z+!%!s_0t4tcn)#^i#PU6{Om|Ny+l3{!3~;u9!-+lTPb?k)z{71$YNWbLd*k234q8_ zQaPh)l3-}`<}qxs#;j1TZ}y8?J# zK+#!~H!ubtmm@(6mx*7-afv3hgf0O9g$W;M9!faSKB4W;QvMX+hvxq8ndheuhmrZc zgFxV=J@Z%{sd;u$f|paT>?*r1>4uXu7YoW&i-xta56}zuO~0@=W|hBKlMmZJ`%r?x z19~4sA2ajk%;jh%?;e$*Yu)I%{?#?Y3*Cf>`c_h%>p zJRox)b8YdqJ&twTnM_43Y!#i=W&F`NQ`xgQ&ys43mhJ+2)0DI~R5ck;^OQP})MD>k35)3J`Fvq4uwtWP_eraWf|0T>03ouC+ubfL^WTEO$Mo z?vOT%H(KToR7C&`xoO_Tx|LSmrvB6=i#2YM@0y(uvTD^02sx6LwB3H#g7`9MV|FMW zb&yI%^FIGF^Z0z)ZR!FM2jKM*r^O8!t~=$yuSy6pO;;6S3FsPz#I)=N*X+Ge-I@=y z*-rcYUA!>*0cbsb===Y}HUmMw#P_#--{yOKF98C7+_f15C2%@Yo9#?|c0SMuCNd>a z9_TYoKl!=kmK`Y{)3V};JS$7@Zjgm%6l{+{P=cX@Iwls+!FQ`lAr25bu9T z4P{??k>&9o-t*QxWUdK{65Mlc$|3NMi_v=<^|}Y2`7)!MLCK1wB%Kj@=vh;hMxH}z z+mnCj&DzTlvK#chuc)xv?>WCXPjx(4p*1PsQCs6Uts3AOUu%FnYk0C8LqgmeF{|tP zQ?G#V9O0xAbwOWorrf2!D8ZhFf0qJ*mv^-dK?yvLTHB|NHw3e%NeSBDDoX-D?`r7E zX=y$0@^MgKbT8l^EYOCn?R_Z0@GeU5n+F`g13X0qi@t`)y4xZ4I=6?h_+&&G3m&7C 
z4L(&Q;dGsgZwH8nuU5uxW&|PSts9=(oV&XPU}PvKm1&%Hlb&nTKf?wQHFQ^kXfVnt zgx3|3+$tk^d5=mb@o)!NiTqnGN$^Ut!y5sIy{R}(lpLdih#JOglN8(=^_C*WZtbN? zw{cTcNqBk=E;Bmdk7gpuToVv-QDLQI+m_D*d&5W462!z8G|tCmH!#lo7bWBM*-daZ(N$j=1Ruefa~4YsKFp#&q)6<7&e z0(m;Jwz~3wn*K>5dbN;>nIR}j5QMjRpZn^4$zVNni5yGoWMkFV5Zf2*s~RPi%&Xf1 z_yB>H+?|kzZnW_?5^E%)WDt~B&B$|k?@DUDfnzo#{hSBUG!!K`Mp23i?{mjtZHOjU zSO|F_g522^^>vku_cLS6*Pu(8his^`*f97bRM2dz%w$lnk=A|epH=QpZk!5)H>Fq3 zw|g%S+`k+WO>aGdy~%gi;|a0AIPC_rZsTJmN(C36ZmXLQCjO!Xd)ob73Itx+({$8P zYudX)T0yEp81$|#&5~5YLc`+0YVl^#>)ye9_w=^84OiHjhM)waP$B<%%b4%G61>mD9kY+a)zP=3tyJ&tbP488Yk5nS#r->5|K zLv>H{&Dh&=^+{huM_%X|iA)2us#1EoB(^i6b_3+D zd#w6Cu$1p#JT3a3N6q&wm*2g#&lh90Nnwf^f)WrOj1utcO_c}9;wv_PpmJTs z4^cC~pnl)v_6qYAlZ1#rL)ZC3Hl^r~-oHNkp!tboVQZNi2QSF~eIC99k=~13XHAMcnZfISOZzNO$qpq~3v4UlXqynDT?71Y`dZB_Mg9 z*j&g<6~|!qC_~_6#S9HA&@$;Lncq9c>v&&?@eOdluHHqU^Wq@qEyMVBsf4L3Vr*8q z+d5pHf_%j#^5-FsK;1M@OByt{CJ7sOihx-ZruutD6 z$hS-TEPNelOvo&(NRqgRfGJ5RRWp0?@yi8M0uGTZfX7>`;pJ-nLh=-stD}KhexBi& z5k~A(rd4jna(|RH2e8~&dCe@nVTQ_Rne?h5z`F^)aBucJna3BmkIW-VU1mGY?C&Ne zAZt3Hy@j3qQ*7{{Z|d2d9r*G6>seoGP=2fU>mwxy@(tj>uKe#-ri4QR!R@}F*V;b; zLAg@~+AaU-ouB`w?Eb9|D1R=6=J-M2_1#ee1SKdrYNe-ubv@S2nVJgY3pYE81)eSY z&^PNTf!=|t9o{*)VyM7YIs_#cKMYC$0Q{uj4NoIUg*us(rpo&T#aZbx8M0IeNNwjF z$~l9CjRB=Plu#!Y6A?kFS6P)o!B)3|o#pHW;7^~DdM~aj!!H35I20vV#3^zYfBB-n z)eBC=R0Q)K@4HVUGsV)Ot>^m}PYRe%9kMN#8&flv;Rt&Yd+QaWpaNsR=?U8atqF1+ z^Jcpf2wNE`K+_!>fr=@-`<;&l;`PFwhXrqAYhomckf%-Ri>bTe)Bi;Yej(|7ArN@u zYg-OM32Kj8;cw-P8XCCk?{A&8$Ps_#xkzd?bA52UmW?b^@BWj77#G+IhoA%#Fi?V6 zpqwk`%2M1Z-5WDpE>amQX;UY-&-;+&>dB(q%Q3uxHUXd)*GF+gQ<2_3Y@MHBS^6MizV%|jwqN5msQN+o*7T9r z<%bk=(G?;BB@4#W4^0%KdnOQFYkE|RYfs#vI)64JXaNWH3gDZjDycx^J&(8MC3dpb zVbpagpkF16Vq->iy{|C6R2{ex^14Bj6=O2&5uk4{7OhTs{^{k-2-&xfCkb;-#nr~b z7!-r}_xs_?xF;Hy$9&ePd&IM2)w!5T0>BRwxiZEp9M}^C9TL} zPO$Iz9Im3RlJ3$gLJK|X8rx&SgqeyOu(iAoC73*5F$ckC%_XqKvjPjc74DkGnp*Jv zR^qE1g3Fan3diZ63UrhhZk-)k69f+aCxL?l$kveZ$ja$t_|(ST&x&r8l0Rytybv)2 zK2`Qgw&9WKB0OMkoR~mFihV&KVAX8kS)I?s+Tzbh z=F*uBX&ntLi5d(7hIkmW5YkyRuk%`4V- 
z1jw6pDxc`c;))*9J@zY>PwiZNR=GOD>_j$uhW*-1eYuv6)A<9lnh?V9$p9>cYkrI?}ukI{Tx3a$N!Hq5O{0PJ-tWmp0k|H z0Gjjd54e4T#F26J4OOuY4Lny~q@Pe8AJ$B?oQ3V4eJH`yK`S23GrAZdq}@xfR>qz<-&+jDop_%_d z^X$J_`SHWQrXe>S|Lz3>f8I0C=#iRd7bP$gT7^^O>k*uGa_kFUIBtS#{6@?d%_=oB zfZL*EkaY&OfA*mS(+BiEh(5L{GN_1nmd^j#K#w@xy}4H42S#ExOd|;enUhG6XOf#r zmi*tz>18K9p!-vlfXy*&0r6yA%(bc7e7UzFjCy3uH$~dxfG&ws(TZG*T!4N_@60_z zX*F_hyNC~NpEHWG6E9r4hy$l_OGhfaSqu7@Vkk;r^xiBWfDG=zIB};FMNA>bQ+N?n zhf6&l3)zfPIH|G14{0+r6DoN=yTZ7RvhHqUGj(c>2KY$4-bp!_#iIH2zC2=9Ojhb;nuw|8v@K?&xM)Mh&q zpDWzXt0u+fmuG!NADke>>uLx)(b7fFe?_ArJ)!?13K49NK~RF3gE}UQNQ3f(6OX2T zLJ6q8U`ljRmnPwa(j?wu`*Oai#iNxfj%lgv!|;R5CsZB)H=c&|@PxGC?Pt_+4XTRe z`5FQf#5H*hX~dVbnh@_pK(V&%pvl@G@hN}TqI4C|-tkkg=l4>So?xa(cwO8oIGMb;baH^FmNViZeh<*?o>Hj3Gn) z?)V6)zi3+}I2hzDt-0LO&b&Sm#u)ik)LKB+$E=%uCly=`>=~@%tT`Xni`1Vh$4gDZ zCPa6ztCB1`q^QFty>fD&L~*J&DC%8wH>Z9?(eFbY@8KHhQ$vEr_|^)DD`mfs{N_=> zF=x&TPx7S^xJp)T(XOKA5K(;Q?xUbb|BDiQQ{eB9AQyPJ@2bXtJ8D&{O~7YgP8+Vt z${E7Fb%)WI&5vyUYV%C)X&)N40biRx*hgj`OE3prg8eNY)1oA~_(Xo=B>dDk`lnb5 z`ss9AIWmhILaXB!*$W+-5`iL}oD0QtB^+KcfR&M4fhgG3f0&cqN= zx2_{-0g)5Z6^7KzDP!5v%QWfjv?Q_eqx@BDM#RmRUK`#3=^awk#!iGl7#GE$b?H(v zi|Ch`qGmyS)rgLBI*Tge!x+s9fSZ6(f-hFz7lRBAdItslmv_j**vjRomWmm1Cb}t+ zcP5w4ca`qio5v{VMFH8Uho{LzVn4RbL}H*h6V7 zzg5ko6jkkxT0P?TeF~~r*tc+w9@5JN|!X!k|HRQA}}-(f{KO2k&qNY z>5`UKQa~D{OBy8JnZe}oaeR>f`o8yF&RWdbai24z^Sk!mv-dUokW+=Hwh^hF^4OjA zVRKTJl!fwqeFI4kn8o0Z7*Z>^h2NnYnhW9ww_H0rIloa&aFv;HFhqjh>wr9SJ2F@o zA8SE9b+S9_Wl@Sdd*K+n1gc^OrH+gBIMT>E+^!F9z+avbAUq?%cm{i868$Hd>npNL z?p-|nu**(LZsb%FQ*QCd%Vb}5v8cLW;s~Up1CW7-T2}spd&!$6B7PAiT9i`T_RNo5 zQ=hcQB;j9h@eDJBzcDe?O+bPg;X8hTg(ocEy7@_pkf6k6ccbOTvyf?%$qgg}poy1& z@xgnpd#mMG7v<*e9#O55L+&cK)TR#F4P`<_xqU$R!r0JU`IZHTXu-zRbrX z&)_~GJ*q0t70MhD4rt;E0uLn-?GuY#2ReKUGe|B_c8Q-pOKx1|JW!ng#{KIifZymt z;6sMN2eT7k()=g>e#1_1gNUR=Mxa2r)-}AYw;tc$v2e3Mr4R2p0d-7Y)dxvt_^qk$ zy0_QtAKD2FS5QtNsu`Sn@x-llzAAO-RG%G*x7r6vPP$KadFe)yfVxw%)@3vIur(E; ziQ?BDw)eHvY&U~l7yQ=OLXvPq#-R=E0CeDR);>%PgYUJK45QYI_P6K#7r5fP%S7s< 
z2?arIZCejY56F5jdPc;uuuZ>?-PAk78VzC$FCAhRCMO#d_I2A^Be6w*a0CAtXH#vV zX&(Y0dLVC;k=!Y4=UDy3Ie}K2vpD3*z@$yk2DmG9JI5`1+}&6Z!`}g9yia#2}>f>mr}8yI&$;N0c9D<`E&RqwEbFAbtW& zmcvGUJ|JE%_k*pwE-hHWw9cidrCilUDG#@&HJ^yviis^2KI%|E!Q4Uo1hJ59)sctT z?6ZCIyx;X9Aw9kB!N=^Ck9-47g`GEcLthrM1Z%Eii)|Kg6r7}{Ki;KXv-nueWWdQ@ zHac(QaguXP1(fgxXlrivFw$nS#DN%{zjl3R3LVFv-j?*pi>&S@7FTiYN`czkY%r;Qna_LOAN@)*NCez&v%>h)=pK>XR&6mH|0_j}&Qcv+%DN zP<8pn5aG$lF(U~WEW$?|YABe8V<-TCHS%x*v0k^5uuL-!Wg-uSb!o*o496g`{tSJ; zu4Ypi;9@yG`rCEY@x1e^`5NwObf+Mvj^b+06(U+L|YnbM}fGn%CwfCNd z->0Hl7NPG16px&XkI&Uvn<5NS^d!J5uy=lmONMTm7jc0T-!<=~4!~wkQKp)T_i07U z^X25HYBdM-gqC5pn3+MGc>kkvsUP4BjKM#C{$E8aEB#AL{H6Ny zao-!+SNk;!q4~&Odkt|EV2T_z%#{-ccZ_m3TLed{7y}yF*HU`WHha>a(|gt9$YI~E zg>ST7-{&Y;_{EwIQb^Sc=Ia(>?o)rfrehKqFm)FtBX8-hv}8exT=w{fCJEL=`?hG? zA?(vA=cU1b1c|Tm{fF6==`xQsXFrsm#;oXFN#n$k3S>80)YQMh11MV^&(mES+-kh` zJSdJ)B=>1JA--{~XwfKo_&XO>xf-Z^grV|zOt8swraW|oqRlf!A=-xYwuOIDVSX|G zNPJ6~HZB(D0p*iUn3&9eDu3`z9xG`9vgFvWqahX5PR(@orSmnKpssl~~) z&DV5o%u4>7e7+*LKe0zD2v)q0TDtwQdr|vKzt7WspZSvh->ueoBcgqi&)LJ4PYKhx zx|?a+)I4}^b^Gps5iUM+VC!O?sF=C%dVp>kSrvX?u+LKP@mDK1l-9jh@f2^K# zhoDA^Mc-llBa(6Xz5L7za{d= zOc_Mwx!s62)QF#&ybGvsQm%-Dk;W2-9ALT7*}Eju>itIjp0=N+L9pFu zyPY`^`(mzfp87-_d;#tA6fFLt|3MD1tF7UacYe(L#|n~^(Q)hTpTNFFfD^HHb=sqM zvet@{Gdzz9hhXi*f`8rbc?!V5@{%;I$Th4qq?B9)+_1!k6VZ-K_r)@Y3YG3yvnbph z0r1_gpx{vu>p5OR!diH(EK3@?g2i(33-bvi49e?%c#T(N3Znq(^WH+GvWJ6;1vi$V#GdcI;| z+9gpxxA9RgSnf@lJ}wpp)2RoBywtC*LBEf;LE$!!=Im`nml;_*``Y4sPlc1HYpA3w zhxWL^jQ6~9mgdU?!X5vyWhwF8J?YG$#EIEKHLsQO+ur0Prqj&@UCx~jq`H96@^W*~ z_zL}i4L2RB6E%KfH!t=Eo$u&UB16cWRORGfQvrMxK;XvQgB#*1z`S_aaGSV^Zz)>xn@oi-gx3}38 z6$t=tkz=}23tqeIjmFe^Wwxs#21+$#*A0CjxF$G=`Y$u?6k&4ex|`Cls?QLLkCZOz+^2i}tw+R$yzfW^zxJ4NE`yR$Q5JI7aDJA0m~+0fYMK z!je(-lVoA3I-SccD>fsP^ol5StHhSRagpY4iX~nF<}rLF{33VE>L2+9zLn$>3+xHu zyj6QM1SmDaI2MWk-O6vzRj^KiRaih^T9rqL`LgE7k;Zd{G-cyvJs&$mfZi2$=70l2 z?S>~oyxDfWzgJr2(ks&rr$Kew>RbH84#GXuWG4eiMF1wulkF0#!3ZCFm>Rug2NuiC zHC=M_TVCI>UwrgP8awOn4h6pjHTE~CT@D-SwFKv~3U`_Ckx!b_>}3MB4j3)fQE4`w 
z$MY3O^%N{E;6n{{6|BNY_IW)*-*WR~0Aso;0?Kwp1Z{|%%U4$c0OV?&!&B@aCaCCRMoEI-=Q>^HLnSZ%vl| z(DX8tXPB$tjKXyLbURTYKmId=w4Kc(TZRZXOy)5V25&Hlk<$mp9*}27^)+{1HDKRgq^-S;S9x2;Y}YcKTg(T>-M=?Dr!0t?=z__SBbO=!s_G!4YN~8dUe~RM%?0O+&t@WT4-=ys zm7)$&M(P$0*AgWW!ykA+T?M455e~ewT*=uPnE^NEv28@@O@+bE z21v=e&)X7jwYu68hahJp7@-Vbcp@~H1nke~ElPlXVN#mY% z)?4}^Me%tyz{yNtGq^~O#{(Qc)Q2=PU*?WEnYm=AobT(0ACys_;6wH z?YRm<{u6({;VQ5<78B!=GT?tM@WAfU%aK>^)5U9un$!H%=}O{0%F(UxTT`g3VC@fG z1x0hYNJ;08q}$#elawm=l|g?&(-TY&F^=h5BiNTB1_Q!s4qCBDDH|b=&J(0vl}}z@ z7jT?e*JpcKS}0wv-&(r=Nwp z9*}hg^x8SfN6QS42$96^>N(Q$NW5rnol{)MHSslUBkbV-ZYmy&$O~U2a*jAYBdSHJ zG1?8(qf1rTO81(?O!4<4131dsZ)<{7Hm;#8yH3>aa6kKWw4oDm(D#V-nhZX=_iJcF zOZS-v5a=5i-%TK(e5r@leqQeXVVVz84=w(-bOhOA02&KM8&Z8}Z7ImYimD(#==M=dS2<8ir9~V}@C7NcxE5 zdDxv@G;@ZJIMh|J0oPSfZq=r);?mg7muAy;#_+hhKUwWP^AKg<=gKj~_i$$f02}|l zqNPhGQ{3A^nSzxr7FdvYs-DM|sSy*RAC|8lh0?s|Du`d#k`7V6<%dJ+t>0 z`tj?;Hwu=fk0tfI5+OVw&0x`-EH`VX<5q#R0ji!S#Gj;8aW-N1?KnzBXh><=$O7Zq zOg(i%w^*BMV&Fd&F589XBKx-Fe9xq0CP_}DTqlf5*Cb;VV7%=y)23+&i?4L8@l zo=Pr}WfHk_VG&^Be;pXpefzW_joX(8_)Frqjv!dncg-#|PX{af^QUn5mjJ^L`AGzX zW`e!;8tN)YJ#3io&e2GLXI*(Q94xMRs8`>Mdn%!@J~eBQUh#^)na}GEe3ASlq+sue=#3Bmm5IM2fD()*ws*r|?(akID{6*yP*4 zDSDmK5=GZvcGn9kADF8kWzu$7gkZhHi5@h>(#nDpFy|8JcaO-jFeTpCT<_l60p+9q zX2)H59!YV8MD@bNll*qW^eyQlm^%^2RfW+mi@F;DY1Cj0lI9?S*K_x**qn+OUnN~p zFDM5QnR}RPu$S;^0c=VFT28ZB$H^_vGb_$*vVN7%Pq=~< zfq!27FQp(@`M=2L%LwQn=YFTwf7E?f@Eh{^F8W`SKa7H4Q4skMev?o3Vaq4eGf;yk zSk~CQE&L*XexBWcE7{_Ni*{F1F!fxsEu%Jk`RsEQeEQYWjqfz+IPQtZfBrjbrT3`I zX2n^L^vA&ylbA6IZpe)BF3K3ZJB=GP$FSzLNP$lKY)kqsF-e7)z zMZ(FOw=83&tfS@tVwcdh=XWI@z9m!EXuG=j4BPb$5mrpIk{?-D;zAtF7F0bjSAmqs zDQ&r&NrjJsVukMG7kgiUQjoRSut@pi$o=>OO%8g`114ZJL861|?)Vjc5lgbN8jp>> zcb!aEuG3O~sDGgg_6I1o<~XN@BG3A1rb-jWG+Hc%-3B<9T$TRg{^vOrllqIvTd|)qIf=UbN#mqbo><&ADLvtAEyG8q zU$ObSu7bfuu<6uE26WrBQ`fKVirxLVL$5nDlr2L+Ty_0?M@bYw-snc(dFi+sSPBpG zUIEUXqqV}Xm_8)5k0mAdD%e*gL%*`XTm|I#L{_Mlu|+ks{Bmyn=NU`UmAS5KlIE4X zB)6izQF>5U0iQOJtl`Tix0Ep6A0bcB8-1pF{EA21Kxd%5Y0gkZ?^~c;{J~Dh0v*0C 
z`ElM`$ZI`)E%6=!qQRGJ>5HdF)Q)5V@2OiPU&Nl^%RqgM?k6-hmK^<31suig*KeCr zLpEha_oq7h0b39(_npq5cl)FJE(H8t`oAXsM>+#T-8)2kI)l0jY7bRskoix}x+QtB z&X@HiZmee6aYwzLNs(LN&N`@|ZOkY4@fm!CFEXgBVEb2H(^!kQDQ?(S@^`KR05Cw1 zu8Q3X<+QodWLJSK+=R(pqpBAmWq-uq#~LM${u01Iuzepb?;_4>bzC~fJl9RVx3kAd z$)4%)C$@@Sz56f;3OCGEpl`%lP4-y#o?%9wFAHO}h(a50S^aBAvqvLI?^3eHP!2c* zo98piN|-~Tg4f#yY$>-C5|ddjV~|I(1yDOh-|jyk+-^_#q177^L^$7o ztwvv>b*?XYLs8t=4)oVm0AE88xQX}RhPnz`4;${~=fYQLAESrTZwTjh`1@R$rRvSP zcdD=D;_Ml5S~1;w@ZsL)D%ja`6^yr>xgM7+(5j%5jpz@gKN+UlZGw9fXlXmws$ zb{P?++#-}}n5*DMpFRfg+<>X@ow)h7wIlW2YCWs3zwjmF6XQcc>9Bfm^& z?(y_)MW$cCiX_<9cNM=pC*CL}v($~Tc?@7YS1X@a*Vm?#9=?FA===a7m_Axgi)S;! z*wk$FMvC8GSHW*lP4bQE-ovKa+x`OvpEXi&=%vEp1cGS+vYb-)bSr#0C3|iUjg+z9 zFoNxK73{*+Vt@9#SP0Z$AXsD??6NKWf=idGl%2rJDGNM9$K*+|A=ykUL%@DL6+nOV zIG{+;HF*=a0$ha0E2*tEW65o?)Yxab^w9kY9c%-RW~f!B=ZLGjQp#|vMAFB1S4~Ho zaL7_UA_wK@wO+^TR3_bYJ0R4CNEsH#!|00o32S{+F{Y&*?IXiX8E$f$)ZfM_W-v=l7azpjGcf|~Rj)MJMY^^=HqcH-{O zhux8$qR8DA!ID*;xa*6h=WyOX!^kB;={N4aWW8<)*&_so^3N7yzq1v{jXqsixIp$; z0aGIM(%4bQTSC-k_@zai)2E;DKQy6C1c!z23v$EpY7wM7_Op;zUVNuFDBzo8p+}6CC_Y9ns1<>L`$5b)3eBm`LXGi(JPZlRcD< z*rMrK4;yuFXTFV3_o=L#5e@Fh18OTELyh>Ee-UCUn1cSim2%8wN^H{teu6MHfbr9LXiGZ?9XhNzCA4%@su7df=lV7<;JwUiG0oN3))72?9S!rw&C3V9 z`2ksz-buWO=`i=GLkezD|W-Ktfx0ZBWDa4#o zOkCcf9jz|rIiYIoEk-aRY0W9wT#{D6&fLTMQJNKAOmMyCNeu4nsQNU7qNQ{tvUq+R zOnaRLDB_U2f*AnORp2mVYRVI}u^+DZ)hNC8(sW#N?L@k!DD4}Z)|1$L!1nz3aCts> z^CTmlJE~lR$7((Si3aE%xnpcCwE$T-i?$=c-1lA}Sf@wz=<(xeDE6 zNpeYJB8T0Pzs7=}asFc&1aZpG?K#9)fQ5V5jN36#7@WtUVWL@B0poZss#S^%$@uFq zjnPw|BU>n#orTZ1#kv_}gB-Zdg27V39Eu~@59*krdDt*N6<=cxKzJ>ch__Uv zi3^CE71C6=`jW3uja$Ww!lVAl1&d8$B7JHC*K>o#q^v(EJ z7dLO`aoyG?$Is@#ous9cH}Jzt9@IPJ(n-O!4qx7T9R{rCZY%YnZof0g>&{DELG-`YZ0mJas(=T8Tn zbpB_TvG2V=aHiU8u_4X^EV9FFYwTJY{wR0) z0?ko_@k|79c9c5$9Dr3914b}VHjf?Xrc3wEty!~?!UtOnRH`XF(BdCmo0l)OzV7M( z=)CM9MKAHpOmSqho8GhbL9P(O^%P`d@y6hTdWEFu){77R-IX_=LiL=szir*zi2 zN_K!rm1@y)UG=(P)*L`Ie|y0XIr)5TOl%y=!`RzW#Tg9%$HV??XVQM@o2u8G^7Y>Y 
z5UHC*IOkMX-YnH>=H67S{n>~I^&4Q|Z^_?CfnYKJOa1J-_HVo=_#qJeP=C|U@x#{7 zKp+yc12cZQGI!Zo>&r>iw$%#hZ{CM#e`s6d08Jn)!PgJOnS;3A_Dw&izgoa~R^L`g z5*eiW$MQMivU_Y@N$qHAC^e}tFX{SNWs91iPC4RhUGAPF%mN8SfWFQ`yUB14&6Vz{ z2j>Fm=5Y_xH5uES)%z;fFLvNP^9PPN>e*f)sUB`Q&$0eWz|vR6Jmda@J2-PG$GLoO z2G%q~WcL~ne$mrbfN z*f{gnE`7Z$;5(=PutvxfE7w91L7G1-wEsQpYOv=`vY701uFQy*mOqmZjJUlaH~2Gs zd!uo9D1u-C`w*vtcNYA5Ff`<#-4EQ$L*zsAO+IXgDxW=X!LuC}VEuV*6hHOrJcidH z9DL5ee;`wj{KqrLJskjbc)rvr5W(Y67pigav7&1 z>u?sn^ke|x5=MN$_BAn`XT)p-(GO*2K5qQ3xd2lUXH#ia%D}V*Jz@)OHKeN6H=15N zN)%Nyy#B3}bOUf!>_y5eC(0eiXtU;H=~wus_!w&M9=l7O4npUotdI?aer}<+Nq9&g)MoPr|PUF-8g?8EhtR(P`{`_MHTH7k)Jo4_D zVB&j9QWoANt)Hl$--kHd(74$js9I+(@ePT|OleGtTsNlX;UNxtv+E{x?Fya}fEpL@ z8o&K+cnU~7I466O*t$0hV`cl^i0}q7)7iCGeD+}-d^=y9THAoY!XebMVUV|(%sybQ;++l9E{ ztIT=b5wiEsuR5nQbg8yWajh}mxeHW-FnGh2?h+mmoBiE1!zV$iT#1WgDRt2TI9ZGlAt#UER0l!w= z$q?`Ec?EWA407EW(Fyl=FYTgoxQ2NSNVk6Ok#|&6R5Pcfyy?!W&$}dB)J)@{Ez4d+ zpz{${xo-?$v+)uTb7GQGu^|Y}x^y-mrgCx2X@(Xq0fQx51NdWq-333_;rFGGZiH@+ zZiu@83v}3YFVZ>CT?}okS|GpULvt+Z>J_J55r@9`lWAu z2>PDA;9pi^5dk(eWe43Jk+C|a0KK%GH?aYlr=wbFb8V`g#W{2-e`*BWL>?fBln5ao zTWZ~49;orZ-{ORjU%kmu{kcx9e|Us zZaZa5kauUWu4he=v*_a0<<*+hsWub%SVK%Ri0jthu*QHb%F^of^JG7-@&Cy8I=;cS z(rJy?SqJ7XaBVW?+nmKr+Q1n-8!x(yu*Id}cXR6W>zGmM`dj7Ua)42#p4y6B{{x<* zn&4H&KCST0u81p2=qY2|iK5o^)FV);Vg3S|n00-NWXe{ThGS2Ij)0AL14viMQX{!h zALAx+L|j=pAk_?n55&(+eQNEjCLH%+=ij_ctb@zjcGrwjLuL+7WfBB1%#bd}xy=;P zVTQ`Q)X;WcI9YhEM>NDMHln8SAyrJ#Uw^^RN&dbRLiO=)RI490)k@d3oyzAp$xbu5 z>2>K=YDvXuH;P`pnw;EpA;1gYlbi6Qg!2--#Mdmg{Nc3^X;0w&V}pj&L@SRzqf*GAAlPGXKelb2mk+S z8Ui#u3}~=7mcf6*Ie+eWE-O|aA%#m(D|)p*T8vR#8py~xR8B&X z0(TO>aiB89!&8D3T**(*euRdXWsvcmJRdzV+(sJeFCa&a_}OhezW57trcs|s>vy^6 zUH49&!6FI|mMtiFd{gq4%u{7H-uE+x2ZYavH3;#3f&rD&)+f<81Kk`gw-4-mh9#Cs zTFOYEKM<8&q)c(@ZT^<9)6%k8&)>-2xL0&W{oK(4>c9R1_>zIZ#{h#5 z<}bjq{7?M-hQDCb%sW~=Em`J1KkB0eadAin%PtW zS4zy&2ZVih8@wRNjwQZQL90{2jVJ!}?Ft#dIX|;~CC;QZxFrujjgUzv+jv`tPIr~0 zT9cK@HZJi>`Fmn+>iQ?omdqryrN0m-!m;LjvP# 
zJqT95cOC;Q{k7_cGxi(|@Mc%&zMKP{UID=}=wII_5*^fEz_>SbfcgvU51aKK{Vl>C zqN&Z+^pixI72;KvBDNl&q)N@lHDqtNzK(B(eDrh8U9pFQHqg&G_C3}b94 z;xK=~q#66TRhMyI2JITPk^fti*aeyoSDD6NdImM%W6byF9T4&3!p6(S@8|@Nh%KUz zw#oI&<&vGOW3fb<+Zt-kM;Wh^Y!j_* zg#YyyeBt}wj}XL}KDXykf5DBzW}JjU%=I2O=cb@fwO@#t(TlVi{wgmABjntQoDh@A z#B=Z&hxrR|;Q9+LxUTZG?=JGbVG29T0`@3eU3F0h9Sipn&qv|dl2_aWOd6|CSzkD9 zYg<*ts-Q#klB6*QAQVqFZ>5!TldoE5f#AI7FEHufBFf6ndVHVPVWF=SEATp)wAJ4@ zrR!$3)`|GQ1grz%%uo@C$t0M- z!zc+prB2a-yIb`bV6+W^LGC%HoPZY{yedE}#f&O9)uUZT&G8IX>@D1*nFX3YCb`CM zhUydAPQUbrGd2eQI|ch|AfX^w8MI;i?m_om`n%-6wZY$o{FsD4?+p6!gx}3UaAw|X zvFpRVvHTC4<~&2hn=dJfN&=fsRe7`#)QMG{_{b)=G$3|x$J>rH7s ziJXQ@sO|}vJy89?`~?rsN0v3mM}<^7E9CiX>f9|8Dj4pq8y=P`4~<-PPtQG|eqJnP z=eew7TtHMgZ|%P{S!8TSn5%(SOKA%sn1rtxqCd(lC$pC2iPZj=2mjN!hKhM@qqgIZmjbi zrh5SYo_@YN?e~)K_4C7Ze)10KehB!Qf#`?jvwk4{obbce&)b0Mh)y2&lv5mnD9_&Q zy1(7FFJLGjO&-F@pz~JEj)t$F{r&>HUoGLNSr5`@y0wV@v3y=S;x+{44$&A|+7vd3 zGK?L)-@N=pO^>T19=BKiOn(^|Xr(+0POczLZ#{`>1>&BsFD>Jysj53Q5bJD9!T+dG z2awm$AuOMjHAO4H3OFhGro3SDQzJi4ca46{?Pgo`VH&7>VEzJM#31%%8Tu>8C%B^{ zcbW6J?k=;)NrO1#K=(rX#TE~G_xEv?t?o&B&RCa`t4GYpV(+^n=UomBecx#@Y5el_ z2cctt!CO1@M*A5n#HHg-f{hwQ!sO8$WK3e9q548qG<}3500-krJC1fpBFIVyv4y~l z!MKINoa;2@5_Z0pzc0$*B2+%$uO$5=@r$Z|#Qv)Lq5DbnKZ?NrDEYqNKg#F(Q-7U? 
z$mhg2`9vP7eD?eW#iR`Ke&i?E!-}L0P0oMtXe7x23wMN>WOgeit-pBE3130`{RQ~H z=z)+!f`|f_s6hR=e=MPmTp7t2w*b4yCs{6Are4eP^>g%&ilOns>{8<)%Whe}=Py76 z3~rud`^c70>vP_JQhvuDkN)yZIc*Ax4vFox_b)D1@&P1@)z>r#8@hEfa>B$MQRGYT zt0PBG4k&XnOrKwU^d`oZNq53p_ z&`JHE1)Y?2NQhHO-~g+SeWkdZlAG z4fxbIl$v`3?-Tl<5l~=}0~a0f)URYd61XZ%GeY5$YO$b8`RB}R@T4s7XSdBrUGhUa%|m| zYV>S=&pS;SQVHEXm#Kz~JK5jJyI1o7^pXs=?B4H)vO>o7rn!kpSv@VWJ}9MViDOqK zMn727gMKN8`3o@d@8pMLiRO7<^;2qI%FRajxKjs4agSZNeiTU~1%L5?qcA-a-b@aO zahKJ$I(H@XanjfFXC2~E%{Z)Vd|e`MMwSA>^T$3?N~J_iu}k%0ayrcNVmKXfrD!F; zN$@_y`d*IUUw^?jivKMIp__G&Zm7Q?h!o?x}KS_eM-8=entKbir0)cVh&Pwu+z*iTOIX z&V$a$N_@JJUt83b3uX++!EUf>2Uuxx`x4afBr0EsQ2FpYDMMrx=g0hD#_>u7ZK){3 z9`;)nn7`o3u=UP_<^>-Acjc)W;|`ORPef4- z4#LNJzrWzFleo7BKFOxE3ltAmFj83n-Gav`7x9%?|soUoAT>*h-!&-!1 zrk(c@^Dp8^(!8?Y5|(bie#$9MEd36}#WPya#v0}?cppd6pd#5$-Zp8!*w+zlyZ?Undc00MMn_ zQcEr<*#y%C%{tl`%mfjUo?ZybWw1M8{=xbcCe>em!8dyUEd`;P{TtO4hfOv0EAl*S zq=zc*be~R#lv{LXx&@r$CG|t`ptyEPqWe(`e5zso0wNg7*leo6Bg{GKYvsp7hqNty z#5EkzzxoRRVCT5b*gJt0A;*-bo_+bjyDg1NBrkMH%rgwayU_@Puz}OB?XP)BpVQT~ zP8}j_%bN4}Fi-!CrmFI>izyq$_G~E>Xqdl1(T)9vW{*)y&J79trrAOeH@V9c#q&iTQv=^8~ljz%dQv@UN3u(K;H>hU)4ZSlx@ewWhwcz3m^%qe5djA50$%oxme)vL@ zV@nj`b2w57Jthc3ra8OHC$umV*!xEh2p=)OQ;T~pKh60Cq~|%p?73GT%)53Wy&^`K z3RxhxDW3&i}69(U& zzo7F!@%J14f-};yRSdGxIP%4K9X{@);SJ(tVMtcYX9mnWy(OX;8{zV|=Pw}sLw`X? 
zU(Pd2_oz>rym!@4tQOl{>%qYzGCDb%cfJF!0Gr?qAmw!&?~;>PUH^5CZs?1x8aE1Wax##!r{7hW8mgGsda*znr5$Sw|(#N%0gA?!daS^}>rrDk&f^xi0-D6WkqnfbXM`F+smpe}sm&*x0*Gq^HOx$_GRXa3~6{lZ@9)=6%=F z((Q4vx2%t{mjUk`5D>pkP2eMdK9@nK2kE}1e?8&xU7X6^DM$#Qy;u5@{dT1vQ{b;z zSUhwV{mV@H=VLgeNCxsp2l@M22Tgx{eE!!!^WUwK(ZTA`A+U4p4IQBVg2BUPy&+7$ zK*~nOAa#7U7Sq>mI`)1D~Jv7_#=Hi&8MwfpSb_5t8HFV-mLVHj;E8qUsvA);czf+)pjm?@#s?o{Gq?iGK z4klp!f~exBsli9?>N;TNYk(~jF0{W$2EBEu<3Lc0V4(0w_c|crTlD7Kd9hj{AJ@3j zm#3oI(9b9kzT-^j#<(98_$uz>98gO|*)s4}&VQqO+((~-{hZ4^d|G=&jajLq(&MFM zSV@2V1u%?%`2<0n`*V8^^%qPXHshki#LPK`B z$h`rdahSh=6pp_j78sk6<9%MP;a}C~h}w7hRn22PT>#$@k$ zYqz^yx+qk@PV`-i{q-{keNFwk8U7abIkk7<*T#LXlDpo=ArHKp+3HJ z7!aIK?X}p@{R@^4o94dzFZGl+izjgS9q52eEYDZCyVkdn@QL-0cb7b*{LluU=KcNx zvR|z3Aca&JR8`jz3!?q;>W-p}Mm1eoED!H+!`bHS)+g*MQH0lNIRa=rV!V;JZHB;r zS+D8v%)$)9!!pk30Qo#{bWGxj$w=P$R*MMbSR`s&K%-!n_Ki`S ziA_8JxOqC$2nc42ccA)#`3u+(&V{@&%JUK$2%H+l@#Je@&5CvxG7t}!BItpE$c#4{Mq?b~ zHJ9^$(Y$WTb94s~*HQ**ibm*U)*bpUq_m#~0&lTjt&9bZ47(-iY6_hp_Q&x3Q9q#l zfP(iI{4x)M)&8b_etD&zRQ`^B{#}#*UITShp8lqvox|2o`d}rNy6BlZOCG6J`gQ?a zpNO|oQ`<2-5{{FleYjDu3|~L{{RQN|TEZF8s2YsyxLW(i@)6L&dP24JG)`~I_<~1S zJwn#*cM2Z#ehF?x9>Hjrmh38yvxfBdZ*Anm*w>bR*lK6 zfQp?=V3T!Ko|5X&Hm7)GxT=PxK>XME?(ZBK&X@G&_z zFTCc&s2t+aW7R^~nSp@?^;J3tl#kRkr)9qPP13|l?5s_f2PWx8k1ozfqUj~nXGp6D zUR?oDPtsvT-N(L`Jjfy;M?F3%7#e?XrJNhfvQ9hCWm=~S5SobPCR#Qr7aAG_>lar? 
z+#%5d9=1{uh7m+cJ`wx)?nn82rS{9P8vM27yTq4v`yZ>KAXw1_J*OEw?xyr+MAlcsCO7&pHqHR%CU4yAE#|i>*25?C<#tzyMme zTcY?V&J@}mu_H_u7)4a0nPaCp#Lg$h*hkn3n?D2WtmGfow}*n*1)CL-m22xASElFW zAn+yQaHS)PZ4OE|%wIsz*MR#;GU$X^p@!F$S&;_jbjji~S6EDL^}YmIQ=g?j zAl)>q>JpB$Xka_cxLZ7GDk{%jt)7WrA4~SGysaN#W`_#!W%X;R1U8*HTX-HX81gGa zNtMz}KjOUJobFAZ<|9|z{`w1kB>87K2;F>pbVK|F*i?s2chqQbjd&xq`tEyBWSaXE zE=7Yuu!PPfUWxh#{6(y;$?*G-{r&=~J%0haK)7S6-sB>Kb*FkWG24Pp_zMh}zW|ez zm_{Iag#uyx)tRaOC2|_znZOL?$hL!1Vfw&$*EFE;epq#Y%B0~8wT?Gs4X+@El)Jsf z6MIHwGFb~1TvbCT)-ZoT(=4{=H0^IEU1AE& zvrj%Ka_{hOZc?+18T#ul_!-DQmO-%Q|AsZwVPl;jKY>{C!2g8v1%!7vtM9O!-WJ~W zyEj2Z9Oq6WYS%vrAM5@80_wf}3uGCxlzE25bp&o;D!flUwbYmS20}F=Ad)G2I;Q9G z%W|8f_@+Df%q;w2qU`jXMs3;W>AQ|ihXE)IY*RNErTfgf0Wxe!H~L|-I1?i`XTPZz zmSUu?JG4-$Vg7>DYOgXRWIY9NKy1Jh@gVLKcW7mK>}5`>Wk%!W(w{l#t!!M))-WY3 zdz$KT+p3OQD~4$Hro53`;Gg9nR118gn(MHsZYKRW{yf$rIpr}kxXX>WD<9MH(}^jy4U<1g?is8`7O(0e&ZjxgDs+iB8$QJ0{3!~A@Xverdk9PVy_RHWoZ zViDQhlUy&oZSaPjP*;!7B)zY+k=RP%IJ2Py`^h29U*I8#QL#Qdw!u>7jcoe9J1viJ zu5PW=SAJ`~?XBGT;GF}GJ_vUVV_Sl$rDEGE^t~<|F?irTToKZ9Owl=8!)WU+%YC@^ z0k{GG7+e4BBmaN93<9(u3}~=7w$OjV`CQ;dpSR}-L=`%!vlp z+UJblfnUR-#Wzpap1FGd4L}O-Bg8|AiLRUqFc(VJ7!f z*|7AJZ^e}vhhvmp^&wJ89AEY?D4`WoeSQ6oXC=n9b?K8m9>`d}YRp30jsqV9eV+%> zDI5?!;aAU z1~)|OY8Od0Ni?BG)yqmxuhsT};SUmE_eY@pBY#0Gz=5JWN(pFgx19=pn;za}XD8Gb zal@$BH`H~zLX6?0FMzeAZ+RxHGhsNO_Qs9i#+TIBYzWPfs&z<5FVch`V?l68hWcMz$E?GzhA5MD-C8f{!y4#-h|f(7_79oQKl-qFaU-wN*d~9gK98_y=waQ^jk$zN`u+gH zqmOx2^-8|SO}L%n-`{NxWRB7x({hkK?&?7k>e__=*I%#?{-0zah>Lt~&msN-Z0*Bl zeAAM48w;m%)!E?DQ!fTe|M)nmv&BJHO&Pbf5E;Y-G~hE1^A{Y0<1YXJ@#Nli0ks%n zpBLZ`LHknv#v6caGoNIaBPaQ|f{N%QaC&DWVfIz!qXaXK66!&zmZ*NJ6`JuXfezjp z<}NS8Y7{satZje-1q|alj?U)S2 z3rD_uk1Fd3$l^~@8JrX>d!5!6EvauX>=G#3x9xTb+weo%``L+wCve8b;Li;nl!71H zxqo6o2X%-+AF$>UmV!OeeM|mP>VG-!EV|cXL;MBUCWlQkzRlA+&Td={C*wpCV>NE8 z%CnH$PE2N6$7T0t#N))Q!l!w^zu@>UR(Fs>5(0F*W*sxt|9EvL$f;#`Te}f~ES1aF z=Mj!W%cYczv~v+UhOZe_7tX3XAp*=h)d|?-GH>U@Uesl}6yC+D?b6}2qNV$YB0b4A+eUL~q@4B=~Q$cCFi6Y4I?Ecdcg?@G_sgKAXO>Qy|LG=Uk7sOdUo*`hbvG5uw 
z+Cg_-%CSM!wjbfnZP#tG4Sg_pcI<%qVd71E#JrZN(0<+;^-c}d#S$Bb%OAyqa@=ol z-+Jth>IrE3v5xV zbL0d|7>*9kD+71qREdV=HMyQ&ho_(agggiqefau;T^FjKUk-EjuMolB%x^qFx&yIq z`ms4|{m9DIyuIr@ZH>0;7`;lxh(2bZd6i$((Z+jBrX<|In*zRm_WKLyf3<|8*V542 zkQWcT!S%Y{x=w_96R9M>#By2vdBT%9;%^HrwwMRjB>;H;F+j)ktw@);pZ0DnblP+Aut`pxf3Zo%9o&mT2b2#13Tt?Errs)r9=Fm7nub$VJNfsOGm)M~UE>*i zCrjK9K&zW!XaXM7_ z?D-2YM;lvTZ5r_tHA)L_-&wQj9jm)Wg`6%%=d*M^aP0Lx_zK$ZFJSmZ4}=^-eA|_- z2`8@fk9(kdj*skwIw!A0Up;w|4q3k0Rma>1jJ@f`xf5P+zwkS0qY zVg6{AM@Dq)BKC@|X3*9JgMsaauHtrSPRMqNx_-7}1DiRYwr^1>uaom~OJ@ZeIXx}{7R25p)G)C*AxP*lP7VEL4)YfPj(QmA zr5xLSJXxr5u4O1H0e-9^;+*V4fj0w3H_u%-pwMi#>Yjm>#?)n4)SvtG_Imo%+}$cg zdrcPHP}0}8bO$mlU2zt^3C!MBVrjO{8a!rdvra3_AbM-uZRVVbE+29~Fn_I)2&bLC z%wa(LhA4I2!}598FsAgaciH?Z_|IkwRv<$AxA7YY0w@>`q(4@JVA;d(r(l=;ccK0G zCVtHQWOCuh0LZ`58xgc8G^oD-GHKcZYeYC@f+i^9 zBBke9fJK-}&4)H(&MJDU3BJl;{sP8dbxy!EZ+JVg3FUYG0szot64fA{(&?OXIc6`< z&WF&XRL!<6@PSC@QF*p?(W)zeTw=)-;n{_BaWe8_8fve%t)Yf=nmefUf^(I%oPnPR zlx~>6pf`|N$VZUZK!H9->QpT8#a%qoCg5Jy4EwHRTOY3A>;dWKsbU+>FfG6Ikbqgh zHzMb$gE}*p1IO9RZ`utmN>%ff0`(1Blka4?9?R*+>6Y8+C;Fx1Bq606ddOxTv%~1e z%>U~z_(I~pA0c#$@6iqQ7eH=y`tPB8HUEw7djSH<0hf0w6qB{eir z7?vw%ZvrO^G5FV!X4AD}YD!<3?0QPOR7fmq_*%_zuZL>U-I#4E2gEvxN9wKjOYvAXinL-;X3T|DYRkH{mexha zd)jE(#wQv8!Q?fzb0@?z&t%zzQ1V|q8)<@1{m_8emP(uEzM#=G;$MFOJVs%Sw!}BA zA-`aPcglamT5%mi^h7ak)o?QFD6=z-bKDV4otDEv|ajkwYQ#)m1!48lNk-AFN3(fTX_H-0=!bfYqSzpnJ22VZcuYA z@yEGJle`V%n#rC#eo8)O__Kfg1@I7sQ7!q6>X^f(y4oDH7SzzvgLZ|A@4S_h9mbP~ z2!&Ue?P#e*THe?m@rO?}%wNC)L;08YM*slM-7vd*7mS@NS3}Af;@lq#n2lOAwZ56q z(%+s%W#z>J1e!hHiuPH8YXr}(QD|fG_)rNX6c@$!o100673V!8fC3Hk7Z_l)2HC|( zmdY81hdIedF}|m5I3*gn+#we!V{t4u&-{Qu2Od+N&lQlg6XUCySW$bn`+wLw>#!)g zfNvAhDUF~YC5=c5h)W|W-64(AqA2Bp(jhIOFi0vPNGl@JNJt|{gGe_>>bJX?JUp%s z@_yg-ewX{l?97=nvwCs=&YV5x-Wg}I4|)^xl*!nk#2aQd_#<%pc5o}e`4S5shyVRG zAiWQ97|;;EApJk#{FZ6Dbbt-9Io@*3s&D`m@<0#Ek%5g4bZ;L*KLV@>14z%>{%Xfb z%oO^$<}0$~xmPl1Eo?V3?Vmp&K?ukt;?=*o1Q%zhUqFcxvEQSZzWN2}<3z)1!s#Kz ztrcxOxXYvw&zcm?j>QE66RWPpJpM5Ugm3xH{Og5Q&-;WGEUe6@I6TZ0HFFtc+q;@Q 
zG#$B~wQvCnZBLOTM{|+;Ww1{)>YdcC;0?rBZ?t_J^jggziAlocuU`N^91!>!6Eyyq#coZsrK7e$qH%JI~er{@So4Id;bEaKk^Fz zAV5DQu`5OS)F?&Z=_t&o!TvJVMM`Bxj-uM-FJDKsoIFNG}=fUjaRx4#jNCstoqL99p?*+H?*~`z%ijR0Ah0^bB zY4;34?4p9W&lue97#+0w4@2Ovqrv;~^{o{Ab;=)74fLhz*Y_iklTm*ugS;JqRschX zS)hXvpdUU6R)_lAAj|Gp1sGQP(*pd{+!+!Gx)WBJ8oIXog1|1h+jW5Y1*M10x{jfC z!I7YN8Ku``$0|4N5a;zobe|&gfND;@`wX|OU5EJv%m?!egpc8OSv{V&VL_Ra zN+oN`>j)nXn#RE=s%T2(;4sdo0<@++P)Cd4Miyt9#OvlyB>1jOc*pJ-Un4lR>USC=F6#}2N7#`OQOg@z zySAn5=uTyejG4rR%Oyv~P~1>1R$GVp1uSsKYp9gtgF_FbTJmWn)werO*?%w;Q%BPUw5nLNt^2W9bHu!l08SW2gK`R3N|@4l zy)&nBF*79PAvo{)1+Q>KvrpEctXdmL8~S80jaM&PpAzevOA)rnubK%I88~2zec#3U zRJ8shkDbiLwD4eM($@QX1v$$GPB^ z7Zo}xe)$xOAVMQB!;Ta$!k2%kTrn^`*xY&0Hk({nalVTQ*?rpL6{MErf zZT~!z^5d@E#uNlAf;HRUcT#^}{ZDJ)J+%)u?!4zD`)xvSmfmf#ANFzw<{_*UN zvoQwZ$uY|7b3@lmdr0gttw`1(Lzp9dthTRR{7(f&h}ijlOfMF* z_c;@=gK(3&lh9L^Vx9*;V%eDzyhduE)gtaT`yO!7ghVIV+Lphr8xbzIJwk-SK3yh0Hw7k$bm* zi><_)1{NBNAy1cv@Tl&U<()c8CaPJk&1TF^grQk6{%zi7|7`4qct8HWmw{lb|0O@W zU6t==_{G})WPUDu$q&TO={@ZHczTPJPEpj%5i*sY?E{|lzdB)IzT8XL@m$HSS_I;U zz|YTKzkuymQ#j(;!kJg%_4xicJ_VC{dPyJ7hr4Xm=f_~}yzthw77)BB<*hM-b@5i_ zH4!jip1i1gg16F!WCIZ+rK4FIhl5;X4!6$hli#4S8ass&;Eo*{5|~fw;!)@R#@5E# z<%9_v)0oIwbb1Hp?1`kNTb3AEXPvD9=ib2gWo+GF0`m_N=l^qw{SOk%L++~99 zL|*<<1Z6FWG)r_wvVr(B{l|9z)sGmW;vQAlUT^7QgW6gt(fy02Ch3!jkBHFdL}At* z;^TQB3o zo8U@G0k*^_Mn=3{=24j8mGS(mkbKF`9?R6R5~CB1sJ*{&pGr*ugudItjalY>v-D4$F58;7D6eoY&olWuTuS%HnoRGR01|GWEcYu( zLM)&`d6AKz>#BvCcKU|aw7DiKZT58;(Aa@3O@zy0AkJFPx-RF zPtg9Q=6@`P1Wk4~Xi&dk?ofjU83pC+xr%bsPr_O&dPwIYPO92TeiOyOcVjcg8@4A& zg$fhlrwry7aQy1xe|C%oLN_SOKymYDl-z;a@CAE<)uunPahp0kA>0=^9$s!1WC4>L66pt znR9*A9(;aP+TNVOxF)ed-e2wRM8Wm}>0WVYGVC60Y2S({esiGe!*L0ixblB zmtTQZo>Zqcc?fyW2+PCnU$81&$FMR|TutbgaPre22029zyQv$pEej{RiXykoaX&zY z_*5^eawVGMd27ykkr_6gYXzW*ID;arHcGeC=G|RTtYLlur<>K=^c$yR%GJC+i=3(F zNHLepJksaaLKTvV?iM+^c|ffBThF)iw13td6l`wpZ^4Y-NWFNyA~N5xA(UHe&_XQ* zSi5c3QYv@k29y0_GYP@5>k=GEq=h&ytj0x4)!)2AF8=Em{1(>o->}9(I&7?ObgB`R 
z&eP>%yXS1ZHwa@Bw|Et?Oz3x)@tN0K75Q~m_*n1t3pinuvNCu?%8V4v)h|LS`^HI_ zwIW|m4#KDo5JwvJC$-Gr$)ZHs6g+!#yg{EX22mTmfVRird7qjUK&>{HhsSZc?0hsb zgK?J;zLhvtM*HS!W#f_=VoLmoRwaXZ}gFFEf*0$1j4;kn4Vy>*7^`OV$MQ4npDMnMR8ZGy# z7XsVD>59N-;(vb)NMo(=ooejErkWj3cI<7|x+Pwh{Or4Gv1MHyMIiymh^15RPH|@l z{m)4GB%o$Vga>bvpYLG_J#i-4B_>2pg^fpr8$`jNa zz>zwmhx=k!x4<`1`1oXus*kf#9e!&8;IEfY|+EeD4`>@vg zC8giWP+7Uq3oDArD`8cxWzz?2rX5pNr0DNTSQT@V+dLGzNA}h<{8?)uolT^B<_;FQ zzdBrf0B(i9KttVKMHtXvPaKl}gmdGRqy0FR-4@-o6cyhOs$%SI`uIpj z+?J4@^>DFpdexZ(QGzJ`8w_=!2ya>XXpBFN<3Ez6vZ{4mr#--%nv_< zfKWFcx27WZ4(-GpzQ29}{MH@<-$fXFFuwqY`akja8-9U&Z@aaFQGXa>^@~m~+7$-{ zK}Wp)vNkgPGZwdIG6%1~jU`9c-snVz>Y?OOPm`HA z-6lD*i~}I@8`1a-H#ts}DBgZ|jIRXs13$U)0MbMEcGbE!Px<epebV{zw@z)5J={Dck&JD1^TgdJg;EKApUPJ#^!CE82RjdRtE zo|qyr>ePHdPes^B3p_*MJ9#bEf_M>1Ooq{2|(5%FA!{rKn+GwV_xNKI<^QfcqeR0RVgmV7$t*0C|8|Y@PKGAEJ6U0 zfnrxc{}9cP$3K^F=AMPM&7Jt6EObZy2wv;^fkqL)lrlttZci%XAQtJa{wXB_4i@WAy8bn&GESTW8k zk84Y24A00%2Jh+>mgzYt;%=S6+hM?lrZr$JtCWVF&Wvz|Ai!RTQ z#@;ZPXZXC)0k}f#R8uCn*?VikK9W8OBZv%&XJE}XQr|X5k4nw7*V@xJ2JiW+@7>@cv?U2PyFS>}kK??nfETo4e+E#Iu`=!UtZ?JfbHgq7uoKywjW?e1mVZbRcRcfcAv+;7KQOZ7p!;l zKbarZ@A;8F?EKs*M@8`IWsQ8-N!qg1FTa(B8k!QxF?J-76W4dL+(r?8e)jqWe7~B) z8S_{S7+g;E`s4T{jq5T`Eh~Qry;?^2-eE2DZMS|Rsw94~h^J(aPNOI#B5?EU*aKW0 z@(+}qG}_ohc+Fus$~;cz;_i{lt8i_Eh5Gz_$rep7B zOzTHpz}vdMBp3TpC(TrJ@+FGvyc7~6xE?Ka0vMDf-ts!bqwzk^{BekjM?-nO;}+&E zc3PSA$!l}brO3d`&d_ShI3F9l&H6gy4{vWy$KTmeS%@#94n5tq(=Vg{-{S-A&;R4K zTLyxa@7YiNkIDxtIBldsHMhGeAc5dCLy2>P%CZ^$b zJ5-^<+J-Cc6zf1C+xy@6In!Rhfd3a)Aml(ARZbH7h~?Hlj*#_g^tnFJJnOAZ)|)RQ zkks$856!rk>n3VHrY_)ZOIi9ozW@vz(Y86}GE7f2OQEnOa6#cVCPf}OIR$Y%O^#3Q zv}#EKAaEbeSuroK#;FG}Pm^@b|5m$Q8`>K}VxH=un7rqr(EIG{`UL}Vz!<~39*>17 zn-YtzHmwj2%^G`|%hIFu z%P+-j9s_K&H~h6HxKv^}Zx+>_vrYXVE6gcGh96;JNL?;$*oOs3dx$SJw_O%EM?#-n zQBvKD&+UVdeZ5PXRC}PfmFu>{m!Rzl$2TOw-`0L-1q7?K=UmjkbL4N-`E}3^@F`#a zosgiZ?*TsnIJHsz++5!5PksRbL0&uV6~ZH^;7|vRvLdy!s6ie6L?3p%P7c(omsdIi zh?cIYn4|4(Q@@x;bsm 
z476nAx5)V-A*9vS-L&GK$a$85_BgrBHx)g)J=xWKSs(HuvsMt)lcmu;um$)CJ_|CXqCVaa0`UL{Je!=b9QQB6&MIZW1 zC4?h86%kZ{GB;rNFEEU`!gMUvQu^M?PGp_OM%`^vC+;+Bv#;DAq8)fJtpUt8_JRbXl?fTsl*Im5iAP zKgR|w?|Rd0>s3k17YD>z7130)E{c=}!HS^$l?<-&x9?eK&?xkRU7Oa8!8oK-wvSG{mHq1%{1(=l-?6qkY^>u5g{rYjB|CLT zLUE^`_+w;PD;OFY)yrgtSSOlXsXPxK>%D$~AZ${0zguy`sJ?Uock-O`JX6JM+IMCr z8b+=oP!aE(GOR8fvPc)Nn?MCE0VVL_kHrl)m8-rY82P^mMXQ_BHj@ruE z4u9`o@LN=CeW%*_u&I_3M7yHOmUb$7V}P^5v*CEt^@fKJnftDWn&a)1hN79lryAxL z2*FU6AGjlHUpg46#F5_@F`-!KqQw68n_rLsJUt!$0HH-MhG+KGB{rVqE~S?DH{+Y5 zXjT0Mz0YR{RRNaP6j}yyXna?8aIAC(16Gxq=CvNA4eUA2~(NZJ6dJ^;4@oG-EParoa~1Je7@h5-%n3-0_UoZr7)r`3CvWlDcJlZE(# zi=v_D*^i_rP>QQ0E*hhHbZ|m?)--w@4)a%dQ;xr{q)9rKU|)xa$8+S2uLMKS9oeVj z*Wo{57U~zAK#ADzP8?tTf|ysfNU!tx6|#Bcm9XxtB;++ZWgcUjd+bxo%^1;4f8~Ji zt#9ENU?IP27A`-1S#@gr6AH^!BZ1an#_-C=>O%KfI{`ny(+p|M;;9C0HJ4NN=X8ha ze2-3Ga9$c?)AChOKOruu~<2-{OB?K<`njPgmZu77bE~GEbkpDSDdM7bL64MgLpT_sS3R#n9c<(Dgm#(4wDT zpdUBU5x;Eh4&6~hZ3c*m0DJ8g!M4D#f}c+f8)>J8?Ers03v68f$5BAgZUtDapBB64 zg6xH^;6o1iehNt8?h69D?rzrs>K8;GHtXUfZ@rdfI&OmEN`~}(B_%G$efS`$R+8MU z?jK~230#KHI?OK+K8RnC03fnwx%eKt@Q~dlzQ7d`J^0lJ)|j*JJr`}3)%-V=h};44 zl}`K?wtymoYfD(|PDc%O1G#Q{Ee*ced2;!(pRSb$6mghez-@TD5KnyPv^#$H(3*>0 zQY$U;nXD!A3k|y976jzeSO;v+jdO>NEd@-WGqIn(i<|c#*GVY^cjE%%{m%T}@Gf!O z4WQz~`32plkzE2wg^iCN`blx!aJ78rhm4PmH(UC-)b8qZL zL38Gs9R)8Qp|FoRB2KPJg90U974Zb>OTY{_z?&NHX(HzO9MXn~PVjN*BdjW5RTh<- zw<^?pK@kv~cm0Ba1y6)GS_UR*s2&L`~enjDbYPLb^L_O6EG4qd-gvpXR-O0q7UAJ4k`O%#<>=OnqX1Ji8+s zYD4@YXEnd{b&~k?h<7TD+!#7K6-F_yWD+z4yJ|HN0UaT!XPr*|wk!sh?vhH9DTyZ< z&7MA^*>+yJt37W@77Gx|GdtTW&eSZR>t=&j+Zz;9tfJk0`Z16DDc55%(po;y{J{JI zJBo`>MZ3mPsj0iHM7Y;(R3+54_8GRkM#3AA z1)o7!<@d}dJeS1rQ26K~fUfc&l~PVqxRvRo5^>55N?tF8;v?ha4=nvIA@05cMZiT> z^f0&Er(!=}qh;J^DSh3eAFnWPL}8uPFB_N)LOu2WDnDQ0{r&fE`T5#af*$%i`S?K= z-S^WV`7!vOpTfh=PX+tgg_DN5w&$Xp=lK?1f^r@>UC9)FCu-8thsGZlEdf72d;J2D zUrpgeihtHEViLLk$MI>pF^Su1r$4_CwmgWIdzqiwbF$JG8Ml%wD0@>Qjk6yN=tqSR zKQZ~lCC|QHNQ6pI3-XwvAZ(Bow*9PDT^{LE3xFo?O_8as&Q*Y#lUlpIP{&7*9t18A 
zKf_2rWkgqd6A2n0m|t+lbeli5+CuzNE&F}sPsjUXQiS@Sw%fOCAN5T<-EDL8fboek zr>e5pPPu?hS{9j#0A^X5%zm|PI*+tqgPt~h4NC)HwcjMaDu|2D@PZ_j>{IupTvX&@ zzGm*4CxqAqTbxBJ0NI;_;vHR-CuzenCw$W!$3qA$5UA|y6yotL`!c+T<9;_jzr`sC z2K1h))cZ2~-wVIR=gXns|9*=7kATF-@Oym94>dl!eu3+zoSj)s$San3pHpfS^9DmY z*D!00owe7_Y>zePk2J$i&|bel^cPnkqj`ZCKuoT+OyLVdf!LgUu#KY+NX|FSJ;osPx z6a*yFu?5sVnFVINHE-1kGrW2f_1NmXJn_;%Lf%zc^VV_b*B03Q3+}w6=9JW)dVGYV zdXd$2jdHjtk4BCDWfa9xozMAB&klNL!6e2V%pGa9OkY#kurnEN@@)|f!~Apfp5}Bm^&N#PZy>K^p9^KaU5oTc*gO6bKttB~f zpl3Y_4P$4_UF>FTa*e>Psc~k}FuNQ#Iq@1Ert1XeB7gmY{doSl3_`cjF5OVSpyROV zHq!#z^%mV3^CuSEsHuGK7Wjx|)--IPWHMtEJ<)mg0QWBt-}MXZZ}~qg@hlq}NzgdQ zImqa3JlsqI`=$4lR?|oA@vH+8cJ2sv=hP_8-N@zqPnBY5J~E(OZ4@*EocfjhGh<`I z(S5SH4Ag*_ub|$-K0^ z0T|VwqKw{bqWWs-?rXQF){d&Yc*DG6>mXC6up@fz5}G?9z_J>vbt!S{_%VdSO1&FB zI(WDU?L{waf#~_Zy7d ztZFndn9F&2hIM0i26;S)H6C~U^$Ygn_~$YR)h6Gmo;Yl(SrEw5RfBa$QVWZ3uG*a8 zZM1Bg{|sL7?^m7ghdQLiI63KO+iY~pdX2|wvVwL2bivTI#xc+7bA})zHrtAgrnt3z2&ZSp7 zOr_TuR`p{hFkz2OhWP~#)hE&va13w?j&=>6I};I>t9E3S_|)Bv=vMUP7^L*B0|I^9 ze9~&RP*pDBB#Jj-gg%8~cTUY$^FH$-V2;W08gE{-NvJWLXZaqmM8B#(}PXP-vkCe4`#T4#Is42D4%sSolrsh>leTe3GE{ z(@H>#-Emre-lug)s5my`OdeKVm+4d4Y6YJzZHJeT!R2rF{smHhbgO^VA?47NtRQcs4KA6g_!EmOp=5_CTv}wv4p(y|M9WW! 
zRFBEQSAcdMhM;~y4W@{Yk@@>&g^3aO?c#Wr(tg?7_jBqmeeCB0D|r~%9uRh2&@sj{ z5f|hF^J8ie-Cv14sr1EqNy2sJg`yYztpTJw;5_*bFX|GN5eKT;Y#{h~H3i^jLSfv% zkrn7=s6C4Q2}qrP%+fhgpd0@|O_v&9-KKPCX4*`h>!sf|ikB+gnXkX*+a19MA%J0Q zfPN$d44n%4J}v)s?Vke*)X<|q(8WJr-jE`E$PWSWkNog-gxzY;^W#HSe;h>rx&;b{LlL=AevbwnhOU=$_sZ$L5qC zXb^J`F4CHr?F;}`*@i~~-r`~;+8Q~(zmk4?>uT=M_;Gn#kF!Zc`6x`KfBk|V2>+K= z2;vrB+H;6sfQx_FjN2HZ#uCKfZWxwldK$F!Op9mNPm4UjS*Xx+K2B2M`2jxTFu&je z9KQen%us~+JhYD#o9T<7PoU1KD|R}sx!z)I1Qmrj3>}@h24LvL+;+{deU&0eV98tl zw&FEcY_;~s7@Ba}qA7x4t_Dbpy*od;W^M5>r$3=oBE`gNx*=xbi3z1Y-zhonq*Su^r=EsLPx!#b<)#hE5s*Zb#g4GE&Hdi+kEX zbXKO{x=OdupnJkq%y?WZhKTyLfouGJbl=)aWQ@=Zwl4I>OPKZ>;8?t|^n|3KeY7;-!9 zU(D_x1y;8f(8_BC75(w-&dqC$_`bYEDmmnz$wx?C800>s*vet#rUTwxM%W2onh>m=M_4s?uZ|U{GxrDn}u*5nix+%{80BeFaL8S3s+W_>JAG>-%S7!1K^Bdh1_yFP};bRsqjmbR~{9e1(f>aDJ-9|K7I$D(6t~j7WccmRo!vj zefXrDy{n|iW#2!E&p(m?f|VTf>hq7&eJS`7#2@3c`z7Ja{+~Af?Igb_hg^NGe82jz zA8LGd^#avYT2w5@PFo=y^YAguFw1J&kkZ!ZXfGwg_Zwr(rEKsMv{x^X`^5zaIS|Do zX&Gaq=+PfX2su`DSn<*IggXe1Jk~SkUPx7H-u*x|DZ1r6qd(XJME$N_5We1SjE|L7 ze3^3d;Y-ro79~O)))4ECKx}d3y9x#ZIsmPe*k|4?wr)->^eB6`{A%uZ!t6|ZJD;)w z*4l>K^OvDt%wc)~Nq+OQp5<7#Jn&5-rS?bt_j%rB)hf};iEPMadxCj_4;VD`cyp?Z z&!>t~Q_8BZzKJHbutK><7!-EL#Mxl-T1YoP5U7q+#d+4AV5MbZ(#(v2qTlxHO*uI> zLR5vd2byn{Y5?a-uB!}FZwoFI2$XeDHHy8_bMcbsL(%lON~C?s^&`Rmji7<|rJQ=- z(tj`f8Z^+qS2@@oNYJcyg9gzHaQP24Xplir%1^~jdg{Y%Ncl62N=xi9U0Hrl{1p&R z9e=V19J&$dMMp6z2Ktu zmDLP9kUjG{_=cy|?N`e0XJU37v+aYW$`=c3%eM|l_jr5z;^II*%ks`U?zAjx?Iq1= zeDF=7(7C(5;!Dr52U_=eBqbN^=uN6E4Eh^og{VoN-z*iqleMVeskRd;0=~ zUA^F6CSnl*l)`xFByn3iB}&x{URu2mM>AEe7Sy{ZUFwle>htQ?1I$buf{ONGQIArj zlj%$f*l%{DA*UJU zmQlJG60X}SPaZThNoW}8UqbORdujzJ<2UO=Qfu)tr|x=7Yd4m_8~K{iMKM4cXWR05 zMW4L|oJJcy6#x5cK(My?j2#1e% z1kK@Ny;m<#gw4tBe@}UcJL=%8qfM0Z`U9bYa}xTz)6R4gYe{5JPt}Xr$39#;RxN;j zZX05ot<>U7nmpBwTcK6wL;Pm8Q1Hy3_|Pg3i5(+?QVr7!uG$|B*x)~}9}xDuurc<@ zh8(Jt17p)j#^-J6*Uz>oW)4U-M}x0{Zf8fyOVG5nE?$R($z8R$xsg8iK-7>*YtH#V z;4F%G?q{4kg>~S^vs%eK$w1GTse$#(B+E-C0-mXu+`oDO{M13Hw*5}E`e9R@e=#Oo 
zu#9v?#LZ1KlA=RYr`dGzon?}0s^U}^h5Bi0_*BF6f{QSe_rJf4583d+3t#U~y&@jEgsk>qt^P^*Q)T=TO2{C%t#d7lWot=yfS?>H>*pIcr2L#%| zWtdPs)QK% zK~GL7ukV2HT~1TtPP*9_jgE&*IC}$(vsw4S#*V^xc8&q>^M`&!PhgXOwV6e1t8}OH z6z+s@o#{}X?|p%-YX<*8Z=&h%RA zbqAV&)^BoSYH%k8pmL*JPinp_-2=eoZ&xo+`a`{-Xi>&(^dxi5h(RN}(Lx{O0fP>P2 z)ay_L?tHmdc``>a0*e|%lIe*<8@k1j@lxL#+U?)c{xH=~{pIe!d!Rx5Vj}D*hHO%Q zDcUn?_>YR<-4e(fXm{<~5(yu=N)21W3V#?7{9zTk3j{SEAPd+F{IjC)ZM^^MUJzve z5lDT=3lU-$?EWXwQ6Md?!*16BsuwsOHtSufl-IM7;tERBtt~`Z)h6hUC-fAOs9oXH z4c&5}D~gBDI!rH6K8RkB03__Z)*E;1Ox@BtBjFjY4ZWOjmIWqawW!(+!eVSf}8&0{ZVnE0_vD6$>A zfS18qQsi#bMklirZmZyN5{a2m(amJr0RereC4dV%|4 zGai+B5mg=k_EjzNY@xcA>&*9`u?+0UTrD0?5Wi)S@a#8!2n5p$RN&|Z2>_*>e3oui zP^F^VXhHPn9OgHvFUX!(CakXbJDy(b9_a$ifWpM~Zmj5(Kq{@v1YWP7kLtfvF@0*| zr!Z?BWW}-p!Fl)g1uBa|%)N}~vdHsmhF4q2#AgPZ3Y)TdBeGGleChw?(E^JA;3^_9Dez6dt54$FCUM*VpEx7g6XJ_x!1 zLl?jO0wF=G!WQ3;q5i)5M{8flo%cD(p0;_P8ow%n;Ow~DVng);-@~R^ZkB9S{dx&q zdu&>1Hz`)8?aI~y-eoWjkzbe`GX@a>{1$uf?F&?YF}s5lsNcCXoho*(qK(_x`tuSc3mCv3y*hlxhU9b-UGVdF4CRT+?NPeh)0T=ie`~JBL1S>k^{D8lF&i?uN zp>Ou3-hY0B;T-njnaF6gY)AIGPGXRBrUW0cysFD=8XXtmEFW!r7<{Y1RXX4nNz?3lg< z0|iBzVRU-gIkXy;O=(OL1r0SCM2god@6mQosi>q>Zt>r-D1G{~J18e@h5DUVg-WnNwHUH_Fdu!SlOfRbHi;OS2@U=P~ z^yxf#gyBr52nDRvxt{WbG7Y>K^y2iLq`YNy>_npW4Nh!XfUoIML~n$qORFhe(Bch` zZUR1=ld_n|NUyEgw%mN{5&`Th3@iB8+e`-ykwe%fZQZ@gul4E28n3VT6PUMXIWqkF zwS@mDiqn;zb%ZPe-^oJ*FSja|vWKx?G(Dd#W4E zFYumQYTCXfzD~Fis~(f}6d_YsIyoe9h(vuUhKfXO+2?@$JfY_^$+6@T1Wp#^&pvA+ zJxeSeXB?9beoN4)Zjtaoa2{}bU3nMdUCLFoROij(XEHUHHgBph-HEc_;!Btsb~3R6 z29+boj=t;W&N4QNlfckXaqpN?8+SFowJurx;H=gK_5Tz!cmRW7751ct8s1UAx&PmS zw&&x1JR9i8jc@DJ-_~|lAVG854I0!hNIle`K}JEzI>CzUvF+=jGYk4y+b2aU9(~Lo zK+b+axd=8-oG}T3pE8(Vpz*7VQ$Zeb%!6galRx+c0Kh;}VUvNOJQ!a77CGsI1MwOY z<&EOHa;vU)Ty-4uXdr;UN-{D1E(O>tW_tM30`X!j8if*p-Hj!NTguzDJpNVCFXb@5 zU>z&OS=pjBdNqQ0BJ$z8wzh@23*iy#S4$sv$&Ttgj65LSbK=L|$@W}&kUh|(e^hG( zxBS9fn?WMY)04B?G0#evOn|afCtvx^+CH@a#NyBKhP}@%2Nf0FnFKx(AD{Ek-s=79 z7wkcKpG^qeH+JcU`UN?MO?Uov&xK~b+jiJx23sY1H1%SQj!y_mYGcr=lh5OuJKlg# 
z_g=q1bJs7}|L)t6nb;XU9S1=b^uo~=P+|am?@6mpa}#F%B6)V^q8-UxtVyB)D(!11)-b=I>YXLQlBqzrXJJ65r@LO;G-#BT ziduk!DtI2xVSIt*fLMQu5UJV1nmVP4byU_qH0VQ33P(@Alxfs6Tgpq+=R(DSR4a|h z#gEY(DNHu!1o_O9y_<#wP}Zd}tl5GGP^D8|kPAlWzR*wk2N@lF)& z{AeS`hgrIlr$y7_o(LJ$clpY?c|xg%`2`o^TeS?(sqP)~-qe#+HknZyqfpt>Qa5mk@!qv`^<`8jm`pmLe03M&eWLKm?I6uCT!rj4E%;Q!`~qzl%KP0^{@d*f0AMV=DucZAbDhch4gu#Z zIqO7N1b&N+IJ0d_wgU~n9s@vsx%pmW)f3w@Sy+yny*((`qpY6dGDF<>6W&)u3pe7S zK*RikoSSyHz`F5B$Ax)#Bc8AlM1IBy4%xWku64^$ewj_t?SMc}N7v>YJ?*Hp6U+H7 z=h}mT&g)@9veuhib-{DNYCNHZaP9>X!^ z;mOUm*C$udY;(SF%cwqFL7aRwAN(o8tPugyvzFsROhY}Irywfyk?=*@+DDx>G`x@~ zkM{Qp06Q{FKjc7O;%;|Y%#Ug7Hu7bGLh9NFS1JDb1@N;1fzK5N->zQ( z8CipOjPKV9XmG;~;qOjL{wTB2%QMk$CC6p?qXxH1Uu|yDS}y}q1vETPEiQ2R+w}`{ z{?ISjzIr}sOS*kg`2E~AD}y{Ya`95s(FV>Zv?smQkm?wp0}wG1aC&(;+2RRbW@r6e z*S&XB!ty*zr|Ye3F)p1{5Qnz3Lr}kfulm?DZ;Um;k>N(#CB?et%PIt?r>b`%0b=K^ z>=+OERV2C9_J@rmSU19g0!-NaGYS zZ|wiIrTt;T2mi~Ta?kJp!Ixrc===BTAm}CnWC@18!;byF6I%5BJ^`?;pBH;{fuA?2 zzt%;Ee%|+e=-*diHSl2{@parC2_Xcw9~}X9{MY}@{u6J|gCMZG?RFiYe!<9Lv;Ntx z#Ett&F!SV{th7?wle0|nac7xB1l~T+EejAw=lzY@DwtoOdoaJCH0d6j`SO@puKq&q zY7q533P&V5Nfvw8;>7dK@A=7ifg|NT$@eWwZoYn)n1Z&G9dM~#p~y_RdDi&UOL>KB z3#?GYVSd5Z4cht#mjh>3G^`r4&Wdn)-#_AS@dY$j2>|AWn)(0?LI}XVcj~Xs4n~O6JA4`rXAYAHqWfKE*4a zifSBwKRY*^|MskO{LWIk4@s&MjhgpW1g>P|bwE^vJ-XDq>r=C0ybV%haoW4frV8f- zl9l|r2bDf;#s@<41M>@bd;AeL?-wk!3G`-?T)(nzr6Qt=Xw6G=s>m5*%xo6SqJ?5?DY!_el>-IY?Gl!6lI9_$MJbmC9gS9!$)m%lg?k+!>{JH z=?LaFt;msU0|oE5}Plm{kModGil`7zvmYq0z?@xSoXGW z%LpeQialue&y!4GNt}GobCL67r|T=*5r2TH(HRUz73Jp=GSu#L;o~)JpYn3i?5nyU zkfT} zg1@f();mEF#ul ztO<~-D8{h)*n$uqIZ#88;Jp|*O+yO*I(2>B5%L6wUtOHsQ;%lpF1S(r#kUSBVn>oxM6xhL08#Tr5I7;D1+6mvR7&s+&0JXk0+#llu{Iv6l1MdJRsbj$X?Dl zD?GB-XBA?L> z3r;IyzW$1qzj^_D>>+U9-h~^Y7vP;ZY`BAocFgr1J_fuFSD@PLWRcuq2t955VUn+O zbn|F-TgWx|aPQR%jCS>c{eK->jtH>R`+vY=pRJz*POU#T`KWWAv8Fso$XGssWsTb* zowpRA=oL7l^CbDgOLn(f`kbhO%^{!X#zGuVFZSy=5wNC3La~PF1!5OPUVLofFlmRlih%7KJj3Z|&h{Y@VvXLA zC8UsoFkp#T+9STaBc;;fVYO;KfW@I;3^rCadb@n15-rkIdko2ayxric6fSM~q&9kM&-?z!)|s`!6@;00UM`t51g| 
zBpzDJDQ0w*naK=Xl>DHxAvLJU8E+@os(%|0q1@tns(1-xBEDRR)y)w5jOMQ8)@d~U zoA2e0YhU^ZK&gi51&*fiaZ@-!XXlvA7C{VGDG}aGRb=%}WH^GB{hXAMwGT-3lz^8Q z(5-4@E4?vN`g&*Zic0A0gOv0Mq@L{0XP@yP0E>1envCU7Z;3>izAT%;QRb75T@>d# zxx6HhtLqd~=<`=E_${ivzEgeXu&JK0oP3@`CcEJKU=_DkBOw_nO35_HICe!naV<^t z?CE*hyR)F&z7CsLD`)fdYAKoya!Jc?R{|V=G#t46;2vo@% za#+q6L@~NQ#PM!v5c8eA??NB7v_OUe=~&PA8yQMxVWw`cqt;erxx^0h6WZb@RUjO% zF07}1N304LXQ*C4gA&2UA9AfsXL5<|a+hi|G3$a>$Wx53dVz(pXxFez|MAgF6&T42 z_ejMZ2(TG0bmWefcSYQ&zM67C_#P1^**s@;@b)<0sVX7*ES&XP^K>ij=Ko>uETE$5 z0(MQ8AgzQ5(j5{aC=G&uNT-C-UDDvth@^BML=li~rCUl60g-ML6%Y_L~Kx zb=QAoEl=#T&l$dt=iTSb-p}meDYHefz~{*)23X!o&Pz)6#?f3hmFA5d!x4PLoR!nq zP}ru|dOm`HGWeff06QFD`23;pLG=P`i9h1+H>NL8CYzXU+p#v_e(pe{92(3Ud;GLr zF*k?k%tXseb)R4}nEdVQ1qOeq7lfj#&<#*^>;^V$2nL%^GjjTA*(SHD3vU#N_=Zh9 zqy+GUU)nI-y}sB_lBSp;?FqV!K6bHFU`vtghOtbm;3S%3+ zp`C(XdPX3fhx?~Psvz&z{8~_e6o2zW0p9!Av-tT$NZF^KuYSWXF9SXJ

3MfB$6A z$3lefKc%~WIUz_VM!^1~16VJ>zI@oM<9qBX$8)@BlJHW;VIH_9{m3@|IndHqxhK2zS-d1hA?b z0+g~XZZMx9QoGKH+Aks6fbf(oe7VVIS(qx?aOR(0un+RDmSDther(UddI2_g;;LWO zh8zp{RN=RDu`LWz!|eBKl~fTb;-KdeXCl?YjXm@fHFCMGv>n0L{*8}HK=lG67T=~%N7@3;q$mdCU5s~i%d-A|j8Z+!M;k-12RY0|lTO?Rb?u|IDqLFTQKL}6s zZj>h}_VSgENZ~C-iUu&w`+C6(@Mn9b5+kQxwX$on3S(p>&+uXdaq)+d*FVusjTb#} zz!v+Jc-@E^-10CpPC1?sDVbghj)D>MT_J|XRan&Y<6Z0Wi*i`zRffu_m(=3lWkl*arRO{4=Of{(`SKAB>N8qyt35; z=PyTW?8fgp_9xTv%;&^Fa08aALi%N zD+qns_~{y)pIcw@V|m#5F}Brw{g$W`naXd+bfYL%GzgyiDpgYl;c8gKLnb4EJ=ppA zUN12D)d)_2R*1Fa>Wc1P$A_Fqmj>5Mm7TE9`#zBbp91nx@#j%*up^)1QJ4$%vxA&7f0!K zJ4U2PTIViW!{e%{%cBg=fJzntP_S)L{6+`y^w?sjBGs*M*4XLwjjhVrjU;MSA$x5xPC7 zUV3XMF!4k|n<+c%E5mh$Wm^{>FMh0+oiWF-RPgg19NYu|uOah53RlE1?k$((`B{C; z_ZljJ+gjRVJM9CNz2a=fAb_%|){#&-=3MegL9@nNd#G@%Wx{aNQKgC#9ul70FEIdx zPBp;xj>iE$jmZO|bFk2Mec(lx!6=(a0g=(F_ zc#5H2AHfrYipZ)v<`Eq~4%%lrL7%sNXF~{D>6bzKoy+{V4$PqKzxtPxf0^m84uXRg zydN}(Uf^}8K?C=KN-m-d$Z9;3n=zkdMc3e(zZk~RUSHaDbA6L(__Mz>WBk3q$;K@5QQ_5@%-w@N1S z@HF$0(J(ZBOh=O3oaev@4X|M_?&R1Lpah+q1L_yBWlo6M>err6*Ni~-Yi_o78t~6L za#f)?jJBboiLy=ofOLmdG~05VQ#E>f;*^-nxVdFzJic?rvqytxXs{%LR!esP(1Rn7 z!b>$bh*0I{$~%P`7UOeE)%CgtWEhidSllTc{`m!8X#d$3nC_5$x*>i+&|%Z9rD`M1 z=-d5*^Tfo0VmuWor;ue`{P4Sh)yIM5b*o#su<8EZFEHQt3x53N-nH;RbbWmH)8$CF zkR3yAK0!eysZ%d%wA8w%jSwX#6W*040eTg!>^8J)sY*0q33F&Q%%{4I{kSf1h9o16 z5L>vW;XoQ~s9%87`WlyTnPcuA%H=GD>?UJHMo>%Lcz=}qjp;!$DZYb#G%>gc9t*88 zr|SwG>FW>*w>j-t_Q&d|*kg^ua_s#eEpMRUIOUqhKp7sf#9JHXnL2i_>eE?rTJL1e z*C2=NDHjm^^9z0k^Otfk)}ddpjy`OxDMaGtvVwON>4EZVg-6U}<`wUn^bZG1bF}#L zN@faj!p8c0zrX@IC_8{?d{WZGv<{@0*OcDrdCu>RfGl|W0uZ1i&vP+?N48!*{MP${ z4_$j>oF%uO^#%!Y3Ota_Z8iu32u&0}Jj14@;}h$@^G=(PBCoT(#^r65?Mg$`bD7bj zw;@zR{Q`|@xcjzb4~NNX#Dk@5WDL#kh+Mj$8<(@?xD(uW@#upCHr5?CNHdbtd+YCQ zZ8sLPzSB)g!r6W_V8Jy%in!W)VeTc6IwC9^k)}RNQBStGlhJj$NZ~4>ayAj-+>^e9 zI0dVwe}2IidVjVBraJ5k)xcp>U3O{SV&#d_LJCU4>wpxy#?Nw)%FLWaVM-{w6jeQ4M}o^>@_a90=$JH5zZf0QZ$PcL2>aP(luVTUJ4^IO%#yp*s(u8_+f-of!P)X|JFtP5;>}3!kw|0vEM&j$zX{tLSQ4t_5 
z{sY42X^|&NT)}E*l$2;&i5ru>dq=k8Eq7LzYgC+CfwZb5(D*zFpKbeT3f_)bTQnPp zDI=3TG~V-lRIWV>sps1G!+(AO?3jSzyA6eJ-!CZsBmREFFKAZHpG!PuW;NvB>~>=^ z-}gNWUb7FsyKdOaW%dDf^Ff&W?fV5*f9V%!7FluaqP(N6!o}%{;W)3B>tqv>j9s*6 zNt8Es&x)4_xIQ2I_6$SXn^|kwy8~F7_>>y`ubZ>Cyk!^z4ddg8cpxq9IOIbT?YWJr znHAn^ba%~8`5SbM zd&_vyB5XaqoueIdu%pp!|A`NyTyy86fkNclS~e z5Du3qsT(oJe*?KVh~DzL&1T&o?`GmjDpznz`)RyMX#br(4jjc7K)xAdf*${_j{-c1 z|7B4B%XwdK?OzTJeDceIo7!O)pS!twwz|0E=D@jZ))pADgPNS}rv4)qHHJn1?<0MdM=!%FG? zh?e&H4MznB@PXBl=CH^vnj>KcMEt7v?sc#HggYxuDbB8R7xh z^!5YNi;4*q%X+?%jEZHG1;}EqeGIrA1hG}b&v{roE6Yv)`2}Bi{@E57@yL(uIm9n` zeAtY)Il(d+pbYcrxo&=3s*ug$gVHyXw$0F!aOz#yFG{c1vW7Kf`x@}O^eQ$ z)quirKbA46>QIET6O%467->&0-zhtH-T@#yIqG12XE`e9g%M52Zd!GPTM{srlhf#d zYE9ti#AXe~dEYNUd%nCxSZlEm@Eo`LQ;GHgi*gP=zFeaeAmT@`nau2mg zMVQ+uo!~geCct|_1Xnb^@RdWD(_HVPc8j|X=h3@5KArKSH5lip{T90|%nQ5cuxW0Qlg?kE zUS&p^%guf$X5Ke{8IvDgRxO~{NbhJ~>FpTUG=J|G*#2U42O$V`BmpfG?zrGzkM4+G zV(QQlmNX3RCnC6*&AE6*f}eqjKkFGnI>u|WaehB|!2fbAE$tPjD+Zamff#^kL)6HT|Fs;`8Q^{6PHz z+r*=kTxO+?Qb|nl?*_TX@*EViXMBx}1eD=!!c7G=?#`RFS$BSl1Se2FTX*j;*4ZcqbG{zV8s^@MMud1qR{n zwp(9RPR1~rV{VLr&#Uu9Z;BPYgsgvoI!Dl8DEl|^^IaaG2!0`VpGYC-YJd4%eG2HI zzhC?yKYxEw$Q8i(iT;=$u%9!0*!dypjK&BdYYx6wFeKptVkh8bKWd91j!|SId0Igs zUMK-}e!lk$?0z+bvp3?pr+1`3`mf_dyp=79K`j`Bd=iIJ>kV#Y*%qB#w*T!m51vQm zQ%M}mAV9k1Y~^b&^2_lr9p^(btGwzJ?_cK@9gEYUxTj81@ACn0ru9nq*5+!pdg89` z>4iNVt@75%ioI6L2g6f_{4mP&+MVbXWuU{U!+S& zKli9r(a16&S#BaLpRhUq{Hu z#Y~jx84tNcA}jKV5cK)mZ?TY)ORnLDt~Z0WBktV)UB94F?gAOE1xcH2T%Z2b`^ioh zEaz)D^jG$Drz6w|(UNrm51fH^(mguu7u6#KM7i5UFA0aP8^325KV6&Gq}xwQ1$o;7 z^$Y4(oYJ{cbe4s=B6azgHfnfnyvCmwj*Es zSMn$UTsh>4_9%5hBdCqX1L9_%WuHXjc@`M1t;pKI(G^n$nM7)T{sOAgx&ov(8%c&@ zL_o)@C}TK|Tv=#4op;{(aUc~smPXzI>2~Z;$W0yLoChi2U7-{;TF^}%w-tuZevB!m zfAnUFn-Q=ij2CCHefP1sfpXqvD5n5g%T0D|95bLA$)su#`V_oG%M|B57k{N@R2Flteb#>=g^LHE+>u z;0g=FM0dk{HV6Haf(=(GdJoX>PNFO~v&OyVPIim7W(d|$zu-u~YTCTq6xaB=y7#ug zz|ssNxr08-x zx?J}fE9J`cZvM`ONij8n$ozQ6QYe?kiN=bnsHV4H2*Sqtd%wUDIwQgu4OyZPNBZ_#&9&F@FkT^`l~h 
zpl3l!yn$M-EN{y2-jHXgR82WTw4)8R#4-j@aG*m$j;ODlb_9n0?64S&VBa3u3y<7lU6Xk&(=Qz z;D1{g80bVO&>$}yqCdiU*9Os2Ca~;wzYllxdlC@Pr(Lg{weq_8wX*bSyePRw@RKzN znQ@E!?7I_2)#|w)b`9k3%bB?oYS>M^Amt-Edy3*dOq?Np0X1?Y!L+XE?D?bThL;tQ ziC4SwxRHYjKlud)N21SPs7Oiht4~NuR;VYguvSpgt(C;9vbZl?JH2uAfbh*lv?`Sz zqcomA%|W=vbH&HbbStj~(^sEIS|vA{WV0BsZVqbB(cLAz(f&C7k$y!fTX;@gK_)`G z$18Gpnz@mpe|`b%sDR;1g2D&&3vej^h`-NVV(HP+5+u&gxn*o@`F zVO)8xNeciMLYbIFkD&Ul7r{5t3nnpNa7$gUU0)1i!F;Q)_^O!eLr?i?@olNt=OaR& zw_t3*gJDnJe{=x*1vubdWEdy>;b*&Nleh(?8~i+rtAqL~o@f`pBt9D*kY7rv8O1A& zHvBjrW?Q=s^$T1M;uoX<*f;fYYxI52D+<}&P&#=t@;1ooCXYn2v}v@{jJ!$4F4%EUTE8^2zDV~T;0TshyERwZ}Ju!VKWZ(3tVCN1t|bR?Z_52%|+Ul zm*)Z*+pkHOaNJ^X4<+uycgZuyLzjvM+!!{+u4CtEEou5x zpje#~xkCqF6lT3GcTK#kmt*w!l?f(BELyLB@PrJTy)CD{Xb4N_f1Zt@lGJAgK%ck3 z8(-S+uS>t$7J_bn+lGE>Z@;Q`u+6Z8o%a2B(|@l5Zn1&=78^W&0gljN(>&a7@_JFQ zq`ESSdE&9lG$KF#ihEpFfHkE^FoGQ4c~;mofA1H#{bFUnOU!fE*1m2EDBkFz}5E)c2&VaSUwuyPEy(VRSBetcItRW2IokIhB2Y*(Ira*s1e~ALFZsE6*!hut`Z|Jmz?5T014*#bF`GD7S+gq;4k`qay)3 zKi~TW?!OwsL9z}(lEhRD{_FT`(b=ZqxzyiYOdF)w?3ax?vSGJH5qo=Qoy);AqqE@= zJaC-fQ@pB>rDOWW1zvBP7c1{$F2=Iq>B$Imk-l(6Z6pTp`7NX-&muf>W)hX-{c!ZI zj5h{byscuZczxFe>6}M6bNAu3XDeQuTY%VQ&5@r zNlRNl5pGlLv@-Qf>E7~P0!={V5*K5?;*A%a);L!SX=Ilp=Z6xG*%VHF-@!W8*t*Wl>;2MOD;OF6ZE}b;PHz)5MofYNQnCwW%~QSjt~uII@1o( z$h3giPsE`C7o8_{2Gb1Ky_^pN~9&3mlN`+d%S^TOCSD zKN(9)x!nOTwIn{ZXU3ojxav*{&$ZI!DU3PM_RtQWMU~W!l6|&H)SrD+FMYiFJkS=z zh9|&$*I0g`rAG2Na>0Y^LPC!PuGY$!O>^?hiE#gJ(0^y-EWf+t zm)!#f4d=@hezFA)TH1clz5>0!qaA-noa0UD-82;I>63!0Gy zd`_@Rw`gCAN+z|> z`uJK{U1xWQt+98<08p^|LH?9@LwszdLTH2f1p#@sq98GpTX{=~hz>E*9O?i3f}bG$ zRWX?EyZdy5{Q?~Dq%B_^`{8E5r_$e=Q;^&Z$8)+dXwm&#$&P-$Jh!8cBH#32MZ~42 zu`RDW+y`LaL%#P5K>L2d+*3i9I~lw^GU~4VI>TJ>ihJv+q;ur$bEUp&j?V%{Brd6G5GBdl37%ZeS8AX0pa8r?5l z4uUn*FA$PeN?Ablf3mZRqE%+echoSeFek{ABP4!oyz-)5`IQ4=ZFg4X%#rwO4I%uZ znHLO=@!?d}ZDL;X;0||1^zdv*_5N!993X&QQ@ArjJ~^X(uwE1XrPOVWP(lpTD+=)#?Je^~Np}+gbxtqU zB9F6bn~UQ2i>w66I-MiqC9%O zqG(5(sJ%=~6Ir5gsO*4Lhu@KT)dF&pslPv4Px~g&h+leJspRCu&IVhb46~4u3m}tb 
z311bMO>Kj5gD;z)=3$wW9@z~);%nDVAucUz3EjbP;cE_QKQ$U{-+fZ2jqv{=az*2Kqh}Xs}=4_eVG{ zaPi;b8@G}d7ex?c)(l%j^VaH_{o|k{-KVcX=6Z3uQsE8|5|-T zaEnmkd4Whk;y94DHU1hV&Je$V208L=hQ4NcWPtKiQK!0D1**D*dNblDzkq+F!aqJ( zV(m74QX017#3REvp(>G7x+FTy$a>kfjqwA*2bbyIk9S4njOZ5D+~%>i3I#sDa~&_w zOfELj)WwwhuLHH`Zi`<~8Yv$v>+gT)ZlA=Ea@s$pQEfBG{c(Sj-E+!+egW**fZ@x4 z!nf}ig#8hJzu^}&mZK5g$RQ$bM7OKAVWEB^HHNgF6Q$98hKFXjvMI|ICV%_$7kK}b zUyuSwOK&h-ZeW|b;X$eC(s zIaK9?2ZSB#5z}e9%6nrdtK5O1BC?<(95+s{5CfS8+gKcu@vi59c@zVZ@mB$bN5@!1 zcLMbL;$xqx5G04g-5O(Eg|&jZ7rx&P<@ z@eAS)oAtRSMe`k^borlVI=U{#TqHfOe zmD;u_!_JC9bVt8%b2p?O1FqJ@x_3G(a}*xR09U;tpY|NWB1hmmqFvNGIk%=zNv`La zGbD7q=I6I~Ko3D2>KE`-YkTK1k>*xBRH<|mdoN_Kt>JVp<=mYtw9to<+gA_zf$yvR zRX7tZFPsy{xpJ_K`pb71PiQ}_NR=zg&$)H?ktR9-H&uS@fp=jXZpdrDiCO3FJz;#) zT9R>iYV~t#r%!lu{PPRG;`=9?V8pXNw&xJP;O=2F9(v_Q4X4RoM`8=vW1D-jA9mQz z;wT{rgs^U!8q^x1)W%#gC`T>SSlJ{ z1xp5wrj`5n(p}DWu9_^Ir!+{vl5)Tn`wm$g4h5F&M%k8D;oOzYYTf6?R&>35tQ8+_ zUp`G$J_AIdvdynE5Ju$Z=p|T>Ua#1!BPV&7n#Oa+bBRrPolhHxKIx^NTvWJ=KKUXc zQmIM#@)AnLE$Zr_*R+~4w?a{`{_@!v?LRH>=VtCB8sF{Sk1caLe#nXu~L_2>?3N5ZSn_`t#D+3cowy7^C)&KCJvVJ$GSBM&*FUR6ki z2k32kk9p5KmY(k-My(TLQT6RxGM_Xq;weh)X-<`kO#%q8gzc{dpCEp6$yUYa2-(i7 zmBd}s;-;R5^%_SWEPcR)~K{r)>lDy)c4Rh4aeXlSeId{yLTQ7mrlTofSv zWqyA61oLAEp{2Ue<)P$92zsfn7eB}k#n;CVybaDz_LuyWA9j8c`(3LyOh@*B}>O{6O+?o?Q=XZRZZv!3r3Sceg$_}%lD zu}RuZ1Q^k*(0EVx&~pYH)gS4v);4^b^AN4&cG=d>(C-NOrOI z!NX%hJTn~ICY6Tm`C9MkQ;x^GkF)+PKA?RXKdycgO(AH#{rFIPI_LAor-I*HfBoDK zIpqiO`OPcAs0NNt&X@SqA8LH|{Q?w>M@eG$bt~8zdU>CC?&I2Gl6&mc!j{4B`UT5DKJ8T}2%i^+ z?<}g=);vtniaCqB&c_nwy>&17roI7yMm^%$>7OF+$KqDhrTE6w+b5aFpf`0Z9f4xR zcHww2yo{a8+pkQdKHbO0W)l`6noVs_e=d54ua_I(U#LrL6CiBSLWqo#Ih$~U)*q_@c- zbVL0DhsMzA08iCuo-qOAUFEAM;}9&}V{yvlHag3co$R_*4tN_eef@?iEp_wQ{GR*2~IYHgcfIl3HS>p=&0l(sDG_ zqhU_R8xM$rwkt)e>KncU!J;)b-mz@*U^kv__kJZ8g08YAa91v=&6w@5{f^3tS;RvZB5c?uT_1+owQ9`U1 z{re2TQmrP6(aZ2OrH)TPjH4G2-2UkWzXf%{7pUhC8*1;_9io(O%x9-d?_2Vs@Z3N% zdo6(7je?26ZT}Q=-8>LB)KI-32ukvg=a>2T^#~~dvTpg=>(Su%qt&%>FR=phUjwLt 
zx1Gw0gLofNkGPGz06h4PoEWgVmhXj29+MV6ZbSA|b-4FZ^B^`OmficzS33}(p?X31 zqcTsp`qHojMFDCHM_TnLJNEUF?RSxAE}^BaW2GMs2y_r1{HhxQ7KQ}Ika%BB&mp*F>KQ_H3 z)y0(udFJ&N>sHe<;0J3*?Ep9PvWVLzEXTLi*7Y(&th@2^dK3!N#ZU!PeGz3~;tbIX zXn+0n2svt68d6O|x33+2-cov_!DvP3vMx}wF`Cpb9cO@?ZgD{Pj9!M(kdUp)k&lE# z6-}MikguB=m@P3y5AG_i@H|pV2+YlM_tR@xwZ^vg zoimLGpHotWKE1i)NpWQUo)?M;w&;6`R>|EapqK~rNaJ)r_?$%z6gGsCnWS4h>t$O4?gw^+OGq7+Yhb;S?%`#>=(mPd|ZEAL3{de!4F>!{r5lQ)77Bs{SHa+^KkM0qXSqk zz(qT3*4wknT2H4-Xle<*>yd0jU`O&vpM6zIfbKo`TJ80*yIinYhw23(2hj@vz|N7C zl~&<{4DRfFsbp6pU0W+{Y@~HZ=qiwU$vZrf4mjtNaZ7vJTCI$hAjDjinzFQNjgxVT~E=rmd}&UwOXk@rnE&xU!2 zwTp_rt4d0@ZE4Q@rx*Mr5nmO95ij}Jo`dxQT)e|(9Ozr!S{rUmSWqZ>=i5x|>4BCu zwp{b{#OC;Q)fxd+I@pXu^@30sdI12SG%CGB)OxDHfIIaNarf8_`DXTU^jDUu^c7F2 zs-Lvo0Zz<#@X3_7&9RCJa6Kq6d~!wC9mJ7iG@6UwO3fdd5YLGgVVXJhp|N z=|O8(A@++Wz)oDt-A-eCR(w$%-L!VHIfq&XToQpQUxc@)Jvk|5oWPhbj^7rQ(yntG zmOwK9D)vd?772k@YQ#jZ#p1D>lpp`6V-Dx5KlGmtj|xH0{nRLaybuL+{rzao$Af>c z`l{P`|B`=j?k}r?aW36&vB7!)?uo;u*|k%<&jU}OyC>C5^xP#i5e`3(-DM^z#Qf3o zkIWIIe&g%ozt;=GelfU%5F~1?OW-Re=k(X(qm2rs18A!n>nAvIhBq!AN8(P-Yq)^k zsEO$sm9yH3kOuK|Ovw-0&hLQOsBtdv8btE~@}<1zuj(6g zS*@W8EHu5^Ij%l)uWyq=KrAGGXi1=y0Foc5UeL8S`GSUQ+NO}0zK)|hq@9$LwJ~$A zo_1CaiHM>NJibEKA^UQ)<%-J#-x8zJj$V>yc;cjyf%f=IsUkZ z%_AO-Uy{AtM5n4pPb|`P!7zB+*F??zn%*pfHQ;Sl!pAWzC9QkHoGok070D|z6`i<= z!a3@8*~(@kdlZnlWg~d+%^mglmt;ZDACQQp2F)QoQI~q|7!qB@K5+TWfAaI~?*aOD z7sj3t^!N|+13f|kT^?}pRerv@!Z*)2pUVF%tl<2VeaR2)VdsY$8T@6a(HTbaEo7na z`2Fv%#Z*d+HeWY!)@@v{z_*6|<$M3p3&MXjf-}TB61&b39sAeux&E5>l1f8b8itS` zd)7kRQDxgl zsB1e57?CWbA}nTCjbE?4o`L=7K{8?bwDp@}8<(MJp2Z87`5^Iu>IK*REmbRx`R= zT(sg&0Ep8j-Hee{l6NYPk8^y`g0h69G|Z)Me)`4&o)6nH~@}M`Iq>x zA8LH|^#W5qgSG9IOI~j)an_$4Z^>#>7nh|hSuZ!pFVyq8A6^LidFp$;;Px->K!`zm zaWURz>55x_9if?3N0|nd%&5pE+tFlCe7i)0E%}n}-IY}oy0%)D7uSAQFEDy)JrA!V z9Oyd|MwD(@QnVT~IA1?ScQO6tNxXj9mZN}qCzdIC!C49-_j8X&(BHCG8NO^u^0k485FNEiIALOFhQ0GZc6sx#WbHVv0S@i~0lJr^srt z@rp}bBjZFO!}9KRfDoQT?U+ZV)RA7E z+Dcwrerih!zAb-XT1b%6GVS7JWlX(GqPW79mE88xymT~OE=2(|m*0}u{cX@Fz9v@~ 
zy7M_2`-i@%@pbJ#-2Oq(aQ^TTKdufA+Qa>zf%O7hzC#TfxEECP?tQJH;L%F@8ua2^ zyys-K4p$cQI(cO$na|+bnqC3HP8n1$i1^jb2}csWqWbJPvTyVPrYjFPxjTr5C@$}K zpJ1Hct|qEyxPv=?_V69Dg9;Q^hD0V3b={Q9HsxY{=)>{rAVL=qTiEB^1jJaVA9;N&PUdS!>+9~G2uzg1*+;eAl(jlEyN?2yq$=& zQzeye9S=RCUF5#&Yd-vjLUnIZS~mb#Wi~gQx_z;risgB>)~n|G7hIGk$J{=wvI{RW zJ=4nB{5O5UkBR=e6ioM{eY(MV0j}6#(~XrtKt7dpv!{BrY2`&IGUqCvtecR#?7OMW z%rrlLo!^)?^LxD@a$hg_@wa<}^@3WRnzgO;rVVu0K+Trn9zN1VAzucW;& z#&AFh#W@=bWXI7YqT5>0T03Xr;mg>CXja}=1Z+baZRqp`2|<>fws7b`}@j{>^9Ryh2QN=vDHIpG(3Oe*kjj7kG-}L zg^k(UfWuiU5M{W$&22)gbM=@h$*Xc4F{EbK#jK*rQ&+u0qJe5&!(jyFZeJsc{sAiM z=@g!*HhNdNTasAYCKdI=HpVa-?XUm*|68}gSXX?(`qE)zjbiVC2khys`nQF5@S2~OdR2ZG~LvED# zY|p}->mDb(PED=>RN4NOt#?`Hz6txgXI z;JdzfG#@=-&u5u~x+=?_`Wyl@)GwGiR*e4c0@D`KIqB(F1{cpvX2kkpB@BCNJ!~Tp zD-0((AkapQVOMhcvU22IGG!?ehfQJ=Z@rvKS>j!fIDY$b@X2|Y`T)!d|C(vYSC3Ue zfd+ZuUi~ASDeDqTHH@)tm7u$GdXq=u(=DT?&zTd?6c{~wFzFSi#Y&NT`Yp2zmdKRY zKfmC&TGMJMd{Dmt*Zhz8`whR~aSug3WeR17XLEAZ^BGwa{p@Ni<)hin83M%tjeS)a zF!|f}3u6A#FYrlh(VkG~XA~^<+j`(OAUXM_-ZiDfkjKDwT6Bs7Z3EEdBhzHVZ&pPh zux*CxTvRuVqzy6(4wxCea;^~lk#-WKr5%U(1*!%_@H%=Ut&^c$(Q_-QVhSODpto?$2+>==V|iu3zHk{D3Qd?&+q0R{J!iC}d~9(}4m6J@DU!5cpFT z`>zCoRHcB_K!yXC!u&yi1Hqx8L2JTGf@?zs2BiItc-TE1;Fh*#|Iq>B7dRX?>$J-= zp{SSZcEeOM)6cR9tmVnv*mgn`U>13`vtgd9s0W*Ms9z9!5WgS=7=I~?$;50fC9Z=# z%9p0RdVZW#>XCUp$%qTbC6O8FD*%5qcBr6FO(w^sr<*Eoa%qm4ofe-$$wH=xV8vEY z>4D0MP`}{R)4qhRQ*E`jcR*)(*LF-^P(Gb@xn(Ui;OS*pQ|~N)K*W95yKITYA9Anz zD7CwHfZpmla+u;gqD89czQ;zUTf77)Gz&eM912+E4^xZOt)-9(U0b9y*FKV^+1G4N ze|7ofKfeHGJ|GVhwIADah+hDH0)TZ+z^4kZZBg*m0&|I7gSzmV{3taduL=uac0y}@ z;4zt;JUd0*O{lt~4V!VOUl0e=FW~WJXXvfDdtN#mc}hF@oi%FNNpIG4>@1^@F?LtC z>LtLr19YrN{OswiD;kz)(lE zGlKk;qz&>8lV_tXxbHdB7)yKY9uVhy;z0&2LofVulhA29oJe=X`H1q5&f{1Hx87-u zzp_aWeBjVDPnxFk%kP#ph2sbtrbnH`7^`@`i+16ybgswUKw!>zEtj`x8TrzUb-rFx znbtFs%>xWC-`&s+ueC<(XjT8*Vt@JzeY*yIy9;Aa2znga>Ovb$=q?3x33~xKZNI_( zaow;V7yo@J80WhE78~Lh1Rgfckro!ZyA+SfPP|9$$yB*fYkBW%HlGp;J*QpAf}qza z7T7d@?-#`XVsr-~=(3Y6Vb#2A@?Vecc;qIf4odF&S?Q0uG__!ci0VrkJv^S}qqiPhBo 
zv#Hck@`FC{Tn9bZkbt*GH(f7l!d-Xn3fSoJ%vbL7lYiG^UnY4`3-Dh_LL}-aysktR zDUi-?n9rq_a%YxAm-v`F!{i*p%XWaOX@%(+cH)qC-UNGC+w0cX^TYEO(7n=})2V2( zoXQP;nIAME(5L5=-@;S~TI;(IQ2f@Zuq*vf=BNHkej*P$KaCtFdkUmWwt?C)47d;G zJK5Ad_HI7ZTft{y3x9$%{2QMz`hNO?gkKHe1ZNIgMYqXb{pe~h2bdHwvjr&D)X%Q-B z54@5a?)IWKScDFd4VFe$baFnO`(=C%rngakdm1=C-vtVGRDM+Be=v(KgcwxIcvHrl zdR*qOBV?7b1XnoBrz~Aj?)AFG*39~7S>8MWNr5FCpt?fPJ@mVNL0@N@pXQW7U9Llw zHq(*t57*q&3QlB?4sD)lD_4)CbOzvg#n;S~!xN927cc4GFpey9tHEA<9LPX`C+!pC zj+P2}XAboXgwY&63y!H2z;gD?^YDqZjHF-|F87UbRL7|(lzNH2 z1{EQ8;I2QKcn|inoT!D2vQpu_88Nu2smy2h?o61*JS4Gb?)W-rp#Sm>`a#)$Ykeva zf^K{lD~eA?f4YYK<{5J8|76e__k#xU3o;HhXyA7(4=sjm@byLuhJvb1`Il=lyelUK z*eG=yfR*O!p@MA{u%GjwenHZ&Zcc&r=b~P`yL93kzkq)#qYq8w&gna$T}SwcM6~bB zt|U}m@9vwLRbZc$7GwqJlL>AKzi_W58A!^(Ra+>;7I}ffdmW)ubAGGDK*hQhLO0Ye zD8dxyODPXQFeWbW3KQHwGWZm7}*RVsPW3HwU@pR$(&639zXI{mO$7*`P zru%!pAbH;}`0?!9V87t~l2}oF{!Ckw(;4#joGjyV{tA!mX;GGLCQ^|NL}P{nlnqZd zUmPb{RS0;<<8;PzY-!O8XWNiU1I}ik#?3bGI0S2`U!WY&8IP28!b3T%PIDPP%a%>O zF)NDc?bGp5<{kudc(Vgy4Y(}u#mQap%fzQt9vEOuT-?P_GvhZLt2dK(b9qH82y{v7 z>U=2YJ&lb_n*DlLOg!Kjl166P*1j<_xhAY6^_lwq$uG0`k&-z$4rN9iJWXTHBA6G%WC zz9zQA0YWv@FUY1l*Al@;jr!&o{Yi5V(TW7>#lGY3IVgJ!YIj0}jvkb1Q2T@0ie_^m zb%AVq!h5e6C+dg-^$82|dm^%3syLB%OJ(!)7_~N%nBPwu z%6TN9HvaPqehTyF17NCKzEItK*i>H}$ES|1eU#<&+}N2`@1_8f^1>a-ms4D?vMQ?| zhs9&UrW)!O08o^FEP#ESzW@Msl}XzqX5u2K;<%Hk9yy?B^$`}Pqw7jO*|=XvXXoAn zC;(%ckLsNySh0OLZ)}sitT%K~qtMQ#V=;(+{0i3+`fLsL3q~>dFBE1dV?28OLP1$m zLs#lpVHPI$v24^2T?uPFH%kxLOdGTgIE~>wb66HG<4!gfL`?v7EuMa$%-7uiF0nh_ zumPq%0JFl^&(@#+#Q%S7fPsDt1sd!ZbpH{~Ri>|ds01R3nBfT15m}G(`!OfSdgfW_ zhUS92cj3;LfuF348tYIeV)D!yXOw8*aRR503Tc51nq=gzJ4v5%8(GDGi8I76phu3h z=YGqcQJkCa@3x3(8MOD3>$RWGC%=GQ^98u8MpQ@9#x8}5M@lH0byD$F`uek~o9R@z zG@LIF2;WO`wq}f(%i$q-jHp;@=cea}k_ID}=$E9mXne1_Bw_}HhLiqiDM zmHQiz{YG=FU7jLwYwAztHhLQY#JjfCxJe_fwf29RQQt_|zGhjw+$+!fdAm z8H?ylrDXX{P44#Dgwux?n3zGj4#NAy*_=u(Xlfb(xl5Sm^3e{*`@T+*Qu1!<%ONcNrXj6@OBG(MW84da2 z8+=Lloc{F$wKVK*vGA)b&`}lWfdYIB2=ZiTW(4pRWHJE^d;MWwWGNVGXOckFK7}H*anc&>AE(91`(UB_#UNWOh}y<+Y3{ 
z+R1|DAn=Nq9f{_MPZM5-sUmg&jc4pTHE?t%Dn15BJzqu6!{D>%feQ^oyGR%(8{|IO z^Uq%Z<@k>WFyalL>vM>|VBx44pFus-Kzl}5Njk8G-~#BnjwRW4z%-(Cz%{$j2;ypQ z^8Fcy`U~Rr^%rdB+$K89(O)G5V!}jAys27$7B}J1%Yk$%Dz2#XIPpM0z^q`D(Nmk6 z7~kiTo7e!W5LedJ_4@uH9s7}2a&y{{m%Uwo!L%cue3Vt6C)?mTY?QaOT^hW13iNpG zaV)SnJVq2-P8ylos>K+4eIA$pvC z(8js5b6g!$fZT;VT5Q>tj~&44o|YujuX?3_A-4|52u*hSZiF*#cltJbhh zF4A0B%V^%GZ|wL-5cVd5pb^l@@mmrFIQZ2j_AUB9hYmRIe9%WhzSiysjC12|jScY^ zY#cSsCZJ3G8@Pi$GG?R=oUS58K9e374R+#-j?0L)4&H8H+@I#X{(|^F%o6x>> zxg#LA`PZ{Mq6`yN<_}@%{7w8`9)sbYLkVcoys=?f7|otf$zFJ!+7S@tqP}VTT=S$N zOCg^g7oT*`5CuLZe^blPGddy`jHTS zjWM$=hS$$d@4F|tnow==Wzp6o@vcgr$&Oe*&6Gnmy zt+wt;-I;g58J}UqJ0~jraH8J5k&CFR*?wufl)-kMF8Ri71uoI-xBoZ#fr1M9%T)aordw*4vM12=uYGj1eGAUwM=EP84bRc z@rv1BKYRTJ34fZxLGpf^v%+OJ^Vjm>e}3h?O#iV~)HIc(@;19iMCQSjmCMAb#pFDM z=eUsx9RXbIrt)lIVfx6|R7#k%BG0<|6!XKj)ZC|28sfR&9&Mk>PBFOBBS zVCqM0Fqv>|9*HIP8xhuFG^ZT*c>@eL&Yth1#M#$M#pyUVp*UKeRyb-Y+j0#Nd<+ll--W zwu~zSQs5W*v=KQOV)}an2MCRG=I7Dtw^FX3rr#EPelUN5Bj8-FEh=v{5TKWz9bAH| zWb>LVrbet3>wQ6`_;^+0sv)4vV}5NGy<$C*52>!fIUHeATjFVY$;Wppqe>?Pmfjdc zUR$940wK8^Gz+s#yP~E^GoKy5*sk1XNybsuUH(&17d8mvpB=KDGCn=|5^D#9i}@%e z$RaR*M1U&t%}w$i0u(b5)VK^jMS%GX4aU`JDtg#%_&(KTWTA`F6+#Ls$swP5no2E# zcvb<&OKazgU~9Y+UJSyB0YR=sge>L4(MIB@u4@jzs!nP-h|vB>>-YUC5H#n(g!acm zf0ejLXnUUWt2_P}_w9Rwh1RkwG_bz_8~;d!1|9{a6g1=3N-=nz_*DMRD}=F!fveFC zCX^M{O{(`ljU{ht?yoYazaa5XjT6brNg0za2elvk1yk9NN`*_+CURPPFFiD#t5e#B z4d4>BP~SqhLiL7yK@T9Mpd2^Q$iE$Rm1#k9X$#&{z1!czrnOceEAvgbNti5zZm7Qi zIjNdrrrSm+Gk16jFF`z(UoifTBq#5;R34 z@~Gr#O%3PEXYI`)qUm!$QxmO5WP1Gh^*8P*sws-qvAl@dPe854O7v&pJvYRK|M?64 zh&VJ6On2)p-C%zKHpNlXtxq)P>g4C^GilCU_vxB%c3wqZBQrCx{nD9t^4-<7{QJ|r z*I$sd>o55A^4nm40SKXPWY~ec0hH~~il=DMd6W5UMcE~5^LP^}y>sCGvH-Sb+Db(zk4g)XM3`cYie#lHhTfiavajDC>%IPhWay;qZ?;E(1;R^)NNG*vjw{qsr*i8T zZ}s27M3JSV#(u;^t}&UnI0S$PcY}E&P|x~Z^YS@kfQaoIP0xa+t2J&hchh2`QOpQJ zHPm0A@UYD2gV#xpI>EB``el&L%d0cm1E?)w7H97B81ZMoAF{F*mh>KyGC}f}nL5te zwpveW!liNSbwbbtEfqcIWv0xsS)T#h?BTC zB>(vf4n+0qZ&aT+2X>}}}Ts{Yueh|zsFXzCWJY|0WvfaU%R%5OmZ 
z1u0OJYYM~huWiBE1TG148rJmM=~fpHf46>tjY-hBrfaJWir^gOE$V?Yw7Cj5npo*< z&gybAz|R&n0oJ*^uU1Z~4v^Kh-d{x0@rl5FFiDf7HiehE$C$dSZl&@nM#lkByoeb1*I zx_>1N@fXk|28o@(l{`(-Tvh7#vVQ`fK_R$&dgzP4pgd|3N6eC{!f)Z!G!pT~TgI68 zcizg9u7XmCDu-F|gANH_`emuUBJBx7x4;Zy{*lU8tcw^VWu8W?I=mlAa5`s)fU{f$ zL{Cm);eDd?D(IYEF4aqGlMA?UX^rznLIoeD8ty-T!T#a^!`BXl59%+#7Wq&79l&3Z zqBwQhsU|joJ4Xc}EW-jRVqDb(@6wn(uPKYj^3^ez{W}RR8_wW8nE%pW;J`}oIBsgD zMqh((8-vl~b(|uXrZkofLH)q-nl#g{9YAE`3OYtcoyO#M@}* z2KmVs+Rl)gb{OI>I7gR_Nd7v6cU>HnG~fZX@f@-$53wfl@Qhc7-|KVKwaX$P>%d@z{ADeQ7sKi*q$pD{Fbx28SVFg55q zaNuC5cCR3#g-{oO;*P0g?h97NmflzFGw;zPcGe`~OVox8Dol%TfoQ3x}S* zJPCT?2t7gzfE>OJT0rBWUm1l0y3_#}_Kw}I1K3}HEqTMuw;oWHI^m@tPm77W0WboD2^;EcK4O5Io!?u*%}+pjB^!?Y`5JFA&5i$1(+@zS!^$O zolnLsj!&R#df`hYweK(>Y+M6gc&gd3iysp4em$h);ckyR&acC?PpgXAYa^eb^uaR` zvF^;E3&jo}0ca8{d@;2@lsY*B%X|qQPB)tx>jJazY%zPrCeIs~n*Z|`>=ON_BN*|{ z&-FRjUx2NA)Qnf{7;UKQq?R2Mi5(;+W)f44)K?@KM(e%CT>xUl%eB~_aj3r_eII{8 zGyq~`I=2!KcJo!5CPz2y40kPw#fQ!AaEBGSj!Dgn)*#?)62_`8PO{aO+dE$u0|}yQ z(>t=b;@+4h&)4NlD+HnIyI+LV*j4tZ4K69z4G2M~Mox4JI==K2od@?^A~71>I)96jhSP@5GroMAO%=l>ymWXZ7dAmx^r%mp6yk zoZz?!1w5?v*6vxeqTMY?wIu~4E8?SST?+5P-FpWpg zzn-Mu!{>wP-|8dIk(iwFU)kJGg;ru=8=^Sc>RJ(7Bf?1mXlADz3g5u z)+{<4CoQ&GCq-B(({(-Hn?VbZEXGwl$0ys#+IECf^=B5SiMnU8?IeuQPvsBOyW_)?%NZMRmC)JPjwO1DxHoxPv?;O`DDjFo7rGk9+#D3 z^0cLrdF*4&DFK1lI+Mb!bb|G(c|dwdW3dIJbHaqAa2OmRwQ&xrWA_L`vlT(PPrf1k zP%97y6A+9LQKF+)fF}4jCTjAW-QAB23z_$^{EXbHY>+1QdAD@@5w)MLesDa(fS~F7 z>BsT=Qug*lzEArx{JnmDO!>Br=#^;4y4TTr@?lEL#ZJ?&=cKty8F3m%=3 zM0VQkC9e50^2Yu3v)5mc`KKwIa^_)0HD0#A-yfm=ROjX+b^IZU*x=>ca?jS%Y`o_l zQwf=_;Lo^MEZBy^0uxl~7A@W7f}3$pmOEXnDZ-x39JEw|mtSg1T>M1z;3l9bikjkL zfdAGrDlH+L=JM6B(EDPTDNc&T@g67yR}4NtHfL}x?zY+{d}BLiwdy{Qw%Ps#;iHGdA(r>;;wDBqnMwhS)h_|mAf)_X}qsGpXHE3V>!Y7 zen$c6YC|FjM11vu!h+Pa`USHkD-l-b7L7*|PXLanZVaw90%fdz65LuNdgX_a#V_7E z^2ybOJsz9^$`pX&;B<9+k+}LB=xP}sFstWHn53maf)L)SOr^cx|YGSFm4EgkT{sI7qk0e?@>3p^z)RAQ=O>@mK zk4^YJQDEmLr-i=pRe~CNK&j~5E5aPyyO%#kU2p96I7Y?faGB_sA3;4=&05JkMKFYJ 
zsJ|e$ol?UnF+RPDxU_OsFGGl+mixZ8v|>l8i^+D9cJbID=`MJbU1@Q`z{V@7cPH!d ztdM$h=2XxCJIA&6aE5&_i5bAK_XLf5WNXForHjJkx^F$giGuiLxZfE_59$?0+(JPA z=P&pb-9N^G>F(R58{#i`bkuZ*oj}W)UFG`_`pn+>WTAeD-1+))A7e+t%>rb z`}e~3`U{@z`U@UTDxW^*TVT)}lgxcV;EuieO5+vqM)j~jCgRiU#P!G-0~D=yx+mVg zYJS%=?jvH`(s6YtCA;Re82}Hra{BF4?#*q+Y0AnoMx&S1@1soa7>=kIEXO0STew27 zhWZOQ>M@*(t6#)A4PV;|RO}(RoM?Q{ix_cL@VzEV1hTp4A+fg5?hb_)oxyFG-RdO_ z7rq-f5}8CT81KGA;2>cJ0=j_9L1EicGo20sZZeVdScL;A!xSam;Znt=FE4t?cEY(qAceH_s4p# zzaR%XDa*{|XI`c>y-Z`!Dcy|6w5Z_s3>8eZBS6rMzYxA!z~n%m`Z%VjHicFIZ3{p6 zdX6TgJaZvh;Vt023HyuQ2fgL#{n9Mkab1&Xi<2_Cg9c+y2~b_!ZMPwo{9S)R>?GMB zrfuaSi!U|&l#pXQ5^ifnmfUF5Zo(jJy$B-r7PI>5FNg-(l8OU2vQBh7=rvSWdA9xF7K+kc(uG6|Pv31M zs@aMj0OaCtN%`_|I2V2doJcsDFh03y=m@QNXmFD14avI`D-fWe{sQkaTMtK->9q`} zG*6Fwynvu^ULH@gt@{}EyP`WUGvilv z=Tzw67P|Mf5A2&^pT6}EfBFB>3oy`wP@uv7g0%mHGkV;m^w`VfPIT|w)sZE*sR)8L zG}o)?7;aZ*(4^niEduwf;ofFuB{uY^JrB73C^F92jKv`A!93^E)4{6S6Xlo>4z@o6 z17eUMXo2JbLjx#gR+bbtlFgQeC*Z5UAcIJ$;6tqMWi&L*;*Gm~OgB=-7JWofp76Li z6WV$)9~M3xX`6}@BIC<&w51~rvqr|Qdo3UrPy8LgUog|T&HA{>{(0XR zQo~}Xnfx|QX_1>^RmgP#bvsMhxwrdQr%->v^S|;J0KiJ=DCra|x6?qEugN_QL02<8 zb#VvhsY}PgoZDU?u%H4gNnDL{bUQ|GxD9C83DytvpcyX~-80iffX_WRMYucOT%_Kah7b8DvsNbgI zTFcd%mn1I5EIBsEAAug#kShr!iv?%3G{vD&xx=w6rV#dpF=-90rp$z&}`p^7`|rv@_0-TB!@cW0RCX!fSyF4NASUM zHvqotyE%ZbWqi){T@ob55%SLoc6hhz0Pz=qdz9d~KYrZ5q#kJP_$6f21q5YFI}PoV zhAoVs=NFb(`UGwfz4_WF^IZ7C;lTB7UB6vt_F4K1QLfte2~R6akDMo_=FT1CI%X|xgl+b zK3cY%sX={pn!+wbXuF^!gjHWLnUU3 zADx{le&N1HBrX?{cHo9c_*~ndnO%dp(lx#7mu5d)Ana z7?19dIOEk#6hvRsk$312eZCQ&<1DY}uUw>~-koJURQ#M-zYidJZh&3l7XR3~8@ZCD z1<{A_Tp(cs|A(E9%b9JN^h8uZGas6;9fO^6DqLiPNTr82Tq%u5ubKM^OFh?)mXSG^ zA8YKN{^*WB#r!D(1WnviNmA^&V>i>D$OByd$8qN`wfwJ2cKjg{jPvMjjScY^v>!Fi zl5O%93`OhGE2{G!8-}Jj+M5&RO5)F|MhaA3qk<#G+@I#X{(^!(%!>7f{OTNC@ z{@1fR+@WZ9kn8;E$Qopg>0T3CR9UsE;Ay|IFCnim4HWklbOa15FRMR%Pd2r~4A)UQ z2%e=^IKk;8-~XzvVuPLAXk-=;AcVE%Ge$$^Z<{@r=`d85s=$5Qx|bz~(mWvKC39*B zL_g5|5ty&*r7CCVcLbezL+ei#KPZcFn&cI}8unxa$ip}@0CY(Gm=SL0$7TDQI5Q2kN5XEhM 
zi2V@?|1^cuWX{p~sJ1EYujSJQ$Q?tfXykvKE38})Fvnldc40>sNvEX8AtR58EU(xR zIJZNFbZa^WC%@As(FX=^;qZoFbSGry5e$p)WWvj zjyUbM=AISqr(psK&fXCDK>Y;-JTXXDotPcEUDu-)1N>bS_-jVo&Ut5-ndQEB;eKUs zNck8woIK~Tf$T|NaQ}GT>n)=o`4Cfyu=&AB372>mQqmqEia=iEGcN0L=D+CmYY z{Nn|w!Lkk4&O=vj+gLP`r_OB&$ zYQEQ$B%}Q)&MOJeT3wg3b%Bq-n?u>jijHx;e|^>W?Lqwox|znX0r-?RC!|pADb`ou zJJe7sVI5>^3nOm(NH@qO0&Jup@;y zp#B22QFhUM;gA`39Xo7^v-ukrPd=@Zch_J>yt3JCg{!}KNTHp&fL2NrJ7g z1+cuZ@Pr9Y$&sNR?t|oj3>wi&_VR`^g#wEJo-NUtJT0iu_Pnls2jj26{4RM<5CpwG zI-%{B_xT}Tg!cIodiwSj|9$L#41zAH3{zhL=Dg$5o4U8!Qja3*_5!{8tE*5hWZ zwf1G%x3hllO_^}F(j?e)srOeI)L&5ir^cy>ahBI5X#L6${(=dO7u7x!hWJXld6y*k zSh!~BlJK=VFZr5Hw4HqLX43_L9e7=H7pdnQm)qD$KNvC6POZ2AWjy8;`;a6mD^fEt z2;ESB!R6cqv6Y#?rpmR>M0Iql>?IGv@$?*nU`xKdXpGI8ER1Cyj}BR8DH*d4k9Ks2XDwcE<{c2Z@AYl+=Sg(SAc z+yD6s_D3B|_rxyU5P!kuQPW)kkG#C~7E_99=9vPFz41`5WAjp>Fzv%B#vIBvr+bL| z)4kVUP_pYU`0Y%rxRHQV3;gC5kGHmXl=xkya`Uj&Qv(GrBE|Dz0z98#0qm9$lIzD| zPSUak&{W5LTrj`gcAVo}eyTxFxU8fmuNnkvsK3C7SdHzNp*r{MWUig@bc11 zY>zvZbZB-uioPJzYB-60nmxiUKGw=Wz$RX z=Wnn|^1qF^RuO-K!f4`FZ6XnXT`hPf58#x^%wW1Rc7eD_DlNCoo!hgS{=Vz{`SQU& z2-VQ_3yve7pDUs4KFxh@Ld4W(YdWfhlG7?CvY1Oz(&<5%1l%Dj>lZ|Vq_JWb&1(h; zini}uM5T8&d~)8@!Wnj1(8sFhWDS52dHYlSPD12t&r6jkEchiV2TJ>t`7V*I0%3wa z9b)Hy{(=KhJ@t)h%%i6I9-Q^ajE=Wflrn!udqZ~qidjELM4ct9?Rb>3#M2iCSg;K0 zFDQee{A&Z)*ZmPXdXHO4-jCHa?8Wa}tdA~mx%9qMW^B$YLRU98xR`bX(87eDJeP-! 
z>(;ycupaXQztOhqD9*W=oq`?NT7%7nA_&k>e}Rwb1LgHmQnMESy5${(tsv5prBf)^ z0|KN)A85S_T5>)l&`;PC^2Od}NGi>MQ1bd$L(I;I-Tq|KdnYYOOsUB?9c5qpz`hy2 zdx_ouPyfG}1Kj(Vh63&AghTwFa4yld%q=_LN~oh^7>RdZL$viV!3mG5#HI{FR<>8@ zhK@}OoF6tc{YbD$H7x%5 z3-%Wb7`_=Od{BP@4$Xh!?*RUSEP-p&=ToMy!3WJx1li&V*jMU^;dV0+g!pUD896_C zxi5da`y-V9mA@bwNc9(r;UhPi5O_iYV|Tp+zxTDbviG1d8Fp$-4c^#0BS2IIe^WQv zikxiP{84OA74hkg7jT8S>UW~ zUL&0#RtZ`K6YP_dZ`~3#9Ry;`X3$j~GnzTiSNH-00xeDbvszTi%;Bu&Vey!Y(Yjsn zh6JD5xs?*TH8)m%RnzVctA81mL)hJU;0rX65f51CasZ$oCi1_e(?9}W?uCTGgBq}q z(T1-h4&Nt2GW_xb{tj?^#^xzt9DmLVi306LL(4_jy@_Hs>Zhan-Wc$=1H(SM+jRi@ z3vkXHHS5LWIlT#kEA=}@mze5-E{mWB6xU8Kz02sqv&?K@5&z1d6bzy&UqO}6@XHi+8<6!C;7a&aGpxj0Njz^~Ub(fzl1cxn($SrE zDHoJ-gZcUU6*vguP=7%U-xSldq*-NJ(y8pxlF}+2t);ukNhrfCSv(42ThfPJkLGok z!eY&k(!vE!xmh2g*!NuHo1BrHvO-U5DBRCqRY3(t-R?5c!xvOQb0Eg?S8K-Z-XBT=X@VXPjC3NflBUZK= z2UFSL3nvT>trHB$4G*vuBh+6|xsSg90Fv;qxRVf2@6ug+%$wtPaTMkHwvx8oJA+dM zSY-7UfrEfUb_H%A+)K2@*t<2)u5hwG7B(;Hw&p+z$X`QC<)di_wl-I_Mc?CeUi`w`E%M_IejibLw(nVqk&ANdW>|RL4NZDo;;!ElnUckFW~y9eePq zTmad28rxI7ao50)(qk1_{8Suq_qPAMe>@C|!Y%)^WDWF$po~VEUtLY;va=Z!mhGRDm~o4 z#@_2Mc=3nX9XRjO(!5I*cS0cM!~#kzH-AjkKy@0q2R+e#zrYR2sXqf^SI$jqL11OQYUhNJf!#yWATYL(~i z)ReP|Mw!Q7@?75t2+yM~di@NdAE>|Jm7YNka~$oB>BivnTBCbpAvvPHr(X(*-YvLg zO_fw*en|baGCi$EDlfj znXDCZSS|al(5QEQeNR^W(2a*U4T0xDw{rS8&HL~>^d@w(vf%G}(p9}95mi!jA$Nl< zCMo$%JJ4atqK+wQm{d`G1;CZ)@pEDnge!lj*zC8e}pM`Jw5kG4E zkWsrNxs)gs<-@-Xr>wO`&DY+Zbo8FJzqK|z(%(1JyT5++u3zx-Pg6LVkIs3mEItkU zYxz7?Kck?Jhnu>zaAlH|T-*$6zRzO9ZDS=RX7)W|p@X3#V43@=`dOB^^fQ;6DC!~w zI_z)I)BDpRKRr-WUpuBISq-q-Yh7VoXgQudZ=PixsH=YO_)b37#S>G_>DGC6c9RMa z`9S>z42_S?7roZ&2o-1S8z(C9c~~!L9vf8%{3I&oYV1rBen|Nw=B?o{aI~@2a`HHz zQtKlOTSQ%z4eh~>cWaxxn8pvb7^)_@9 z*|eK22iRnIufO2cA6g)I@A8^L)SZGv*uR!g(b&>Ojt4B8ItIfVgwmeS9!ROjBWoZA3V2KE== zs2{1&z@wlU%)^-~liVTmw`FbaO~jC>Uo>pRbXQY`$NHF=Otsvye>(;B7gYVJaWX7c zwYe&zkN$(dK%XxUKXvN^uFZoY%1c4I(eK~&mpCFT-cn*;SkdL}TmkeAB;Oqeb@j`D 
z@}k48cE&vv#q)=0di$v_MrM;PjouDIH`HHn3#TcYqe8db?r~5FV#7NVj$yoxHsT=BTz?~oR3)A*|E}!kSAW4^Ia)ogVhbQ+M3e9!Edp;v!I`&@H z(uIZchm3@em=V@mGYQEqf;ZCXAmE&k`{ytCLh?TnRgf83@uw9um8ezk-@-t`yYSKS|SNbZr8vdStT*@$ay z)x;o$&ctT(`)@0{rjnj{moqLj53;!-U?-NPD)^lJqZYcf6C(^Dsoz_6vYV~b$sX^N z*|cJA2yrtBiS3JtL&+TJ71xJsSKEFiTd7PiZ zRuc^YUHOo`qYkd9Yj(m~T5Ty2wXe*aOV`OS1kOF?OU)w~&=(TReGS}(U3wGSU$!NE zt)Rs3K{d|xA`;gLvIv)TJ^E9vPhGnH`3ruJ^OqDb*2~|pHalvpV=j%}HYk6pFq4hd z*?u@_NqvGUh1I z^$W0X;u!HQOI#c1ArBS@;6xptUtfPO5dkMXmMJKXjp+smR}1$?e~hZ}mc}3yE_wcV z{9gOxyivl&bhB_AubrKE2-Q%3!P`>iiiu23Df&nPHL4`_90~IU%;*#h`&XQG;*U6P z@g0)tkT;SK-@H_=Si*cV&2_&+Er8TbS-o|31VuBrO8#*yE^rxHki27w0mWZceWe0; z%2+8Z3Wr?zqFjwVH0^#_Mb|%n!54Oae+El<>)9hIiiYm1QZ?VNzSq)l53&808^4VU)8HY24pt?? zB9b9TRD5E539Hp+@$t>HHNj3X6%2Ch2oEuQ)P3y(`)1guZ~c1={&!NqK(9c72Kx)# z{}awHygM;3Bb<2uZj<-QT%9pJF+PO^5zS@1QA)|lQ+F@KfqT~3lIG%y%GF_!0Ezq4 zb}Cdvg>1w7=_qDcLT;cc8|?kp--GxIm=J@0W%~N!FEG8?6~>E`sqIjhS&|+I8pt(H zb1Jx(cQw;xW7P`w=FlPG3zGgP{5>8< ztT%X`rvrQmG39Q9^gV=bY$Yz1Ivp=$=F#CtZm<7zBL0KKi>kG~H7x}gj$e#OgJU5n zzxzP$px8}x+`S$2T``yxyJ_IKZ-WrviyuDV6i6wcA0oe8P{4i(ecfH*YXmp|88mc) zMo>VH-#rHWNYGDl;0J-H2^@Fh9l@}#?RFg?{({J(X1$R8W;Thzy}8^6uA$irb~9%( zi5O(-H->Pf3rox~*Qxer9qKQrKb*hdLfzaAx5rN~8ahilJ9~UV?w??hI@BI2vzc_C zBfEu74^XQwQbe6FJxzM)sQ~T0u_VHb`o#{>(XhZ=ue7Voi@p%Vq5gt*snTgiAt}aH zZ%&=C3Gjtam9d-7e?70a=uTUIqng#>koEcL`HFe(6USqb#tqn%j|nqsnkX0rNL$-I z6VdXriJlVxT83Ic8gWWWxw%SwF-`XO?x)jMI@0=LQH7pAi|smQ_Rn9i%kiI%V8quy z*XIy_!PBE=+~Jz})Am%KDDXm{rHSja8>N*&40BcfEVOcbeG!>c@cT0k^%pek<1YZO z?G?c5!#UNsuB}pgM?iI+s^SVK&D6&?s34(bt{=!#*wJJpGE zNSrTP;D_FkFyB1oUm;eM&5*`qwZlWb2&(89s(cgB1F{8<3CGJ6SZg)`-RUZb&1l=k z16?FNd>4~uy0I83g|DXpGfHEX1f5e4fSB!9lHrf*Q%wg{Ol(jrVl;_U*{+%+?ya%+ z{M{Y*T>tqJ1Wo={D}yglzSQJ@z7vxA!+k$zhmJe{x-t1V^M8o}zCE)vH!%_A;@usrF&i*v-^%pe$VRi@3yXg$8>%fb& zvA>?(38{FEnw3zHr7zqlZkI?sBvVy**ObH%WzIJ80^ifA8ArfjB7iXIa>g*Fd}fkb z!%pZJa=xoHov4*Bc0J321fe=WF)E+R-$zVQQKe}*zrJjQe=D-mT}tN>si?D!Rv0(r zmvg)Rf+6XA%_?1_cUBs}T_uP1MdFnHG!2Q1d>3RR3nnPHh!3fs$@iyp3RCLe5Ng&F 
zbk|Y3JE~b*JDs%rl!tV(E{7ry2MAl4G4}4nam&7qhipboPGQKg#z?YQOYQbVJgD-H z&;~&6FD|RVVix_{mnnr6O*d@hCDR$9M6S~>JRh1riDn2nO8x9EsI*rH6i3ATFDFQU ztrLo0NB<@kte+3x^iy=y`pIudg8}?dMzVTHI|vci-Hwa^^Z-HdXrIj+3l~N zz5arxKTY8jg;;U((wxEmYx$rla>n$XfzewC_3wLYnJ9kU_T}vtHmi1tV}1(Q_aA}l z86-wUc;CA4ym#%wj&+_BO&ae>7scteF4W|d)uzE$o&)TnDSeu7;a3)o7J6Iy4LgGM z#+5|!Htxw6VDvQIXorlT@A?a_?Zh=WJ1@>nF|qqkD^DbOjT*a^_E$4+g*DkIV)bnw zQa&ii?>gS6%4n%$rzg&F^_q@zJw&<2x|S5$m8GbxR`U{wcn071?ip8pDCP7d>U=zP zp*L%!&BccMLb8nb=)55wfW1HmX+fmR1)rVRnsOA3g&M`%bea84oUzQBlb;Pc4Jmq0AP-JPqN5xJcrNzyKVCbe&xqX9WO;58Z)$_KM}wZs>F|HdoX>zFQfxq=vup0pS|f;L}hq$ZHGK zU%>2lso9Rz>#>i#6auw!*88G{%)DzAdCakPD9iat5`)nZEopc#iI%k z^+)7HsTmVJeH|}JhQ#`sBP<6E*&a@5|SjZ$Dd+|zD7iXm%1(eX>b@469)f$B})SmoZD%@9tFGJ{t`U_wbNxKa_8MEZg zI=0SXa!gV=>JliDn%nxX2=t!|U?4dp-L`RQ+YVN#5AHXfQgFJA;WE|yE|!0Ok>z=5 ztUbt7aTDkW!AqOym{U%@(Gy@9(lcV_F*@f9de;{;@3Y+Lx{UVEU$DPC!E|r#(hcz! zbRRX{B(siEfVg4A+mJ0UNvti9v67!=F!H3P`TjnN;yhjEfLV=ib7(lVB!vjY4xHozV20Vp8Kgw zL19wDJFy3|PVGGqtfBsbCk`KtisQ}|J~%cUXc7XbA&Lpk!fTQyxn0H@AILO6?D_?6 zBa144W>FE67!qxv+yLS0%w|#C@dRtr0dbUkkCev%)^i!7SWGmO7b2G9jl>rYL`H0bxGgf&vh$}@R zPs4IHUU`44_xcOk{6@aqnAqJVb?Fv;(D--%SeGeTwT;vB7SoW$-1?I)SIY?J4W3*r20uF zprVEK!6e=JwQ_p?Rh06Hwdp>h3$E))%`NR|np~!jX8-vM4n+0#H>&53n(7ee+q?xE z$S6DpFb3ew4W{V3spYo&2~PS@^tQYWl}X;8YN)^9H56rXrNMK#nbodK=WF5`j9yx- zb66^T^%nrZG(inH@g_ z_6myOaBt2bdmkj0l^O4zE-qGq%wApxo$tgUDW%aw(O@)R%Q1;#e%-ULePG`V`}D2% z$Kn6~9N^x^4ispxzhLb@;atwG*IyNn(&|AN1|Zq;^KZ3OT`%nF$s-p>5p=rw!WP`K z&K+f=q(&sMnF_KrC_-{{LaZ(iNjxr_Red@E7iIj#{(FK$`~}R2L8cC#v4u`$!=ur1 z7taozcGVhb#{c3k@HfSBN);c_8cL}fW<*|T_ru>NuDQJ9_mHP;mBoWo37br?x?7axNN@v*iGupI+Q(4_ z9?hDC5WsADQ-Cl&(^AMhoT(PJ&Ap(Kt^ws{MF*wjwd+O;ccGK7!w`SL<}I!nyR%$r zNi7f25L51J-fxb_nDf!l0U1BO9PyO!u)91q^Q2Cmw`pOZSXAX&e`iym%IcE?l80ZP z?&hJ!?kP6`>U=O5j$L_g@*yU83a6HMDFojvNnf2%L1@)J5tzKy&K|cpL^pu! zp@DUIM1Uxx7Juiqin+>0^Gf_Er!%;wrTD@F=yL!(_YVG0K#yM!AP^+@d3;YD5{7(! 
z8jtuI0fMG~J$xP7aD@43XyNBULpV$rM~biaLIR*)8VkT*2>QSu^Mc(2;AeoJi1~BL z(4rs(LO$SR3b4<>2@ZC*>j3r_;G!Kh>nO?{8bSqrQ+FFvIUi=voo4VH3NH5hK==uS z0bfs>?Y=+jP=7%?n00b_N5=%gFRbq&T^$U4jH1RVOiXoDxvz-Nfj&`AG88Ymye?=6 zFgR^{O>aie$P_Za=(6wmT( znRnFn7ykJRASnO&14bO~bA1l>7vSO^HRFPt#;ohni)EB4g(^M;A8j8*C*wFFaXT)% zB9cclhaaGO4D}au?CURZND#BcWnqe|6L~8>6x2udBvFzr%cl>xwd!L}0(#I57~dwk zfiX0R9LsWIwjbs7gLkJ2Y-y>8t|`KqDQoDvUjXC0>n|8%X40Ctfk{#&fg4HGX=^oy zC`h`Ahr@rx`UF`bp2)L9;=FUwS(M#jfC|yl%M-pz2_C1#K%rJ9HR|ztp{YiYB`@Fs z2$af7>STfniql8R1m)1Rv`Mq&HX?0NI$6sVFz^8y!=jc3ux+A{jj24qt3snjr{?O$W>^%r#hVRi@3yHz|= zfT6q!x<~JZ(uP8ieG`Eor@%@@y%aXXbN<#M-OW=&8v?-h{{_Da0~h#DDEbN=M|YUu zPQFAbW~W?cWo7t4wIsm!q$dOJ3V4Bc>zj$DG{oXj2}Up5U~^n6h*J{6_z)ZKIEj&l z727s=0XD%FfwnXAwUn~-JW}E(>1ew8bIF)&syf`x9Yc`cg!~v|*I!`jO>?tg$X{1l z8zZ47ZZrAOi2&4g(%voBHw%G+*;M$4)DOK+GF5N)3hzW@po=Uu%kg;!Gn4agLrxY; zTWDxou7?B6ZcA>V6KRpu#wwLwq`s!y3$Q8d#jYQ(Sg*P0Jv!kGls;bWXRzLK@Dopp zgg3c>)oj2LK6$l7&c)`#RWq6^virPSI{xZ^_P24n-I6`%Q~Wmje@pmJ`uSy7=f~%L z?RbIpgYZp1r;l1c){|Z4IZ88{G?`ta8(hfI?)>L4*L0We)URHrq^tATo*aFCtWkIhYu&IqP`}}V_4$MvM2ZA*_JZpq(h6iTLQ~X^z(;+r ze!nf07N3vLpxVk9LD z2uX^6oC`vyPl2oeyz4Kxwc0PBf3vpZrAJv?rn;IxEVW8`iU`$mk#q5MD9`9&cMmM0 z!(yqdMTqnjV%%i%W&0Gat9VY8_W1(kohxMPeVXuqesBZj+jDQtI|ytG*d7CcNiG1y4iUj{K?9~;)|B;1wHSCoDv z?5k;aZ}e}jLD2YL$Y(dDhcjTeaz>MxKcU!djv7$lc^1+Aw*W1>4K2RP!?1oV)^XyZ}Wti7Wd7&UU$+GblJ-UFS-V{#E+pI2XekZ6we6Lta~;{sKb& z_*XcqQJjV6C`<0(OS%tB8)IvXO37_!M20-Q1^4!l?UcR5Kf_-U z6{u1LB3*iVrv}E~ci;l2#*76Actde}HyF;_Us#eBeU5HR{efjJ;KvDaS z2cauBWh2v+KLUCV$?-L>>&9={^DdS=nfD2ONBrO0DT?323xX#7LTJCg<=`oP2WWeK zD9!PQtKVOMg@&{%G_bz_m-k481|9{)%RzZhCD{?!x#BGKp3(vHrGG2$j!~f*3r}O& z9Wo7}{Z$6_7xet8ak6xnsjEldeEFTfAQ~X1&|WxyOibyOHlE9)TeqqupNuQT)!=>% zDo943m#AI=1e7HsjL-ud@1_os-#$^o#}-lT<2XYe?miWrcNf9m3_>^5UvN4a^hzTB z+8t>Hn7NMyWXD_!&5g`D%VR&m1hCk2?`>35c8N6fIa2|ljA*m-%$(W&2xqxZrYAX|L!_Pw=#{(|3A{UrrV zH}WpsV1EJbm7}IR*iUb#A>uUaDMQa9q{&mvf+^fF`BdnEa*d0tcDyhW`_sMGU(mbj zFQ_RD$G^4(XA`(2%xPHDYo}XXJPiG%cPXM(J$3q5gt`V+H2X0Z3dz zUY*B%L#M%e$(X!Er%lP$nKQf=# 
z1-d}GS%E?zvbVg$HkFy9IafK$nd#OrXVwDoorf_`Y%iAq_d-^T|M?5P!2A0%7;BVo zSl>8mtYIcqT@wWS3YlGRePTh0-S9@t?XkDs7GX}*-rzFSxV}Hud;JA{&`DX`F?GR- zi^qBUm!%MiR$4MS5WSy4SLKMitTJ;3@vdNE{a`>@P%R+*j_f{_Dk^+UBiU&QX=i(Y zNlL1&q-~@$S$c2)IA$YajqiR&lxB9}vZ9<_KPU1fNM#N67YI1ko2mxil%i3L4Y{Dy zwPYQ09%*G76)S>FK0iz7{<%X^ooN?KA^a#M4wVXHh>^Zb-vi6l;#Ki^#GPwMZ&sOR zn*m!&6&}`Lo;E6RDvVEf*L3pVU!J#V>oYrJa%Z&M#p>Vw2)~EW+B9 zT+DHJYj>23b&a(?l1gV_;fseA6iL@J4EqTH!DGTc9|W3FUN`K3V0@oG+Md2zyxhtV z;RU)ak4CB500A26FOaRf@wlc|p&;M2gvHYMUPKIPOs8g9fwW{$pjYJhyX%Jp`V=SJ zy*D1bZ;Oj(y_%6v+@i%Mwt_D5b2oLzk0 z&PF({F2N-iRz{I{tMbIu^h3f2mvU_p#VsP!5oc0>dk%iSnX&NhU`Dqhn{N&($+b3B zz~H(RQ@X^~4fWgbbswt;V%qplzF`VMC=5GBr4N^9@AuDNu)k`+@S#KD+w~V1{U`qR zQ$LtLaT%S4hNo;N{2m}U`?Zq#sYO&o+#|c>aECu$%Usb*eqa7}_eU7`OMd}%Kfg-8 z|8hWmiAGZXX+D>du`%s6Hz9L#3H2xBywm)E3EInnI4+I{nJW4+O%E`N?;+2bat?MH z!sm}t%L_@UKx*1yh`->ywV5T(S>8))2s6i|uOJx>q3cWHq+-hQ5RLyIdw&5HRrkJe<8(-O z%g`w$5>lfe2olnr5+X>0pnwAk0)hepiXa?11qlHqEK-n=ZlsZvlIEQm#NeyqgZ$Qi z{omJlo^{SXd*5?rTzsy3@3Z$c=afwI&0nJ$`;uvQR(3$&{Ur6rwH_1*44QBA2zyHK zU`%KP2o?r^?U~Eit^TqH?H+$U(T=HMxBBbe4_bmY;Uj>*Rv|Dzi%372;ruQH7X0JC z09FIDDZpM3_;%+20`nJO?+hJa{sPy-X1&W6aBvz1ORg#o8v^yY^bRd2fo!_Un!5Yb zRlZ<*q5WBh`wRLG<}c8r;Vrt(*KOJQ?ERo->e-f8wOa3F_#$u|2JO1gZcK;+lonpZ zw0Db7f9y(Wj8Ae72J1_#2?Yb6E6=(_3;W~a!4QZ03+(v~w_ZON@}aM^lc1QF*ZDN* z#4hzd%R-jLLbypNgzJEaD<+vfBC``y?s~bUa-V!|u7Pe*zPc6jy_%SqQg9Os1t9O) zn@7yV9U<6<&Tnw9bZex&q1`q){+`ge&!3ovpQ--y7wpjdyDb!PoG+Fw*{2_rQ z=1FTd>Z!z{L9#_Jf9x3@oQ_N|1HdZzju%0j`}QLLHC~5+6hjH;`UIzf>yYL#5|&M~ zm!wdfcl-raVj4bjYVsqN8qW(_wll6x0aL!TOAXp+>FiUWi<}3&zdHp^M!ep~DN z_G6(ZKX=qv$!*M(1QRLG=8|<@QMCZfgK2ImGSy_sy*)BacD8SUdd1VRf>RK6%r4ul zF0Z2vz(?_z6r|>iek$J)ll!rtz|CF%gaae95%sW8)a5Z-wcm}6p-J&eZu=b#-?f3^ zxxj>` z1-O}$N8vHXS3H&uB@(-uTnho#Z!1;am`1;)++Ir>d6!{pqOyX#*1B*+;)IuZA=Y>d zaP)NI8)SB^>-sYBR5_*v)M6jcRO<~Asgjh7CN`YUH-+g3{(OX1$=0VXS(?WpBi@Oj zvZ-FoMI5Rm&(cr8D=QbBKc|v#!0y0uKtG31zIxlkBfpudsyX2`m@|cdYH>;U*w$<% z#Z@^#i}u7RVO5O=Yerr_Gc+_xu5ig_T>STOqX{l2H7P1@1ITGNj2vXs+)y}f1xHKX 
z(R1dkIvV_r24duWdbI|j;{fY6KS2HC0T_Px6aD-cw(rqTMl%Qqf2E(FOA`7;AkhE4 zk?_9hC*rX6^Kgh{bgCuQz){iBKKSDq#G=NfMnAo`@fPQ!dUJzI(f8NSUVp)d-!0)N zyvL|>@QV2RA759Nr7AF16wLhssTpYcp3gg}~a0RGYfSGHMd42b7OZu9m8z zNAfobXZ)`F*`aG+*x>>2 z@je({`~&&?x9(`?8~l}gc24ob7PNZ=l@IY?JnK0J1hT1{<;UsAmL+IS?JU6LRD?g zctJ+O__jFueph`e#M&9zxW%FW&-?|5z+=tpxYlP^u!P>>Vu<#*#A-`#ml$;TE*#h?$#5~TH9XLr^ zQ&k(pl@!hA)2@nsKGCEt?f9LCF^*XH>IW3srq3xEPaMi|_aNV-lSJ5*VsCFfaRafZ zi5caDr05TM1LRgz_aBs)it-ty3rx`IN^~NyCi-L0`Gw-Kl}jAecLhrCJF_T>1Y^+7 zK4nBNx#pKSuct-NLS0d?vCe<%jxhCM2@Osy7{2``G|8$(KQe()Ed-{_65?R%F)Q5tKFSB{zSQNpS0%J}scPAYxpR5%_z0ggz2 z$ghfeQP)`hDlfu{Io?J;lfewfMDVDu&+8Bcj{+FoaDTzf8cAOI^|B9SDYuOi%$A34 zUk(@~_AcS-Vmx}mLoz$yfOJdi2z=zJjbmso3=!0*d5J?9ettshMR{8uX>;(|a*PcC zYvmR;HF2qEW??KrJYwwe+b21QS^RBDrurhQvh*8O{`m{`ryNQ*;SSv}e?k6X(|!Ne zImELk5<`9SKWenbd*_`7JqgFQRKGJfuK@3_iDx6gt(HN!JSW(kBEAjp7(rT&Ha*+{h+^PVP-EW#&nFu zmLw_vREO%t)uHUs9TuG4bQUS7YE3#u9vBk63oHn8FwJU7%J~nzbY1n_j-Vp;Mf&&# z<=&MujPHRmA@d$2^+q2HP+JFs-Egcx_*>R%O1IDAY*-e*?yC^duPLSNxTuW7WZi=Nyc>%IPh5%{9)bgHRt za+sg`P~JOjt73HR%i5L5aDM>{o15&p3X4Uw+eZ-2l)GP5&^5k+Ebx{aOMl%y&**6` zU_&kxGfR5joiADExS2=D%WA8{uuaT6XYN<`ze{@jE)zyI++U#4?!~8Q)s@G6>T3UL z9f!QBp;ySwJcdQj8Yj(Dba|KuOx9OT@exv4Hz>I32-0~;)t;Kqw0_7diO_QhveBuH zn56AiiKR1AV{mCIf5>0gE zwa&z6&ECR#Vr`^}&dS`9c!8@kde~(4LV2TrO;G|#Ss2AriR~Up2-?;I6b?e~$%<3L zWv!a4`l8PoVL-$E1(ELbg;y-JqZDXkJ?I`s@#lOH*NyY{ZRs1h%*y3Ie9((nk0WoY z28>%Dp!DZgOO>}Jzv)`C&5L}Nd=I$EM;IV@ZD0Gqz5)NzG;9ut1P(ORU-14v;Y{$z zx&aFdMe6wFvM$Rzt0|O%=k(*4>{FK>Hs;gSl-+}ltnc&4;%o2HiN(W+T;R z{UI)qo*ysChe~ng(f+@wh4~A(QNw;l@~*!?mR^*F(Y+Xv^DIvL&HEo+XmN%-GeU7m zgDu+elPm8hA26HVZ-2ubG0uQa)yX9~=D%I|nNrgOMV*mf-GWXRG*Qt4EU8(JWAOS6 zczc6MafXsh{9}&u#ub1vCCUh zy2)>xzd3i-b(vb8{9F#`@n|D@bZ-ydAKXtHe*c28zw{R*>l;5+QN5v?qUKLZ4A#@M zB3Ua)pKdrcYWx_p_YLDifLf~!P!%zHFMZky#rZ`zzSm#qX!Vz`w^M-$g1uqq z)Zd-!OAKtc+XjpKpV!_-Xr?9G89KoH1*3<}I=wd5$cb@RjFK1A6p^SB>?Bc&uRC~V z*ltXA>Th*hug()Y=UlG_LlRag zgA#C>=oRG%{gZ?!5aD1-4><%^l*ZbvVoGW77>>8T7=_0_f5HC{{y!B^#L2(pbC|zi 
z?ywoxBX*YRd3sbDLBY({S1i!=Wsoc48o}v_>Z1(Q9}8Eh_h%gLFPPZZUqD}#d-}sH zYAE@nT?0Cn>@s$`Ex=cN+m~-??h)Gx_6a~#evl1^*=_vUGrRL?AN_bY`xkt#BB{Kr zt(ip_X2sQm;=JQ8_*g%p(`37~vT)j@mq9tq)%)oi77cZ;&8+$hTPLbfteJYM; z`StdLDY{}NvdEx{YGrz!o;=S!(V|4lQ_dNPz`9|cKX=i)OHl($MToAK0^P=MxU+wur{XqT*O)xy_d;LHwK=8`Fn{UIUy)C~w z{!u@_irT3M{l59fR;Yd`zv_ns<*@ZrX77CNJlnG`nJz=op|kzsSZ5y1&wftHao0>htvv%`G9nSxO4hZEQJ{v zfXt$x$KF?OSOqr;V!8c+wAPg>I|C*kxW9l;;&rjh@~2>dn6~SR(?$&959`kq6kZKb zQ;t2;zus_|7K@PIX5^(+F$K{VHTtqJh(rRFmFFh2hZR!$ zJMz8zy0z&A`7Tvu*7ZjV$X&5f!A~v$IuvL{QH~^zjFg)hDDs|`Xl;4@R{FhOp`}P= z>2M80>L1GIN8r94eeI7xusSeo^W88Xeh7kZ|99=ou}f1DW|X(&FfkuWE_Gp@%I&x3iou&bv1V|2 zK#kxcjAgtMIDt5U=Hv&(SAOI3<`+B=_w%%k?|L zeYV9;gNwG00}v6i8R;231)mC0<6TUNGzYBhtGq~DEe2*hNx3QS-M%1gwy;1McDG*I ztL8bgAoeyaX~X>m(yjHFr^B3ag96WB)F-3I;b;1}Uv!bi;4i)&rj%C;91v^uRn3#> z-FM#*zg2%lxT#-0+^IHTGIHFs*n~rAnPUT%dz$@XFxfwO@GxnR-@8FSA?3P z#x)r0e4fi-)W7=|{2J%o7AV$7zhTXJ*jNwXmq%CO=P-r}Eq8atI<>M9bNXMSa%IPh+27y4fR>wg!|LQTm2<04dFQq2p2{HYwR#oCkxb0SsLqk0DgaGs z$)6pm%D1+YkHOb;SOiUd+?|8?UbNi;6EEFh5Fw0exW6Ds;K3YuZ?$lu@cjg%JbPWp z^2+*W3;|Me#Hn&d;hcj$L|Ni9Gp)UjjoCb|l~4$2^y7fX&uwmqk9IaiwUo(C=6nQD z$JkVRQtQq0!-=a!r376pcqW$LA(IN6;(a4Pc($_;Fy(w-(jCt_n-^)dB<#h6BY?pg zPG5wsRzCF7r$!WF`>UbI?w|bs1A8ddwBM)}IBcp<@w_A!IHx9B7LSnX{T7#N4BZLg z^iZzKoZI3R#b6tU{i%lg3+8@fmHFx~Fu5|I&tNMvu^KVX_3(vD}7jJ~HYI7wnjN0G6x%#9;ONZ{zi zs9ZZ#L8$Gz@7QkNfPX;4`~@I5&=7AD@PERY(JrrU#S5YHhs;{EfsU-tTR)| zKYP;0uD?LaJg%soB>tJhGckUH(}N1u>ccTMR>C;x1Oz0G?tb?U2p`KOTQm!=+gLGY z`J!+Z;yBeM*)0p>KLkPKo=Dz)Xz2x9jem5(Naw=@XF^1oV8i@V?Yzu|I|wB17Tu<6 zK^Q0K|M?5{?+&2w(ZS(^`wK|q{u6(H;4gT*9yhcd>H0Q&lTQ;Dvo`i~TlFnDKx!a8 zJ^l)@B=`@Wd*p#yik=fb{;LZuQaNGybIt9 zUpMl@I;I+mZe(m|c{jL4;f2+3@)OCT)9=rWqY^BZvUU>Bw2xry|d4z#I9OW2lPORqPL<9&t0Jglea}wCY3wFxz zsgf`ACGa5Vn?O7Bjqs@uFud~j8|WjDA<*d)SSSkUTPWmP@k^L z3>~2U0ur^uW}REb6Y4g}x`BL7wB5f&d7+Sqs^e1xp>+Wsx>^ItM#BC>2e`jr{viH> zSU?@q>T-Fa;foNviC4Ftm7JQF^wuK2UD$!vuBwe5m7E7iJwc!J$I3g-e1aIA*? 
zva-i83sI&-1n~4ayW%K13~{)>;6-Jyu5jF@-F%0BDelXg8&S{sd##FMls=B4_VGI; zUOFJ+ffAYMH&2|Zq*2MAB-&aEf@oO2((e#Zohccfu-7HG0A|z|5U`)!X`y+&Qe}vL z6P1XWt~yDvx27k);CP}_%jbXof?be*y@w*s@Fkx^{RJcjhs}5j_dD#+5S|k!_zBFRt=!QkNQH6x7X~KI zj`cG5jwwG1up1@Q`U1&?daNi>E;4kF|DgU-wms}YmlWxkndFG`(F_yRIVOZ%w& zGs%kSMXsg`W|Ag#z-`ZlBr_YL(enkQLun)(H|0KzI)LO)AeI!|dS^kq-VcP7weTRu zNCem2oHfwbSY$Ci_1*@>k#T8gUGxHV<{4pl#)c-!U%Vi{sQ-C27+wuew$Qx$B^85y z9{Znb|1amA8Fw-^)L%ey@vv!TGaOIqeIxzCz}CE5(chRXNOPEiBs*;7QA!JL{k6Mx z`_sJFU$FR_)g7e33$B80F}S^Gf4#cHb6ocoUzv`f$fD`MOyfyWr%#aDNHldWa!+}b zImRLsM4;%^X21%M!deUf7;MD#pjhJ$A-_M1bG%uTiaC=y(+D``CukG1%#Cr@tG(eg zz(rw&*ocv^LdtQgkh`MM-v9-+!BdL?R&;;2JuNG^(l?YKHri-<*+nuFx zSab29>o#p0jZRUw@5&~7XrHb|W#(of0v(+%n7Z&p#1_5po~Q}1VptXO*66-^`-ik9 z|GA2n$EwXG7$^fS(r`9s;@7fh0yiZkrQNZa3J^;A#u9|F#5$|Dt-8lknCgnf(xhZqMp->BeVR$-QD;43zmMjgmbK{%Sz}-2iafChnP*- zSY%bK@aXf1;Xl8H#dyw zGa7>dcNgu;!vWfP0GsMrlV?&9p;tT;7ghaVz4gKBCshom7AxZC(-C%qM8M<&_ZP4U z6Q$d>xhfvZSi9Ce7gSQ8d1cDUmgD30&~-iU$+ocrj;54{FVvh%>z}StLKF$_jE`0S z1YX?m*M1>5;-oZd`rtNj_FfUfg+>pJa{4SY-$~*AlNT*h)XUMr17XLv$KOt{V)efA8EuWnaLE0;yFMa0D zXYTnT|LG7aALeiJaX(b~?Dz{RK8l6UaMHJ*S}qT$?C_K_@V2gAey>BtNx0CND3kC9 z_k-H&FIfIf4}=^T933tki_v8M*AhyQ+P*MVP+41QPlc9xR*KjSf7UI@#Vy>|G-EM4 zQmFaQ`~?tzXqt(%P_;=KR1ycA=4ObPl;NCf|F|4#swm;kQ5>TH95+0kW<#*nT1PyH ziaHe`dsBPV)TcgDwvL_2$(y|Q4eYW7?k^BEEsHgL34KEBYr+d4a@1QOt%6?vrCYQb zgho#HJUlE9C^V|){3K2;YYwXkjmE3%(aX$zOUTdPUmt19ja3v=o}&Y9&k~|GhCyyT z2OT>KxZlOjeDydpcJNaY)6@<`N#!083JtCxl>?L?@$MCZMP+PIoSz&~8&SzLszKKG zT9WM%mPZ(AqmqWv4fhvBm@!jOzC&JX=?cY>U{iM&(^=x7JRDoC>^P)WwR>%41kWFm92#9EJN zIgn{OSvw~@Z)gJl^B3$g{OdiGZq^;TVg7>P!={@atGD36dXgS{M7KlW(%bDUSp!St zcn{4nMdL_P{*E7!vSlRIx$l(#Iuu|1mlm@@)86{=c4u3Jx5A_#709lQ$&)v=y zGrV<3^nJ!)dbKB68jfq?wYK;l2fL#2Z~*C9kX2;RmK z%w21kxm#dZ!~F#_cOTppB~RgGFH4e+tk#Ir*?j(SjS{Reva4)~Sr2gI7D>ulm} zxepP&tkLJpX70HOnLNXfn|6|xls6>Ein5vn60yTEK%D`Xldp|jIK?H1Rj)H3{GsrZ zxY%jIFt%LAnt%R+U*o*n0>zr`8`cjG8*5rC5<6+!Y8UI|k&C4~t%%NJ)YVyMkfYY$ zk8`GlpwRA*^9~Kab!l{0Geyd8iI&+@PG&>!2 
z(s#y2(ei}Ik@g@=br<3ZF&SUL;X0@I@WSU3`P22WDuUrbRd~h-*Ml9&O#7~WOf~U_ zKgzT@A7EqMLE3b9FT8{!Ww0*GR12UnMi@^xJkv^zBb1#)WuvFE{HUp?MasW^q(;DH zozCLl{R?*4{q-J7HTyTJ6Azo}W`@tBqMiec3Krq>joRo1jRHq)Zfk13Ygn~q?^FHY zwLjHxf593YWiz>CCBw4yo3HJroam^$-UdpgaD9C~LM-qSB~d5H8o3edcJKAsd;DBj zVlIZF@5{o=3F+rrOZqtg*U6*ezM-K$%(&$a`B)A(g*pAo2+>N$ayfVV%@RcJ!O}F` zUtoGvfj6Em5B%iV`b4@UZU%|K6}o&%db!s0MFpc51+oVOx?0ry##)VIpKMxwbyk-b z3oki=xKx%gQJkKhn;`4S;C<}_`v&YYw*EB+|L*zpbUUfII{=erO1fk|$EP!dSJccGIn8CUQ zyg_^Zfco<}qS64RVJUo^3aQWry@LII^OKdCG=XIX%pHAWag7v@j7hpX(VSw8I zNp5WJp6u<@lC=^B)rG#DC0e)iF+TSczFY-(EIj=ZQP{XgvrB7bv<%Ht>p91UxGkM{ zWsX;8+Ya!-GVK7&Uoc=f@`BLdO{vd@C8Z79y$b`pJjJE8IV~^8rk%t<&^cKRr-|#b!wg2raZY+>kq6e>CIe6#X#Ji*T#W9!I?NFdnI{{ z$M^ART%qcdOmByPG50eX-kObHRx4Z6vljQ2X#w^WJH^Ul_IKr<}{1c4Ba z1mgF<`v*bbzh#9^v4f!hecu9){W4VnZ9({-S$N2o=sgn%5cnHm8_?I%4+TWXj&tE# z<(>%<=n~lOhwXf0Xygx5DL)*1JI(iD;2)}C2T<6L?F=1Y{({$s&3fp0-@&83EDQ@z z)i2Uz>YVH0{QjgHa9%bl|8fQKiUE$?MX)N>o9wVkpxYv*IlpU@~uBD#x9pOb4wJPJb`?l16= zzn(oGlAw_Burcs8htSY~i9~6{2+yfc_b$zTJSK*CzW>V=gd8S@ibzn zE$%`Rqt6>TD^s&UUR!qH;?iXg`wAg*J@XTDn#EDZk4#L7#BMCb`i+h#2trKW{__|7 z57GZq0Y#kaOFoDB3+fJ=ab^=t+Y0U;%^)JB`kZ_d1}i({)_Hm#ftWYiJzxYNVVg=}5yZN`oOk9m zo=s@wt&TChGS2zOJ~3{e&jp@^-Hv0&UvOfh?5F_8{Wktr*DwkLnZ%Z?@#IuC>uBOd zXzcaMrr3KfAO#|dI896~ zPlo;V>Q3dwbksI$Zof4B90oNZTaim-^l1T)N6ZuzgIg4-TGwwOe zwhG`R_BeXQGT`NyyEmoc~ z#CQRz4FlRh4pi?tx?N{vu6m92yzCP9eTwzms*3+Z>GfI{JX@JxTwCrARv>#Q{rw>r z-uz$m1N#1iKOB7D059*OpZ|&f;|i#Lc)sap;IQ?h7agfTex9q|5Cxn3`Xt&#zg0aw z@=r0$@41^r>6FOs@2{WsqE^I}_qVVedLJ41iF+e%|85C~HpYiK&c^!LU&|*g>h81n zlP4KxkMkthH+q$m+=(mHd${nrG)W3Wv&35vF?Q%eN;1oIZfpy;DW!DSa}t@zdY=)% ze_{HmU2;@IDS*g)Vcg&W3F6Y!2!%UgMgnmEUC@}vO!wB@#8w5;xiFY|;P!&SMB8U8 zu1O}v$f0GG7i7&MZV(IeqGpn6s>}F4r8@DsDwD z3PIanKzr;3NC>~_fRF>#c&I{yPJ$NTmJK1j zCZVnq%;p<17D4aZ3`ZW4l2qV7m#}i2LTzn-mBH)rg8 zmu!&%vgr}cTkoGR1t#m`T}A&LHkMl-h26dXS4lPLWtz-EoL z{oS4Rrf}Z-I8vT4y5aVMyn^fz?#slzQYo~wpM0hx$igt0@c5*)*Jvn9sTf#<4@h?s z!{|2dS>*JfvWP%Fc3%Bwm&E;=3Q>Ld``xO#kvuv9vy0*7lJprhjTGXd#^R=^R=o&A 
z&zo^&W~l`-t<=La{@Dw@Lj21Dlm$8lzEPKX-(Le?!eYDBVhTCs z`O1f%H)hCu&Q|@QBQsFbww3d_kYPEykSM~`o|mGRFJ7*Hl4PtcS+|)i#iaM2z2KKL ze{Fzb&Hoi^QjEjKx`083SMv4Mt_OK?dK=GQSlRjFYOtveNdtm*!wlC*tM|uxkG%j1 z8BX=jpIQGB5g1gufh@dKCZzE6budy|;FAO;7NMq^TU_iweDq5-g%05O2UbE=5$Pn| zaP2AC+YX<5sGRE~kAl!XVCk`3677BkOV)6EL09Re2}8YO7oG5U5R3R|mvJ{r`9r9^)%aAQ_ww5Lh`vB(+#vOK z-B)`7061thju2czTYF8`-Wzjqwx6akk$NM}&dxpUC{e*BnGL|6G2-M9ukMS>>IF;J ztR7+4Ohoygh@pkV8(z(!$h_zW0~&5GC;_V*RI5B3z7!?ngkfF{?AMYUrlDz8L2ww+IP7ZQ%kb8nkA8U~l{pDu zBrkInBYo_G9gMLZRiUzd9Le!jp(6sSoVVvuRXf)?Sf(9-O}4iOyV&r>qf91q6*>7i zw~1@b2Mg*IdQ#p&s~@x)IdahZc_EEU-J?hjX%Xbo1he;FeDV;R&@*#hzoX*)xyWs8 zZfd~$sXkaRZ62Z$W=Hxu$RaF&a6ojMO35K5^1&L5Z1rs*Lq*`Gs7+N`(yMAs22o00 zdUxgRm1`JI-ls-~G)6Uc?smF6wZ01m7`ke|w+_1Uj{-*6sr_LN;>*GA%Kq;AK=6}6 zw-BTs<`2KLL*UK7+w6V_^b`mPFJYVCyudqcr*=aiu)@w6e)#k+?XWKkuZI<&unX-B z9ia9CQl7(ReP)(I{sFy#{0PCrwinWdOZiEsWt3H9Xv=i-F_4u7pYG2(%wB+m24$UA z3j#@!G(u1@LNEfU=o*2I5I{yCh_VqPVz7!4v>2yj%z}?^gsJ)UUPvSi$IH8oU$LK6 zI&K{06n0uqsgP$axSD573Xf8m{5&$i;FxL8nX#bhhg`fJ&X_X=pc=0)Kz}lk+;(E} zB+cnu7~*hyfuW=N%9Eh1(HX0W2^FsTzJYEPZ)sr_Br1bhs+A4vgZ|z-mX_|*S*haB zKCUl0HZ$&7O3o3G84$GEhkn8bNJcATfNsc&E}d)P1i?+~`*wrwSh==>DD>br&28!p z^p022UjMTfe1ZBmJ1FA9U-CKBUO;;Cuo>@YnO-DhufRhsym*n^bGA%FSoDHau;a-h zEW*c&Tf^J?GY+#CAffMLFNg)0d~U`OUlWYGHbjk<4GQix$d_ETdpJ?%8i)Fc>wzHwb&u6RmQb8`>;>-_Y9DhXI9EuMQubtx zmRA!|yfV#seAC>E$|?An0qa3;@7}tu(B!SFi+`2OAl1eq(EKv)NmmUQ&luojknQM2 zl@)+MP3MKReWc^~z|vN!83Xfm{bLyq@0<&J+n%R!hl=hr0G@c{apt68EJ(4@fAjRz z*p!kAZwrB{i!HCl4Ge>)Yx|6iAwS0e{JZKsDH8PaSZM8TlK*AE_YL6S?~e|%48{56 zPR54X3rM97n`YG6k_ZxwY)S2S!?@xjeFkDJPqb!<$FYuN&3JW11~TtY^B#Kv62@=m zpass7t9zS#l=$n_osG+zAMc(R&z4NfU?EQz;wTl*IRlDxy^xm}n|Pk}WJ)%- z75bGzehM5#RG5C?w=ei~;=27bb$3G`C9gU+Ap=4>3ZjvA*oU9}0zXsGIGSY=V+; z$`IRdTXUXIuH%>a1E{=1WZdlw#)xks^TW?YEDgQ2Qpr6>dbt`=v3ZK!+TdXN`31gU zc>R9*0YRhwXO$BE&fji+qaVm`-?(2RR6io$^rLv#`Z1HmW|k{y)W`Tl&5R@4gU6}9 za3+>Jm=U2)P&tc~K!1Pz?74jb66Wvh1)fzp52b3Gul}`so+VNYXgafM&ih-QzUhzm zY2!tizN3|aO0mzyfh&s-ZXg22+*fhYdfmt?g&v8ttdwYez~XDiNm2Au!s!xGbs@wA 
z%$w~6p4XbXXiYtoy%vXe<@UO3^4%jeUdIq|HSDx+HpAot_ZJid`z8(2E@xsPUMXUl zl&f$={}f8ltf0>_Z`UplTs`RBKQ`=9-z2@D(yITg9`xGCHS21RoI~sRdvV!1Os{5J zuq1%E)5*-^Xpc#H*<_h+OUWLK>kEEn+IRe!-(`h?%Hw>6z{G`;7Y;^tPkSp@+w~~I zF~gz;Dbe~-#T=;kZzLID@%$gl2Rdd4?T7Clwtp|5{W>o2AD$B)29=NKH~DBCs(g0* z1<58`iq~~dJc?&E4)=UsHh;WHVc8!@a6K*&ot_oc^J;$u?e!NN`ArXm9N6EubyDv6 zsiS`_p{uG^_T+YM=}Xca9%p$j487O;c;}==@ML%HO>^6E{EL6;FCb_w5FERveJiW) zbd?Lo^-`%Ny-ruq6KZM1r+3TIe9HmtdOc#N&)q#jc9(oFtQ|SVLI1hoNXAA%&HJqq08Z10uKjo8&$v!x! zh|{&$)p-P{I?{0^3R|^^r{$NE-Vw^QmmI;vifUH-jjCo?Vtyy_3G3&MR2>YuH?k~XlUFURm4P9DJF-Z42e*pk=6Br3@Vnqnn zn1p{Wzg?A~YLdHUWty@@oJzVLc6;IlfQ#s76Mvi$7p?70>%BKLOxI%*qoVvst*I_-{DsilAa(u4v>%hv5(_R9=3b0)C}ecB3C zv$t5`52}az3yy6lVc2W*`;X6CNNHn*N}TjEJvRIPK`$lE)8cw8G5-T%eH!T%g!P8sw@k8+)$}05TJZE3hAFj-u-xD^jRB%n1s{>B8gxF=lhfhh%??1^l38r^q z%&}B0UcGqZiV5GP*Ym;8Ujb<8PpGZ>>nGwul4{1CMi;;*(oa3PcFX6i#0EkkYU(5` zS;PGWBLUKsMY@Xg7|iSieO@M%u|qXBijSW$iC@clH>F9;e?Y2Rmk3tEVmo>~@E^34 zPwNz}61)4iFc@rMx=Qu=TTL?nPEwzxg)mmN*(tqs{c^aiPGzJdW66h>vR2t++!DqT z{^u{)W%t*6DAnL^R6_?3zYg7L@!JjfqbD^b7-j~bg0be~-A6kd@IC=v?2zs%Pq0Ud@2JQ@OH;K7n9 zg#}1rXgaTIm}`_U(MkBCBonWH3UCy`FE{P_6E;H}>Y6n65^NajX2OjkqbQ~I#^yZ28|Um>~dt)oI@HA*We2Qc2@Mx1* z`YzfOZq4T~O4GUSeQ}089^v>e{RI=NNCp7g8JqB~P&}~`fxMn0p|;1S=&&HD=Y_13 z9j1-(b1W+lm`yiSoV5_#l`uU0qjK^cjdd_TsCM43tW()MVh&mfx?BgaHqTe9fUmKq zmM>|p<6fs-M$*!rdFHxeH7%C3YO0?CrxU2`=8?^$*A zg08zF8Y6%xLC42y5ojD2rUZpxJ`X)%>T0#mm(ZVJjb}sCpb9>PLzk7Xm1c6qA;L{Y~A0|9t_20kt zP7!?H_CtC1EU=FUeQAO0d=m)LPZqKu@HZgthC*!t2s3BJ9)eg2mc1mWwp+H%e+!;B*>;*B0jk*xFS5o3OVF*#If-{d%9P+)$2VW56mu4c?*$3D+rG5&&6T!OOoUOk7LPB)0uIlRy+L9v zNO`J-{vFa&4b~dffwIz3KJ>>eu0M~15e|136er=kqb3oKamwav=N_?n_o`uv-q6=( z?o3UBrP!P{>H!HSR!(hscdFxRj=1u1-=d7AjJ_wkkuYCoV zZP4gPb4Hl-=g-b)8FEqa03qP+I_c$tBrq4T!Thwws=b-d>Xy%$G})sd|?O(8ZcT5^j) z7twB3O*WtEenWSDlrE&;gNFMvpmaDzDna zWf7R;F+|qwox`KZJl$Hi`ueu0_p7W5YC7-;(J3Pz-7cejM#hkz{P}-OklzP@;Rk!# zLElIGVHuiPzh>Fpw*>DN4to#9`OHqjhPexJ51VEO8In)&x3O-C&0oVnr2ll8H(=s& zO7r7aZ;zr)o2`(JvcLCvV)^?BrH#zc+vb#^E{Po%n@s!y51$;!l28LPtA~b(u 
zqvIRald*w#N6cpLy(52Jg$O*$;CgRK#BJ4}eQ*4>&av`Yb-S1OS_^Ce+#^r9*Bs^m z3)zJNx1oG_bP7uwgl#Kh2LWDD%!{S$n#Bo)>G( z%g>SA62ADP(P7(A)2dhR0Q&hgzhHRNkN!yr=-2W8)3Qf;KeYc}>PPyUeo78oKM8B1 zRH6L6p12y9uiw^8HVix`+z!O^+jz5Y@+bSSL-yCtUUvb}?-p?GjXrg6-|&$6YxxYU zQ7T4UMO;jkOSad=yi!L%Bl_}&ygtg@rw&gX4N7}N;OKJNEkaT-wX}22z-H)LtoQ?! z?7@O=yMimJXVbPu!~wA1(1454w4mtcS9Nv4Xd7r~4*IcybK_`LrjX;992_wDz}*EV z7c+WBi$!Mesbg;XnlGquF0#GNcQ9m#ziRMI)8PHq0p;`HD36*(Xl@OGI}%mGrUOET z*(Frc%zE^CbqmUMmK((%;Dk@@-811IT+0pkZHLVEBHI20&jL zf6}9TZTXYJ|D}9nzR9QZQ024ZE?B(tfo1KOh@9o@;wS2Ap$7hZ-QIy$%OrkHQ-4L>UhO4 zYM;7hHQ~ca2o{rj;OHY8pY-xb%iL4P0{sE~*(TE(^s>fhhM##)D$UoNv)YFC@p2)` zDIJIeI^ummJj&_R-1;=t0*Mm~cE{NAG16smsEbjj{XdZspwLSxeG?k^XWszwv&vuA zf#EG*g|<@w{WAQ&H5`J_@PGP=&~e_`9ihS81&xO)H0T^CHj1l5r`GLxF8`07uhGZL zZu&O9NO5kG`6!#K^FSuAa(|V<-327S>zt@Q(qA7mW&y#J^<~y67RaDtXM8)uvS7_p z{;ZAo^7xUG$X42N?ff-x<%O{OWjVl!C*x)nqpumQ)?KXnIHMk}G_5&$Zgm-AN1KJM z8EXo|=!Uxs@YsJmjFC!<()1~J|NvL;Ms(a4%TGx z$x_YB$A@ompM^NiJZoxP^SI>F6*P1W2q!pl|K!t@7Ed-j_T}3*w0g`W(mZ*(D?E5Q z)_C7LF8~OJxjB+T4YmT~y}p#?{Sa0kE9{nNkv{}(Eubj$A0 z4RaTC9X8!Q`nOEXK|X;uG6TxdTj^fi=n9aGPzvpQ#;WSSdgL?r%Tod zAHWdCmn+R8Xc_3hbM@vOv(GW?Syiqx^yf|0MWWlv?HFNL!`%g2iQrj=KC>A5^FdkN z3JRT+x_OUQo9Be%<@@fgnXwEX5NjT`z-ae=JV%7Wi5S7!ct+c?DAer>ZSVRu=01QX z*o6R(45gFJw0UZ+adc82dX2LTLQY>>GQgWj-CjGvJVqh%&t0&;Friq>eZzYAu(7sB z?0R|OLp^nlE5R{`rlO3}2oemNWr1ZgeKRh*`E=9$vEJ)0AcHT-{_H;7P^uvS+GTm? 
zsx>z`y1NRA)LRTsv-~zMHl7&b$*g1UALDOM08rh6D^_~7y)&*LI_OR%X7enaM-wL^ zIMah-*`qPa&JCj)?k>>nV$L=&YD_DwtC7<_O?7{+gR`17j?7d9i8`@6UGku}^X50L zSTm>xvHH&tRC37a4EI+1vfA=-_^zqkU*>Vfx&lxLAb7rM;;Heq&GJxeQgb_MI`{6b zBS~wAxD5>&ZmG^ccflV~eeN68Glxy}`O5(bt<~qFl<%Qp)&wpuJ@4_2@FU#1n%m-X z?4HWvA3Vth?k*sQqf9((BsnUEE7~)!ib}EAkbMF*F!!swAQo`Zr18Np9?R*MOIKra zYxHE+ul0Go+&{1BcAVknNop{l+lH@yiIujkNZ@qko4PrEE!#S-s}0SBUWC))M^W1c zU_is&1*J3(PTdk~j5noNjk}R@!>+H_+cuQFZ`(C{7S)UPuKfYiv{d6}UWn_X$5cr{ zZe`R)0VD%OhS(Yj+2oYfEjiUm#`~HF_6_(q`hfkWRvr#C)LpRhpKyM}r-N}H+hiJ; zOuBiUBD*KUcsY)%Hei8Ug)W9rOQ;JvvJU@9hv8*;3E!l*Ya#cE_tWwh;Bf{zqV}T8 zDQuBB@AtnsJ>>;V*Ceqb=pJW8BtGe7RDvJkp?_OXCUM@i_Q+b&@w}Mbk zi0haoiNlV|S=rDQerMt2W3_`mk3OKhz+`JI&psv2!n6K-@O`^ia>vz#k{aV*nAD{1ie&^iZb8A7A%?6DAUV{-oIkc5*h6qNOz+ZFY z4+gT@l(rUkm`m-{0wned+kl=8HE04g!-- zkaV=wL@@r4G;7X$wP$S}sTDucYLZF^yuE;J$7Zvhp162e;$$al312 zn`U4EJ)dgIRc`bd%B&MtFW}gSz2G(w_(9pd^qhH@rJCc1tDM{`yHN9ShjHC5LY#&W z7b}59z><|}X^XH<*JXrFgd#3}=ud?-lz{rPi)we8gq2$Jh%ZVI{RRCC)6TM6;lu5F zKb=>Rz}i1=D6Xat;g!yFNEBAQdYoy4h(~s^*v0l<#-8wT#4NqR38&jB9b~S*DVkO6 zQlm3fgah1q4S(~pa;m5w?v?K4YbhTrHGRvhdR81kBYJ+!l@t2QU$6@I|K8#e*ZMM^ z)lAtK!_O6I3@abC0X z-6AO9EZ8M^45259AF)ZR7pRZ1pA#)Zbl=9s6+(kq(TSXT0U@#hIzo$lxe*1ix*Np# z0Jpf8Ix1K2l`2!rY{(=Ovhv8Krwg<9W4fM+`)5BG;5ybh-^qyEV!%=3drU-PQT}yp zVsD`L1!wJO?l(6)l!2UBy&-Y^w?&_m^Z&ZPi<{4M`M9lz-F>OqJDwiX#>)C@YGeGc zP4>Z%#Xk<=#BQp!Vf{n6u3kcsMk5y)bGBD8*V1V~wB(^DBviuxxvY&$>`=nGIS{psZCZ>=P zRQ0}a7Xa|0jLVZx%)fDw$%8cyH+R`;W}llJ&RdL97D5Mh$C(lILtMSU`2OXE!mvt) z;E=?|U6(F^_R0HINcfgf>4hUN{&|W)euMh?+{R-cHSsZNEQa}3j95Qsg2x1%_=)!h zcO4-uO}tK{z{1&)r=?NF81r}LjSO!}wxzm1U2$>YWA$fOQ^8i&*aCD0+8l?uZ9ehdO zJpEwSo&a7y+F$j9pYUJX{cVI@ZCcl>7}iT*0K_;K9<|i4e=uA;-~77s9~J12($dcv zZR}J}(@@qAxxaw>M^iYnG01Z<_iVI&T0YL~7J7c(bbcVL7;0{8Ik|M)o&OT#Hbc?C zNW!pk{}dS5<81rV6=9tv(KlAbG<=IS)A(+Fl?1fnv5Dg;rPFnez`-LYG_axZnMNf+ zYLaQblj@E}w%e_@x`wg%7cmw+4I#*f=r1@bK1=PmJGK?n9BTX4_>ykHiw=ly8}DpT zzo&s^>S^B%%4g!Z7)L4RJ)JO*c4dV7i2dTwya;+hMKLpbqKT!(K?cC3IsK-v)Hq%H 
z0-6yX-`Oav1Rrc~Km1Ba`?GOlVg5W&b>zOYtSQ%p=4~+o`QZsX+zCB&jn4PmPlH}s z%l_^@NiLsNGQMmvWbvAi3ce;>y_X=PABImNPbB-_y28zq582`H^3nM!ABjzs56NG! zhe?b5%GrxgYC?>|&h3>bd|3PhQ959lxfEF0!ygkfNLfMT{sQO^u0Yh`VMTsv219M4 zKP@5ECZ@pA6ZaOx_`*UpI+x+x)^%zQ2NF>c&uZf%89p?x*I(dcXXq2uG0CD^CwQ(U zI`)sHk<<6QW44>TYd8o`d=jYwc-&RfV+egf6Q#l2k*#Aqoq974ZDk>1q8rt%rwv4- z33uj1e}R0PCRD@6HqM16@A%9?hSKgmZv2eB8BIs~L7ju%74aJsnu3|~3*^6RT%AH) zrJrl%?3~;dopeYJ@q}aMS*OqnMhR%rD^pflyspAHtQB^Vp&tLPzCKy1}a3i0=KO;0UU|03@9ie@bNugnF zrAWGg->A?q6plrx^K?ln003=aH9MfPhWuTN9Q4QlwW5H`1(BB* z*_X%MGxmB$MbiViz<4Y3EXynEk;-~Ge@hg!+$sG`!6-K2>;OBW#EpqSH_=~k?H)hN zmbm~{`6D8#DK3YWp(_`pjcGVllFclEsJ?cy4buHvO6y!8S6^Ocrx`Vq$iF(96TOgn z40{~&?84^}?N3LdQy@D&#rEcH54}0T~~& zVX{>h8?ZOd<_f0VX0P2b9qu%(_7Fv0+UoMY6$l!p$j}#CM0{I|=r2&5LmOSMXxZTv ze%eIYIuHAyw^QfHEYcL0#I1#2oNRo9SfhBNUwKZ2Yu21MQ*)>YcL}Pk5oeLk%AysG zsnX#Tt_Myw2Dk^j)0mEOt)+dG8|githy>r>J)h29ST%P@@rLIwf5E!2*8hsN&Sqon za{vr}-DqENzT1LJSXiZJc81qc$kaLbL2gFJ&7kHtl(8oF7w{4%Wxux&H<4<#&)E5` zA^I)gGlRjcA2hq68VA*Ts88Lnkiv@AKDiPND33o?LR?U~+jC_qgiZECc{ROneC291@fVb#;Q=j&}zcl3<+>X9^*?l#FUq;|mk>lA7QtcSY zG`_?Aq>^}AN$>*fq;=R`uyTix>_Xd<2+7Jnaa}i{ zzJm5Cm-)+}S;$lF`4%>_@&JT^-O@_(s#2Doz3*$!+Ni0Ze%a7L59afj^GW;TCAtVe z6a5ALitHcvfjG^vyZi_J;jSGrSZvYrQN_J9j5NLkM2K9uB`XzURhX?Gl zaWR00l6-pW$#JRb2}F!_vN%Qi0L26fwKZiN{{J)Z)dz|QG$x46_8;MV?R`uB&~E1P z=Tr*lx28E+S9Zx1_v#|{yq7{v_@HLQ@RfDtiMOfo1*kGy0!;zyOx@+~#h|2}-I=Ks zDWwn`{N)~sI1~H@lC;-()C6+1BnB0XWYF8yd(sch_U5#%`U`AWIGI?33e=y5-4U9H zKUNQBv$I)=Uq-!V_Rq}9kSN_Cd`}eR%Pf|?tW%C{oB1p|0-X{Znuyqz%M=+9P~^Wd zHv$Aa&N>`AblN;!&y3k<87P9c7^9=Jd)p)fLiu^4Q{RR9#=`YB^-X0L` zleX6i3hxo*y52VQ=R463r$by5@b)WzoWNZIRMQs(7+Z5?4j^j$bly3mg6I7>MtREi PgC@ZCACEHDK6>;YypJV? 
literal 341951 zcmeF4bzBtP`}avfkQ9&-mhO;75SCC{x;ZRL1j=vkrELBX_XEM5hSDp z1d);y<(XYr_4bba!u`7cd43$-GiT1ZW_H-?v)5ejIkUSP7!e!jYrrYtk&?)^E*yv< zPl_4=BznYxx1!itoJ2;0&R_S9u`F=BRc(5>!PMtb*i*Zb)*k9^($h4FS1%r2NSFNv z?MYBbMEi>ZjLllRw@?qgjH_5uYc_6@x=KH*1@o4eG>{7ZelHlu9k?#R__7ea_z+(r ze_vRKTDjo;!tlI@QP9TN} zGbb=d6E{a&Hb;{S(lUpjpFz|zh*}X*%Oh$5L@k7CJImU(d;ySQ$%iS@nJ)_f9u>6XcnYleM1$*a+zT)p9P4SCwH zVMEf?ro%etr&iIwL3SfisQGzy1Rp;Gq1AZ1?mp$3G5-F&-1}{D-Vv(W;p(%t0@>Q{ zTTiZ`UzZh8daM>AJX&BtgNV)X^))Oo(9EHr_O6_pGtXVuN;iG3 z^gf)@?9VlC zw!fwZwdV@gUe+95E#nc%dg6lQ?aG(@lHI4n4-eG5oqEzQrpD{qy|z#7B_vBV*pgGq zsI{=VDu{IZ+#=_1X#s8sugEViRBnpT04WPjG%_x6+vMs=?^y#+74sz;wh0B_n6{4n z%KFqkpAZlftj5&3C-c_fG^24v=mt3hZ0!-s69O7`3SLu{#?UC zgZ*3~{@H~6+K=eQ{W*w+7{uD`00YvG0zrfC156l@IRg|8jUNrHel5tcAAP^>0d4nr zV7sGGq!rlsu_0u)6SlLZBGTr^67iS7LnYR(_Ar&$_o6+IbvH~tHKGg@1%0-7e zw?Ctodv+VUj4txUQ^UtC6O1z%cpvW<^(Q|x1|??r2UCC7u3+zR5X6q(c=ph*RjWAfOoG}ch3x+o% z3yTND=IHt~K!s|fd_cg(Cp0!Xrkw6qB<^^=o)o9DT1&xL01^-ymBAT3$`lz z@cDunT2V^rJUv-Jlp_xp|7~b!XK;a%kfgfoTpzVt$QT+ChVtw62YIw)e~lpoIOgw< zK9DPnWEmM#dVkDbW_7vlbuN3nkDK41TDu`Spf^o_jHXpLYdhrhWPHuKv$5g`%`Ki9zv)KCbNWKY+>yKX`Nl!*m0E6_6}a;qWQw{WkXKI;(lZms9V<3Nln& z^|mWrq`1CA!qNA6j=`)=vkHTKdy&a?UBP>6R%Nr?G%9E*pHrr zDmrbl`ju(vAq5;n6bRPBJ+p^6ux_z_4jUcZD6v`iiot4EfYTQ(_XKaxY{hR8 z;>Xcpw1B(TeEypU1M@wajpHfe&_NSNx}eCr7+TZu1^njUV&?$)!c>FIz@4SH=5o zeNvm+r@p=lQp;f5d8s!lvL!NwyAY4(W8R<{IOoE5g`0XJd`ud^B%J_WsrqKl>&a<#u8|IArFZPg_C6$9UuOP?hAwezhXoSFjs zxDWJ&&=>A*4x{d>u@`}X$aZLd$VLlr=1R_g72(44tlX?=oER=WX^-{owdE0X1`!>_Cn~r|kP;TY$#rlYap_3kmv60Az4k+HNdRpPjiEj(F=+BOW#l9e2^OD@xU$A zdTLt=u-5nAs(qqK{-mcu&)i^M(lnC$l4T9UGYcqM;q$ z)gGpj(O$F%A{)*gk?n(#)zQ>O;;?#ILxrz~qP5R#??RVc@ZE1(#9WRTs;Sc*VmuRM6j2ZcfAd!vz>(zm_ijNl_`>9k)WN~67y6Xi3 zkF6ag+rB0GMWH?3r{n|)-M6cKbsADkdx5VTE|_r|b0;tMZXfSaJv*aUo%#h(C@dvs z4iUi6n3Wk8#UiMWOUyRBxLQZLd7oYEo#)XxK!??{uQXuTpMX;~Kjp-`NHn|0rAMlS zY^YsZra#r4EJI4}r+5C(wcwJ&+m&1uvGrag2O=Bp-ys_ES*$U! 
zNj-F&c>a7>)2%}}uAO)+Uce>JTaTHr z+0a+c$WJ|R=EJ*??!!`0ZDGjua!Nqoe0s3d`QW%!GG;jcg5i@@?UBudg{oeieBIN7RE zrsXIN4%CG>nc^eV1#{kmw2;X59|aavZ*r8LMQL#0dW7qD0t?LfPsnxxK;?r7q`?M; zH5}Q-pSfk>u#P?~Fyth(u2>Av7diE5#mCfH+@Y8-x@25npXZph$ppHnq5+i$XCz2F zJ#tjvI1rydA~C|RW+Y7eOhG^j;J7`#Sd%5Y$>0gF3w^A=Uy;;0yT*LTR1Qm@5UNgQ z4dCB!6SZU)IUN};5fs%q=IAS#>-vQ^V!oyHMyJ7dbfp6+Af#{v5d}c{^4dcjShrud zJ&ZuMd8bMSil$|&&Q4;vu{fy58zj6z&qTiuSYF~cWZ8F(RPcY zVO|`rK^Q!4L(!14W5_6uSnlEuNJMbD`jZK4#~9uZK{R*mce>#<4E(pVF?0a3k|0cfsdi}&`b*c3CIoE3(`mw84Pq(< zMeRj{KI>lidqTKf=>fo0=F7WlUzH84N=DwKre2(g7dSpa3KS+g5t7A5KYZK@7_mzy zAKkK~l^!tVcyyvTA?9sH-TJjLzPT^6r-o!K(Lw21!KIgcDf|)s6ECt{%`Q0>VK1s& zuFq%MUYvLOco@f~HqUOq;qPYZ6}C@obI0pD`rW(bMm8}t4xM949(ppm@oMD#)A&fB z8#{_K`7`+iZt=ibv5{l0xrrNtDvyea(zo80N;YHL1K*Qm3!#jdiyqLTnE+Pi;8^*o zKn`mxruPcQDThV6yAQ%2LV85I(yJm4y15tW*{oX~`8$;R5#Unr9nB1N2FD~>L&}yu zm{-Rrgod9$do)59{65O}j_L$(;(Wki*V9i19}}P@+Vk0U^t67QykIHr)>hk`VnMn9 zULM85-auag2**`)=$MO#lh{JpPYpJztM^!P)^@z6oyTT|*gW{xgI-d2&txpdVmLr>T z=;Zy0Pom7)Q5qbm452b&gvww+z#b$9Mwr7J7>E9=2=gNd|4jp>Yf%~;`2Lp&1N#0? zOeX;R&1?Uo`xmCOf#2#OsMAn%Ld#HpgCz7#J5lDQeYP{kJ?2g(y?ZxJFHkTM?=#Vf zmB~1}In6TgsKA!TTBv2o>Uz89v4!e-x64CT{BEsUz!jXnanth<%sSCKF}LJnFmoF$ z>ZE3b4|!~iaik_bI1Z3dGu=4H5EH-HS1XXwcUX8{>G^Hld@A8^>_j=|z=VSvF?SLj z5=0yTnJy@250L;4bp!~gQ?^dWS!fZ^b~bKzN`8?rz|flgktdL#K%E4r{Lk-p9CT8d+TB=H*}J|NdM;C#HCd zF6tf<>Mo2uA=<>6S-8wmToN0+zR6Xx4kQ(FSkHf-!9IsQb!wkFo5qnmRe;@8=Yquz z3%M+weDNbmvaNauZ@J6B7nU?1Yv9DavYDFMT1(xdRVOPQ8~HwlXwTehUAyv_r~|J! 
zQIHn!pPTJXsv_!jY15clwHh-Z|9*>lu1+#>k+9>LeGUEGLCitK9Mava286oey$B72 zI>LVibpY@w?n92lLfXpR29E7j>S={0QYMBfpgbq>lQcc)vDa$=p?{_q*3u-ib3O&S zfnjx;b9bNVqoW29M3Rq%YVY2YLqZ)-{Aujrg6h)u-yv6JVQYgyn8ezYY!jYKUUgKX z4&)L0)L?@&zRBtJ(09)Ulc{{8YVObD^p)Ssy?b_DO^lMWa?TDQ5W@ca{Cf2Ib)GwP zA2vSRzdUZGeTJjy`Ni4V_^k`Ki-4qs_pfQqJeA(O%>J$?S}|O2s@>OqPHiZm+5ZvY z#UeGh1|eVwM}iS#w-pTbLEr-j*n%&ADOey-{bVZGFC$W=Pe5QMs_ zy=V}GI->nSonGJEP4f$8B4Qa%`2&wJyiV6)vD>q8r#l433NfRhR{(~Y^%^Gk&Ovy{ zRi(EUpJtjm(_OGy?d))u6wFuebT&ak-Qd}9d&4%8^=Iyfv8*dL-Bzz3$`AKjLw_F- z=i)8%cx0c_)7V;xh8DPC>WyF$TJ|f9!YcT%7heukeuasaNSly|0>!vdwfbU#4a}sz zv`u0v()VkPyVC?1pAM$7M1O3!xDI?eSy{ip86Ubi-R1BwyT$kp{;Bh3r;>C_G9Bus zr+9)7!XH9p{{W+(gUH67zNY;fKNSlrP~YnC&mx-Sk%KhEY3gB3DwXsIQGpv z{0T&gU;z+%<569|?jIQ?%3r~RVHg{9IR9}I-nTchD^~Y&URVhtp|0t6%z8wZ;#aHO zLPuJG$KZ!<495tLbvh1uAMbMV22Z6}0POQjU&|E_ z-7GoxAv5(lgJL>I9Olav|8;SulS>_{_YOYjiAZ#y6o@zghq}Q%L;^U}kszSX@7YTo zza<;t*tZXJCy&%Kb!jT*{Q`C8P8d@~QqU~L>UY#;MQSq`mZpi`{Q4q1-HZAr}ntg#lnZ>cu^oFyUY` z^Gn!L5u?&0bX>wUY$sNA)~=M;RGu;g*lYE5J084mr-)c@^$DK{x)62a4y40KoJIjH zNL|0|BJ#Wk`^85{979f!{RT@vETL%3VEf0GAg+P`cM5EGdJMUaSR(feU%`Ta6~YJx zHlSGW`tJ)2J_JES(}6GdRG2o;NsLipge%@vz`pKmE<@ z%Ob*2gY^akJl@KJJPZd=_lws4rU{oG)vokFsN39&^gyU1LjiSZz}j2)>~>Gq(uWlu zI6@Z7r(a(>&US<8{%UjQ(p7?D(pJFPQ^GYdDlpl+!YXUcHu0lK%d^x*aS8RO@7}Yj zc0h?hsKbHZb>+Yxy!5Wk!siuUwJygHmr$6)&9Ftx8?-pw9ZIHzeWQT+ywquiK$-}m z9O`-DBbHBSR*TUH$-5eOROL9z^!tbZ8tQU2raWP1W%^?mnZtE&KiiTNiPYYDsbMW! 
zO{hQoG`t&S-j31$8Q}iL1OFGg;VPp>s0$a1XHn&UNGnuJg>>x`A=m zY(qf_oh zX&~59{7bM~)e9NxmG{@y;kr3Gy`G{}+bFzhPcO_N$WOpH_wg91js(lTTmpw zd``X~v{z`dRGrr)DKq1c$0xhK&}_$7%!QWlzqs zm4eY3_aZ`&?2he^>~v;6J^YYpmrs*%8<)>ePB$sR=A_C<$wCb0qs`T;XFmZ@>nV@I zE0NcRA0@KozOz^54P_m5OO$X|^4CnsQvC|Q2NV3H!vHjzKTNCaGXP#OJDg$SIrfdV9$ln(WG3m*%%IFguA{P)cNIN-!-^Jx z0E}46)yQeZ`rhS+V|K}r3AznV?banxN|VuVS#vQ14#;jtWB*jcwMVzBy(;2hbnd-q z4@5gk6lezk;iHwq)72;AsQtcBakout8AhT%;g+$M3=pEL@X1T@0W2TC_O(?xrhE^( zBCYS*;O0r^d6K(0G0yjRw1opkjr);kN2_03sT<{>qOPwgR+hPa;VOaDMRW)?Q~lXU zG4#?HC~((8UvX?TBK`4A9^x6B8zdA46h~KPL<=ZMOQ=b%w0!t$v~!uF{j0txbvYqF$V7ZKcSs4fXWNMqupWh+PTc=fJ2SvEb>#;%M26C3i9$Ee=VU? zmD6hyrV`ud38%iE(K+F1h8{6-U{Q!;?wM5KXEt5R+5y?&s1Q7`kXb0UxEP$gO6$0%<9!@Pb z83%8dCCQDvdf9%XPclXBSF{r{_wMa>Ac_}ZnIDc6EXuo8SN$%5IQb+)385tGaY;af zddzFtb-9LJx={V*c_$6ps*yZZFLjwKP5Vq z`!So|5Bjt*UYh(&ot3dqq1}*d5sn;4V%*j+B%Ze;VLnwyAui@!XRp>4*};37@+rW3 zyf67Xh7OrQR9xI;-5W1OR*0`1tA6fa(7g5W=sVH=1GM|!gvPL&*}&0Gc`r%>(T@6G zqFqp8r{~#a8X5!FG+nVvNi!bCc>)`%536tC07)?7n6p5DWmmRHQkV zpWJ31SNG}Vxqi)YejZN%iFU_sy*44|(kMPW(oDU1!p|~mfNhf9iTpBtnkf^V{{4o1 zijd5ERLs;d?PY|4-is&gGB0Sw?@An&D{`3d^|*NF_6j>7I+;JF!Eh*X)`X6Y>+vgF zYf~xWv?195ZCanT-oAulpw8`icT^4YeHC+Qk87-_2j=hc1WIk7g_M~Lq-Z~SaiB@2X7$hs$?&uEu!JiGX(*uPsKp6XT0k?$w z?EPs1_JjYS@F8qCv_Zkc*d6_8gcydK{INiGR(BB;z5<^5Y8ZD#2uC}ey@(J*JDUBV z9ROTZ^D?EF!ke5shkq&O@xXW!{#Baj?fkp=;lt;6iI+G4LZYCQrHfyVM4Ep+Ja=9W zT?Wf$_PIoQ#L3VghX=I|14y)EO#JwL!LYEf()0}>r*#X1SoFqttN+bV)A9LrLH9(R zeQFQelx483TlzB%ANE?+rS>Sst9kb~$n-HdavyD%4aijhTV5B1Q>ke4ph}VI=_Jms zf!R?+;{p0AJ{-=*T`{K#4$y8#YX4Njwa2upJviE#?L~Va+R>s!JLgB#cUQ`AYd0CC zV;l4u3@KZhpBH@{YMY#S`6g_-{sSOJ&a83f%mpVIB_}H_9v%LHQ;|v6jDTbLq8jhY zww&;gXeZ1gVT^Hojbg^ph2Iia`@yiCsqZvf`ISXNEs0jnfJ8v^up#kX1HVqPvz6NA zPikhe%%>Hrjr2bqx!$JyC_&}fU!$E=Y1DUq6XNsVCt75#n^c~zA=XU!zPvRVAns`U z&b9m<%H$oT!9jM92yx5^nSt58{T?)iM7#ee#{AP;13re*wZ#-@PmCdHVjP)U*@vK~y>1{Q7AZU9v;>dBTx|WLFz+c@1?I zO7SPiU#5+r7Kyv|MU{MVbQB_cpB@kJddJPhaf3nZ1s5I^^U_+BLGk%<+=5ehe6EeA 
z#YNr82WW@Nlkj8*9gPJM2@q)KzK2)GbUxSN-MY__09vXeFdIjKp99DkGY9=kCOBQQ-J?f(0 zjU+c*BH+Z}x&CK@(p&6Ev@@WbYaOnqo;S-i5D{?e>f3Cv)}%B|?;E}x+Ip#9>i#~3 zMs_d3z0c9LR^h`tyI1cAE6~a`Y8CG*VTG(qE>9S}k^mUpxdswNGL73ayL{#ykKhVI z$MP4KW>v{<7fE*3$D%GPF$8WtR)fFsa!%qW6i1^|ZC>e{h z&KWiR?Y=zQ6Il`#STr;xQUPhgV-myPU)9p+?^A?=lbBVo?WIR1IQ20#FHKZgyRT!b zSzC+hDjiR)tMSVL0N3?$Mi&_YXUef40hvL%?AwB){$l!9Ro^Us&VVG?099BNMB~Hj zu=T3QXUcvS7*x2dc9*fg!*Dwu%99_jcH0r*F6ivcd;OrJAB~?81Bz(k{+u8~02XZb z&dNWK*w2l?{m6pzWj{B9{ILIpuwY>C&w~C7YVPnO;%G4bV6<~T=k7+kyNB+s{?Tx^ z^S1?jm*@x)vh75>EsJ3E@V#gdBs+RggY-K8>(3SZklykcNQ>~c;(eQ>$4ouyX%>0q z&p55L){>cW!|2yu zRY9JgP;%_XkzDVQyi;}p1AV7IE#kaqiCgjPHQ(=5h3iIu_*kNW)QQhxwVk=j+Z|o3 zhh>vKNh{fwe9fuPPy`CcN$uj<$_3MIHB($k&cN67N}-#Q(`&Dl_V+WFZ5mr3rWa`0T}?FeR0N7t`{V&Tx^yKg3cMNuH5#ayX&L&u%^ki#um2 zSOB04d;Va}&#B;4=B40KF36Qktq1;YMLdy=cYO$EKiAX#HQB}AGEc-l^pxvxTPCxO zy2jg#w%#sS$Ax50rz7eO+FmIrb9a;m2Qov*i~}JvFu70JgT|0#_aDWW-@@^qR#EyE zrNM#oLFc2@?8F$j^Z!J4qQIYC_TM^wC%d7mci7m9JVy<)bXJ~bXZ5{F3cNmzc4OnM z>zS@E>w=p5%ylfqoZ2{m58svR7$%NTVGa%PxO`P#^crs9-crQ(xWWRQzt&tsl3JnG z_5KpcIeRMpdajnE>GDCn{=?J)dCt@y56JE}$Nj}BNOn-pom>ZgKW4@rVga1&7!YJ< zuNYA?`#4ZkHo3%X40egN#`du453&P*h|N2x74Z`)F@x{yJ~QQJkqqameiA&-aPAq4 zLi+2G0D!|!%~hvG$M~}rLpGL92^-*5b- zh_Ylnfg{2z_@TbUq~1!>qND8MX2WbNQPZZDv6rbw6SmCRi4Vx`FO>CL7hGsuyO|A~ z?DF@bG?45V|0UVYdC$GROw@Y*pDeXhB~addpAD>&j$IB;%%aE=RB2GktqIRrPq z<%xOGr@tAz{%lcJQ{C>0>}(44i%?qQr^QIJD^9-U7AjouoaTY_hijD8reCZ)TuIz* zt_Od~(@HJMS=y%vB~BcDB6jgsE(iMTh!)91f}mK#&?cDwI9l_D?>YJSlfZlT3*O21 z2}j?V`KK|z@A+;x*d^xlCQ%%_>6lEuvF8b(h3<3)_K|T3az2&7OiYqw9id?hch2d^ z!}=qK2`A3t{CetxAiaZuY#^7rBhcOTAD_TOjw6@wucL(gLTf+lkp{mk5u}EkYx{Nb z4`BoOVF^ypf%9@;fkJmP0bUY#T| zHh4$mzf_@5A71&$WP(TX@x+r3xvFPj-9d%ExuEnAw_}ET%c_mn)Xhwws}ii;r8?QP zaZ;JLa`ldu`m!y*!8Ue^+v^aSs$Q$-Spy_+xH@)zP91Y=ibL^)Zs{B$p}U~WLHzTb`0e`~3HX^l0TTB(a$|g0F2ov8hwV z_7K1e-tP}MaXjysxRXCd&@AT3YyR}6p~&I0Hj|lzVTHAt*nJPG@PQjiuV#*>K1_2X zkS6whBdnDRD_c^@WWSRoMQ-hKekXEC{l@YWA)u z*os+>;Vw3v@-WYF;|e?4U&zinRk>$y!MCIQ)Z2otc18lc39H(Q#<)lH&C_R_U0NLg 
z6NkdQGfjk_gXmP;+VSQXzj)W-R81d!*VsA|U}Te*k0iUWay^!{=QXc%a6L9vs+??u zQWb47GPT}C^gCu*Qn=NdR&ZDq=0MAo2aFjuLS{G7;#lg)?0DbWwRm4?!qlH!Xu-S7H*VT3Gv>Mw_`u+@o84HE4M_u-lWjB zd9_awV!c*Po({f6P_Iwabw}RAy0kT?{mRiV4$6uj7>dfwssWADZIhd}MzD%#+cE08n;!MCi_5_K%=sIPd7_OP-Z!tVxqTr0dTRbi6>rOs5o54#xDD&s zAE!PT!dvF>8UqefP#&-9H z-0AMNkl*(a40b@VfA0kk7O@}L&cN-kIB>8uGsy2~$v16sHoKlA1 z#JZ_I;6FC~i~~t_!gs1flREk$1}@(aI6_7FxWalq_TxC)Mb2maUloPU#_m&kUx>~U zoM)g5HKH$Upjx4CwYB4`nwW0s@#tn3C^Dvs2J-kvDa74FaEo(@n;pf*#Cu6X*2RQe z?tFY$=-(st(GysaiQ{PJ6X`61Tu@+Ip19>e-Jd@t!zjesmR`hbPEGYgdOLpkzgWYi zC$Jm;x}Ad2zwAYOAlb2_AiFf+%@zMEF>Q`-Ogc;b?h5w?nSJsXBaQxgJP=*dxx{~H z0w66J^B;anqmrlXYv?Y6QQ$>Bo7BZMwrOoh7;Nr1qm3lHq}lM#!QxbRjkh@0BZnPj zpO>Uuyx@4*AT26&rQ%AHAwUtMJzn;t31;r=_GnAjjHUMo-@^2lFDh1Q8*e>E>!Saf z?1-~0obLOx$`$)xFRxDL_I5lOCH1+9JN?3hxx#H8(lC^{J4%BCnIR&aAVOwfa{qk~ z8bgxZe-vX-z2#B*7Nx;~^Wn~i?!*|l^Z!J4;s7cy{7!cLxH2cWniVa^J;u{6YMzLD zt6J2t@S*lbSwqpOY=chveV%Yu1lWBZ26nQN7lb+AiQwR+qWT6ftc1NIiKzZ~rTG{g zP}*lKC2}(Sm7xUj(c;0YMpyKjNggpxs#M8X`JqM`s<=W$r1QOCO5puKM6( zS5@G=GJG9+^Cmi=)}R4g_m*BJ*72T-yMf!OT%~t0JXuuZOxcY-OS{KWNU~#{Ivl8X z*KLm9J?lb;gv#pe`joh#YA3dpSCOfURa9pC6k3A%af^YAG-cu8?7kv?H{Y}cKpS5e zrqwOvmOm2ePbvfG-#u)8dW1h|K30*^1YaxA!nJM5ht$mxd$HF97C9GwKz9F|(1dq0 z8<6ZUNcW;NknA}BCD~~vj8E!Nb+e{?P;!RQMV}VG6Ol%!@1v_OWpr9}Mt2FYn7@5U zeg*5K{z}CmSi@FtQcwLt|zK-q2$jkK@oyKrT7i#yeTS9nikF_sT)T}Rd#}# zrTMM6Sjb!4Q`6{{_m=q+_bI|>?IC`(BwEI{PXW>M^C7`>l>TF`hurz0&rS;+7wtE|`hz8s#Xw^y*e2c`gmV;ZX9s^- z|9bP~?k0#EFMrwH?hfEA20_Dsf{R^F1Y3V>BRb#*i99k0q6bm!cEO;?E*SjOS0fUN z72pRA2FVVCb`Kg1l}KquTMtyxS|2bwPiP?R z*PYDRe^wOWC`{xuSz6y%_~0&ePao|XF=UaZ8z)V{>z=sy}!{VMgE^qHO7&c)s0?%{ysX>b`AwjB{UKv{3m|7l9Vuq?$^G@2N z$zUpUwF3jBJr9Ai!X38JT%4&~X$c}c@^ z#Mh@?=BE>;I2XiQ0EJO^tZ7mWm#W2$?Mu_sekzasy&N1xMH19mtEsh`szKHx-9Y~e zxOPUJ;YHjo|gSJ>opDH;I(DcJRGSP&fL_iirdtCRR1aW4Qt@{3G~NR$GA{t z^C%7e81w$#DgPgj!1X4E&>JimgMTk#oX1x6g=4Ge<+D&nAe-BCIirNMy$5VXG& zao`Gww|BT~V`XpQ;0(FsY~^GFIl&L0a>EbegNL2?4U9|hMCgXeQ3qM5LVe3KycM-E 
zh1Uzh+_wf?@ZGXC^Ls5h0tokc5{e|pn;NlT0I1u8&Yi5(BQ|w*rdmuCQ8$a&?!Hca z-33@(_b9$Iuc$;5LiQr6@>ENZ<@~YBFWJ4vw>!;loN?_2)}p^&l&*}=8-=>jUaQdI zGUE@#dzO55TllT!&9n~zn+K8rC4qq^zLN;SvG3$w!UEfh?)18QsVm0%g)2wghwhj~ zNb#VDg1sNX5A-Ec8PKr}`^}+k(|*8*C`@JAHJk@$`D)OC;cK|f_sCx327V)fTu?N5 z+BTV0l6_vBNV{J-io$_B0U&!n>ilBdH|m0b{%}FEuWO;_`J@R3N%}Z$kpO?)5LYl1 zU4th-Pjt_#7ytVd1U@P?BeK_x8qd27AZP29B?ijhH z5l$`fw=@-Iv@WTr-&sL*-MnOBh7up1a&Ru2<5&S9V zZ{@Sf-`AE=i;yfG;fPio6fOY53^#}f%O#bLT8(;X1LzDGgr*cXofc)SZayZyJpL#Z zpvnnv$T_ElMx<0fk|XQp~E8DmYz!u}Wqaew>p8|X?X_+Y?9EKttR zfu_F|2#9WoZNYvlunk44`OV^wtHgg`JS5LyvH!Fph9MCV1JMcDxi=GmhOnPz|4R$H zV+2>D#7-21j>r32>WZF}A9I1wyWnVav${jNTcwxCu~RfU%zU@PETW)GqSBP=xJqpvhfg!g9%;3rCjM7ZRb$t`X~+l81qm&=l??sxZX}7 z^ajF!g%)OHtiG%-9#6k6XqjWGzg98O1E`shv9Jm7hjDsftRBrf*gYW>ff#Cwj zL6{UpHf(m}@pSJqLCHfa8!hhC6tu$MzUW+8hm1>fb?q||nm^aq(xz}+ZOdf5gKK7k z4rA2ONLPf0#$r|2dP!Bb6nGKz`SjfDkLowaUTMKt8cPB~&O7DxvgwazqLrrA14IY2 zfT-MwNXwy8JBbh+2c7m37RW+9(bk>T9Z!UUEyQ%7Wk~?Ny>Z~YCNTtiXo$^IW&Hd2 zkLkcPz%Hqt0{BjpXH$YfRBxa|-=*t$e_?7^AxkgzQBcEu6@bL-!qMwYP5#2NeA+q5 zy|!2UHBVKUszsgA(ICvDSHF$~!I-b7PSFvWRoBiiqY+3I05g1C>T!;;Mv6K2j2(ns z_j^b2BMq50lTOK;(y}KZJsG6=39O+_SBt5dJKqyFW%}xi19wQylXx_R<6!V7s8bie zHO#T7^(7z=tUAGRbxwrCK;-}g|2M@+@1{L)5WKt>$&q|HT?+?6fl#nj_isQj4e%+& zdU~x=QMez&I7vx5`km62kRojOoOc~2$)nzU^>x5adzDG{(CJ&*>C-|&AvD4)9O`Wp93&zV^%oVucj5r@{s{J%dXf@rln ziH42~HV|#-u5Wi3>=#EvcG^frLw;}$VTT3Xowh^n4zz_K?BT;ZcVqq&WA51g;U-gf z7ZiN_p+NtzhFF38(#8MlCcB3M4nyIWC7{rq_y-3;*j~g)^bGSV90UbJ!Pd2XfuJsD z4a=1{+OxfK=Ld4a^>j9~prkEZnu9OuZ@lF3_qz(H*pmiY*NahJ!aBoYV-xtD7kcDO z?yF-l?L7+b1?4&TkRWJfLQbwo@FBg=z(P<@I!xfn5xn7oqPv78JUQ6r{mUBr)Sz-$ zp2L*kNXXe5LYCv&3kCw3`6iispD5(&E@7#f&$|HbnrY8w`Mw^jWa}or7&epN=f9@) zq~>0ic+JVJJL?zN4nXi175+^Vu0fgIhzJM4t9ub4LCtnK90Y|z!PawBAjtN(SI{KW z^cx!1+m(K+y1u-7ciEGaNXzv)-Wn6qJih}t;TU0)-3~Lg{vI3szG$pgtuFBP7n^{= zm&Qr;sUf%UksxSe2gRLMY4?)5PyA&2lH*aoT5ghJ=63?;bMKVSlJ>C!VVy6w609N3 zy0&N8tWB_plzLuYGh{a&xztq3kx;2c{?{PrQn8g#X|3#%>VftmY}BIN+D51|8>LlCX3J;hyjUv5!(^G(>6#D{Es3Is+Sw3gHakB 
zC;)LWekbC<74T;e6aY}U;U@^f1JLfg;1%lcETN%H6J4xyDRZw=AHJlrrlI#;;M-i8 zY{|5j64>WSXvmTw7X8(G?XCYCVFTv{VZfmDsqkU>&uVXPY+o&geOY0XjLM=L zP`zYv-=UG{d07Y`_mB|R$Hf>R8}*?0WL4&4l=6=1k=HT5D=p%~{ZD)ekT{4p@qzkQ z+x@X`m_U24*f+|tk9w3>Z&t6j=tSbE0Y38k)ZL0;o5xMVsVj$ImBCI_Gn7~OM|n(B z<)G)dE`FH_Zsq~LQPdqWEFWd}x=@e(=IZ4j0}-+g#*D;r6P+aSDEn8f2RQh@=}vw( z`GMo$gS`mvuo!^|90x^0!ItR1!9f6MX>FQ&f8T_5mE>*RnTwcqGc@C$#V2UL);@HK zzS?(l3y`s$$|={YNI#3+zr2uP7in>3rAJDsSJvdyh-wD#Q4fiOA-+$${BC0|9mW8^ zp?(xgd zzV~=KCxBkE*`AZ@IFqf4E525O_Zzq9{KCsz+ih>Hl{5C8E5ZYm8K{LBN$PVmo-+vP z1h(cJ;yo3_kFW6}YE50Jy66=x{KN0;*vZOf#aZPDA)qUxQ}n|%;rmPqyFr_e&(S8$Fh3*qO`eR(Y;BPw67<7A(gnW3WzBsyIEJe!EW%dOQt%JjNhe-)=P~qzn{=^n}s&QmTJ* z;imIAWi@k>4>;w({owI~OXatV3O+rx%5DkXa1buOe=cwh zp4p9va2%}IiwJRuK2yPQP%IQ|QU5az0zen;BCCe-t*4CdmoBIqPGYlZgg!Z}{8XMh zN$%c#?!!MKlA=}0TJT{%aoK#d{cK-1{DgTzJ#%QBo6wBVNUjl@@609 zu(EJc?=JLZ;W2?67yFFCNqX$QX2&(4?&=p67F&gZjna5kt$M0}3GSEz6#G4I%k?YQFqW@9E`RBLF!LKx+dVT(3U>xMm>uf+UH5=BBrs0)Y)_# zzSkVfrbL-VF4Z|Y%~chjBFaAFwBBdP2d;FdoLi=ox|x50aFLRxf%9Xs-8+WVT+)tf z$<4l&ZdC{Kf~edH=0}P-6X$mJe3#5Q-rp0!&!j1W}VaoP>&#*c8@di zl6&E%kd*#?7(P^aVr{3os>;2%|3$i%07lcdY#*q2T6C70f(h~>?l zoQp>T?c?>*9f{Tt?^AA+B{75V8-?EtXr3VSHlJCjDQGpaC1K}*ge|jk@!bV){yZD# zNysOoDpP+|mXh?gI+J2#2;)7abi+*g86pz38wV8lKgdmKH{pRJ;Jdx(4eNQQG8_TH zdrO~ps{A_yWV?8^f)xtY#&1UJYz=>W%fN|#%t5!!j0HbwtgQJ2JD~A^vH!h<)nwc; z+n)R=cRr)L>SMV}Q`YPk(;YR{=sLD;F+A)W zm3x{u%8{qL-(S3TdB$OeXIZ^)YPejl+*-2l*k(yZ^y1OO>(loKI{YW(2~^@P^%&I?EdtnPU&UL>>RJM zQTw1>lxs0O9s0>c2*3UDr`KzcqrWm77JLN@h6Y1^rzT=Uctwl`*=>U+aI3#9NLGXW ziRh4gi99mc&OUY!6uB94zsSQwgJ5AH`+i@-XEgu5z?63)ADsUt_M}ASJW)9Rf%la% z=ZWo$|1`$!T&=$c7hb)dA2q{5RK7q^kzNu{yWLIFe5@fU*#^*@5<436Nae(0IfX<_ zSI1kgg5>#?kTxUMt^P_o=@Iys5IMqQ;jN&akJ>#Lg^zX5uJS&g^)&a!Gt;J`P0|)f zb(DH^>gYaWp$%qu?1akVXQdwBS?4XkMzuinu5xRA`|-@s%){PKE&$@%^f;1~0?q2O z8`0NzB@A2TVIRilg97b(3@QyV3Kj14GU)JnNo)hZ19z8h%+1{>r z{>fsWVv~s0KeRO7d%nzm_=+=Y$XnC)%dcf9E6_uG3*^5NQ@H@w?&|PuMRQK0Yi5}g zL(3WW3;RvT=qvw>hG5@)t`CZ`ku7Lo!Rl*HUCrSp_ts6ui-hL 
z`}>Z?9sC!=q!pN{X(0#f`8iWow4P|rH7{5s!`Gw-;mi}i{`i4CXq#99Y>>)^54gm#YCHstUZ-usIO+1}ra4LJf+`i6l^8@yKL{sv#&JeL3HVfF_^j<_4d#OrQ;m0^HZD^Sf_mSn_&ikjFxo6 z(UaC5UJ8(`=)l0^I(?bx>-N?-AppC25ue;)P>R3?1`VL}Or>V{!Sxd?8|UcCYBZ!` z<%t5Bo>;zIwRA{V=4qAMrxcfHQpE_I3@I7?92kDg9dy4F|SRU?E6-CYYpYkTyVel(f!R_y4;q| zf=@<3o8INr%&+T!4EptywM@ORmQ9xKbHY+8t@8qzBNYRpC*rzQzxO{Fr~xG!wShqk zeuB~RS@c8MZG6H=2H9yQqHi05MCPy7xp{0ksIA?<`R!Ap*NW=AJI+)Fl z3a_ZgTX_RHm^gEFPgp5H&MetpOHWDn6dcvNl@cuUQ=)(2z5g3sFm+@;=p2kmu_ui} z>OxR!6r3%9hGIbKLc!UBtN-+)UlrZXt4i0q+Yd|6zLJm8jUZ4Q1h-Q zp3LvblR{s$e5bZclepEt#**LDOSMlqT0Zr=L!ruE5r4KZm3|^+(8GDc>=^x}GzYI+ z*|y_r8Nj8M^(?0LQ8(@s8@H;&e!Xak3(SN86U#UHBz?cUb)-7DGKl&u1=IMEF=9gY zqDJ(=(FoLPJiqQB6AGF}!qZ3-%aOR!8ohHg1$hK{hC*x>a{1i9Tskb}G@jOGG&oLY zL2KYfDjuSDa^TiSZuFzp_=>x9JR5IH}UfjZVMIv!_tL8Ym48!Xp|?6QM^~Fec}o)R?Z;2RyFV zWcz>Yop(G|{r~uFk}a~ccakzPa+T~A*|V}`@7-muj3U{NO;)l)D4P%}n zxr}sw?)w(^f8Sr{(S6Q)yv}j%>pai->QS$Wt74Q}a`Wxj z_7qB`-HL60U9;mXxnG}q?cq;{h+V8^G^yrPt-q^x*G6J1?TPzx*tIDgT;`Jy1TBA^G%#+ z1x7oEv_ah{1Y5x8!80{}stZ^Dpo0Ka-0fM#X1$Ov9*G`vddfpjPEb^?`XHLqQ^7=K z_V}+8zSS^WpldrFVYO7Gb-rp68>Q)rTN76Eqv!ehEN$j|P=MB2U&M)t>P3`jLOKZ% z`Wv>vUWq6K6D+QndxO%*)K|fF4gmYzkg9^mI9#uitiQf_vUpA@b`E#ALtsGFN0-w7q337~<{I6h&C~={1l$mG2=)Yc|^|eF1}w^c5am6e3@`!@T3xz|af2 zVg^hYtPq$(?0N!hfd%WRc5^?wtX-+Nx?A#3-#24W!vqGp69n0f{olp^Y<)ij5pFVn zTMyeX>>qrBe<{AV4rDK}SG$vk<@R^-Kn*nlbT1yZ1S4RT&>K7b?k3DsE+2_i;h3Pq zS%sO3h!3K040s>Q(M<&pae09~A@{<4oszC?&pzLnl!z;Ra+BZhP%UUPVPc&Dbdm=f z_4~hP35bhSK~xj(*xX$$nYwpXvReJI_)8&2_@!aN8j>(zY({&LAnokw%x&E9tU~Qu zo&~9f?XMVtA`N@bT4N)cVy1^RoZP&dtjy_aU@1h zm%TrfQP|Lxw>ZauMX`Oj>0H@DqP=VOvY|kdspje}NllTG)(tEMrpT9uE6j-3i>ZGv3SpE0;6l&8$<;RykpFDX2u%~)n0_L~jVe-dvu zwqlEx64LE3`r#a8&Y?*VY*b6ukc5CBhn_@->gDz|-Vz4>k9C9OL(o?JOsb(*SDs;U z7rGwz862d(_HRc;thS%?NBQYU(L&Q(Q&y^#QG=OgbUD2SLt+37>h>&q?}txbNUmB6 zUu#vUt20~mcg?y{s;P2G0aelE&)47ofa!r^sPayu3j3J@vf>ed_5MCA|2w?0LzAjJ zF5Pd=QNccSem&Td;ki`F3C9H%+3nUSN54B3tJdX~NULUhPty`bgl+gbXg(>*giT}1 zGg+7yc;`qk$P+ob5)t5Epik$zl^_LQrk>uUYC)wQc=|lBc?nnQ)CAG;V_KFT!D+ya 
z0VS71^W2AC&Oy&?y5;t>w&h#a%#=xT_H#*f{^}JVw;F3m0zhpSFHir(i{AHFh}Nv= z9}O%CT=G|-{Yc97y`oIu%1|E|{2H{S}0@os{V~}!MKB%>vZ!Ixw-)f|? zt0~zUiG#JW_7)d3{GFB`53<+Y(`*cQFo2rYSFASovEu?aK#zcB1Zoi?YUGH9r4znI z>+N@&TJP1n`6tT*iws~|)V79n8OY?~M|vFFskf+S%tHI1x2j^u;~Q60Wq;UJ0s6s< z7}dvw%j2{`r$+TEx2N1VRw9HPCuY3p3~S%asYZ?!uEt5Bvj7e~-=vP|*LhceT#ZYS zl)>R;JkuS?5?$@WPn&`+x(WW#q64kpKO~{5yWKdbMTSS=tI}QM(6blDmo$EG4^?;l zTsdXS#QnsWz!;GW+dN;zUqs*%Gzeo0I<5Mx`iOZ~x?lTm-lW*VoPx$iBYUkPZb{Q%xN-VP02vUQhjD|~(Kv$8!w1yMFFeA1SGkqp+L-h3A&E=e zt$bj7D$SJw;lwvBez6vdn^L0DwA)EY4`SE7gxMA3&s1Of-==cck$yS!2gX=nLo46} zh0phAdjSWE;c`u`?EGt)V=tperQDYSZH5HES7x1 zcP$(XNr?D*`l|b?tGIU>+1xS2DhYTLGtO{ga}k+N*=p(;_Ug&JTlD2|o-|@5CtH~o z&fm%g@Pp(TC5Ya2lA(p4s9E7z5HWe5$Rqta?L=C=pH5TX8Ia*TutlD5m>Vqt$-JTYZwpWEdU)URF%JWH$X8n^HH$Y@S=y3p>Quw>&Ff z&7qbOUWy0Upl?J~RanXc%mOUft44X-5^gdEda{v}m)%=`^xXE#AF+bLd&mS;-))gW zv2r;I#9=`A??8OB5v{&n{6wyQk9^UgEuUkvG zSZ=wYXZ8=UP``qezn8?v=!#N>$EP`zKQVFd~+BmolEp6vPIXLqKq zqkkgXhGLi`2k|7InfqWTG{RciuGGITRQKZ_j9u}3eJRZkt3#iGwW3Z+^V7QNXww~O zrd5>9@rq*-o1ULC-gr7aHTB9Eh0Vs=&FK@vYH?E0K`3uoNP9{l3AcoA)!XntBK^yC)!NS=W-GT z#Yiu6Qh8AB3DxOzyE(f+AIx^CpxM{&N+8eTkOgdb>V!^WDk1+sU-@=SxL$u^f_*h= zaR0Gb&jiz_son^Bf4|~JX6e^_JuHoFXDR^_&ERDW`H?WYk+@uwE6eU2b;>Ht6z5~! 
z+llCg%|CAYBUFc<2~e%w?i&=U=%es;Q}n{2XD^JeIzFUc-jKHVTb5WiXK;qYCtnP7 z-ue~~m=5DSHWC+z1cge<`=%)DX~oAMh)Mzsot0~b8?R%&1yZ8Bf^bKQZ}fUUzp^4} zscdzrB|$S3FnzP^e@4h@M(Lu@8=`Pg`5=Rm!tx}wm~(4VNU2y@5&J^rB(aS&FeBP) zjkqx_Z62wrulS;??ZK_8i+Um@Pe*;^k4bZgobf_(p1o7yZFfUOy;=#C`LxfHe27x_ zoJGaoJX(zd$R~*-Jp|JA>_|`d46h=G=sA7v`no>d@o?1(Gp8^x0pB!+R{klD!+AW~oUBbe_ zHTLGar8^7gUrYb8js0M1SPB-51pB*J_Y1)U1VRnoIseN9J&^mG9rb}%{|wsMJE-=yLpi>W1{h3X08#Q9R1I0P2~7H8MkbWmizBq`j3y z42~gG01$N>LD=X?o!IPSEOco^>@!SVP@y_#Qt$E%G<_ldVm^R}sfAlJIA<$Np5GS8 zf1%^-)$@AD>BC4Y8|KoOEabDhNhdFnzvNf_0=Y1U(@M=GDb4Bl zXPoLEAU#kF)!k`xL7^%=3a*ea+TX$Tf|%7EzLX0@Dn#cA`M5o+EDaY=Y|URkm()@^ zusoxz^OtcdSwwG07#%2A-Q#&Hr(Iowwg&6Y5?}13n<2ACf{jxl0=_Ck+iA{4V)lc= zW1%b(mQ$oJo}Rmdb^6L|<6U<^r8FQ z*6dQeuT_t4KV6^yi2GjUoN!4^RHi|z+XLcj2IGvIvB8;IIWPT>$*lnEAihM9!^M=7 zjjQDCNcb_rQ{gK*pU+4HW`~duw(%bNu^k%lmurOshx7lNB=jbnTbNeW9)(&y4+Ef0 z5^dsLg-rNfFMFJ*zd~Lf zrQg;R=0xXOnzG3f*O=keo`$wvJ}i4oxZb*Xq9V4+%Y9(v@Ea;{7R*%PrH}inI`RRE zWXF4s&Jxgr^&@-6Ihy*9RvmthKyU4K;-FTw z9)+*-7BPpOy)eEeSzY@Xxi6L}o;E-8wU>GGrB!O7Lw&}1wwuH+-mJb61GP%d`@BW$ zf6*!c;CY%?QrX*kK3dJnsOF?$B7z?q26|qF_?%3%$-AV4dw|~9Qv`h_ZeO8}FDzRE zGRpnZenFTS3W6Ecw&iX#yWvBIBjxuXY-hkoqnvK-WZbY2N)r|-L3kbFV9>>OqoYKS|3Z{XX;YGZlNp4n0BZvUmz$7EQ|hjdYzOY)Iu zWxnTUGEN~C!She;1pR1w;=Hg}e-yO=h_zOToSKM216qDF{uI{!PjMr>{A4#s`D(>q z6~qaj{i9XC;QF^2sNQai3~JTLQ6Od*q5d6+>Du-0#rLse&`5ZdjfWzcTa=hkui@(3 zl0S(v?Nm@=0=4S8H^T_cziSl$*h$~jeQMS?OkC)bfQ3S5As`nc%=@u|a`L0DZeZl~ zXMpH!h89Ab>R~y4%8_e{>Up={3S;@=#`#5C%`nnP%D)47g^yF+c;}nOu9k)~SQmO{ zE}|Em^{QvnwdF@^C9UtfifUMn`5Xl9E( zJKO&l_&oX znm!7yG>qK8gR6LR)Kav?izKz#n{y2ouO05AJAc}Y|E?u?0+BKQDcLJUO=6@zJ*3^boj4Yg7pmWR?w(uqz^33^bX4aOoAA)$3D#T`r5xofcpDUr|sa^YU4{#5SZj8v(I#Jhh6ZRlLksMr{-KkcP-a$6Cd-_dS74XUzu_z4>dN^u2ygx6e<+dNMYwn zR}oEAE@Fj&`5wDhjh;hj>w7}AXpik^lQA@u=d5ye^z0mc-HS3m7E~lDALG1@+pF`Z zjtN&!ct9XMPCL(OU6&V^A4bvPq##s;erjn&_f%pIHfiJqyggQg&f`y&4Pw(Zj?duq zyU_Z2mvRrywG2EUdfHL1^+%|FY5D#(12x?3zCoedJPKc=w;B&UdtrRdH{iLoY+X(<(E7e0ZQ&=hFI1|i 
zQdw`gTY?3wyXEl5IUaCV@ku@`D=W@@)5=7N5nX&tn%|nJ)?XCQ__A>)uBHBy@8UiB z$qud?jiKtiFDbNnEhB+$#~b)G=z}Ehl%`ByWvQ!;kE1J76NOgb(7IBbX@1fL^d}HZ zQiR4$CQ%i~&z!R{l<`oB)VrdICaF}TgI#5c_)AlH(5PSsp8CMS-;tDqB?v#wgk(SP zpv3GH{-Qtoaqt8bz6IE`nZb#AXC457FaKc%4+Z52dLUq5mZ(98+7F=yJ!?;{5D;h& ztno8R4NF3eb~<;MP@$k51!iJi&EJ7puw_}d?z;)oXR(dt*U~Lg3r^c%YhKwt)vr$z zvAb4-lDu&&*qsbSqv)1JB)`!Tn2><+?n@<>igFf#;S_cK^6(}iIg!c)8ZO+ zscChlE25Cu#1Z|3<9@|8X6XBdxvMc8_Q%%qRqO(9Zu5hLdXGG|509lq1vVG6D6 zOhaT1PcFEG3i1jcr)u6d@J`{y&8|)nzrh+-=A+v&6Q|TNO)=o)EMAG(OL@#Tm+MWJ zJ3&8ru*d^{HalD~$#XMCp-I{=FTTJ>bZ~tmaRI>e;$7ViWPat8(OW`l8cyu}{n;bi zLiI1PT`prNn`__xj8pv+r9aD|COd5|OsG)Ej)E&bs`>BWYINQvpwr3~nZfR!v-tRM z`gAds=p-Fy7dG_MbI4^anZJxvDI(hRo02}WeJ%*v~xWv7Jfe! ztC>|nY48thIni^cQJ4_k&xq9FHt8t$UA-tnRieZ32w-ZIx0LA2z^-@Qpw?SkxeBB$ z`n)aGDvI={$W3{0Z)9JqicT_`PZKv%eYKFPX_0^4SB%Vr_nk=zeUxp5IC-J+xX;{Q zO)%GWA?}CXem`#*`YG8n4((#KLukjj7p4BIQ6VpF5B=B<4fy3+;pegZ|0xAEg>wt_ zLZLqjwKyMf088}`_}{*0Y(Dp^Buimg)Zw|f9YTz*6(Qy?*4mBdMGL#^z5CJwoA$P4 z)Hl&5%EZm7Mt2e^$0)hwbSf0(jU1wkeL<~4MU5m$f*?yzCC7YSkG^jhI+1fza`o(< zR&mo3ahBKsD$M-}_p(K<`Zst^pxCYo-D0oYxO+)P&Gnd#V!;*kboX@9ZdoiG{>k*_ zGndqc>l1}3m>v*Gi@ynRP6nRDD^hAe7CqX1RaZ<~)p3dK*hX*6JR}kFSH*B!f3EXK zs}4U+pm%mVahO)2a2$m%Y>eM(6`LD=lzx5isTvTQ?fqQwu;50^@J-0~XCWaKEbN4M zAA~`zQt`&du=sCUC4&*fbM<9s-XoV2Op|XTs~Mk1(OVMSMeeyfUL0AGkOJT=l}_a* z=RTZ|l61;8(An?WRY=6VB{O${MsA0+7@v7Y*aP@ry+pUG|18ktnY^RF{ zwF>s^k^S(0NCfPad!ICxJ+v!=HI^SRIpwPK;yrePRfd2NPfNZp2V&%#Jh zL;gOfRjRO!$sI$h0Kh0Pn}HVn5dz|tl_2fYvX8ZV!3!i+=$I%b1WC|#Z9PC@1V_Xb zDQlSDiSI!k@%>~p_IE43JOnYn9UG%K|9&^L#DQ+@Ln#24QuG4EYlS zBrDcaDGUq|MvsrlFcO!%a+lJ*ayvfN?TE1ZlV3GAK8Yzk9{y;izRKwxjR_RBUx64} z_qKfD$1>=&7`^0Abm~nH9#>}oP3?C0(E7taTD3>qKQE!?yDc)PRacJ!@rjKOzXS0! 
zhuaT{{50L7mBN@Og|?Ee-y3EOfB4{VG?KjgEM#W|YL%MziH(nkX_d$2RVVcJcaH!h z+`RW?Z#7&=ai2pnR;yoXy-m&7_+kq%^YS2@+oj2ZJ@g@U1PSe&OC#;438l^%?T*Mzz`R;Of&cpz7d}d1An&(CJYocCz2SD60s{ z)G&HJg;+(&8?xXymAW@ZwK=F#MNtere=8*b#w&d}zxW zi+51JA>+Xx1q6RU1-tn9jpFZl`H?n)=)MvSc>0NfwxffzNpF$K$6~ks8o)^ zD|-4A(6m`$bL#mgTW4p+Q$_4*f;}ow!7U)*ln=Ml>yg*IgSttm=x+wl%H9TTo|^ra*WB8`qyuUpH>YpIb1^~Wx% zl2OmTgG}SLW4ayQDyqfY4UdEj33^jlMiZFEi@&@$Yad%Y7uXrE1A$Mrh)M#g0t*Y z`QU58wV`(|YYinyJc@-J zN<+7*t($ZF`hq@}sWg6|-b^p*ZC*LD{261cR~8fj2_cV$c}$ca zQvwWbEXx6pmxZ3vi#U_Xno0}=$S(&U`b!R;IVd?0 zSP9CW!NAgB9zKDt1fQU=`7c@nffpmeTxlnDz_?&FpzrMdfrIMv%cvB%0gMCNgn$J* zYaJ*>_@C!iz_)uaf*Q6Oe1HvVz0iL6pu#ZE<#Ffb z?GdYCpqY`SfLd(QHOgA>TPIR>$}PbZP0M34e5(P;F#q;@xskMc5BOG+^h~y{O?`W- z5O00yF_r%u;WMBK*$I(kYmTeFcj6Pa??wl)Eqzhct#bvku|A&3pS*AX5h{@If1jW> zyDc&(RCY&#m|E|5Abx-G{e#nAi<{n`A?3b>s)wAs>}oR5V!0A)j#-c$xzYv-m6kUu zwf?^g73}^$%83bsyF-#qwC!{w+%w-PKT`1L2xbFuLfm0>_aEj0H|HdtuJ?R%_QcQT zq135HxKoup*Ry?N!pK2l5IZr$X4WN*DOltQBo8)OS~G%=tryl$21v7L}x( zYQxFg=Vy<}E8FMAuWGa8_gu%RR_fjK)-^6aWjO~);?abxUwPKXO$GSVX56P9b@W{P zPVn4}bjkgF70X$JtBbrGZflj~^PIhZ#;N|DRj?LnyVK@^LgjH3Tv6PHeh1h1hM>Z% zHk@eg>yu~frDiC5mx3B4SUt@f(9}lku_p=tGESw9=#Anw49XP>zqP+zgH3f1L*Dbk znpav#D*5>E`>9wz6fLjgV4PArwKhHAjat!ql>&q#YgVefcB7W9PTH68C#%T=Zxgn@ z5_z>*czzYFRpyx73UDVrnvr8HiJQ9?&xK}a&Y#v%ABO4Ds2v-+P!gB?y1QuX(0ErcQOO@$8v^prZ#`w{$s-R$f5kM zTclWA&nmWioL)lihiC%M%Cb7yH7?VFNlT$Lz-9vn-Nomun`e@;sdh>Ts~>X$q2$UX zW|}*$Jluj`QT}Mv;im)Cezy|`wd%o<__EA7^z4Q4)sHc6-=!I-g3z-Taj%3SilB*r zt~7*GENNNwEyW#Td{C=&ypb()|BF@ufHzV92B#sl^%D&_K6)H!-|}%t?aTM!i!KA# zjl6gorvRKW_hwrs+|#)USgt}M@`2AE&{1pxS6g-IdcM88Xnbp5t2$QYQX1FmQwpHz zk(D3QPA)Am>uejXmvxk@`N*jAe>x`3ue2JCQ=fXB{cByRXf_O1lcJ&3Rx#lePWbNzZVK5UdC? 
z0%H$Jeu>0))r=4FQV(beYtPr|W^S^TlZ0iSh5$dqh#e-TE ze*Y120!r&|BBf_n1Sgm2^)N<3-`?#5hFUrGhFO4l11arqcp z1puiJSufZWhsdp>JjKLas(f<|QDsEHy2py1+f!n4w*!r9g3n`3mqzVH1DK=lZOb6|z9VSqZ z$?&i`d)@tf>6GURY3|omUMxKGk7>FlIwQ;?hufm@NSGKPLna@UZA`~u#-KEMnT~uT z{jr?LOKzO^d{W*?gv3|K|7g|!BK+49sN-&n3~E)zQ6NT2{;lV!(~&GSO!_GzcGVg;edFG7S2kfiJfHeQ;_4YK0sr^cAQlrIE5Ts{ydbaeaVl+$ zxLJW3ek|`MXVdlMd|8@NWU0#+GIGmttZ+!H9zQuIuhI&%)#8M&)U3}yw50oiPxKe> zZhqu45$vFRbB5=Hm;(^eKo=JwtXH32a98X!%hR4!a-LH}vt0|B?OrbJQ?LHK8|trc z{!Je0wA1E-TJ_{exXRA_&IN?xrAMoM+xWIUwHX}wk8 zIMpq0B-ziPN}aVPzFNlke#vF^MISki!ghZSTBWBX8zG^+Q_f zo~F*p%omGK-BXDG!1D0N&>Z@S{_R!5-yGKe!$nYMII%FHdUhmUQTTxS{sJb*c_xyL zqAsNkk2}s6VF`Bun!v*j*Cz$498mLVkqG?qU$||Xy;=kC| zD9xqskUuY;FTuJmRDAC{*w8pwvIjpd#_GN(s&`$S>xqxQtNPEwG19hDPy}0DN`-NHCUkED z_;2$}G|o1D%%lU5XA3JVXyyZogObR6ldYWc{gj2s%?P?ad>Yj1!*6;1s z&n$fVV3vBf?AHYf^q`0Ry88Ye{B>2R>rUqm3RTCEU=IGR`>EW<-n_VxxrW!CdBq|! zwpM;%mO7mz$Fl81hET9btkxzdRC?Zs!NM5O*E#B%g`3Khh_kuU&Jsy~SakOnM zEooNSkP#^zcmufOjG_o@I^5k-xagcFSp9OSnq*OF;xj&qkgQAEu$}0>Pz`uh4-VH$ zf9s(8e8w^|KicV*&B;3?hz=MhND(!u5_0RKG(0cR8rr zZi@^GRo{^y)3bt; zp#S?gRT98kPh{Vm^DNT9|76Zbei3tI6T2_V*&{@~xIf5cTJ0494^IX9TAn<&o0soC zR(ak3@$_4)xeIk4tZ7q`Kb%!BT?cuEk5dIoK)JGeH?*?B{$ zPZaCU)X3;sQPuk{x~_EBi@Q=|He9B`iN5xiaVi5uZ$yq6P_AB|iVvUPmUY*Qyb$V=C!tN;kNI+`_d=*YJadvWjb5D zM(M3Hq7+*CG27XVOWb0H-!F#+2Yf}nAY`e2UFqrjCx}UbHQLpq9xcG(Ut}Npx&F~A z1gHm`TbNeO9*No$1b`Kd5<>YH_vBfWE+$$_U8|ERRUh1i*GgmYrHiv|M>=7by|_&T z54lWTrpFx3Zap{p_Ql`oJmqRAi9TxiJI$`=%50!koj{Fr=@6kPCGu&D3u&6g=;X`A zJgXlC)2bHm?Gogo=eXPy-lb7mnXZ*d)yXfNv>SrGZnQ$=Le^5VK2O#$;d*kl6nN+T zX2Ra(QOsbYxxIomrUI3MssVyTmUmi+n?0}~%_lNP)TJ>uK_;)$z-Q7+c)T-qp@Z~kNaOlYk!^@Vi_`G(vz26#Sb+IOB#h9UQ zQI>W5g=~O!8`f~h^`%wcRc}MDsjvSYN@QS z&Daj*E}Gn1fOWiM^RoOP(Z&^z?VE^;wlkbDR%}#@}MUnNk{^zoz0W;_HZi>zFK~AY5nd_FHYeY{7GuZ#IdB^vjS0 zPVCoN{X6xcN(!mYfaizgnU-}E*{pV|57s#-oL2-SLcY|zBVR=^(N$xzdIbz^Ae$IG zplV%;&Hy-s%N%_MBA+Mr=g%=EU}#WZ6+D!Bw;C1>K|n=-Wg)vI zum#i;y3@nMtO^z3C@i-j{|?Jcm&64g(`8qsO?Q1?r0GoaffScrX3MwrtCPuXH=|Zx 
zbz5~Y@@hjl#voM^;1LPsGH%2})>6Z`KaXWGtVQ#zI*6@AmX`N&^x29UF@RU|h2h6Z zUCh8Z+L+e{h)y;wAp-AZrubKZ5CKpjAYbHSHWtq%>d4AIrLkukCxggk|2HDRkps|Lnig$4hvQ~;oV!u_Vi z40BROIe_#%^Noy!eP;SEC~@#xO6f=`kv4+aeX4? zTDC}~zbMd4G{^sXD2SBQzS#m7TO^zHDoQGAZ8&h%6B~0I`(h$WJ_MCps*~c5THdlPt5S7tlJ`af(o%ZbZ}T0QP$CyR8{pSVZn;QYMFAYm1V9 zTN^Q5#d`COSqYGVlDqFxoSQ3O+|*7gSI`FXaj7@d^+4aqoKbY$YJPINfGRLJ?3loA zvEp&@#ge<-rs?+2NK{EZ8+6M%s(jun3NgE?=o3Z9p`X{G0e@Tyc8TB(2NvpuN_7-m z11JG=hHnXo&o4B~j#JCjrPUY?Wje{RVN=i5z6_uXsQ-5}@ zkSn}#X4&J>r?s`=Z0=fL{gP$Sr7%$=b;gEeB_Om(IZ}Qip&PAVD^$tWU@p}PcHY7K zJ|abDseK=w4QwT+YO}X1vh!}^n<6K|Lg+%=PK!P!T=xmk2$8%l4zh`(lM$S=T)f}M zoAbf)jI%eA!xmkHj0!+Zsbki9(aHLzHf!bOADClG>w+tbwH?Hn7;=QYOy6DqxYW;9 z@1IgoPoLe+8zxhz%tztNk?hd37si)r885W`YDTJ|Lx&ZHXo6-68ec*SCob7+wPSq+ zlH)okQ>I>yJ=S?f%9Qe8U#dY7(h=61v}}fP6I$0iJ9=b zv#}+*2kl(}grR6o&paN=zGnK9KCe6dY$!VSWvV5+9j!e5?Uy`6;&gOK8{O+RLR>RW z#b=+EW2tkNFTFe_%vALVm?dscw(H4lXnri^dDIR|p|ILUNTMj%>IBgtdHA1k1OGSE`i4BI>;hJIv1bi#z_6TQ@$ zldeqicKTN8UP}o#70BNmZ@3H{5Sd?NuzqGiu{jX8FHt`uU@0&NUIUjL$S-y93MgzF zusS3#5H|hX3N@I8&2~ny;H$$55cd*bEvyQxMLAdomLHt1z%mE+5|)E{`tEe?piFTc zg=J&N@32hc;d^R^^Q#Eu%U1}NQ=FqVXQC)Rmvog-o4hW5pRY{_%G4b%<15FKsra$6 z;tj*IeQwJU*C!lQ{FFN)c_JyUPI+pe1q=##c>=uD-s(SW$1C^}5-hkz6_EkDMD5B2 z;%QgjQ+2dIFyL?ch0B!et!o;g9Yj>ld{y!1aopeE&WT_4M0qk3pFdJPN~_mw$)hQ)Tw}@eC{&m3W0hpH}l> z@1J@<6U^a4ZY~%!E}ebp4k%M*UYf#($o>S`WQ`N&wlx?pmH6f+3+a+WouVo;{?+QljA zf+nS?IugHA6J9&z=p$$o#nz5XaDOzRIb4tan7Hb^&Tvllk?^i`JMs16Bcuv`mayhb zC*Q9DcyzePwA~K?28yUyC`ZS1fXq$WZc+qUx!Tl<32~}-siI#$5}g~F`ZGTD_lO=W zg?jq$w78&5i5-Pj`LW-jb&6-9sKMx*!v$8pjJin+-AiL#XDCNT*y)YVNKAzk=&f3n zTOh68@yD_mPS6RsY>wz9{~i=8>2*d-LrP1&^3U;I~noEJSTBcaAyB zWP&Ju+Sfxc3E8yf{A2a}?iT}!vJqjcfWQsE6uyz@N5WsTTi2m2SVPrY#jQB%Kh8|a#{X_NY2~Mfz!i=qn;j{uuEQde0JZGd-@FyJ#v|ubzc6_ zB));?Y98X_+|GNg(y9tWsK4(dYNR~Rx$ua}P%DD(hP3iu>D#MTT|XvVTe=8jL(~~Lu5IyeIyjO?82e`%Y%@z9 zb}zs6>O?Ta14z?{n{v9QuhjWn!=3+RbmE-0jEXA>bW1qVd6snOVe}uRI{Zw5dIs)x z;h<7!9EC3-)?s33SJ4D$|HpL6iZ056U@gF+zu8{`%Ts 
zGT-*dPwV=RLx0Rh*D|tP7N^JDSE{imJd2}hgyZ7|Z_(y%2=lf3ypZ#xzWd|jyE*{msi?7nk~sX=qN_r?&cLR<@@AGZM|7!B)|FQIkGe4 z*N3S8su~9|fO`xCrXL3jVQDDbVW|I7x|anN2UdrI^fTb#4+0^uxSut9Lqav%d#DoZc!;A1Ei0K5neQ+6rv`rw{;ior|# z#8rDjLQ;%eJ84%lQr*6X9uw;1TSO+`H1^7n|3i z+nSTN^KJP0pH~>hz04Pjna=`Zs_hm=I_U;O>B5nMEl0ymq94f%=3 zf0PPN>c7sQp255AF{o4)M`4)bw{G@-^W|lq6XjfLiQ|;b5~*aCpCxPdg;Pnl&!N5= z|NJa=5L7BFFOK2El!_KjoMSlN(*UEe zRv+cpZ4bUktH%qwdTInU4MmY;ob^Qx0dpJ=ASh{odlzrvBkw=_BW8 z3+joinL7I zF867fqACXt2a+jc%BWL|v|pPHVot8pyPH}!@Hzb%pZZr);gwL&dpj*Is8mizp_S#g zexXXu84^=PMNW=>`Wss;zh9CF$QuhF7f?9A?f<5SjNEa;#{CCop`ey_(A zJT|5Qke}yhy0^~DxIaFX1W1kApD09C$iB;7%E*2JFRDkWvDuEbl}}DD`X*X=ZUmr4 zaQ$`Qr}xTk?E$1%KMY48GBNhjaz!oDMU5SSL^huLGWBEf1yU5%WXXw;3W6qT0m;V# zoC4Rp=tQiGlRQ?kb$yNr?6Od;w@*UD6m4cJ#%)}YO|3U-hEe)yzNrxcviSl&-w*Af z4h{GtQ?N^f5IC?fnesXcu5^cTobp$*va?dLL`0D7rzz>4i5#`WOI&tZMWv9h$+EE$rx zYb)n|kY7y@NctmFztb9q?sndwOx-&QUuR4YJ$qq%*HW&LJR`XRm<6>?+Do>ERGb-n>s>et+~b-U;AKG4$_tOLa2!|M zB5+r#|31K{_&W64qdVJTc)!z9{_0q8ullQ^90);z!uX+vYuAC&y=P3oieMXnARb^0 zo<3Lr)%VoJv`&U;p93vHeOUj?WvW(_K7OK%g67=^d(THY`w^#$C9ZL zMPiYT0&7Za0kZ*!#p*_Sj6qm?({;jV1s{Fx&shb4>e5wbiPjLYn=uUfXFOMEi2SgK zX#;4GPC}`DOl8iife6E8YVdLeo${No^BxY&&a;7J4rBe#xYC3%tyXUrL^$CrI2{w= zvIzbMbij{kM0`|uE8IkF$(*?zs2{Nu}KC6Y_K4k_lKD+5NZT~G_k-F;6XNtj#(^oVh zUMWJ|z)GF4=PaNSk|mI&rd;NiE4D#THK$TatR?vJ`dgIu%DI_rI5>s-N)?Ewup!y) zLwv_36^o@*U|T&#CgEvcqEajR{o>I<dvh9^$4fbo;g3D^se(T#I+iYe3g< zP}bn7L<=a_?L$AcLj!*2LSg>^|KChNJs-fag(+3>QJ^KT2EyC9u)bc$Mw$*)x~OMK zp-5cFfM(b{;n*Ng>oEPgH3N3ZJB}MvLi=>#H9Hzq#Fyq$l)^1vvVOdB$l#5eGX8ey zup3MI`>;_XAKtu8XixIg>K$jVJtsn-`Fl4!%sr*zSd&jx@H3y<#5vlqtK zniny;uOCH)m2yHf62>sXWXkM$T{YFw(`xq%twlmVgX2>UUU>BXO{qe42+$uF5YT^4O1Q^cXdfb^(fR?9Jr}qUjygoLE@LxYSUicphLzngAy!IFxh?HI zd#QnRU#TFLHCn7`gA$vQ+Xz>>VlVi=sJo|{w&6IN-es$1mO67xnCtn+sj+4RhRJF0 zRf$f%v&FyX>1Q%-fTljrho0-s&H_Aj>yIVUc+<#2V1qP{yp9y`W9H5oUDs!d%C7k0 zSLi)}Gtwhl^(cNnIv#THtqEd`az(F=#>SV~n=_SAD@>K?G>85>R%<8pN_Q2+LzPUZ+k z{JZf^pCU1Uty#hTBW1SuYEq;d=exRh?vE`Dq!7?Qntuu)K#p~&-dC!Nt+6b19V^p_ 
zli%4E2@s$cHL+{5Gb8fAL z4Qq{b`aA?Er3O~?zY!_Sx)C-~Yfpr|spa@w`l$TzqtKJntMA{{{86esR{wbk^?bP7 z9)n8NeH4bVw10=;GP}zLZ!5YPtn!MSEb-3o4b97naUp zO2vyG*}u^DG`KmSSF6E#*tTW(HntLp^dkoba~*V)i0go^>xSDMQm9d~@zVNj z8VI6ytCt&kIYGw8=IMu@y){yxdEdbCDQ7RtUQnZa{P~my0rurggA68@^Mr^`4NULu zk52(W4i{3(Gi5F=wgkDd@6}%;OcrDwEnsv)To{d)xGx8t0W^N3C^UB`oxak2=W3P> z))h-9E_K14QrRDb_^G_kiDCOPMQCV{V@tfCeU0?}njTRP-yPrCllfopYUzY6u1I*Q ziX9VJ4hGj&jx_p}mnik;UTU6d?%5J+K{LA>sOzXfZICSe=UEhc(EXPs)bkM>SeQ&r z9tGDE&H(M}PEuRM`E2qYZFOr(+h=T7^f|ew!Zvx63r$a!Q!a#Vq40{?w)*KEz^iTN#Mb&d$>iun2GNr-vYOU)#n;(ACz`) zDv$V989k+7q^u949JDLo&lL$~YJamYQ)S6)$xmL+k5k>PGL2dw>@8f zLN>0JLD?~37DTP?m_Ip`WMs;Zg+O%Xk&alR=98c&VVA$0IN^^UWdl(Bn4GnxtbCf6 zVNWScLFb`my)7{6R6i@-F&WX9^A;7bT~9ty9=x$Ko=L&;Mx~$G43jb1585_?8iu0V zbF1hHT&5uZV>rJc0Q%Cd0qrh+{s3>+USJxvTZIIZqP?hJwgN`{sych71BQXZ1^G+- z!7TJ=#%m{epk$}u;9muI8H%*?_wXP)^}F-^9CX*;U@vUi>Dob=T0aWQ=vRM-b;9o9MI z#5*z$R8W}m@I{q65lmHoK*rs9#xph_65OI=mBT(L5J*BgH4$A|&9Jyo3{nNK>XYy* ziM8oIiQ7A6Wu3|B#I6%3EJead7^hGF5ZBst2vZ1AZ_bG@d4CY7r>$a>7k=cIjVp*H zsiz{K@r917lZg0)5^i4LH=Vwvw9H>pB`7*MiMD)|E{BG~FXDEWOI>mwLoQ&T(@Pzy z6>X`yI@7~A@gNbNQvZ&8k;x-YO!>HVOq#?~nEH!+|1kpsC+z!U2uz_M9Y@0`e>6)a z&qk67&$6vOZkf+^G|OGD`-tP8drO59k?@J1Po<>KL1D_%7scQxn3|1wNVD)mG01jV zGkH!;A%C%tX(Ty{%cP>OUwo~a%Lm}-i%RC}Q_!YOcGphGDS53)XEf9NMr-A_p5w}ZBp7N{qq%8^!r6PP7k5Z zt8lKOlx{S}TaLTRel~qE@1*H=9hNhJ=^bk{!-;b}CL!m!Fg{)+d~C-f|7D83Te7~H zLDp&9^fW&8j|%-uAqf0v&*MU13I*plYDJFx6OZd6jr5!gi99_c{4En@e8cSy_{)yx z)a^8HB{}YVyrt{$xA>G7yf1PT6s$sdYfFc=mR0y`5U>+Jf4L@Oy1EQ|e`*s|i=Z0i zaW111@(X1@a8Yd}BBy2W?MdD*eoN^!ek5bMIk

BUlu*YmpL9FoEf$3g&@_?xj1~>XJ8%0Z+ETB z{XEnsO*?9XdhuIy%Ie{|wSCZ`NOcA|+E%QPl5xPO-HuLSer+j>$CPb1_yDQyN3oWr zwB!u!G)DN&wjnY0n2YT&QNU|-4U_WGpuITh^ZJ4_*WJiG=PxsxG}>>gmP+$zvXGYA z5Nm8bLtE8mo@WH^UF{$sL#etjPtWB+W+yCNi4G?t@kuX6m#*vXqG7GlDN-GMeSpA^ z_lIx@QlZcuOJCAQUcC@~b*=I9@)SuCTCS}YlB8Aw_vAt(g->A&v|NW)qu9{aRi-sTR>)ixCiE4~7DI6WaZ$yZC;LfLBicomy<4c{du? z;riAAjk5Tg!%ypP-@=yV{phtEig}1sq=BNJ&Ze$WG-(rJ%dFxkzU)L>>Jd_qq-Np(hJEAw(! zc6B3{*o-onIeQx4AgV?QhGG@Dd z9%p{>lpyGN2SNVZPsC6zfrmb!2iwDT|FMCEe?SnL0l}(ZSy+$1EC>aA&JWT+9M))W zb7&tLzf?j@aoAz@!b`u@fPS&X;Sj`s$^NA+tT*VdANfn$UrZ1WjsAQcMRakPwB z_a|D0`$#)G;c^jELh^-5cOmEdMXuf~_O>>wRF+p3{-HXoP^9wlMXWyoQY8c26n!7{ zztxW|*yL5EhV^!8dnwQc)y$3J*sin{GxZj{ugpZhJP zCM5^MeJy)W$dd&7Yx^2*zs?)=90#-%7%*k zeHWOoBRsz+a(9DY9O;C5jdH1$kz!bQAZ7CfGl9d!U{_WuS;SH# zL+_#JL)BubP*~r z*cV|O3Q|o8ZayhHx>$X)W$fgu%d*ptA}tRiRLOw9i$>uy?lT%`bK+-ISq7e{W?SAM zGpA&v=t#EA=}wIVyu!swR#4ak1{lv?%IaY@ewtVgJQ>F;c4UKJZW4YuiT|fe5I6;GP94h~XXH{~^>JulfNo9Osd_zJjo0)kT=&>8-sFFfhJtCZa-HDAg~1G2vTEhxs$cnvPv_g!=ea7m+p#iEG;weEDPGpo>s}eSHzQ|2Li@^$1?q z_>eBj8?T*CkoM#2c`e=LdgM)nlB8*ZXAd9C0$5LiTcZZU-(Wu)Hpj|}V}b)S>I8JM zG-*B|{BSwU3%0OC6Li8UV-;SCI$ZxPJ20zBm^$J$Ik{)C39+U)SBMfGJiUgmbV4@uC(rJ@W}m+gyqYk-?96_>?zb$LSs9) z(oh=t8RdcE2$ZMbs3AWf$j?s}g5S%-3ZWIy1fv&(Uk|WX4&6eErGMuB(W2fj1^q`Y zB))V|hy?qMAV>R?bkOP7wtoqci67KZgTQHfgFBR`438sbc*wK!^HfA+otl}uell}W zjT^T5@{?roHu;V4rkv&P%*tpWVxHk!lGST|gcF$bZPlesU`&8l7;SqS{MjT2Q$-VG zWCdM>3hd_#|LH_PWorw**LZ2!H6&eGOs))w6O0TOlmkV!(zr=|el& zSW0d>eb@cv*p+}+?5dghuT^(!N)_^?+4K&93N9|OH|#4@j#CEdi1AH+@>wy5XI?~g zHWmaU;|?r!C!HW@`UYphg8SkJ+HY+958%@(6Na9?~$QEWqB+S7ygNegWB=o z{W|UZD+X>TxgAa(w_JSQ^741Axq1E`%-`?WisW8@Wn7=p2!@k zSJ)y{?G5+IdPa}ryv1mDpjx>`a6+G}lsH_@hMn^<0X~ka znJ@L3q{=ltxGN!T-|6CgM4$0l0*b-!LvA>^VHGa>*ha0ds}+qEN=r|C`urJ{Oxw`*}T#K*iYP3`a7bT5HG2E5+bT{S=^WAAF2O=#c zXM?3BS^u^O71$r%7v2L3S5~wOqWp~JY%^7UEn20d4Io5|%doeog68^f7J~1Hos){A zzV<0eUl`;a&5Ju&onQUxgMBqZJRqX=!sjkaRK^GU*3FgZAGo*T1&k%h?Fk>k^P|N0 zye&M$s$1QS0_!fGRIhK=_e)NGa&^EX;B*pR7u3D$^|FI2bN7V0MKpb!CuWi0YdTMV 
zOSYyhZ_BeTjkkv*q7ke`1(dMV9Qm~!+29nbAnyib!gLG4D$ir7RuO>H7}IU(+>&Tq z3a}VdM-ELw@4M6JVIbK<*Lrv7f0pz$$0#6swjys&tgr*z5y2LtgW; z+#jrnw*yeqvd}*R9$!uXr}Xsi%33_dsy}gxS^I-H6srP`r7zkeuU?40 zlH$>4K1r{tM>BpFYQdSW;OZP*R60XB$nB2e?Z=0g48y7bUwFFz4Xa9WwnQ0R$K>Z$ zi0{K^-f8Dm>+J*AVS&Bu6fwpoq1?l_uzxfQCj|n2Lk^kR^jzJ$;@s zNpGxkI{P9gu(8pqPn4f&gR(J0rcU{$Q<+9%;C<`L~V*~16 zyNE+F&|ep6f2MU1v;dmKAwMASlf5Axid7NE5HtA4pJ=&(O0b4l8ExRD>XOmB*YfqF zHtbfu%Pz~1c}$GER7CQiQ7Z5~@a_qq3IK}Vw>y5(p=t$3gcr;y*G zjmnY>kUtBsZYmPJMN|snsq+7xYLzX8;bhoQ_Hy3QmCqVo4(S{0CN&rOhXFThWrW_* z#DL<|_a$E_e5XE)Hc5T-`IhNbz)DX>`>g4N3QksYk=~Jhb%iza%pCjqfv-vCjXWL8 za{iO}p+;iPh%@UFvBCHS>ZE4t$4LOolho8n16U+ej^(Mya4W{qG3` z&ffROP^d~cj)W)vM8dUpKM1IzT}5gx*@P-Rz7Qq%#iYfOKnii`jsIuSrOUUW&;5Y| z!IMWJ)eFCRT->Iavv)1DjfxtR*Ksp;QCrtD-^XG{H2m18=K*vVF*9Cno97H)RBHi^ zGdacg<6?u>i~Fg`I{#GS0N5~hpAM5 zp(Xy>3Dqj|V#(br`WZDXr&JRw5*pQ;dAp~?sg<|wY3qGtH1I+I1#QEgKzkYP-A~P1 zV;fQh!p%JA32ND9NI|k3o~l>ZPGeO6U8&Go5IASg-$Icp{WxNM{wHF!27lb#HQFWi!G|K^=NGftZ;%?;Y7s?Em zgno%oncmtVZdb+mR8=Nm;q@pq_FC%l$Zi*J#)}Hf1mq0%WWd3m$WZYPBFA`jh3jtl zjPa+EhZbrxMw|G8A7|HITJl3tN_P`M5SVve2t{7Q2p^V6&&egqCRY!q(PVi}Li>Z& z$xsDH{mKdT`Yq*A+;wBwd#*gDZ&s|~%<@xfJaWs%$mO+5q+_n9Qylqy9ogU%r679E zh3OT7QU%9RYaT%7Nbjb8xm^rdARM0JDk81OxkvhT=BBt+SGO{rPKUcd?sTvC&7)_q zykODsiq^%t)I#QPhqYJt>~^|EcFso?_J&VTlEO!hM)Lx#fAt-c{(3hjpq)oPQ2j$E z5`?5+%lSTA5w>Sklrcm6GJ^BHBWrNG1E>OFu7e#6V{{H<< z>`lz)rzG_!zA$fpzSBAa9SP&JIUjy_y#xb# zo0v}Hm)cO#tN2Yuc6MBmpFE;;POl$)=&c=+#rez|>L+0wHS7-`u4@{SOwh@$ATwE3 zT1YI|IWy}-C;R?!-GtZ7>h5}IN$^v z1)0tF&LSH347;f!@VY!G#TNNT0Xh5UpxiCyiI@u2{`Xn{AAyl5y*5YnYes+#OZl_U z;7AI1nqPUPwCKv^wl}p?Pz7g0?@a4Z&ecjkd@9 zCA*u9!@;WU9p@RTE@iCxbOffAJFje5Y?y=ZJO6iz0swjmsT&$t(Z%(`(~o<~KHnVg z<`au1TDXm~F?4aBO}PWOFuI_gba(v{seaT~`dd2@j|bBr7e&2vZP>jnOFQC6P^}hk zA_xJ|LUdf5Kl+(1lw^KP8Km*Fv1F3y({@O=> zg_`rbNJm=0j@qp9?fFV3xC87~ZzTgfJ?xTVgr&U{7Mq!lhv zhr7N(d*gQ1Erbp0gSDr?q41Vap3<{*m(wT=^O4(*et?ASFX{pR?lJ7usr#l(*Zhb# zlr0_$5*WqT{y>(deI6$LqPjaKi0WRNJuSfWUeD3ad+z#+@Kw3U3f2KZxAC*Ik>M%? 
zOE?4dzLjK9&FX9-2mpP+_E{y&N)H|zk5YAZdL6Q+jPi!WPsC*;nOw;mQddu?S+Zs) zv~u=$S*o2;p(F7cvaHHV4Hc-U(j3NRC4#wC=Z^f+j%;vBQV^0Vf@u~)Qj^C}tUdsb z%8>k&Zem?D5BZD6^&MJ4Ia|w{Z#R7i{W7@g{gE{wce+iJa}n;^pLg)TZeC-GwU=$> z9cE{ubVeVi>jCJlF1SKLiU2wK+oosWbM&_>u8;JuhNi9eSv(I!fq+yCbm)GE%79ob zGLPT)BCI?1bJ+^#`nv_;kVu*_;w4KKCv>|LO3U@KcLP-eq9dcjleU|ja}g}7xp$Q> zFI#GxmGRolkF5cYmew1T?;i9gK2d!6Qny_-&Cc`k&HU6)tz5=ed07WIPeJOhuJ506 zAaL>i&TUTTGzr&x2}?tQn1I>~zIz#ebiF*dK>)ih1W@NHHEwK0LeOyVhF@qS}1Y%bQ|UTe1l=4Tgc4||e(i1u>~tZf9cp@6TqID?R9 zi`)Y)vDDaRPz#r4#FFOSG=!o)K@rOzEQJ{2uo#cTU(n`1!b)g=0T*g$e@W;D_Fy3N z!*JoyU~gFMQxr9{4wC*FuG%a6%a-~u2RgK1pZ>D~^3gvt(9haSe>*e?T(UQ?Low>x zF@!AlCqmBJQL1@fYs%%`lFwxH-5T%XjmI(A)AbU5kAm;o(mf2=@Yrwzhs&P;q5wej zOajnu+gi5ye!J_DJr?$igevv;g8d5aoOsNYo! z+;vg3=FXfn!e*hC2wIxUB#rDcSh@Id*U~g@TJ8Wj-((WpuI;?bR5T6s<~Iy?2}rgz;?TgCG;P;GLGt>2gw;gy7UiCz&Ru0dXB z76MVIXvdH$|DQ-T@Di?aztN6Tg{*NkQW7~L(=5V{3ZL|yBr9TGmnu2M4XceC;79=| zI5Ec1*tcv?V83%;GgyETQ-|kn|!&QMa1rylbW=LCsK} zkdK^T7C_1k@1g(77WwfUKC?*G9P1SY9>S~|WKXm)6NfF=CpJ*E!tkj)5H-yc@s-dS z@<^mtuW#EFu~MQEnGC zrGCDQchfjWeqBd4`1_r~!72PdvYVvB2qrp{5(7q zp9qnogE);#qg8?Tc1sw3H%wX=%y;hGIKU^Z@*Mom?fL*mpJ_Ma`;|IIb(HdB+oP?z zwK2&`g4rx5l$K5=H(Qd2|Jkc3SB) z+@dWwV@zZ4L*X2Hz)M8eRgD(2>lF7mMp%ZDYGL>k#s0tGQ!+q0gUm;yV1LtyyH|bT zJn4MXFq7Kod0(+xF^Z4tw8c9C&Lvm;cpMa&_VTuo2&VP(MyfrZXP>^~DXqaBCiH(! 
z19g%xd@35=8n`*+bwwvywl|X=4j)GPSoe z$La%Aoj)Ep-=F$q+_NEE7xLvgR&kHkGd)E*!5LCAUy@DsJtqnJhx0AISN1p7fP)u5 zfGNm)6x2Wa?a|3$!Cp6L_OR%0HYB^3AWf;^#Nnv-`ROO-95jIJsejsgNB(6U;Jqqn z@4vDe66BzegaijUBFuv3{+=L51A!~|hI9x%p)wpp$Vaa(4-xXxy^OPE73_9ykq^VF z@q2u$47&z7cH8Dt#=h#^;SV&1;nM@A6W~)az)sP5S9vSj{c5Cp>2R{2Gw2%!hjZ@L zvI?$_h0K{L96&_O^os8|VTllyrn)J4Tl&k(UH;Z4xAh`xMjEiNi6C>6p35~8xHsjld|#W3uy{M3WC^LX z12l@qgiW5)`$H56;N@fTf%`#$oSTq?$CquefJ(lZ^c;Qwp7sHVpvFe`6rcVs*#FFf zz*YO67=llzT*nabAFWTZJ{{s!xcYqnr|F9|t9OKjCYWK34m~v&8{6et5DUr>44%$y_%K*mRtjtKd-7`Gb{NVgA zpAnkQ1!%&RZ#zUUsFG6D848}pp#D>ve=h-ntM_~@6rTi-AyukBxoL3WC6<2jZH)fv z98D;p zHJg+dko%4AoNR`9?X5Lhb6MZnas4H#PyF}WzoNp;*ajZ?r5)Me6rv#a2Q@IwLLf@| z7>XqavJ$*Q0xGv|db^C2W8BfkAU(%}gsLwr!C@4E<9L+@jARaT1&FU2&TY`|qbCgz_cHaYt>J-9_ zUFkLpBzJKlD`-aYH>pW|dv_6Mvp?gis!7hB$rDOzmtY}j!RUuCL7dKtAlNMEx`Kh@ zxktVaMHXIhQi;?j1H0v1chD~=!IuzW#Nb`%VpML}>h2wU(va7fm((iu&VwjK%YKMQexbZ;W;`LVy<1GJSr;!F z22ru3{|iI`0OJkc5fvlrB9Q*I2%5I<7)Bvql z^PTG=rm1GkGow%DeK<7=mbphmt{;+n> z`b#b7uo7<1q{cn?m_4F82w3gsV9k+!9`c~^&%^xXUkn1*?G5Zuh|)QRkdJ;&0wQFA zHfwVeSC-~4b>%sE90Xi$RkuqL%NMPRaLGo8?ta0BK~x;k2_Omp?jg%ZC?-XaGpqDA zD%k8sZIvNKPGV4P*J+a57}l_t0%|I&TyWWn_5~}cFV0<3d@;OJy39LZZb$9RqV>9- z6E>+7>_I*vg#a+n7m^@yxv<2|UZI2ahk{_kOoSS1=}3Hk3wh`X^}B`hu_OVGIj#}s z%#AEBnlTl?hrP;Bl$YgO?RLc@8+>45Y_Xj-c|*OC#p?lDv-)SmEb(ve45%+xUCq8) z{54+Y6rz3=?LXTfaQ(g~hC-C-F$7HbC!W|L)#1v73qb&X3i?{4tf6K@FL?jF}&lvFF_wLu0vk|X0k zzM+A?`X*SlMI}W(ge0_Vb}TKQ3P7yHe$*of^|i1V6q#0*h`48hAuStq9AP3STdh2X zos{S3f0F63GdY-mvImz=uG)lg2Q6?Khy(=d zQ7hApm<#1tiRa$ySTB&tSeh!~E_dY;B`|HooyMU4S)ad_g1}Grd@U5BY>pw-KU$=h zC?v&*e(A|G(N9_wZj5n>_-DsHQco4E5WlhVe3g%b2#Z0*!`9`M^GA~xu0JR++-jU5$R3kpiK4)Cf92W4M|r}23l3#kcINs*-nRx6q&dJ zoZAA=tt|>Y1UGFd;jK;EqqYsbZ$5r%M%wZAqP_!H${{|X_G*(;oT*vsGT=-J3kbcn z&6083`(3?Wp~-jfEf>Ru6RLHvcl#c>pltLgPC~MbFwX5O;rv@)WZv+;^z7G|<|sux z^2$E4!6`mL@Tmc&RtP>hA499Szz-~)tpGWJYG$|6@JMaci&gV(Xm`k3o~D|CZMs$tJ0A*6-UKc6ESKO13^aaswiMfxh2qmW z^M*uf65Z7D$ukG|G)jb4Fiv@IN$s<3EZ>HkEUSs=mHN>R;+)G79-8P^W=<$AC6d%` 
zY!h=sB~S6`SC{wCHVE9fKWIbo38II8ZuYY@@W4DeybL+oxtWW1izL#~tb03h zqx=T)MenyfOEF!;6j@dGlm0lN0mG++Gye-dB?EL?d?KOuvziPwyzPA)8FR5g{5M|Z ze%cr|R3a}i4`2iE((WZp>6L0NXa+Ixv-)I)80Z=3<9pn`>Z~U617jiJ5TDSf-4GLZ z>(_$~T5pe)dBwH{PA*`gb0#52PNGKybjqAi$wLlirEY3$s=Bop_U6@HVp%A0Gw`fJ z23M0_1%}pX!vTGYSFF3_+XSvYLfgE-K6DXRcio|1NBz~6R@W&LRQhtDsg*ka(fIuZ zH3gq6{DHfVn>RN#MeoCX{b3_ZeuLQKxItu>pGhjoMuzoO6o$Qu zfZ@|4oD<+vGQdNpNvRu4Z`r{hEMZfIU?9stU)B2Z9+8uMoDGj&C^bNWEm$6x#D)HF zJ61Bm=>-ls$1haj}M&zUxkp5oJQ6@6F%0ypn_VkkaEA49-ceT?^cf4DDSMuwUTnBm-FkKf!l_aumLbVEu zL4^;n)z?l{jXPx25?D%Hn3QqQQFWOdBE{Y+4R!d!m3%_2a_l%*@O4Cq>3uW}G4~sZwd==lEbWIbCBawPbGO#qHA= z)S)DQYkM=YPGzL{5CQDKz0eK4)PBHSrGbb8#RAm|1dr+R0Vj*9{@|s)J(F6RRAfV+% zp+!dx2sQE~38(8g8)yJRD<5t>(TIB=a1AecoR7FG&-wiYbbf_+JGjPTb@6j9hY)qq z{B9*eX^Ye*Dy?U5{ypyE%*ve%+^d{3KN|Tlw;I2mP_rMh=(XcHkkdk>vfN#3%mcN% zn}j}S;8$E+GrdIfgFf`gFYU+%zq>#9?HT_6uL1;q4$~|IqMjT>u^50>qGd$&jn?Ta z1dH$$SIcQznG<~w>vB4{M4#T+sAtZB+#N(V&3$fNk5(DV+3D7B`F2gkmYMl}(i&4n zk~FCyDpdg#qKJ^Aduedt1KHW8to_wzE?2Zxu2Y)$9YB<|c<(^u6H{6L@6@jeGViWM zer2Lav(p`9q(95hcKzN-=ZL;Xkn)^mgZF$v%-IXH4}0j(b7NAcDB6jC>o{wYKqeJOyzFZPFSC`6SULtW@c zUbzr;r8G0Xc~sAF`GMjW_>OXvF-me2`zNHi#hVxr2)dxOXE2CL!ua1HievD`=l(3S z)!UN>l+xO|4<__&*1XY0aUK~qT74{ZP6gy6XDudsc}tW`cc*+@e26otkeCO?0{T5xO^JyYo8yPZCzPl%ind&15nAIf`G!DM{KzKqYNvpbq z*W+f4?K~K9t^L7870&KrTmhV5Z+jQ5MoT z>R!Rar)J`Q(XU_-m5hD@hys9Trf+Xu4)5q$EV>#D&%zUW?}rM9`ubh5 zODLw#FX|XS0vJX_F7L!@*ePX6>GQzZiw;L5I3Nza&<~XwqfDN2U^s-REVX+60A7wH zL+v+!i)r|$hPup>OzjD3y*_NmM~DWrC)Dqg?&=p;9NC)@%%=TQ_dao)jN`=ve;bK4UR@smsR?{)cWI&b)G`hp=y6?fWR&L zo)`*IFaK=;cT#3Io~;)&x3ft0f6*i=*>sMoB@T&DWJT_ICe6e*6&OST=tn_RfL}9b zvz`V)rA``~-u1J^MVcCM&tCrkh!MU{D_+&f}E z36k$)AL-!UI`6V^8iV?+L=P)K;MP4~3x%kje_N_Wscfd#dS0q4GuL${HAH8qI@+>X z5@II&5P9oxZar=s7K2Jbhk}!*eqXQj&iLiKD4#l#=dkAVvA-Mr5`#Lsid&?{TP66w z?gN^#1taH!vI^Ph^YvfgZ^D=6iJL_NLJF*wf?qpC+uz-2eKMQq+^pNpW?-~K(MmCk zV(B(hb%;-AhUe_Sc#j-Eye~lV>!#62ov3<&sBw*slI810G(Aee3DtTv@a%bdS?o(o zt$d>;!Oz33^B!awb`;f>6~ns=0P*Qx2ooq~SU}1K$fhW`flh88~8v 
zUNNLtg+E?_;uA4)v`QuS(>Hg1v<~TiTGFb%M4Z5s6m@`4TvC8&Kkp;RgB+uco&{H) zAit@_lpqtkHaKVUFe=zy|Af*)|K`e78C55ca}x`{5jP;aVJWyjFCcPY=`k;cX_Ex(;L#~7%6?+j|B+@a{$hvD^;lF*QihnsHPYP3FzHj z*Eb)>pu9#iiK|g15Q_zfBPA|-h?_P%HOboIjO`4L(GcZtbZI+_dFMX-%zc|VL zH-}*7d_O`XejtE719XsuPzfYN^^=A6c;sg_HKZZ*=g$%~q#5*QKm9Vhp7=qT_#Op8 z^01HEgR-CSbkGdC2SGQ#FcP%Ye#?Wn6E!uo&0Ye5JNAZjC_a7uw}reI>dcH-PhSPc zH@rI!M*;*01fVSrnW8m?V`#gY;@sG9-*5$|L4-`D0|I488N;a=!x>Yn=^Kk1!%-Vk zgRUCG!$+tYL(CLC6GmKIV|49<2ThWN-4j0bYP6?Q8J~YpR_FQ#51!i^jDGdbfV|fT zcc3dE6DC8bphRkRS+&3|?>z2xYB}ecY-_RTm&2rR7sVFJ4(TaDTQ5t`2rbo#n-E@N zx8;+U?L&%e$8$Byv13%BxTwP?l<#OPuYn%-iq0oTdNYD3jpttj=Hv6>imoMa?FtO^ zrg{O)Rwba-fX-X)ycqf4`3Z*nAK9oGzM7I=?5Hy2cW}IWN>2wO{rw&U?%a38PJ>ARZz7lt!0+(975Us{q7P`PVfF`*{=0D0QzmCtd z75!R!i5?ah##NPM$DEXYuzzA4_x3hA4@2qyXPIhn&w+EjHYt>yeh{P6lH1qfwKhAj%tWbEt!s7k6S_yhZM{vuTh{z+#hU0Oy&pYCqnvi>Ij;=cGaN@XBDSyhfb)=!P&9+PPz? z>kn2bb%!2{GS*q8F0&CNE3X@ z5rg#Vr6$`=0V!$MBJgTQt;t2Jq0698g%1F?-5G_CW<^@kmSoXkz-)Hx6w1ovoow{T zHnUw(_ux#ZlZ1t!lFX*ECdeGD;NKA>Zb`;OeF>#O&C$BqKD$yiO%=a1dO{~j=vEKe%NOhp3(r3W(i(I73CoOh*_`IP;fZsVfqL!SO)>NWh;z!iVE{pAwL8Nn7 z;eAH!`x!Isu|Tg(@YSsLP_}yl*#55rVr~ntj+v=!%?xHu7bOs~%@RVLB=vuuZAj?m zmq++;VOdx!TwD-r58F`hBgtX)LAdFl2?B((kRXj{_6o!g%K!Tw@+(0THEavpi2pRH zf13NB4*E+88tz_4@t>u?TGYQ}_Ftv~fqV7_b_heEQUBXQP70%^Bv`jrzUAW1{!-jw zT#heKfRVNAp+|Se8md6&H&_TN>m&>X0I|upL?YU6ns$s{_{3DH#>(t<*E=Dd5~lz& z*{*Gz5*}bwMhtGiOy;bylQ&YE(XxUsjBYd6r%k^}Qc$a2)_m;{qLjmIFFc82wam7P zVhX#Q8+UGu@9QY(YzbXto#_y1i{1(KtN9Q+xN*K=*sTmTCbRc^7XBN%(QLlFsbv&7CL_8vdy>xAEn;eFWjt#LDZ8YASxNS?AV;$ zx4las85S%ghVDkJM%&&mW#QsgHStpF{^&Dp06%z;Mt~d+hnR+RWH()3B9^mV>ZxA_ z)^vP-y)^1KMW|L`A*e!839)B^QyHq^mLjX5+wUltS0`lDI+urDS}>jaKD%>5t*Wt~ zF$$gfqK}YolGd<%pS7p^PHsCQo-NHrp{~R2ACBx20;NhSY3c9_#GuXFKdzFu6$mQ2d8! 
zcyH?v5yJCxWsL2x5L7k{ocuSvk%l!|De`ERy8YeXf{C!vUj7n-((sN^Eq{40^+V80 z+b4#eJ=R&ypZY7_&BflZrOih4EdrQ21k1iKnq-u7eVa5~#W8BrepQDkxfz0#!{qz9 z4W;A|pE61*+;b!=pJH7CzVm4l_i0FMBE)K9;a=1eJ)>ks<9R~0j(QER*3^A1eU3UM zQMTy(fSFc4nSeo0HoPJ?FozAcQ1X`31 zId~us4&Vt)BA!rM6%HyCj!zfRP32JiY%O+cY053f^oQas?oS0Pydx$~2keoB;wHJd zGe6^3%FN+{ZEKhlUg~>{mXA4E;eABznK{L$qZJ1N59|-xP<)aKdPYjU)gB!6*6$$NW8Nc3y@l!S=W4j%V#j?v(9c-8sK$nyJ1iUvb4Yrs@zHCI5NNyqhd8` zG<4hbY+ViBnOpN?h*CKZI5o?IcH>8clAh`O^WT`LA?N>)i6D>!f`dMPw~s5(%3ss| zBXZRHWuU`&4|L9QNT0(4{o3i!{`Fb9!@`3ukYoKWLDtCKs}kSaL4Ucsd**+9BtE1Z z@;Un*4jLWoLEyo?AsvcOYX7#7?WS+uk+HqKD8qrYL8v$xUsQ<0nEYYltJgr+CKDxo z77U+qPliwTTP($(76kk~j$0y?@M7Ic};F zm1-2%rTQaSWsG}pxievz`JLhBKE$VwWh|XEVE7M-w*m#ZMNnHDZ+Z8WaIZL?(N9cp zx^QLng!;YFZs+}Ign+AgE5dmG{$x>vfq&amh5|Xadn)`wJLdpaUfG-raCa@o#NAhN`Joxfrs`zF%+K+{%rwE4WA8{-=e}-WHy~x zBY06(wJ4!`W4WBE(AIq#G^8^N!>7C>@Cg9;)Y8%8+A6DGObmnv#$&&Aa3a#c5*Nwu zxZ_-vo;|PzxWc=}Y02V=6FpT#`95ix5SJix?lu?&Y2Av4?hyuS5>%^2&~QIqp;mjCx}IFderJ~Mt+E+OP#Tdc zUH9u$frYx}CcQA=%XeDGugHN-B(Fs(tkjelsclfCFE+9Wo@ZAD?RJ{cp2nbluhD}V z5O{dc*Fy2h;@_5PIVLONml`>*vG!Qs3ChBk)(`R8UeuGmkIj9$?2)2ewqaXkgRo(B zxExgeUJOb?foAr6mhQXV544&$=Xipn22(^~uR$SsaHAJmjs>7aNB3iMYh@;I=Ws;|HG+5eu!l;p&Sb`je=$ z1o}WvOW(n_4IUX`IvD$6~}J8UiPex_2{k8 zarTs*-&yx@Qs9l@e%qo5VW_6dyb&~V3n?`CRFcYFN)Z=?-)|d>3D9=edYJ~7w2nbT zP-Mu_g(QOa-}b!Q5_pd%H=Fd`AYa^BQvp~O5}snW381)yK}M3?$hNt*Aprkx!xi0GsqPqs)Xl9^G25m=52Zh5es%HnW9xI=v#{T@|`zB>+6F9U|0H z0%s~mh{LN1_D#7*SIZ7fw75luwISW#OoL7(Ar3p))UfTI4Kokm24T+--y3()See0#`myXtQa* zfH73zNf;^_C{2I-mb2|ETk%_r8>S+9q}F8ms|n#mylqRlLRyll{eV$Z`>ouY5BPlD zw={WJHZrIRP$aY$T0)#hyDVxKHG2;kYBPI8Jc@*fjm4O4D=B|>yjalkg`WJRd`hAa z(TJgt>2eW76g8??}?!d74~lnIK8f>;^NpKTa^)F zl6(_h2F-l5;-fc(ZeyOH5(Ju=z3I%0j$o){Aegm2V27-XLnA!B?796J8`~ZsIk?St zsRVU)y*}{P!hrKggX1j+mbJ6A0ib83T@TMz^J9d(2r?Ke;l;CKRPcdn6&8Y8i}#bo zP>S>JUPNF(ow!x3F9Jqf57QYIU3{j?rT*sS3AIY`Sc2o*Rdn|ZQI*y;#|L>kYKXom z&tGM>C2SUHceQD?;DJEp({Cna7%qva$=0 zbaibg0)&28+enoDaG0NBV`lt(ZLPoGm 
z&UD!fNFcnhe|~o7nmJ`$@V5uTa>mq|CseB+_BLmAUM0IQVkR*e!p)C$Z&Px=OEIK?N(TTo*#wLQ8~Ovrt+jKlu%wZP`g4@SGtihUw^$-TyU1>MRW5u=2$m}RWzG6 zLNE|6pK_D#K8C$R1PhwxO})^YA#xuLZaTgq)782C+E}6=;J&Tp=@7PLP%mcxJbQse zKP+*GDjey|*mb|rB^?aptG{)U;(HRHh6a)L$mzG506}WR;o!LThzXj5LxFDKAPecj z!am}#+#y}<oZ6Px~ena}%g=Gj%>f=}UKsMg&KwQ#I(k7P}dDA&+{B0Z< zK9!yfpSpT76SFg)#ko&8Dv1;60S*`ew>K{d&*V&uq<1-Ur~}LxdM~^=F9hl4bq`v)8^fB<#%MV(6*3f5o=fos_Y1S0)Sa<%Vpo`H%1)tqgK|ay$!3Do>|{@B|z8+ zDbEw+z-0hVDYh1!LR+K+7aXGZ8J{Z8`)F0c@AcAzOV8OWnuU=;wF-L;s*4kGo&xTIZ%(My5hiafryJK};JhaqcF|aqZwD-s74cDW z+oW>fzoZGb1kAHM-^?ly+k>1oH9Iozq6fmE)ff^wfN6s;?q$B0Q9TVo{Z64j%R%6& zJzopOr}lqasvh5BBA!?TiL~629*HB0=0MMDj{ltPFrO6>_Si}RPxbGwL6z@?pq4It z$+#!I;Lf9IkR3oXFrUHp0WzUPBRmjaTzvjx#Y6(Z_(%0PhFdBm>s8(;-kW-?t+`p% zzI5_{*7RrWkQpx?&UZGn1KCeG83=CG2n{D^{*a5nc`$MN zx>A-z?B5{}_5avA>$oVo_hHlB(%lFGA`MDPD-8l7NQJG_)TgQ51;b9I4f*L3nK`7OMK#DMz8JAi0>PZSWCtwvl zrZYn(8YN~I3G{P=)?6&A=!yqWn$ddbrui@N2N6C6_?CxwVZ{W~F+N+;ThFJ>WY`lh zga3m1)-@^rnFqqS0H8%FiAT8`dnvX}=DQzHKT}3<0sZVM2iEe(kucy9f4`s~s|}=|8ajv?^2@xCM4w0zI-wm>{lpX#YXjz;2S?h!C2VW`U zN|`-bxTA1eIbAOL>lL~|C93#z5_P?~^>%vwoVH9S`n<5eenUd==HTqU?}JA-9>3K3 zjGYdMN3#X8sBHr3yLQ9IGF}EiOYWVL7pNxUE;^4ATW?%GmZ$)yI#bGyR5%nq`~+Xs zBzsz>;&QR&Rr5DBJSZd>!nUX6H$jVcuvVhqKiScF42FLxVz9)-a3#*5`^Ne`cia|v=EJB-8-iCX^M z05`L|5prVOCu|6L|J@A`%Ib?SB8?85uV?SrJ!%(K+1w%=QxV8g^LzS8}Tq!s;Dl#_n>xq znq?4tmV^3dMvrMggs%f)j#fyvraX&sPjMeAX$B=W?b`C%-g6?vsa;Hdvofaej%ef1ywD zB|sPDka+|-Gci4FKaDPJ?r>tVT6~9dB$|D&RW~hEpUVD|J^_Gr;L8xgo=1^gIw(A8 zD74!gs=Qeg51A#tFJ^|s-ERQjfGoYP?0(!j2%py$p7IGcEsgA#@k?Sh_ub~{RGlc@ zFd#t^s!xjFw>$HixuO_g1ze2xXeiv!H*Q`Y^1n1%-O$$%bN%v)r%90xZM*OChP0yc z}4$Kdw$YT|wd?N}PS=zbwW7ku8^o_o8EAKlRMY!u=X!9kMxA1^j4xCaF7#S#4> zIzNU(JqqQ>PL2m34LGzG(SvQ#!xFMZ4OxNjw+B4{r2f;an$_3 zOX$ObejXzVeHNn7roXBd$bITyN(bu`D(UYAnE_{m!3+*tn&)d3t-|G+$+rqO)-^o| z7VjEjf4%;7ixH|%<)_gn09e4%diyW{&0cc-w(pe<%3Qhq1X+T(`^F_*Q^>q-Prn1G zH#M8&{lB=IwfZaIZuY(gghT|`=c!1l($SUX2nf86^=XM$QEb0e#C}Bbm7#*7K|lZ6 z8`ZfjSG03IwlThz!<(n%SKBVVx_jojQGCAn-S^EK*0V3+hy$Nv^S;@w@FT>Ms{=+V 
zu1n`up}zaWy@Wrc@}%C8B)_c0J))hBv&k7?n29bEHm~>; z9sGXcFb9<+Ew7P(qr^BVzO^`#t~XhsN4|9NMxRdCl;H+pSt96^tcsk&;6}}%RzYZ2 zFxMV!ULIH1>Wps)++ozk=Z(SAG6E(@UfXmL?7>zmSBA>I^!lXH=S8ve!5(Kz`BNsr zDD}0o9Ms?Q`Ex7CedZw6g7pcN{dc2EK6{N0`*QRCdLxO898Z&W#Jdl>1hYpAxR2%6qNb5IQ)VS)^UchcI5@z>D5sBMU) zGvCXfQ)z2w#TPbX;!*;fB6%V)?9?e1bP&$>MLe297kD-FDX2b;G1of4`$eGbWfKC~<#p>dB1TW1Xoy5n zA%;tC2oZfj=iUJ*OI9W2v$@k9dPR>Us!?WoqYv3uEIrJNrg6+`#Q*d?r`dcNR;?5(e*DR>K+!6x)JGB zm%tA`!}O^=*r3g6pxw3am21HauLNh}8dRdH{*y!{0Osl4*28%YiD~A0n=0WX#X_b# zq#1Jyk)$;*e1j3j8UZy!-HB3S&E7{Q;a71#P<5@jat4iV4+l(e7z&`K--bPws86Z6 zSe5Fgm}v-{#W&WUDh8L7x8(48%6uzcbP3o}qC6$Zh-vJwAi+qtjpvUNW;VLf$v8qY z3pFv^98iX{nxv?XD=KkxI2FxdNtV}6J z`R36GnqPZr>sKz`%gecU9}g-~)u)lD1VGKx!5-(~`Xz&n%LA<2r~^aKQ9ksWboF{J zJ+@Gm+_MJEFSOH&l#|)Q>MN}|*XL3NwA{k(ge?yxEC*fptwxJFmMA1C>nEca(F~5S zr8N=~w>0j5B7E+U9&4bbRGUfI;&a-2L{}v4eXrOlCqj4TP6$mdy*R=d+4pwWsJV~% zmaG{gDIri4zHRY^>FpbTjH~K)HkU9fso@0PJ{xr%4A1duW`B)+CQ%SV|MvoNUpS1! z5Q)0+y8(Wo>q~8zzAM&gBc96-|M1QX!UNuVrk%yIo^zdq3nK|oiF$K_L?r+Ldk6}J z4+2$#ZO8O#Z#ltkDqeH3io=sE8}fbA>lVWaT%g%Aro8=9hbr(J^}t3myfZ6SbX3^) z0?C!AdN=WuMF^|V9MprCSa)@+IdLj8a{L7oRk-bXX?NAF)X8`CtSUB;Ke?YW)_S}^ z>o8ePRE>T3dSJuDDQm8oV!o)g;7M#y$24OoA0wb+^Xa>E*&tC=@D)$WOXxY3Ynt>^ z2wog)Qgg7mmu~Q%<)HpIr;uKd`{F^Yg-Dd%??x5;N4X_il#0sm8+B~aUZz^>sSCj9 zO>IGIbol;WJ`sPsc}rR;#w@PYEo`M+|faZDG~@*G@r|v<_j;*F(ck@!;j3Y)^e3| zS=S{x)~8;SEKv*|@_AWal7^Rs7G|Tp=%1z{#$p>dSe&WfiqxEvR;?n;YTwUqCJ^6_ z+_ro|w#{EvBW)t|9qIDa3>=nh6w8TrbmD+BeFD?E1f>uY-keBxK71u% z!BvNy53vMCYlf_ho5JI}RL3L|HdvozyXBOnhp1Fg-lM(>&$BctL=^~9WVl6rN%_(g z&3YyH=No!^s1d`v7N6g`xfx4e{|%w>VN{33OH|mqNBVSbcV7&tn=U3xWnPEU3?9|Z zvS$!N2-=@(9^S2~urHW(}x)gT8ga{F~jiXS5z94hg3u*_+|3sjFlvODFZ@g^&@{ zSDCu|6Ek};VuX1~I+>_a!9W~VFw01^4WjxzV)F{xhKqUrcXxz|8og)UpA)80DS{vt+`ZG{{wix}Q=V%rE-&}C0_=0e?_Rl$}_XjyB zrmUgCy~psfWpgE|aoQd1f`ZWB;R}w!^1k@|cC68LQUVXc~7j!~X9@q99Lj0IhT zD(r|SI?{;)j@};}UBLejOOX3_D6?RRivJ~IZ{FN2!=%aD?;-W-vdcs>u5GZ;G_1DU zp|t+w9%ltY14~rfB+YiWlRQ8BprQXQO3V3ecOEve%&0z<{&OoFBOKtrZ}#@kAcntu 
z9qn&kWqU2VPxrZsH1804ec$MjL@kk!(V3!+N#^G$Zi}?3pr+?6EYwqSL}8L%x2^Fk z96n_@BuuJZA!d4%gl@OO>Jn{ozn2vsHHdd`Om$Rw>C!t=6M!OTt7hcVt$6tC_525! zqqT0;#@kDCpLVf=Y>J1QHAv4S>g3l7$bJ2A>V`;E+Aq=dFC^;fW~Ox-pFq~^jaogK z2P%Sfebvn<<>xO&AhXOccLwf3C93W}N))3^OS~pSK93gd&72Gulpz_plJg9*y%;-` zD0(XIE=&T{YMTe)%$435lWA37+5y9NY|h9;#yeu|%uFbaugQFw zg1Hi?AuN`0{;p}&2fi|@3l5-fT(&-^B$=7~Rz@OzWpOpLx#BAEg)qzycSCeD@kp8q ziGzZtGqQl^Ca6N{g(#>y@jQl$U8OK~OQK^%FDqVjFvovUvuokJqVIVT5+r+N7w&QVc-svKPZ1zD(H@IM^baR7kKlq zdT=x`rM0Umv@d9a2w{e0wOoq}{6EPrl-bqBYD4m+U*H4OF7n{c3? zx4;hwUO?^}2NOF)qF(-Pkkw|kLKS?R3_o3c!Q{dfe#v!Ygx%1XOtWYPD`eP@`Xf}L z>Q5t40FWS?1{=i3JIxa_v}Pix(fTDmf7yAr2T4PT(;&5C#~WZ_bfngVyS0ahSQUA_ zA^XzDHJle28+WRgFWK6?5^iHXmMGfK-^f17n%{fFVC~cUxJvD#l%7mfWUTJslun@C z*B!G{^2?Jj-t24_Ss;0P09m^>r1P%pRg;={(Lw75o<5A}`UybU7B46JZhqH=-F;Iy zy6Al(hB2P9vDo5WAs0@6vCGD167@5$e_sc=ZyrWsh(wkBZh)VQU5`=trfkqzOkC^c z#WVM=fh#=Ou}KWx`OZ0$hg2V+67}IkiMskFCpML&H*qpii+J9=0=pNa->;{s<=pFq z`UFPyHu!C%?jV^+O~LKuM5p*i^MYOVy7W7!XmVIGE6-A%J9QXBScT@Gi0%+!W-Q%@ zvGY-OgHaJnr1mzVmrxG)M%F6W;X1m|dP-L7IZ*DqV(eDEiQ{+rzS3a)}sG(DfPMxQI^rcKhO0Uv#qjMBTx6|SiZ%jGthWRr8)w3Mb-}Cu%E69E8 zAl5=8s`huIdiBPlZ8@C^f7U`i7lN_73=QgiCa()tn6UZ zzf4lUP_F5NF8l`L{N0F>;~Z21fNGj+zyB87S*elx=?&c2JPP+04ve1dHFGe^(yOM7 z;eg)S5*n=TlgG)ZPetD~Uj_#5!dY&*72J`52|$SoB>eGfx`aZtYXz#^+Xe=P#9}my z0sZ=Bc3(FZq5}D+sI+5r$4^P?U7-?gwJ=M}?2b6@At6@T0japEt9)Ct;{I?!n34A4 zC)&}81J3jb{LWw-N-J2OT7HSu<|@IV9cNsvg5K{HmPKaivFo4i<@k6n-7q7%EgMIb z2i7MVoprk`-e96R5phHxeS1HVGKR?>P5|9UJw6rLKez&-Pqc{P2qm2g)Z++^{RpM~ z!FL>MZc8S09O+YV+@+xFJrskguKOabBQc$NaXfBlF;^2t&D_NJa}%x2i2#h)96zIU|s66 zxg8g5qm?nB_FRk_wlOjRGq0wB`5G1qkyxfsEC4@;=PlIFbG_9WJ4hLqcc1HKkmC1p zx3CtHk*Fw)s0YXTbWh*3S0L37vrR@zhumk6h^K#klm>UWoMmWa`uWml=Tq_<;`G&^ z)h#w`wyj--)eKc+PeALM@htHzC9UD~e1WSr0KA7ZU#OD3jE2`959oVPgFyt(tvp9~ zQmY&qoX?pTOwaV`uXz8t1?0YW7>ObJH2b>&wx-QxSjWBwpOo~H3^#gOBhvpP)|w;IYBkPQwz7>ey{#+2}*fEek96H8UeDY^6xQqtkRwmj@n~l%gB;Tq_1J zH1ar~dwx(;?7efulRMm$M!3dCm|vsii70~`WG}QPo#mkZOy*y=LGJqpu@<6F-+wo% 
zOH{+GrH@-4*bcaui?a&)FAta3m6#JGZW**bFPr?{`sW|&G7rSZXM-DfGx&@uEmc|WZg^$m#uN;cD#3xHOhEPBvt z%9P)Ewh$@f+aKJ$HNWQhwb1*9I%V>Q>?T8))!Kv;1e%stgl-T{M=!l<#j9+W?9`SE z4Z2?Pa!V|&(lH>pspFK)Muia9b^-|f*TZqxxVs_-1?3xRYAZTiNnxl!5&YsPCpyxJ z1CHMx{PkM?|Iz~TfQ5lV3-UmN`6Xn9UrLs8qL(F^$MT-t^*(MbAHF$h z_LntTutk+O*hh}}Y+zTx6XsmS?!q@?ej68RAhTCx=h7=0I5z;XC_2P&=>av2@RnkP zB!0WMc=x}Uzbtv-1h%LTn{Yw^Y=Q2l=EV?y_G`+*^J$q+l%*V+EG?0GdB1p5)zbDg zo{}x~FIx!ISKiFcWbwfEsz)s&~uRC9w;u?S>8N&x0^saRG%478w zj-yfEz8Y6xR%O8G{rs`LsShPoqgwu>M)}z>ZY-njOK`?13A)-^--vrc9Ihz=YfS2C z;{Pp*yc!U_bZt0|c)DwgCQP{VMcyNKDe7-kTTf-qm%8UaMKuzGgvqK+xGR9n#&y)m zuF9bO&L_J5g0yHOC;`uYVIJ|JFi|iJ(5Hi5@qAj%zTWgDe*G0B{h; z0sn(^A0@ATJ~Fr&x={aI9BhalG###wMFwy0&pfI7SDNJ*wiTyG{!{QdaE60^q{zh4gR%r7eCMZ zmzV}^qoq>(lFdxB;$K8 z4N|^s+a1k<0eK-#&8^u01is~GI%sDe1)=nRFJOS-4`VS{q|hjTH^B;^-Ai=483hRG zd<`@hP3YMTx|@QvSV%1icDS(gjJ41_RNDz6l>oSoi&F1;0D(+=L!TmD&GaJPoYy2I z@mVx5&vEUb18D$gpX62z7HGw~M&I;NyhZWGWo%_hDq-1;{*07R|19J&1lDXwv^8E! zc0`|_?4fm^*c0|Bp~z*yUwA_KY|Tpxvx~Xp6Xz*m6|$YmKxeCNK&^|4w&Ne2da?Y9 zDcut%q5MhQiaq*g04T2F#KXtc>w6M9ZPxUWVl@MfaJ{Bnfv1shPyCK|Sld}1>VJa@ z=>@}za1d?5CWUtXce9#Th#C1>jYDQ23_;s~$K|cRQQH@IJBx2QSO})idOSR!iKupH zBI-r=py76k%}f)OaIgy{@xx*Q7OS5UQ3!9_LFcyp5!FgEQ)TCkQj@Pl&vlRCX-GKB zcjmjX3<0`=d}D*N=fm=M`D>#u^H?zU6=s!`lv&@%CM}(iX7oRHsK!EVxeDTnjW4g| zzR53v24Lo<2aqo&jj?wtUO=tmbvq@l0m|Y;~>EXuQkyARp8D13azbIQs>C81#KZF`->-D zYhoZjC0m?3Pdz%crtgJR*@({a42^LYPN?TG)TEGI{K~EJ(JmK2^NnB7HnLumD;ns# zL1p{xZMIFRk3w6RUml_Z`HeQ8Glx3)bpU1y>2ThLIMkJ2!s}l+)H@T{DShrRT;G^p zMt@Nv>-uR)xyCDsl{7{Sq1+U_ zdoQ{rGt8B41Ni7Jvea0-^A;h<=+gXEkv?T2+N|qWt>=v`kaqNS?2v%fiG*2$K>o^l zl6%fy*Q1IRwi~4UQ(9|v@Zf#^YbATuBYXgdhzuR;WF z`Ew&Qv2(*(aRQ1OM&jqLzp0D7Un<@CXp1xu(y zb)Lqd5`au9LHyTMBew{K_S<*LKDArU;3H}O}?}5u*iD5 z&-)=j#5{hb!iSe5mO@0zz+74N*r7JHyVFwIyUBxL#>L5A&GxfgBcY|$&lF>8y3R*A zO}chUfY}SK&MCDynNyUr5?p^r)A~|cJuE*ysq;oC{j?JbJQje!N{FK=Yxv}ml5E~8 zVL!HnA&ui4-LqY;XO?=wG5s-T4t0d-KbK&DQ4V7<#G#aaH^FWG4>UhMQwdRn|Gqla z_x5JH|1_1SjApgWY%Xv0W5sBwLv@|#P^vt7+jO;>!!RLiWi2bnQ=rIRDx!E<$-tPe 
z^NrIhoxmkYl%_=WCGd^Z7M7B9yH?czXHmHiY{Kb$k(IG4c+fBJy@oiHX=QA|ND=KQ zh}jwetvis+4I#jWrMtkwNz1^vdt++nl(1Tv4&MImzmd!!0e-7eLCBmkVRNUZuz^1I z-Kc23&U74L=}!6N**^bM;(l$JFz?(LI?IoW(#FlS)pr@mrfs<;&XQ684Czr17*^DS zXbW+uo4=dY;izXxj&hevM_hP8;R8l!dGGO+SPWAAr4*FL#?ZJc|D28LKFCI8KiBV8 zD2jYU!-|yoN)3hi;$zKPa5f4S@C{U!qawiJ;u$e%Gq|NtBX<|z27PjE5)v6%-Q>hU z10J2jo_oTI?;+)id(miHYXlL=U*+9Xm@6Uv2HLV=U$Kr&YFEE}W);pVl`LdyZ|4q0 zts0i+RSVCVxc8Li7YO=w8cqqVwC4k^&KW z_KBi&;(+6~2uGLj|GyFpEgBSBut^#H60%%52&%fs=g~@xZA2to6aB5_CZ~jA#6$8U zZ!^TwiTi;~ir}#lUzZK*2y6oE-GOd@P1(znd_v4atuN367p1;^Dub9517i4_Yuc|; zU(X1~cY)NX`%^R8x%#w^Oe)h~fQ62$eIq5h4x2pM>z)q!zH4_XePJTWn)J(08u6!O ztF5$OyM<7}xWInRaX$<`E{AL)AdpJy_J*v*MW;J^DF7$lXvR7vd5*x~*!7^g&bZ)# zsqv%$cq$b%B7KLgrO`8!I{CE%W()mr_J)|0)i2@oFHC9|-yrP?M}!&IXwj`8B2tvZ zOQ_EETWg;;(beIzIEvz-CiUq*nN$M6#r($I{6*da%r++hUiYkB#+t=MnvYv_A$qQu z#GDgifO1<~0TYa*;n1o&`xu#fQX`dExy)o~F15a@dyQ&u{;^3Vi|COjQ-t*(yuORT z>L?Ox{qXTalJn2X!*t!tsOFwbpAzM!KzxK`YK}37Bm*C>wFg{#PxH+^gCE;Vlhs&y zJBo1vYqqz^UR4s1z)yPVQ&YUmdm`i1{iG?;%=5F#co1UKBw+j&$d;UY$E2a}vxs4< z&-$sh!=>*-Z&Zavg6uPz8@CR_&kX;XcTKAyy z&~A=K9vvCyFTw>K@k6No?uP3>9ZU4+fZ+1rm_N5bJC7dfSlpFuCV@WcQW~w_1GawuPG1=hKrH}%^gYjTai7}|7-=AL z@xgz_wywIOk9llT%sO9^-*`}=ULYtKqkZxH=<f8ErMO0ry#iv*t&C`M4Cs-M{ps^fG+7~Y@Ox(_}q5m z^_}Y__-#9a9A_qVgzG<-V1O|XV==^}JbpL9?~KTjBH#6;%`L_mwk@@`>?%uAmA~^W z0z%6k$!!#7a2jJ_iV-RTbl1XK$j zvbjEA-_g5R#fas^jI1}AQ^&m-`^wjf6;nO02?8rL8zn0hhQe()z@?@HVkyLkf@3#) zN6Bfu-csp!4@8gKFMLW^bF74FtAk7tWL~5(U#u_v{*?KxRdJ7M4C}cTjHeUrEr3cd zamav#uA|N7_pY5cJ0=kqUEa;W6KTzls>v*Ij)$LRqy8DvqaHA8q}}5e9F>A*qk5s)DC5hQ^S=ab$ZJyuZ~Dm1 zE=6ECk^hv9T6*98jmL}T-ub|!`uaT68`J1FEHB6vyhI~u3dfh1vH+;B-f<-taEeUY zNLIf}ss(h{V!u(Hs?|2Nl~TOo@E-hmtiwcwT{G_jDEdTrmxFTD&dG{=s)Qo8`2oqKy25yR;jbk};tWG}}!u4I;DwYj3m z4Urrt8q&{$LGR!{!Hv+|Wi6&GD7Io%Ci7BUM-QD*u!Y zWL8vts6+MrM~8~Kh~WQGF^JJI3H6(r2Ihq1#bogZidmEr#d&vL%gBR&9L?GTS#k5O z?Tro-(%#K8B>=&0=YDQjZ-oohB=bGLXoD$_lczZTq6Xt==43<0d=Ss~{u;qkpL|G`BHdiJ1?lTtxoq^+o8O3n* z#+5pHG!v?qqo`B4>$VK@XIo*)U|-=11M;TZaa-f|{t=FAX(E!Ecc!(%W@+`eI+C;{ 
zAIhU%bon_<9*f6cFFWX<31S_G)#LJ@=f_$UQiZJOVPOB@T|Ze6WXK4eYo** zII%1nTd0(D;;2a!f}gXavIgE71%xsfr8|4s&YO0Z);{bF&SMmIoVl~Cg+pq*e`B7q zx%$|l=tP4^>xC(LYtnff%6mR#+{x8tye0(9niYaUsKs@C!zX`$h5Vw!I&(Xys&t6S6eiPeRb_ zFFSLnV~GE;0tOiGFcw1`s^E7MEXVg|?6ztGd*c?y1m(9L^oBjIZ7%{(o{1}wW zSCl0GJ&ld1k-njC7CrCQs9UX8?y}QaHtHW4J?;R*dhQ_FLL92rX7CRuT9;VDZJ2pu*bY9^^$~U-Jjh1%yvH=~NGy|H zOycHfcPQU3X20MC%|>12*rT2-QLrN4d?>VFllt&W$kL8|y5pFhdy^49qE0-UgQ0F@IuK1oI$fm()pb1h`Zn04(uGT0 zBpb}}sA6Bbzlo^cOo`vS$)|Y9D%Q*d)moL=7GhFNh~fPcjFY6cG|7)gkQbhN%0=D0 zrV?;uQhL`f2Udt84&0S7@T+N zy_$M`zA~SEm@tmoy!~-ygbQ4lFYsMKmNiV()OsyLMtfo1^98IV@gq&|LH-cfwecsH z^GVK3>SXA^Y!MvJ-Vl@O_$9plg-L~oR-FGn$KIbQqpyigx!ef&Xs+etdT4;IeT^d{?_Y#Bbs~xZXcWP2f z-E%hzTTp9IPlVYXyw@C*l`P{IY!FXy&0aZf}Si*z39b!@gznkP)igVXx zKaL|- zA?IaUHp%T^+KX%-ba7nn0-(IIQOqfvMc1u%C5Vq*f%sa@JWf!SntI7I(cY$a94N;o zH6Ay(iq*1A6Mto7`5pn@?E*GBf%+SE8zTdnwo(%`9)qtI4|^PNW+`P0hrrlfkiA5RCB`#ytx^JjRfb)Tj=zGM$;! z0j>XQ4UWY`hp`x9Qscjy;7c4S9}uGIZax~%rvC2Z7g4E=XlQAEtzAgpvI$m;k`QWA zBPW_vkC@!Ea50+4HH5yGRX;q)J)dKNdFLrsow)!am7}<55TMFDQ;dQUYc|El4nnwR z?zk?cAyp%Lp0J4Fc2i1^>_Z5w&}@_lR>9>h+5`mI_EI~;3nQd==PV585-%2!Z3tcr z7`ozpN?7Hai2J7>)s_yNdyPWGmh}9!-B;9`)&XYoW6un_8wTxxyy6dc9$qw}+;8ai z_ae??U7EZ1EW5l0?S4Yd#Q={>56`ku{|nQDJ}|7r2hkQ{Qj5Qv)#l;uf{Uz`%vpFE zcloj3zVds<#PYsV$-MI$sD^#z)ZDkQ1$mXID5QW*4@^B;G?*2HTD`A@tdyd0vwA}F!fRdX?n~X>^)xGiDaw8mJbl7D| zkGu8QP2ulHGX>`3f{q;uuclT)y6@)lq!1sioX0IjU(DAsLK>W-WpDhw_pXd>of6lk z%7!s2j3FksV03^%qN}6IyU9sYH5FDx-X0xhR#)T1WYdWQe*QD-$yfIO^V4~e`VGokhY?NHP&rK5@6u%{1?tFSKfh@~YzKCz2i&Y~4D+t-xmKmE z3b)28vToEz=O&x0UvIjUruT&|58_bg5yNvei${VOO+Zt^QiicCk9>D0{H%{0iWT7M zd>8TLLvmmmOrV^EDSnUS4c+btHR4wcyH!$fBd265brll_M$$)cm*XidiAColQyco6 z_p{gaom zTcn5cHrS!i5q=4;f8kKy*KaGQsK}6WC_UGivg(^WADTuL0QVu#2D9FyaFMDW>QG<* zqeHRCx2K>y>GW-BU2R0YsquAbioT?65-Xzap?=l-yldP5+JKcM^H96J<$u5*9Ql+{&lvfODzhm^EY%+7+2x;>x})9|Z&$5oG0 zqU>~c)G}@cFZ;E$wNd=~mg^;h>;5X|lnc4O!7-z|d@%v?rHtUPzq2ir_I@Gt*I1~V6+u-``TUPGkKI&!_BaFR5IqTwl z&Ta0|8_g77LXu!MK-*MawV;{2Z?`Pk%~zqcmMz8MaWqfZ{5j%{^aRg^^<#&swjoho 
zW0X#~e$y;mJ-jb_UpcesygTiV!Zg7nH^y4?Qv%H9EPPQo^>d8`U6K?z6;X;|oGp;F zYNMgiC;{s)WHe4)KOpFV*U4HJ{PJAM_C0Q?S)2%y7RiFW-&|=Lmg`LM2GUx z(B*lsyS07myuZ4@eZBiiJyMnK`3O~mnN}-<+|P4>E7|WPbyOmW@ zJCXwk>Fa%a_Ekm=5Lltvs5KvU0YluPX!js?xVo5nCluppL+XLMvQMKjH+F+8GfxTY z0H)}aRVFQ*y2LA&x3pU&%e;xHZPkf@lT{jM=aLCIAjFBIX`<_ai}aRi#gx9RQ+=_( zG<%L`7{m6(Kn%ip^s{W#{{|J(3x<{AAliZ*3Z3?Mv-;jBGe+csgdxNH9Wf~md)Ptm znFpwhW>Mo2qvfcB{%e2EMvWb0qw-j-3Gmt*tA_IMo+Dj-az#T7oPLCSH|pJ`d+DNO zM#%TFGq&%>GR3snzLv6xT4H>`-DBQQyG96z(9;qXp%vM#sN^cs$NJa<-1Evs1vzqJ z1)VUpK@V4tO$s}!5gC^(a$I4Rf#^!U-JIRZmy&tz`OjJ+lVKPMGOnkD))M_eoHx&~ zZ$n$9;yxQOrg!@O6Ar4k8X6*t6Av=PcTNq#C85R+m?4A)s#y7o3~{<_fg&=9`XY;&URq|1>>G57Fsp}zJ@evL#OpjT}^ zs@Cth*jlYNgoh*TP={lHrQ*mM||C+;2?= zaF`m%4P7_$rIB>#?&dJh!?dGMZiZxfHtBk9mcyWhoSD?W3W!vPvp2+~cz+46e_>LV z&JXNcn&B;_(e4tdZ$^8eBF42NXvCwj&1o)4(CJA+O=|oStNqo zH33v`*98tq`(I7C*67-bj)orw@Jjf_iYYaAFH7J~W|U8cM0Ts^ptuKxbkmpT`17MX z9h;O_X*aH7D2^B z^tkP4uOr!kew2?I+I_q$+>g$~3=yRNP_hnw3+iAG=x%>pfetsJx95Y_#SdCUVZdF- z7IZiYT>NzI&_?jXi2msPa}9iE)Q~m=5Oi**KA77fCMEj2Nfxt1`1}l}+U9v2SW1X)i_z8cuk=>;xp8hYkv0;P`ir!P6?}w?8x2V z+t;Eni(fr%r}bj7_F}|E^z#fJt@U52yX;#8IM;H0+xw(kOv%~o_O+TFV=C+dTRmxb zUeCuDxMa)36=&I~6G#PjgJGpTh_(=uQv2Pk21&6AX%j6HIKPFNc>JZLBZt#qeA$+N z>ruQx8S_+KC^Q>217%ET>NfIw`d$T@ML=jXy&PD-1gwV~j5C>YFGBbFt zY5BXrZDV#!mh!;W(Dd<)wvSap0KzDFiFD>Uvba~^-{~8??PT#T+uuzxwxkB>9H@(k zX8+isB3WtUV5k@NVH`|MS^d^U_$$1(SUg+hRM08!AhopVo)T9>Zag~zUi8MI83A}K zwU;{jT}B$#1)B*~Eq6t>^pJ2)RHPFJoH-N(S2`%JV29HEC0Hv_O)GAw4K^BSeb|w* z`&iAg6U^%m#CI#7J_5^TWv2X=?UV*Ra?**1nk|eb*rl zdkb+W7R2ykib-V0Wk)X0weDm%zYT?~i^M5M4%M?{H^?(6rxP`H4mEXm#>z%;;qkj$ z&)H0H>{M@U^t?SKTV|?*IE5Ve!wGX{awB__J2bfDn&KpcQ}y>k%scin)5!V~4tt zB3pgSwanWr*tAgRS@}x~%)!q0Yfon7A}%>$!_+@JCCVP(&&}IVjNE3eOG!jlLLXF; zTq=ucVdAH~$StDhzAz31)7fAIkvw{L%|x^;j%ocIh@rQXA!K>OD+T-U==hq z&Z}PB4f3%L0lKCOUxV3oOr4(th93eI@Lqe&h}v}o zU#wfPxJNXH`ShD*b?+xZo}kFA>|=+LOodBLIsf#EruMo1K@^Sc)%}{O^QGd2Qx?(L zbEHJPrv#Xp;-XW2@-qwW#8-7zcNuB#(^8rFU&xjS;v1Jw;z9}st`f;IWq*nLygza0 
zfz^m>K1v2w^ae?E9bxS?o+wml?=y!wqW7OmFu;t5u^8e|F29@LTS>~Io}sUAu7;B= z&GvEyQeC^brE2;9VV2sLZ}WHYyPys=d!j>Sx_`>Ne4fsaE0sm~@@<$cIv7zF+KV`E zCM3@h=WkFl0JI|K_FEc47w7CkhsDS=d028RnOxhgT+!6WvNYfl{UESHvr#as5)T}| zm|@9RF@6>BbpL$+4x_hPD5Y-)`YV;UM!qp@}8sE@X*->fRxpR-YO2id49 z;ptczE|jKA+$~5!4uw`O40$BrY!nC}LFUWuOiGIGPTMroYw6Y>;~ji<$HFVT!qR54 zBm-R!U=?+`10!{H1`q*7-kz2vm$P8##k`r&eZ@?_h}mZ1{jo{mgNkhUJ7zq!s&UxA z;gJkwIGBB}-|G|(mXU4==Q6H6CA5JE_gLO=UH+Olo+~w)d1+7atB9Iroo}pL4+xW< z@WT5OMd`!=XC?)Ci*OzaE!d<&ehFC**>bk3uY*Im&&#}Hcw&NCumogZ$cCan4QOwS z%6%CCHmOhS?~{xXl)jo}+ays)E%6qs^E(3lmI~tfI40tMaty&F;ou5#BW0(Yc&p$4IkeP#lpq~dr`XTOw`=gN>Qib@-@dDlNj{^4N z19s3+)ZjJL$qsMAE(&cuOdf%c_R}dsRZA2+8U#&o9UI%>9tXu=T8|$c+5-a1p9d_< z!Q2ipsg&PMGVc^xS>xpP1fFrwy_@fXGumK@7u5G|O#AY3nZior`$0`=;WQ?d01SZk z45^!82U&^)#RpNJUrZfI6CH0J4*JC1h;iSeG6A^8qmMkEM&Y9}F+jh__%x%En#X9b zgDu6pq4(nr456-LlOkCLYz$+T=(-*m=zLRr{b_r8fbn&Vu&tDmSzCF zTHmnp+|Ok;1N`?XVO9EOY%;sSjeJxC*#oHI7^d;SSyGeBu&azQ|%jX{&!LYI(L|ceSmHcj2 zGjFOtslI4Be2dz$N-{$>a3rHJX+D%r`vIzrdo(|Q1IwQ99TzC*tC8iBI{MDyNXWVQkyL8tz z4jgJyQ99W6b1q}}+|01bIrdR(gfr2fY;74H-{wi9dh@;En7H077mIsj5`-OfS2!s% zUa?0V&RH6#cvd=JnIa^O-c{^KMLKf8&u*r>8pE|m!D*2)WdRv4TD@Rg88_j+;3e-G_#)DoVW_q@eJYp5t zuKAuD9DE6|Xa{B|GB-<0j>{GXr}XnlqaM1I%bzqoim)iMgj2CzqWZtT{+YBmi=qag zUdIud=Wnbdc_-)*V%F`WJN13Gui}YRu!jwAtd?uzi9;Rz8vZf`5Ms}=O_e~S$2B#1M13Y&Kz{l{QhSM9N#_D1%lXgm82o%+UoR`Ey;fuvdirKu z%=rU{a`TNqMroB3aKb5@i>6(8N1}VN?dxX7Qz34@9#^dRGsi?(y(?T;KGDFAa`x;=9;Zt{slBQBpyRXH2+aU;`+w$o;009VbTrG%?*6v1)&!oVnh(h z*i#T#BedthL7+A8!@=U^ZhI}TT+e}Xf%cjY?I0oq`GIXeSr)WOYp>_<=k%~Z;70IC z1^4@Y?7<6H{HVZz4vld;)V^PYjuqUmK^=-2HmgG%>cgKVIRlBnABmmnEGlKDuLR%6 zySeaP<7b~^-(`^6(yOc)DTX@Km*Y9q+g&5;&B{W=LUS3aTbb{L-PEsKU|4bQxEbpr z5IVxs1_;y?Eb^JF2jP-}`io}nKFwivQhm`M*%Y|zU@aWVk$m7#ygReJaOjm6*wa72 zm$sizG0xWxz|9~lBK35}>L%_!?kD$ONnoZHTqZgFIkZ7*nWU;)>@IfcSUS9sO~K3W zo|m<)tpVtgn=Q_0!NrO6AY4QQ{9 zaW{S%@;+QG20!lEt2p=SOxtBr{q)(C2gYly@W8D6J@Qur)Q?z2mN zS7ZZjQI<1wBL13Qqn3GuGFWLj;GV79zVG;W+?4?4=s4;ln^a9stJ-Uj 
z3#sX^`DjvbL{BnA$ZNk`6|5~gQk0GyaOf?TYM_8N+j*LQIMgIl}e9jhE&w>~xF?$hZYg3pya2Z$kSf6*@B-8)xU`F~>pCUoW=G zNgv($yfAp6HMwM+NhiPSxRx&LSO%B$k@ZTR*p|n)x9!wb`CR!UTl2GkwTAa+W7i#v?w**`(Z45Pw%GP&Z-`0l{1#sS z!lZHqBwfbJwG;HPqPsrcl4;H+HC|I=ec3X6EAQf+L9!{RNqzlKCY1y@-m+_7YC(D( z_@b8tEo;$Hp;o&1ng@AVAfAGVKW9EQaK;w-%8cK|xa(_g(CV#BxM+n!)W~${GKMne z>B7=KV;-2);ON54*6P%*bY;5tHDR3{yN8t04rMb;SDW zdO{sP%lm6QMt8Vk-C9x&HK}jMF)0AxySKx zMlgkYrT~EzdT*2!D0IYqwX^b3EAeUat7Fv3^GI@@WlDw5aOn{`Tdpb|6V~Yaj3W;N zzL5|}I&n_RQ);hjeqA}Oc*kHOV(#pmm$?gYL+`!Fk_TaVUt_!6B6F{Z(=?e$3g^Rj zSW=r8OfU9loMfZ^hR$D_z_4<_qAl2@&R+b}tnP-~J74v+6SdR1w*=S58_~kM_)Oj{ zi&~vT>-8o1u3OM-)CM#gb)Jm}?`2wEhiSHAGE$+!wr;Br>!EDa%K5};{?PGd0XBX?j#Y1N2-v&fkCE4gMydS)5ZHoeI_p>_Ck{81=c+k2q=u zaiKR(t85@}0}t2QNW3J&>_1%|m@Jtk6XbQ}Td{RNs~_BJ4DY5laNL#pG#9#-Ka?d* zZe}|M67%l3btYAU6co!Sv!x<-HD#rq9H~f04mfcr$VXK;p}4xapQZXOSX)?{-kr4y zQ|J1$n+uz^GKuqMxDu zaI0^HTb{_HtMb@n5v8*hBgYJd!xDmcMlrhHejndCowYC%ut&t_$VGp6ife92zflo? z70|X@(2!6-0s6n8t4WN)*IwBeI#YQ;=lqVn*o6Kp^M@0M`X%oDO%==**WSDhcBr!~ zzlGPoaH!9|B&DL6)>oc(%d~P%uG2s4jIPF3O5`dkJ*%0Yd&d&$P@DhJp|l^BmYk)- zKwf7V2`w$144A~8xj(t-oPk;BxIFVFp9nxv3aY!P()46{wa&P=v{7bmL33a#rz1mf zK#xjGN0j%#p|Z-kwqgx1blRFt1k?*DJ*2BafxTt2FESJ#(aL_l`T3YA-)mwKf48f| z;9oIDnA=W&{>DWvW*M{jmcmk`NcBFiH$YSDmn+Zn4WcMDgLCenz1BwL37Vn^=IXh^ z>Z|i5`y&tVRwb{Gug}kiE^|FBdW~(lArtZLE2~eUYnb2M^XP2?ehHI@73aWjez6!N z*w;Y`f~kNFfs`N{)OFmTMMy#jQUV*;kJCW<-5^arHq<>q68zBwDS-~$2vR#Z0ufR> zya^tz9Fz~n@3ri|g8lu$KN5PlAQ)^Qvv2@q4wcm47REjEOr9?J*5;yRLz1iBTj3l58Bf&sk4 z1|{Ig_kL^U6K&hgCx{QL)#$#*O~Q{wMUV?=pv=witusD9aHz}tw@1u;jjDsQRZ+x7 zKW|3eRX%5N1ipx%%7&CVqy0aVB#x$sy;~0fb9O?k;e=L9j=H83N5Qh@^ z(*&O?)aU9{P(`o-uh|$t3B2WLvuAE0@$DzeZzb~ue^njSp|+23s3f3|7WLCTBfV(lz?3aGfv+%$J^?&y_Y!aB$LQ& z^NOWPA+SQTQ5+54&y||*J*PooNm(~;?=4%fd>y5qYW)uLtlhaCRfc22iUWE{?3bRz z`oZ;*^^BKp;jJQk?T5j!S*MU6X>a4=Gy_s70$BtNwh_ZDK4OJ~sDdDpbE)I!zPl)J zwfG3d6VIPyqyCZ6gAOpPJg{gBaj2Vrn$_9Cz(hK) zP?H)Ly~|FPV5UW;Q*Wr!lOhqB;-7t5xm>Jb!@H-qx!}0jC_V6+8DceqTxel*mepLY 
zb5>+2`MaB=qUS7XF*@XBI*t^jBM1EQ7UAGZ{vRrUq2+}_3pOd`-$K^27I{XCqkxip zQ6-1Ds&wJn?Qa`sczP1IY2(@##ZuD1CS@JMKadba&7Ez(p~>~&yRQau@#QIPyiGpz z^e+jRExx_k8)8yAzlGPo zFsWSi_C^qnZ+?Bu#R08PCXJV~!{_UK-f6^3 zf>^J!9+=cpf52B~q~{WgPaH2=t@GrF2Kw+`xr!m`brrw!DLlz>|KOAl%x~PXHpL$4 z1HJ3=F7&!Dbjseobk_h~lnYaRx~2f29I?xooF`r#vB|9waGQbm>-Tk0%n4cBCLCC|N zgB&hHn*VOwKX^}P{*wCvA0zSC<3NxmEZ8yoA_P7Y*uB8FPPsukAsbZHVEHEyv;l5! z2Tn2l-4lV^1i?T1Cj_71=TWfN3Pzb9Hn&4e%Jff@Y?PiHj~9>3K8_YMre)z|N`v8Y zMYF_drSRi-r8-r#XsAi;9>=5rfF}v=y)}t!lQki45q|tDgd5({2;=OoG_FBoZZ7Os z$pLZQMcwS6L4ho%R*O`>Ge+AHx7aZ6^f415=s$aEjZp*vIBNre4~P@*d^9|G?o~e1 zqb(Ler;>mOcn(ga3#b-&-YI0T5q&L`=Jryb4#D#Ma zGWc#?(f~|ckp}7fWw@$jrFLIlA57A)SjVF%QPNb92QZUf}*CQBUfxTD^ zF)7#5%-~J|_@-l`Wx403C$9#+22o+| zqJ9t@!S}WlA2Xm-6X>02IY~$TFHT{7U|I!X;TED&;CGTo9pbw`0OiOx{ou28O@6Si z>b$HIL(ozpEL$TU-NrFQCps|o#=)3fjQFw4R1Q_=;zF2YQ;Y=o3nMWz-aB6L-|9-JkNDzIyhgd&01#?5P~P-TI@SJ! z>Z;YVV5c{+b2!cEG_vZ#=vSVznPMDR)Ue**7yK8d9i7M%BiBRkteP$i;Ip#L?w01e z7LnhfFgqr%$b&UCJyR|>i9HN`d+;8zx}oFnnlO$kmUcwhYQ_ZW9`q%?m%h^HD7mC=M8f;pJ{0$Cx_X`?mLkI)5-_bD9%>Qz<^)PZ z1Ln^zes49hClp{_Rk%=a-#Vb%&B`wxVo@x}5!st27;r%dsLNVZFFPEhP%uK0g1{El z1ev*4w&XO*ydNdbY{}MwHtbp5*OMIU{X1ykI2a4f60VUSlP!%M)Shdvb44jIYtRw0 z%(#E0@5&`}4W0rxgEa~SRtHj8(+uf4^F=6eE@cvEckAj`uOp2*GZ<~3 zSk!*p`sY2EE#bYH8)8urzlGPouqaZT#t4o~X<-MWvNXVLzLut(g2L~f04F)C_e;Uy zmm2F{>mKg#2>(%|*4vH9P8SGwaDT;gUUXsK#vfBo81)yZ6P*&6`$&^71~}yR`NCCx zmo*-_u9PqF)lph=dzGtEjL#j|5t2!goq)v2O31waP}QJY?yh!SHyTJ&Bd{Dh>c+cy zEc^Ldc1k20VDZz+FT8--n}btS;R&AOc&midW8nA`;tdPys{Y;-pz;YVOsiuoxM|(40 zWA=F^VY+>QI}QE%T|ww|hB}HC?2^N511wMA|G*Lg^$_p_)TZ{N1pe@153r+tdfeA3 zL1-)Zgl-2mb}(-5R0s9H7H}a=Zim`oCM9^N1yTe->sWi91+Bmy+zyq06rs<%{{kU# z@)~SjhiFvdpGLX0SkLc_CCc|0WMfL}hegjj&^*L%R1%E@o16(QXI+pku42zVPnOhxr8K!IIZp zl}54S-*Sk9#eHy*-tuPp!yR~3BNEpWC&4@DoD$7W;ah%7Sfz2N%E>rN2n%(iLoQ4l zcjZ(ram}PyTQ;SX>Bu41rO_9hT6X0u63_T3E9c~He`*&16#aA-hHu^I4KPC?zEOFn z!fxTJTGWTu^I^-jwjS8Kt0x}ygWG>9fe9Abi^dR-%J|a=2UOmam#!3bo3GEbCcJK? 
zX65m%WUIKRcld?(v$okyPnbs`9qCbe64)CUaw;hF>4{IqpEf)0c&mJh8SahjknGr| z-+?y(05^qU1`--1A<26ct zK-NfBzH~|(d#BeM{y|ZG1%HQ63f0|X#@l|O37Y9=fgI$Xg-WabHw6;;(R1IZ+snMY zb2D8n$^8;Qi~k}8zj83q#Qt7uAtNQamvIk8H&4S5b`(k0mh)@#la$myG5Vt&Osgm? z-a%tkqD0 zhA+Q6JTlCwa0qL?xvc2JzGR->VCl-PHfM=Jg65=jb4s!b;U%xU7+9~xQu(ml_X%Y9 zWSOT+>`p;p`ig4|ptQ7B(e>EtT{xlN^UKq^o>qdY^hD{mM;X6Rn`RbLZywlG;1+Kd zE8@FrBpuqbb7-`?ZEEgQn`&@!@J1OG9A`omj!A9VfV^rQ%T*S5VV5kPq;)p5PfgJ^ zy4X?uIs>M>s;a0*%F>YoPHYPDE)LY#G+Z3jL+#5R#rrU%YRmxV$IA3K+TnS>mCEup?4rK(H_}4;giWND+ zSeMM_N!CWiYyECE!|k`)%(@rL_H8PA(Pb*{^lHhi8gy@_0%4;^IZQMtqx55jxcnI{ z%6Bl2$<~^Ds9-w>N>_$|Etg-xYD)bZW0WNt1> zDcX8hBJ^rCkgi2n(QV$X#&O=o6YVz4rcV7wo62*R!>!)*-vzaIO(mcg>~dUOtKrK< zz0_ct5Z6_k83ia`^xE8#+(eOYaMhd0LGmHZx^9(Oc_;Qoxx*8lO9+$)Hl=-++Q>4l zy`qQhyNMISdn~R&T%@H*SD9Lt?7%46SI1q~H17(+yQkhv48wx2mpoh2#@&HZ9-gX- zIfJltoZ1e{&wv{)LCM*Ymv5~rs)cg!oLcLhXl3=C@GUW?hdC^h6%+xvOUBcQw&EPd z{H&!Ecw97WR7x{& zDd;D$5(Gio|6xM~vn&Cd-61yh?oXrKFKgJ?$lc()O>LDY`N40Q@cy*c@L1gyqHJB~ z=T8#dVK#+w9Ge0Fyo__WFRE^CTWM~@ln!oChZGqqk!Z#b39}iBR%p$c15zry-xaB5 zR>={cOcWwuXfqB8%5Sn>TD$O(etl><_{D)uakngtw)l7_>wjoo!^vx;x0=7$vh_Kk zi}AgHCrHfO_?Xd{PAp6;AybU+vKX7}f8BDO`UNnZ-RR5nwSilOSt%^cy zIWHn;Y!*}DfuhdJ2RtiXo7xB1qIlT~YA~BZJ;J5{U}I^p29rqVRlPxW_#kPeOlU~J zTbWzC-0X~>Igpw3D*$vZB!TD8g$4yuN?h_zmr0(PV7mCN?u-F4VjE_8h*=wiR%ll0 zB0LlEb2dpfw*1&#*=B$2Gq~<#&z4sL`N&?LbI*Tw+$H-&C_>m;YLkZTUcNWaE+~zCR8O^@fn}vm zL+_Fzy)9;K`amd`KLlu)7H=MYs2J;gC@VGJB}ixL5QTkH$_vy}DR8O^J&9rgby&*( zdnNKq2}ftZ@U7Ge1zpev{tFdB#RX(Vq6Y{>6`3Wo(SjQp6Jlo2J5ZsY?YntdRe;0a z>dF8@&jgYT$;wN7$?NCNnO;ILPw0B`#Y_B{(Us+^T*E*?N&Z(83)ZjS(E{Bs?x-XHwK<^2D?8O-YqD6e3TTKFwm{e#!S2TQzEGMZ4I*IZX= z*}0oKtf=PH%2IlL#vsD|G1#LlM83f(2)q}ba&a1nSi&*EH13lKLcO1n9m;K0I?O%| z@uC;|#p$J+oyuJ z)-OeC+xyO8H!v|#5XF2_7%VN*=UEhsIPs{Xe>K2tN$t(t5RY2_Exi7PM}6gX{bo{P z$ukoq?r{MRtFtlsLTe|TAfH<0vnPYBr|@AOh4vpks>s5lDxLsyD{ZJM@^Rv`w`L4; zDAtknLB0$QD5oMNq5!&QUOeB+`kNNMhwkcFq7*h86uyl-qu7Z@;Hk2yl=E zC-y7|EItR;^<(&6C%EBIC-@-Pihi(hf>2F^od`1O$1$+}UmxM&K!=+k>rl8IZrFDy 
za2u@shm#5ZJnnD}%(66WUI%*=I>Mhu`DsvjOw16TpUlN4TO zVkA|e>oAWxb3Bil&=7d_AVq0zaCO+Mo_M9)MguoH$AqFG*wS!dr2Ro5aDI1Il~u*K zTZK>wtG@p87YVVTEc-r|?)af9t&8t7^$t8LQ@<_rZb&K;wV#>Qsc3UMfvD&R&XyOs zeXViGr)+GGd!JO}^X?jNP9ZHbhF2EzktEcTcNX!r*9e&wwc9cs1A7txF8?IXaNL@r zN2GC7LV8YbZ>5Muv|rht^n0Q+S|%#*bmCEmS^fJmnBbdx(HQJe=x6^l!YbqNxjdO4 zYjg5OFpbQZjIRU}&0TAm&Zi8#bY&>W_7=>e&K}`WNr2EaZRDuJH5w(w9toVgQ3KPy z_tV;@l9$GVGrvBSS=RwvXRR&iP-1-_k#83fKDI;8B5*cZSUGq{kUJmEB}$nw;2CLFJ^r9&8MG{ zqI=Umt_(^qv!3E9ULj z{*skKhh?Q=C+N*QQIu-93fmr;gqwr68UaL{n-9la(7}nmb#s5h$eL1 zJ~reC8ga7AzDqG)*V6THHuIxA&k>yN+qWrFZ#Qd-sn+RLT0z?;(;Fa^?EKEGw5A-Y z$O-=(v5>%Hveo(4Trq?&3vs|@+Q7jJMd;=7H{IId2>Dq2_fphfo2vjE?k^-%i9YIQ z-`|(PY{~A;-(Z_Ur~56u{)J6B;o_G| zaFy^h+J`m`He-3)lx~*D3}NPB6DJPUjbEdN*%Zcqw5i*fE~L3;RlL_}bKqZg&dyjE zEmy3I6a<9($cuH0N{Ioa%4FN$D4v<1@u1S()frz7ds43#mt17x{#Ll1)uvSCz^0t$ z)CKQ;lCQ4KnSV-_VSBUO!H7E@HEi+Bo1zVkySa|XWcic)#S*UeVz?xBKBwtn#0G)s z7&|vdlE%mz+_WhlSeAjh{^H6NyIXob?s3NbD19>(*$iKv_sQVegk!ke!eb}^GU|jZ z##rC<dC}*ITG6<yBz!)0@Q@{hhCHS&gU| zl3+H4c|4mMwtq!nb;WnnPwwM8ltmKa0*W(NG;d)Vt>h(qeIe+H0-V;V(s}Dz_o`CI zF^d5Wjy=JVX#1X>O*1z^ltUNh3gLlGVV5Ur%fGYIR&&9LA2+u;D_A8l#Fj>T zMnvxQbH^mOOVdXhPQ`hPaEXJbv^a!K=@V1q%LTs`-^*U-5*&DGfh;GBT$!LmvI}vd zwlh+N0W4zI6zQabSRX`nHwkH9nmMtleSrUY4<=ZCFB(H^ivLd|JZ-mf2Y=PD3Mlk? 
zEKL$d!?~t$vDY&Z7gORoH3FB$3e2XkjU+#?|e{Z9#ng(%&YL+5q;J!g(d>!zC3BBDZmp+Gc`oYy5O5fY+G?@rzO zLtIKP^r_!BKLKQ4wNE8adv!_%)b9qX$swNV(@zZwEud#qS#Yhr~vaZxZMgHdH zcdSnUmk;hv0<%{}Kok=hZw2X-x=eT88RUx_7qwhk`rsb*9(dIA-r#@^@7Gk)Q^RX0 zU(eDyI@}^2_>8%#Qg^btv2o?rzR~hF+~DaAf_NXMo~^uV|OLyW5fJa!WZLu4VTZGVp5n zIHPGf4J6;2iF|tEQAbk;W=m;r=7xBb`fuU&FFfjTwrWoiWAC-BX0I`?r`>EMaON9b zVfj-@1q-c<#Iz|ekHYy+9+d?6NE>}Q^~&5AED(E5)vl047 zz$k>)$EkR@aUm>~?*7sm_1V`~2{^vz_LHF6c2#RXC^_(`l9w)1-)e(hNR4oPln5C4 zo}|C4z7!F}pfpb9aeI6I@iAH6z%u2%)%rl$pNk%-`HKAYqid^BaDVmNo?#vgi*=$T zK)vVm)%Of?vm%RUJp-H(2U<3LdQhdgI7HPZ+Mlv=lmRFJ{NS5~`i`k*S5QBYw%?H@ zx)o<@2w%iT_)M68DNx`q9`)0;|LN`{h~o%Cy1}CKPZISbuk=^Zf*ynA1~~&90@Py; zp9KWgAy^WL1u_6!{ml*hfj&3LOM{&RDpv>Rcl&X!{cdmzY$fi>Hlez z5mMRX`6mShMpt-q;#ane*q-NdewIoQlCJwIia(PS4fCjT$ML8nz;lC$%qB#NrptTt z@l^3As!j*eL|XnkD~2q$KgPxzGXt^@KqAEm*_@i%cd9--%c$-O_Mx<~Jl749(!CZr0$v=7hw*iOoe z_J)U(q^EB(VQA@^npK(TCqugU`v4-fvLXY~Pyi1?W>cz&KA)92-R@{NCMV7Lfw#Qz zyzfpt>Ib|3R00#Mycdlj9%b>T5!P`O`d+@8DgUrvL)IIIXC1Yg@Rj7qkbtaa#(OEk zHtP>)UEIqzKm_v_kKddU&Zy57R?esJYEtZ^_-*!0$dAr$kv; zCZAF`#RmZox?|GH{|)1#_6_b&IMf$bD=ZLswKI~~9MY&?={YY)koQTR1Ds;=DR$zg zD(NJ2+?<33sVTZLq{HK=#)lY9)Tqf{#-3!Q{)y2a?O<9}VDT2>Q4W6^)+(RpW=_wP zKXF`e#CxbB_Sj}k(LHt17D>;7TM*|mRnK3tQh2beRMA6QUFl)z2!y%k5>$8c#XFo{ zWzoliMq)$^<+4F2P8Z!sH$7}szWZ!meeQH1P;QJ-~uDsv$Vy3pn1lBo*z5X@k(O?^v! 
zP*vkB5&C&cX_m2y%l!W7j{5j4Q&;j=(qn2;h6WIux`G@LxXM(*gG;qS$5Ku%ABkAL z&gF4--=?%O$fCDQc${M;apsKQc!;6uj!CWt(tPk2ec4@!_IUM}Y~jb#B+FyZv)-sm zm5a^kZ?%8_-DIx+*+jT4N(AbAmww=uKgWX)pp??W$T&5*wo&F`k`F5_lv^QK7D1wY zz7l>XHg)tb1(+?hz4;qrQ^CK5*T1l-i!TL>dxK-zO61GL@)5s};-P9)Ab%o?RV^KD z#7)PugxM7Sf3hh6&`UwEtSic{Q+GK}p_lWvhT!?#sz*kThvSAg>rxv8-T{dAjL)XB z=%Q)VB|R3GKGSAuex%_Qwirb6lTYv>(glFUn z%_)QjvKl3$R0QKj68yQ?-0blr2;O)othxXkey+11!Np?iJile1#7QK#U#|TRJ%V;XxTBQ4I)lt$fMrc)R_QgG$ zv0d`SrVc>;#{!t(JA2U>VpA!98sUjV8^e-GjLAuo(msWd#=IQO)oXXw-YZeONZ6Qi zHw}Z?6v2@;g`b=k6uq-}vlR73*Lj|f29>xQ?;0SqLbFnm{^&AZ!7bmJKcDyHL8_vI8_B3yFvj2z zinxDuY1X;)n6z@!c1R*EX`+T)pP$ZB_Lu*fCjD}ix5pKMT0|Ay$-^7i2tDub#)!*s zXHz9dHa$}11vZ^}B*Ubmr5$KjyLG|oBrElgkREh^Y1M$mTZm0%|7lojmkpWq7DU@D z2p23=K4^vzoka^V+_>f;gz0imY(?(@EGtC_%}R}Yl)D`mH4?F@ZHYjbsE-Z*aGv2H zE0qKwU`!Vd>z! zuR=a1uSWb;Y#5vTCz}iKjJ~HL0u+;j@yhe6S<)t=`hv5*9;y{itq4C^Vz0pnMO_uUDb3G7nlt9(q z_b7F%F)CJk-do>Zt75>(vO3`?yFHv^i)9CyD^saVoLd9 zXRFhOi-MnqgU-9)Vo%nt6%oK=nK-2HI;joL0_uw=nWs_i2K%PoiRsvmFt$U<06CAJ zc+}CqBw)6*_GWH~M^*h6UjM?Q@?Cuu*#jiC^}9mva=t3QH)2n)GG(XSn;t;C&}L4l z4D+Z9|Iwq+zwJJM5PVnjR&D<1B@^~4t~&1Q*LB=wL6&C)YOlhN1L)3t@n!aBm_Q~h zy0JGDt2@8c%n(#5r=e=%_t49TsvUUL7h8;-a;8sP_Ur$0A-cCfu#xM_O<76t8Lc+`K$c*h}-O{TQcgr00Jh<$7AyZuU0zG zmfkBdbyirI#iW=Se_Xur2JTXaKPq4oU<`*oYhq7cQ5Qd@vvk^#95Uf7?AB;O_6$E!2RNTN7*alUjy)tZ#bHTkD zWCszN{Skj7H_(v>CxzC2df0yvhZ=u9i{PQYLoX203W7rg|F_o4RL!^g zO2UChMS2b~V}L%tthtZHoorYa=^J3Q7&N&f*e=9lbhq&;-!Tc!PK%V&R83vB#vQvZ zFo>RuDae_;fQD1~yy8s>SyGTGkQq&|Y;DbOwL!EGTjiXS>rU7D1M1oKU5&-i;o>i$ zw@y6jCyf8L0w!2zFB(HUs{2nP%-rqEGxyfZSMb0{}M@|ob!dNWKtwALSycL zvr_N?zGdj>$DKaa!Z%c^@++$0A5=bdo0rU$RzI4=BXXa+2v9wepRQkI!@i=#PwPP) zWki0;M~}(%(s@c2-galY;PL~TDv2MWt}KvOLlECt+i{EDzNxj6Tz@@w;4=6-uJb-k z+&(6?FN~;Un3$%KL`l=4(L}jtr;^la)O)K3a|sQxqP)*nA1O;m4mfz1@Y99-KUD!! 
zdlyPA*ruj_i`iCO9=e+Qcq$a%cGuEZ?r=pX1_o22Pgt`ia6J>2&A$$|DR-;!(;UVK z0YN6Eqp8@lTm*#`&e=v-*Kb;}x+rE&L_=(f6FK559y48UCSTS1I^{}6ht7w7u_@Ah zn=0=T5TEePXbe2_N>-YG7w7A8^KEqQ&lj@bPWkl?UH@`SwhR;le9|xX&)@%4GVidc zOZBWGMtW6WQkuCwAu&_7p#iutM3b(FOsV*AEm>EBr0psl!?@qPtJT#dhvLrr_xLeR zZ0hJ=6EItPd-FHMrdECnuYX}v$q_yGgq-t6oe`cQT@T|uBYA7;k>?q;`|{qoT6U>7 z<6$;M@}F!f2`E7I&I;lnXSA2_6j2V0qUPWcnNQEYVCZ7mjI6bej=ujsd$5&)OBiicb3;fT-p zTdGjd!nfWU8ZBS1G$=7J#Av?$rKlXdXqIv3r`PLLg)K9c?}%jrr5&s6h$2i?x3N^2 zKz(?skT?n2ccFc|fw>M$SPt6Upj1Gs8K8Yaz*TsNUhSL7emi831wQfqnGd)BZQD-+ zz|wZGfeuRG!rwg&xLNS<7)TkqjQYXw!{!D1=j{E{1fhE{%lfd{9b!}4e;VZ!#qq>g z>gu~JWN`#Bq7mC9{p%rP&fU~uSfH({K1vjrO_3hYrfM~4DR7MLzFp1mbd`FUVX<;u zOkr5gfv;(^Q=*IOSrLF99^N<&lBuXzzwYs3ID2SAl?`5%kLJeh!2D&Mi>ej}HuZwe zYVm9JPN%Uh7jeQQ<;_^H={l9oP6k95f9%=FcejoijmdgiX;~M#_y!Q89i1OHIn>(K zk#uHLvf^oWmC^K(%qRC5$dHE?_*dsbAs# zYaL9m!Co{5+Z4vBKaH?`RD_9d*z7wUUOS(m(`#stlJ3zCr#AC)u&iO!*4wneYzlOQ zO(g-(N-U8PW<{6oDOecX@GW3q`QE6P!r+~LTJwq$?UorfKpd)Cf%Q4@9cK=0s`0~j zd1i8b!;FrLpvip3SJmz300^zntQ4!nRSH@ZLpvkd5)VC;D07N*oArB~6d+XDf|(SZ zY>s2nisN6?UBff=fL9Eo%VT)w*@W)c{3WLe_6AV-9^L9-6+o zWwfK*4(Wk?v1|glQYu7I`bcef)o>Bs>m9tF7vzzE8#n>)sZ-=ftS zh!g6Mr@@fS#E3V!6?(I5KsSR40iFI6_eVx_?sJ#H9(9l6f`&Xg2^GoGnj5<7id-%4 zCkbyAs)HP(7L4~m@ z+Pmzl3Bng{$3IFijLG-Rm3|eCk2G@5?U-ymj~`A^r5!I-TY4JirgWCR{fax!!(PUK zXvx@)R&pO6Aifvx0tJl*CxLS6I+ZzbY46RK_ROlEh8Aa&eV>D}icUQ0*U0zRI+!iv zy_p;AQ5Y1zh1b9EsO`<9^D65D#xA0C-^4Sm+Ifg1xyj7RFAtD?t7)T-Z-aRh`G52% z_9+RHPfRu*W+YOL{)-Q}O&Td2t#gZ0^^@Bo%bt$!05)ENOh#*7#mK zw`xcDZG;M(epM@vNpav&nDIWZ=@U?zg>XSvo#6Zo@TUTyFq;Sq@}%?B!@aHwuD0|&iA4Sj}%Cpbo+6A z1%DKOF8yjSZa;T}4p$GYcMcEs7MQvm9snu3{gjtF$UlK!4tPTqgzlh2V5J|cL>!#% z(9=N19D0Qyh{1t>;13&?Ah-#9>r?UIx9!d?yG}k-;F~_sMD&7U@v*~ z^OO80sqm3A(rExZyZh;9;$;^URS>Sd#}A;NJ=H|}QX8p5q*PAJqF{UmLNN6EdEdV7 z@pj+3$Zyl+exdcFz`b`8*2L_V%TxFI@vK=V&c`%OSWBD{V6C2`(P; z1D-D0bj($*3c;QicHi^BD_QMjrLyqIP08KZs@Tj`>t~F6S)NBx$Lb_2^}k7l^nz(MgT-5jM+yIFSPSlUBrxVVYqkfGq47xFRkwy{am6sF@H__lzNPR^S8SzSO8j7@_ic>gur)~ka7xX3=BL8XBRQ` 
z;i=hn0RFL=F&+rD(9igNt}S3Cxc%)O7Hcn;5CV;#MI~jHC5A@elgXV=iBGnNk4f!n z`l^=U&Fo=MGQY4eVqG?VzqE`UxkqC>^fZ@GzqX1#QkIS!aAH#sYR#e4f^ACbx0r2> zesHBD_}wzgyef@udI9@uy7APACoV&`vQ2q&--@0A+tdo^KIm>xYuyrIxCaSYyrqTh zF7N#<-%Nem*X^Gs1h^nJbrm^+5b44PD<2uWGC`r58_FbkRo<~#`!=<_eil(3RZBs2 z*3<8jwz}G@mJWI3p@s~{gji{r*dX;|vL*6$yzx7`T=9ir8@l@pnP2g#a%WO7{JLGo za63kX#6^L54Ux5xk@J^wIGk2+N@kzb*J+mNp z`YpWvg-r=fZBFvA`k=p3FrP8UE)M_L)c;MI`So4P)vpcDX1a7>HbwQHYzhF-)yM2O z*|n84glw~xEM6NF&xG`XaDOqN|c z9t(O!4dC-#axtU)dLuNOzDh`0?qBb;I0e!xs*mTCT zA&D8YCmU3ar>p6Wfgk~7cZp8j0Sz-VyP*xz&N+CGmGUpspM>*#(%5{*ss0it!KwjP z8@Hbj5d5hIslXqW7qGXSf{g${4?dg)g0;g&gTUQCZ~I;YdqQycz7qZD`1yc6J#zb{ z74AT>z#X7NozT3~X*WpA-@Ga}a0hsOc6WFS*z69mDXl+^@=XV2+v){o0yZi~wO-ZQ z+af8-t$dc285wWYeI8=jrNC^8`ZzWP0H~XJ{b;A)1n1K3d78I;c=9+*#J%>M%JLji zQ(&EKn>!#)kC0Q-B<>dioUIrTy}WAKqEW*1DVD{^dFZSq>*db}Hg$uF|8^7W=4ted zGQ-fO58HH|L#q0(xfOL=#UEVpbl~gJFP(1EPI-xn!sNtv zDn1Yg|1E&s+R{}l6{z9)X%kkMi#7YNelHwl}x9rA-$hE2BbWY6L^6XQ#xcvrh_;->UD>x7E9YU zM}+-t5oK})y7);}>VK08=>^kj1&g;3o4Ws}VcpboPf*Cpkci?$e!L_k#3!?yJ?MYl z9lIb3mAZOey%Ls{qJ?IqGUh*<}_4}Yggpq~7c94}y0(4AzquqVWmU}E8 zQ*CA9MSpk}oD-q$ha~o>$4!nKc?dueiD+HppiAc~j)_2sDt2^BpVQ)8n`em7iYOWJ zza)O(QTQCq!Za5&9&WtCra5xeg! 
zUA<0c>kVhN+oBYeH7Jj`Agsa%_Ne|l+c^EWGn<{+bht`2LUo3`<}QNiwMuaf1no^9 z2A@DYiW@nC{spb#sb{KWg(0Nfm^_&HAKsZ9d_QlcPJ_T;Z~onYod*1AYJ_W-c*N_^ zWy(Ij+~VGY@xJD`tL*2z&QM7w@Lr97OLni!R5q2)!F)H|-l8C;`{UQqOciHoCt183dC`&x)aHHCpX(`n)`kD~ie9+d=mHF^m;r?M*7a_>kbU}cDP zx2wGZy}^V}S}$5i*g|jw9#l=ET}OzUQJfj!LBC2xq=S@J{c3y)m1}lkt?9d{DI`up zJ&HZ{G~w1mZ)tbw9(HwQ?xez|#HI3hg}8|TtVt%Pz~incHJ+wVijIk!@QK&haAu$| zk+p@T-JC$@k=Tx#T^y-E8Q_^3P-D6MewnR%(tJ6^`9+PjmNCC_rsa6Vc;6O24L=Ya zrRu2OOmJJBMv(li$K>LxynF40D(X~lM0nAbyZLQkk8=BQxqsCcuzR3l!S4+K?MVz+ zvko@FgVJ8%=QDu2_g5=FJaFHZ4!400S0H61_g`E7rVf6(zdPB%NRj;#&fW+9Aax{2 zUJ3;55d^tCyuA}>Z*H_IlUjvg(=*x?xY2p1BN4>5_5NPv4Nu^_dUEJ(nKPHu z==I%Yf!v11FQ29v=i&{7k_C1I3UGg{IT6}`9jnpKaqkPKI6L`<5A#s=O?pCZCbU!P z--fmBN`@cvohh0-+l^7UMHb$RJdq{z_pUlGa}`g_Ug~ApVlj$ZeiH{sso4z6Yhm5E zwiX~}PjTk)#WOru0=%mNp~&9uWPL9$pLi4$)c^SaCfIf_8bdrP{!b(P&Sy5_1G{tP zWu~i0IX%11@4j1=P?Vg{_=KJda+iIX0P`q@BRmQKWRqHrKQc+eceieL<||>Isyrti zAo>Vv=-KBak>dGt5de=|oP32Tl5A>`$Qt5#u@>SYhOIH4D8>}yT^VQLM~jen3%yq= z|I(H_X7QI+wZM>_H@NUKpJ|jQ*Cvv-28Jwch?Z-PduLzp1G~42OmBP<-dwvlBw`zU z{gzq0XgyVb!aaAb~ zMu==7du3#0B`bUHO&q%<*`w@{nUK9@M0O-IzdKN$_4Uc8zkUzr^ZDHKUgz9%Kj-Q7 zzR&wQ_gpvPRhAjaPw4c^ZexN`HHBwcss9Noq!UD|Jv82eJu35e!&-q)CFMm(e>LQd zsFtq6j7vLSu^Vr1h;H7XbkiH|Xz3-rS|M4f z_)&v!VnIst=}hnP9m}S>Os@ma@wF$?+TSy+=L=`u1t>B<@wGAI%?PSWNw&Sd;>94~ zbzPVvxvk0C7x~fm3!rbh9A>3nP0b#>YJTTAkgfHkzl5@IGHCnJ@IjFn?84=QItte6 zQ^wfyu3xyHp*uZ~{;qI){lop(8BJd4O0$b-4Q%82=pJ>WCnlXv9PpP1#!g#(1 zv-&zz5|3%;dS+F+&NVTU!&de*Dw6vn7=k|S930?9h`iiv;y>2;^2yc(qC6M#nu9#F znd+mg6r=6K>3~*)X2Uru*lxV@>aB}ffrHGi^%T2z0%fs-ubh&tdpWQeGzf*X<&H9& z--2!%D2e*gv2h}k1aHKod1DTb0b$j&t1$UBw+hzx;J4AmD@qlV6z_1irfeqEj|Sr8 zv7On}$$tqTwjLeM-(Z`1{Y!ZL3!9S0w9{ZTfn9C#d6Un3QM*=G9Nt%exrh8S=Q-D` zsSG`+O)>o^n*spb3w}}ERx_5ufqd*wCAd*n_n&;Aa1F&xu2X-C6xO!_5Y#>s>HQ$j zH}w#C^&OMgT;EFE^95Usht#cCjC$i=c^%u7eMzHRY-3Wh7{c7z71t_%jv)D$-yYpk zar(qOnqE{J{StT+rg*D9?|bfS>WW27V66 z;ecAO!98H31Fs>=@$-S3j-LhE50Rw9aer7Hy*j}^v>sPLEIUGHcd$)0{BD#t*GBxX 
z@8YpUJKUR>Gr>VHt8(YXUq}dDUUsBUB%YXn+7$EYY^u}Hf`HD!S&GPd++O7dCKz&! zpC5t3)cokf;)_ZasY0DF*?QmG73JS^7q;6P63%yI6`hh`LRVHiO1I!ogw!xMMYFf# zn;kv)VwSxq4N3;8?jb6F2j*;4WsH%0?aM<;u7&8CEj*+5Sqfv;+8d+d4!{&B9XqqB zV^IIG03!JDVKfHYRLAc|n53Gp6b&v`)|q^lEwI*TFz>OCI0wELyvwDBrow}9T~M34 zc7jbM1I#W7ge~UUWt9Y!CzAsgRL+Og?D(m_$yK`C zN6=@+o@v%17N#*A7gu|R+NXRcm3Qz%xjICm+BjNkQ@zr}@3O@xkg!RqK z3K}y#z!A$V@=0en_NYuwcuqRQN5Mk{2xb~XBYGn^p>NlCzm6)x1e0RwFLs@hS2H~c zS_u)(Zw*7a)1U9fJrSr!G+BSdqJEA{aDVNJ&A^G8bmD+Bj{<+%>I~r(dbY%$^p~FsjD375>r{s9Yr%4#yz8UK0FY*EGQKCx?R1#0or7AFcJ)H4F+k|RD z;EZW+j2rg33nx;8KJ6SF;6sRvPHBA;GkH_e{PL{lsHpXXNZrlPM;=At^X9zbJQMHR z)ge3sy-QF0Ysp{U&yKdCbyj31Rb&=9C0p>DLihsiZ$62t@w~fEbs5(y-fgfbC0;qs zPIT%L304dMhY-?SU8S|Ydl61}SIp{Mg_}Pd7u)NHJ&LiP1kiQM&OGWkv%0 zNQ%vL54Exsi`PZjV~-jYdly^3x=!>hPn+gOx=R9TO2c#$xyhQz2Y@7p_dx5EEMIwh z^N*26OtajO}_CAIza=-Cnj=HH|kKyDVp)a}+1Z|M67+ zspCjW{?rVu9(Mj?K1t}Ot-wm;hIZ5-cx+O5;0}-(k;Aw_Iw6vDEK^W-g7!eVVPFKo zYjW@d)Th9u|Fu9yLG}YWTsIgu5X-L6c^&Lgo4*@neq3DI;@5Zz^69cwv&pD?4)2+s z>#G+JtXt%XaIBU%K|PA?G#&*2Yr~bEIC0$>Bqf&+-b*?v%kF!8{_cg>`BGy3X%QOw zML>}PUPu_{Wsk?#PuwmEvGW>i53~;$4M|_@J)u?%+PZb@QEowfdI(Z*boZ4c=(+E$ zyA{~!0J#s@o?kV_Lo$B#=Cs!cYz1A_h$d3>VZgDSbPPkXmP&nxmKs>fiu#f{l)p)O z0npko-!hPxK+U<*Ab3+7$xcE?W51k5rKDv>Lt176kLk>#{uj&8J`llfhtU}1QJ65l z8{yIDLiIa46?5pser<5UmUdz75y)v0mB`rB5W(Z zQ@gupU|JzrDesDBp$8%Ab1mLBT3joVJ{@?jtvNMg;UTw!ij3bpmpo;>r3$Rz7{75# zMbU;cJ`5q6fwFoXQy}L01x5Ap*2#$n2LQ=8c2e;YN}Y`rq?J_NMD5QlqAni3D8i{< zsfJ}k>@S>UrT!JIPoVJ@Qxd$G zaVp>VOIC^lnw8>hzxv?T!RIlx##aKLa{7H_-DglCSt)LonNOWeouVGnY4L)g9(CHg zEf4CHYX@o9EQQ3Eb4vgUd1Kh~g`Y-4&h?WN&f>?ai#Rc%_wAu@*HRDPJogmy*rq%# zGI=uOXF2uY2ixQRg{V;)k76QtS5=-)E9h=2P!yH7u;Oiv}5%VwM?OD7IEdY5qW$^Cyj0u*E2A=J9LW8(Z0vuuS~tMj-p+@DnY z1arz}HC{gNjEyaa^LRmNxf-pGX9BXRP4=rF2#i{)HV*((v$gQy%bk9t1K)Kp%DS?% zVrYh7!8XN@5V@~u9_WSnD)lvGM6;ZD1ai%4%cmooDk#5Ield~j93de3sU)WUU z-6VJJM|E{MN)zn8;(2d*01A|g9JYgNs&iRJuIJjIHpTg$Yzp)#>HU@SQwkLe*p)F} zMv*S5FQ-FjwLE=$29e`UG``BT0;HJ;t^H$NW=0}aiel_nR>W$y;39&q4O=hEa4cYa 
zqB*vyMO7UtdNEnyHudi?b=DY+h}wZQRr_lcUYpn#$Hq9*PRX)Qc2L%pJBs)DrN=+1 zGUCg**|Jf4R0{&H*z>F>%X(V@j!ukjM9T-^kLC%m>5z>I9fI$r8uHY38xRIw<@43~ z0+=$mKK)9v5)jEiyBSJR>8F_9dBMHH;X64#X%kmp$~$nJ1pNzDlH>CZu_E&0Drf|` z8?*-L`O)!ni5yaaXv!Z-w|)$|1vVve@WWr1Llpua%#X$G_`sk~e6;r;yMVO2fpb^? z)ea}Miq$x*KC5wfx26M#!Ft}Rb>iUzuryrW1Aw#KzR5T=k~|$TCaRDch|~Ei!7xo^zC1t^5W!H- z(!7Fyt`JuSf!e5pomd%3aW{U?_=z-TvQxsDO&!7e&ub9DUWd^bY*Vbi8{u@RIoQe= zZ+iQCF#YJ8{pamEUx#@`&EeptI+OBSjLk!Bit9w1iceTge3j>H5>sxSR)ml;dTpdH zQX_se^L;om1yPOIB)}I_m=|O;)xK9I7|h*05reN&8>&@lNw+JXvZl8iz7D1pl9l4= ze~tBeDZkuH`5n0tn#z~;^lbF;YP*)ywu(i9pqCk^q_uI#<@=S)PBAyQFqzbP_9|p- zvSQKml0-JISAz{M$b|zB_3+_KB#CuxlnR<;K1TyBQmFSD9C=2?C@-zqTum)L%S!z- zrAIv=TD_t17Hm^|zZ=#L{IyT7l3kuXPoQ-hc#JzxhI)UO{h~q27)2^?Q;+X=>b3Mw9$C zZOc3FO635fdN#SuZ*K<2)D<~4TSu^7YeufOwF@pQNSHY=$MusPdsOP!tCut6FwXs5 zv<=+!n2b4&Sst1FrGBNlgive#`%tBy#VOI4DoLXW@MHWkGc zs`ZDdsGX=uCk{CCC~#KF2f{1Jqr`rRR&hlYB;uP*fEGHpF45d|1!u|lXYb=NJ1Kjl zaq}bS0ze)mHAuGbB52(nEy`AVmO#`0>r)0{Bp>BQ>UZ|W3z1xrV2=_&h}@1@q7=l= zXB8;O1Zv_BM)z>M^WI zqr*-4n@BwEat~xOA2yG=K|PA+KYG+G+F+E;8`%Dohp9D zOyEm)%~fJiZ!5m%^`t-PEF?~1!MH(Awcw#!K<;48jnh`68`(1HY)C%l1h|xOkE>JgOg5@+4;^;kK#Rz zM_L^~-PpF8z z(^5smD*TP#<%X%iQVDP=K|PA^M2`~Vx1rVgKJUB?X!8yg`nmJ@)^NwH+4a;*$Vof& z3^xF#G11H26h$zRV;>f8Jg=-qvDL}B;g7?-qItzn(mi?%Oe-WS70(_tQr!QRpZ(_D zYyR(huMkb&ygFIg$#5ITL!KzA|Fpk6F*Cqma{!!7F}uT+x8aa z+)SucBCxvYI`TvcJ%(G|Fop_ys(zW0Qb#kIVy;s&URs@r8RIM~^^cGqcYtUOfW}*} zN16X_Sh379!|fkmYhy^|%ScuVlarz&G@qN#Gf6EP%S}U1+WbpaivRGtQcrUh$JY7+ z3l$qOYH-J$O8S(lL0KtSz~Yr)s06a^LRy{kchqyJB3Ih|dc=cU1v*2yxeOFn9|Hy! zHbnZ3w~%wxjFlD9Z#d>QU#OPZvE~L^(OiqLiu#Ui>aG+_ixsz#FpVkfuDp8`KSa=)E^onzp?a<{nU6hTkAy@kB#uYNA_taiT1p zIN;2tz%~^Kp%!FQ4!^`K-pJ?dJe8S9DV%7lmTD5b3m@?dx=1>WOlhYsn2%DZgKSDd z@@;2?{elV7f$h^xVrt}BO=YX={I+K9v$rZky$3IWZR!R>3zriJ3Flg6+FoI*KU6bKbZMqR?{w79b9p*=ag)fIV}cy4u^S} z$4@b=;T1`gwUOwwPKKtq=T?_#7?XwoM#Bhlz9JvKHK*OtIPIdm`{Cb=M`l|d5}(Tv zAoo~T@SSang4&e8f3zvYd)*UvarYB8=W~|g#5)^qEO+utbz*fyxCd&AJ}>eB2t^V? 
zYJ>HX6GLmw{0qqw2%fNpe3hwxMfgS~j}(QFq$xv2e%5zTo4Rotn*sp*N_Lf^ zvYVy8l~#%75*aHm@tx8nb}lbRTtvH{S$jDXP*q>78t#;imUIbxM)MU$XEJ}y^fPOV zp%JEY#asS22gf#L=CkFxYyn#pV=TZ<@J(}{)#b8}&1)H^f#l+oCpqV(p9kOTq@StOR1EVk;jgoX(53gr zr`^k{yY~d{J=CTIPq3+EU>H%=Sqi!J#!3S6+t}!iO<5JMqFdFS6LJepIws6&mjI^> z1Y%=gq1BN@?4VAwtg$hA=C0Vd zAZx)=p$6vL9DN`3k|O3)(%Oxc?e+luUKBa1?_NDUhil$-_x8)CBU%Kv-Kg}!@R$MN zV0G$5TSKeoSOIsuYIpg%>E)Anu5OOY$UY{1qUq^#mX-Q@LVs=r(HaVkw_uwBemAVr zTPXbW6&MX4pD|9;+LPL+A}Z@dK9qb|$8JCQd|GY`nw1iQWTn=mLr3Uv*Y<2V_Y7M@ z9x5|0FLnKtm69a5NOCQYT>`81Qv>NtZ5@kM16=qAnRxe>rjGRw9wvaQKNrVK!gKZg zFL47^4$$*4um^Ig)(J*C@hMA%O@Xo&CV1|e2kz~72bZ)#fxT&B zB39ok{^n^Zr~TEwZ_b`?LhVc46vN%LP_jQa@AqGQ`65F^pZWn&G+RFl|B0G(;(#-c z0((>#gjbM9W&aYbS4k0vRG+iZjf`VYY=rF_ioiY{%igsP#bS9-WoD2+J zgK4{6*)=R=)ot84Hcw>lSAC5MP#LsfXL%=lw=tr>;n90QLsDY4va3O&sX1gd9S7lp z*qKND9QFRb3}P$%aOMVkRM9Wt^)Eci>R}HBY6kUkhp7}{zUhn0_bs+_tlm7ug9*sT z;naNe1?o}4|H-2O;B~=^hU7a27a~{;oz(|5a(9ClZ#tDmoflgxe-^sZQwkVxYkPg& z4DB<9F-3TtuZ;7=T;#oPwZz^y>Rpv}{n?abkGlQ3kXY`zZDnui_9fgK2lnsr;BKNV zbyPKZ~jdxQ(6(2+ZmM!ou<`u;0w?OId%^bBZxE;V6Q`oZt5! 
zCF;a%=KE@k4T_V;>H?LHqm&fn#Di3X{Fnep|FHobHvi+D9*#F77X%lPL0dze3HtN7 zaBkq%zi}t1Y5h@v{`{oSO(BnNkjLZK@RuHlOM!0F_ao~i_+t#@fIuuqK<9O^M^*i9 zl>LkYg!*$VKN-cCBwOd2h*{z&SEvniiPhaw^h9Za!G(I%&C_`limNy}a-mH-DKN3P z>q$mAyN1vZ8*d`?g_DpK=EBA>VCKnU!ySpY@jQ5@7$5cKwcQ4vcA^m(qpbzL)JHp> zxW^u)U)?cWQpB=u3Fo9qg|lO+V1D~$?nvg-X!IvamiZYtrzALf-thi;<98Mzd{KGT zt!dYzp5`fiDPoPiI10Bt!+|6WWOu@%Af4-?{#asWQ7wc}eLgDvRjtmv(7yL--hGNo zBWE7<1J{2lfe4N~jK*M(YX02_t81$j!v`fv%C!<)s^77MUC|J_S8$FaFRD>e@GZsg z0o0>JPVguINba;`Y$e8`dHUYNn;{t~mf-HQ6qRIwK5yau=29DncK~hvMvM|=lt;5j zsd$YbH8G9V$ajw&n(^yBZ*~>+?&^bSg=D29FTRi`ytJ?ED(X`;6=u_TJN>ST(KUce z@dfCf!l@HOr=-;tKoTRRORLnY*DRqfs-+}!>FVQw$pfOI<=brTos$OO2g~tx?c;3-+k)-wkUR{WB@+Bv<1M zc?L`JVU>@Mx7teJna;r?*0gnbRil>uB`YNg%}Q}1TD@pM-Dk^%37d1dn_N#`ox~6M zu2cf5hz&{8Y#XL8VPaYc?zuZJg!BgyRD3;To?I@ax$Og7;YwQ^lFzNUPW@JM@*TX+ zvwp}crp1>FuR|LqWSTgw^DB|)IYBgHkA0}55 zxE~tuk;@?P+0}sM*oRvbs$iQELWnfLqSUnEf9UR?{*8x)?TKe-r7!uBO$||B_>OrM z?V;!XPCDcBK^9tLMA0Qz-r^$v!fF;J1f5f|wV<{9`gtXmP7HjX`CiPn0$2US)s6~U ztST|)Z1K@OJwR+ZwI-`=MT&}NbR-%fZ9C+Y7MbGYs{v+`HV+q7o z%;EeEwyD`)!s}nyl)JZ3#b=_o%NOY@xJQ6jqqU^>jn;=Q_Pmj^Y!tfdjSjUbvHxUK z$-pP0-r=pQzA6>(A|2s5q+l-Ree^&Hv7PSkkGNGQ-5Ld;O2ebvR9$^ycU=W-Z3%m% zTSq!|PSh5kz0bZ);hpZm1G z%9A|;OFWkt)JOMKKeBY~Y2cHaxU<&KkXLOA_(#!=vYBe z4Z1)T>L>4lKYIS-Y!sH;;aJeC0~g7U^J0)Ps0FlRci33y><+f6)!&V>3zGckfFAc( z>_sAdXWotSVl>eylov^sVI)~r`igl9P@57zolRl95L8v>qYS}*_vl@dlTzd>_2O;& zpcgmeKljJSH4ViBSmFkp*1-we_AhT=snaU$z3X6%Q)H|;`O#vcQD7twbg4+TBSVg8;xHYax4tNAfHQM~G$+9DNDZdiV6ol_N7oxSa+ zyG)6VMM!};K|lpasr%z|sKdoR@fkA=nAv5bwNTzI^fU0>i1Kw8uA2XLW>X+YPgp<% z#~nsvuubj!ZiHC|A0k}v6Mdi>JE1O%o~cB^rGF=IczRK9+kyh0uOtO(Q@2jAsbqi! 
z9Hrf<b$F{Uz1nkR5%&bc_v9L>) zAX$E<-Bg~HF=Y;gT;m7HN?A5t(BpD;r}+}M1h?->WAUgTd5!e>MSi->e1T7LA5~BJ z-obS<-~WoPhdEyBV3awsFvCqWn!q8sFNDI3x}IT(X668c|L8Y%p14P_rg~p2GqrOT z?E|&kV!ooYwkoyW(H-m(Ksy)5XBY&o$pPhs&<9cquhgEdl6o@19_A~uM$qh3RMxBOWy8`R`^ws z`?Znl7yUx$UV9BFNN-&Mdz3IjWVieK#JlQ6n;lk@ruqs6CeJ=dDja!~G9)ufYC+ zwSH%xMD`pHpnjEYw@td0KiN0HOh{<)@#d{o>qLyu_A91{O_*aNXTQ(;Fa91tY$YDf z+#rv_BKRe|{)I;|VM0w_(8}7QGiBD7MeG zBeYWVkdnFPZl7k)u}9_EKL3PUDAe<0{N)X}15|9DXYl-zAEmVrY_g~lYtDnd``<1- zH`b)#_`<8$sHDB;(g$o&qDX(6i>oQZ`+_|Y;*o#%KOG>$W_0^T_!BRkHR$(82A1xKPkc!_5$wT*JAIZw$~dVe4>V3fe_TrN?~VbYS4SP>(88aL2n!-K zhkZvw|FCvDthqr_U=&Av(0w1u58Pkni5%wK;ZVV&P5$mXRgeuK`=b0Y3{ryZ`loHd zjgWyq$rpHDe+r$~K^}!g`MXh`EE9Ovz?v+K9nIxH(;Nmfz&JvwcQCLJao^2EX|RkJ z>QPds^C-vlYY4B3*9rsimlzAmc+g1D&EH*nA;XG->)xBTxIhOWSJ&84J_xQxz2e>Y zSEZ0gF3>z&WNo?n0Vp^1>RX~Q3@m#S0PuQzGu zcKXbdQxc3J$*_=}MjFXV%cj-)=q{sqpa1JrOWnvkuJ$~%&$D-c#BpL*FMIWj+Q)_Q z>3P9O`cFtF?@i)Ro32SmI^HT4IrAt8$^Y{KL~zn!GzNJT)|KClu=LP}2iCfy;joRB zyylZ)s zW?d9*{qKs@fb-L}@ECrR!%|p9>!w^Q_peT`UkmBexWmnEvA2r%ei%$EBrEkne*AN1 zmBd!cmv2#bLdT^9ZhCCJEEd##F#u15itMm|N?L0r+C>}$G#cMlJ-w5&$Mlq+W&De) zXVm8#bvb*KEC_f2OIGxaKo0d=yS<$$+^g@sMm=kETM#1+g|KtauY{7(on@u|C#aB4 z5Ut73cnkI@&fg6yt{I}uwAok_0)hyiym8N!$N4g|HJIM1+`6lNh;3zJf5}QoL$gx8 z26r$XPudb1y?GA1WJ&GFR~jb>`L0w}0_L`-;X_0cqRjSZI<-Vy^>G?IKJlb)EUS;CAHi=q$Zvegk^RsvU?peMvrN=qc91{&g17?c%$>R z+bZuwSvqmRnN5M;B>)g=K{h4$OU&AgU%=D6Gmf7y;^p0qvmwdeBoH#XwX8ibcCS2+ zr(hUlQ{#>1Z|ewv0C5sq7YygQ(Xo~}W&s!DWB7tq{CPHM&qv27x#s?_A zN@U>*DlrusPQdXU{FKF~)7j$-mM)LhZeokH+#wX_u)Nd*PnyZt=wNVXarl&MS%f@# zv@@n0-oLMjsjDzCg-nAZd}*U%w~r6|%Q&Ks1%P$gIEBsxair&#FaJi^2Ia>~C}Q4U ze3&~hUku%5ltMbY#_wN6#FWGN8*EdOzl7Jnuqkd0LrnVyP9%hPHC2A9NQ(s5V!lc2 z)CQPm!->|ygxWxDO6EV=6aakB)FXu3TTs?*RY217dGiJXbFd%>#H*A!Ho#s zf;&rAuwfOsx&8$TaW~##{<^q5(kl zeGjt_>eA9aaBqs%w{?=DY@Q!ztYx@6M7^wby<1`hqpcxQTRq($&U&UbVJ zr;HCMq(hhH+?#5}?Vxeejr{18AL$FIestX&s1M{ThpyxHW9ZQ)e^?)?(T_GaXptPW zIxIpPzypt-0owB8D!k*7KTD4wcsSPa{-GuYBY1dR;9UrUA7DZMxPc2t%kdUR4aaLi 
zXbBW2Q=zjv*rpVJH_Dzhh%i1wSE7b{@jJhaO7CVNhGj%CPs0bYVEE40nBRfg)Sc7V zR5B2> zph$G1Rep#rj1jpEiBV<&=^1G@kno`d#|^-#`$v5W3GFYpv!3aa$?k9wVDb0k`U)K=E5eS zU}#S#`@nD`=dm{7)}$kx+0-$w|5yMKoOT$E!8WD&yAj?PkK~ij@7+x#dq{sf>!ymc zN0?0A19P?n+ACuvS7*GTHYIz4O##3gb>bw+B<~U~;<+2*<^sE-W#TNF^&eeuITaF8 z=`n=?mu0EP3FzpxWe;C}CCt3Pp?$BdjG>P(r&KgnL5-ub3rs5{E0y!qAiNM|ergfj zFKH<8owIDPr-4F_q|Z=*Fj^cw>xEO&x+3dKU{CIb&cCQP{ZUD~nY(pBzBDUpKPlF3 znE@`}1mGBu-7~(o>~wLJW3#rU)*dVNN-J)|`b?fw=Hds+8*67-segp@xC2CMIyByb zZR-B-hV^`rosJD&l()dv(|e4{hDO#~R#n8cBkzsXLH7rLL(>P%O36X8QsT41EvZ}Z zanZN;0l$g-uS}g^osY9p$w0l+<>w?O6-yKZN~wLlQyyO>GY`@+4balRb*{ODEz!z=)ls8YT zU&aUn| zhVDNUK)hx^cm;Ws^)JziH2QH}DUhk*Hg$QHX1L@FP&6*CY8ce6A8JP{}i~ETBw@hOd6{A&Paa{h=-BTNTO13hVH+pR>;88A+ z5(X56?;}>nx1>lnvA61JBT$)J3ETs&volrA)v?nWvy|~`Eo%p{7e6okxL2;*YCczG zb`!n)%%e_*4#ZaG;mi&8D5qb->tA?O!bQGDva8&;4m|RR_|1Jc;iff~;be@zjUx+)FAHJk@|MIRU~HD$@Y+3HCt|GKr2O4+%%x+Gq9yCJ9+)G^RL);DxYgs`ad~^6I|&J$ z2R5yeuC8an>69!hdJpt8;3k!D#n==%+VxI}PZg5B53dOxN}?fUL0oMIzNEp>QO zFb(RjD^Mu)@6xA2GkSZqP_|!4kEF7$AE>EuM)53@oo%n1c+zcN%h7OE-aMiBk{O|o zgS0^+Qtw|rZ#wi7au5T8f8pSV*Bt2!IrKt0RM1d}6&-H=cgdr}z5SinAR`_<)vdz< zIcO~ig8;p2#PLp#M*YLu4b%=g3UbJi{4@r%J@l%qnnfGCJ;qY!-bV7yt}Qi{VdP<$5$1kH7?L5>qZCf#QJ`O#&c)0TOgB1E>%yEM z_6StIgMQ7@DFCj@Ku^?@UiJGZ;K&~<{EcFuO(ZgP++x$Op*BgRxtR=>pDa3>swY;q z;Mk+0Eo@h&-|-7s^^7`K8Hn=V$WJmb)i2a4D0_3BF{5p6Vk5yuA zMbA9yFVOy_0Yvb#!)OflsLz9xTAL&NLnB+0H z?#>ZpU3jnWl}d6hsLi?<1{1!IA51GGD;1t4xYSj#yvr=%5^AEQP=OU57#eE)+OAMc zZ>YM4EbNrD=J3_p?wm9Esq_Z1%pQX*eXnu!t;Bl6f`U{+YX#mms92#%I9u@z)VPzSQDQMXO5VBbmCEpfc(`eys#op|?iR>MjY@4YmzFY5fQc6u|b|961G3NOb>QK0`$x!eL$9 z>)*-0DF2{=aVkQJW2+ z7GzWDzr<|&qjT%Hb{QFxxbqKSmHE9iG8+t+$jkK5?ghUsA4CUdr9|)syw)Xq9tkFt zPiXpziJByQ`Xr`Qp5CwQA7q6Jn1F3c6d|&x;-WcO(dYAQf}N(g?)DAO6dW%e+0?e) zEhIYxX}8#Gb^I|Ne7j#_$ZB@rGrvpNe5TIQ*=ji@TgXB}I#HNkZ&)DQp!05cn95Wf z1t46VAnU&ExK=@%kpsAf)AH@b3n^*G_=E-Nr-~3zr)o({k#ZhHC(!T3&mf=K)X9Gu zAhuo{&fj30%Kat0{)J8D>bm;ErJG$;*%5w5kJ-g(o=OzhQQNL7BMe6)-pB<9wJGKQ 
zXj9Cl@!}(m8e!LvHPQEVw3bmj(hczGFg_!VZ!Meb3r+!K^?Y#YM#B%TwSP4kD1O7b zFYz+o!EpF4hT~mAA2~mYW1E_5e!X--S*`7<;RUWF@B!b)aOqG3LIvQ&dC)Jchwnvk+AOf_MpvmUvcEU?znxw=&eBtTgo5DgKKWb-%F7lP2egP!Jx zvmJH+SpV@54mFGXNB<881nmW)d_RuE?dM_OXMt`GVmSvoyMt}2?02K=r!!Z^A9l;N zo{`_=iz$ibV?nIZAh?i80|KY?!H(!4_qdcF&GAggQ#79wa=De3i^8S2$J_G5kA1ZC(*c&tlFEmEF@v?m0PpV`zOsQudt zh~Srp(HLw~b-x?oIwtN_3hjFq{Lzjp*H^0r^Pbw9<_HW6>vxVIaV5lxLbFnLPqe92 zuWTKyX}2baIHwFt)w0pWd&?MVk&6iL;QY^ZO1^vn*h;Lv9UJ_9Zb7i`Wo-oAB~mHJyi|Ii4cH5VFh!8X)v!n(Z>#GR!S9;m8wS=HQPht`>siNrx{A-Ypr$;3y{f@V9*qG?Z%eJvryB}c&d5Nk#OjWYtJ*v zYHeK8u;<4f<)wb5jTojMeGm?21L=5;_40}cEE9naPnh-I(^K-=;;N}P2^Yu2 z;2FO&X!GRrlOT%gNMmwp?-Cj7#NKUcov2AC4mk5D@TaY>AiRP+s_&O*C2UW6^&!aa z`N7vA5_^>o#sLdgXQYNV*gKG8N9vn~`am8P$^U4YHNkspM!@ZEG@;U$yi|lefw9jF zxr(pRD2vcQ*WeEhh#^FB2H07UNxJQS|v$fJamCa*3Nl`rATJ6I`u zQkN|lk;A8ctWC_1w1nT&I*)Nmwi5AO4ddyZLM2U1dhrc1!b~tw`D5=E_}Hio7_lElV5NNLkh;3NBtq}{cQ!rR^H*v4fd#s zU&8BOc$A^4o8?fc=d?dA@AljJ8Rx*~j7tXLtL5`ArjSz{msX)3rS=~^%As$+LYNB6 zoJ{o=&w;9q*Am9UHYLlWi}DPJ5!R12=Ku~LQx?neXF>I0Tl}rf$o>lkmpLysNr%X; zb$!V1@MJ#rs6OrV5Jav!F4KZnsR;S(x;KFHUabnbu^vUsd`L71-lt?awhMMM#4+Oy zKH7dC+>`!{`dl`(JU&dH%HmH~YAAcPfJH=}N9mI51Wa5>$KkS?g z3R=J#2igm)Ah-hZz9*pn++p*f^E%k0KL2i%b3&#U!c?q%Sme}Q$tf*Vogch^C+zT|}dMVC@Hi+_ESFtdUJ?f#Lly1@03KK(kTt**ZbFVA) zg}1~vv}kMgQr0sGw3OR;sPyhpa zxHzJ;?<&3a0>(QKb^{k*KIfiUo!{0+!;t#*oN-{5e;U*%O zR!CMVmbK!R$PModw`ALwVXc4=EGN&+LYR*AID)b4IaL|tQ_^~_o6T*(1g@C*{epYV z_k991SQ+cAjSw2z%PxNt3gB;_C`%^}IDVIK^4b0WIs!y(5rkSd_Y26s#H>vj)7A6uwx6^VbC@LR zuNGXA`AlUN<}=Y%6V$Z1Et3YascUjTrzOc{F3y5xI7D>wS^6htUMAF;SDSbXzU)Pm z6M$_>93fKfGGUOvyX!=gZv)DwZbbk6*_+%)Hf2sj=oD$qn{L%9v5Cx3iE4gdn7Uy3 zX-KARe()MjcpDNf?29k zqqvo}l9{frqU}~il5Euoo9qgx?~&u)dTX88)X9GlAhwDR=WmcrUBLb&y#9qviDY07 zA$NakjXo#jEv`nV=C6V&>_=wZt(-w8(MAyH1+^*7|7cVA5teYgRE+IkG;YP!eTn1b z;MF3&{_NXZgBf+(t@B@E0D{a1Buw9sm|YtsSDkT}YGvN5S$)N-vOz<^M8?Gr`EqPi z#jCBnqURdV2fs+vd#+9%N%Km_L>=Ap{w%9l{nyaeoKv#=ycmN%&_t^8a;EAFsYT$F;uM%k+78}R%MU8CRiUC4I 
z;c>MWwgejJtoa@o57S;0n{G@gU@tcR&Jv^;=F@f*C*co81^!TYAPJpg4&aVUCl%oS zG}P^<4ZypG>=*O^?*^)X7DyemjUc23esF{Nn|n|k_x`B>QZ#Z<)1gs;HU(7!yHPgRMvTXf!`RlY$#K`}99$5|ZCMa{ioRN> z;Yfm4=l%(5Q(C97sbqjd>ZznSYGv-zG*}N-qUhp%l#1xug%1}W^XbXfkp_JPXnd@7 z^JPZ7Nb%bM`8>)|EUtaI6@$gs2PmUR1QzGij&16DF!5qK7eQMH)14NXmu8<8p6$!2 zeW@@?i^shyQ~&O?p9j1&u^wYHiOp?r>0zmM=I*_2bAg(LRY^CRt?fAxOiU%vfz74W z;v>{}k;Y0}Q6f7O#>-I8vNk`HbR_pOLATA>uk;>)`ajknf=dshG03Ja(EM(M?~;t$ zE3{l%Mf1B8q*lNv&h~ym#-C)iXnkZ8&wJoyC9|0SE8t>-(u zc{hEsEJw3lHQH}OEx3+=L2~OTa>29(Kr`t1ub-R z&$3eg1JlDU5Upjygz;n<#f{2%Jbl5*SSh|dryHN ziUyjM(t%{9Fek2muc&{yO`tBr(DV6bE%mCb#PN5f0Kh=HJ~`sqqw?HF{|@Hi-Ub&g zzTB%gYw5}6E&K`zbK!uctQy9bxf=*Bu_D90=0hkFgsy0kj1MsB9}VMU?WIG$1E9JM z#{*2%W6Alr1l_RERg5)AI>4rKUediPq_nv&Dd8(WtrLDqUJYa8qNsy{$|`w-b3B^t zuE?_ThES|!kFLLjLw@y^CFn#=I&r|!`-78D>;KykAYRKMyn;N6=a*} zHLdJx!FaEM>V`bb?rVrq_ZG-6&!jHQcz-BDA6_Hr7k- z;>`9mLj~yD_=5wt5F$S$MBcT(Kximvkkm||RIwg2O?YtRQS(EL{F}Rt3w!=+n_|!mavw@IqiGT^~YBcp8DuVxN!?%*4-r&kJ!DFX!v4dD~-}_RGaON zG;qg~cf}EO5L>SgXKt`ZiTn~? 
z|H7k)zn8t!P@e3DD_W$J`@$60_tNi`zYGj%9!@D$cb^0m)T4C&qeoRcsP@nB6uWN_ zB+IVeXZ^AhqbK1ZTgu&^XYRW0;MxCcVE$x%+hNmhvjn0IC=T=d&X$~O>vs( zi#XJH#?{#C;TlsB0_Ux`^t!S#_e=pQb#5Kz{QGgaYrT5hB5W(I8KKSni(N4^qM)~> z!?pQ9i}N(1Lx5qtyO76Fsa{#@vse$S+?2I|=-K@37h|93L2(kSGUQMbAqS;xAZGJ} z0OY@z2WY4ps9*5Mm_L_5U57Fw2o)vBfFr*-HW~6iZwKA+kJjTu`EiT$qfXF49PbDG zWYB$qHh{hm@Y4!{rRr}M(CDK>f%Jk{egmD?!5$^^yHS2g$ycpPW_8e=#7u0YPp8jz zaq8JzR%hPqWdfpW;#J{LkJ3AhM*#ra<(NJI)tP}^p&EfN2-nAE;Hr75d|j{po1Ty) zUn&CNf=})iRe^2d>p)x^Lu0~H2R{y;I0S20OSyn^g@H{Z#~ziz-!#Hg?>cH+HF_aQ z?jpBsL-p+5JCaAxv(0r{We>iPkVLW-3%M##L77D zYQ6g9V2mI^jD(o7E;?AlZ?YN_avN`T0DQN{*Hh+kx7oBSlhSD#^(kJz4ZwSgp5$kZ z$U6TFp71Oy^(Qp{rV6695*ly89;N%cVJ(u4%jK=Y-w<+k7LFq75mxUoSQ|D@Q_Q^c zxsOQs+JnDjr3|21DfkCXs4}l9#CaDUL~0MXmA&sXya@TORD6`o0TG5ODvv`cRf*5r z7m-3w;!t88(3RUcBAVdi%mJaGa#M|31Xq(bSa*>vroP1qe%~5blTSvU7p6*cB7DHq zLTpNK=Q@EOS0#G_e)GeSM}U=r02-$GD=Cr(hB$k#{GYC$$-_Djru9d3Q3?zo%RdC3Sa zMTASMORLx-=4-@-PvTsq_{sIjAe(y1XmCAINhcF&Tsk*^b1}=8AZJlTX+ivb`gI^b z5+8C0KM90LvrN?Rwyi){`9}^T1U!_VY;2X}j%;eVn#$MEQ;`W-SLSBGPIn{ISXX+u zDH|JatFy&=F139K|-d0E+k3!Gn@LG5csDi5L?xU^EcS0?0yNae_>O1 zl?oLf@GcHL(C81;7u;eq!?ig+`ohk66Le-Z=S2a;rPR*?b5d_RC~n_mq~fEtI3>$b-=D`-xi-HCCYzt9 zpx5&wvEx_!92f@=6|%ma3~tv3m>f(9_xjp3K2{{Zc&yfyn!Dob5I1)7n~a9>i}I_* zUx4@YTLdXpD3p5$cHOdw=QieF_$N^ldJlx*W5(S;3qk!;oCIkLNJq$_XZu)YemeP3 zS#kT}BhW!V%mzI6heNsjwCRuHvCIhm>_kWXSKN(V@bU zgKB@j9NN`iIJSEfX_ri9qsY;WeHf3}=o3c?Qh>KAXP5So2O0<++L>m73&SP%w%vJ^@ z0|skn(ro}q3OP3C-7rt1gi!w8epTf zewB|5*S|#-a4YX7+7ugWQ|YaFi&r~MTVhLU$P$R z@3<>w<+6U|Rl>uD^n#55l!?76fZ-F~B!J(P_`>5F#!iWQXU)xk%SBbdOYXLk>g+QkGU>qo|@V5ntMOb-h6gu)#e`46t(Uy(&5i1 z;aHzp=mVJyPm-IO60gQ+7_%Ia)=vcUD4uTH6U7KX;jmmP8*NiL)`s&^?)mnLQX$PK zSAdjXV3T&V=&t9K%XCdi-&JrGyEVz%KpZk|?z{nmv7zIv)IU?Y-vgnw5*}}%HWl=z zVU?O8URJg#p#JnSvAC1y)N9kF&ABrMU`%|k=TdYh@NwZ;sRyvE6t+yJbAg ze6W8F|1WJJyjH<@g?LotZ_$eTU=)FPz{^jQGt087^Wl>b<`I^HX=0LE!!V~w+n6tq zPY3Vs7&vd2ZkV5xzxkv%$2q2qQDS0hYgTgD)^V$9lw%T_m6AXW!e-hy&vdfYp!=3} zxU0Nq{@O>!k$sO6Q|+3=?#I6*vPIdAp353H)Oz--YCR#w>g@~S)gU6DBgVqu;@Slx 
z^`NRNw^}u2dg*!YaeWSW%O`kUKj1CpLuO;q>!3NKQ|<57QURWY z%M28Sw+ZAfMJplocCs#Fng-RTY>$5xEaj!L)r&dss5(5-w+<$FFIs5YuQhk9e?3>d z@Nv3Wq-mK?Rm%ji{__!8CiH!5F48>d<2g1ht1nQ}GyjOSWE_DBMUtF+Rp@Cy3Xn-C zEu0ahq4XYH-q?^r<8~kVGl{BJ&$iI82r4O%)ek-c=@5ea`^y5}tAqa14yg+L+5|c3{tH1n zVYf@cM}eW6eZLZdRv-%)wuY|ud-hun(o{bl?#CS<`#`!tFpK)Df59H74mlzSHU|3l zrW*LX4)v(Fe;Q@tcCakd%TH?o({uMuZBYnUCa-4qA*Yv+&4cx$r*-_{9%XnGkBS1! zG?QZ2iB@%|y}#X)_{jaqwjA%g_9A&=No_`$aCET*AXg^fsAsdV*>}^9BTQZ9VgO#+ zqH1Wdc_ZcLiu(_k)ebz$*rbrI1Viouq1+tL`{J&*WXfYrTQAby-j2zvN|z<@J0ii( z);qKnR`{jyJ7rgEY%7Rz9M4*wO^o(2x=WutTzsh)=nOVnS9$;4bC+YagsK za0}|fFlyoivGuL&;2vdkh(`gywBV>}aB`~*<_%Ixv!WY7#)>2rQ-SnzXMfy1P z0Ih+cM>jF0>}$v8Omr?-)p#rk%h-Vu=di9A==NIlH$Z8HWu??MYmIEqc#!(8beRiz z+Ise&Q3A`tR+cWWTCTlOUOs(9TKg|PkiWlTwFQ3pk&KP6K8O2Kh6plJ0o80}o5^sa zLo84?aUGqLVL3W%$ks@2dd1wi;BNQb^8raTRKy9nqwGq@S*b%%g>*w`t%Ju~s7KZQ zX;?>cvD7wMKk`gprN_j&{CRK+VSu)>uWcvkx#v2gSt(<9R!S_FJTv9}9d?H4 zv^4k83#_Ad&F5iRDO;%-HFvA4e4J;;U-Jc<-6iLLgE=`Iz;TOy;>o-bf)8LSAbH`; z2*{P zcS6hS6Z0(|Pxfspqglnu^c_yG6)=)u9Pq6O;cY5I@O>;iN{-~~_TFU(O&AU!ip*^09(RR=*m&qlwm4;DDxdR9`gGb z*jc*>2MfNaQXF~%iIb2=g@gX)ibF&M<|03Kfq$$(%R+x?fY{f8yL|7%hmJx-IC$te z0vPEBQ-U7?1m8f`;Dgd%&fv$6AI8965d(HEe>VhjPCsa&5cK%4!4TVmEfALL;j=r` zrbhlW%8%o-hv~G+*h0I+#w2-y`!|a#Ha4k@7lG?Jl$^FL58*atdNiB5$0AdWHZAo? 
zM}%l_)aQn^)8#t9_!^osCDGyM@dLQK0nWTDDSolJO<3xrh*G#Nfvcsr)v={Mv*+xh zD@L`t(jVB=qa~d;H}7B0Y2pm9Us~^&&_D^h{c&0>F5Lk=gK^}n(Gdw&#!q)gySRG} z<+@P3;FTO)Qws!InsScxhW-&+vafsrKuig{V%)=0T_kT2*>Ur9om4*Om{-|>8zxsI z)$#%Km}8sT!}Nb_AOtt;MPsN<&HibGcg+f%I5M8nO*~FZEHp_LBjrSresRrIFR zM+Acl7j9E#huYL#Od>k<0y~G8S$WjaP~!>CP>FsGmUca#fnhwh)imZ`KTysQP%~dhaT|+}=j+e4u6Ag3o3oU*;^QuQa4t>;vCA$K*2Y45Sh!j)2 z+QHp0l9SruI<&@XUWRqPK=8#wN=I`3_?hFZ)c?SAuM0x!2Y9@N+SJ;ghShS=z0cws z%SfrjY^gx{J7LPT*b5(999v4x-dGF_BiMU_pE)cmp{s=TJfPo2R?mIPB<^%0-6uUp z!VRtx9t_da0V$j;Ex9&h@u zt{jn9Uw!Ni+!bCr`N?EPThqZxVbNZBPr-#ty4T0lI9@PK9jZx(4mkEGsQov>cm=!R zBK;PvJ3cc=Vw~m0(mS4K#;q%)WZCuDEp<#iWi^~Hp1uB63F1*R#P@25%S8j9zqN{O zMO7S;zvaX}okxqkf)_+=IfhFB^(ZOSAaOGyifKfGImtR9A(fT#MfV_+6Z;<3-KyW8 zT%;Rk7Hhy8Jg4HMqbgW7F)UAq&o$p`aG87YsBBrr2wKnHmVRB!PaWT`dxxxHHZ6p) zP$kXX&QU~!!3gl9L$!O2>gGr`V^dDa-o!18crEWq1hx>?JuGAHSta3PkNQ`!uxW4R zhIkb2>EFWZUwD+-W}3jw5cc9RnKL@ivplsCtbMP?K8@_Fert~|Wv|x;_b7}1=uyg+ zI6>}g@k{LjC;=%?T>@VUV8;3k_DC98TT%82`m6vr90SjTo(f-a@zgJ;p?t4?Wr);R z#*a&HsQG%mXPQ;mfk)M#zxYbWfkN6{W|ib(+xhG=`T~MN@R#-Hsu)QL?j@Q>WH~qa zMU&~{r?Sj+T}|ybh;PTY#OB(VV=OkQZDp||b3_4Mn^(u(xlBUVhyAhSJ;$}aX=BOi zkPQ$>O^8__R01x5YSz`p6H%U_gp$ZiIz5)w%<|sY)eZpb zvDczW_e}ELf+mTE^-aG8uZwVxvOJ1M0f5Hy$=uk_Bg9wJBwvLPNL0x`cqgx6oY0FB zbn+~!da^wrpD_IO!H^~P>%KLi1{}AgnB2@Kk21;f+0;TW2u`l19C*}&7%h^8C$8_j zd@uPo+uakSM{tcJY$u}ZQoU1!*S=_cM1q&4YMAUYCp|OWs7{!D)Y`H`Kefx@h;T-u zh6AA$C;ksXSmG}$P}1dpjf1;Ri)3%Ch@iF6|m*%B4s_^L-oSaTB%gYuf~Y8AyKc3 zMN8&Aue~ovi0g4y>K`FJ=z!4L3XiuCkHV$<)383p-`2lpvFcMFD06R*TA!);dbS&lEtm86b9%y&Zvq~IE4q?pok7=Qh%7>Fvl~TRar;(I?ntm zJ<{rdO@Zn|KVCTz>ktiAmP7Z{9aQcSA|z zu@`Tn4I^y(02XwJZ^o1se~hPM2(c-dipS2^G%FeUKIm%;P)kM{qfBE93X$Nsghx-m zM+{wp+LSbEkeT6#mbiP^c(?@(<(KJD$}}g`#C@A;ttC{qihK5^o3?jgTGVqs$IOq^ z6)iXw*W0+D2_t&yh-?+C$Oln-V4f-a+KIwr!+fPVf|-I*K=>W^$^?aP8 z4F#)!m(h$%(ME zrQFw&Bp5QWYLH|(xb#t&Sp)!mdq?X&jIqgW*9a-0N7S3K#Dj+1K29^m=oqL5Q@Ozh zHZ@BhB2bJ_$y+AdQRVi*&I>`88_E5;!2n%2X{WyD8uk%cW+BS;vpfIxRYSd$>2gT& 
zpr6sKEK-Pc5!;;P9T&Oh=0G>k4(swd4p)SXlyU4yhtmuj@r47O>3)nk>+*Pg93KFL z&CKE+Dx=PbqHp8U!i(pIFG$} zEJq+{s^@1-fixT}drz;}e@Y;v9j;1y17H>C-g`&bPlo+)9zPU+PKAM?$N2G;5q~^0 z(Eg#IqkdV0;D>^++yzuTlfqPDcEBvgVWbST)8Kh!dFA?3xNsGx$zDha<(7)c&Z2C&Bk^{TVdx8`w37bSTeGjIt zvO4j_c&z5RpxD4!D%omhfZgj;{8r}ru}%HL?Jo@wg4_3^G1R8y|1`o%o=5~xis$$Wop0N zB{s^(V75A@kR!v+KfXGH?Okq<9yb;N4l#zSd^Wu;sQ zjoW1iw_1jgZiU<*C^~TpZE0AoP*prLr<#e&5rgfB@wQ>I;0Ce%cH2`PbF{^Cp`2pP zi%a(E%OvQOA77v6ex3^?q4!oKQ+zTZvB{*{lw*?Cd>J38Z9OUM>Q>gsKO3uboR#_o z&R-fJw06MbE!3ve{xqz3&O1skSwGi^x!uNQ=+c%fw8-oP>e`F9L|&;&_JldW@0GHL zWu-j)W|I?xf}GQ4MI_d4BZU{-@n~A+q2}6ilf1Q z_9;e(g{LuLBdbp}MXX9|SU=6#>zC^?kI1WrD5GrjEsz6lsA<7F>?i7jFR7oWp=fhX zhs%zf6YnEARFe)JaO_b~kLrZ+3h^kt-=fv)$$*^Fa?|>NDS7`*-VMScZ14wfOg`2H zF7C$GxJfsNM{S-Y)qB0XQcM-7EDG)v;|@{@xbLi~a3`JPZvCk<_w1k^C4(9?I!!`r zVoX4s5bH}`?9f7;9~~pU?@>cd+llY38Ly*3_e`#Rp&n)STX_8oj|!fR(Nqt>nSJEW8g|X= z89~iyL{R?XMZr|00IHhZwbO8qa`=xPrRJOVbQ2M{&+0*H$%GJt>Ql zV}2-Yk35W@dpeJxpTua{ikI{WTK^GQZt=PH#2^2$b}drrGtaPXBZh2Riyg+fuVmRw zr8}`pC_p$qgMDVUg@M5tqaDfJ7_Y+Ms!lRl!&i5z*R+;R5v&0g$M{0&ZH|dWt)`^G zW!dFtfyA+DtApv(Zv0noG1?0MO`HTDT59n%Hl<9CnT_k6K-{hE_&&o?n%Z^q4}g&(Q#!F~ zXTaS)ay-^L%SZ9UM#mntPw+o)Aq02tMPsN(J^s@O56zD#Ik%!V^K})=O{Iv4Eh~P> z@(kE9 z0hq`pZ8}kp5c261B$1=TMX%f42+75xDZJY263>G<0sDm>SXL^wa0z#}P3UuBQ+8`q zC7v$h#Q9m}VD?S*XSR9Vy;Mj2MvDy7?#Yv zp1}oj8JcBHQaMC#oK_pqv}x?|+)+oyLV9CyoR#`#O80vpwD!Q`E!3l) z{%KgnzoB(W-}xjvYBa`l;tt28gg@VMNRkf5eKP~dO{x~2zhtGH;8`irFIht;7rF{n z_-#c{W}nS8hUn8nvQmh^r`%VRhO7MflBv1GcOGT--oV#S+d}+k-!FBd{o|+2IDpHq z$Ce?5Etr4ZUbFe>To3y||7}r(Gg!QzV?0YNXIY>z7WM?cge!Mv-ab<1Hqwaq@v$)+ za0dy5`@KRm!O-58@m-=rJ7SFGA0OR)psA^f^dg{PVp7iH3OFKIk)m5x(0j;q=XOWL zp|W)7fWP^auzxlGuWTXIet}U7v8mACVzzHue&~%jZ6Vg`It7@VDh2yCeY;4b=0&WW zEq&K4KN^TlsogS{qV5L+$D<|#Kd{# zsf*XiK*U9Xo@I5OF=wPs`hA-!ULJ__S3LjnDc5vd)3pSdbEp%75``pu<3ru5{d4TP_sslge4Z$n)MwJ{qm(soXMHxj|F9 zjwUMdO6v@YSEC9F&f<`?9^2I6?+^%Ey?gUF)TUy73$K4+Qy}56uPJ^z3N%2z_S;j2 zQj-dC4?4d1NwU^?@Q5c1vcql4`9Ikd00c%vnO;0Yoqf|XJ#_$0V})qGXKR_V(a8TB 
zal}?c82~VD``6AVxPf@{H`~jWI45{L8I#RsNLAvrBYPX{-|`*URC|TK%1bfrbkrxK zyK6faOgRfvExX!+TxT8zxqo@~4zZeGr(sKe@TxDD*a9+c;4&ak>4;*BkSOdqJS&dI zJQG!vNJ@mVQ`(ojG%=jaoKgmjlRt=u(4KQZqy-FyW3b=*gEc`r;AR8MWr1K018Mla z{2)C--&^6X1BUJkU4&q3=xYBp5Kv%Gg@CuiMj;)HfxrG=&f=i=9}0*F;JqIBjz65& z!5$En`{1)X)TUnlX_WJ1CXE-H6lbH*!l&MNE+KJQu2CVlx}MGR9ei&dYCj6MDVL+z z6ae(^3@UPHy=kG&X}?ahFia!u-?dF$Ap6;{A08-axQWwbUlJ&0OJSuN3PR2AWf z1P7A$JtNR-&v(_jcuw8-+05{~OU4B1o!VnW2jB#{%M3Wl&NE%n-k^EqpAe5pXx_X zkO@Y?z52{;t(HOrIZd2exY$^(<9B{g?UrGr|lpq$8rVoGF!)!>TlK}gITLr_{_S*fn=2a?>o%tGAk zHwzOQ7B7xjv!8TA$DCO<)6JtoNpCtLt)fhFie_n1!HWEM#9lr5>L{zPZWgbnrC=41 zNkHS}69%LW*9|TqoZ@ZvWgB~N#?@#L-8WJ$thRWx0p(3Dt;Cb#tkge3de8x(bpRf3 zp*B_ir(vzVmHG7*p`mGyN3EmFQ09|mM%>!Rd`=qv;|nZZEJoe%tduJ(D|IvMLD;T# zax~zS1kQNoYjwxvE5i?2sX2qT=Z!6rB8wEM6AhuGAuh=N0hS}VT<34LNnxCcFaTV> zP?gK3K0SHMV-Eb<`&IPI4Hy&c+~7=jiL+Z%4IGMfEGehObH^= zF!Tj%-h#)>@l`nN(%&DE*Es5UEm2Z*nlJcr`5bbay6g2Hg%SqoMilU4%eQV+mmI1| zhYmRQDCl2X2VuNIJgWY;XuTjDg$6=Vc0*bavFnc0UHf+TvEj~Qz$r3O7wjM& z1q}FpTU@NZn99K7d2W}1xngEcl4fv|XN*$Eje57(5b9AkQG;q665=ie+M@9h)hK=% zzRcl2=J0agqlPrIH(Q+8mgisS)L2~bZWt*&Ysds#zV)FfXh?C|5$*qHMBQ#Lp`ePxA6KG9`)XyG2XL3)RKIwOfli2klWV)MI_zV zm02I^)k@S(6ZXM9%I!aT)DsVZTuI!zYoogKQ_84@9m-tZ9@GvNZ*J>3U03Q%#0T)M zh18%iUO)Wjo!W?y`Np7Y2=ubCp`mTWAC-Sg=t_<>8K z?tE5ubLH>xA+Z;b7 ze@i~yCp@#BQdnym5+`B4f&xMS?OpBf>hjCwhxv{W0^-3CFmwoP1uY;T_`^4_HSD-Q z)M2``XH3}NlUe&_1lbDxGt;2L!B>OT$NuN*kpego|MgkZIw4he06{+uv` z1$$T04K&r`N0`d81o$nXLPBCaKjwb{Ofp%qeWBi4Fp`(Y3UAY!;n6YaCqfR$SOz>K@Ka zzCZ})SAMCEEUtRL}2MJ~A2R7l+6e?9&Eb+K`K(LAbC zfO(CbT9a9%NN-S&^5g7L`QdAE;#!+A?A0#?lfyB!*P*n+vQo-5Va@~RB)_lK;kEp+3Hk}TE}rn zC?u17gT2zfDs%BudH)5}AXF7A12RpsfZp1(2R0Soev9<|qo&QFP?5m^*&sLNJPh{v zi3q<2B)7r!p0aO8q;~viS!H4vvNHHA(;b$_#2e2>M3lrgcHAy4@i6P8JqbBfmJS_o zY*Wxr31cv7AvU%3Tg)y#-=-y$?iaIad)TC9x)>fdTdQgLsfo`Yd6)32Ft-E5rZlEP zjj}@k9s2v?@^dGmyYVmgu+rE_+A?Oyt$a(FI0>~WIn*Fl@@3C@^{dPw7y%<6E?07i z2m8kC+thtsy#j$#r|)8X?2AOb9y~dc3~=-kvNyy*WsH-WGi2O7Jv9j?4z}y zwpY@rA6v|X2`8v&Sdj|ex~40j#RLFO?d6GAtMI%&&p*i7pmb~%Bi;3T{m=+x@E 
zQR={YY*YU#B7WVQzach-hxS`|{R^9_L7-N$dh*$o>V3;snjYDz=O@l{nGo`RmG9NJ zN4$Ub1Kg(E|C3Ec0lhVLqZR&|R~XBiI8^S1*5Gc1ON`rQX07MnLyR@_&jCd02$;jT zrN&6J3Z@FCBh_TJ1XIGjESkY<0o$Ub;bjLlRgPM1RM2Nu<=l%joUX-}pF2W`X0@?d z!jXiucK6bS+#|9ayU>{We%h79kc4H33g=E%Y-sZc`pdv#E&3B{!-y`v!Rc(r6sRc);CA zn%A!&oX_NG4NgS&A~_(|WrJODiUUhi)2jUCo!-r-tMj?i_5Cuv_!->OR`K7Thn_mB z(#Bp)Noz0!dD>Kqvcmo%gEmYNoP zv!T^jLThIINl^eb1L*nI*rZO-%prxr)#Zhg^!!9+zfQAdx7U*y=PpeXcU4I`S6ZsAcJC$*H&lg?H`cL&T2iMK@s5kD9{79DbGV~ z%5~?>&P%xe>91jJr z=ZLh%wm(1hbpxCJA!kJZ__Xf3e6V5XyK<>7$ilH7y(c%W00Ma_J@fsX!rM4k-jl_z z+$CgH#A4bavYkKcMER1WkMTGw^}k7l_Cjc#gvVQmP2o}hX;{IiberdS=gIU-Z>O*B z=*lI^8aff&u^j$PdYPD)0+a~PN_oMuQrRwMc~+g^s_8q`I1B3}Zh-~;5QFj{9xb|D zf(-zluAJ^%rqB>b>Pz9eoD-nsa%Vo!I_l(<@8=y^%D7%NK<{DZEDAnyL(Hg_Cw)Oi zHD9|%#+4U%Mh}5D@0qc}4m{aYC2n3EPzDsFNocua}>h zKP_`qUgwcz^AlCnEIa#2>c=uw$nSGq@@aQM-5{Cw)kU0a5<66r4ju68=Yzv9@c(Bc zAiPe&cm=!RG5r>;4#}qe-qSaRZp0(zubmi5=MnXFW)qO3vgAgMeo1EU0r9BVOnX+g z8E(-i>JDy`<|H>qNpj(g#*@Ap>u9D_m^Ri>kCI0X5@ap?5JG8ug{bV7beAcwrTV~J z#lA=BMebZ_daIU!UvODiCWQcGc3)R>fTsPFch8!W=u+#mBgVq;N0@1dj|j~NAFW;s zGpa_wyLXCB7S~M7d(MY@oX>{{z$}|cAlkV(ZGBUae=xY!;wj(KtsN`-+GS=MnlIuz zF~=Tt_!|Mj*7V-Y4fQCV-@@x(c+^+~r=Vn&yK8o-Z+lVGz3g!k-eAwnTuRj!Jdzk+ z-etl)%KJZh)P$IJ_Fx1VvwK>fN2FANZr)5hUc7MOMLa_@?36)5S3tl?WwCUz2sme! 
zr)E}+iGJoqQBs&x6t0;%oxCgInd=80^*oRc8FyD#oWhDXlHp^pavkdVSS#o2$!Iyk z=+)b2pC6IsTEZHOQqk_%&dDKhVw=&fRR!(yA;!|t*a>O0B^#g401EeKnhe>9TW^j~ za$#PMBG+VYH8@}9k*Is`4w`iG&Rf9FpSh?dDOGnRk98jZj=HIsOH}u;wV4H1^U6Kc zQ)N^8aS{Z*=5W0MLyOP_4EeWj9nktu3tUp*S|bGOft7!Dm;aeX`dNvTsr<#QB|iUKxU@XxNaayFf~ z@rf_4wD8tM-lAufbI64}tp?5U9-=$9 z4uQ)qP=Zs}k@x}moI9mh7nowbK2XdNxow-(%gC%C6F*wXdv(HTm{;eh&+>QHGHo7y zQ}4j~x0aH>((^i|cczq~%;oNM$1wX0wLyr%4b^ zO;xt~>_*%bjy>upSpT*HA$WE#8bduw>Q5v5rQ^ZWH^H3J;uj2q?;J*u`S2E;HU)ZW6uJ5{n!_4e~4)ZWCj($*%W8`W0X1tY5pMnlvY?)O2MotDo%fn17|@9`IN+|sD?~6 z)sW0UfPgU4H_M=AelSfyL1 z0!g|IShtlCEQtnggP%Fr%%e}mZ7C9>F7``S$`78EGU1n_3#8c)uB$%>LK8Ay z^HP}!gk`0|PR71Nt<}R02~%MwA~IG=&RVDBwI})Uc zbK%Aq;TyeZ_fzO|XV$fWEJxa6*i%wrHl-r&YrABjtsL(@5vJ&}L37!1-b@8eR#161 zlTKYfJNt;#vaNI#Q<@D&49BSP`#og6eeau-Kn^a`^n^)2abiC0nL}mi&;k3O5`Mal z|F zjXW6#s;0qgc-rX5m#L|(KSr6~yAMGHubt?9H04aioDiUNgO@^^;0 zt|aY!$M5NXw5iYT1~TSx--$ z@6)fSBbgYo0a1_bB?_PQ<_R+;1rJmLBk1^+g2BZ!3`Dt}$>f{H=!p1R53A;0csguw z>Sw6l?AGSaH7NleE6ye_x6#rr5Uy35%>#$=SoZR>Jy%HF5E^dem@Yx+|Q&xW( z<@1i%t|QgH-<+=GZ0lsoaoVaYTpsBz3Y2LLt($Om>Vn(Uv!mHmFRns_*x5MT>nb>U zO^pjv8i{x6z|UVr8iq0kUr70K9x!yhGBRu=cilS5U!U28Dk_ymF5z-jLFi=HE3C99 zDzJO`VK%jz#=mAepZMB12*W9Bao{ZYqU4DaW3FYdoG3gZ=KAj*k>C=ZN{91l>iN7W zpS@q{ljt%M;d7p+?sR|4x4>ePMVbKc?2NS4;)f=;xGVe+jxj7HdB`C(b9;6xU55ngc3Kb!JRlAaXhEh$yzAaA?sH;D>O#l)IlH&xHZklhIarn`BO)T?Vx zG}-!D&$gr}y11 z&kH)-V|Vi(e^fFj(JzQ|>4XJXdouX05j-mu0Lw}dWIRB?ckp1zX%ZmMWfx>$ktzD| zUa2QpmDb}dmB Date: Wed, 8 Oct 2025 12:29:51 +0300 Subject: [PATCH 017/277] core/rawdb: remove duplicated type storedReceiptRLP (#32820) Co-authored-by: Felix Lange --- core/rawdb/accessors_chain.go | 15 +++------------ 1 file changed, 3 insertions(+), 12 deletions(-) diff --git a/core/rawdb/accessors_chain.go b/core/rawdb/accessors_chain.go index 0782a0e7da..f20d675ff8 100644 --- a/core/rawdb/accessors_chain.go +++ b/core/rawdb/accessors_chain.go @@ -664,15 +664,6 @@ func DeleteReceipts(db ethdb.KeyValueWriter, hash common.Hash, number uint64) { } } -// 
storedReceiptRLP is the storage encoding of a receipt. -// Re-definition in core/types/receipt.go. -// TODO: Re-use the existing definition. -type storedReceiptRLP struct { - PostStateOrStatus []byte - CumulativeGasUsed uint64 - Logs []*types.Log -} - // ReceiptLogs is a barebone version of ReceiptForStorage which only keeps // the list of logs. When decoding a stored receipt into this object we // avoid creating the bloom filter. @@ -682,11 +673,11 @@ type receiptLogs struct { // DecodeRLP implements rlp.Decoder. func (r *receiptLogs) DecodeRLP(s *rlp.Stream) error { - var stored storedReceiptRLP - if err := s.Decode(&stored); err != nil { + var rs types.ReceiptForStorage + if err := rs.DecodeRLP(s); err != nil { return err } - r.Logs = stored.Logs + r.Logs = rs.Logs return nil } From 064ab701ea98dd4e32b331318ca7f2cc5e5edcaa Mon Sep 17 00:00:00 2001 From: Nicolas Gotchac Date: Wed, 8 Oct 2025 12:50:03 +0200 Subject: [PATCH 018/277] eth/protocols/eth: use BlockChain interface in Handshake (#32847) --- eth/protocols/eth/handshake.go | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/eth/protocols/eth/handshake.go b/eth/protocols/eth/handshake.go index 824e49fb2b..bb3d1b8eb4 100644 --- a/eth/protocols/eth/handshake.go +++ b/eth/protocols/eth/handshake.go @@ -22,7 +22,6 @@ import ( "time" "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/forkid" "github.com/ethereum/go-ethereum/metrics" "github.com/ethereum/go-ethereum/p2p" @@ -36,7 +35,7 @@ const ( // Handshake executes the eth protocol handshake, negotiating version number, // network IDs, difficulties, head and genesis blocks. 
-func (p *Peer) Handshake(networkID uint64, chain *core.BlockChain, rangeMsg BlockRangeUpdatePacket) error { +func (p *Peer) Handshake(networkID uint64, chain forkid.Blockchain, rangeMsg BlockRangeUpdatePacket) error { switch p.version { case ETH69: return p.handshake69(networkID, chain, rangeMsg) @@ -47,10 +46,10 @@ func (p *Peer) Handshake(networkID uint64, chain *core.BlockChain, rangeMsg Bloc } } -func (p *Peer) handshake68(networkID uint64, chain *core.BlockChain) error { +func (p *Peer) handshake68(networkID uint64, chain forkid.Blockchain) error { var ( genesis = chain.Genesis() - latest = chain.CurrentBlock() + latest = chain.CurrentHeader() forkID = forkid.NewID(chain.Config(), genesis, latest.Number.Uint64(), latest.Time) forkFilter = forkid.NewFilter(chain) ) @@ -92,10 +91,10 @@ func (p *Peer) readStatus68(networkID uint64, status *StatusPacket68, genesis co return nil } -func (p *Peer) handshake69(networkID uint64, chain *core.BlockChain, rangeMsg BlockRangeUpdatePacket) error { +func (p *Peer) handshake69(networkID uint64, chain forkid.Blockchain, rangeMsg BlockRangeUpdatePacket) error { var ( genesis = chain.Genesis() - latest = chain.CurrentBlock() + latest = chain.CurrentHeader() forkID = forkid.NewID(chain.Config(), genesis, latest.Number.Uint64(), latest.Time) forkFilter = forkid.NewFilter(chain) ) From e42af536c552538a8ca8c353a26669fd2e9eb150 Mon Sep 17 00:00:00 2001 From: Alexey Osipov Date: Wed, 8 Oct 2025 20:23:44 +0300 Subject: [PATCH 019/277] cmd/devp2p/internal/ethtest: accept responses in any order (#32834) In both `TestSimultaneousRequests` and `TestSameRequestID`, we send two concurrent requests. The client under test is free to respond in either order, so we need to handle responses both ways. Also fixes an issue where some generated blob transactions didn't have any blobs. 
--------- Co-authored-by: Felix Lange --- cmd/devp2p/internal/ethtest/protocol.go | 6 ++ cmd/devp2p/internal/ethtest/suite.go | 97 +++++++++++++++---------- 2 files changed, 63 insertions(+), 40 deletions(-) diff --git a/cmd/devp2p/internal/ethtest/protocol.go b/cmd/devp2p/internal/ethtest/protocol.go index a21d1ca7a1..af76082318 100644 --- a/cmd/devp2p/internal/ethtest/protocol.go +++ b/cmd/devp2p/internal/ethtest/protocol.go @@ -86,3 +86,9 @@ func protoOffset(proto Proto) uint64 { panic("unhandled protocol") } } + +// msgTypePtr is the constraint for protocol message types. +type msgTypePtr[U any] interface { + *U + Kind() byte +} diff --git a/cmd/devp2p/internal/ethtest/suite.go b/cmd/devp2p/internal/ethtest/suite.go index 47327b6844..c23360bf82 100644 --- a/cmd/devp2p/internal/ethtest/suite.go +++ b/cmd/devp2p/internal/ethtest/suite.go @@ -196,6 +196,7 @@ to check if the node disconnects after receiving multiple invalid requests.`) func (s *Suite) TestSimultaneousRequests(t *utesting.T) { t.Log(`This test requests blocks headers from the node, performing two requests concurrently, with different request IDs.`) + conn, err := s.dialAndPeer(nil) if err != nil { t.Fatalf("peering failed: %v", err) @@ -235,37 +236,36 @@ concurrently, with different request IDs.`) } // Wait for responses. 
- headers1 := new(eth.BlockHeadersPacket) - if err := conn.ReadMsg(ethProto, eth.BlockHeadersMsg, &headers1); err != nil { - t.Fatalf("error reading block headers msg: %v", err) - } - if got, want := headers1.RequestId, req1.RequestId; got != want { - t.Fatalf("unexpected request id in response: got %d, want %d", got, want) - } - headers2 := new(eth.BlockHeadersPacket) - if err := conn.ReadMsg(ethProto, eth.BlockHeadersMsg, &headers2); err != nil { - t.Fatalf("error reading block headers msg: %v", err) - } - if got, want := headers2.RequestId, req2.RequestId; got != want { - t.Fatalf("unexpected request id in response: got %d, want %d", got, want) + // Note they can arrive in either order. + resp, err := collectResponses(conn, 2, func(msg *eth.BlockHeadersPacket) uint64 { + if msg.RequestId != 111 && msg.RequestId != 222 { + t.Fatalf("response with unknown request ID: %v", msg.RequestId) + } + return msg.RequestId + }) + if err != nil { + t.Fatal(err) } - // Check received headers for accuracy. + // Check if headers match. 
+ resp1 := resp[111] if expected, err := s.chain.GetHeaders(req1); err != nil { t.Fatalf("failed to get expected headers for request 1: %v", err) - } else if !headersMatch(expected, headers1.BlockHeadersRequest) { - t.Fatalf("header mismatch: \nexpected %v \ngot %v", expected, headers1) + } else if !headersMatch(expected, resp1.BlockHeadersRequest) { + t.Fatalf("header mismatch for request ID %v: \nexpected %v \ngot %v", 111, expected, resp1) } + resp2 := resp[222] if expected, err := s.chain.GetHeaders(req2); err != nil { t.Fatalf("failed to get expected headers for request 2: %v", err) - } else if !headersMatch(expected, headers2.BlockHeadersRequest) { - t.Fatalf("header mismatch: \nexpected %v \ngot %v", expected, headers2) + } else if !headersMatch(expected, resp2.BlockHeadersRequest) { + t.Fatalf("header mismatch for request ID %v: \nexpected %v \ngot %v", 222, expected, resp2) } } func (s *Suite) TestSameRequestID(t *utesting.T) { t.Log(`This test requests block headers, performing two concurrent requests with the same request ID. The node should handle the request by responding to both requests.`) + conn, err := s.dialAndPeer(nil) if err != nil { t.Fatalf("peering failed: %v", err) @@ -289,7 +289,7 @@ same request ID. The node should handle the request by responding to both reques Origin: eth.HashOrNumber{ Number: 33, }, - Amount: 2, + Amount: 3, }, } @@ -301,35 +301,52 @@ same request ID. The node should handle the request by responding to both reques t.Fatalf("failed to write to connection: %v", err) } - // Wait for the responses. 
- headers1 := new(eth.BlockHeadersPacket) - if err := conn.ReadMsg(ethProto, eth.BlockHeadersMsg, &headers1); err != nil { - t.Fatalf("error reading from connection: %v", err) - } - if got, want := headers1.RequestId, request1.RequestId; got != want { - t.Fatalf("unexpected request id: got %d, want %d", got, want) - } - headers2 := new(eth.BlockHeadersPacket) - if err := conn.ReadMsg(ethProto, eth.BlockHeadersMsg, &headers2); err != nil { - t.Fatalf("error reading from connection: %v", err) - } - if got, want := headers2.RequestId, request2.RequestId; got != want { - t.Fatalf("unexpected request id: got %d, want %d", got, want) + // Wait for the responses. They can arrive in either order, and we can't tell them + // apart by their request ID, so use the number of headers instead. + resp, err := collectResponses(conn, 2, func(msg *eth.BlockHeadersPacket) uint64 { + id := uint64(len(msg.BlockHeadersRequest)) + if id != 2 && id != 3 { + t.Fatalf("invalid number of headers in response: %d", id) + } + return id + }) + if err != nil { + t.Fatal(err) } // Check if headers match. 
+ resp1 := resp[2] if expected, err := s.chain.GetHeaders(request1); err != nil { - t.Fatalf("failed to get expected block headers: %v", err) - } else if !headersMatch(expected, headers1.BlockHeadersRequest) { - t.Fatalf("header mismatch: \nexpected %v \ngot %v", expected, headers1) + t.Fatalf("failed to get expected headers for request 1: %v", err) + } else if !headersMatch(expected, resp1.BlockHeadersRequest) { + t.Fatalf("headers mismatch: \nexpected %v \ngot %v", expected, resp1) } + resp2 := resp[3] if expected, err := s.chain.GetHeaders(request2); err != nil { - t.Fatalf("failed to get expected block headers: %v", err) - } else if !headersMatch(expected, headers2.BlockHeadersRequest) { - t.Fatalf("header mismatch: \nexpected %v \ngot %v", expected, headers2) + t.Fatalf("failed to get expected headers for request 2: %v", err) + } else if !headersMatch(expected, resp2.BlockHeadersRequest) { + t.Fatalf("headers mismatch: \nexpected %v \ngot %v", expected, resp2) } } +// collectResponses waits for n messages of type T on the given connection. +// The messsages are collected according to the 'identity' function. +func collectResponses[T any, P msgTypePtr[T]](conn *Conn, n int, identity func(P) uint64) (map[uint64]P, error) { + resp := make(map[uint64]P, n) + for range n { + r := new(T) + if err := conn.ReadMsg(ethProto, eth.BlockHeadersMsg, r); err != nil { + return resp, fmt.Errorf("read error: %v", err) + } + id := identity(r) + if resp[id] != nil { + return resp, fmt.Errorf("duplicate response %v", r) + } + resp[id] = r + } + return resp, nil +} + func (s *Suite) TestZeroRequestID(t *utesting.T) { t.Log(`This test sends a GetBlockHeaders message with a request-id of zero, and expects a response.`) @@ -887,7 +904,7 @@ func (s *Suite) makeBlobTxs(count, blobs int, discriminator byte) (txs types.Tra from, nonce := s.chain.GetSender(5) for i := 0; i < count; i++ { // Make blob data, max of 2 blobs per tx. 
- blobdata := make([]byte, blobs%3) + blobdata := make([]byte, min(blobs, 2)) for i := range blobdata { blobdata[i] = discriminator blobs -= 1 From 695c1445ab4eb26b630641a7bf6c27fceaa5fc2b Mon Sep 17 00:00:00 2001 From: phrwlk Date: Thu, 9 Oct 2025 03:59:06 +0300 Subject: [PATCH 020/277] core/rawdb: correct misleading comments for state history accessors (#32783) --- core/rawdb/accessors_state.go | 31 +++++++++++++++++++------------ 1 file changed, 19 insertions(+), 12 deletions(-) diff --git a/core/rawdb/accessors_state.go b/core/rawdb/accessors_state.go index 2359fb18f1..46aa5fd070 100644 --- a/core/rawdb/accessors_state.go +++ b/core/rawdb/accessors_state.go @@ -170,9 +170,11 @@ func ReadStateHistoryMetaList(db ethdb.AncientReaderOp, start uint64, count uint return db.AncientRange(stateHistoryMeta, start-1, count, 0) } -// ReadStateAccountIndex retrieves the state root corresponding to the specified -// state history. Compute the position of state history in freezer by minus one -// since the id of first state history starts from one(zero for initial state). +// ReadStateAccountIndex retrieves the account index blob for the specified +// state history. The index contains fixed-size entries with offsets and lengths +// into the concatenated account data table. Compute the position of state +// history in freezer by minus one since the id of first state history starts +// from one (zero for initial state). func ReadStateAccountIndex(db ethdb.AncientReaderOp, id uint64) []byte { blob, err := db.Ancient(stateHistoryAccountIndex, id-1) if err != nil { @@ -181,9 +183,11 @@ func ReadStateAccountIndex(db ethdb.AncientReaderOp, id uint64) []byte { return blob } -// ReadStateStorageIndex retrieves the state root corresponding to the specified -// state history. Compute the position of state history in freezer by minus one -// since the id of first state history starts from one(zero for initial state). 
+// ReadStateStorageIndex retrieves the storage index blob for the specified +// state history. The index contains fixed-size entries that locate storage slot +// data in the concatenated storage data table. Compute the position of state +// history in freezer by minus one since the id of first state history starts +// from one (zero for initial state). func ReadStateStorageIndex(db ethdb.AncientReaderOp, id uint64) []byte { blob, err := db.Ancient(stateHistoryStorageIndex, id-1) if err != nil { @@ -192,9 +196,10 @@ func ReadStateStorageIndex(db ethdb.AncientReaderOp, id uint64) []byte { return blob } -// ReadStateAccountHistory retrieves the state root corresponding to the specified -// state history. Compute the position of state history in freezer by minus one -// since the id of first state history starts from one(zero for initial state). +// ReadStateAccountHistory retrieves the concatenated account data blob for the +// specified state history. Offsets and lengths are resolved via the account +// index. Compute the position of state history in freezer by minus one since +// the id of first state history starts from one (zero for initial state). func ReadStateAccountHistory(db ethdb.AncientReaderOp, id uint64) []byte { blob, err := db.Ancient(stateHistoryAccountData, id-1) if err != nil { @@ -203,9 +208,11 @@ func ReadStateAccountHistory(db ethdb.AncientReaderOp, id uint64) []byte { return blob } -// ReadStateStorageHistory retrieves the state root corresponding to the specified -// state history. Compute the position of state history in freezer by minus one -// since the id of first state history starts from one(zero for initial state). +// ReadStateStorageHistory retrieves the concatenated storage slot data blob for +// the specified state history. Locations are resolved via the account and +// storage indexes. Compute the position of state history in freezer by minus +// one since the id of first state history starts from one (zero for initial +// state). 
func ReadStateStorageHistory(db ethdb.AncientReaderOp, id uint64) []byte { blob, err := db.Ancient(stateHistoryStorageData, id-1) if err != nil { From a1b8e4880d24d5e618b0637defa380d75e30c831 Mon Sep 17 00:00:00 2001 From: CertiK <138698582+CertiK-Geth@users.noreply.github.com> Date: Thu, 9 Oct 2025 17:34:30 +0800 Subject: [PATCH 021/277] eth/filters: terminate pending tx subscription on error (#32794) Fixes issue #32793. When the pending tx subscription ends, the filter is removed from `api.filters`, but it is not terminated. There is no other way to terminate it, so the subscription will leak, and potentially block the producer side. --- eth/filters/api.go | 1 + 1 file changed, 1 insertion(+) diff --git a/eth/filters/api.go b/eth/filters/api.go index d678c40389..334a19f0b5 100644 --- a/eth/filters/api.go +++ b/eth/filters/api.go @@ -152,6 +152,7 @@ func (api *FilterAPI) NewPendingTransactionFilter(fullTx *bool) rpc.ID { api.filtersMu.Lock() delete(api.filters, pendingTxSub.ID) api.filtersMu.Unlock() + pendingTxSub.Unsubscribe() return } } From 11208553ddded439c708fa430e559675f25052e7 Mon Sep 17 00:00:00 2001 From: 10gic Date: Thu, 9 Oct 2025 20:14:53 +0800 Subject: [PATCH 022/277] eth/filters: add `transactionReceipts` subscription (#32697) - Introduce a new subscription kind `transactionReceipts` to allow clients to receive transaction receipts over WebSocket as soon as they are available. - Accept optional `transactionHashes` filter to subscribe to receipts for specific transactions; an empty or omitted filter subscribes to all receipts. - Preserve the same receipt format as returned by `eth_getTransactionReceipt`. - Avoid additional HTTP polling, reducing RPC load and latency. 
--------- Co-authored-by: Sina Mahmoodi --- core/blockchain.go | 26 +++++- core/events.go | 4 +- eth/filters/api.go | 68 ++++++++++++++ eth/filters/filter.go | 68 ++++++++++++++ eth/filters/filter_system.go | 35 ++++++++ eth/filters/filter_system_test.go | 141 ++++++++++++++++++++++++++++++ internal/ethapi/api.go | 8 +- 7 files changed, 341 insertions(+), 9 deletions(-) diff --git a/core/blockchain.go b/core/blockchain.go index 71eb4c45a2..b7acd12aca 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -1690,7 +1690,12 @@ func (bc *BlockChain) writeBlockAndSetHead(block *types.Block, receipts []*types // Set new head. bc.writeHeadBlock(block) - bc.chainFeed.Send(ChainEvent{Header: block.Header()}) + bc.chainFeed.Send(ChainEvent{ + Header: block.Header(), + Receipts: receipts, + Transactions: block.Transactions(), + }) + if len(logs) > 0 { bc.logsFeed.Send(logs) } @@ -2342,6 +2347,13 @@ func (bc *BlockChain) recoverAncestors(block *types.Block, makeWitness bool) (co // collectLogs collects the logs that were generated or removed during the // processing of a block. These logs are later announced as deleted or reborn. func (bc *BlockChain) collectLogs(b *types.Block, removed bool) []*types.Log { + _, logs := bc.collectReceiptsAndLogs(b, removed) + return logs +} + +// collectReceiptsAndLogs retrieves receipts from the database and returns both receipts and logs. +// This avoids duplicate database reads when both are needed. 
+func (bc *BlockChain) collectReceiptsAndLogs(b *types.Block, removed bool) ([]*types.Receipt, []*types.Log) { var blobGasPrice *big.Int if b.ExcessBlobGas() != nil { blobGasPrice = eip4844.CalcBlobFee(bc.chainConfig, b.Header()) @@ -2359,7 +2371,7 @@ func (bc *BlockChain) collectLogs(b *types.Block, removed bool) []*types.Log { logs = append(logs, log) } } - return logs + return receipts, logs } // reorg takes two blocks, an old chain and a new chain and will reconstruct the @@ -2588,8 +2600,14 @@ func (bc *BlockChain) SetCanonical(head *types.Block) (common.Hash, error) { bc.writeHeadBlock(head) // Emit events - logs := bc.collectLogs(head, false) - bc.chainFeed.Send(ChainEvent{Header: head.Header()}) + receipts, logs := bc.collectReceiptsAndLogs(head, false) + + bc.chainFeed.Send(ChainEvent{ + Header: head.Header(), + Receipts: receipts, + Transactions: head.Transactions(), + }) + if len(logs) > 0 { bc.logsFeed.Send(logs) } diff --git a/core/events.go b/core/events.go index 5ad2cb1f7b..ef0de32426 100644 --- a/core/events.go +++ b/core/events.go @@ -27,7 +27,9 @@ type NewTxsEvent struct{ Txs []*types.Transaction } type RemovedLogsEvent struct{ Logs []*types.Log } type ChainEvent struct { - Header *types.Header + Header *types.Header + Receipts []*types.Receipt + Transactions []*types.Transaction } type ChainHeadEvent struct { diff --git a/eth/filters/api.go b/eth/filters/api.go index 334a19f0b5..9f9209aea7 100644 --- a/eth/filters/api.go +++ b/eth/filters/api.go @@ -43,6 +43,7 @@ var ( errPendingLogsUnsupported = errors.New("pending logs are not supported") errExceedMaxTopics = errors.New("exceed max topics") errExceedLogQueryLimit = errors.New("exceed max addresses or topics per search position") + errExceedMaxTxHashes = errors.New("exceed max number of transaction hashes allowed per transactionReceipts subscription") ) const ( @@ -50,6 +51,8 @@ const ( maxTopics = 4 // The maximum number of allowed topics within a topic criteria maxSubTopics = 1000 + // The 
maximum number of transaction hash criteria allowed in a single subscription + maxTxHashes = 200 ) // filter is a helper struct that holds meta information over the filter type @@ -296,6 +299,71 @@ func (api *FilterAPI) Logs(ctx context.Context, crit FilterCriteria) (*rpc.Subsc return rpcSub, nil } +// TransactionReceiptsFilter defines criteria for transaction receipts subscription. +// If TransactionHashes is nil or empty, receipts for all transactions included in new blocks will be delivered. +// Otherwise, only receipts for the specified transactions will be delivered. +type TransactionReceiptsFilter struct { + TransactionHashes []common.Hash `json:"transactionHashes,omitempty"` +} + +// TransactionReceipts creates a subscription that fires transaction receipts when transactions are included in blocks. +func (api *FilterAPI) TransactionReceipts(ctx context.Context, filter *TransactionReceiptsFilter) (*rpc.Subscription, error) { + notifier, supported := rpc.NotifierFromContext(ctx) + if !supported { + return &rpc.Subscription{}, rpc.ErrNotificationsUnsupported + } + + // Validate transaction hashes limit + if filter != nil && len(filter.TransactionHashes) > maxTxHashes { + return nil, errExceedMaxTxHashes + } + + var ( + rpcSub = notifier.CreateSubscription() + matchedReceipts = make(chan []*ReceiptWithTx) + txHashes []common.Hash + ) + + if filter != nil { + txHashes = filter.TransactionHashes + } + + receiptsSub := api.events.SubscribeTransactionReceipts(txHashes, matchedReceipts) + + go func() { + defer receiptsSub.Unsubscribe() + + signer := types.LatestSigner(api.sys.backend.ChainConfig()) + + for { + select { + case receiptsWithTxs := <-matchedReceipts: + if len(receiptsWithTxs) > 0 { + // Convert to the same format as eth_getTransactionReceipt + marshaledReceipts := make([]map[string]interface{}, len(receiptsWithTxs)) + for i, receiptWithTx := range receiptsWithTxs { + marshaledReceipts[i] = ethapi.MarshalReceipt( + receiptWithTx.Receipt, + 
receiptWithTx.Receipt.BlockHash, + receiptWithTx.Receipt.BlockNumber.Uint64(), + signer, + receiptWithTx.Transaction, + int(receiptWithTx.Receipt.TransactionIndex), + ) + } + + // Send a batch of tx receipts in one notification + notifier.Notify(rpcSub.ID, marshaledReceipts) + } + case <-rpcSub.Err(): + return + } + } + }() + + return rpcSub, nil +} + // FilterCriteria represents a request to create a new filter. // Same as ethereum.FilterQuery but with UnmarshalJSON() method. type FilterCriteria ethereum.FilterQuery diff --git a/eth/filters/filter.go b/eth/filters/filter.go index 1a9918d0ee..02399bc801 100644 --- a/eth/filters/filter.go +++ b/eth/filters/filter.go @@ -25,6 +25,7 @@ import ( "time" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/filtermaps" "github.com/ethereum/go-ethereum/core/history" "github.com/ethereum/go-ethereum/core/types" @@ -551,3 +552,70 @@ func bloomFilter(bloom types.Bloom, addresses []common.Address, topics [][]commo } return true } + +// ReceiptWithTx contains a receipt and its corresponding transaction +type ReceiptWithTx struct { + Receipt *types.Receipt + Transaction *types.Transaction +} + +// filterReceipts returns the receipts matching the given criteria +// In addition to returning receipts, it also returns the corresponding transactions. +// This is because receipts only contain low-level data, while user-facing data +// may require additional information from the Transaction. +func filterReceipts(txHashes []common.Hash, ev core.ChainEvent) []*ReceiptWithTx { + var ret []*ReceiptWithTx + + receipts := ev.Receipts + txs := ev.Transactions + + if len(receipts) != len(txs) { + log.Warn("Receipts and transactions length mismatch", "receipts", len(receipts), "transactions", len(txs)) + return ret + } + + if len(txHashes) == 0 { + // No filter, send all receipts with their transactions. 
+ ret = make([]*ReceiptWithTx, len(receipts)) + for i, receipt := range receipts { + ret[i] = &ReceiptWithTx{ + Receipt: receipt, + Transaction: txs[i], + } + } + } else if len(txHashes) == 1 { + // Filter by single transaction hash. + // This is a common case, so we distinguish it from filtering by multiple tx hashes and made a small optimization. + for i, receipt := range receipts { + if receipt.TxHash == txHashes[0] { + ret = append(ret, &ReceiptWithTx{ + Receipt: receipt, + Transaction: txs[i], + }) + break + } + } + } else { + // Filter by multiple transaction hashes. + txHashMap := make(map[common.Hash]bool, len(txHashes)) + for _, hash := range txHashes { + txHashMap[hash] = true + } + + for i, receipt := range receipts { + if txHashMap[receipt.TxHash] { + ret = append(ret, &ReceiptWithTx{ + Receipt: receipt, + Transaction: txs[i], + }) + + // Early exit if all receipts are found + if len(ret) == len(txHashes) { + break + } + } + } + } + + return ret +} diff --git a/eth/filters/filter_system.go b/eth/filters/filter_system.go index ecf1c870c1..02783fa5ec 100644 --- a/eth/filters/filter_system.go +++ b/eth/filters/filter_system.go @@ -158,6 +158,8 @@ const ( PendingTransactionsSubscription // BlocksSubscription queries hashes for blocks that are imported BlocksSubscription + // TransactionReceiptsSubscription queries for transaction receipts when transactions are included in blocks + TransactionReceiptsSubscription // LastIndexSubscription keeps track of the last index LastIndexSubscription ) @@ -182,6 +184,8 @@ type subscription struct { logs chan []*types.Log txs chan []*types.Transaction headers chan *types.Header + receipts chan []*ReceiptWithTx + txHashes []common.Hash // contains transaction hashes for transactionReceipts subscription filtering installed chan struct{} // closed when the filter is installed err chan error // closed when the filter is uninstalled } @@ -268,6 +272,7 @@ func (sub *Subscription) Unsubscribe() { case <-sub.f.logs: case 
<-sub.f.txs: case <-sub.f.headers: + case <-sub.f.receipts: } } @@ -353,6 +358,7 @@ func (es *EventSystem) subscribeLogs(crit ethereum.FilterQuery, logs chan []*typ logs: logs, txs: make(chan []*types.Transaction), headers: make(chan *types.Header), + receipts: make(chan []*ReceiptWithTx), installed: make(chan struct{}), err: make(chan error), } @@ -369,6 +375,7 @@ func (es *EventSystem) SubscribeNewHeads(headers chan *types.Header) *Subscripti logs: make(chan []*types.Log), txs: make(chan []*types.Transaction), headers: headers, + receipts: make(chan []*ReceiptWithTx), installed: make(chan struct{}), err: make(chan error), } @@ -385,6 +392,26 @@ func (es *EventSystem) SubscribePendingTxs(txs chan []*types.Transaction) *Subsc logs: make(chan []*types.Log), txs: txs, headers: make(chan *types.Header), + receipts: make(chan []*ReceiptWithTx), + installed: make(chan struct{}), + err: make(chan error), + } + return es.subscribe(sub) +} + +// SubscribeTransactionReceipts creates a subscription that writes transaction receipts for +// transactions when they are included in blocks. If txHashes is provided, only receipts +// for those specific transaction hashes will be delivered. 
+func (es *EventSystem) SubscribeTransactionReceipts(txHashes []common.Hash, receipts chan []*ReceiptWithTx) *Subscription { + sub := &subscription{ + id: rpc.NewID(), + typ: TransactionReceiptsSubscription, + created: time.Now(), + logs: make(chan []*types.Log), + txs: make(chan []*types.Transaction), + headers: make(chan *types.Header), + receipts: receipts, + txHashes: txHashes, installed: make(chan struct{}), err: make(chan error), } @@ -415,6 +442,14 @@ func (es *EventSystem) handleChainEvent(filters filterIndex, ev core.ChainEvent) for _, f := range filters[BlocksSubscription] { f.headers <- ev.Header } + + // Handle transaction receipts subscriptions when a new block is added + for _, f := range filters[TransactionReceiptsSubscription] { + matchedReceipts := filterReceipts(f.txHashes, ev) + if len(matchedReceipts) > 0 { + f.receipts <- matchedReceipts + } + } } // eventLoop (un)installs filters and processes mux events. diff --git a/eth/filters/filter_system_test.go b/eth/filters/filter_system_test.go index 0048e74995..e5a1a2b25f 100644 --- a/eth/filters/filter_system_test.go +++ b/eth/filters/filter_system_test.go @@ -31,6 +31,7 @@ import ( "github.com/ethereum/go-ethereum/core/filtermaps" "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/event" "github.com/ethereum/go-ethereum/internal/ethapi" @@ -781,3 +782,143 @@ func TestPendingTxFilterDeadlock(t *testing.T) { } } } + +// TestTransactionReceiptsSubscription tests the transaction receipts subscription functionality +func TestTransactionReceiptsSubscription(t *testing.T) { + t.Parallel() + + const txNum = 5 + + // Setup test environment + var ( + db = rawdb.NewMemoryDatabase() + backend, sys = newTestFilterSystem(db, Config{}) + api = NewFilterAPI(sys) + key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") + 
addr1 = crypto.PubkeyToAddress(key1.PublicKey) + signer = types.NewLondonSigner(big.NewInt(1)) + genesis = &core.Genesis{ + Alloc: types.GenesisAlloc{addr1: {Balance: big.NewInt(1000000000000000000)}}, // 1 ETH + Config: params.TestChainConfig, + BaseFee: big.NewInt(params.InitialBaseFee), + } + _, chain, _ = core.GenerateChainWithGenesis(genesis, ethash.NewFaker(), 1, func(i int, gen *core.BlockGen) { + // Add transactions to the block + for j := 0; j < txNum; j++ { + toAddr := common.HexToAddress("0xb794f5ea0ba39494ce83a213fffba74279579268") + tx, _ := types.SignTx(types.NewTx(&types.LegacyTx{ + Nonce: uint64(j), + GasPrice: gen.BaseFee(), + Gas: 21000, + To: &toAddr, + Value: big.NewInt(1000), + Data: nil, + }), signer, key1) + gen.AddTx(tx) + } + }) + ) + + // Insert the blocks into the chain + blockchain, err := core.NewBlockChain(db, genesis, ethash.NewFaker(), nil) + if err != nil { + t.Fatalf("failed to create tester chain: %v", err) + } + if n, err := blockchain.InsertChain(chain); err != nil { + t.Fatalf("block %d: failed to insert into chain: %v", n, err) + } + + // Prepare test data + receipts := blockchain.GetReceiptsByHash(chain[0].Hash()) + if receipts == nil { + t.Fatalf("failed to get receipts") + } + + chainEvent := core.ChainEvent{ + Header: chain[0].Header(), + Receipts: receipts, + Transactions: chain[0].Transactions(), + } + + txHashes := make([]common.Hash, txNum) + for i := 0; i < txNum; i++ { + txHashes[i] = chain[0].Transactions()[i].Hash() + } + + testCases := []struct { + name string + filterTxHashes []common.Hash + expectedReceiptTxHashes []common.Hash + expectError bool + }{ + { + name: "no filter - should return all receipts", + filterTxHashes: nil, + expectedReceiptTxHashes: txHashes, + expectError: false, + }, + { + name: "single tx hash filter", + filterTxHashes: []common.Hash{txHashes[0]}, + expectedReceiptTxHashes: []common.Hash{txHashes[0]}, + expectError: false, + }, + { + name: "multiple tx hashes filter", + filterTxHashes: 
[]common.Hash{txHashes[0], txHashes[1], txHashes[2]}, + expectedReceiptTxHashes: []common.Hash{txHashes[0], txHashes[1], txHashes[2]}, + expectError: false, + }, + } + + // Run test cases + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + receiptsChan := make(chan []*ReceiptWithTx) + sub := api.events.SubscribeTransactionReceipts(tc.filterTxHashes, receiptsChan) + + // Send chain event + backend.chainFeed.Send(chainEvent) + + // Wait for receipts + timeout := time.After(1 * time.Second) + var receivedReceipts []*types.Receipt + for { + select { + case receiptsWithTx := <-receiptsChan: + for _, receiptWithTx := range receiptsWithTx { + receivedReceipts = append(receivedReceipts, receiptWithTx.Receipt) + } + case <-timeout: + t.Fatalf("timeout waiting for receipts") + } + if len(receivedReceipts) >= len(tc.expectedReceiptTxHashes) { + break + } + } + + // Verify receipt count + if len(receivedReceipts) != len(tc.expectedReceiptTxHashes) { + t.Errorf("Expected %d receipts, got %d", len(tc.expectedReceiptTxHashes), len(receivedReceipts)) + } + + // Verify specific transaction hashes are present + if tc.expectedReceiptTxHashes != nil { + receivedHashes := make(map[common.Hash]bool) + for _, receipt := range receivedReceipts { + receivedHashes[receipt.TxHash] = true + } + + for _, expectedHash := range tc.expectedReceiptTxHashes { + if !receivedHashes[expectedHash] { + t.Errorf("Expected receipt for tx %x not found", expectedHash) + } + } + } + + // Cleanup + sub.Unsubscribe() + <-sub.Err() + }) + } +} diff --git a/internal/ethapi/api.go b/internal/ethapi/api.go index c3f267027c..a2cb28d3b2 100644 --- a/internal/ethapi/api.go +++ b/internal/ethapi/api.go @@ -627,7 +627,7 @@ func (api *BlockChainAPI) GetBlockReceipts(ctx context.Context, blockNrOrHash rp result := make([]map[string]interface{}, len(receipts)) for i, receipt := range receipts { - result[i] = marshalReceipt(receipt, block.Hash(), block.NumberU64(), signer, txs[i], i) + result[i] = 
MarshalReceipt(receipt, block.Hash(), block.NumberU64(), signer, txs[i], i) } return result, nil } @@ -1488,11 +1488,11 @@ func (api *TransactionAPI) GetTransactionReceipt(ctx context.Context, hash commo return nil, err } // Derive the sender. - return marshalReceipt(receipt, blockHash, blockNumber, api.signer, tx, int(index)), nil + return MarshalReceipt(receipt, blockHash, blockNumber, api.signer, tx, int(index)), nil } -// marshalReceipt marshals a transaction receipt into a JSON object. -func marshalReceipt(receipt *types.Receipt, blockHash common.Hash, blockNumber uint64, signer types.Signer, tx *types.Transaction, txIndex int) map[string]interface{} { +// MarshalReceipt marshals a transaction receipt into a JSON object. +func MarshalReceipt(receipt *types.Receipt, blockHash common.Hash, blockNumber uint64, signer types.Signer, tx *types.Transaction, txIndex int) map[string]interface{} { from, _ := types.Sender(signer, tx) fields := map[string]interface{}{ From 4d6d5a3abf7753009255749407c32aefbf3bff78 Mon Sep 17 00:00:00 2001 From: Csaba Kiraly Date: Fri, 10 Oct 2025 07:40:10 +0200 Subject: [PATCH 023/277] core/txpool/legacypool: fix validTxMeter to count transactions (#32845) invalidTxMeter was counting txs, while validTxMeter was counting accounts. Better make the two comparable. --------- Signed-off-by: Csaba Kiraly --- core/txpool/legacypool/legacypool.go | 17 ++++++++++++----- 1 file changed, 12 insertions(+), 5 deletions(-) diff --git a/core/txpool/legacypool/legacypool.go b/core/txpool/legacypool/legacypool.go index 80a9faf23f..e199d21c7a 100644 --- a/core/txpool/legacypool/legacypool.go +++ b/core/txpool/legacypool/legacypool.go @@ -984,17 +984,24 @@ func (pool *LegacyPool) Add(txs []*types.Transaction, sync bool) []error { // addTxsLocked attempts to queue a batch of transactions if they are valid. // The transaction pool lock must be held. +// Returns the error for each tx, and the set of accounts that might became promotable. 
func (pool *LegacyPool) addTxsLocked(txs []*types.Transaction) ([]error, *accountSet) { - dirty := newAccountSet(pool.signer) - errs := make([]error, len(txs)) + var ( + dirty = newAccountSet(pool.signer) + errs = make([]error, len(txs)) + valid int64 + ) for i, tx := range txs { replaced, err := pool.add(tx) errs[i] = err - if err == nil && !replaced { - dirty.addTx(tx) + if err == nil { + if !replaced { + dirty.addTx(tx) + } + valid++ } } - validTxMeter.Mark(int64(len(dirty.accounts))) + validTxMeter.Mark(valid) return errs, dirty } From ed264a1f198ea4172b4f2fe622bae5987f114561 Mon Sep 17 00:00:00 2001 From: cui Date: Fri, 10 Oct 2025 13:48:25 +0800 Subject: [PATCH 024/277] eth/protocols/snap: optimize incHash (#32748) --- eth/protocols/snap/range.go | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/eth/protocols/snap/range.go b/eth/protocols/snap/range.go index 8c98c71d50..f32cca8d23 100644 --- a/eth/protocols/snap/range.go +++ b/eth/protocols/snap/range.go @@ -74,8 +74,11 @@ func (r *hashRange) End() common.Hash { // incHash returns the next hash, in lexicographical order (a.k.a plus one) func incHash(h common.Hash) common.Hash { - var a uint256.Int - a.SetBytes32(h[:]) - a.AddUint64(&a, 1) - return common.Hash(a.Bytes32()) + for i := len(h) - 1; i >= 0; i-- { + h[i]++ + if h[i] != 0 { + break + } + } + return h } From de24450dbf0887032eacdee7da130b5e038be5cb Mon Sep 17 00:00:00 2001 From: rjl493456442 Date: Fri, 10 Oct 2025 14:51:27 +0800 Subject: [PATCH 025/277] core/rawdb, triedb/pathdb: introduce trienode history (#32596) It's a pull request based on the #32523 , implementing the structure of trienode history. 
--- core/rawdb/accessors_history.go | 95 +++- core/rawdb/accessors_state.go | 73 +++ core/rawdb/ancient_scheme.go | 50 +- core/rawdb/schema.go | 43 +- triedb/pathdb/database.go | 4 +- triedb/pathdb/history.go | 57 +- triedb/pathdb/history_index.go | 12 + triedb/pathdb/history_indexer.go | 76 ++- triedb/pathdb/history_state.go | 6 +- triedb/pathdb/history_state_test.go | 20 +- triedb/pathdb/history_trienode.go | 730 ++++++++++++++++++++++++ triedb/pathdb/history_trienode_test.go | 736 +++++++++++++++++++++++++ triedb/pathdb/metrics.go | 19 +- 13 files changed, 1850 insertions(+), 71 deletions(-) create mode 100644 triedb/pathdb/history_trienode.go create mode 100644 triedb/pathdb/history_trienode_test.go diff --git a/core/rawdb/accessors_history.go b/core/rawdb/accessors_history.go index cf1073f387..95a8907edc 100644 --- a/core/rawdb/accessors_history.go +++ b/core/rawdb/accessors_history.go @@ -46,6 +46,27 @@ func DeleteStateHistoryIndexMetadata(db ethdb.KeyValueWriter) { } } +// ReadTrienodeHistoryIndexMetadata retrieves the metadata of trienode history index. +func ReadTrienodeHistoryIndexMetadata(db ethdb.KeyValueReader) []byte { + data, _ := db.Get(headTrienodeHistoryIndexKey) + return data +} + +// WriteTrienodeHistoryIndexMetadata stores the metadata of trienode history index +// into database. +func WriteTrienodeHistoryIndexMetadata(db ethdb.KeyValueWriter, blob []byte) { + if err := db.Put(headTrienodeHistoryIndexKey, blob); err != nil { + log.Crit("Failed to store the metadata of trienode history index", "err", err) + } +} + +// DeleteTrienodeHistoryIndexMetadata removes the metadata of trienode history index. +func DeleteTrienodeHistoryIndexMetadata(db ethdb.KeyValueWriter) { + if err := db.Delete(headTrienodeHistoryIndexKey); err != nil { + log.Crit("Failed to delete the metadata of trienode history index", "err", err) + } +} + // ReadAccountHistoryIndex retrieves the account history index with the provided // account address. 
func ReadAccountHistoryIndex(db ethdb.KeyValueReader, addressHash common.Hash) []byte { @@ -95,6 +116,30 @@ func DeleteStorageHistoryIndex(db ethdb.KeyValueWriter, addressHash common.Hash, } } +// ReadTrienodeHistoryIndex retrieves the trienode history index with the provided +// account address and storage key hash. +func ReadTrienodeHistoryIndex(db ethdb.KeyValueReader, addressHash common.Hash, path []byte) []byte { + data, err := db.Get(trienodeHistoryIndexKey(addressHash, path)) + if err != nil || len(data) == 0 { + return nil + } + return data +} + +// WriteTrienodeHistoryIndex writes the provided trienode history index into database. +func WriteTrienodeHistoryIndex(db ethdb.KeyValueWriter, addressHash common.Hash, path []byte, data []byte) { + if err := db.Put(trienodeHistoryIndexKey(addressHash, path), data); err != nil { + log.Crit("Failed to store trienode history index", "err", err) + } +} + +// DeleteTrienodeHistoryIndex deletes the specified trienode index from the database. +func DeleteTrienodeHistoryIndex(db ethdb.KeyValueWriter, addressHash common.Hash, path []byte) { + if err := db.Delete(trienodeHistoryIndexKey(addressHash, path)); err != nil { + log.Crit("Failed to delete trienode history index", "err", err) + } +} + // ReadAccountHistoryIndexBlock retrieves the index block with the provided // account address along with the block id. func ReadAccountHistoryIndexBlock(db ethdb.KeyValueReader, addressHash common.Hash, blockID uint32) []byte { @@ -143,6 +188,30 @@ func DeleteStorageHistoryIndexBlock(db ethdb.KeyValueWriter, addressHash common. } } +// ReadTrienodeHistoryIndexBlock retrieves the index block with the provided state +// identifier along with the block id. 
+func ReadTrienodeHistoryIndexBlock(db ethdb.KeyValueReader, addressHash common.Hash, path []byte, blockID uint32) []byte { + data, err := db.Get(trienodeHistoryIndexBlockKey(addressHash, path, blockID)) + if err != nil || len(data) == 0 { + return nil + } + return data +} + +// WriteTrienodeHistoryIndexBlock writes the provided index block into database. +func WriteTrienodeHistoryIndexBlock(db ethdb.KeyValueWriter, addressHash common.Hash, path []byte, id uint32, data []byte) { + if err := db.Put(trienodeHistoryIndexBlockKey(addressHash, path, id), data); err != nil { + log.Crit("Failed to store trienode index block", "err", err) + } +} + +// DeleteTrienodeHistoryIndexBlock deletes the specified index block from the database. +func DeleteTrienodeHistoryIndexBlock(db ethdb.KeyValueWriter, addressHash common.Hash, path []byte, id uint32) { + if err := db.Delete(trienodeHistoryIndexBlockKey(addressHash, path, id)); err != nil { + log.Crit("Failed to delete trienode index block", "err", err) + } +} + // increaseKey increase the input key by one bit. Return nil if the entire // addition operation overflows. func increaseKey(key []byte) []byte { @@ -155,14 +224,26 @@ func increaseKey(key []byte) []byte { return nil } -// DeleteStateHistoryIndex completely removes all history indexing data, including +// DeleteStateHistoryIndexes completely removes all history indexing data, including // indexes for accounts and storages. -// -// Note, this method assumes the storage space with prefix `StateHistoryIndexPrefix` -// is exclusively occupied by the history indexing data! 
-func DeleteStateHistoryIndex(db ethdb.KeyValueRangeDeleter) { - start := StateHistoryIndexPrefix - limit := increaseKey(bytes.Clone(StateHistoryIndexPrefix)) +func DeleteStateHistoryIndexes(db ethdb.KeyValueRangeDeleter) { + DeleteHistoryByRange(db, StateHistoryAccountMetadataPrefix) + DeleteHistoryByRange(db, StateHistoryStorageMetadataPrefix) + DeleteHistoryByRange(db, StateHistoryAccountBlockPrefix) + DeleteHistoryByRange(db, StateHistoryStorageBlockPrefix) +} + +// DeleteTrienodeHistoryIndexes completely removes all trienode history indexing data. +func DeleteTrienodeHistoryIndexes(db ethdb.KeyValueRangeDeleter) { + DeleteHistoryByRange(db, TrienodeHistoryMetadataPrefix) + DeleteHistoryByRange(db, TrienodeHistoryBlockPrefix) +} + +// DeleteHistoryByRange completely removes all database entries with the specific prefix. +// Note, this method assumes the space with the given prefix is exclusively occupied! +func DeleteHistoryByRange(db ethdb.KeyValueRangeDeleter, prefix []byte) { + start := prefix + limit := increaseKey(bytes.Clone(prefix)) // Try to remove the data in the range by a loop, as the leveldb // doesn't support the native range deletion. diff --git a/core/rawdb/accessors_state.go b/core/rawdb/accessors_state.go index 46aa5fd070..298ad04f40 100644 --- a/core/rawdb/accessors_state.go +++ b/core/rawdb/accessors_state.go @@ -299,3 +299,76 @@ func WriteStateHistory(db ethdb.AncientWriter, id uint64, meta []byte, accountIn }) return err } + +// ReadTrienodeHistory retrieves the trienode history corresponding to the specified id. +// Compute the position of trienode history in freezer by minus one since the id of first +// trienode history starts from one(zero for initial state). 
+func ReadTrienodeHistory(db ethdb.AncientReaderOp, id uint64) ([]byte, []byte, []byte, error) { + header, err := db.Ancient(trienodeHistoryHeaderTable, id-1) + if err != nil { + return nil, nil, nil, err + } + keySection, err := db.Ancient(trienodeHistoryKeySectionTable, id-1) + if err != nil { + return nil, nil, nil, err + } + valueSection, err := db.Ancient(trienodeHistoryValueSectionTable, id-1) + if err != nil { + return nil, nil, nil, err + } + return header, keySection, valueSection, nil +} + +// ReadTrienodeHistoryHeader retrieves the header section of trienode history. +func ReadTrienodeHistoryHeader(db ethdb.AncientReaderOp, id uint64) ([]byte, error) { + return db.Ancient(trienodeHistoryHeaderTable, id-1) +} + +// ReadTrienodeHistoryKeySection retrieves the key section of trienode history. +func ReadTrienodeHistoryKeySection(db ethdb.AncientReaderOp, id uint64) ([]byte, error) { + return db.Ancient(trienodeHistoryKeySectionTable, id-1) +} + +// ReadTrienodeHistoryValueSection retrieves the value section of trienode history. +func ReadTrienodeHistoryValueSection(db ethdb.AncientReaderOp, id uint64) ([]byte, error) { + return db.Ancient(trienodeHistoryValueSectionTable, id-1) +} + +// ReadTrienodeHistoryList retrieves the a list of trienode history corresponding +// to the specified range. +// Compute the position of trienode history in freezer by minus one since the id +// of first trienode history starts from one(zero for initial state). 
+func ReadTrienodeHistoryList(db ethdb.AncientReaderOp, start uint64, count uint64) ([][]byte, [][]byte, [][]byte, error) { + header, err := db.AncientRange(trienodeHistoryHeaderTable, start-1, count, 0) + if err != nil { + return nil, nil, nil, err + } + keySection, err := db.AncientRange(trienodeHistoryKeySectionTable, start-1, count, 0) + if err != nil { + return nil, nil, nil, err + } + valueSection, err := db.AncientRange(trienodeHistoryValueSectionTable, start-1, count, 0) + if err != nil { + return nil, nil, nil, err + } + if len(header) != len(keySection) || len(header) != len(valueSection) { + return nil, nil, nil, errors.New("trienode history is corrupted") + } + return header, keySection, valueSection, nil +} + +// WriteTrienodeHistory writes the provided trienode history to database. +// Compute the position of trienode history in freezer by minus one since +// the id of first state history starts from one(zero for initial state). +func WriteTrienodeHistory(db ethdb.AncientWriter, id uint64, header []byte, keySection []byte, valueSection []byte) error { + _, err := db.ModifyAncients(func(op ethdb.AncientWriteOp) error { + if err := op.AppendRaw(trienodeHistoryHeaderTable, id-1, header); err != nil { + return err + } + if err := op.AppendRaw(trienodeHistoryKeySectionTable, id-1, keySection); err != nil { + return err + } + return op.AppendRaw(trienodeHistoryValueSectionTable, id-1, valueSection) + }) + return err +} diff --git a/core/rawdb/ancient_scheme.go b/core/rawdb/ancient_scheme.go index 1ffebed3e7..afec7848c8 100644 --- a/core/rawdb/ancient_scheme.go +++ b/core/rawdb/ancient_scheme.go @@ -75,15 +75,38 @@ var stateFreezerTableConfigs = map[string]freezerTableConfig{ stateHistoryStorageData: {noSnappy: false, prunable: true}, } +const ( + trienodeHistoryHeaderTable = "trienode.header" + trienodeHistoryKeySectionTable = "trienode.key" + trienodeHistoryValueSectionTable = "trienode.value" +) + +// trienodeFreezerTableConfigs configures the settings 
for tables in the trienode freezer. +var trienodeFreezerTableConfigs = map[string]freezerTableConfig{ + trienodeHistoryHeaderTable: {noSnappy: false, prunable: true}, + + // Disable snappy compression to allow efficient partial read. + trienodeHistoryKeySectionTable: {noSnappy: true, prunable: true}, + + // Disable snappy compression to allow efficient partial read. + trienodeHistoryValueSectionTable: {noSnappy: true, prunable: true}, +} + // The list of identifiers of ancient stores. var ( - ChainFreezerName = "chain" // the folder name of chain segment ancient store. - MerkleStateFreezerName = "state" // the folder name of state history ancient store. - VerkleStateFreezerName = "state_verkle" // the folder name of state history ancient store. + ChainFreezerName = "chain" // the folder name of chain segment ancient store. + MerkleStateFreezerName = "state" // the folder name of state history ancient store. + VerkleStateFreezerName = "state_verkle" // the folder name of state history ancient store. + MerkleTrienodeFreezerName = "trienode" // the folder name of trienode history ancient store. + VerkleTrienodeFreezerName = "trienode_verkle" // the folder name of trienode history ancient store. ) // freezers the collections of all builtin freezers. -var freezers = []string{ChainFreezerName, MerkleStateFreezerName, VerkleStateFreezerName} +var freezers = []string{ + ChainFreezerName, + MerkleStateFreezerName, VerkleStateFreezerName, + MerkleTrienodeFreezerName, VerkleTrienodeFreezerName, +} // NewStateFreezer initializes the ancient store for state history. // @@ -103,3 +126,22 @@ func NewStateFreezer(ancientDir string, verkle bool, readOnly bool) (ethdb.Reset } return newResettableFreezer(name, "eth/db/state", readOnly, stateHistoryTableSize, stateFreezerTableConfigs) } + +// NewTrienodeFreezer initializes the ancient store for trienode history. +// +// - if the empty directory is given, initializes the pure in-memory +// trienode freezer (e.g. dev mode). 
+// - if non-empty directory is given, initializes the regular file-based +// trienode freezer. +func NewTrienodeFreezer(ancientDir string, verkle bool, readOnly bool) (ethdb.ResettableAncientStore, error) { + if ancientDir == "" { + return NewMemoryFreezer(readOnly, trienodeFreezerTableConfigs), nil + } + var name string + if verkle { + name = filepath.Join(ancientDir, VerkleTrienodeFreezerName) + } else { + name = filepath.Join(ancientDir, MerkleTrienodeFreezerName) + } + return newResettableFreezer(name, "eth/db/trienode", readOnly, stateHistoryTableSize, trienodeFreezerTableConfigs) +} diff --git a/core/rawdb/schema.go b/core/rawdb/schema.go index 9a17e1c173..d9140c5fd6 100644 --- a/core/rawdb/schema.go +++ b/core/rawdb/schema.go @@ -80,6 +80,10 @@ var ( // been indexed. headStateHistoryIndexKey = []byte("LastStateHistoryIndex") + // headTrienodeHistoryIndexKey tracks the ID of the latest state history that has + // been indexed. + headTrienodeHistoryIndexKey = []byte("LastTrienodeHistoryIndex") + // txIndexTailKey tracks the oldest block whose transactions have been indexed. 
txIndexTailKey = []byte("TransactionIndexTail") @@ -125,8 +129,10 @@ var ( StateHistoryIndexPrefix = []byte("m") // The global prefix of state history index data StateHistoryAccountMetadataPrefix = []byte("ma") // StateHistoryAccountMetadataPrefix + account address hash => account metadata StateHistoryStorageMetadataPrefix = []byte("ms") // StateHistoryStorageMetadataPrefix + account address hash + storage slot hash => slot metadata + TrienodeHistoryMetadataPrefix = []byte("mt") // TrienodeHistoryMetadataPrefix + account address hash + trienode path => trienode metadata StateHistoryAccountBlockPrefix = []byte("mba") // StateHistoryAccountBlockPrefix + account address hash + blockID => account block StateHistoryStorageBlockPrefix = []byte("mbs") // StateHistoryStorageBlockPrefix + account address hash + storage slot hash + blockID => slot block + TrienodeHistoryBlockPrefix = []byte("mbt") // TrienodeHistoryBlockPrefix + account address hash + trienode path + blockID => trienode block // VerklePrefix is the database prefix for Verkle trie data, which includes: // (a) Trie nodes @@ -395,27 +401,34 @@ func storageHistoryIndexKey(addressHash common.Hash, storageHash common.Hash) [] return out } +// trienodeHistoryIndexKey = TrienodeHistoryMetadataPrefix + addressHash + trienode path +func trienodeHistoryIndexKey(addressHash common.Hash, path []byte) []byte { + totalLen := len(TrienodeHistoryMetadataPrefix) + common.HashLength + len(path) + out := make([]byte, totalLen) + + off := 0 + off += copy(out[off:], TrienodeHistoryMetadataPrefix) + off += copy(out[off:], addressHash.Bytes()) + copy(out[off:], path) + + return out +} + // accountHistoryIndexBlockKey = StateHistoryAccountBlockPrefix + addressHash + blockID func accountHistoryIndexBlockKey(addressHash common.Hash, blockID uint32) []byte { - var buf4 [4]byte - binary.BigEndian.PutUint32(buf4[:], blockID) - totalLen := len(StateHistoryAccountBlockPrefix) + common.HashLength + 4 out := make([]byte, totalLen) off := 0 
off += copy(out[off:], StateHistoryAccountBlockPrefix) off += copy(out[off:], addressHash.Bytes()) - copy(out[off:], buf4[:]) + binary.BigEndian.PutUint32(out[off:], blockID) return out } // storageHistoryIndexBlockKey = StateHistoryStorageBlockPrefix + addressHash + storageHash + blockID func storageHistoryIndexBlockKey(addressHash common.Hash, storageHash common.Hash, blockID uint32) []byte { - var buf4 [4]byte - binary.BigEndian.PutUint32(buf4[:], blockID) - totalLen := len(StateHistoryStorageBlockPrefix) + 2*common.HashLength + 4 out := make([]byte, totalLen) @@ -423,7 +436,21 @@ func storageHistoryIndexBlockKey(addressHash common.Hash, storageHash common.Has off += copy(out[off:], StateHistoryStorageBlockPrefix) off += copy(out[off:], addressHash.Bytes()) off += copy(out[off:], storageHash.Bytes()) - copy(out[off:], buf4[:]) + binary.BigEndian.PutUint32(out[off:], blockID) + + return out +} + +// trienodeHistoryIndexBlockKey = TrienodeHistoryBlockPrefix + addressHash + trienode path + blockID +func trienodeHistoryIndexBlockKey(addressHash common.Hash, path []byte, blockID uint32) []byte { + totalLen := len(TrienodeHistoryBlockPrefix) + common.HashLength + len(path) + 4 + out := make([]byte, totalLen) + + off := 0 + off += copy(out[off:], TrienodeHistoryBlockPrefix) + off += copy(out[off:], addressHash.Bytes()) + off += copy(out[off:], path) + binary.BigEndian.PutUint32(out[off:], blockID) return out } diff --git a/triedb/pathdb/database.go b/triedb/pathdb/database.go index 546d2e0301..9fc65de277 100644 --- a/triedb/pathdb/database.go +++ b/triedb/pathdb/database.go @@ -232,7 +232,7 @@ func (db *Database) repairHistory() error { // Purge all state history indexing data first batch := db.diskdb.NewBatch() rawdb.DeleteStateHistoryIndexMetadata(batch) - rawdb.DeleteStateHistoryIndex(batch) + rawdb.DeleteStateHistoryIndexes(batch) if err := batch.Write(); err != nil { log.Crit("Failed to purge state history index", "err", err) } @@ -426,7 +426,7 @@ func (db 
*Database) Enable(root common.Hash) error { // Purge all state history indexing data first batch.Reset() rawdb.DeleteStateHistoryIndexMetadata(batch) - rawdb.DeleteStateHistoryIndex(batch) + rawdb.DeleteStateHistoryIndexes(batch) if err := batch.Write(); err != nil { return err } diff --git a/triedb/pathdb/history.go b/triedb/pathdb/history.go index 81b843d9f1..d78999f218 100644 --- a/triedb/pathdb/history.go +++ b/triedb/pathdb/history.go @@ -32,6 +32,9 @@ type historyType uint8 const ( // typeStateHistory indicates history data related to account or storage changes. typeStateHistory historyType = 0 + + // typeTrienodeHistory indicates history data related to trie node changes. + typeTrienodeHistory historyType = 1 ) // String returns the string format representation. @@ -39,6 +42,8 @@ func (h historyType) String() string { switch h { case typeStateHistory: return "state" + case typeTrienodeHistory: + return "trienode" default: return fmt.Sprintf("unknown type: %d", h) } @@ -48,8 +53,9 @@ func (h historyType) String() string { type elementType uint8 const ( - typeAccount elementType = 0 // represents the account data - typeStorage elementType = 1 // represents the storage slot data + typeAccount elementType = 0 // represents the account data + typeStorage elementType = 1 // represents the storage slot data + typeTrienode elementType = 2 // represents the trie node data ) // String returns the string format representation. 
@@ -59,6 +65,8 @@ func (e elementType) String() string { return "account" case typeStorage: return "storage" + case typeTrienode: + return "trienode" default: return fmt.Sprintf("unknown element type: %d", e) } @@ -69,11 +77,14 @@ func toHistoryType(typ elementType) historyType { if typ == typeAccount || typ == typeStorage { return typeStateHistory } + if typ == typeTrienode { + return typeTrienodeHistory + } panic(fmt.Sprintf("unknown element type %v", typ)) } // stateIdent represents the identifier of a state element, which can be -// an account or a storage slot. +// an account, a storage slot or a trienode. type stateIdent struct { typ elementType @@ -91,6 +102,12 @@ type stateIdent struct { // // This field is null if the identifier refers to an account or a trie node. storageHash common.Hash + + // The trie node path within the trie. + // + // This field is null if the identifier refers to an account or a storage slot. + // String type is chosen to make stateIdent comparable. + path string } // String returns the string format state identifier. @@ -98,7 +115,10 @@ func (ident stateIdent) String() string { if ident.typ == typeAccount { return ident.addressHash.Hex() } - return ident.addressHash.Hex() + ident.storageHash.Hex() + if ident.typ == typeStorage { + return ident.addressHash.Hex() + ident.storageHash.Hex() + } + return ident.addressHash.Hex() + ident.path } // newAccountIdent constructs a state identifier for an account. @@ -120,8 +140,18 @@ func newStorageIdent(addressHash common.Hash, storageHash common.Hash) stateIden } } -// stateIdentQuery is the extension of stateIdent by adding the account address -// and raw storage key. +// newTrienodeIdent constructs a state identifier for a trie node. 
+// The address denotes the address hash of the associated account; +// the path denotes the path of the node within the trie; +func newTrienodeIdent(addressHash common.Hash, path string) stateIdent { + return stateIdent{ + typ: typeTrienode, + addressHash: addressHash, + path: path, + } +} + +// stateIdentQuery is the extension of stateIdent by adding the raw storage key. type stateIdentQuery struct { stateIdent @@ -150,8 +180,19 @@ func newStorageIdentQuery(address common.Address, addressHash common.Hash, stora } } -// history defines the interface of historical data, implemented by stateHistory -// and trienodeHistory (in the near future). +// newTrienodeIdentQuery constructs a state identifier for a trie node. +// the addressHash denotes the address hash of the associated account; +// the path denotes the path of the node within the trie; +// +// nolint:unused +func newTrienodeIdentQuery(addrHash common.Hash, path []byte) stateIdentQuery { + return stateIdentQuery{ + stateIdent: newTrienodeIdent(addrHash, string(path)), + } +} + +// history defines the interface of historical data, shared by stateHistory +// and trienodeHistory. type history interface { // typ returns the historical data type held in the history. 
typ() historyType diff --git a/triedb/pathdb/history_index.go b/triedb/pathdb/history_index.go index 47cee9820d..5b4c91d7e6 100644 --- a/triedb/pathdb/history_index.go +++ b/triedb/pathdb/history_index.go @@ -376,6 +376,8 @@ func readStateIndex(ident stateIdent, db ethdb.KeyValueReader) []byte { return rawdb.ReadAccountHistoryIndex(db, ident.addressHash) case typeStorage: return rawdb.ReadStorageHistoryIndex(db, ident.addressHash, ident.storageHash) + case typeTrienode: + return rawdb.ReadTrienodeHistoryIndex(db, ident.addressHash, []byte(ident.path)) default: panic(fmt.Errorf("unknown type: %v", ident.typ)) } @@ -389,6 +391,8 @@ func writeStateIndex(ident stateIdent, db ethdb.KeyValueWriter, data []byte) { rawdb.WriteAccountHistoryIndex(db, ident.addressHash, data) case typeStorage: rawdb.WriteStorageHistoryIndex(db, ident.addressHash, ident.storageHash, data) + case typeTrienode: + rawdb.WriteTrienodeHistoryIndex(db, ident.addressHash, []byte(ident.path), data) default: panic(fmt.Errorf("unknown type: %v", ident.typ)) } @@ -402,6 +406,8 @@ func deleteStateIndex(ident stateIdent, db ethdb.KeyValueWriter) { rawdb.DeleteAccountHistoryIndex(db, ident.addressHash) case typeStorage: rawdb.DeleteStorageHistoryIndex(db, ident.addressHash, ident.storageHash) + case typeTrienode: + rawdb.DeleteTrienodeHistoryIndex(db, ident.addressHash, []byte(ident.path)) default: panic(fmt.Errorf("unknown type: %v", ident.typ)) } @@ -415,6 +421,8 @@ func readStateIndexBlock(ident stateIdent, db ethdb.KeyValueReader, id uint32) [ return rawdb.ReadAccountHistoryIndexBlock(db, ident.addressHash, id) case typeStorage: return rawdb.ReadStorageHistoryIndexBlock(db, ident.addressHash, ident.storageHash, id) + case typeTrienode: + return rawdb.ReadTrienodeHistoryIndexBlock(db, ident.addressHash, []byte(ident.path), id) default: panic(fmt.Errorf("unknown type: %v", ident.typ)) } @@ -428,6 +436,8 @@ func writeStateIndexBlock(ident stateIdent, db ethdb.KeyValueWriter, id uint32, 
rawdb.WriteAccountHistoryIndexBlock(db, ident.addressHash, id, data) case typeStorage: rawdb.WriteStorageHistoryIndexBlock(db, ident.addressHash, ident.storageHash, id, data) + case typeTrienode: + rawdb.WriteTrienodeHistoryIndexBlock(db, ident.addressHash, []byte(ident.path), id, data) default: panic(fmt.Errorf("unknown type: %v", ident.typ)) } @@ -441,6 +451,8 @@ func deleteStateIndexBlock(ident stateIdent, db ethdb.KeyValueWriter, id uint32) rawdb.DeleteAccountHistoryIndexBlock(db, ident.addressHash, id) case typeStorage: rawdb.DeleteStorageHistoryIndexBlock(db, ident.addressHash, ident.storageHash, id) + case typeTrienode: + rawdb.DeleteTrienodeHistoryIndexBlock(db, ident.addressHash, []byte(ident.path), id) default: panic(fmt.Errorf("unknown type: %v", ident.typ)) } diff --git a/triedb/pathdb/history_indexer.go b/triedb/pathdb/history_indexer.go index d618585929..368ff78d41 100644 --- a/triedb/pathdb/history_indexer.go +++ b/triedb/pathdb/history_indexer.go @@ -36,8 +36,10 @@ const ( // The batch size for reading state histories historyReadBatch = 1000 - stateIndexV0 = uint8(0) // initial version of state index structure - stateIndexVersion = stateIndexV0 // the current state index version + stateHistoryIndexV0 = uint8(0) // initial version of state index structure + stateHistoryIndexVersion = stateHistoryIndexV0 // the current state index version + trienodeHistoryIndexV0 = uint8(0) // initial version of trienode index structure + trienodeHistoryIndexVersion = trienodeHistoryIndexV0 // the current trienode index version ) // indexVersion returns the latest index version for the given history type. 
@@ -45,7 +47,9 @@ const ( func indexVersion(typ historyType) uint8 { switch typ { case typeStateHistory: - return stateIndexVersion + return stateHistoryIndexVersion + case typeTrienodeHistory: + return trienodeHistoryIndexVersion default: panic(fmt.Errorf("unknown history type: %d", typ)) } @@ -63,6 +67,8 @@ func loadIndexMetadata(db ethdb.KeyValueReader, typ historyType) *indexMetadata switch typ { case typeStateHistory: blob = rawdb.ReadStateHistoryIndexMetadata(db) + case typeTrienodeHistory: + blob = rawdb.ReadTrienodeHistoryIndexMetadata(db) default: panic(fmt.Errorf("unknown history type %d", typ)) } @@ -90,6 +96,8 @@ func storeIndexMetadata(db ethdb.KeyValueWriter, typ historyType, last uint64) { switch typ { case typeStateHistory: rawdb.WriteStateHistoryIndexMetadata(db, blob) + case typeTrienodeHistory: + rawdb.WriteTrienodeHistoryIndexMetadata(db, blob) default: panic(fmt.Errorf("unknown history type %d", typ)) } @@ -101,6 +109,8 @@ func deleteIndexMetadata(db ethdb.KeyValueWriter, typ historyType) { switch typ { case typeStateHistory: rawdb.DeleteStateHistoryIndexMetadata(db) + case typeTrienodeHistory: + rawdb.DeleteTrienodeHistoryIndexMetadata(db) default: panic(fmt.Errorf("unknown history type %d", typ)) } @@ -215,7 +225,11 @@ func (b *batchIndexer) finish(force bool) error { func indexSingle(historyID uint64, db ethdb.KeyValueStore, freezer ethdb.AncientReader, typ historyType) error { start := time.Now() defer func() { - indexHistoryTimer.UpdateSince(start) + if typ == typeStateHistory { + stateIndexHistoryTimer.UpdateSince(start) + } else if typ == typeTrienodeHistory { + trienodeIndexHistoryTimer.UpdateSince(start) + } }() metadata := loadIndexMetadata(db, typ) @@ -234,7 +248,7 @@ func indexSingle(historyID uint64, db ethdb.KeyValueStore, freezer ethdb.Ancient if typ == typeStateHistory { h, err = readStateHistory(freezer, historyID) } else { - // h, err = readTrienodeHistory(freezer, historyID) + h, err = readTrienodeHistory(freezer, historyID) 
} if err != nil { return err @@ -253,7 +267,11 @@ func indexSingle(historyID uint64, db ethdb.KeyValueStore, freezer ethdb.Ancient func unindexSingle(historyID uint64, db ethdb.KeyValueStore, freezer ethdb.AncientReader, typ historyType) error { start := time.Now() defer func() { - unindexHistoryTimer.UpdateSince(start) + if typ == typeStateHistory { + stateUnindexHistoryTimer.UpdateSince(start) + } else if typ == typeTrienodeHistory { + trienodeUnindexHistoryTimer.UpdateSince(start) + } }() metadata := loadIndexMetadata(db, typ) @@ -272,7 +290,7 @@ func unindexSingle(historyID uint64, db ethdb.KeyValueStore, freezer ethdb.Ancie if typ == typeStateHistory { h, err = readStateHistory(freezer, historyID) } else { - // h, err = readTrienodeHistory(freezer, historyID) + h, err = readTrienodeHistory(freezer, historyID) } if err != nil { return err @@ -546,13 +564,13 @@ func (i *indexIniter) index(done chan struct{}, interrupt *atomic.Int32, lastID return } } else { - // histories, err = readTrienodeHistories(i.freezer, current, count) - // if err != nil { - // // The history read might fall if the history is truncated from - // // head due to revert operation. - // i.log.Error("Failed to read history for indexing", "current", current, "count", count, "err", err) - // return - // } + histories, err = readTrienodeHistories(i.freezer, current, count) + if err != nil { + // The history read might fall if the history is truncated from + // head due to revert operation. 
+ i.log.Error("Failed to read history for indexing", "current", current, "count", count, "err", err) + return + } } for _, h := range histories { if err := batch.process(h, current); err != nil { @@ -570,7 +588,7 @@ func (i *indexIniter) index(done chan struct{}, interrupt *atomic.Int32, lastID done = current - beginID ) eta := common.CalculateETA(done, left, time.Since(start)) - i.log.Info("Indexing state history", "processed", done, "left", left, "elapsed", common.PrettyDuration(time.Since(start)), "eta", common.PrettyDuration(eta)) + i.log.Info("Indexing history", "processed", done, "left", left, "elapsed", common.PrettyDuration(time.Since(start)), "eta", common.PrettyDuration(eta)) } } i.indexed.Store(current - 1) // update indexing progress @@ -657,6 +675,8 @@ func checkVersion(disk ethdb.KeyValueStore, typ historyType) { var blob []byte if typ == typeStateHistory { blob = rawdb.ReadStateHistoryIndexMetadata(disk) + } else if typ == typeTrienodeHistory { + blob = rawdb.ReadTrienodeHistoryIndexMetadata(disk) } else { panic(fmt.Errorf("unknown history type: %v", typ)) } @@ -666,24 +686,32 @@ func checkVersion(disk ethdb.KeyValueStore, typ historyType) { return } // Short circuit if the metadata is found and the version is matched + ver := stateHistoryIndexVersion + if typ == typeTrienodeHistory { + ver = trienodeHistoryIndexVersion + } var m indexMetadata err := rlp.DecodeBytes(blob, &m) - if err == nil && m.Version == stateIndexVersion { + if err == nil && m.Version == ver { return } // Version is not matched, prune the existing data and re-index from scratch + batch := disk.NewBatch() + if typ == typeStateHistory { + rawdb.DeleteStateHistoryIndexMetadata(batch) + rawdb.DeleteStateHistoryIndexes(batch) + } else { + rawdb.DeleteTrienodeHistoryIndexMetadata(batch) + rawdb.DeleteTrienodeHistoryIndexes(batch) + } + if err := batch.Write(); err != nil { + log.Crit("Failed to purge history index", "type", typ, "err", err) + } version := "unknown" if err == nil { 
version = fmt.Sprintf("%d", m.Version) } - - batch := disk.NewBatch() - rawdb.DeleteStateHistoryIndexMetadata(batch) - rawdb.DeleteStateHistoryIndex(batch) - if err := batch.Write(); err != nil { - log.Crit("Failed to purge state history index", "err", err) - } - log.Info("Cleaned up obsolete state history index", "version", version, "want", stateIndexVersion) + log.Info("Cleaned up obsolete history index", "type", typ, "version", version, "want", version) } // newHistoryIndexer constructs the history indexer and launches the background diff --git a/triedb/pathdb/history_state.go b/triedb/pathdb/history_state.go index 9d1e4dfb09..bc21915dba 100644 --- a/triedb/pathdb/history_state.go +++ b/triedb/pathdb/history_state.go @@ -605,9 +605,9 @@ func writeStateHistory(writer ethdb.AncientWriter, dl *diffLayer) error { if err := rawdb.WriteStateHistory(writer, dl.stateID(), history.meta.encode(), accountIndex, storageIndex, accountData, storageData); err != nil { return err } - historyDataBytesMeter.Mark(int64(dataSize)) - historyIndexBytesMeter.Mark(int64(indexSize)) - historyBuildTimeMeter.UpdateSince(start) + stateHistoryDataBytesMeter.Mark(int64(dataSize)) + stateHistoryIndexBytesMeter.Mark(int64(indexSize)) + stateHistoryBuildTimeMeter.UpdateSince(start) log.Debug("Stored state history", "id", dl.stateID(), "block", dl.block, "data", dataSize, "index", indexSize, "elapsed", common.PrettyDuration(time.Since(start))) return nil diff --git a/triedb/pathdb/history_state_test.go b/triedb/pathdb/history_state_test.go index 5718081566..4046fb9640 100644 --- a/triedb/pathdb/history_state_test.go +++ b/triedb/pathdb/history_state_test.go @@ -98,13 +98,13 @@ func testEncodeDecodeStateHistory(t *testing.T, rawStorageKey bool) { if !compareSet(dec.accounts, obj.accounts) { t.Fatal("account data is mismatched") } - if !compareStorages(dec.storages, obj.storages) { + if !compareMapSet(dec.storages, obj.storages) { t.Fatal("storage data is mismatched") } if 
!compareList(dec.accountList, obj.accountList) { t.Fatal("account list is mismatched") } - if !compareStorageList(dec.storageList, obj.storageList) { + if !compareMapList(dec.storageList, obj.storageList) { t.Fatal("storage list is mismatched") } } @@ -292,32 +292,32 @@ func compareList[k comparable](a, b []k) bool { return true } -func compareStorages(a, b map[common.Address]map[common.Hash][]byte) bool { +func compareMapSet[K1 comparable, K2 comparable](a, b map[K1]map[K2][]byte) bool { if len(a) != len(b) { return false } - for h, subA := range a { - subB, ok := b[h] + for key, subsetA := range a { + subsetB, ok := b[key] if !ok { return false } - if !compareSet(subA, subB) { + if !compareSet(subsetA, subsetB) { return false } } return true } -func compareStorageList(a, b map[common.Address][]common.Hash) bool { +func compareMapList[K comparable, V comparable](a, b map[K][]V) bool { if len(a) != len(b) { return false } - for h, la := range a { - lb, ok := b[h] + for key, listA := range a { + listB, ok := b[key] if !ok { return false } - if !compareList(la, lb) { + if !compareList(listA, listB) { return false } } diff --git a/triedb/pathdb/history_trienode.go b/triedb/pathdb/history_trienode.go new file mode 100644 index 0000000000..2a4459d4ad --- /dev/null +++ b/triedb/pathdb/history_trienode.go @@ -0,0 +1,730 @@ +// Copyright 2025 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. 
+// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package pathdb + +import ( + "bytes" + "encoding/binary" + "fmt" + "iter" + "maps" + "math" + "slices" + "sort" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/rawdb" + "github.com/ethereum/go-ethereum/ethdb" + "github.com/ethereum/go-ethereum/log" +) + +// Each trie node history entry consists of three parts (stored in three freezer +// tables according): +// +// # Header +// The header records metadata, including: +// +// - the history version (1 byte) +// - the parent state root (32 bytes) +// - the current state root (32 bytes) +// - block number (8 bytes) +// +// - a lexicographically sorted list of trie IDs +// - the corresponding offsets into the key and value sections for each trie data chunk +// +// Although some fields (e.g., parent state root, block number) are duplicated +// between the state history and the trienode history, these two histories +// operate independently. To ensure each remains self-contained and self-descriptive, +// we have chosen to maintain these duplicate fields. +// +// # Key section +// The key section stores trie node keys (paths) in a compressed format. +// It also contains relative offsets into the value section for resolving +// the corresponding trie node data. Note that these offsets are relative +// to the data chunk for the trie; the chunk offset must be added to obtain +// the absolute position. +// +// # Value section +// The value section is a concatenated byte stream of all trie node data. +// Each trie node can be retrieved using the offset and length specified +// by its index entry. +// +// The header and key sections are sufficient for locating a trie node, +// while a partial read of the value section is enough to retrieve its data. 
+ +// Header section: +// +// +----------+------------------+---------------------+---------------------+-------+------------------+---------------------+---------------------| +// | metadata | TrieID(32 bytes) | key offset(4 bytes) | val offset(4 bytes) | ... | TrieID(32 bytes) | key offset(4 bytes) | val offset(4 bytes) | +// +----------+------------------+---------------------+---------------------+-------+------------------+---------------------+---------------------| +// +// +// Key section: +// +// + restart point + restart point (depends on restart interval) +// / / +// +---------------+---------------+---------------+---------------+---------+ +// | node entry 1 | node entry 2 | ... | node entry n | trailer | +// +---------------+---------------+---------------+---------------+---------+ +// \ / +// +---- restart block ------+ +// +// node entry: +// +// +---- key len ----+ +// / \ +// +-------+---------+-----------+---------+-----------------------+-----------------+ +// | shared (varint) | not shared (varint) | value length (varlen) | key (varlen) | +// +-----------------+---------------------+-----------------------+-----------------+ +// +// trailer: +// +// +---- 4-bytes ----+ +---- 4-bytes ----+ +// / \ / \ +// +----------------------+------------------------+-----+--------------------------+ +// | restart_1 key offset | restart_1 value offset | ... | restart number (4-bytes) | +// +----------------------+------------------------+-----+--------------------------+ +// +// Note: Both the key offset and the value offset are relative to the start of +// the trie data chunk. To obtain the absolute offset, add the offset of the +// trie data chunk itself. +// +// Value section: +// +// +--------------+--------------+-------+---------------+ +// | node data 1 | node data 2 | ... | node data n | +// +--------------+--------------+-------+---------------+ +// +// NOTE: All fixed-length integer are big-endian. 
+ +const ( + trienodeHistoryV0 = uint8(0) // initial version of node history structure + trienodeHistoryVersion = trienodeHistoryV0 // the default node history version + trienodeMetadataSize = 1 + 2*common.HashLength + 8 // the size of metadata in the history + trienodeTrieHeaderSize = 8 + common.HashLength // the size of a single trie header in history + trienodeDataBlockRestartLen = 16 // The restart interval length of trie node block +) + +// trienodeMetadata describes the meta data of trienode history. +type trienodeMetadata struct { + version uint8 // version tag of history object + parent common.Hash // prev-state root before the state transition + root common.Hash // post-state root after the state transition + block uint64 // associated block number +} + +// trienodeHistory represents a set of trie node changes resulting from a state +// transition across the main account trie and all associated storage tries. +type trienodeHistory struct { + meta *trienodeMetadata // Metadata of the history + owners []common.Hash // List of trie identifier sorted lexicographically + nodeList map[common.Hash][]string // Set of node paths sorted lexicographically + nodes map[common.Hash]map[string][]byte // Set of original value of trie nodes before state transition +} + +// newTrienodeHistory constructs a trienode history with the provided trie nodes. 
+func newTrienodeHistory(root common.Hash, parent common.Hash, block uint64, nodes map[common.Hash]map[string][]byte) *trienodeHistory { + nodeList := make(map[common.Hash][]string) + for owner, subset := range nodes { + keys := sort.StringSlice(slices.Collect(maps.Keys(subset))) + keys.Sort() + nodeList[owner] = keys + } + return &trienodeHistory{ + meta: &trienodeMetadata{ + version: trienodeHistoryVersion, + parent: parent, + root: root, + block: block, + }, + owners: slices.SortedFunc(maps.Keys(nodes), common.Hash.Cmp), + nodeList: nodeList, + nodes: nodes, + } +} + +// sharedLen returns the length of the common prefix shared by a and b. +func sharedLen(a, b []byte) int { + n := min(len(a), len(b)) + for i := 0; i < n; i++ { + if a[i] != b[i] { + return i + } + } + return n +} + +// typ implements the history interface, returning the historical data type held. +func (h *trienodeHistory) typ() historyType { + return typeTrienodeHistory +} + +// forEach implements the history interface, returning an iterator to traverse the +// state entries in the history. +func (h *trienodeHistory) forEach() iter.Seq[stateIdent] { + return func(yield func(stateIdent) bool) { + for _, owner := range h.owners { + for _, path := range h.nodeList[owner] { + if !yield(newTrienodeIdent(owner, path)) { + return + } + } + } + } +} + +// encode serializes the contained trie nodes into bytes. 
+func (h *trienodeHistory) encode() ([]byte, []byte, []byte, error) { + var ( + buf = make([]byte, 64) + headerSection bytes.Buffer + keySection bytes.Buffer + valueSection bytes.Buffer + ) + binary.Write(&headerSection, binary.BigEndian, h.meta.version) // 1 byte + headerSection.Write(h.meta.parent.Bytes()) // 32 bytes + headerSection.Write(h.meta.root.Bytes()) // 32 bytes + binary.Write(&headerSection, binary.BigEndian, h.meta.block) // 8 byte + + for _, owner := range h.owners { + // Fill the header section with offsets at key and value section + headerSection.Write(owner.Bytes()) // 32 bytes + binary.Write(&headerSection, binary.BigEndian, uint32(keySection.Len())) // 4 bytes + + // The offset to the value section is theoretically unnecessary, since the + // individual value offset is already tracked in the key section. However, + // we still keep it here for two reasons: + // - It's cheap to store (only 4 bytes for each trie). + // - It can be useful for decoding the trie data when key is not required (e.g., in hash mode). 
+ binary.Write(&headerSection, binary.BigEndian, uint32(valueSection.Len())) // 4 bytes + + // Fill the key section with node index + var ( + prevKey []byte + restarts []uint32 + prefixLen int + + internalKeyOffset uint32 // key offset for the trie internally + internalValOffset uint32 // value offset for the trie internally + ) + for i, path := range h.nodeList[owner] { + key := []byte(path) + if i%trienodeDataBlockRestartLen == 0 { + restarts = append(restarts, internalKeyOffset) + restarts = append(restarts, internalValOffset) + prefixLen = 0 + } else { + prefixLen = sharedLen(prevKey, key) + } + value := h.nodes[owner][path] + + // key section + n := binary.PutUvarint(buf[0:], uint64(prefixLen)) // key length shared (varint) + n += binary.PutUvarint(buf[n:], uint64(len(key)-prefixLen)) // key length not shared (varint) + n += binary.PutUvarint(buf[n:], uint64(len(value))) // value length (varint) + + if _, err := keySection.Write(buf[:n]); err != nil { + return nil, nil, nil, err + } + // unshared key + if _, err := keySection.Write(key[prefixLen:]); err != nil { + return nil, nil, nil, err + } + n += len(key) - prefixLen + prevKey = key + + // value section + if _, err := valueSection.Write(value); err != nil { + return nil, nil, nil, err + } + internalKeyOffset += uint32(n) + internalValOffset += uint32(len(value)) + } + + // Encode trailer, the number of restart sections is len(restarts))/2, + // as we track the offsets of both key and value sections. + var trailer []byte + for _, number := range append(restarts, uint32(len(restarts))/2) { + binary.BigEndian.PutUint32(buf[:4], number) + trailer = append(trailer, buf[:4]...) + } + if _, err := keySection.Write(trailer); err != nil { + return nil, nil, nil, err + } + } + return headerSection.Bytes(), keySection.Bytes(), valueSection.Bytes(), nil +} + +// decodeHeader resolves the metadata from the header section. An error +// should be returned if the header section is corrupted. 
+func decodeHeader(data []byte) (*trienodeMetadata, []common.Hash, []uint32, []uint32, error) { + if len(data) < trienodeMetadataSize { + return nil, nil, nil, nil, fmt.Errorf("trienode history is too small, index size: %d", len(data)) + } + version := data[0] + if version != trienodeHistoryVersion { + return nil, nil, nil, nil, fmt.Errorf("unregonized trienode history version: %d", version) + } + parent := common.BytesToHash(data[1 : common.HashLength+1]) // 32 bytes + root := common.BytesToHash(data[common.HashLength+1 : common.HashLength*2+1]) // 32 bytes + block := binary.BigEndian.Uint64(data[common.HashLength*2+1 : trienodeMetadataSize]) // 8 bytes + + size := len(data) - trienodeMetadataSize + if size%trienodeTrieHeaderSize != 0 { + return nil, nil, nil, nil, fmt.Errorf("truncated trienode history data, size %d", len(data)) + } + count := size / trienodeTrieHeaderSize + + var ( + owners = make([]common.Hash, 0, count) + keyOffsets = make([]uint32, 0, count) + valOffsets = make([]uint32, 0, count) + ) + for i := 0; i < count; i++ { + n := trienodeMetadataSize + trienodeTrieHeaderSize*i + owner := common.BytesToHash(data[n : n+common.HashLength]) + if i != 0 && bytes.Compare(owner.Bytes(), owners[i-1].Bytes()) <= 0 { + return nil, nil, nil, nil, fmt.Errorf("trienode owners are out of order, prev: %v, cur: %v", owners[i-1], owner) + } + owners = append(owners, owner) + + // Decode the offset to the key section + keyOffset := binary.BigEndian.Uint32(data[n+common.HashLength : n+common.HashLength+4]) + if i != 0 && keyOffset <= keyOffsets[i-1] { + return nil, nil, nil, nil, fmt.Errorf("key offset is out of order, prev: %v, cur: %v", keyOffsets[i-1], keyOffset) + } + keyOffsets = append(keyOffsets, keyOffset) + + // Decode the offset into the value section. Note that identical value offsets + // are valid if the node values in the last trie chunk are all zero (e.g., after + // a trie deletion). 
+ valOffset := binary.BigEndian.Uint32(data[n+common.HashLength+4 : n+common.HashLength+8]) + if i != 0 && valOffset < valOffsets[i-1] { + return nil, nil, nil, nil, fmt.Errorf("value offset is out of order, prev: %v, cur: %v", valOffsets[i-1], valOffset) + } + valOffsets = append(valOffsets, valOffset) + } + return &trienodeMetadata{ + version: version, + parent: parent, + root: root, + block: block, + }, owners, keyOffsets, valOffsets, nil +} + +func decodeSingle(keySection []byte, onValue func([]byte, int, int) error) ([]string, error) { + var ( + prevKey []byte + items int + keyOffsets []uint32 + valOffsets []uint32 + + keyOff int // the key offset within the single trie data + valOff int // the value offset within the single trie data + + keys []string + ) + // Decode the number of restart section + if len(keySection) < 4 { + return nil, fmt.Errorf("key section too short, size: %d", len(keySection)) + } + nRestarts := binary.BigEndian.Uint32(keySection[len(keySection)-4:]) + + if len(keySection) < int(8*nRestarts)+4 { + return nil, fmt.Errorf("key section too short, restarts: %d, size: %d", nRestarts, len(keySection)) + } + for i := 0; i < int(nRestarts); i++ { + o := len(keySection) - 4 - (int(nRestarts)-i)*8 + keyOffset := binary.BigEndian.Uint32(keySection[o : o+4]) + if i != 0 && keyOffset <= keyOffsets[i-1] { + return nil, fmt.Errorf("key offset is out of order, prev: %v, cur: %v", keyOffsets[i-1], keyOffset) + } + keyOffsets = append(keyOffsets, keyOffset) + + // Same value offset is allowed just in case all the trie nodes in the last + // section have zero-size value. 
+ valOffset := binary.BigEndian.Uint32(keySection[o+4 : o+8]) + if i != 0 && valOffset < valOffsets[i-1] { + return nil, fmt.Errorf("value offset is out of order, prev: %v, cur: %v", valOffsets[i-1], valOffset) + } + valOffsets = append(valOffsets, valOffset) + } + keyLimit := len(keySection) - 4 - int(nRestarts)*8 + + // Decode data + for keyOff < keyLimit { + // Validate the key and value offsets within the single trie data chunk + if items%trienodeDataBlockRestartLen == 0 { + if keyOff != int(keyOffsets[items/trienodeDataBlockRestartLen]) { + return nil, fmt.Errorf("key offset is not matched, recorded: %d, want: %d", keyOffsets[items/trienodeDataBlockRestartLen], keyOff) + } + if valOff != int(valOffsets[items/trienodeDataBlockRestartLen]) { + return nil, fmt.Errorf("value offset is not matched, recorded: %d, want: %d", valOffsets[items/trienodeDataBlockRestartLen], valOff) + } + } + // Resolve the entry from key section + nShared, nn := binary.Uvarint(keySection[keyOff:]) // key length shared (varint) + keyOff += nn + nUnshared, nn := binary.Uvarint(keySection[keyOff:]) // key length not shared (varint) + keyOff += nn + nValue, nn := binary.Uvarint(keySection[keyOff:]) // value length (varint) + keyOff += nn + + // Resolve unshared key + if keyOff+int(nUnshared) > len(keySection) { + return nil, fmt.Errorf("key length too long, unshared key length: %d, off: %d, section size: %d", nUnshared, keyOff, len(keySection)) + } + unsharedKey := keySection[keyOff : keyOff+int(nUnshared)] + keyOff += int(nUnshared) + + // Assemble the full key + var key []byte + if items%trienodeDataBlockRestartLen == 0 { + if nShared != 0 { + return nil, fmt.Errorf("unexpected non-zero shared key prefix: %d", nShared) + } + key = unsharedKey + } else { + if int(nShared) > len(prevKey) { + return nil, fmt.Errorf("unexpected shared key prefix: %d, prefix key length: %d", nShared, len(prevKey)) + } + key = append([]byte{}, prevKey[:nShared]...) + key = append(key, unsharedKey...) 
+ } + if items != 0 && bytes.Compare(prevKey, key) >= 0 { + return nil, fmt.Errorf("trienode paths are out of order, prev: %v, cur: %v", prevKey, key) + } + prevKey = key + + // Resolve value + if onValue != nil { + if err := onValue(key, valOff, valOff+int(nValue)); err != nil { + return nil, err + } + } + valOff += int(nValue) + + items++ + keys = append(keys, string(key)) + } + if keyOff != keyLimit { + return nil, fmt.Errorf("excessive key data after decoding, offset: %d, size: %d", keyOff, keyLimit) + } + return keys, nil +} + +func decodeSingleWithValue(keySection []byte, valueSection []byte) ([]string, map[string][]byte, error) { + var ( + offset int + nodes = make(map[string][]byte) + ) + paths, err := decodeSingle(keySection, func(key []byte, start int, limit int) error { + if start != offset { + return fmt.Errorf("gapped value section offset: %d, want: %d", start, offset) + } + // start == limit is allowed for zero-value trie node (e.g., non-existent node) + if start > limit { + return fmt.Errorf("invalid value offsets, start: %d, limit: %d", start, limit) + } + if start > len(valueSection) || limit > len(valueSection) { + return fmt.Errorf("value section out of range: start: %d, limit: %d, size: %d", start, limit, len(valueSection)) + } + nodes[string(key)] = valueSection[start:limit] + + offset = limit + return nil + }) + if err != nil { + return nil, nil, err + } + if offset != len(valueSection) { + return nil, nil, fmt.Errorf("excessive value data after decoding, offset: %d, size: %d", offset, len(valueSection)) + } + return paths, nodes, nil +} + +// decode deserializes the contained trie nodes from the provided bytes. 
+func (h *trienodeHistory) decode(header []byte, keySection []byte, valueSection []byte) error { + metadata, owners, keyOffsets, valueOffsets, err := decodeHeader(header) + if err != nil { + return err + } + h.meta = metadata + h.owners = owners + h.nodeList = make(map[common.Hash][]string) + h.nodes = make(map[common.Hash]map[string][]byte) + + for i := 0; i < len(owners); i++ { + // Resolve the boundary of key section + keyStart := keyOffsets[i] + keyLimit := len(keySection) + if i != len(owners)-1 { + keyLimit = int(keyOffsets[i+1]) + } + if int(keyStart) > len(keySection) || keyLimit > len(keySection) { + return fmt.Errorf("invalid key offsets: keyStart: %d, keyLimit: %d, size: %d", keyStart, keyLimit, len(keySection)) + } + + // Resolve the boundary of value section + valStart := valueOffsets[i] + valLimit := len(valueSection) + if i != len(owners)-1 { + valLimit = int(valueOffsets[i+1]) + } + if int(valStart) > len(valueSection) || valLimit > len(valueSection) { + return fmt.Errorf("invalid value offsets: valueStart: %d, valueLimit: %d, size: %d", valStart, valLimit, len(valueSection)) + } + + // Decode the key and values for this specific trie + paths, nodes, err := decodeSingleWithValue(keySection[keyStart:keyLimit], valueSection[valStart:valLimit]) + if err != nil { + return err + } + h.nodeList[owners[i]] = paths + h.nodes[owners[i]] = nodes + } + return nil +} + +type iRange struct { + start uint32 + limit uint32 +} + +// singleTrienodeHistoryReader provides read access to a single trie within the +// trienode history. It stores an offset to the trie's position in the history, +// along with a set of per-node offsets that can be resolved on demand. 
+type singleTrienodeHistoryReader struct { + id uint64 + reader ethdb.AncientReader + valueRange iRange // value range within the total value section + valueInternalOffsets map[string]iRange // value offset within the single trie data +} + +func newSingleTrienodeHistoryReader(id uint64, reader ethdb.AncientReader, keyRange iRange, valueRange iRange) (*singleTrienodeHistoryReader, error) { + // TODO(rjl493456442) partial freezer read should be supported + keyData, err := rawdb.ReadTrienodeHistoryKeySection(reader, id) + if err != nil { + return nil, err + } + keyStart := int(keyRange.start) + keyLimit := int(keyRange.limit) + if keyLimit == math.MaxUint32 { + keyLimit = len(keyData) + } + if len(keyData) < keyStart || len(keyData) < keyLimit { + return nil, fmt.Errorf("key section too short, start: %d, limit: %d, size: %d", keyStart, keyLimit, len(keyData)) + } + + valueOffsets := make(map[string]iRange) + _, err = decodeSingle(keyData[keyStart:keyLimit], func(key []byte, start int, limit int) error { + valueOffsets[string(key)] = iRange{ + start: uint32(start), + limit: uint32(limit), + } + return nil + }) + if err != nil { + return nil, err + } + return &singleTrienodeHistoryReader{ + id: id, + reader: reader, + valueRange: valueRange, + valueInternalOffsets: valueOffsets, + }, nil +} + +// read retrieves the trie node data with the provided node path. 
+func (sr *singleTrienodeHistoryReader) read(path string) ([]byte, error) { + offset, exists := sr.valueInternalOffsets[path] + if !exists { + return nil, fmt.Errorf("trienode %v not found", []byte(path)) + } + // TODO(rjl493456442) partial freezer read should be supported + valueData, err := rawdb.ReadTrienodeHistoryValueSection(sr.reader, sr.id) + if err != nil { + return nil, err + } + if len(valueData) < int(sr.valueRange.start) { + return nil, fmt.Errorf("value section too short, start: %d, size: %d", sr.valueRange.start, len(valueData)) + } + entryStart := sr.valueRange.start + offset.start + entryLimit := sr.valueRange.start + offset.limit + if len(valueData) < int(entryStart) || len(valueData) < int(entryLimit) { + return nil, fmt.Errorf("value section too short, start: %d, limit: %d, size: %d", entryStart, entryLimit, len(valueData)) + } + return valueData[int(entryStart):int(entryLimit)], nil +} + +// trienodeHistoryReader provides read access to node data in the trie node history. +// It resolves data from the underlying ancient store only when needed, minimizing +// I/O overhead. +type trienodeHistoryReader struct { + id uint64 // ID of the associated trienode history + reader ethdb.AncientReader // Database reader of ancient store + keyRanges map[common.Hash]iRange // Key ranges identifying trie chunks + valRanges map[common.Hash]iRange // Value ranges identifying trie chunks + iReaders map[common.Hash]*singleTrienodeHistoryReader // readers for each individual trie chunk +} + +// newTrienodeHistoryReader constructs the reader for specific trienode history. 
+func newTrienodeHistoryReader(id uint64, reader ethdb.AncientReader) (*trienodeHistoryReader, error) { + r := &trienodeHistoryReader{ + id: id, + reader: reader, + keyRanges: make(map[common.Hash]iRange), + valRanges: make(map[common.Hash]iRange), + iReaders: make(map[common.Hash]*singleTrienodeHistoryReader), + } + if err := r.decodeHeader(); err != nil { + return nil, err + } + return r, nil +} + +// decodeHeader decodes the header section of trienode history. +func (r *trienodeHistoryReader) decodeHeader() error { + header, err := rawdb.ReadTrienodeHistoryHeader(r.reader, r.id) + if err != nil { + return err + } + _, owners, keyOffsets, valOffsets, err := decodeHeader(header) + if err != nil { + return err + } + for i, owner := range owners { + // Decode the key range for this trie chunk + var keyLimit uint32 + if i == len(owners)-1 { + keyLimit = math.MaxUint32 + } else { + keyLimit = keyOffsets[i+1] + } + r.keyRanges[owner] = iRange{ + start: keyOffsets[i], + limit: keyLimit, + } + + // Decode the value range for this trie chunk + var valLimit uint32 + if i == len(owners)-1 { + valLimit = math.MaxUint32 + } else { + valLimit = valOffsets[i+1] + } + r.valRanges[owner] = iRange{ + start: valOffsets[i], + limit: valLimit, + } + } + return nil +} + +// read retrieves the trie node data with the provided TrieID and node path. +func (r *trienodeHistoryReader) read(owner common.Hash, path string) ([]byte, error) { + ir, ok := r.iReaders[owner] + if !ok { + keyRange, exists := r.keyRanges[owner] + if !exists { + return nil, fmt.Errorf("trie %x is unknown", owner) + } + valRange, exists := r.valRanges[owner] + if !exists { + return nil, fmt.Errorf("trie %x is unknown", owner) + } + var err error + ir, err = newSingleTrienodeHistoryReader(r.id, r.reader, keyRange, valRange) + if err != nil { + return nil, err + } + r.iReaders[owner] = ir + } + return ir.read(path) +} + +// writeTrienodeHistory persists the trienode history associated with the given diff layer. 
+// nolint:unused +func writeTrienodeHistory(writer ethdb.AncientWriter, dl *diffLayer) error { + start := time.Now() + h := newTrienodeHistory(dl.rootHash(), dl.parent.rootHash(), dl.block, dl.nodes.nodeOrigin) + header, keySection, valueSection, err := h.encode() + if err != nil { + return err + } + // Write history data into five freezer table respectively. + if err := rawdb.WriteTrienodeHistory(writer, dl.stateID(), header, keySection, valueSection); err != nil { + return err + } + trienodeHistoryDataBytesMeter.Mark(int64(len(valueSection))) + trienodeHistoryIndexBytesMeter.Mark(int64(len(header) + len(keySection))) + trienodeHistoryBuildTimeMeter.UpdateSince(start) + + log.Debug( + "Stored trienode history", "id", dl.stateID(), "block", dl.block, + "header", common.StorageSize(len(header)), + "keySection", common.StorageSize(len(keySection)), + "valueSection", common.StorageSize(len(valueSection)), + "elapsed", common.PrettyDuration(time.Since(start)), + ) + return nil +} + +// readTrienodeMetadata resolves the metadata of the specified trienode history. +// nolint:unused +func readTrienodeMetadata(reader ethdb.AncientReader, id uint64) (*trienodeMetadata, error) { + header, err := rawdb.ReadTrienodeHistoryHeader(reader, id) + if err != nil { + return nil, err + } + metadata, _, _, _, err := decodeHeader(header) + if err != nil { + return nil, err + } + return metadata, nil +} + +// readTrienodeHistory resolves a single trienode history object with specific id. +func readTrienodeHistory(reader ethdb.AncientReader, id uint64) (*trienodeHistory, error) { + header, keySection, valueSection, err := rawdb.ReadTrienodeHistory(reader, id) + if err != nil { + return nil, err + } + var h trienodeHistory + if err := h.decode(header, keySection, valueSection); err != nil { + return nil, err + } + return &h, nil +} + +// readTrienodeHistories resolves a list of trienode histories with the specific range. 
+func readTrienodeHistories(reader ethdb.AncientReader, start uint64, count uint64) ([]history, error) { + headers, keySections, valueSections, err := rawdb.ReadTrienodeHistoryList(reader, start, count) + if err != nil { + return nil, err + } + var res []history + for i, header := range headers { + var h trienodeHistory + if err := h.decode(header, keySections[i], valueSections[i]); err != nil { + return nil, err + } + res = append(res, &h) + } + return res, nil +} diff --git a/triedb/pathdb/history_trienode_test.go b/triedb/pathdb/history_trienode_test.go new file mode 100644 index 0000000000..d6b80f61f5 --- /dev/null +++ b/triedb/pathdb/history_trienode_test.go @@ -0,0 +1,736 @@ +// Copyright 2025 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package pathdb + +import ( + "bytes" + "encoding/binary" + "math/rand" + "reflect" + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/rawdb" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/internal/testrand" +) + +// randomTrienodes generates a random trienode set. 
+func randomTrienodes(n int) (map[common.Hash]map[string][]byte, common.Hash) { + var ( + root common.Hash + nodes = make(map[common.Hash]map[string][]byte) + ) + for i := 0; i < n; i++ { + owner := testrand.Hash() + if i == 0 { + owner = common.Hash{} + } + nodes[owner] = make(map[string][]byte) + + for j := 0; j < 10; j++ { + path := testrand.Bytes(rand.Intn(10)) + for z := 0; z < len(path); z++ { + nodes[owner][string(path[:z])] = testrand.Bytes(rand.Intn(128)) + } + } + // zero-size trie node, representing it was non-existent before + for j := 0; j < 10; j++ { + path := testrand.Bytes(32) + nodes[owner][string(path)] = nil + } + // root node with zero-size path + rnode := testrand.Bytes(256) + nodes[owner][""] = rnode + if owner == (common.Hash{}) { + root = crypto.Keccak256Hash(rnode) + } + } + return nodes, root +} + +func makeTrienodeHistory() *trienodeHistory { + nodes, root := randomTrienodes(10) + return newTrienodeHistory(root, common.Hash{}, 1, nodes) +} + +func makeTrienodeHistories(n int) []*trienodeHistory { + var ( + parent common.Hash + result []*trienodeHistory + ) + for i := 0; i < n; i++ { + nodes, root := randomTrienodes(10) + result = append(result, newTrienodeHistory(root, parent, uint64(i+1), nodes)) + parent = root + } + return result +} + +func TestEncodeDecodeTrienodeHistory(t *testing.T) { + var ( + dec trienodeHistory + obj = makeTrienodeHistory() + ) + header, keySection, valueSection, err := obj.encode() + if err != nil { + t.Fatalf("Failed to encode trienode history: %v", err) + } + if err := dec.decode(header, keySection, valueSection); err != nil { + t.Fatalf("Failed to decode trienode history: %v", err) + } + + if !reflect.DeepEqual(obj.meta, dec.meta) { + t.Fatal("trienode metadata is mismatched") + } + if !compareList(dec.owners, obj.owners) { + t.Fatal("trie owner list is mismatched") + } + if !compareMapList(dec.nodeList, obj.nodeList) { + t.Fatal("trienode list is mismatched") + } + if !compareMapSet(dec.nodes, obj.nodes) { + 
t.Fatal("trienode content is mismatched") + } + + // Re-encode again, ensuring the encoded blob still match + header2, keySection2, valueSection2, err := dec.encode() + if err != nil { + t.Fatalf("Failed to encode trienode history: %v", err) + } + if !bytes.Equal(header, header2) { + t.Fatal("re-encoded header is mismatched") + } + if !bytes.Equal(keySection, keySection2) { + t.Fatal("re-encoded key section is mismatched") + } + if !bytes.Equal(valueSection, valueSection2) { + t.Fatal("re-encoded value section is mismatched") + } +} + +func TestTrienodeHistoryReader(t *testing.T) { + var ( + hs = makeTrienodeHistories(10) + freezer, _ = rawdb.NewTrienodeFreezer(t.TempDir(), false, false) + ) + defer freezer.Close() + + for i, h := range hs { + header, keySection, valueSection, _ := h.encode() + if err := rawdb.WriteTrienodeHistory(freezer, uint64(i+1), header, keySection, valueSection); err != nil { + t.Fatalf("Failed to write trienode history: %v", err) + } + } + for i, h := range hs { + tr, err := newTrienodeHistoryReader(uint64(i+1), freezer) + if err != nil { + t.Fatalf("Failed to construct the history reader: %v", err) + } + for _, owner := range h.owners { + nodes := h.nodes[owner] + for key, value := range nodes { + blob, err := tr.read(owner, key) + if err != nil { + t.Fatalf("Failed to read trienode history: %v", err) + } + if !bytes.Equal(blob, value) { + t.Fatalf("Unexpected trie node data, want: %v, got: %v", value, blob) + } + } + } + } + for i, h := range hs { + metadata, err := readTrienodeMetadata(freezer, uint64(i+1)) + if err != nil { + t.Fatalf("Failed to read trienode history metadata: %v", err) + } + if !reflect.DeepEqual(h.meta, metadata) { + t.Fatalf("Unexpected trienode metadata, want: %v, got: %v", h.meta, metadata) + } + } +} + +// TestEmptyTrienodeHistory tests encoding/decoding of empty trienode history +func TestEmptyTrienodeHistory(t *testing.T) { + h := newTrienodeHistory(common.Hash{}, common.Hash{}, 1, 
make(map[common.Hash]map[string][]byte)) + + // Test encoding empty history + header, keySection, valueSection, err := h.encode() + if err != nil { + t.Fatalf("Failed to encode empty trienode history: %v", err) + } + + // Verify sections are minimal but valid + if len(header) == 0 { + t.Fatal("Header should not be empty") + } + if len(keySection) != 0 { + t.Fatal("Key section should be empty for empty history") + } + if len(valueSection) != 0 { + t.Fatal("Value section should be empty for empty history") + } + + // Test decoding empty history + var decoded trienodeHistory + if err := decoded.decode(header, keySection, valueSection); err != nil { + t.Fatalf("Failed to decode empty trienode history: %v", err) + } + + if len(decoded.owners) != 0 { + t.Fatal("Decoded history should have no owners") + } + if len(decoded.nodeList) != 0 { + t.Fatal("Decoded history should have no node lists") + } + if len(decoded.nodes) != 0 { + t.Fatal("Decoded history should have no nodes") + } +} + +// TestSingleTrieHistory tests encoding/decoding of history with single trie +func TestSingleTrieHistory(t *testing.T) { + nodes := make(map[common.Hash]map[string][]byte) + owner := testrand.Hash() + nodes[owner] = make(map[string][]byte) + + // Add some nodes with various sizes + nodes[owner][""] = testrand.Bytes(32) // empty key + nodes[owner]["a"] = testrand.Bytes(1) // small value + nodes[owner]["bb"] = testrand.Bytes(100) // medium value + nodes[owner]["ccc"] = testrand.Bytes(1000) // large value + nodes[owner]["dddd"] = testrand.Bytes(0) // empty value + + h := newTrienodeHistory(common.Hash{}, common.Hash{}, 1, nodes) + testEncodeDecode(t, h) +} + +// TestMultipleTries tests multiple tries with different node counts +func TestMultipleTries(t *testing.T) { + nodes := make(map[common.Hash]map[string][]byte) + + // First trie with many small nodes + owner1 := testrand.Hash() + nodes[owner1] = make(map[string][]byte) + for i := 0; i < 100; i++ { + key := 
string(testrand.Bytes(rand.Intn(10))) + nodes[owner1][key] = testrand.Bytes(rand.Intn(50)) + } + + // Second trie with few large nodes + owner2 := testrand.Hash() + nodes[owner2] = make(map[string][]byte) + for i := 0; i < 5; i++ { + key := string(testrand.Bytes(rand.Intn(20))) + nodes[owner2][key] = testrand.Bytes(1000 + rand.Intn(1000)) + } + + // Third trie with nil values (zero-size nodes) + owner3 := testrand.Hash() + nodes[owner3] = make(map[string][]byte) + for i := 0; i < 10; i++ { + key := string(testrand.Bytes(rand.Intn(15))) + nodes[owner3][key] = nil + } + + h := newTrienodeHistory(common.Hash{}, common.Hash{}, 1, nodes) + testEncodeDecode(t, h) +} + +// TestLargeNodeValues tests encoding/decoding with very large node values +func TestLargeNodeValues(t *testing.T) { + nodes := make(map[common.Hash]map[string][]byte) + owner := testrand.Hash() + nodes[owner] = make(map[string][]byte) + + // Test with progressively larger values + sizes := []int{1024, 10 * 1024, 100 * 1024, 1024 * 1024} // 1KB, 10KB, 100KB, 1MB + for _, size := range sizes { + key := string(testrand.Bytes(10)) + nodes[owner][key] = testrand.Bytes(size) + + h := newTrienodeHistory(common.Hash{}, common.Hash{}, 1, nodes) + testEncodeDecode(t, h) + t.Logf("Successfully tested encoding/decoding with %dKB value", size/1024) + } +} + +// TestNilNodeValues tests encoding/decoding with nil (zero-length) node values +func TestNilNodeValues(t *testing.T) { + nodes := make(map[common.Hash]map[string][]byte) + owner := testrand.Hash() + nodes[owner] = make(map[string][]byte) + + // Mix of nil and non-nil values + nodes[owner]["nil"] = nil + nodes[owner]["data1"] = []byte("some data") + nodes[owner]["data2"] = []byte("more data") + + h := newTrienodeHistory(common.Hash{}, common.Hash{}, 1, nodes) + testEncodeDecode(t, h) + + // Verify nil values are preserved + _, ok := h.nodes[owner]["nil"] + if !ok { + t.Fatal("Nil value should be preserved") + } +} + +// TestCorruptedHeader tests error handling for 
corrupted header data +func TestCorruptedHeader(t *testing.T) { + h := makeTrienodeHistory() + header, keySection, valueSection, _ := h.encode() + + // Test corrupted version + corruptedHeader := make([]byte, len(header)) + copy(corruptedHeader, header) + corruptedHeader[0] = 0xFF // Invalid version + + var decoded trienodeHistory + if err := decoded.decode(corruptedHeader, keySection, valueSection); err == nil { + t.Fatal("Expected error for corrupted version") + } + + // Test truncated header + truncatedHeader := header[:len(header)-5] + if err := decoded.decode(truncatedHeader, keySection, valueSection); err == nil { + t.Fatal("Expected error for truncated header") + } + + // Test header with invalid trie header size + invalidHeader := make([]byte, len(header)) + copy(invalidHeader, header) + invalidHeader = invalidHeader[:trienodeMetadataSize+5] // Not divisible by trie header size + + if err := decoded.decode(invalidHeader, keySection, valueSection); err == nil { + t.Fatal("Expected error for invalid header size") + } +} + +// TestCorruptedKeySection tests error handling for corrupted key section data +func TestCorruptedKeySection(t *testing.T) { + h := makeTrienodeHistory() + header, keySection, valueSection, _ := h.encode() + + // Test empty key section when header indicates data + if len(keySection) > 0 { + var decoded trienodeHistory + if err := decoded.decode(header, []byte{}, valueSection); err == nil { + t.Fatal("Expected error for empty key section with non-empty header") + } + } + + // Test truncated key section + if len(keySection) > 10 { + truncatedKeySection := keySection[:len(keySection)-10] + var decoded trienodeHistory + if err := decoded.decode(header, truncatedKeySection, valueSection); err == nil { + t.Fatal("Expected error for truncated key section") + } + } + + // Test corrupted key section with invalid varint + corruptedKeySection := make([]byte, len(keySection)) + copy(corruptedKeySection, keySection) + if len(corruptedKeySection) > 5 { + 
corruptedKeySection[5] = 0xFF // Corrupt varint encoding + var decoded trienodeHistory + if err := decoded.decode(header, corruptedKeySection, valueSection); err == nil { + t.Fatal("Expected error for corrupted varint in key section") + } + } +} + +// TestCorruptedValueSection tests error handling for corrupted value section data +func TestCorruptedValueSection(t *testing.T) { + h := makeTrienodeHistory() + header, keySection, valueSection, _ := h.encode() + + // Test truncated value section + if len(valueSection) > 10 { + truncatedValueSection := valueSection[:len(valueSection)-10] + var decoded trienodeHistory + if err := decoded.decode(header, keySection, truncatedValueSection); err == nil { + t.Fatal("Expected error for truncated value section") + } + } + + // Test empty value section when key section indicates data exists + if len(valueSection) > 0 { + var decoded trienodeHistory + if err := decoded.decode(header, keySection, []byte{}); err == nil { + t.Fatal("Expected error for empty value section with non-empty key section") + } + } +} + +// TestInvalidOffsets tests error handling for invalid offsets in encoded data +func TestInvalidOffsets(t *testing.T) { + h := makeTrienodeHistory() + header, keySection, valueSection, _ := h.encode() + + // Corrupt key offset in header (make it larger than key section) + corruptedHeader := make([]byte, len(header)) + copy(corruptedHeader, header) + corruptedHeader[trienodeMetadataSize+common.HashLength] = 0xff + + var dec1 trienodeHistory + if err := dec1.decode(corruptedHeader, keySection, valueSection); err == nil { + t.Fatal("Expected error for invalid key offset") + } + + // Corrupt value offset in header (make it larger than value section) + corruptedHeader = make([]byte, len(header)) + copy(corruptedHeader, header) + corruptedHeader[trienodeMetadataSize+common.HashLength+4] = 0xff + + var dec2 trienodeHistory + if err := dec2.decode(corruptedHeader, keySection, valueSection); err == nil { + t.Fatal("Expected error 
for invalid value offset") + } +} + +// TestTrienodeHistoryReaderNonExistentPath tests reading non-existent paths +func TestTrienodeHistoryReaderNonExistentPath(t *testing.T) { + var ( + h = makeTrienodeHistory() + freezer, _ = rawdb.NewTrienodeFreezer(t.TempDir(), false, false) + ) + defer freezer.Close() + + header, keySection, valueSection, _ := h.encode() + if err := rawdb.WriteTrienodeHistory(freezer, 1, header, keySection, valueSection); err != nil { + t.Fatalf("Failed to write trienode history: %v", err) + } + + tr, err := newTrienodeHistoryReader(1, freezer) + if err != nil { + t.Fatalf("Failed to construct history reader: %v", err) + } + + // Try to read a non-existent path + _, err = tr.read(testrand.Hash(), "nonexistent") + if err == nil { + t.Fatal("Expected error for non-existent trie owner") + } + + // Try to read from existing owner but non-existent path + owner := h.owners[0] + _, err = tr.read(owner, "nonexistent-path") + if err == nil { + t.Fatal("Expected error for non-existent path") + } +} + +// TestTrienodeHistoryReaderNilValues tests reading nil (zero-length) values +func TestTrienodeHistoryReaderNilValues(t *testing.T) { + nodes := make(map[common.Hash]map[string][]byte) + owner := testrand.Hash() + nodes[owner] = make(map[string][]byte) + + // Add some nil values + nodes[owner]["nil1"] = nil + nodes[owner]["nil2"] = nil + nodes[owner]["data1"] = []byte("some data") + + h := newTrienodeHistory(common.Hash{}, common.Hash{}, 1, nodes) + + var freezer, _ = rawdb.NewTrienodeFreezer(t.TempDir(), false, false) + defer freezer.Close() + + header, keySection, valueSection, _ := h.encode() + if err := rawdb.WriteTrienodeHistory(freezer, 1, header, keySection, valueSection); err != nil { + t.Fatalf("Failed to write trienode history: %v", err) + } + + tr, err := newTrienodeHistoryReader(1, freezer) + if err != nil { + t.Fatalf("Failed to construct history reader: %v", err) + } + + // Test reading nil values + data1, err := tr.read(owner, "nil1") + if 
err != nil { + t.Fatalf("Failed to read nil value: %v", err) + } + if len(data1) != 0 { + t.Fatal("Expected nil data for nil value") + } + + data2, err := tr.read(owner, "nil2") + if err != nil { + t.Fatalf("Failed to read nil value: %v", err) + } + if len(data2) != 0 { + t.Fatal("Expected nil data for nil value") + } + + // Test reading non-nil value + data3, err := tr.read(owner, "data1") + if err != nil { + t.Fatalf("Failed to read non-nil value: %v", err) + } + if !bytes.Equal(data3, []byte("some data")) { + t.Fatal("Data mismatch for non-nil value") + } +} + +// TestTrienodeHistoryReaderNilKey tests reading nil (zero-length) key +func TestTrienodeHistoryReaderNilKey(t *testing.T) { + nodes := make(map[common.Hash]map[string][]byte) + owner := testrand.Hash() + nodes[owner] = make(map[string][]byte) + + // Add some nil values + nodes[owner][""] = []byte("some data") + nodes[owner]["data1"] = []byte("some data") + + h := newTrienodeHistory(common.Hash{}, common.Hash{}, 1, nodes) + + var freezer, _ = rawdb.NewTrienodeFreezer(t.TempDir(), false, false) + defer freezer.Close() + + header, keySection, valueSection, _ := h.encode() + if err := rawdb.WriteTrienodeHistory(freezer, 1, header, keySection, valueSection); err != nil { + t.Fatalf("Failed to write trienode history: %v", err) + } + + tr, err := newTrienodeHistoryReader(1, freezer) + if err != nil { + t.Fatalf("Failed to construct history reader: %v", err) + } + + // Test reading nil values + data1, err := tr.read(owner, "") + if err != nil { + t.Fatalf("Failed to read nil value: %v", err) + } + if !bytes.Equal(data1, []byte("some data")) { + t.Fatal("Data mismatch for nil key") + } + + // Test reading non-nil value + data2, err := tr.read(owner, "data1") + if err != nil { + t.Fatalf("Failed to read non-nil value: %v", err) + } + if !bytes.Equal(data2, []byte("some data")) { + t.Fatal("Data mismatch for non-nil key") + } +} + +// TestTrienodeHistoryReaderIterator tests the iterator functionality +func 
TestTrienodeHistoryReaderIterator(t *testing.T) { + h := makeTrienodeHistory() + + // Count expected entries + expectedCount := 0 + expectedNodes := make(map[stateIdent]bool) + for owner, nodeList := range h.nodeList { + expectedCount += len(nodeList) + for _, node := range nodeList { + expectedNodes[stateIdent{ + typ: typeTrienode, + addressHash: owner, + path: node, + }] = true + } + } + + // Test the iterator + actualCount := 0 + for x := range h.forEach() { + _ = x + actualCount++ + } + if actualCount != expectedCount { + t.Fatalf("Iterator count mismatch: expected %d, got %d", expectedCount, actualCount) + } + + // Test that iterator yields expected state identifiers + seen := make(map[stateIdent]bool) + for ident := range h.forEach() { + if ident.typ != typeTrienode { + t.Fatal("Iterator should only yield trienode history identifiers") + } + key := stateIdent{typ: ident.typ, addressHash: ident.addressHash, path: ident.path} + if seen[key] { + t.Fatal("Iterator yielded duplicate identifier") + } + seen[key] = true + + if !expectedNodes[key] { + t.Fatalf("Unexpected yielded identifier %v", key) + } + } +} + +// TestSharedLen tests the sharedLen helper function +func TestSharedLen(t *testing.T) { + tests := []struct { + a, b []byte + expected int + }{ + // Empty strings + {[]byte(""), []byte(""), 0}, + // One empty string + {[]byte(""), []byte("abc"), 0}, + {[]byte("abc"), []byte(""), 0}, + // No common prefix + {[]byte("abc"), []byte("def"), 0}, + // Partial common prefix + {[]byte("abc"), []byte("abx"), 2}, + {[]byte("prefix"), []byte("pref"), 4}, + // Complete common prefix (shorter first) + {[]byte("ab"), []byte("abcd"), 2}, + // Complete common prefix (longer first) + {[]byte("abcd"), []byte("ab"), 2}, + // Identical strings + {[]byte("identical"), []byte("identical"), 9}, + // Binary data + {[]byte{0x00, 0x01, 0x02}, []byte{0x00, 0x01, 0x03}, 2}, + // Large strings + {bytes.Repeat([]byte("a"), 1000), bytes.Repeat([]byte("a"), 1000), 1000}, + 
{bytes.Repeat([]byte("a"), 1000), append(bytes.Repeat([]byte("a"), 999), []byte("b")...), 999}, + } + + for i, test := range tests { + result := sharedLen(test.a, test.b) + if result != test.expected { + t.Errorf("Test %d: sharedLen(%q, %q) = %d, expected %d", + i, test.a, test.b, result, test.expected) + } + // Test commutativity + resultReverse := sharedLen(test.b, test.a) + if result != resultReverse { + t.Errorf("Test %d: sharedLen is not commutative: sharedLen(a,b)=%d, sharedLen(b,a)=%d", + i, result, resultReverse) + } + } +} + +// TestDecodeHeaderCorruptedData tests decodeHeader with corrupted data +func TestDecodeHeaderCorruptedData(t *testing.T) { + // Create valid header data first + h := makeTrienodeHistory() + header, _, _, _ := h.encode() + + // Test with empty header + _, _, _, _, err := decodeHeader([]byte{}) + if err == nil { + t.Fatal("Expected error for empty header") + } + + // Test with invalid version + corruptedVersion := make([]byte, len(header)) + copy(corruptedVersion, header) + corruptedVersion[0] = 0xFF + _, _, _, _, err = decodeHeader(corruptedVersion) + if err == nil { + t.Fatal("Expected error for invalid version") + } + + // Test with truncated header (not divisible by trie header size) + truncated := header[:trienodeMetadataSize+5] + _, _, _, _, err = decodeHeader(truncated) + if err == nil { + t.Fatal("Expected error for truncated header") + } + + // Test with unordered trie owners + unordered := make([]byte, len(header)) + copy(unordered, header) + + // Swap two owner hashes to make them unordered + hash1Start := trienodeMetadataSize + hash2Start := trienodeMetadataSize + trienodeTrieHeaderSize + hash1 := unordered[hash1Start : hash1Start+common.HashLength] + hash2 := unordered[hash2Start : hash2Start+common.HashLength] + + // Only swap if they would be out of order + copy(unordered[hash1Start:hash1Start+common.HashLength], hash2) + copy(unordered[hash2Start:hash2Start+common.HashLength], hash1) + + _, _, _, _, err = 
decodeHeader(unordered) + if err == nil { + t.Fatal("Expected error for unordered trie owners") + } +} + +// TestDecodeSingleCorruptedData tests decodeSingle with corrupted data +func TestDecodeSingleCorruptedData(t *testing.T) { + h := makeTrienodeHistory() + _, keySection, _, _ := h.encode() + + // Test with empty key section + _, err := decodeSingle([]byte{}, nil) + if err == nil { + t.Fatal("Expected error for empty key section") + } + + // Test with key section too small for trailer + if len(keySection) > 0 { + _, err := decodeSingle(keySection[:3], nil) // Less than 4 bytes for trailer + if err == nil { + t.Fatal("Expected error for key section too small for trailer") + } + } + + // Test with corrupted varint in key section + corrupted := make([]byte, len(keySection)) + copy(corrupted, keySection) + corrupted[5] = 0xFF // Corrupt varint + _, err = decodeSingle(corrupted, nil) + if err == nil { + t.Fatal("Expected error for corrupted varint") + } + + // Test with corrupted trailer (invalid restart count) + corrupted = make([]byte, len(keySection)) + copy(corrupted, keySection) + // Set restart count to something too large + binary.BigEndian.PutUint32(corrupted[len(corrupted)-4:], 10000) + _, err = decodeSingle(corrupted, nil) + if err == nil { + t.Fatal("Expected error for invalid restart count") + } +} + +// Helper function to test encode/decode cycle +func testEncodeDecode(t *testing.T, h *trienodeHistory) { + header, keySection, valueSection, err := h.encode() + if err != nil { + t.Fatalf("Failed to encode trienode history: %v", err) + } + + var decoded trienodeHistory + if err := decoded.decode(header, keySection, valueSection); err != nil { + t.Fatalf("Failed to decode trienode history: %v", err) + } + + // Compare the decoded history with original + if !compareList(decoded.owners, h.owners) { + t.Fatal("Trie owner list mismatch") + } + if !compareMapList(decoded.nodeList, h.nodeList) { + t.Fatal("Trienode list mismatch") + } + if 
!compareMapSet(decoded.nodes, h.nodes) { + t.Fatal("Trienode content mismatch") + } +} diff --git a/triedb/pathdb/metrics.go b/triedb/pathdb/metrics.go index 779f9d813f..31c40053fc 100644 --- a/triedb/pathdb/metrics.go +++ b/triedb/pathdb/metrics.go @@ -69,12 +69,21 @@ var ( gcStorageMeter = metrics.NewRegisteredMeter("pathdb/gc/storage/count", nil) gcStorageBytesMeter = metrics.NewRegisteredMeter("pathdb/gc/storage/bytes", nil) - historyBuildTimeMeter = metrics.NewRegisteredResettingTimer("pathdb/history/time", nil) - historyDataBytesMeter = metrics.NewRegisteredMeter("pathdb/history/bytes/data", nil) - historyIndexBytesMeter = metrics.NewRegisteredMeter("pathdb/history/bytes/index", nil) + stateHistoryBuildTimeMeter = metrics.NewRegisteredResettingTimer("pathdb/history/state/time", nil) + stateHistoryDataBytesMeter = metrics.NewRegisteredMeter("pathdb/history/state/bytes/data", nil) + stateHistoryIndexBytesMeter = metrics.NewRegisteredMeter("pathdb/history/state/bytes/index", nil) - indexHistoryTimer = metrics.NewRegisteredResettingTimer("pathdb/history/index/time", nil) - unindexHistoryTimer = metrics.NewRegisteredResettingTimer("pathdb/history/unindex/time", nil) + //nolint:unused + trienodeHistoryBuildTimeMeter = metrics.NewRegisteredResettingTimer("pathdb/history/trienode/time", nil) + //nolint:unused + trienodeHistoryDataBytesMeter = metrics.NewRegisteredMeter("pathdb/history/trienode/bytes/data", nil) + //nolint:unused + trienodeHistoryIndexBytesMeter = metrics.NewRegisteredMeter("pathdb/history/trienode/bytes/index", nil) + + stateIndexHistoryTimer = metrics.NewRegisteredResettingTimer("pathdb/history/state/index/time", nil) + stateUnindexHistoryTimer = metrics.NewRegisteredResettingTimer("pathdb/history/state/unindex/time", nil) + trienodeIndexHistoryTimer = metrics.NewRegisteredResettingTimer("pathdb/history/trienode/index/time", nil) + trienodeUnindexHistoryTimer = metrics.NewRegisteredResettingTimer("pathdb/history/trienode/unindex/time", nil) 
lookupAddLayerTimer = metrics.NewRegisteredResettingTimer("pathdb/lookup/add/time", nil) lookupRemoveLayerTimer = metrics.NewRegisteredResettingTimer("pathdb/lookup/remove/time", nil) From 659342a52300d9cd8218face5528a91e7434a8fb Mon Sep 17 00:00:00 2001 From: 10gic Date: Fri, 10 Oct 2025 17:47:33 +0800 Subject: [PATCH 026/277] ethclient: add SubscribeTransactionReceipts (#32869) Add `SubscribeTransactionReceipts` for ethclient. This is a complement to https://github.com/ethereum/go-ethereum/pull/32697. --- eth/filters/api.go | 24 ++++++++++++++++++------ ethclient/ethclient.go | 9 +++++++++ interfaces.go | 12 ++++++++++++ 3 files changed, 39 insertions(+), 6 deletions(-) diff --git a/eth/filters/api.go b/eth/filters/api.go index 9f9209aea7..a3ed00f33b 100644 --- a/eth/filters/api.go +++ b/eth/filters/api.go @@ -299,15 +299,27 @@ func (api *FilterAPI) Logs(ctx context.Context, crit FilterCriteria) (*rpc.Subsc return rpcSub, nil } -// TransactionReceiptsFilter defines criteria for transaction receipts subscription. -// If TransactionHashes is nil or empty, receipts for all transactions included in new blocks will be delivered. -// Otherwise, only receipts for the specified transactions will be delivered. -type TransactionReceiptsFilter struct { - TransactionHashes []common.Hash `json:"transactionHashes,omitempty"` +// TransactionReceiptsQuery defines criteria for transaction receipts subscription. +// Same as ethereum.TransactionReceiptsQuery but with UnmarshalJSON() method. +type TransactionReceiptsQuery ethereum.TransactionReceiptsQuery + +// UnmarshalJSON sets *args fields with given data. 
+func (args *TransactionReceiptsQuery) UnmarshalJSON(data []byte) error { + type input struct { + TransactionHashes []common.Hash `json:"transactionHashes"` + } + + var raw input + if err := json.Unmarshal(data, &raw); err != nil { + return err + } + + args.TransactionHashes = raw.TransactionHashes + return nil } // TransactionReceipts creates a subscription that fires transaction receipts when transactions are included in blocks. -func (api *FilterAPI) TransactionReceipts(ctx context.Context, filter *TransactionReceiptsFilter) (*rpc.Subscription, error) { +func (api *FilterAPI) TransactionReceipts(ctx context.Context, filter *TransactionReceiptsQuery) (*rpc.Subscription, error) { notifier, supported := rpc.NotifierFromContext(ctx) if !supported { return &rpc.Subscription{}, rpc.ErrNotificationsUnsupported diff --git a/ethclient/ethclient.go b/ethclient/ethclient.go index 1195929f7d..8b26f5b3ca 100644 --- a/ethclient/ethclient.go +++ b/ethclient/ethclient.go @@ -350,6 +350,15 @@ func (ec *Client) TransactionReceipt(ctx context.Context, txHash common.Hash) (* return r, err } +// SubscribeTransactionReceipts subscribes to notifications about transaction receipts. +func (ec *Client) SubscribeTransactionReceipts(ctx context.Context, q *ethereum.TransactionReceiptsQuery, ch chan<- []*types.Receipt) (ethereum.Subscription, error) { + sub, err := ec.c.EthSubscribe(ctx, ch, "transactionReceipts", q) + if err != nil { + return nil, err + } + return sub, nil +} + // SyncProgress retrieves the current progress of the sync algorithm. If there's // no sync currently running, it returns nil. 
func (ec *Client) SyncProgress(ctx context.Context) (*ethereum.SyncProgress, error) { diff --git a/interfaces.go b/interfaces.go index be5b970851..2828af1cc9 100644 --- a/interfaces.go +++ b/interfaces.go @@ -62,6 +62,13 @@ type ChainReader interface { SubscribeNewHead(ctx context.Context, ch chan<- *types.Header) (Subscription, error) } +// TransactionReceiptsQuery defines criteria for transaction receipts subscription. +// If TransactionHashes is empty, receipts for all transactions included in new blocks will be delivered. +// Otherwise, only receipts for the specified transactions will be delivered. +type TransactionReceiptsQuery struct { + TransactionHashes []common.Hash +} + // TransactionReader provides access to past transactions and their receipts. // Implementations may impose arbitrary restrictions on the transactions and receipts that // can be retrieved. Historic transactions may not be available. @@ -81,6 +88,11 @@ type TransactionReader interface { // transaction may not be included in the current canonical chain even if a receipt // exists. TransactionReceipt(ctx context.Context, txHash common.Hash) (*types.Receipt, error) + // SubscribeTransactionReceipts subscribes to notifications about transaction receipts. + // The receipts are delivered in batches when transactions are included in blocks. + // If q is nil or has empty TransactionHashes, all receipts from new blocks will be delivered. + // Otherwise, only receipts for the specified transaction hashes will be delivered. + SubscribeTransactionReceipts(ctx context.Context, q *TransactionReceiptsQuery, ch chan<- []*types.Receipt) (Subscription, error) } // ChainStateReader wraps access to the state trie of the canonical blockchain. 
Note that From a3aae29845736cbf40c2bedbdee1c3396dd2f88c Mon Sep 17 00:00:00 2001 From: Luke Ma Date: Mon, 13 Oct 2025 15:26:35 +0800 Subject: [PATCH 027/277] node: fix error condition in gzipResponseWriter.init() (#32896) --- node/rpcstack.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/node/rpcstack.go b/node/rpcstack.go index 655f7db9e4..a1cc832f9f 100644 --- a/node/rpcstack.go +++ b/node/rpcstack.go @@ -500,7 +500,7 @@ func (w *gzipResponseWriter) init() { hdr := w.resp.Header() length := hdr.Get("content-length") if len(length) > 0 { - if n, err := strconv.ParseUint(length, 10, 64); err != nil { + if n, err := strconv.ParseUint(length, 10, 64); err == nil { w.hasLength = true w.contentLength = n } From 2010781c2998557ccf9883155b6ff4e73d1d64a0 Mon Sep 17 00:00:00 2001 From: cui Date: Mon, 13 Oct 2025 16:39:10 +0800 Subject: [PATCH 028/277] core/types: optimize MergeBloom by using bitutil (#32882) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ``` goos: darwin goarch: amd64 pkg: github.com/ethereum/go-ethereum/core/types cpu: VirtualApple @ 2.50GHz │ old.txt │ new.txt │ │ sec/op │ sec/op vs base │ CreateBloom/small-createbloom-10 1.676µ ± 4% 1.646µ ± 1% -1.76% (p=0.000 n=10) CreateBloom/large-createbloom-10 164.8µ ± 3% 164.3µ ± 0% ~ (p=0.247 n=10) CreateBloom/small-mergebloom-10 231.60n ± 0% 68.00n ± 0% -70.64% (p=0.000 n=10) CreateBloom/large-mergebloom-10 21.803µ ± 3% 5.107µ ± 1% -76.58% (p=0.000 n=10) geomean 6.111µ 3.113µ -49.06% │ old.txt │ new.txt │ │ B/op │ B/op vs base │ CreateBloom/small-createbloom-10 112.0 ± 0% 112.0 ± 0% ~ (p=1.000 n=10) ¹ CreateBloom/large-createbloom-10 10.94Ki ± 0% 10.94Ki ± 0% ~ (p=0.474 n=10) CreateBloom/small-mergebloom-10 0.000 ± 0% 0.000 ± 0% ~ (p=1.000 n=10) ¹ CreateBloom/large-mergebloom-10 0.000 ± 0% 0.000 ± 0% ~ (p=1.000 n=10) ¹ geomean ² +0.00% ² ¹ all samples are equal ² summaries must be >0 to compute geomean │ old.txt │ new.txt │ │ allocs/op │ allocs/op vs 
base │ CreateBloom/small-createbloom-10 6.000 ± 0% 6.000 ± 0% ~ (p=1.000 n=10) ¹ CreateBloom/large-createbloom-10 600.0 ± 0% 600.0 ± 0% ~ (p=1.000 n=10) ¹ CreateBloom/small-mergebloom-10 0.000 ± 0% 0.000 ± 0% ~ (p=1.000 n=10) ¹ CreateBloom/large-mergebloom-10 0.000 ± 0% 0.000 ± 0% ~ (p=1.000 n=10) ¹ geomean ² +0.00% ² ¹ all samples are equal ² summaries must be >0 to compute geomean ``` --- core/types/bloom9.go | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/core/types/bloom9.go b/core/types/bloom9.go index 5a6e49c220..1d57e8e4bc 100644 --- a/core/types/bloom9.go +++ b/core/types/bloom9.go @@ -21,6 +21,7 @@ import ( "fmt" "math/big" + "github.com/ethereum/go-ethereum/common/bitutil" "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/crypto" ) @@ -125,9 +126,7 @@ func MergeBloom(receipts Receipts) Bloom { for _, receipt := range receipts { if len(receipt.Logs) != 0 { bl := receipt.Bloom.Bytes() - for i := range bin { - bin[i] |= bl[i] - } + bitutil.ORBytes(bin[:], bin[:], bl) } } return bin From 85e9977faecd23909b0373ae4c8268e8bf62b6a3 Mon Sep 17 00:00:00 2001 From: Delweng Date: Mon, 13 Oct 2025 16:40:08 +0800 Subject: [PATCH 029/277] p2p: rm unused var seedMinTableTime (#32876) --- p2p/discover/table.go | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/p2p/discover/table.go b/p2p/discover/table.go index 6a1c7494ee..e5b2c7c8c5 100644 --- a/p2p/discover/table.go +++ b/p2p/discover/table.go @@ -54,9 +54,8 @@ const ( bucketIPLimit, bucketSubnet = 2, 24 // at most 2 addresses from the same /24 tableIPLimit, tableSubnet = 10, 24 - seedMinTableTime = 5 * time.Minute - seedCount = 30 - seedMaxAge = 5 * 24 * time.Hour + seedCount = 30 + seedMaxAge = 5 * 24 * time.Hour ) // Table is the 'node table', a Kademlia-like index of neighbor nodes. 
The table keeps From bc0a21a1d5e69513530cbe781b73c6b116856627 Mon Sep 17 00:00:00 2001 From: Lewis Date: Mon, 13 Oct 2025 20:10:44 +0900 Subject: [PATCH 030/277] eth/filters: uninstall subscription in filter apis on error (#32894) Fix https://github.com/ethereum/go-ethereum/issues/32893. In the previous https://github.com/ethereum/go-ethereum/pull/32794, it only handles the pending tx filter, while there are also head and log filters. This PR applies the patch to all filter APIs and uses `defer` to maintain code consistency. --- eth/filters/api.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/eth/filters/api.go b/eth/filters/api.go index a3ed00f33b..58baf2c3aa 100644 --- a/eth/filters/api.go +++ b/eth/filters/api.go @@ -143,6 +143,7 @@ func (api *FilterAPI) NewPendingTransactionFilter(fullTx *bool) rpc.ID { api.filtersMu.Unlock() go func() { + defer pendingTxSub.Unsubscribe() for { select { case pTx := <-pendingTxs: @@ -155,7 +156,6 @@ func (api *FilterAPI) NewPendingTransactionFilter(fullTx *bool) rpc.ID { api.filtersMu.Lock() delete(api.filters, pendingTxSub.ID) api.filtersMu.Unlock() - pendingTxSub.Unsubscribe() return } } @@ -218,6 +218,7 @@ func (api *FilterAPI) NewBlockFilter() rpc.ID { api.filtersMu.Unlock() go func() { + defer headerSub.Unsubscribe() for { select { case h := <-headers: @@ -403,6 +404,7 @@ func (api *FilterAPI) NewFilter(crit FilterCriteria) (rpc.ID, error) { api.filtersMu.Unlock() go func() { + defer logsSub.Unsubscribe() for { select { case l := <-logs: From a7359ceb69fcd183e59f651e34aae873cf03e5a0 Mon Sep 17 00:00:00 2001 From: Delweng Date: Mon, 13 Oct 2025 19:40:03 +0800 Subject: [PATCH 031/277] triedb, core/rawdb: implement the partial read in freezer (#32132) This PR implements the partial read functionalities in the freezer, optimizing the state history reader by resolving less data from freezer. 
--------- Signed-off-by: jsvisa Co-authored-by: Gary Rong --- core/rawdb/accessors_state.go | 24 +++--------- core/rawdb/chain_freezer.go | 4 ++ core/rawdb/database.go | 6 +++ core/rawdb/freezer.go | 9 +++++ core/rawdb/freezer_memory.go | 25 ++++++++++++ core/rawdb/freezer_resettable.go | 9 +++++ core/rawdb/freezer_table.go | 65 ++++++++++++++++++++++++++++++++ core/rawdb/freezer_table_test.go | 62 ++++++++++++++++++++++++++++++ core/rawdb/table.go | 6 +++ ethdb/database.go | 4 ++ ethdb/remotedb/remotedb.go | 4 ++ triedb/pathdb/history_reader.go | 38 +++++++------------ 12 files changed, 214 insertions(+), 42 deletions(-) diff --git a/core/rawdb/accessors_state.go b/core/rawdb/accessors_state.go index 298ad04f40..714c1f77d6 100644 --- a/core/rawdb/accessors_state.go +++ b/core/rawdb/accessors_state.go @@ -188,24 +188,16 @@ func ReadStateAccountIndex(db ethdb.AncientReaderOp, id uint64) []byte { // data in the concatenated storage data table. Compute the position of state // history in freezer by minus one since the id of first state history starts // from one (zero for initial state). -func ReadStateStorageIndex(db ethdb.AncientReaderOp, id uint64) []byte { - blob, err := db.Ancient(stateHistoryStorageIndex, id-1) - if err != nil { - return nil - } - return blob +func ReadStateStorageIndex(db ethdb.AncientReaderOp, id uint64, offset, length int) ([]byte, error) { + return db.AncientBytes(stateHistoryStorageIndex, id-1, uint64(offset), uint64(length)) } // ReadStateAccountHistory retrieves the concatenated account data blob for the // specified state history. Offsets and lengths are resolved via the account // index. Compute the position of state history in freezer by minus one since // the id of first state history starts from one (zero for initial state). 
-func ReadStateAccountHistory(db ethdb.AncientReaderOp, id uint64) []byte { - blob, err := db.Ancient(stateHistoryAccountData, id-1) - if err != nil { - return nil - } - return blob +func ReadStateAccountHistory(db ethdb.AncientReaderOp, id uint64, offset, length int) ([]byte, error) { + return db.AncientBytes(stateHistoryAccountData, id-1, uint64(offset), uint64(length)) } // ReadStateStorageHistory retrieves the concatenated storage slot data blob for @@ -213,12 +205,8 @@ func ReadStateAccountHistory(db ethdb.AncientReaderOp, id uint64) []byte { // storage indexes. Compute the position of state history in freezer by minus // one since the id of first state history starts from one (zero for initial // state). -func ReadStateStorageHistory(db ethdb.AncientReaderOp, id uint64) []byte { - blob, err := db.Ancient(stateHistoryStorageData, id-1) - if err != nil { - return nil - } - return blob +func ReadStateStorageHistory(db ethdb.AncientReaderOp, id uint64, offset, length int) ([]byte, error) { + return db.AncientBytes(stateHistoryStorageData, id-1, uint64(offset), uint64(length)) } // ReadStateHistory retrieves the state history from database with provided id. 
diff --git a/core/rawdb/chain_freezer.go b/core/rawdb/chain_freezer.go index c12f2ab8fe..d33f7ce33d 100644 --- a/core/rawdb/chain_freezer.go +++ b/core/rawdb/chain_freezer.go @@ -403,6 +403,10 @@ func (f *chainFreezer) AncientRange(kind string, start, count, maxBytes uint64) return f.ancients.AncientRange(kind, start, count, maxBytes) } +func (f *chainFreezer) AncientBytes(kind string, id, offset, length uint64) ([]byte, error) { + return f.ancients.AncientBytes(kind, id, offset, length) +} + func (f *chainFreezer) ModifyAncients(fn func(ethdb.AncientWriteOp) error) (int64, error) { return f.ancients.ModifyAncients(fn) } diff --git a/core/rawdb/database.go b/core/rawdb/database.go index 626d390c0d..724c90ead6 100644 --- a/core/rawdb/database.go +++ b/core/rawdb/database.go @@ -100,6 +100,12 @@ func (db *nofreezedb) AncientRange(kind string, start, max, maxByteSize uint64) return nil, errNotSupported } +// AncientBytes retrieves the value segment of the element specified by the id +// and value offsets. +func (db *nofreezedb) AncientBytes(kind string, id, offset, length uint64) ([]byte, error) { + return nil, errNotSupported +} + // Ancients returns an error as we don't have a backing chain freezer. func (db *nofreezedb) Ancients() (uint64, error) { return 0, errNotSupported diff --git a/core/rawdb/freezer.go b/core/rawdb/freezer.go index 98ad174ce0..42cd2a7999 100644 --- a/core/rawdb/freezer.go +++ b/core/rawdb/freezer.go @@ -202,6 +202,15 @@ func (f *Freezer) AncientRange(kind string, start, count, maxBytes uint64) ([][] return nil, errUnknownTable } +// AncientBytes retrieves the value segment of the element specified by the id +// and value offsets. +func (f *Freezer) AncientBytes(kind string, id, offset, length uint64) ([]byte, error) { + if table := f.tables[kind]; table != nil { + return table.RetrieveBytes(id, offset, length) + } + return nil, errUnknownTable +} + // Ancients returns the length of the frozen items. 
func (f *Freezer) Ancients() (uint64, error) { return f.frozen.Load(), nil diff --git a/core/rawdb/freezer_memory.go b/core/rawdb/freezer_memory.go index f5621ac4c6..8cb4cc2006 100644 --- a/core/rawdb/freezer_memory.go +++ b/core/rawdb/freezer_memory.go @@ -412,3 +412,28 @@ func (f *MemoryFreezer) Reset() error { func (f *MemoryFreezer) AncientDatadir() (string, error) { return "", nil } + +// AncientBytes retrieves the value segment of the element specified by the id +// and value offsets. +func (f *MemoryFreezer) AncientBytes(kind string, id, offset, length uint64) ([]byte, error) { + f.lock.RLock() + defer f.lock.RUnlock() + + table := f.tables[kind] + if table == nil { + return nil, errUnknownTable + } + entries, err := table.retrieve(id, 1, 0) + if err != nil { + return nil, err + } + if len(entries) == 0 { + return nil, errOutOfBounds + } + data := entries[0] + + if offset > uint64(len(data)) || offset+length > uint64(len(data)) { + return nil, fmt.Errorf("requested range out of bounds: item size %d, offset %d, length %d", len(data), offset, length) + } + return data[offset : offset+length], nil +} diff --git a/core/rawdb/freezer_resettable.go b/core/rawdb/freezer_resettable.go index 9db71cfd0e..f531e668c3 100644 --- a/core/rawdb/freezer_resettable.go +++ b/core/rawdb/freezer_resettable.go @@ -126,6 +126,15 @@ func (f *resettableFreezer) AncientRange(kind string, start, count, maxBytes uin return f.freezer.AncientRange(kind, start, count, maxBytes) } +// AncientBytes retrieves the value segment of the element specified by the id +// and value offsets. +func (f *resettableFreezer) AncientBytes(kind string, id, offset, length uint64) ([]byte, error) { + f.lock.RLock() + defer f.lock.RUnlock() + + return f.freezer.AncientBytes(kind, id, offset, length) +} + // Ancients returns the length of the frozen items. 
func (f *resettableFreezer) Ancients() (uint64, error) { f.lock.RLock() diff --git a/core/rawdb/freezer_table.go b/core/rawdb/freezer_table.go index d3a29a73c6..01a754c5c8 100644 --- a/core/rawdb/freezer_table.go +++ b/core/rawdb/freezer_table.go @@ -1107,6 +1107,71 @@ func (t *freezerTable) retrieveItems(start, count, maxBytes uint64) ([]byte, []i return output, sizes, nil } +// RetrieveBytes retrieves the value segment of the element specified by the id +// and value offsets. +func (t *freezerTable) RetrieveBytes(item, offset, length uint64) ([]byte, error) { + t.lock.RLock() + defer t.lock.RUnlock() + + if t.index == nil || t.head == nil || t.metadata.file == nil { + return nil, errClosed + } + items, hidden := t.items.Load(), t.itemHidden.Load() + if items <= item || hidden > item { + return nil, errOutOfBounds + } + + // Retrieves the index entries for the specified ID and its immediate successor + indices, err := t.getIndices(item, 1) + if err != nil { + return nil, err + } + index0, index1 := indices[0], indices[1] + + itemStart, itemLimit, fileId := index0.bounds(index1) + itemSize := itemLimit - itemStart + + dataFile, exist := t.files[fileId] + if !exist { + return nil, fmt.Errorf("missing data file %d", fileId) + } + + // Perform the partial read if no-compression was enabled upon + if t.config.noSnappy { + if offset > uint64(itemSize) || offset+length > uint64(itemSize) { + return nil, fmt.Errorf("requested range out of bounds: item size %d, offset %d, length %d", itemSize, offset, length) + } + itemStart += uint32(offset) + + buf := make([]byte, length) + _, err = dataFile.ReadAt(buf, int64(itemStart)) + if err != nil { + return nil, err + } + t.readMeter.Mark(int64(length)) + return buf, nil + } else { + // If compressed, read the full item, decompress, then slice. + // Unfortunately, in this case, there is no performance gain + // by performing the partial read at all. 
+ buf := make([]byte, itemSize) + _, err = dataFile.ReadAt(buf, int64(itemStart)) + if err != nil { + return nil, err + } + t.readMeter.Mark(int64(itemSize)) + + data, err := snappy.Decode(nil, buf) + if err != nil { + return nil, err + } + if offset > uint64(len(data)) || offset+length > uint64(len(data)) { + return nil, fmt.Errorf("requested range out of bounds: item size %d, offset %d, length %d", len(data), offset, length) + } + return data[offset : offset+length], nil + } +} + // size returns the total data size in the freezer table. func (t *freezerTable) size() (uint64, error) { t.lock.RLock() diff --git a/core/rawdb/freezer_table_test.go b/core/rawdb/freezer_table_test.go index 96edac7e4a..fc21ea6c63 100644 --- a/core/rawdb/freezer_table_test.go +++ b/core/rawdb/freezer_table_test.go @@ -1571,3 +1571,65 @@ func TestTailTruncationCrash(t *testing.T) { t.Fatalf("Unexpected index flush offset, want: %d, got: %d", 26*indexEntrySize, f.metadata.flushOffset) } } + +func TestFreezerAncientBytes(t *testing.T) { + t.Parallel() + types := []struct { + name string + config freezerTableConfig + }{ + {"uncompressed", freezerTableConfig{noSnappy: true}}, + {"compressed", freezerTableConfig{noSnappy: false}}, + } + for _, typ := range types { + t.Run(typ.name, func(t *testing.T) { + f, err := newTable(os.TempDir(), fmt.Sprintf("ancientbytes-%s-%d", typ.name, rand.Uint64()), metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge(), 1000, typ.config, false) + if err != nil { + t.Fatal(err) + } + defer f.Close() + + for i := 0; i < 10; i++ { + data := getChunk(100, i) + batch := f.newBatch() + require.NoError(t, batch.AppendRaw(uint64(i), data)) + require.NoError(t, batch.commit()) + } + + for i := 0; i < 10; i++ { + full, err := f.Retrieve(uint64(i)) + require.NoError(t, err) + + // Full read + got, err := f.RetrieveBytes(uint64(i), 0, uint64(len(full))) + require.NoError(t, err) + if !bytes.Equal(got, full) { + t.Fatalf("full read mismatch for entry %d", i) + } + // 
Empty read + got, err = f.RetrieveBytes(uint64(i), 0, 0) + require.NoError(t, err) + if !bytes.Equal(got, full[:0]) { + t.Fatalf("empty read mismatch for entry %d", i) + } + // Middle slice + got, err = f.RetrieveBytes(uint64(i), 10, 50) + require.NoError(t, err) + if !bytes.Equal(got, full[10:60]) { + t.Fatalf("middle slice mismatch for entry %d", i) + } + // Single byte + got, err = f.RetrieveBytes(uint64(i), 99, 1) + require.NoError(t, err) + if !bytes.Equal(got, full[99:100]) { + t.Fatalf("single byte mismatch for entry %d", i) + } + // Out of bounds + _, err = f.RetrieveBytes(uint64(i), 100, 1) + if err == nil { + t.Fatalf("expected error for out-of-bounds read for entry %d", i) + } + } + }) + } +} diff --git a/core/rawdb/table.go b/core/rawdb/table.go index 45c8aecf0c..d38afdaa35 100644 --- a/core/rawdb/table.go +++ b/core/rawdb/table.go @@ -62,6 +62,12 @@ func (t *table) AncientRange(kind string, start, count, maxBytes uint64) ([][]by return t.db.AncientRange(kind, start, count, maxBytes) } +// AncientBytes is a noop passthrough that just forwards the request to the underlying +// database. +func (t *table) AncientBytes(kind string, id, offset, length uint64) ([]byte, error) { + return t.db.AncientBytes(kind, id, offset, length) +} + // Ancients is a noop passthrough that just forwards the request to the underlying // database. func (t *table) Ancients() (uint64, error) { diff --git a/ethdb/database.go b/ethdb/database.go index e665a84a61..534fcad4fc 100644 --- a/ethdb/database.go +++ b/ethdb/database.go @@ -121,6 +121,10 @@ type AncientReaderOp interface { // - if maxBytes is not specified, 'count' items will be returned if they are present AncientRange(kind string, start, count, maxBytes uint64) ([][]byte, error) + // AncientBytes retrieves the value segment of the element specified by the id + // and value offsets. + AncientBytes(kind string, id, offset, length uint64) ([]byte, error) + // Ancients returns the ancient item numbers in the ancient store. 
Ancients() (uint64, error) diff --git a/ethdb/remotedb/remotedb.go b/ethdb/remotedb/remotedb.go index 7fe154ea95..0d0d854fe4 100644 --- a/ethdb/remotedb/remotedb.go +++ b/ethdb/remotedb/remotedb.go @@ -140,6 +140,10 @@ func (db *Database) Close() error { return nil } +func (db *Database) AncientBytes(kind string, id, offset, length uint64) ([]byte, error) { + panic("not supported") +} + func New(client *rpc.Client) ethdb.Database { if client == nil { return nil diff --git a/triedb/pathdb/history_reader.go b/triedb/pathdb/history_reader.go index ce6aa693d1..1bf4cf648d 100644 --- a/triedb/pathdb/history_reader.go +++ b/triedb/pathdb/history_reader.go @@ -144,25 +144,17 @@ func (r *historyReader) readAccountMetadata(address common.Address, historyID ui // readStorageMetadata resolves the storage slot metadata within the specified // state history. func (r *historyReader) readStorageMetadata(storageKey common.Hash, storageHash common.Hash, historyID uint64, slotOffset, slotNumber int) ([]byte, error) { - // TODO(rj493456442) optimize it with partial read - blob := rawdb.ReadStateStorageIndex(r.freezer, historyID) - if len(blob) == 0 { - return nil, fmt.Errorf("storage index is truncated, historyID: %d", historyID) + data, err := rawdb.ReadStateStorageIndex(r.freezer, historyID, slotIndexSize*slotOffset, slotIndexSize*slotNumber) + if err != nil { + msg := fmt.Sprintf("id: %d, slot-offset: %d, slot-length: %d", historyID, slotOffset, slotNumber) + return nil, fmt.Errorf("storage indices corrupted, %s, %w", msg, err) } - if len(blob)%slotIndexSize != 0 { - return nil, fmt.Errorf("storage indices is corrupted, historyID: %d, size: %d", historyID, len(blob)) - } - if slotIndexSize*(slotOffset+slotNumber) > len(blob) { - return nil, fmt.Errorf("storage indices is truncated, historyID: %d, size: %d, offset: %d, length: %d", historyID, len(blob), slotOffset, slotNumber) - } - subSlice := blob[slotIndexSize*slotOffset : slotIndexSize*(slotOffset+slotNumber)] - // 
TODO(rj493456442) get rid of the metadata resolution var ( m meta target common.Hash ) - blob = rawdb.ReadStateHistoryMeta(r.freezer, historyID) + blob := rawdb.ReadStateHistoryMeta(r.freezer, historyID) if err := m.decode(blob); err != nil { return nil, err } @@ -172,17 +164,17 @@ func (r *historyReader) readStorageMetadata(storageKey common.Hash, storageHash target = storageKey } pos := sort.Search(slotNumber, func(i int) bool { - slotID := subSlice[slotIndexSize*i : slotIndexSize*i+common.HashLength] + slotID := data[slotIndexSize*i : slotIndexSize*i+common.HashLength] return bytes.Compare(slotID, target.Bytes()) >= 0 }) if pos == slotNumber { return nil, fmt.Errorf("storage metadata is not found, slot key: %#x, historyID: %d", storageKey, historyID) } offset := slotIndexSize * pos - if target != common.BytesToHash(subSlice[offset:offset+common.HashLength]) { + if target != common.BytesToHash(data[offset:offset+common.HashLength]) { return nil, fmt.Errorf("storage metadata is not found, slot key: %#x, historyID: %d", storageKey, historyID) } - return subSlice[offset : slotIndexSize*(pos+1)], nil + return data[offset : slotIndexSize*(pos+1)], nil } // readAccount retrieves the account data from the specified state history. 
@@ -194,12 +186,11 @@ func (r *historyReader) readAccount(address common.Address, historyID uint64) ([ length := int(metadata[common.AddressLength]) // one byte for account data length offset := int(binary.BigEndian.Uint32(metadata[common.AddressLength+1 : common.AddressLength+5])) // four bytes for the account data offset - // TODO(rj493456442) optimize it with partial read - data := rawdb.ReadStateAccountHistory(r.freezer, historyID) - if len(data) < length+offset { + data, err := rawdb.ReadStateAccountHistory(r.freezer, historyID, offset, length) + if err != nil { return nil, fmt.Errorf("account data is truncated, address: %#x, historyID: %d, size: %d, offset: %d, len: %d", address, historyID, len(data), offset, length) } - return data[offset : offset+length], nil + return data, nil } // readStorage retrieves the storage slot data from the specified state history. @@ -222,12 +213,11 @@ func (r *historyReader) readStorage(address common.Address, storageKey common.Ha length := int(slotMetadata[common.HashLength]) // one byte for slot data length offset := int(binary.BigEndian.Uint32(slotMetadata[common.HashLength+1 : common.HashLength+5])) // four bytes for slot data offset - // TODO(rj493456442) optimize it with partial read - data := rawdb.ReadStateStorageHistory(r.freezer, historyID) - if len(data) < offset+length { + data, err := rawdb.ReadStateStorageHistory(r.freezer, historyID, offset, length) + if err != nil { return nil, fmt.Errorf("storage data is truncated, address: %#x, key: %#x, historyID: %d, size: %d, offset: %d, len: %d", address, storageKey, historyID, len(data), offset, length) } - return data[offset : offset+length], nil + return data, nil } // read retrieves the state element data associated with the stateID. 
From 5c6ba6b40042bf6fdf7a36d2620b658d041bd3ee Mon Sep 17 00:00:00 2001 From: cui Date: Mon, 13 Oct 2025 20:00:43 +0800 Subject: [PATCH 032/277] p2p/enode: optimize LogDist (#32887) This speeds up LogDist by 75% using 64-bit operations instead of byte-wise XOR. --------- Co-authored-by: Felix Lange --- p2p/enode/node.go | 11 +++++++---- p2p/enode/node_test.go | 22 ++++++++++++++++++++++ 2 files changed, 29 insertions(+), 4 deletions(-) diff --git a/p2p/enode/node.go b/p2p/enode/node.go index d6f2ac7ff5..8198050353 100644 --- a/p2p/enode/node.go +++ b/p2p/enode/node.go @@ -19,6 +19,7 @@ package enode import ( "crypto/ecdsa" "encoding/base64" + "encoding/binary" "encoding/hex" "errors" "fmt" @@ -373,12 +374,14 @@ func DistCmp(target, a, b ID) int { // LogDist returns the logarithmic distance between a and b, log2(a ^ b). func LogDist(a, b ID) int { lz := 0 - for i := range a { - x := a[i] ^ b[i] + for i := 0; i < len(a); i += 8 { + ai := binary.BigEndian.Uint64(a[i : i+8]) + bi := binary.BigEndian.Uint64(b[i : i+8]) + x := ai ^ bi if x == 0 { - lz += 8 + lz += 64 } else { - lz += bits.LeadingZeros8(x) + lz += bits.LeadingZeros64(x) break } } diff --git a/p2p/enode/node_test.go b/p2p/enode/node_test.go index e9fe631f34..f276af6638 100644 --- a/p2p/enode/node_test.go +++ b/p2p/enode/node_test.go @@ -378,6 +378,28 @@ func TestID_logdist(t *testing.T) { } } +func makeIDs() (ID, ID) { + var a, b ID + size := len(a) + // last byte differs + for i := 0; i < size-1; i++ { + a[i] = 0xAA + b[i] = 0xAA + } + a[size-1] = 0xAA + b[size-1] = 0xAB + return a, b +} + +// Benchmark LogDist +func BenchmarkLogDist(b *testing.B) { + aID, bID := makeIDs() // 256-bit ID + b.ResetTimer() + for i := 0; i < b.N; i++ { + _ = LogDist(aID, bID) + } +} + // The random tests is likely to miss the case where a and b are equal, // this test checks it explicitly. 
func TestID_logdistEqual(t *testing.T) { From b87581f2977393c842123d18c6e29e268f9b26cb Mon Sep 17 00:00:00 2001 From: cui Date: Mon, 13 Oct 2025 22:16:07 +0800 Subject: [PATCH 033/277] p2p/enode: optimize DistCmp (#32888) This speeds up DistCmp by 75% through using 64-bit operations instead of byte-wise XOR. --- p2p/enode/node.go | 7 ++++--- p2p/enode/node_test.go | 10 ++++++++++ 2 files changed, 14 insertions(+), 3 deletions(-) diff --git a/p2p/enode/node.go b/p2p/enode/node.go index 8198050353..dafde51d6a 100644 --- a/p2p/enode/node.go +++ b/p2p/enode/node.go @@ -359,9 +359,10 @@ func ParseID(in string) (ID, error) { // Returns -1 if a is closer to target, 1 if b is closer to target // and 0 if they are equal. func DistCmp(target, a, b ID) int { - for i := range target { - da := a[i] ^ target[i] - db := b[i] ^ target[i] + for i := 0; i < len(target); i += 8 { + tn := binary.BigEndian.Uint64(target[i : i+8]) + da := tn ^ binary.BigEndian.Uint64(a[i:i+8]) + db := tn ^ binary.BigEndian.Uint64(b[i:i+8]) if da > db { return 1 } else if da < db { diff --git a/p2p/enode/node_test.go b/p2p/enode/node_test.go index f276af6638..51bc4ebe15 100644 --- a/p2p/enode/node_test.go +++ b/p2p/enode/node_test.go @@ -368,6 +368,16 @@ func TestID_distcmpEqual(t *testing.T) { } } +func BenchmarkDistCmp(b *testing.B) { + base := ID{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15} + aID := ID{15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0} + bID := ID{15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 1} + b.ResetTimer() + for i := 0; i < b.N; i++ { + _ = DistCmp(base, aID, bID) + } +} + func TestID_logdist(t *testing.T) { logdistBig := func(a, b ID) int { abig, bbig := new(big.Int).SetBytes(a[:]), new(big.Int).SetBytes(b[:]) From 7b693ea17c9e5e950a36df29262fab7862ffda23 Mon Sep 17 00:00:00 2001 From: Marius van der Wijden Date: Mon, 13 Oct 2025 19:07:36 +0200 Subject: [PATCH 034/277] core/txpool/legacypool: move queue out of main txpool (#32270) This PR move the queue out of 
the main transaction pool. For now there should be no functional changes. I see this as a first step to refactor the legacypool and make the queue a fully separate concept from the main pending pool. --------- Signed-off-by: Csaba Kiraly Co-authored-by: Csaba Kiraly --- core/txpool/legacypool/legacypool.go | 241 +++++-------------- core/txpool/legacypool/legacypool_test.go | 67 +++--- core/txpool/legacypool/queue.go | 271 ++++++++++++++++++++++ 3 files changed, 370 insertions(+), 209 deletions(-) create mode 100644 core/txpool/legacypool/queue.go diff --git a/core/txpool/legacypool/legacypool.go b/core/txpool/legacypool/legacypool.go index e199d21c7a..b36d86dd19 100644 --- a/core/txpool/legacypool/legacypool.go +++ b/core/txpool/legacypool/legacypool.go @@ -23,7 +23,6 @@ import ( "math" "math/big" "slices" - "sort" "sync" "sync/atomic" "time" @@ -238,11 +237,10 @@ type LegacyPool struct { pendingNonces *noncer // Pending state tracking virtual nonces reserver txpool.Reserver // Address reserver to ensure exclusivity across subpools - pending map[common.Address]*list // All currently processable transactions - queue map[common.Address]*list // Queued but non-processable transactions - beats map[common.Address]time.Time // Last heartbeat from each known account - all *lookup // All transactions to allow lookups - priced *pricedList // All transactions sorted by price + pending map[common.Address]*list // All currently processable transactions + queue *queue + all *lookup // All transactions to allow lookups + priced *pricedList // All transactions sorted by price reqResetCh chan *txpoolResetRequest reqPromoteCh chan *accountSet @@ -266,14 +264,14 @@ func New(config Config, chain BlockChain) *LegacyPool { config = (&config).sanitize() // Create the transaction pool with its initial settings + signer := types.LatestSigner(chain.Config()) pool := &LegacyPool{ config: config, chain: chain, chainconfig: chain.Config(), - signer: types.LatestSigner(chain.Config()), + 
signer: signer, pending: make(map[common.Address]*list), - queue: make(map[common.Address]*list), - beats: make(map[common.Address]time.Time), + queue: newQueue(config, signer), all: newLookup(), reqResetCh: make(chan *txpoolResetRequest), reqPromoteCh: make(chan *accountSet), @@ -369,15 +367,9 @@ func (pool *LegacyPool) loop() { // Handle inactive account transaction eviction case <-evict.C: pool.mu.Lock() - for addr := range pool.queue { - // Any old enough should be removed - if time.Since(pool.beats[addr]) > pool.config.Lifetime { - list := pool.queue[addr].Flatten() - for _, tx := range list { - pool.removeTx(tx.Hash(), true, true) - } - queuedEvictionMeter.Mark(int64(len(list))) - } + evicted := pool.queue.evict(false) + for _, hash := range evicted { + pool.removeTx(hash, true, true) } pool.mu.Unlock() } @@ -459,11 +451,7 @@ func (pool *LegacyPool) stats() (int, int) { for _, list := range pool.pending { pending += list.Len() } - queued := 0 - for _, list := range pool.queue { - queued += list.Len() - } - return pending, queued + return pending, pool.queue.stats() } // Content retrieves the data content of the transaction pool, returning all the @@ -476,10 +464,7 @@ func (pool *LegacyPool) Content() (map[common.Address][]*types.Transaction, map[ for addr, list := range pool.pending { pending[addr] = list.Flatten() } - queued := make(map[common.Address][]*types.Transaction, len(pool.queue)) - for addr, list := range pool.queue { - queued[addr] = list.Flatten() - } + queued := pool.queue.content() return pending, queued } @@ -493,10 +478,7 @@ func (pool *LegacyPool) ContentFrom(addr common.Address) ([]*types.Transaction, if list, ok := pool.pending[addr]; ok { pending = list.Flatten() } - var queued []*types.Transaction - if list, ok := pool.queue[addr]; ok { - queued = list.Flatten() - } + queued := pool.queue.contentFrom(addr) return pending, queued } @@ -644,7 +626,7 @@ func (pool *LegacyPool) validateAuth(tx *types.Transaction) error { if pending := 
pool.pending[auth]; pending != nil { count += pending.Len() } - if queue := pool.queue[auth]; queue != nil { + if queue, ok := pool.queue.get(auth); ok { count += queue.Len() } if count > 1 { @@ -691,7 +673,7 @@ func (pool *LegacyPool) add(tx *types.Transaction) (replaced bool, err error) { // only by this subpool until all transactions are evicted var ( _, hasPending = pool.pending[from] - _, hasQueued = pool.queue[from] + _, hasQueued = pool.queue.get(from) ) if !hasPending && !hasQueued { if err := pool.reserver.Hold(from); err != nil { @@ -790,7 +772,7 @@ func (pool *LegacyPool) add(tx *types.Transaction) (replaced bool, err error) { log.Trace("Pooled new executable transaction", "hash", hash, "from", from, "to", tx.To()) // Successful promotion, bump the heartbeat - pool.beats[from] = time.Now() + pool.queue.bump(from) return old != nil, nil } // New transaction isn't replacing a pending one, push into queue @@ -815,7 +797,7 @@ func (pool *LegacyPool) isGapped(from common.Address, tx *types.Transaction) boo } // The transaction has a nonce gap with pending list, it's only considered // as executable if transactions in queue can fill up the nonce gap. - queue, ok := pool.queue[from] + queue, ok := pool.queue.get(from) if !ok { return true } @@ -831,25 +813,12 @@ func (pool *LegacyPool) isGapped(from common.Address, tx *types.Transaction) boo // // Note, this method assumes the pool lock is held! 
func (pool *LegacyPool) enqueueTx(hash common.Hash, tx *types.Transaction, addAll bool) (bool, error) { - // Try to insert the transaction into the future queue - from, _ := types.Sender(pool.signer, tx) // already validated - if pool.queue[from] == nil { - pool.queue[from] = newList(false) + replaced, err := pool.queue.add(hash, tx) + if err != nil { + return false, err } - inserted, old := pool.queue[from].Add(tx, pool.config.PriceBump) - if !inserted { - // An older transaction was better, discard this - queuedDiscardMeter.Mark(1) - return false, txpool.ErrReplaceUnderpriced - } - // Discard any previous transaction and mark this - if old != nil { - pool.all.Remove(old.Hash()) - pool.priced.Removed(1) - queuedReplaceMeter.Mark(1) - } else { - // Nothing was replaced, bump the queued counter - queuedGauge.Inc(1) + if replaced != nil { + pool.removeTx(*replaced, true, true) } // If the transaction isn't in lookup set but it's expected to be there, // show the error log. @@ -860,11 +829,7 @@ func (pool *LegacyPool) enqueueTx(hash common.Hash, tx *types.Transaction, addAl pool.all.Add(tx) pool.priced.Put(tx) } - // If we never record the heartbeat, do it right now. 
- if _, exist := pool.beats[from]; !exist { - pool.beats[from] = time.Now() - } - return old != nil, nil + return replaced != nil, nil } // promoteTx adds a transaction to the pending (processable) list of transactions @@ -899,7 +864,7 @@ func (pool *LegacyPool) promoteTx(addr common.Address, hash common.Hash, tx *typ pool.pendingNonces.set(addr, tx.Nonce()+1) // Successful promotion, bump the heartbeat - pool.beats[addr] = time.Now() + pool.queue.bump(addr) return true } @@ -1019,7 +984,7 @@ func (pool *LegacyPool) Status(hash common.Hash) txpool.TxStatus { if txList := pool.pending[from]; txList != nil && txList.txs.items[tx.Nonce()] != nil { return txpool.TxStatusPending - } else if txList := pool.queue[from]; txList != nil && txList.txs.items[tx.Nonce()] != nil { + } else if txList, ok := pool.queue.get(from); ok && txList.txs.items[tx.Nonce()] != nil { return txpool.TxStatusQueued } return txpool.TxStatusUnknown @@ -1096,7 +1061,7 @@ func (pool *LegacyPool) removeTx(hash common.Hash, outofbound bool, unreserve bo defer func() { var ( _, hasPending = pool.pending[addr] - _, hasQueued = pool.queue[addr] + _, hasQueued = pool.queue.get(addr) ) if !hasPending && !hasQueued { pool.reserver.Release(addr) @@ -1128,16 +1093,7 @@ func (pool *LegacyPool) removeTx(hash common.Hash, outofbound bool, unreserve bo } } // Transaction is in the future queue - if future := pool.queue[addr]; future != nil { - if removed, _ := future.Remove(tx); removed { - // Reduce the queued counter - queuedGauge.Dec(1) - } - if future.Empty() { - delete(pool.queue, addr) - delete(pool.beats, addr) - } - } + pool.queue.removeTx(addr, tx) return 0 } @@ -1285,10 +1241,7 @@ func (pool *LegacyPool) runReorg(done chan struct{}, reset *txpoolResetRequest, } } // Reset needs promote for all addresses - promoteAddrs = make([]common.Address, 0, len(pool.queue)) - for addr := range pool.queue { - promoteAddrs = append(promoteAddrs, addr) - } + promoteAddrs = append(promoteAddrs, 
pool.queue.addresses()...) } // Check for pending transactions for every account that sent new ones promoted := pool.promoteExecutables(promoteAddrs) @@ -1442,62 +1395,32 @@ func (pool *LegacyPool) reset(oldHead, newHead *types.Header) { // future queue to the set of pending transactions. During this process, all // invalidated transactions (low nonce, low balance) are deleted. func (pool *LegacyPool) promoteExecutables(accounts []common.Address) []*types.Transaction { - // Track the promoted transactions to broadcast them at once - var promoted []*types.Transaction - - // Iterate over all accounts and promote any executable transactions gasLimit := pool.currentHead.Load().GasLimit - for _, addr := range accounts { - list := pool.queue[addr] - if list == nil { - continue // Just in case someone calls with a non existing account - } - // Drop all transactions that are deemed too old (low nonce) - forwards := list.Forward(pool.currentState.GetNonce(addr)) - for _, tx := range forwards { - pool.all.Remove(tx.Hash()) - } - log.Trace("Removed old queued transactions", "count", len(forwards)) - // Drop all transactions that are too costly (low balance or out of gas) - drops, _ := list.Filter(pool.currentState.GetBalance(addr), gasLimit) - for _, tx := range drops { - pool.all.Remove(tx.Hash()) - } - log.Trace("Removed unpayable queued transactions", "count", len(drops)) - queuedNofundsMeter.Mark(int64(len(drops))) + promotable, dropped, removedAddresses := pool.queue.promoteExecutables(accounts, gasLimit, pool.currentState, pool.pendingNonces) + promoted := make([]*types.Transaction, 0, len(promotable)) - // Gather all executable transactions and promote them - readies := list.Ready(pool.pendingNonces.get(addr)) - for _, tx := range readies { - hash := tx.Hash() - if pool.promoteTx(addr, hash, tx) { - promoted = append(promoted, tx) - } - } - log.Trace("Promoted queued transactions", "count", len(promoted)) - queuedGauge.Dec(int64(len(readies))) - - // Drop all 
transactions over the allowed limit - var caps = list.Cap(int(pool.config.AccountQueue)) - for _, tx := range caps { - hash := tx.Hash() - pool.all.Remove(hash) - log.Trace("Removed cap-exceeding queued transaction", "hash", hash) - } - queuedRateLimitMeter.Mark(int64(len(caps))) - // Mark all the items dropped as removed - pool.priced.Removed(len(forwards) + len(drops) + len(caps)) - queuedGauge.Dec(int64(len(forwards) + len(drops) + len(caps))) - - // Delete the entire queue entry if it became empty. - if list.Empty() { - delete(pool.queue, addr) - delete(pool.beats, addr) - if _, ok := pool.pending[addr]; !ok { - pool.reserver.Release(addr) - } + // promote all promoteable transactions + for _, tx := range promotable { + from, _ := pool.signer.Sender(tx) + if pool.promoteTx(from, tx.Hash(), tx) { + promoted = append(promoted, tx) } } + + // remove all removable transactions + for _, hash := range dropped { + pool.all.Remove(hash) + } + + // release all accounts that have no more transactions in the pool + for _, addr := range removedAddresses { + _, hasPending := pool.pending[addr] + _, hasQueued := pool.queue.get(addr) + if !hasPending && !hasQueued { + pool.reserver.Release(addr) + } + } + return promoted } @@ -1585,43 +1508,17 @@ func (pool *LegacyPool) truncatePending() { // truncateQueue drops the oldest transactions in the queue if the pool is above the global queue limit. 
func (pool *LegacyPool) truncateQueue() { - queued := uint64(0) - for _, list := range pool.queue { - queued += uint64(list.Len()) - } - if queued <= pool.config.GlobalQueue { - return + removed, removedAddresses := pool.queue.truncate() + + // remove all removable transactions + for _, hash := range removed { + pool.all.Remove(hash) } - // Sort all accounts with queued transactions by heartbeat - addresses := make(addressesByHeartbeat, 0, len(pool.queue)) - for addr := range pool.queue { - addresses = append(addresses, addressByHeartbeat{addr, pool.beats[addr]}) - } - sort.Sort(sort.Reverse(addresses)) - - // Drop transactions until the total is below the limit - for drop := queued - pool.config.GlobalQueue; drop > 0 && len(addresses) > 0; { - addr := addresses[len(addresses)-1] - list := pool.queue[addr.address] - - addresses = addresses[:len(addresses)-1] - - // Drop all transactions if they are less than the overflow - if size := uint64(list.Len()); size <= drop { - for _, tx := range list.Flatten() { - pool.removeTx(tx.Hash(), true, true) - } - drop -= size - queuedRateLimitMeter.Mark(int64(size)) - continue - } - // Otherwise drop only last few transactions - txs := list.Flatten() - for i := len(txs) - 1; i >= 0 && drop > 0; i-- { - pool.removeTx(txs[i].Hash(), true, true) - drop-- - queuedRateLimitMeter.Mark(1) + for _, addr := range removedAddresses { + _, hasPending := pool.pending[addr] + if !hasPending { + pool.reserver.Release(addr) } } } @@ -1679,25 +1576,13 @@ func (pool *LegacyPool) demoteUnexecutables() { // Delete the entire pending entry if it became empty. if list.Empty() { delete(pool.pending, addr) - if _, ok := pool.queue[addr]; !ok { + if _, ok := pool.queue.get(addr); !ok { pool.reserver.Release(addr) } } } } -// addressByHeartbeat is an account address tagged with its last activity timestamp. 
-type addressByHeartbeat struct { - address common.Address - heartbeat time.Time -} - -type addressesByHeartbeat []addressByHeartbeat - -func (a addressesByHeartbeat) Len() int { return len(a) } -func (a addressesByHeartbeat) Less(i, j int) bool { return a[i].heartbeat.Before(a[j].heartbeat) } -func (a addressesByHeartbeat) Swap(i, j int) { a[i], a[j] = a[j], a[i] } - // accountSet is simply a set of addresses to check for existence, and a signer // capable of deriving addresses from transactions. type accountSet struct { @@ -1938,17 +1823,17 @@ func (pool *LegacyPool) Clear() { // acquire the subpool lock until the transaction addition is completed. for addr := range pool.pending { - if _, ok := pool.queue[addr]; !ok { + if _, ok := pool.queue.get(addr); !ok { pool.reserver.Release(addr) } } - for addr := range pool.queue { + for _, addr := range pool.queue.addresses() { pool.reserver.Release(addr) } pool.all.Clear() pool.priced.Reheap() pool.pending = make(map[common.Address]*list) - pool.queue = make(map[common.Address]*list) + pool.queue = newQueue(pool.config, pool.signer) pool.pendingNonces = newNoncer(pool.currentState) } diff --git a/core/txpool/legacypool/legacypool_test.go b/core/txpool/legacypool/legacypool_test.go index 0c8642659d..fb994d8208 100644 --- a/core/txpool/legacypool/legacypool_test.go +++ b/core/txpool/legacypool/legacypool_test.go @@ -466,8 +466,8 @@ func TestQueue(t *testing.T) { if _, ok := pool.pending[from].txs.items[tx.Nonce()]; ok { t.Error("expected transaction to be in tx pool") } - if len(pool.queue) > 0 { - t.Error("expected transaction queue to be empty. is", len(pool.queue)) + if len(pool.queue.queued) > 0 { + t.Error("expected transaction queue to be empty. 
is", len(pool.queue.queued)) } } @@ -492,8 +492,8 @@ func TestQueue2(t *testing.T) { if len(pool.pending) != 1 { t.Error("expected pending length to be 1, got", len(pool.pending)) } - if pool.queue[from].Len() != 2 { - t.Error("expected len(queue) == 2, got", pool.queue[from].Len()) + if list, _ := pool.queue.get(from); list.Len() != 2 { + t.Error("expected len(queue) == 2, got", list.Len()) } } @@ -639,8 +639,8 @@ func TestMissingNonce(t *testing.T) { if len(pool.pending) != 0 { t.Error("expected 0 pending transactions, got", len(pool.pending)) } - if pool.queue[addr].Len() != 1 { - t.Error("expected 1 queued transaction, got", pool.queue[addr].Len()) + if list, _ := pool.queue.get(addr); list.Len() != 1 { + t.Error("expected 1 queued transaction, got", list.Len()) } if pool.all.Count() != 1 { t.Error("expected 1 total transactions, got", pool.all.Count()) @@ -712,8 +712,8 @@ func TestDropping(t *testing.T) { if pool.pending[account].Len() != 3 { t.Errorf("pending transaction mismatch: have %d, want %d", pool.pending[account].Len(), 3) } - if pool.queue[account].Len() != 3 { - t.Errorf("queued transaction mismatch: have %d, want %d", pool.queue[account].Len(), 3) + if list, _ := pool.queue.get(account); list.Len() != 3 { + t.Errorf("queued transaction mismatch: have %d, want %d", list.Len(), 3) } if pool.all.Count() != 6 { t.Errorf("total transaction mismatch: have %d, want %d", pool.all.Count(), 6) @@ -722,8 +722,8 @@ func TestDropping(t *testing.T) { if pool.pending[account].Len() != 3 { t.Errorf("pending transaction mismatch: have %d, want %d", pool.pending[account].Len(), 3) } - if pool.queue[account].Len() != 3 { - t.Errorf("queued transaction mismatch: have %d, want %d", pool.queue[account].Len(), 3) + if list, _ := pool.queue.get(account); list.Len() != 3 { + t.Errorf("queued transaction mismatch: have %d, want %d", list.Len(), 3) } if pool.all.Count() != 6 { t.Errorf("total transaction mismatch: have %d, want %d", pool.all.Count(), 6) @@ -741,13 +741,14 @@ 
func TestDropping(t *testing.T) { if _, ok := pool.pending[account].txs.items[tx2.Nonce()]; ok { t.Errorf("out-of-fund pending transaction present: %v", tx1) } - if _, ok := pool.queue[account].txs.items[tx10.Nonce()]; !ok { + list, _ := pool.queue.get(account) + if _, ok := list.txs.items[tx10.Nonce()]; !ok { t.Errorf("funded queued transaction missing: %v", tx10) } - if _, ok := pool.queue[account].txs.items[tx11.Nonce()]; !ok { + if _, ok := list.txs.items[tx11.Nonce()]; !ok { t.Errorf("funded queued transaction missing: %v", tx10) } - if _, ok := pool.queue[account].txs.items[tx12.Nonce()]; ok { + if _, ok := list.txs.items[tx12.Nonce()]; ok { t.Errorf("out-of-fund queued transaction present: %v", tx11) } if pool.all.Count() != 4 { @@ -763,10 +764,11 @@ func TestDropping(t *testing.T) { if _, ok := pool.pending[account].txs.items[tx1.Nonce()]; ok { t.Errorf("over-gased pending transaction present: %v", tx1) } - if _, ok := pool.queue[account].txs.items[tx10.Nonce()]; !ok { + list, _ = pool.queue.get(account) + if _, ok := list.txs.items[tx10.Nonce()]; !ok { t.Errorf("funded queued transaction missing: %v", tx10) } - if _, ok := pool.queue[account].txs.items[tx11.Nonce()]; ok { + if _, ok := list.txs.items[tx11.Nonce()]; ok { t.Errorf("over-gased queued transaction present: %v", tx11) } if pool.all.Count() != 2 { @@ -820,8 +822,8 @@ func TestPostponing(t *testing.T) { if pending := pool.pending[accs[0]].Len() + pool.pending[accs[1]].Len(); pending != len(txs) { t.Errorf("pending transaction mismatch: have %d, want %d", pending, len(txs)) } - if len(pool.queue) != 0 { - t.Errorf("queued accounts mismatch: have %d, want %d", len(pool.queue), 0) + if len(pool.queue.addresses()) != 0 { + t.Errorf("queued accounts mismatch: have %d, want %d", len(pool.queue.addresses()), 0) } if pool.all.Count() != len(txs) { t.Errorf("total transaction mismatch: have %d, want %d", pool.all.Count(), len(txs)) @@ -830,8 +832,8 @@ func TestPostponing(t *testing.T) { if pending := 
pool.pending[accs[0]].Len() + pool.pending[accs[1]].Len(); pending != len(txs) { t.Errorf("pending transaction mismatch: have %d, want %d", pending, len(txs)) } - if len(pool.queue) != 0 { - t.Errorf("queued accounts mismatch: have %d, want %d", len(pool.queue), 0) + if len(pool.queue.addresses()) != 0 { + t.Errorf("queued accounts mismatch: have %d, want %d", len(pool.queue.addresses()), 0) } if pool.all.Count() != len(txs) { t.Errorf("total transaction mismatch: have %d, want %d", pool.all.Count(), len(txs)) @@ -847,7 +849,8 @@ func TestPostponing(t *testing.T) { if _, ok := pool.pending[accs[0]].txs.items[txs[0].Nonce()]; !ok { t.Errorf("tx %d: valid and funded transaction missing from pending pool: %v", 0, txs[0]) } - if _, ok := pool.queue[accs[0]].txs.items[txs[0].Nonce()]; ok { + list, _ := pool.queue.get(accs[0]) + if _, ok := list.txs.items[txs[0].Nonce()]; ok { t.Errorf("tx %d: valid and funded transaction present in future queue: %v", 0, txs[0]) } for i, tx := range txs[1:100] { @@ -855,14 +858,14 @@ func TestPostponing(t *testing.T) { if _, ok := pool.pending[accs[0]].txs.items[tx.Nonce()]; ok { t.Errorf("tx %d: valid but future transaction present in pending pool: %v", i+1, tx) } - if _, ok := pool.queue[accs[0]].txs.items[tx.Nonce()]; !ok { + if _, ok := list.txs.items[tx.Nonce()]; !ok { t.Errorf("tx %d: valid but future transaction missing from future queue: %v", i+1, tx) } } else { if _, ok := pool.pending[accs[0]].txs.items[tx.Nonce()]; ok { t.Errorf("tx %d: out-of-fund transaction present in pending pool: %v", i+1, tx) } - if _, ok := pool.queue[accs[0]].txs.items[tx.Nonce()]; ok { + if _, ok := list.txs.items[tx.Nonce()]; ok { t.Errorf("tx %d: out-of-fund transaction present in future queue: %v", i+1, tx) } } @@ -872,13 +875,14 @@ func TestPostponing(t *testing.T) { if pool.pending[accs[1]] != nil { t.Errorf("invalidated account still has pending transactions") } + list, _ = pool.queue.get(accs[1]) for i, tx := range txs[100:] { if i%2 == 1 { - 
if _, ok := pool.queue[accs[1]].txs.items[tx.Nonce()]; !ok { + if _, ok := list.txs.items[tx.Nonce()]; !ok { t.Errorf("tx %d: valid but future transaction missing from future queue: %v", 100+i, tx) } } else { - if _, ok := pool.queue[accs[1]].txs.items[tx.Nonce()]; ok { + if _, ok := list.txs.items[tx.Nonce()]; ok { t.Errorf("tx %d: out-of-fund transaction present in future queue: %v", 100+i, tx) } } @@ -963,13 +967,14 @@ func TestQueueAccountLimiting(t *testing.T) { if len(pool.pending) != 0 { t.Errorf("tx %d: pending pool size mismatch: have %d, want %d", i, len(pool.pending), 0) } + list, _ := pool.queue.get(account) if i <= testTxPoolConfig.AccountQueue { - if pool.queue[account].Len() != int(i) { - t.Errorf("tx %d: queue size mismatch: have %d, want %d", i, pool.queue[account].Len(), i) + if list.Len() != int(i) { + t.Errorf("tx %d: queue size mismatch: have %d, want %d", i, list.Len(), i) } } else { - if pool.queue[account].Len() != int(testTxPoolConfig.AccountQueue) { - t.Errorf("tx %d: queue limit mismatch: have %d, want %d", i, pool.queue[account].Len(), testTxPoolConfig.AccountQueue) + if list.Len() != int(testTxPoolConfig.AccountQueue) { + t.Errorf("tx %d: queue limit mismatch: have %d, want %d", i, list.Len(), testTxPoolConfig.AccountQueue) } } } @@ -1020,7 +1025,7 @@ func TestQueueGlobalLimiting(t *testing.T) { pool.addRemotesSync(txs) queued := 0 - for addr, list := range pool.queue { + for addr, list := range pool.queue.queued { if list.Len() > int(config.AccountQueue) { t.Errorf("addr %x: queued accounts overflown allowance: %d > %d", addr, list.Len(), config.AccountQueue) } @@ -1179,8 +1184,8 @@ func TestPendingLimiting(t *testing.T) { if pool.pending[account].Len() != int(i)+1 { t.Errorf("tx %d: pending pool size mismatch: have %d, want %d", i, pool.pending[account].Len(), i+1) } - if len(pool.queue) != 0 { - t.Errorf("tx %d: queue size mismatch: have %d, want %d", i, pool.queue[account].Len(), 0) + if len(pool.queue.addresses()) != 0 { + 
t.Errorf("tx %d: queue size mismatch: have %d, want %d", i, len(pool.queue.addresses()), 0) } } if pool.all.Count() != int(testTxPoolConfig.AccountQueue+5) { diff --git a/core/txpool/legacypool/queue.go b/core/txpool/legacypool/queue.go new file mode 100644 index 0000000000..b8417064f7 --- /dev/null +++ b/core/txpool/legacypool/queue.go @@ -0,0 +1,271 @@ +// Copyright 2025 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . 
+ +package legacypool + +import ( + "sort" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/state" + "github.com/ethereum/go-ethereum/core/txpool" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/log" +) + +type queue struct { + config Config + signer types.Signer + queued map[common.Address]*list // Queued but non-processable transactions + beats map[common.Address]time.Time // Last heartbeat from each known account +} + +func newQueue(config Config, signer types.Signer) *queue { + return &queue{ + signer: signer, + config: config, + queued: make(map[common.Address]*list), + beats: make(map[common.Address]time.Time), + } +} + +func (q *queue) evict(force bool) []common.Hash { + removed := make([]common.Hash, 0) + for addr, list := range q.queued { + // Any transactions old enough should be removed + if force || time.Since(q.beats[addr]) > q.config.Lifetime { + list := list.Flatten() + for _, tx := range list { + q.removeTx(addr, tx) + removed = append(removed, tx.Hash()) + } + queuedEvictionMeter.Mark(int64(len(list))) + } + } + return removed +} + +func (q *queue) stats() int { + queued := 0 + for _, list := range q.queued { + queued += list.Len() + } + return queued +} + +func (q *queue) content() map[common.Address][]*types.Transaction { + queued := make(map[common.Address][]*types.Transaction, len(q.queued)) + for addr, list := range q.queued { + queued[addr] = list.Flatten() + } + return queued +} + +func (q *queue) contentFrom(addr common.Address) []*types.Transaction { + var queued []*types.Transaction + if list, ok := q.get(addr); ok { + queued = list.Flatten() + } + return queued +} + +func (q *queue) get(addr common.Address) (*list, bool) { + l, ok := q.queued[addr] + return l, ok +} + +func (q *queue) bump(addr common.Address) { + q.beats[addr] = time.Now() +} + +func (q *queue) addresses() []common.Address { + addrs := make([]common.Address, 0, len(q.queued)) + for addr := 
range q.queued { + addrs = append(addrs, addr) + } + return addrs +} + +func (q queue) removeTx(addr common.Address, tx *types.Transaction) { + if future := q.queued[addr]; future != nil { + if txOld := future.txs.Get(tx.Nonce()); txOld != nil && txOld.Hash() != tx.Hash() { + // Edge case, a different transaction + // with the same nonce is in the queued, just ignore + return + } + if removed, _ := future.Remove(tx); removed { + // Reduce the queued counter + queuedGauge.Dec(1) + } + if future.Empty() { + delete(q.queued, addr) + delete(q.beats, addr) + } + } +} + +func (q *queue) add(hash common.Hash, tx *types.Transaction) (*common.Hash, error) { + // Try to insert the transaction into the future queue + from, _ := types.Sender(q.signer, tx) // already validated + if q.queued[from] == nil { + q.queued[from] = newList(false) + } + inserted, old := q.queued[from].Add(tx, q.config.PriceBump) + if !inserted { + // An older transaction was better, discard this + queuedDiscardMeter.Mark(1) + return nil, txpool.ErrReplaceUnderpriced + } + // If we never record the heartbeat, do it right now. + if _, exist := q.beats[from]; !exist { + q.beats[from] = time.Now() + } + if old == nil { + // Nothing was replaced, bump the queued counter + queuedGauge.Inc(1) + return nil, nil + } + h := old.Hash() + // Transaction was replaced, bump the replacement counter + queuedReplaceMeter.Mark(1) + return &h, nil +} + +// promoteExecutables iterates over all accounts with queued transactions, selecting +// for promotion any that are now executable. It also drops any transactions that are +// deemed too old (nonce too low) or too costly (insufficient funds or over gas limit). +// +// Returns three lists: all transactions that were removed from the queue and selected +// for promotion; all other transactions that were removed from the queue and dropped; +// the list of addresses removed. 
+func (q *queue) promoteExecutables(accounts []common.Address, gasLimit uint64, currentState *state.StateDB, nonces *noncer) ([]*types.Transaction, []common.Hash, []common.Address) { + // Track the promotable transactions to broadcast them at once + var promotable []*types.Transaction + var dropped []common.Hash + var removedAddresses []common.Address + + // Iterate over all accounts and promote any executable transactions + for _, addr := range accounts { + list := q.queued[addr] + if list == nil { + continue // Just in case someone calls with a non existing account + } + // Drop all transactions that are deemed too old (low nonce) + forwards := list.Forward(currentState.GetNonce(addr)) + for _, tx := range forwards { + dropped = append(dropped, tx.Hash()) + } + log.Trace("Removing old queued transactions", "count", len(forwards)) + // Drop all transactions that are too costly (low balance or out of gas) + drops, _ := list.Filter(currentState.GetBalance(addr), gasLimit) + for _, tx := range drops { + dropped = append(dropped, tx.Hash()) + } + log.Trace("Removing unpayable queued transactions", "count", len(drops)) + queuedNofundsMeter.Mark(int64(len(drops))) + + // Gather all executable transactions and promote them + readies := list.Ready(nonces.get(addr)) + promotable = append(promotable, readies...) + log.Trace("Promoting queued transactions", "count", len(promotable)) + queuedGauge.Dec(int64(len(readies))) + + // Drop all transactions over the allowed limit + var caps = list.Cap(int(q.config.AccountQueue)) + for _, tx := range caps { + hash := tx.Hash() + dropped = append(dropped, hash) + log.Trace("Removing cap-exceeding queued transaction", "hash", hash) + } + queuedRateLimitMeter.Mark(int64(len(caps))) + + // Delete the entire queue entry if it became empty. 
+ if list.Empty() { + delete(q.queued, addr) + delete(q.beats, addr) + removedAddresses = append(removedAddresses, addr) + } + } + queuedGauge.Dec(int64(len(dropped))) + return promotable, dropped, removedAddresses +} + +// truncate drops the oldest transactions from the queue until the total +// number is below the configured limit. +// Returns the hashes of all dropped transactions, and the addresses of +// accounts that became empty due to the truncation. +func (q *queue) truncate() ([]common.Hash, []common.Address) { + queued := uint64(0) + for _, list := range q.queued { + queued += uint64(list.Len()) + } + if queued <= q.config.GlobalQueue { + return nil, nil + } + + // Sort all accounts with queued transactions by heartbeat + addresses := make(addressesByHeartbeat, 0, len(q.queued)) + for addr := range q.queued { + addresses = append(addresses, addressByHeartbeat{addr, q.beats[addr]}) + } + sort.Sort(sort.Reverse(addresses)) + removed := make([]common.Hash, 0) + removedAddresses := make([]common.Address, 0) + + // Drop transactions until the total is below the limit + for drop := queued - q.config.GlobalQueue; drop > 0 && len(addresses) > 0; { + addr := addresses[len(addresses)-1] + list := q.queued[addr.address] + + addresses = addresses[:len(addresses)-1] + + // Drop all transactions if they are less than the overflow + if size := uint64(list.Len()); size <= drop { + for _, tx := range list.Flatten() { + q.removeTx(addr.address, tx) + removed = append(removed, tx.Hash()) + } + drop -= size + queuedRateLimitMeter.Mark(int64(size)) + removedAddresses = append(removedAddresses, addr.address) + continue + } + // Otherwise drop only last few transactions + txs := list.Flatten() + for i := len(txs) - 1; i >= 0 && drop > 0; i-- { + q.removeTx(addr.address, txs[i]) + removed = append(removed, txs[i].Hash()) + drop-- + queuedRateLimitMeter.Mark(1) + } + } + + // no need to clear empty accounts, removeTx already does that + return removed, removedAddresses +} + +// 
addressByHeartbeat is an account address tagged with its last activity timestamp. +type addressByHeartbeat struct { + address common.Address + heartbeat time.Time +} + +type addressesByHeartbeat []addressByHeartbeat + +func (a addressesByHeartbeat) Len() int { return len(a) } +func (a addressesByHeartbeat) Less(i, j int) bool { return a[i].heartbeat.Before(a[j].heartbeat) } +func (a addressesByHeartbeat) Swap(i, j int) { a[i], a[j] = a[j], a[i] } From b28241ba85a294ad0f860390943170329c37a53b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Felf=C3=B6ldi=20Zsolt?= Date: Mon, 13 Oct 2025 19:21:01 +0200 Subject: [PATCH 035/277] cmd/workload: filter fuzzer test (#31613) This PR adds a `filterfuzz` subcommand to the workload tester that generates requests similarly to `filtergen` (though with a much smaller block length limit) and also verifies the results by retrieving all block receipts in the range and locally filtering out relevant results. Unlike `filtergen` that operates on the finalized chain range only, `filterfuzz` does check the head region, actually it seeds a new query at every new chain head. --- cmd/workload/filtertest.go | 15 +- cmd/workload/filtertestfuzz.go | 337 +++++++++++++++++++++++++++++++++ cmd/workload/filtertestgen.go | 59 +++--- cmd/workload/main.go | 1 + 4 files changed, 378 insertions(+), 34 deletions(-) create mode 100644 cmd/workload/filtertestfuzz.go diff --git a/cmd/workload/filtertest.go b/cmd/workload/filtertest.go index 9f0b6cab44..d77cbc5768 100644 --- a/cmd/workload/filtertest.go +++ b/cmd/workload/filtertest.go @@ -182,13 +182,14 @@ func (s *filterTestSuite) loadQueries() error { // filterQuery is a single query for testing. 
type filterQuery struct { - FromBlock int64 `json:"fromBlock"` - ToBlock int64 `json:"toBlock"` - Address []common.Address `json:"address"` - Topics [][]common.Hash `json:"topics"` - ResultHash *common.Hash `json:"resultHash,omitempty"` - results []types.Log - Err error `json:"error,omitempty"` + FromBlock int64 `json:"fromBlock"` + ToBlock int64 `json:"toBlock"` + lastBlockHash common.Hash + Address []common.Address `json:"address"` + Topics [][]common.Hash `json:"topics"` + ResultHash *common.Hash `json:"resultHash,omitempty"` + results []types.Log + Err error `json:"error,omitempty"` } func (fq *filterQuery) isWildcard() bool { diff --git a/cmd/workload/filtertestfuzz.go b/cmd/workload/filtertestfuzz.go new file mode 100644 index 0000000000..3549f4db56 --- /dev/null +++ b/cmd/workload/filtertestfuzz.go @@ -0,0 +1,337 @@ +// Copyright 2025 The go-ethereum Authors +// This file is part of go-ethereum. +// +// go-ethereum is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// go-ethereum is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with go-ethereum. If not, see . 
+ +package main + +import ( + "context" + "fmt" + "math/big" + "reflect" + "slices" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/lru" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/rpc" + "github.com/urfave/cli/v2" +) + +const maxFilterRangeForTestFuzz = 300 + +var ( + filterFuzzCommand = &cli.Command{ + Name: "filterfuzz", + Usage: "Generates queries and compares results against matches derived from receipts", + ArgsUsage: "", + Action: filterFuzzCmd, + Flags: []cli.Flag{}, + } +) + +// filterFuzzCmd is the main function of the filter fuzzer. +func filterFuzzCmd(ctx *cli.Context) error { + f := newFilterTestGen(ctx, maxFilterRangeForTestFuzz) + var lastHead *types.Header + headerCache := lru.NewCache[common.Hash, *types.Header](200) + + commonAncestor := func(oldPtr, newPtr *types.Header) *types.Header { + if oldPtr == nil || newPtr == nil { + return nil + } + if newPtr.Number.Uint64() > oldPtr.Number.Uint64()+100 || oldPtr.Number.Uint64() > newPtr.Number.Uint64()+100 { + return nil + } + for oldPtr.Hash() != newPtr.Hash() { + if newPtr.Number.Uint64() >= oldPtr.Number.Uint64() { + if parent, _ := headerCache.Get(newPtr.ParentHash); parent != nil { + newPtr = parent + } else { + newPtr, _ = getHeaderByHash(f.client, newPtr.ParentHash) + if newPtr == nil { + return nil + } + headerCache.Add(newPtr.Hash(), newPtr) + } + } + if oldPtr.Number.Uint64() > newPtr.Number.Uint64() { + oldPtr, _ = headerCache.Get(oldPtr.ParentHash) + if oldPtr == nil { + return nil + } + } + } + return newPtr + } + + fetchHead := func() (*types.Header, bool) { + currentHead, err := getLatestHeader(f.client) + if err != nil { + fmt.Println("Could not fetch head block", err) + return nil, false + } + headerCache.Add(currentHead.Hash(), currentHead) + if lastHead != nil && currentHead.Hash() == lastHead.Hash() { + return currentHead, false + } + f.blockLimit = currentHead.Number.Int64() + ca := 
commonAncestor(lastHead, currentHead) + fmt.Print("*** New head ", f.blockLimit) + if ca == nil { + fmt.Println(" ") + } else { + if reorged := lastHead.Number.Uint64() - ca.Number.Uint64(); reorged > 0 { + fmt.Print(" reorged ", reorged) + } + if missed := currentHead.Number.Uint64() - ca.Number.Uint64() - 1; missed > 0 { + fmt.Print(" missed ", missed) + } + fmt.Println() + } + lastHead = currentHead + return currentHead, true + } + + tryExtendQuery := func(query *filterQuery) *filterQuery { + for { + extQuery := f.extendRange(query) + if extQuery == nil { + return query + } + extQuery.checkLastBlockHash(f.client) + extQuery.run(f.client, nil) + if extQuery.Err == nil && len(extQuery.results) == 0 { + // query is useless now due to major reorg; abandon and continue + fmt.Println("Zero length results") + return nil + } + if extQuery.Err != nil { + extQuery.printError() + return nil + } + if len(extQuery.results) > maxFilterResultSize { + return query + } + query = extQuery + } + } + + var ( + mmQuery *filterQuery + mmRetry, mmNextRetry int + ) + +mainLoop: + for { + select { + case <-ctx.Done(): + return nil + default: + } + var query *filterQuery + if mmQuery != nil { + if mmRetry == 0 { + query = mmQuery + mmRetry = mmNextRetry + mmNextRetry *= 2 + query.checkLastBlockHash(f.client) + query.run(f.client, nil) + if query.Err != nil { + query.printError() + continue + } + fmt.Println("Retrying query from:", query.FromBlock, "to:", query.ToBlock, "results:", len(query.results)) + } else { + mmRetry-- + } + } + if query == nil { + currentHead, isNewHead := fetchHead() + if currentHead == nil { + select { + case <-ctx.Done(): + return nil + case <-time.After(time.Second): + } + continue mainLoop + } + if isNewHead { + query = f.newHeadSeedQuery(currentHead.Number.Int64()) + } else { + query = f.newQuery() + } + query.checkLastBlockHash(f.client) + query.run(f.client, nil) + if query.Err != nil { + query.printError() + continue + } + fmt.Println("New query from:", 
query.FromBlock, "to:", query.ToBlock, "results:", len(query.results)) + if len(query.results) == 0 || len(query.results) > maxFilterResultSize { + continue mainLoop + } + if query = tryExtendQuery(query); query == nil { + continue mainLoop + } + } + if !query.checkLastBlockHash(f.client) { + fmt.Println("Reorg during search") + continue mainLoop + } + // now we have a new query; check results + results, err := query.getResultsFromReceipts(f.client) + if err != nil { + fmt.Println("Could not fetch results from receipts", err) + continue mainLoop + } + if !query.checkLastBlockHash(f.client) { + fmt.Println("Reorg during search") + continue mainLoop + } + if !reflect.DeepEqual(query.results, results) { + fmt.Println("Results mismatch from:", query.FromBlock, "to:", query.ToBlock, "addresses:", query.Address, "topics:", query.Topics) + resShared, resGetLogs, resReceipts := compareResults(query.results, results) + fmt.Println(" shared:", len(resShared)) + fmt.Println(" only from getLogs:", len(resGetLogs), resGetLogs) + fmt.Println(" only from receipts:", len(resReceipts), resReceipts) + if mmQuery != query { + mmQuery = query + mmRetry = 0 + mmNextRetry = 1 + } + continue mainLoop + } + fmt.Println("Successful query from:", query.FromBlock, "to:", query.ToBlock, "results:", len(query.results)) + f.storeQuery(query) + } +} + +func compareResults(a, b []types.Log) (shared, onlya, onlyb []types.Log) { + for len(a) > 0 && len(b) > 0 { + if reflect.DeepEqual(a[0], b[0]) { + shared = append(shared, a[0]) + a = a[1:] + b = b[1:] + } else { + for i := 1; ; i++ { + if i >= len(a) { // b[0] not found in a + onlyb = append(onlyb, b[0]) + b = b[1:] + break + } + if i >= len(b) { // a[0] not found in b + onlya = append(onlya, a[0]) + a = a[1:] + break + } + if reflect.DeepEqual(b[0], a[i]) { // a[:i] not found in b + onlya = append(onlya, a[:i]...) + a = a[i:] + break + } + if reflect.DeepEqual(a[0], b[i]) { // b[:i] not found in a + onlyb = append(onlyb, b[:i]...) 
+ b = b[i:] + break + } + } + } + } + onlya = append(onlya, a...) + onlyb = append(onlyb, b...) + return +} + +func getLatestHeader(client *client) (*types.Header, error) { + ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) + defer cancel() + + return client.Eth.HeaderByNumber(ctx, big.NewInt(int64(rpc.LatestBlockNumber))) +} + +func getHeaderByHash(client *client, hash common.Hash) (*types.Header, error) { + ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) + defer cancel() + + return client.Eth.HeaderByHash(ctx, hash) +} + +// newHeadSeedQuery creates a query that gets all logs from the latest head. +func (s *filterTestGen) newHeadSeedQuery(head int64) *filterQuery { + return &filterQuery{ + FromBlock: head, + ToBlock: head, + } +} + +func (fq *filterQuery) checkLastBlockHash(client *client) bool { + ctx, cancel := context.WithTimeout(context.Background(), time.Second*30) + defer cancel() + + header, err := client.Eth.HeaderByNumber(ctx, big.NewInt(fq.ToBlock)) + if err != nil { + fmt.Println("Cound not fetch last block hash of query number:", fq.ToBlock, "error:", err) + fq.lastBlockHash = common.Hash{} + return false + } + hash := header.Hash() + if fq.lastBlockHash == hash { + return true + } + fq.lastBlockHash = hash + return false +} + +func (fq *filterQuery) filterLog(log *types.Log) bool { + if len(fq.Address) > 0 && !slices.Contains(fq.Address, log.Address) { + return false + } + // If the to filtered topics is greater than the amount of topics in logs, skip. 
+ if len(fq.Topics) > len(log.Topics) { + return false + } + for i, sub := range fq.Topics { + if len(sub) == 0 { + continue // empty rule set == wildcard + } + if !slices.Contains(sub, log.Topics[i]) { + return false + } + } + return true +} + +func (fq *filterQuery) getResultsFromReceipts(client *client) ([]types.Log, error) { + ctx, cancel := context.WithTimeout(context.Background(), time.Second*30) + defer cancel() + + var results []types.Log + for blockNumber := fq.FromBlock; blockNumber <= fq.ToBlock; blockNumber++ { + receipts, err := client.Eth.BlockReceipts(ctx, rpc.BlockNumberOrHashWithNumber(rpc.BlockNumber(blockNumber))) + if err != nil { + return nil, err + } + for _, receipt := range receipts { + for _, log := range receipt.Logs { + if fq.filterLog(log) { + results = append(results, *log) + } + } + } + } + return results, nil +} diff --git a/cmd/workload/filtertestgen.go b/cmd/workload/filtertestgen.go index 6d1f639819..603e3dea67 100644 --- a/cmd/workload/filtertestgen.go +++ b/cmd/workload/filtertestgen.go @@ -32,6 +32,17 @@ import ( "github.com/urfave/cli/v2" ) +const ( + // Parameter of the random filter query generator. + maxFilterRangeForTestGen = 100000000000 + maxFilterResultSize = 1000 + filterBuckets = 10 + maxFilterBucketSize = 100 + filterSeedChance = 10 + filterMergeChance = 45 + filterExtendChance = 50 +) + var ( filterGenerateCommand = &cli.Command{ Name: "filtergen", @@ -58,7 +69,7 @@ var ( // filterGenCmd is the main function of the filter tests generator. 
func filterGenCmd(ctx *cli.Context) error { - f := newFilterTestGen(ctx) + f := newFilterTestGen(ctx, maxFilterRangeForTestGen) lastWrite := time.Now() for { select { @@ -67,7 +78,7 @@ func filterGenCmd(ctx *cli.Context) error { default: } - f.updateFinalizedBlock() + f.setLimitToFinalizedBlock() query := f.newQuery() query.run(f.client, nil) if query.Err != nil { @@ -75,7 +86,7 @@ func filterGenCmd(ctx *cli.Context) error { exit("filter query failed") } if len(query.results) > 0 && len(query.results) <= maxFilterResultSize { - for { + for rand.Intn(100) < filterExtendChance { extQuery := f.extendRange(query) if extQuery == nil { break @@ -108,39 +119,32 @@ func filterGenCmd(ctx *cli.Context) error { // filterTestGen is the filter query test generator. type filterTestGen struct { - client *client - queryFile string + client *client + queryFile string + maxFilterRange int64 - finalizedBlock int64 - queries [filterBuckets][]*filterQuery + blockLimit int64 + queries [filterBuckets][]*filterQuery } -func newFilterTestGen(ctx *cli.Context) *filterTestGen { +func newFilterTestGen(ctx *cli.Context, maxFilterRange int64) *filterTestGen { return &filterTestGen{ - client: makeClient(ctx), - queryFile: ctx.String(filterQueryFileFlag.Name), + client: makeClient(ctx), + queryFile: ctx.String(filterQueryFileFlag.Name), + maxFilterRange: maxFilterRange, } } -func (s *filterTestGen) updateFinalizedBlock() { - s.finalizedBlock = mustGetFinalizedBlock(s.client) +func (s *filterTestGen) setLimitToFinalizedBlock() { + s.blockLimit = mustGetFinalizedBlock(s.client) } -const ( - // Parameter of the random filter query generator. - maxFilterRange = 10000000 - maxFilterResultSize = 300 - filterBuckets = 10 - maxFilterBucketSize = 100 - filterSeedChance = 10 - filterMergeChance = 45 -) - // storeQuery adds a filter query to the output file. 
func (s *filterTestGen) storeQuery(query *filterQuery) { query.ResultHash = new(common.Hash) *query.ResultHash = query.calculateHash() - logRatio := math.Log(float64(len(query.results))*float64(s.finalizedBlock)/float64(query.ToBlock+1-query.FromBlock)) / math.Log(float64(s.finalizedBlock)*maxFilterResultSize) + maxFilterRange := min(s.maxFilterRange, s.blockLimit) + logRatio := math.Log(float64(len(query.results))*float64(maxFilterRange)/float64(query.ToBlock+1-query.FromBlock)) / math.Log(float64(maxFilterRange)*maxFilterResultSize) bucket := int(math.Floor(logRatio * filterBuckets)) if bucket >= filterBuckets { bucket = filterBuckets - 1 @@ -160,13 +164,13 @@ func (s *filterTestGen) storeQuery(query *filterQuery) { func (s *filterTestGen) extendRange(q *filterQuery) *filterQuery { rangeLen := q.ToBlock + 1 - q.FromBlock extLen := rand.Int63n(rangeLen) + 1 - if rangeLen+extLen > s.finalizedBlock { + if rangeLen+extLen > min(s.maxFilterRange, s.blockLimit) { return nil } extBefore := min(rand.Int63n(extLen+1), q.FromBlock) extAfter := extLen - extBefore - if q.ToBlock+extAfter > s.finalizedBlock { - d := q.ToBlock + extAfter - s.finalizedBlock + if q.ToBlock+extAfter > s.blockLimit { + d := q.ToBlock + extAfter - s.blockLimit extAfter -= d if extBefore+d <= q.FromBlock { extBefore += d @@ -203,7 +207,7 @@ func (s *filterTestGen) newQuery() *filterQuery { // newSeedQuery creates a query that gets all logs in a random non-finalized block. 
func (s *filterTestGen) newSeedQuery() *filterQuery { - block := rand.Int63n(s.finalizedBlock + 1) + block := rand.Int63n(s.blockLimit + 1) return &filterQuery{ FromBlock: block, ToBlock: block, @@ -358,6 +362,7 @@ func (s *filterTestGen) writeQueries() { func mustGetFinalizedBlock(client *client) int64 { ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) defer cancel() + header, err := client.Eth.HeaderByNumber(ctx, big.NewInt(int64(rpc.FinalizedBlockNumber))) if err != nil { exit(fmt.Errorf("could not fetch finalized header (error: %v)", err)) diff --git a/cmd/workload/main.go b/cmd/workload/main.go index 32618d6a79..8ac0e5b6cb 100644 --- a/cmd/workload/main.go +++ b/cmd/workload/main.go @@ -49,6 +49,7 @@ func init() { filterGenerateCommand, traceGenerateCommand, filterPerfCommand, + filterFuzzCommand, } } From 6337577434abcd99a24ff5e14dce9bd6381efbeb Mon Sep 17 00:00:00 2001 From: Delweng Date: Tue, 14 Oct 2025 01:58:50 +0800 Subject: [PATCH 036/277] p2p/discover: wait for bootstrap to be done (#32881) This ensures the node is ready to accept other nodes into the table before it is used in a test. Closes #32863 --- p2p/discover/v4_udp_test.go | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/p2p/discover/v4_udp_test.go b/p2p/discover/v4_udp_test.go index 44863183fa..287f0c34fa 100644 --- a/p2p/discover/v4_udp_test.go +++ b/p2p/discover/v4_udp_test.go @@ -575,6 +575,13 @@ func startLocalhostV4(t *testing.T, cfg Config) *UDPv4 { if err != nil { t.Fatal(err) } + + // Wait for bootstrap to complete. 
+ select { + case <-udp.tab.initDone: + case <-time.After(5 * time.Second): + t.Fatalf("timed out waiting for table initialization") + } return udp } From 52c484de868dd6842b9205eede1d6add781bd424 Mon Sep 17 00:00:00 2001 From: Guillaume Ballet <3272758+gballet@users.noreply.github.com> Date: Tue, 14 Oct 2025 03:23:05 +0200 Subject: [PATCH 037/277] triedb/pathdb: catch int conversion overflow in 32-bit (#32899) The limit check for `MaxUint32` is done after the cast to `int`. On 64 bits machines, that will work without a problem. On 32 bits machines, that will always fail. The compiler catches it and refuses to build. Note that this only fixes the compiler build. ~~If the limit is above `MaxInt32` but strictly below `MaxUint32` then this will fail at runtime and we have another issue.~~ I checked and this should not happen during regular execution, although it might happen in tests. --- triedb/pathdb/history_trienode.go | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/triedb/pathdb/history_trienode.go b/triedb/pathdb/history_trienode.go index 2a4459d4ad..f5eb590a9a 100644 --- a/triedb/pathdb/history_trienode.go +++ b/triedb/pathdb/history_trienode.go @@ -161,7 +161,7 @@ func newTrienodeHistory(root common.Hash, parent common.Hash, block uint64, node // sharedLen returns the length of the common prefix shared by a and b. 
func sharedLen(a, b []byte) int { n := min(len(a), len(b)) - for i := 0; i < n; i++ { + for i := range n { if a[i] != b[i] { return i } @@ -295,7 +295,7 @@ func decodeHeader(data []byte) (*trienodeMetadata, []common.Hash, []uint32, []ui keyOffsets = make([]uint32, 0, count) valOffsets = make([]uint32, 0, count) ) - for i := 0; i < count; i++ { + for i := range count { n := trienodeMetadataSize + trienodeTrieHeaderSize*i owner := common.BytesToHash(data[n : n+common.HashLength]) if i != 0 && bytes.Compare(owner.Bytes(), owners[i-1].Bytes()) <= 0 { @@ -348,7 +348,7 @@ func decodeSingle(keySection []byte, onValue func([]byte, int, int) error) ([]st if len(keySection) < int(8*nRestarts)+4 { return nil, fmt.Errorf("key section too short, restarts: %d, size: %d", nRestarts, len(keySection)) } - for i := 0; i < int(nRestarts); i++ { + for i := range int(nRestarts) { o := len(keySection) - 4 - (int(nRestarts)-i)*8 keyOffset := binary.BigEndian.Uint32(keySection[o : o+4]) if i != 0 && keyOffset <= keyOffsets[i-1] { @@ -469,7 +469,7 @@ func (h *trienodeHistory) decode(header []byte, keySection []byte, valueSection h.nodeList = make(map[common.Hash][]string) h.nodes = make(map[common.Hash]map[string][]byte) - for i := 0; i < len(owners); i++ { + for i := range len(owners) { // Resolve the boundary of key section keyStart := keyOffsets[i] keyLimit := len(keySection) @@ -524,7 +524,7 @@ func newSingleTrienodeHistoryReader(id uint64, reader ethdb.AncientReader, keyRa } keyStart := int(keyRange.start) keyLimit := int(keyRange.limit) - if keyLimit == math.MaxUint32 { + if keyRange.limit == math.MaxUint32 { keyLimit = len(keyData) } if len(keyData) < keyStart || len(keyData) < keyLimit { From 00f6f2b32fd49255dd7fcab24f0326c1da91d1e5 Mon Sep 17 00:00:00 2001 From: Csaba Kiraly Date: Tue, 14 Oct 2025 05:01:43 +0200 Subject: [PATCH 038/277] eth/catalyst: remove useless log on enabling Engine API (#32901) --- eth/catalyst/api.go | 1 - 1 file changed, 1 deletion(-) diff --git 
a/eth/catalyst/api.go b/eth/catalyst/api.go index 6dfe24f729..75b263bf6b 100644 --- a/eth/catalyst/api.go +++ b/eth/catalyst/api.go @@ -47,7 +47,6 @@ import ( // Register adds the engine API to the full node. func Register(stack *node.Node, backend *eth.Ethereum) error { - log.Warn("Engine API enabled", "protocol", "eth") stack.RegisterAPIs([]rpc.API{ { Namespace: "engine", From fb8d2298b615aedd964635c6fb45587246da67ed Mon Sep 17 00:00:00 2001 From: Csaba Kiraly Date: Tue, 14 Oct 2025 05:03:31 +0200 Subject: [PATCH 039/277] eth: do not warn on switching from snap sync to full sync (#32900) This happens normally after a restart, so it is better to use Info level here. Signed-off-by: Csaba Kiraly --- eth/handler.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/eth/handler.go b/eth/handler.go index 304560a158..ff970e2ba6 100644 --- a/eth/handler.go +++ b/eth/handler.go @@ -181,8 +181,7 @@ func newHandler(config *handlerConfig) (*handler, error) { } else { head := h.chain.CurrentBlock() if head.Number.Uint64() > 0 && h.chain.HasState(head.Root) { - // Print warning log if database is not empty to run snap sync. - log.Warn("Switch sync mode from snap sync to full sync", "reason", "snap sync complete") + log.Info("Switch sync mode from snap sync to full sync", "reason", "snap sync complete") } else { // If snap sync was requested and our database is empty, grant it h.snapSync.Store(true) From e03d97a42052097d817eb13c22a2f1a6459518e1 Mon Sep 17 00:00:00 2001 From: rjl493456442 Date: Tue, 14 Oct 2025 14:40:04 +0800 Subject: [PATCH 040/277] core/txpool/legacypool: fix pricedList updates (#32906) This pr addresses a few issues brought by the #32270 - Add updates to pricedList after dropping transactions. - Remove redundant deletions in queue.evictList, since pool.removeTx(hash, true, true) already performs the removal. - Prevent duplicate addresses during promotion when Reset is not nil. 
--- core/txpool/legacypool/legacypool.go | 21 +++++------ core/txpool/legacypool/queue.go | 56 +++++++++++++++------------- 2 files changed, 40 insertions(+), 37 deletions(-) diff --git a/core/txpool/legacypool/legacypool.go b/core/txpool/legacypool/legacypool.go index b36d86dd19..ceedc74a53 100644 --- a/core/txpool/legacypool/legacypool.go +++ b/core/txpool/legacypool/legacypool.go @@ -367,8 +367,7 @@ func (pool *LegacyPool) loop() { // Handle inactive account transaction eviction case <-evict.C: pool.mu.Lock() - evicted := pool.queue.evict(false) - for _, hash := range evicted { + for _, hash := range pool.queue.evictList() { pool.removeTx(hash, true, true) } pool.mu.Unlock() @@ -813,7 +812,7 @@ func (pool *LegacyPool) isGapped(from common.Address, tx *types.Transaction) boo // // Note, this method assumes the pool lock is held! func (pool *LegacyPool) enqueueTx(hash common.Hash, tx *types.Transaction, addAll bool) (bool, error) { - replaced, err := pool.queue.add(hash, tx) + replaced, err := pool.queue.add(tx) if err != nil { return false, err } @@ -1093,7 +1092,7 @@ func (pool *LegacyPool) removeTx(hash common.Hash, outofbound bool, unreserve bo } } // Transaction is in the future queue - pool.queue.removeTx(addr, tx) + pool.queue.remove(addr, tx) return 0 } @@ -1241,7 +1240,7 @@ func (pool *LegacyPool) runReorg(done chan struct{}, reset *txpoolResetRequest, } } // Reset needs promote for all addresses - promoteAddrs = append(promoteAddrs, pool.queue.addresses()...) 
+ promoteAddrs = pool.queue.addresses() } // Check for pending transactions for every account that sent new ones promoted := pool.promoteExecutables(promoteAddrs) @@ -1397,9 +1396,9 @@ func (pool *LegacyPool) reset(oldHead, newHead *types.Header) { func (pool *LegacyPool) promoteExecutables(accounts []common.Address) []*types.Transaction { gasLimit := pool.currentHead.Load().GasLimit promotable, dropped, removedAddresses := pool.queue.promoteExecutables(accounts, gasLimit, pool.currentState, pool.pendingNonces) - promoted := make([]*types.Transaction, 0, len(promotable)) - // promote all promoteable transactions + // promote all promotable transactions + promoted := make([]*types.Transaction, 0, len(promotable)) for _, tx := range promotable { from, _ := pool.signer.Sender(tx) if pool.promoteTx(from, tx.Hash(), tx) { @@ -1411,16 +1410,15 @@ func (pool *LegacyPool) promoteExecutables(accounts []common.Address) []*types.T for _, hash := range dropped { pool.all.Remove(hash) } + pool.priced.Removed(len(dropped)) // release all accounts that have no more transactions in the pool for _, addr := range removedAddresses { _, hasPending := pool.pending[addr] - _, hasQueued := pool.queue.get(addr) - if !hasPending && !hasQueued { + if !hasPending { pool.reserver.Release(addr) } } - return promoted } @@ -1510,10 +1508,11 @@ func (pool *LegacyPool) truncatePending() { func (pool *LegacyPool) truncateQueue() { removed, removedAddresses := pool.queue.truncate() - // remove all removable transactions + // Remove all removable transactions from the lookup and global price list for _, hash := range removed { pool.all.Remove(hash) } + pool.priced.Removed(len(removed)) for _, addr := range removedAddresses { _, hasPending := pool.pending[addr] diff --git a/core/txpool/legacypool/queue.go b/core/txpool/legacypool/queue.go index b8417064f7..a889debe37 100644 --- a/core/txpool/legacypool/queue.go +++ b/core/txpool/legacypool/queue.go @@ -27,6 +27,8 @@ import ( 
"github.com/ethereum/go-ethereum/log" ) +// queue manages nonce-gapped transactions that have been validated but are +// not yet processable. type queue struct { config Config signer types.Signer @@ -43,19 +45,17 @@ func newQueue(config Config, signer types.Signer) *queue { } } -func (q *queue) evict(force bool) []common.Hash { - removed := make([]common.Hash, 0) +// evictList returns the hashes of transactions that are old enough to be evicted. +func (q *queue) evictList() []common.Hash { + var removed []common.Hash for addr, list := range q.queued { - // Any transactions old enough should be removed - if force || time.Since(q.beats[addr]) > q.config.Lifetime { - list := list.Flatten() - for _, tx := range list { - q.removeTx(addr, tx) + if time.Since(q.beats[addr]) > q.config.Lifetime { + for _, tx := range list.Flatten() { removed = append(removed, tx.Hash()) } - queuedEvictionMeter.Mark(int64(len(list))) } } + queuedEvictionMeter.Mark(int64(len(removed))) return removed } @@ -100,7 +100,7 @@ func (q *queue) addresses() []common.Address { return addrs } -func (q queue) removeTx(addr common.Address, tx *types.Transaction) { +func (q *queue) remove(addr common.Address, tx *types.Transaction) { if future := q.queued[addr]; future != nil { if txOld := future.txs.Get(tx.Nonce()); txOld != nil && txOld.Hash() != tx.Hash() { // Edge case, a different transaction @@ -118,7 +118,7 @@ func (q queue) removeTx(addr common.Address, tx *types.Transaction) { } } -func (q *queue) add(hash common.Hash, tx *types.Transaction) (*common.Hash, error) { +func (q *queue) add(tx *types.Transaction) (*common.Hash, error) { // Try to insert the transaction into the future queue from, _ := types.Sender(q.signer, tx) // already validated if q.queued[from] == nil { @@ -149,15 +149,17 @@ func (q *queue) add(hash common.Hash, tx *types.Transaction) (*common.Hash, erro // for promotion any that are now executable. 
It also drops any transactions that are // deemed too old (nonce too low) or too costly (insufficient funds or over gas limit). // -// Returns three lists: all transactions that were removed from the queue and selected -// for promotion; all other transactions that were removed from the queue and dropped; -// the list of addresses removed. +// Returns three lists: +// - all transactions that were removed from the queue and selected for promotion; +// - all other transactions that were removed from the queue and dropped; +// - the list of addresses removed. func (q *queue) promoteExecutables(accounts []common.Address, gasLimit uint64, currentState *state.StateDB, nonces *noncer) ([]*types.Transaction, []common.Hash, []common.Address) { // Track the promotable transactions to broadcast them at once - var promotable []*types.Transaction - var dropped []common.Hash - var removedAddresses []common.Address - + var ( + promotable []*types.Transaction + dropped []common.Hash + removedAddresses []common.Address + ) // Iterate over all accounts and promote any executable transactions for _, addr := range accounts { list := q.queued[addr] @@ -170,6 +172,7 @@ func (q *queue) promoteExecutables(accounts []common.Address, gasLimit uint64, c dropped = append(dropped, tx.Hash()) } log.Trace("Removing old queued transactions", "count", len(forwards)) + // Drop all transactions that are too costly (low balance or out of gas) drops, _ := list.Filter(currentState.GetBalance(addr), gasLimit) for _, tx := range drops { @@ -205,9 +208,9 @@ func (q *queue) promoteExecutables(accounts []common.Address, gasLimit uint64, c } // truncate drops the oldest transactions from the queue until the total -// number is below the configured limit. -// Returns the hashes of all dropped transactions, and the addresses of -// accounts that became empty due to the truncation. +// number is below the configured limit. 
Returns the hashes of all dropped +// transactions and the addresses of accounts that became empty due to +// the truncation. func (q *queue) truncate() ([]common.Hash, []common.Address) { queued := uint64(0) for _, list := range q.queued { @@ -223,10 +226,12 @@ func (q *queue) truncate() ([]common.Hash, []common.Address) { addresses = append(addresses, addressByHeartbeat{addr, q.beats[addr]}) } sort.Sort(sort.Reverse(addresses)) - removed := make([]common.Hash, 0) - removedAddresses := make([]common.Address, 0) // Drop transactions until the total is below the limit + var ( + removed = make([]common.Hash, 0) + removedAddresses = make([]common.Address, 0) + ) for drop := queued - q.config.GlobalQueue; drop > 0 && len(addresses) > 0; { addr := addresses[len(addresses)-1] list := q.queued[addr.address] @@ -236,7 +241,7 @@ func (q *queue) truncate() ([]common.Hash, []common.Address) { // Drop all transactions if they are less than the overflow if size := uint64(list.Len()); size <= drop { for _, tx := range list.Flatten() { - q.removeTx(addr.address, tx) + q.remove(addr.address, tx) removed = append(removed, tx.Hash()) } drop -= size @@ -247,14 +252,13 @@ func (q *queue) truncate() ([]common.Hash, []common.Address) { // Otherwise drop only last few transactions txs := list.Flatten() for i := len(txs) - 1; i >= 0 && drop > 0; i-- { - q.removeTx(addr.address, txs[i]) + q.remove(addr.address, txs[i]) removed = append(removed, txs[i].Hash()) drop-- queuedRateLimitMeter.Mark(1) } } - - // no need to clear empty accounts, removeTx already does that + // No need to clear empty accounts, remove already does that return removed, removedAddresses } From 55a53208b7f6fe656fe13e0f34a652aaedad6e9e Mon Sep 17 00:00:00 2001 From: cui Date: Tue, 14 Oct 2025 17:07:48 +0800 Subject: [PATCH 041/277] accounts/abi: check presence of payable fallback or receive before proceeding with transfer (#32374) remove todo --- accounts/abi/abigen/bind_test.go | 6 +++--- accounts/abi/bind/v2/base.go | 
6 ++++-- 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/accounts/abi/abigen/bind_test.go b/accounts/abi/abigen/bind_test.go index b3c52e81e5..1651e637c8 100644 --- a/accounts/abi/abigen/bind_test.go +++ b/accounts/abi/abigen/bind_test.go @@ -485,13 +485,13 @@ var bindTests = []struct { contract Defaulter { address public caller; - function() { + fallback() external payable { caller = msg.sender; } } `, - []string{`6060604052606a8060106000396000f360606040523615601d5760e060020a6000350463fc9c8d3981146040575b605e6000805473ffffffffffffffffffffffffffffffffffffffff191633179055565b606060005473ffffffffffffffffffffffffffffffffffffffff1681565b005b6060908152602090f3`}, - []string{`[{"constant":true,"inputs":[],"name":"caller","outputs":[{"name":"","type":"address"}],"type":"function"}]`}, + []string{`608060405234801561000f575f80fd5b5061013d8061001d5f395ff3fe608060405260043610610021575f3560e01c8063fc9c8d391461006257610022565b5b335f806101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff1602179055005b34801561006d575f80fd5b5061007661008c565b60405161008391906100ee565b60405180910390f35b5f8054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b5f73ffffffffffffffffffffffffffffffffffffffff82169050919050565b5f6100d8826100af565b9050919050565b6100e8816100ce565b82525050565b5f6020820190506101015f8301846100df565b9291505056fea26469706673582212201e9273ecfb1f534644c77f09a25c21baaba81cf1c444ebc071e12a225a23c72964736f6c63430008140033`}, + []string{`[{"stateMutability":"payable","type":"fallback"},{"inputs":[],"name":"caller","outputs":[{"internalType":"address","name":"","type":"address"}],"stateMutability":"view","type":"function"}]`}, ` "math/big" diff --git a/accounts/abi/bind/v2/base.go b/accounts/abi/bind/v2/base.go index f714848efb..4f2013b4a3 100644 --- a/accounts/abi/bind/v2/base.go +++ b/accounts/abi/bind/v2/base.go @@ -277,8 +277,10 @@ func (c *BoundContract) RawCreationTransact(opts *TransactOpts, 
calldata []byte) // Transfer initiates a plain transaction to move funds to the contract, calling // its default method if one is available. func (c *BoundContract) Transfer(opts *TransactOpts) (*types.Transaction, error) { - // todo(rjl493456442) check the payable fallback or receive is defined - // or not, reject invalid transaction at the first place + // Check if payable fallback or receive is defined + if !c.abi.HasReceive() && !(c.abi.HasFallback() && c.abi.Fallback.IsPayable()) { + return nil, fmt.Errorf("contract does not have a payable fallback or receive function") + } return c.transact(opts, &c.address, nil) } From f6064f32c4abeffa99c4c29674fe9ba756e90e08 Mon Sep 17 00:00:00 2001 From: Felix Lange Date: Tue, 14 Oct 2025 14:40:54 +0200 Subject: [PATCH 042/277] internal/ethapi: convert legacy blobtx proofs in sendRawTransaction (#32849) This adds a temporary conversion path for blob transactions with legacy proof sidecar. This feature will activate after Fusaka. We will phase this out when the fork has sufficiently settled and client side libraries have been upgraded to send the new proofs. --- internal/ethapi/api.go | 30 ++++++++++++++++++++++-------- 1 file changed, 22 insertions(+), 8 deletions(-) diff --git a/internal/ethapi/api.go b/internal/ethapi/api.go index a2cb28d3b2..c10a4754af 100644 --- a/internal/ethapi/api.go +++ b/internal/ethapi/api.go @@ -1619,16 +1619,9 @@ func (api *TransactionAPI) SendTransaction(ctx context.Context, args Transaction // processing (signing + broadcast). 
func (api *TransactionAPI) FillTransaction(ctx context.Context, args TransactionArgs) (*SignTransactionResult, error) { // Set some sanity defaults and terminate on failure - sidecarVersion := types.BlobSidecarVersion0 - if len(args.Blobs) > 0 { - h := api.b.CurrentHeader() - if api.b.ChainConfig().IsOsaka(h.Number, h.Time) { - sidecarVersion = types.BlobSidecarVersion1 - } - } config := sidecarConfig{ blobSidecarAllowed: true, - blobSidecarVersion: sidecarVersion, + blobSidecarVersion: api.currentBlobSidecarVersion(), } if err := args.setDefaults(ctx, api.b, config); err != nil { return nil, err @@ -1642,6 +1635,14 @@ func (api *TransactionAPI) FillTransaction(ctx context.Context, args Transaction return &SignTransactionResult{data, tx}, nil } +func (api *TransactionAPI) currentBlobSidecarVersion() byte { + h := api.b.CurrentHeader() + if api.b.ChainConfig().IsOsaka(h.Number, h.Time) { + return types.BlobSidecarVersion1 + } + return types.BlobSidecarVersion0 +} + // SendRawTransaction will add the signed transaction to the transaction pool. // The sender is responsible for signing the transaction and using the correct nonce. func (api *TransactionAPI) SendRawTransaction(ctx context.Context, input hexutil.Bytes) (common.Hash, error) { @@ -1649,6 +1650,19 @@ func (api *TransactionAPI) SendRawTransaction(ctx context.Context, input hexutil if err := tx.UnmarshalBinary(input); err != nil { return common.Hash{}, err } + + // Convert legacy blob transaction proofs. 
+ // TODO: remove in go-ethereum v1.17.x + if sc := tx.BlobTxSidecar(); sc != nil { + exp := api.currentBlobSidecarVersion() + if sc.Version == types.BlobSidecarVersion0 && exp == types.BlobSidecarVersion1 { + if err := sc.ToV1(); err != nil { + return common.Hash{}, fmt.Errorf("blob sidecar conversion failed: %v", err) + } + tx = tx.WithBlobTxSidecar(sc) + } + } + return SubmitTransaction(ctx, api.b, tx) } From 3cfc33477bcd84aefa8cfbc8cee33625b8cfea6d Mon Sep 17 00:00:00 2001 From: mishraa-G Date: Wed, 15 Oct 2025 13:09:00 +0530 Subject: [PATCH 043/277] rpc: fix flaky test TestServerWebsocketReadLimit (#32889) --- rpc/server_test.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/rpc/server_test.go b/rpc/server_test.go index a38a64b080..8334d4e80d 100644 --- a/rpc/server_test.go +++ b/rpc/server_test.go @@ -273,7 +273,8 @@ func TestServerWebsocketReadLimit(t *testing.T) { } } else if !errors.Is(err, websocket.ErrReadLimit) && !strings.Contains(strings.ToLower(err.Error()), "1009") && - !strings.Contains(strings.ToLower(err.Error()), "message too big") { + !strings.Contains(strings.ToLower(err.Error()), "message too big") && + !strings.Contains(strings.ToLower(err.Error()), "connection reset by peer") { // Not the error we expect from exceeding the message size limit. t.Fatalf("unexpected error for read limit violation: %v", err) } From 40505a9bc065033472be5c0bcaf2882efd421ce2 Mon Sep 17 00:00:00 2001 From: Delweng Date: Wed, 15 Oct 2025 16:24:48 +0800 Subject: [PATCH 044/277] eth/protocols/eth: reject message containing duplicated txs and drop peer (#32728) Drop peer if sending the same transaction multiple times in a single message. 
Fixes https://github.com/ethereum/go-ethereum/issues/32724 --------- Signed-off-by: Csaba Kiraly Co-authored-by: Gary Rong Co-authored-by: Csaba Kiraly --- eth/protocols/eth/handlers.go | 18 ++++++++++++++++-- 1 file changed, 16 insertions(+), 2 deletions(-) diff --git a/eth/protocols/eth/handlers.go b/eth/protocols/eth/handlers.go index 15ad048bcf..aad3353d88 100644 --- a/eth/protocols/eth/handlers.go +++ b/eth/protocols/eth/handlers.go @@ -494,12 +494,19 @@ func handleTransactions(backend Backend, msg Decoder, peer *Peer) error { if err := msg.Decode(&txs); err != nil { return err } + // Duplicate transactions are not allowed + seen := make(map[common.Hash]struct{}) for i, tx := range txs { // Validate and mark the remote transaction if tx == nil { return fmt.Errorf("Transactions: transaction %d is nil", i) } - peer.markTransaction(tx.Hash()) + hash := tx.Hash() + if _, exists := seen[hash]; exists { + return fmt.Errorf("Transactions: multiple copies of the same hash %v", hash) + } + seen[hash] = struct{}{} + peer.markTransaction(hash) } return backend.Handle(peer, &txs) } @@ -514,12 +521,19 @@ func handlePooledTransactions(backend Backend, msg Decoder, peer *Peer) error { if err := msg.Decode(&txs); err != nil { return err } + // Duplicate transactions are not allowed + seen := make(map[common.Hash]struct{}) for i, tx := range txs.PooledTransactionsResponse { // Validate and mark the remote transaction if tx == nil { return fmt.Errorf("PooledTransactions: transaction %d is nil", i) } - peer.markTransaction(tx.Hash()) + hash := tx.Hash() + if _, exists := seen[hash]; exists { + return fmt.Errorf("PooledTransactions: multiple copies of the same hash %v", hash) + } + seen[hash] = struct{}{} + peer.markTransaction(hash) } requestTracker.Fulfil(peer.id, peer.version, PooledTransactionsMsg, txs.RequestId) From 7c107c2691fa66a1da60e2b95f5946c3a3921b00 Mon Sep 17 00:00:00 2001 From: Felix Lange Date: Wed, 15 Oct 2025 11:51:33 +0200 Subject: [PATCH 045/277] p2p/discover: 
remove hot-spin in table refresh trigger (#32912) This fixes a regression introduced in #32518. In that PR, we removed the slowdown logic that would throttle lookups when the table runs empty. Said logic was originally added in #20389. Usually it's fine, but there exist pathological cases, such as hive tests, where the node can only discover one other node, so it can only ever query that node and won't get any results. In cases like these, we need to throttle the creation of lookups to avoid crazy CPU usage. --- p2p/discover/lookup.go | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/p2p/discover/lookup.go b/p2p/discover/lookup.go index 9cca0118ac..416256fb36 100644 --- a/p2p/discover/lookup.go +++ b/p2p/discover/lookup.go @@ -153,6 +153,7 @@ type lookupIterator struct { cancel func() lookup *lookup tabRefreshing <-chan struct{} + lastLookup time.Time } type lookupFunc func(ctx context.Context) *lookup @@ -185,6 +186,9 @@ func (it *lookupIterator) Next() bool { return false } if it.lookup == nil { + // Ensure enough time has passed between lookup creations. + it.slowdown() + it.lookup = it.nextLookup(it.ctx) if it.lookup.empty() { // If the lookup is empty right after creation, it means the local table @@ -235,6 +239,25 @@ func (it *lookupIterator) lookupFailed(tab *Table, timeout time.Duration) { tab.waitForNodes(tout, 1) } +// slowdown applies a delay between creating lookups. This exists to prevent hot-spinning +// in some test environments where lookups don't yield any results. +func (it *lookupIterator) slowdown() { + const minInterval = 1 * time.Second + + now := time.Now() + diff := now.Sub(it.lastLookup) + it.lastLookup = now + if diff > minInterval { + return + } + wait := time.NewTimer(diff) + defer wait.Stop() + select { + case <-wait.C: + case <-it.ctx.Done(): + } +} + // Close ends the iterator. 
func (it *lookupIterator) Close() { it.cancel() From 32ccb548d34b26edb665446d1695870573d136c7 Mon Sep 17 00:00:00 2001 From: Felix Lange Date: Thu, 16 Oct 2025 09:58:47 +0200 Subject: [PATCH 046/277] version: release go-ethereum v1.16.5 stable --- version/version.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/version/version.go b/version/version.go index db4e5394b9..f50a9892a4 100644 --- a/version/version.go +++ b/version/version.go @@ -17,8 +17,8 @@ package version const ( - Major = 1 // Major version component of the current release - Minor = 16 // Minor version component of the current release - Patch = 5 // Patch version component of the current release - Meta = "unstable" // Version metadata to append to the version string + Major = 1 // Major version component of the current release + Minor = 16 // Minor version component of the current release + Patch = 5 // Patch version component of the current release + Meta = "stable" // Version metadata to append to the version string ) From 367b5fbe4211a01cfa15b3166b68b0d0dc5cecdc Mon Sep 17 00:00:00 2001 From: Felix Lange Date: Thu, 16 Oct 2025 09:59:52 +0200 Subject: [PATCH 047/277] version: begin v1.16.6 release cycle --- version/version.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/version/version.go b/version/version.go index f50a9892a4..ead2d04f2a 100644 --- a/version/version.go +++ b/version/version.go @@ -17,8 +17,8 @@ package version const ( - Major = 1 // Major version component of the current release - Minor = 16 // Minor version component of the current release - Patch = 5 // Patch version component of the current release - Meta = "stable" // Version metadata to append to the version string + Major = 1 // Major version component of the current release + Minor = 16 // Minor version component of the current release + Patch = 6 // Patch version component of the current release + Meta = "unstable" // Version metadata to append to the version string ) 
From 5c535074acc26b76612ff1f6921fe54333d0912d Mon Sep 17 00:00:00 2001 From: Galoretka Date: Thu, 16 Oct 2025 14:49:41 +0300 Subject: [PATCH 048/277] cmd/geth: log current key in expandVerkle instead of keylist[0] (#32689) Fix logging in the verkle dump path to report the actual key being processed. Previously, the loop always logged keylist[0], which misled users when expanding multiple keys and made debugging harder. This change aligns the log with the key passed to root.Get, improving traceability and diagnostics. --- cmd/geth/verkle.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/geth/verkle.go b/cmd/geth/verkle.go index 6490f832af..67dc7257c0 100644 --- a/cmd/geth/verkle.go +++ b/cmd/geth/verkle.go @@ -201,7 +201,7 @@ func expandVerkle(ctx *cli.Context) error { } for i, key := range keylist { - log.Info("Reading key", "index", i, "key", keylist[0]) + log.Info("Reading key", "index", i, "key", key) root.Get(key, chaindb.Get) } From c37bd6701930eb164f5a677327ebb0c43293ccf4 Mon Sep 17 00:00:00 2001 From: hero5512 Date: Thu, 16 Oct 2025 11:32:55 -0400 Subject: [PATCH 049/277] ethclient: add support for eth_simulateV1 (#32856) Adds ethclient support for the eth_simulateV1 RPC method, which allows simulating transactions on top of a base state without making changes to the blockchain. 
--------- Co-authored-by: Sina Mahmoodi --- ethclient/ethclient.go | 86 +++++++++ ethclient/ethclient_test.go | 247 +++++++++++++++++++++++++ ethclient/gen_simulate_block_result.go | 80 ++++++++ ethclient/gen_simulate_call_result.go | 61 ++++++ ethclient/gethclient/gethclient.go | 98 +--------- interfaces.go | 97 ++++++++++ 6 files changed, 575 insertions(+), 94 deletions(-) create mode 100644 ethclient/gen_simulate_block_result.go create mode 100644 ethclient/gen_simulate_call_result.go diff --git a/ethclient/ethclient.go b/ethclient/ethclient.go index 8b26f5b3ca..0b676fccfb 100644 --- a/ethclient/ethclient.go +++ b/ethclient/ethclient.go @@ -828,3 +828,89 @@ func (p *rpcProgress) toSyncProgress() *ethereum.SyncProgress { StateIndexRemaining: uint64(p.StateIndexRemaining), } } + +// SimulateOptions represents the options for eth_simulateV1. +type SimulateOptions struct { + BlockStateCalls []SimulateBlock `json:"blockStateCalls"` + TraceTransfers bool `json:"traceTransfers"` + Validation bool `json:"validation"` + ReturnFullTransactions bool `json:"returnFullTransactions"` +} + +// SimulateBlock represents a batch of calls to be simulated. +type SimulateBlock struct { + BlockOverrides *ethereum.BlockOverrides `json:"blockOverrides,omitempty"` + StateOverrides map[common.Address]ethereum.OverrideAccount `json:"stateOverrides,omitempty"` + Calls []ethereum.CallMsg `json:"calls"` +} + +// MarshalJSON implements json.Marshaler for SimulateBlock. 
+func (s SimulateBlock) MarshalJSON() ([]byte, error) { + type Alias struct { + BlockOverrides *ethereum.BlockOverrides `json:"blockOverrides,omitempty"` + StateOverrides map[common.Address]ethereum.OverrideAccount `json:"stateOverrides,omitempty"` + Calls []interface{} `json:"calls"` + } + calls := make([]interface{}, len(s.Calls)) + for i, call := range s.Calls { + calls[i] = toCallArg(call) + } + return json.Marshal(Alias{ + BlockOverrides: s.BlockOverrides, + StateOverrides: s.StateOverrides, + Calls: calls, + }) +} + +//go:generate go run github.com/fjl/gencodec -type SimulateCallResult -field-override simulateCallResultMarshaling -out gen_simulate_call_result.go + +// SimulateCallResult is the result of a simulated call. +type SimulateCallResult struct { + ReturnValue []byte `json:"returnData"` + Logs []*types.Log `json:"logs"` + GasUsed uint64 `json:"gasUsed"` + Status uint64 `json:"status"` + Error *CallError `json:"error,omitempty"` +} + +type simulateCallResultMarshaling struct { + ReturnValue hexutil.Bytes + GasUsed hexutil.Uint64 + Status hexutil.Uint64 +} + +// CallError represents an error from a simulated call. +type CallError struct { + Code int `json:"code"` + Message string `json:"message"` + Data string `json:"data,omitempty"` +} + +//go:generate go run github.com/fjl/gencodec -type SimulateBlockResult -field-override simulateBlockResultMarshaling -out gen_simulate_block_result.go + +// SimulateBlockResult represents the result of a simulated block. 
+type SimulateBlockResult struct { + Number *big.Int `json:"number"` + Hash common.Hash `json:"hash"` + Timestamp uint64 `json:"timestamp"` + GasLimit uint64 `json:"gasLimit"` + GasUsed uint64 `json:"gasUsed"` + FeeRecipient common.Address `json:"miner"` + BaseFeePerGas *big.Int `json:"baseFeePerGas,omitempty"` + Calls []SimulateCallResult `json:"calls"` +} + +type simulateBlockResultMarshaling struct { + Number *hexutil.Big + Timestamp hexutil.Uint64 + GasLimit hexutil.Uint64 + GasUsed hexutil.Uint64 + BaseFeePerGas *hexutil.Big +} + +// SimulateV1 executes transactions on top of a base state. +func (ec *Client) SimulateV1(ctx context.Context, opts SimulateOptions, blockNrOrHash *rpc.BlockNumberOrHash) ([]SimulateBlockResult, error) { + var result []SimulateBlockResult + err := ec.c.CallContext(ctx, &result, "eth_simulateV1", opts, blockNrOrHash) + return result, err +} diff --git a/ethclient/ethclient_test.go b/ethclient/ethclient_test.go index 815bc29de4..302ccf2e16 100644 --- a/ethclient/ethclient_test.go +++ b/ethclient/ethclient_test.go @@ -754,3 +754,250 @@ func ExampleRevertErrorData() { // revert: 08c379a00000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000a75736572206572726f72 // message: user error } + +func TestSimulateV1(t *testing.T) { + backend, _, err := newTestBackend(nil) + if err != nil { + t.Fatalf("Failed to create test backend: %v", err) + } + defer backend.Close() + + client := ethclient.NewClient(backend.Attach()) + defer client.Close() + + ctx := context.Background() + + // Get current base fee + header, err := client.HeaderByNumber(ctx, nil) + if err != nil { + t.Fatalf("Failed to get header: %v", err) + } + + // Simple test: transfer ETH from one account to another + from := testAddr + to := common.HexToAddress("0x0000000000000000000000000000000000000001") + value := big.NewInt(100) + gas := uint64(100000) + maxFeePerGas := new(big.Int).Mul(header.BaseFee, 
big.NewInt(2)) + + opts := ethclient.SimulateOptions{ + BlockStateCalls: []ethclient.SimulateBlock{ + { + Calls: []ethereum.CallMsg{ + { + From: from, + To: &to, + Value: value, + Gas: gas, + GasFeeCap: maxFeePerGas, + }, + }, + }, + }, + Validation: true, + } + + results, err := client.SimulateV1(ctx, opts, nil) + if err != nil { + t.Fatalf("SimulateV1 failed: %v", err) + } + + if len(results) != 1 { + t.Fatalf("expected 1 block result, got %d", len(results)) + } + + if len(results[0].Calls) != 1 { + t.Fatalf("expected 1 call result, got %d", len(results[0].Calls)) + } + + // Check that the transaction succeeded + if results[0].Calls[0].Status != 1 { + t.Errorf("expected status 1 (success), got %d", results[0].Calls[0].Status) + } + + if results[0].Calls[0].Error != nil { + t.Errorf("expected no error, got %v", results[0].Calls[0].Error) + } +} + +func TestSimulateV1WithBlockOverrides(t *testing.T) { + backend, _, err := newTestBackend(nil) + if err != nil { + t.Fatalf("Failed to create test backend: %v", err) + } + defer backend.Close() + + client := ethclient.NewClient(backend.Attach()) + defer client.Close() + + ctx := context.Background() + + // Get current base fee + header, err := client.HeaderByNumber(ctx, nil) + if err != nil { + t.Fatalf("Failed to get header: %v", err) + } + + from := testAddr + to := common.HexToAddress("0x0000000000000000000000000000000000000001") + value := big.NewInt(100) + gas := uint64(100000) + maxFeePerGas := new(big.Int).Mul(header.BaseFee, big.NewInt(2)) + + // Override timestamp only + timestamp := uint64(1234567890) + + opts := ethclient.SimulateOptions{ + BlockStateCalls: []ethclient.SimulateBlock{ + { + BlockOverrides: &ethereum.BlockOverrides{ + Time: timestamp, + }, + Calls: []ethereum.CallMsg{ + { + From: from, + To: &to, + Value: value, + Gas: gas, + GasFeeCap: maxFeePerGas, + }, + }, + }, + }, + Validation: true, + } + + results, err := client.SimulateV1(ctx, opts, nil) + if err != nil { + t.Fatalf("SimulateV1 with block 
overrides failed: %v", err) + } + + if len(results) != 1 { + t.Fatalf("expected 1 block result, got %d", len(results)) + } + + // Verify the timestamp was overridden + if results[0].Timestamp != timestamp { + t.Errorf("expected timestamp %d, got %d", timestamp, results[0].Timestamp) + } +} + +func TestSimulateV1WithStateOverrides(t *testing.T) { + backend, _, err := newTestBackend(nil) + if err != nil { + t.Fatalf("Failed to create test backend: %v", err) + } + defer backend.Close() + + client := ethclient.NewClient(backend.Attach()) + defer client.Close() + + ctx := context.Background() + + // Get current base fee + header, err := client.HeaderByNumber(ctx, nil) + if err != nil { + t.Fatalf("Failed to get header: %v", err) + } + + from := testAddr + to := common.HexToAddress("0x0000000000000000000000000000000000000001") + value := big.NewInt(1000000000000000000) // 1 ETH + gas := uint64(100000) + maxFeePerGas := new(big.Int).Mul(header.BaseFee, big.NewInt(2)) + + // Override the balance of the 'from' address + balanceStr := "1000000000000000000000" + balance := new(big.Int) + balance.SetString(balanceStr, 10) + + stateOverrides := map[common.Address]ethereum.OverrideAccount{ + from: { + Balance: balance, + }, + } + + opts := ethclient.SimulateOptions{ + BlockStateCalls: []ethclient.SimulateBlock{ + { + StateOverrides: stateOverrides, + Calls: []ethereum.CallMsg{ + { + From: from, + To: &to, + Value: value, + Gas: gas, + GasFeeCap: maxFeePerGas, + }, + }, + }, + }, + Validation: true, + } + + results, err := client.SimulateV1(ctx, opts, nil) + if err != nil { + t.Fatalf("SimulateV1 with state overrides failed: %v", err) + } + + if len(results) != 1 { + t.Fatalf("expected 1 block result, got %d", len(results)) + } + + if results[0].Calls[0].Status != 1 { + t.Errorf("expected status 1 (success), got %d", results[0].Calls[0].Status) + } +} + +func TestSimulateV1WithBlockNumberOrHash(t *testing.T) { + backend, _, err := newTestBackend(nil) + if err != nil { + 
t.Fatalf("Failed to create test backend: %v", err) + } + defer backend.Close() + + client := ethclient.NewClient(backend.Attach()) + defer client.Close() + + ctx := context.Background() + + // Get current base fee + header, err := client.HeaderByNumber(ctx, nil) + if err != nil { + t.Fatalf("Failed to get header: %v", err) + } + + from := testAddr + to := common.HexToAddress("0x0000000000000000000000000000000000000001") + value := big.NewInt(100) + gas := uint64(100000) + maxFeePerGas := new(big.Int).Mul(header.BaseFee, big.NewInt(2)) + + opts := ethclient.SimulateOptions{ + BlockStateCalls: []ethclient.SimulateBlock{ + { + Calls: []ethereum.CallMsg{ + { + From: from, + To: &to, + Value: value, + Gas: gas, + GasFeeCap: maxFeePerGas, + }, + }, + }, + }, + Validation: true, + } + + // Simulate on the latest block + latest := rpc.BlockNumberOrHashWithNumber(rpc.LatestBlockNumber) + results, err := client.SimulateV1(ctx, opts, &latest) + if err != nil { + t.Fatalf("SimulateV1 with latest block failed: %v", err) + } + + if len(results) != 1 { + t.Fatalf("expected 1 block result, got %d", len(results)) + } +} diff --git a/ethclient/gen_simulate_block_result.go b/ethclient/gen_simulate_block_result.go new file mode 100644 index 0000000000..b8cd6ebf2f --- /dev/null +++ b/ethclient/gen_simulate_block_result.go @@ -0,0 +1,80 @@ +// Code generated by github.com/fjl/gencodec. DO NOT EDIT. + +package ethclient + +import ( + "encoding/json" + "math/big" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" +) + +var _ = (*simulateBlockResultMarshaling)(nil) + +// MarshalJSON marshals as JSON. 
+func (s SimulateBlockResult) MarshalJSON() ([]byte, error) { + type SimulateBlockResult struct { + Number *hexutil.Big `json:"number"` + Hash common.Hash `json:"hash"` + Timestamp hexutil.Uint64 `json:"timestamp"` + GasLimit hexutil.Uint64 `json:"gasLimit"` + GasUsed hexutil.Uint64 `json:"gasUsed"` + FeeRecipient common.Address `json:"miner"` + BaseFeePerGas *hexutil.Big `json:"baseFeePerGas,omitempty"` + Calls []SimulateCallResult `json:"calls"` + } + var enc SimulateBlockResult + enc.Number = (*hexutil.Big)(s.Number) + enc.Hash = s.Hash + enc.Timestamp = hexutil.Uint64(s.Timestamp) + enc.GasLimit = hexutil.Uint64(s.GasLimit) + enc.GasUsed = hexutil.Uint64(s.GasUsed) + enc.FeeRecipient = s.FeeRecipient + enc.BaseFeePerGas = (*hexutil.Big)(s.BaseFeePerGas) + enc.Calls = s.Calls + return json.Marshal(&enc) +} + +// UnmarshalJSON unmarshals from JSON. +func (s *SimulateBlockResult) UnmarshalJSON(input []byte) error { + type SimulateBlockResult struct { + Number *hexutil.Big `json:"number"` + Hash *common.Hash `json:"hash"` + Timestamp *hexutil.Uint64 `json:"timestamp"` + GasLimit *hexutil.Uint64 `json:"gasLimit"` + GasUsed *hexutil.Uint64 `json:"gasUsed"` + FeeRecipient *common.Address `json:"miner"` + BaseFeePerGas *hexutil.Big `json:"baseFeePerGas,omitempty"` + Calls []SimulateCallResult `json:"calls"` + } + var dec SimulateBlockResult + if err := json.Unmarshal(input, &dec); err != nil { + return err + } + if dec.Number != nil { + s.Number = (*big.Int)(dec.Number) + } + if dec.Hash != nil { + s.Hash = *dec.Hash + } + if dec.Timestamp != nil { + s.Timestamp = uint64(*dec.Timestamp) + } + if dec.GasLimit != nil { + s.GasLimit = uint64(*dec.GasLimit) + } + if dec.GasUsed != nil { + s.GasUsed = uint64(*dec.GasUsed) + } + if dec.FeeRecipient != nil { + s.FeeRecipient = *dec.FeeRecipient + } + if dec.BaseFeePerGas != nil { + s.BaseFeePerGas = (*big.Int)(dec.BaseFeePerGas) + } + if dec.Calls != nil { + s.Calls = dec.Calls + } + return nil +} diff --git 
a/ethclient/gen_simulate_call_result.go b/ethclient/gen_simulate_call_result.go new file mode 100644 index 0000000000..55e14cd697 --- /dev/null +++ b/ethclient/gen_simulate_call_result.go @@ -0,0 +1,61 @@ +// Code generated by github.com/fjl/gencodec. DO NOT EDIT. + +package ethclient + +import ( + "encoding/json" + + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/core/types" +) + +var _ = (*simulateCallResultMarshaling)(nil) + +// MarshalJSON marshals as JSON. +func (s SimulateCallResult) MarshalJSON() ([]byte, error) { + type SimulateCallResult struct { + ReturnValue hexutil.Bytes `json:"returnData"` + Logs []*types.Log `json:"logs"` + GasUsed hexutil.Uint64 `json:"gasUsed"` + Status hexutil.Uint64 `json:"status"` + Error *CallError `json:"error,omitempty"` + } + var enc SimulateCallResult + enc.ReturnValue = s.ReturnValue + enc.Logs = s.Logs + enc.GasUsed = hexutil.Uint64(s.GasUsed) + enc.Status = hexutil.Uint64(s.Status) + enc.Error = s.Error + return json.Marshal(&enc) +} + +// UnmarshalJSON unmarshals from JSON. 
+func (s *SimulateCallResult) UnmarshalJSON(input []byte) error { + type SimulateCallResult struct { + ReturnValue *hexutil.Bytes `json:"returnData"` + Logs []*types.Log `json:"logs"` + GasUsed *hexutil.Uint64 `json:"gasUsed"` + Status *hexutil.Uint64 `json:"status"` + Error *CallError `json:"error,omitempty"` + } + var dec SimulateCallResult + if err := json.Unmarshal(input, &dec); err != nil { + return err + } + if dec.ReturnValue != nil { + s.ReturnValue = *dec.ReturnValue + } + if dec.Logs != nil { + s.Logs = dec.Logs + } + if dec.GasUsed != nil { + s.GasUsed = uint64(*dec.GasUsed) + } + if dec.Status != nil { + s.Status = uint64(*dec.Status) + } + if dec.Error != nil { + s.Error = dec.Error + } + return nil +} diff --git a/ethclient/gethclient/gethclient.go b/ethclient/gethclient/gethclient.go index 54997cbf51..6a0f5eb312 100644 --- a/ethclient/gethclient/gethclient.go +++ b/ethclient/gethclient/gethclient.go @@ -19,7 +19,6 @@ package gethclient import ( "context" - "encoding/json" "fmt" "math/big" "runtime" @@ -280,97 +279,8 @@ func toCallArg(msg ethereum.CallMsg) interface{} { return arg } -// OverrideAccount specifies the state of an account to be overridden. -type OverrideAccount struct { - // Nonce sets nonce of the account. Note: the nonce override will only - // be applied when it is set to a non-zero value. - Nonce uint64 +// OverrideAccount is an alias for ethereum.OverrideAccount. +type OverrideAccount = ethereum.OverrideAccount - // Code sets the contract code. The override will be applied - // when the code is non-nil, i.e. setting empty code is possible - // using an empty slice. - Code []byte - - // Balance sets the account balance. - Balance *big.Int - - // State sets the complete storage. The override will be applied - // when the given map is non-nil. Using an empty map wipes the - // entire contract storage during the call. - State map[common.Hash]common.Hash - - // StateDiff allows overriding individual storage slots. 
- StateDiff map[common.Hash]common.Hash -} - -func (a OverrideAccount) MarshalJSON() ([]byte, error) { - type acc struct { - Nonce hexutil.Uint64 `json:"nonce,omitempty"` - Code string `json:"code,omitempty"` - Balance *hexutil.Big `json:"balance,omitempty"` - State interface{} `json:"state,omitempty"` - StateDiff map[common.Hash]common.Hash `json:"stateDiff,omitempty"` - } - - output := acc{ - Nonce: hexutil.Uint64(a.Nonce), - Balance: (*hexutil.Big)(a.Balance), - StateDiff: a.StateDiff, - } - if a.Code != nil { - output.Code = hexutil.Encode(a.Code) - } - if a.State != nil { - output.State = a.State - } - return json.Marshal(output) -} - -// BlockOverrides specifies the set of header fields to override. -type BlockOverrides struct { - // Number overrides the block number. - Number *big.Int - // Difficulty overrides the block difficulty. - Difficulty *big.Int - // Time overrides the block timestamp. Time is applied only when - // it is non-zero. - Time uint64 - // GasLimit overrides the block gas limit. GasLimit is applied only when - // it is non-zero. - GasLimit uint64 - // Coinbase overrides the block coinbase. Coinbase is applied only when - // it is different from the zero address. - Coinbase common.Address - // Random overrides the block extra data which feeds into the RANDOM opcode. - // Random is applied only when it is a non-zero hash. - Random common.Hash - // BaseFee overrides the block base fee. 
- BaseFee *big.Int -} - -func (o BlockOverrides) MarshalJSON() ([]byte, error) { - type override struct { - Number *hexutil.Big `json:"number,omitempty"` - Difficulty *hexutil.Big `json:"difficulty,omitempty"` - Time hexutil.Uint64 `json:"time,omitempty"` - GasLimit hexutil.Uint64 `json:"gasLimit,omitempty"` - Coinbase *common.Address `json:"feeRecipient,omitempty"` - Random *common.Hash `json:"prevRandao,omitempty"` - BaseFee *hexutil.Big `json:"baseFeePerGas,omitempty"` - } - - output := override{ - Number: (*hexutil.Big)(o.Number), - Difficulty: (*hexutil.Big)(o.Difficulty), - Time: hexutil.Uint64(o.Time), - GasLimit: hexutil.Uint64(o.GasLimit), - BaseFee: (*hexutil.Big)(o.BaseFee), - } - if o.Coinbase != (common.Address{}) { - output.Coinbase = &o.Coinbase - } - if o.Random != (common.Hash{}) { - output.Random = &o.Random - } - return json.Marshal(output) -} +// BlockOverrides is an alias for ethereum.BlockOverrides. +type BlockOverrides = ethereum.BlockOverrides diff --git a/interfaces.go b/interfaces.go index 2828af1cc9..21d42c6d34 100644 --- a/interfaces.go +++ b/interfaces.go @@ -19,10 +19,12 @@ package ethereum import ( "context" + "encoding/json" "errors" "math/big" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/core/types" ) @@ -293,3 +295,98 @@ type BlockNumberReader interface { type ChainIDReader interface { ChainID(ctx context.Context) (*big.Int, error) } + +// OverrideAccount specifies the state of an account to be overridden. +type OverrideAccount struct { + // Nonce sets nonce of the account. Note: the nonce override will only + // be applied when it is set to a non-zero value. + Nonce uint64 + + // Code sets the contract code. The override will be applied + // when the code is non-nil, i.e. setting empty code is possible + // using an empty slice. + Code []byte + + // Balance sets the account balance. + Balance *big.Int + + // State sets the complete storage. 
The override will be applied + // when the given map is non-nil. Using an empty map wipes the + // entire contract storage during the call. + State map[common.Hash]common.Hash + + // StateDiff allows overriding individual storage slots. + StateDiff map[common.Hash]common.Hash +} + +func (a OverrideAccount) MarshalJSON() ([]byte, error) { + type acc struct { + Nonce hexutil.Uint64 `json:"nonce,omitempty"` + Code string `json:"code,omitempty"` + Balance *hexutil.Big `json:"balance,omitempty"` + State interface{} `json:"state,omitempty"` + StateDiff map[common.Hash]common.Hash `json:"stateDiff,omitempty"` + } + + output := acc{ + Nonce: hexutil.Uint64(a.Nonce), + Balance: (*hexutil.Big)(a.Balance), + StateDiff: a.StateDiff, + } + if a.Code != nil { + output.Code = hexutil.Encode(a.Code) + } + if a.State != nil { + output.State = a.State + } + return json.Marshal(output) +} + +// BlockOverrides specifies the set of header fields to override. +type BlockOverrides struct { + // Number overrides the block number. + Number *big.Int + // Difficulty overrides the block difficulty. + Difficulty *big.Int + // Time overrides the block timestamp. Time is applied only when + // it is non-zero. + Time uint64 + // GasLimit overrides the block gas limit. GasLimit is applied only when + // it is non-zero. + GasLimit uint64 + // Coinbase overrides the block coinbase. Coinbase is applied only when + // it is different from the zero address. + Coinbase common.Address + // Random overrides the block extra data which feeds into the RANDOM opcode. + // Random is applied only when it is a non-zero hash. + Random common.Hash + // BaseFee overrides the block base fee. 
+ BaseFee *big.Int +} + +func (o BlockOverrides) MarshalJSON() ([]byte, error) { + type override struct { + Number *hexutil.Big `json:"number,omitempty"` + Difficulty *hexutil.Big `json:"difficulty,omitempty"` + Time hexutil.Uint64 `json:"time,omitempty"` + GasLimit hexutil.Uint64 `json:"gasLimit,omitempty"` + Coinbase *common.Address `json:"feeRecipient,omitempty"` + Random *common.Hash `json:"prevRandao,omitempty"` + BaseFee *hexutil.Big `json:"baseFeePerGas,omitempty"` + } + + output := override{ + Number: (*hexutil.Big)(o.Number), + Difficulty: (*hexutil.Big)(o.Difficulty), + Time: hexutil.Uint64(o.Time), + GasLimit: hexutil.Uint64(o.GasLimit), + BaseFee: (*hexutil.Big)(o.BaseFee), + } + if o.Coinbase != (common.Address{}) { + output.Coinbase = &o.Coinbase + } + if o.Random != (common.Hash{}) { + output.Random = &o.Random + } + return json.Marshal(output) +} From ff54ca02de232ae770a31cd25d0dc2ddfd08dc9f Mon Sep 17 00:00:00 2001 From: aodhgan <36907214+aodhgan@users.noreply.github.com> Date: Thu, 16 Oct 2025 08:34:47 -0700 Subject: [PATCH 050/277] internal/ethapi: add eth_SendRawTransactionSync (#32830) New RPC method eth_sendRawTransactionSync(rawTx, timeoutMs?) that submits a signed tx and blocks until a receipt is available or a timeout elapses. 
Two CLI flags to tune server-side limits: --rpc.txsync.defaulttimeout (default wait window) --rpc.txsync.maxtimeout (upper bound; requests are clamped) closes https://github.com/ethereum/go-ethereum/issues/32094 --------- Co-authored-by: aodhgan Co-authored-by: Sina Mahmoodi --- cmd/geth/main.go | 2 + cmd/utils/flags.go | 18 ++++ eth/api_backend.go | 8 ++ eth/ethconfig/config.go | 48 +++++----- ethclient/ethclient.go | 34 +++++++ internal/ethapi/api.go | 87 ++++++++++++++++++ internal/ethapi/api_test.go | 175 ++++++++++++++++++++++++++++++++++-- internal/ethapi/backend.go | 2 + internal/ethapi/errors.go | 11 +++ 9 files changed, 359 insertions(+), 26 deletions(-) diff --git a/cmd/geth/main.go b/cmd/geth/main.go index 2465b52ad1..cc294b2f30 100644 --- a/cmd/geth/main.go +++ b/cmd/geth/main.go @@ -188,6 +188,8 @@ var ( utils.AllowUnprotectedTxs, utils.BatchRequestLimit, utils.BatchResponseMaxSize, + utils.RPCTxSyncDefaultTimeoutFlag, + utils.RPCTxSyncMaxTimeoutFlag, } metricsFlags = []cli.Flag{ diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index c9da08578c..0c5db9e6d8 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -615,6 +615,18 @@ var ( Value: ethconfig.Defaults.LogQueryLimit, Category: flags.APICategory, } + RPCTxSyncDefaultTimeoutFlag = &cli.DurationFlag{ + Name: "rpc.txsync.defaulttimeout", + Usage: "Default timeout for eth_sendRawTransactionSync (e.g. 2s, 500ms)", + Value: ethconfig.Defaults.TxSyncDefaultTimeout, + Category: flags.APICategory, + } + RPCTxSyncMaxTimeoutFlag = &cli.DurationFlag{ + Name: "rpc.txsync.maxtimeout", + Usage: "Maximum allowed timeout for eth_sendRawTransactionSync (e.g. 
5m)", + Value: ethconfig.Defaults.TxSyncMaxTimeout, + Category: flags.APICategory, + } // Authenticated RPC HTTP settings AuthListenFlag = &cli.StringFlag{ Name: "authrpc.addr", @@ -1717,6 +1729,12 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) { if ctx.IsSet(RPCGlobalLogQueryLimit.Name) { cfg.LogQueryLimit = ctx.Int(RPCGlobalLogQueryLimit.Name) } + if ctx.IsSet(RPCTxSyncDefaultTimeoutFlag.Name) { + cfg.TxSyncDefaultTimeout = ctx.Duration(RPCTxSyncDefaultTimeoutFlag.Name) + } + if ctx.IsSet(RPCTxSyncMaxTimeoutFlag.Name) { + cfg.TxSyncMaxTimeout = ctx.Duration(RPCTxSyncMaxTimeoutFlag.Name) + } if !ctx.Bool(SnapshotFlag.Name) || cfg.SnapshotCache == 0 { // If snap-sync is requested, this flag is also required if cfg.SyncMode == ethconfig.SnapSync { diff --git a/eth/api_backend.go b/eth/api_backend.go index 3ae73e78af..766a99fc1e 100644 --- a/eth/api_backend.go +++ b/eth/api_backend.go @@ -486,3 +486,11 @@ func (b *EthAPIBackend) StateAtBlock(ctx context.Context, block *types.Block, re func (b *EthAPIBackend) StateAtTransaction(ctx context.Context, block *types.Block, txIndex int, reexec uint64) (*types.Transaction, vm.BlockContext, *state.StateDB, tracers.StateReleaseFunc, error) { return b.eth.stateAtTransaction(ctx, block, txIndex, reexec) } + +func (b *EthAPIBackend) RPCTxSyncDefaultTimeout() time.Duration { + return b.eth.config.TxSyncDefaultTimeout +} + +func (b *EthAPIBackend) RPCTxSyncMaxTimeout() time.Duration { + return b.eth.config.TxSyncMaxTimeout +} diff --git a/eth/ethconfig/config.go b/eth/ethconfig/config.go index 6020387bcd..c4a0956b3b 100644 --- a/eth/ethconfig/config.go +++ b/eth/ethconfig/config.go @@ -49,27 +49,29 @@ var FullNodeGPO = gasprice.Config{ // Defaults contains default settings for use on the Ethereum main net. 
var Defaults = Config{ - HistoryMode: history.KeepAll, - SyncMode: SnapSync, - NetworkId: 0, // enable auto configuration of networkID == chainID - TxLookupLimit: 2350000, - TransactionHistory: 2350000, - LogHistory: 2350000, - StateHistory: params.FullImmutabilityThreshold, - DatabaseCache: 512, - TrieCleanCache: 154, - TrieDirtyCache: 256, - TrieTimeout: 60 * time.Minute, - SnapshotCache: 102, - FilterLogCacheSize: 32, - LogQueryLimit: 1000, - Miner: miner.DefaultConfig, - TxPool: legacypool.DefaultConfig, - BlobPool: blobpool.DefaultConfig, - RPCGasCap: 50000000, - RPCEVMTimeout: 5 * time.Second, - GPO: FullNodeGPO, - RPCTxFeeCap: 1, // 1 ether + HistoryMode: history.KeepAll, + SyncMode: SnapSync, + NetworkId: 0, // enable auto configuration of networkID == chainID + TxLookupLimit: 2350000, + TransactionHistory: 2350000, + LogHistory: 2350000, + StateHistory: params.FullImmutabilityThreshold, + DatabaseCache: 512, + TrieCleanCache: 154, + TrieDirtyCache: 256, + TrieTimeout: 60 * time.Minute, + SnapshotCache: 102, + FilterLogCacheSize: 32, + LogQueryLimit: 1000, + Miner: miner.DefaultConfig, + TxPool: legacypool.DefaultConfig, + BlobPool: blobpool.DefaultConfig, + RPCGasCap: 50000000, + RPCEVMTimeout: 5 * time.Second, + GPO: FullNodeGPO, + RPCTxFeeCap: 1, // 1 ether + TxSyncDefaultTimeout: 20 * time.Second, + TxSyncMaxTimeout: 1 * time.Minute, } //go:generate go run github.com/fjl/gencodec -type Config -formats toml -out gen_config.go @@ -183,6 +185,10 @@ type Config struct { // OverrideVerkle (TODO: remove after the fork) OverrideVerkle *uint64 `toml:",omitempty"` + + // EIP-7966: eth_sendRawTransactionSync timeouts + TxSyncDefaultTimeout time.Duration `toml:",omitempty"` + TxSyncMaxTimeout time.Duration `toml:",omitempty"` } // CreateConsensusEngine creates a consensus engine for the given chain config. 
diff --git a/ethclient/ethclient.go b/ethclient/ethclient.go index 0b676fccfb..5008378da6 100644 --- a/ethclient/ethclient.go +++ b/ethclient/ethclient.go @@ -23,6 +23,7 @@ import ( "errors" "fmt" "math/big" + "time" "github.com/ethereum/go-ethereum" "github.com/ethereum/go-ethereum/common" @@ -705,6 +706,39 @@ func (ec *Client) SendTransaction(ctx context.Context, tx *types.Transaction) er return ec.c.CallContext(ctx, nil, "eth_sendRawTransaction", hexutil.Encode(data)) } +// SendTransactionSync submits a signed tx and waits for a receipt (or until +// the optional timeout elapses on the server side). If timeout == 0, the server +// uses its default. +func (ec *Client) SendTransactionSync( + ctx context.Context, + tx *types.Transaction, + timeout *time.Duration, +) (*types.Receipt, error) { + raw, err := tx.MarshalBinary() + if err != nil { + return nil, err + } + return ec.SendRawTransactionSync(ctx, raw, timeout) +} + +func (ec *Client) SendRawTransactionSync( + ctx context.Context, + rawTx []byte, + timeout *time.Duration, +) (*types.Receipt, error) { + var ms *hexutil.Uint64 + if timeout != nil { + if d := hexutil.Uint64(timeout.Milliseconds()); d > 0 { + ms = &d + } + } + var receipt types.Receipt + if err := ec.c.CallContext(ctx, &receipt, "eth_sendRawTransactionSync", hexutil.Bytes(rawTx), ms); err != nil { + return nil, err + } + return &receipt, nil +} + // RevertErrorData returns the 'revert reason' data of a contract call. // // This can be used with CallContract and EstimateGas, and only when the server is Geth. diff --git a/internal/ethapi/api.go b/internal/ethapi/api.go index c10a4754af..eb7a34474c 100644 --- a/internal/ethapi/api.go +++ b/internal/ethapi/api.go @@ -55,6 +55,7 @@ import ( const estimateGasErrorRatio = 0.015 var errBlobTxNotSupported = errors.New("signing blob transactions not supported") +var errSubClosed = errors.New("chain subscription closed") // EthereumAPI provides an API to access Ethereum related information. 
type EthereumAPI struct { @@ -1666,6 +1667,92 @@ func (api *TransactionAPI) SendRawTransaction(ctx context.Context, input hexutil return SubmitTransaction(ctx, api.b, tx) } +// SendRawTransactionSync will add the signed transaction to the transaction pool +// and wait until the transaction has been included in a block and return the receipt, or the timeout. +func (api *TransactionAPI) SendRawTransactionSync(ctx context.Context, input hexutil.Bytes, timeoutMs *hexutil.Uint64) (map[string]interface{}, error) { + tx := new(types.Transaction) + if err := tx.UnmarshalBinary(input); err != nil { + return nil, err + } + + ch := make(chan core.ChainEvent, 128) + sub := api.b.SubscribeChainEvent(ch) + subErrCh := sub.Err() + defer sub.Unsubscribe() + + hash, err := SubmitTransaction(ctx, api.b, tx) + if err != nil { + return nil, err + } + + maxTimeout := api.b.RPCTxSyncMaxTimeout() + defaultTimeout := api.b.RPCTxSyncDefaultTimeout() + + timeout := defaultTimeout + if timeoutMs != nil && *timeoutMs > 0 { + req := time.Duration(*timeoutMs) * time.Millisecond + if req > maxTimeout { + timeout = maxTimeout + } else { + timeout = req + } + } + + receiptCtx, cancel := context.WithTimeout(ctx, timeout) + defer cancel() + + // Fast path. + if r, err := api.GetTransactionReceipt(receiptCtx, hash); err == nil && r != nil { + return r, nil + } + + for { + select { + case <-receiptCtx.Done(): + // If server-side wait window elapsed, return the structured timeout. 
+ if errors.Is(receiptCtx.Err(), context.DeadlineExceeded) { + return nil, &txSyncTimeoutError{ + msg: fmt.Sprintf("The transaction was added to the transaction pool but wasn't processed in %v.", timeout), + hash: hash, + } + } + return nil, receiptCtx.Err() + + case err, ok := <-subErrCh: + if !ok { + return nil, errSubClosed + } + return nil, err + + case ev, ok := <-ch: + if !ok { + return nil, errSubClosed + } + rs := ev.Receipts + txs := ev.Transactions + if len(rs) == 0 || len(rs) != len(txs) { + continue + } + for i := range rs { + if rs[i].TxHash == hash { + if rs[i].BlockNumber != nil && rs[i].BlockHash != (common.Hash{}) { + signer := types.LatestSigner(api.b.ChainConfig()) + return MarshalReceipt( + rs[i], + rs[i].BlockHash, + rs[i].BlockNumber.Uint64(), + signer, + txs[i], + int(rs[i].TransactionIndex), + ), nil + } + return api.GetTransactionReceipt(receiptCtx, hash) + } + } + } + } +} + // Sign calculates an ECDSA signature for: // keccak256("\x19Ethereum Signed Message:\n" + len(message) + message). 
// diff --git a/internal/ethapi/api_test.go b/internal/ethapi/api_test.go index d3278c04e7..aaa002b5ec 100644 --- a/internal/ethapi/api_test.go +++ b/internal/ethapi/api_test.go @@ -440,6 +440,19 @@ type testBackend struct { pending *types.Block pendingReceipts types.Receipts + + chainFeed *event.Feed + autoMine bool + + sentTx *types.Transaction + sentTxHash common.Hash + + syncDefaultTimeout time.Duration + syncMaxTimeout time.Duration +} + +func fakeBlockHash(txh common.Hash) common.Hash { + return crypto.Keccak256Hash([]byte("testblock"), txh.Bytes()) } func newTestBackend(t *testing.T, n int, gspec *core.Genesis, engine consensus.Engine, generator func(i int, b *core.BlockGen)) *testBackend { @@ -466,6 +479,7 @@ func newTestBackend(t *testing.T, n int, gspec *core.Genesis, engine consensus.E acc: acc, pending: blocks[n], pendingReceipts: receipts[n], + chainFeed: new(event.Feed), } return backend } @@ -587,19 +601,64 @@ func (b testBackend) GetEVM(ctx context.Context, state *state.StateDB, header *t return vm.NewEVM(context, state, b.chain.Config(), *vmConfig) } func (b testBackend) SubscribeChainEvent(ch chan<- core.ChainEvent) event.Subscription { - panic("implement me") + return b.chainFeed.Subscribe(ch) } func (b testBackend) SubscribeChainHeadEvent(ch chan<- core.ChainHeadEvent) event.Subscription { panic("implement me") } -func (b testBackend) SendTx(ctx context.Context, signedTx *types.Transaction) error { - panic("implement me") +func (b *testBackend) SendTx(ctx context.Context, tx *types.Transaction) error { + b.sentTx = tx + b.sentTxHash = tx.Hash() + + if b.autoMine { + // Synthesize a "mined" receipt at head+1 + num := b.chain.CurrentHeader().Number.Uint64() + 1 + receipt := &types.Receipt{ + TxHash: tx.Hash(), + Status: types.ReceiptStatusSuccessful, + BlockHash: fakeBlockHash(tx.Hash()), + BlockNumber: new(big.Int).SetUint64(num), + TransactionIndex: 0, + CumulativeGasUsed: 21000, + GasUsed: 21000, + } + // Broadcast a ChainEvent that includes 
the receipts and txs + b.chainFeed.Send(core.ChainEvent{ + Header: &types.Header{ + Number: new(big.Int).SetUint64(num), + }, + Receipts: types.Receipts{receipt}, + Transactions: types.Transactions{tx}, + }) + } + return nil } -func (b testBackend) GetCanonicalTransaction(txHash common.Hash) (bool, *types.Transaction, common.Hash, uint64, uint64) { +func (b *testBackend) GetCanonicalTransaction(txHash common.Hash) (bool, *types.Transaction, common.Hash, uint64, uint64) { + // Treat the auto-mined tx as canonically placed at head+1. + if b.autoMine && txHash == b.sentTxHash { + num := b.chain.CurrentHeader().Number.Uint64() + 1 + return true, b.sentTx, fakeBlockHash(txHash), num, 0 + } tx, blockHash, blockNumber, index := rawdb.ReadCanonicalTransaction(b.db, txHash) return tx != nil, tx, blockHash, blockNumber, index } -func (b testBackend) GetCanonicalReceipt(tx *types.Transaction, blockHash common.Hash, blockNumber, blockIndex uint64) (*types.Receipt, error) { +func (b *testBackend) GetCanonicalReceipt(tx *types.Transaction, blockHash common.Hash, blockNumber, blockIndex uint64) (*types.Receipt, error) { + if b.autoMine && tx != nil && tx.Hash() == b.sentTxHash && + blockHash == fakeBlockHash(tx.Hash()) && + blockIndex == 0 && + blockNumber == b.chain.CurrentHeader().Number.Uint64()+1 { + return &types.Receipt{ + Type: tx.Type(), + Status: types.ReceiptStatusSuccessful, + CumulativeGasUsed: 21000, + GasUsed: 21000, + EffectiveGasPrice: big.NewInt(1), + BlockHash: blockHash, + BlockNumber: new(big.Int).SetUint64(blockNumber), + TransactionIndex: 0, + TxHash: tx.Hash(), + }, nil + } return b.chain.GetCanonicalReceipt(tx, blockHash, blockNumber, blockIndex) } func (b testBackend) TxIndexDone() bool { @@ -3889,3 +3948,109 @@ func (b configTimeBackend) HeaderByNumber(_ context.Context, n rpc.BlockNumber) func (b configTimeBackend) CurrentHeader() *types.Header { return &types.Header{Time: b.time} } + +func (b *testBackend) RPCTxSyncDefaultTimeout() time.Duration { + if 
b.syncDefaultTimeout != 0 { + return b.syncDefaultTimeout + } + return 2 * time.Second +} +func (b *testBackend) RPCTxSyncMaxTimeout() time.Duration { + if b.syncMaxTimeout != 0 { + return b.syncMaxTimeout + } + return 5 * time.Minute +} +func (b *backendMock) RPCTxSyncDefaultTimeout() time.Duration { return 2 * time.Second } +func (b *backendMock) RPCTxSyncMaxTimeout() time.Duration { return 5 * time.Minute } + +func makeSignedRaw(t *testing.T, api *TransactionAPI, from, to common.Address, value *big.Int) (hexutil.Bytes, *types.Transaction) { + t.Helper() + + fillRes, err := api.FillTransaction(context.Background(), TransactionArgs{ + From: &from, + To: &to, + Value: (*hexutil.Big)(value), + }) + if err != nil { + t.Fatalf("FillTransaction failed: %v", err) + } + signRes, err := api.SignTransaction(context.Background(), argsFromTransaction(fillRes.Tx, from)) + if err != nil { + t.Fatalf("SignTransaction failed: %v", err) + } + return signRes.Raw, signRes.Tx +} + +// makeSelfSignedRaw is a convenience for a 0-ETH self-transfer. 
+func makeSelfSignedRaw(t *testing.T, api *TransactionAPI, addr common.Address) (hexutil.Bytes, *types.Transaction) { + return makeSignedRaw(t, api, addr, addr, big.NewInt(0)) +} + +func TestSendRawTransactionSync_Success(t *testing.T) { + t.Parallel() + genesis := &core.Genesis{ + Config: params.TestChainConfig, + Alloc: types.GenesisAlloc{}, + } + b := newTestBackend(t, 0, genesis, ethash.NewFaker(), nil) + b.autoMine = true // immediately “mines” the tx in-memory + + api := NewTransactionAPI(b, new(AddrLocker)) + + raw, _ := makeSelfSignedRaw(t, api, b.acc.Address) + + receipt, err := api.SendRawTransactionSync(context.Background(), raw, nil) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if receipt == nil { + t.Fatalf("expected non-nil receipt") + } + if _, ok := receipt["blockNumber"]; !ok { + t.Fatalf("expected blockNumber in receipt, got %#v", receipt) + } +} + +func TestSendRawTransactionSync_Timeout(t *testing.T) { + t.Parallel() + + genesis := &core.Genesis{ + Config: params.TestChainConfig, + Alloc: types.GenesisAlloc{}, + } + b := newTestBackend(t, 0, genesis, ethash.NewFaker(), nil) + b.autoMine = false // don't mine, should time out + + api := NewTransactionAPI(b, new(AddrLocker)) + + raw, _ := makeSelfSignedRaw(t, api, b.acc.Address) + + timeout := hexutil.Uint64(200) // 200ms + receipt, err := api.SendRawTransactionSync(context.Background(), raw, &timeout) + + if receipt != nil { + t.Fatalf("expected nil receipt, got %#v", receipt) + } + if err == nil { + t.Fatalf("expected timeout error, got nil") + } + // assert error shape & data (hash) + var de interface { + ErrorCode() int + ErrorData() interface{} + } + if !errors.As(err, &de) { + t.Fatalf("expected data error with code/data, got %T %v", err, err) + } + if de.ErrorCode() != errCodeTxSyncTimeout { + t.Fatalf("expected code %d, got %d", errCodeTxSyncTimeout, de.ErrorCode()) + } + tx := new(types.Transaction) + if e := tx.UnmarshalBinary(raw); e != nil { + t.Fatal(e) + } + if 
got, want := de.ErrorData(), tx.Hash().Hex(); got != want { + t.Fatalf("expected ErrorData=%s, got %v", want, got) + } +} diff --git a/internal/ethapi/backend.go b/internal/ethapi/backend.go index f709a1fcdc..af3d592b82 100644 --- a/internal/ethapi/backend.go +++ b/internal/ethapi/backend.go @@ -53,6 +53,8 @@ type Backend interface { RPCEVMTimeout() time.Duration // global timeout for eth_call over rpc: DoS protection RPCTxFeeCap() float64 // global tx fee cap for all transaction related APIs UnprotectedAllowed() bool // allows only for EIP155 transactions. + RPCTxSyncDefaultTimeout() time.Duration + RPCTxSyncMaxTimeout() time.Duration // Blockchain API SetHead(number uint64) diff --git a/internal/ethapi/errors.go b/internal/ethapi/errors.go index 154938fa0e..30711a0167 100644 --- a/internal/ethapi/errors.go +++ b/internal/ethapi/errors.go @@ -21,6 +21,7 @@ import ( "fmt" "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/vm" @@ -33,6 +34,11 @@ type revertError struct { reason string // revert reason hex encoded } +type txSyncTimeoutError struct { + msg string + hash common.Hash +} + // ErrorCode returns the JSON error code for a revert. 
// See: https://ethereum.org/en/developers/docs/apis/json-rpc/#error-codes func (e *revertError) ErrorCode() int { @@ -108,6 +114,7 @@ const ( errCodeInvalidParams = -32602 errCodeReverted = -32000 errCodeVMError = -32015 + errCodeTxSyncTimeout = 4 ) func txValidationError(err error) *invalidTxError { @@ -168,3 +175,7 @@ type blockGasLimitReachedError struct{ message string } func (e *blockGasLimitReachedError) Error() string { return e.message } func (e *blockGasLimitReachedError) ErrorCode() int { return errCodeBlockGasLimitReached } + +func (e *txSyncTimeoutError) Error() string { return e.msg } +func (e *txSyncTimeoutError) ErrorCode() int { return errCodeTxSyncTimeout } +func (e *txSyncTimeoutError) ErrorData() interface{} { return e.hash.Hex() } From b373d797d88e05ff318416c1ee80e87aadb59a13 Mon Sep 17 00:00:00 2001 From: Youssef Azzaoui Date: Thu, 16 Oct 2025 14:19:44 -0300 Subject: [PATCH 051/277] core/state: state copy bugfixes with Verkle Trees (#31696) This change addresses critical issues in the state object duplication process specific to Verkle trie implementations. Without these modifications, updates to state objects fail to propagate correctly through the trie structure after a statedb copy operation, leading to inaccuracies in the computation of the state root hash. 
--------- Co-authored-by: Guillaume Ballet <3272758+gballet@users.noreply.github.com> --- core/state/database.go | 2 ++ core/state/state_object.go | 15 ++++++++++++++- trie/transition.go | 3 ++- 3 files changed, 18 insertions(+), 2 deletions(-) diff --git a/core/state/database.go b/core/state/database.go index 3a0ac422ee..58d0ccfe82 100644 --- a/core/state/database.go +++ b/core/state/database.go @@ -302,6 +302,8 @@ func mustCopyTrie(t Trie) Trie { return t.Copy() case *trie.VerkleTrie: return t.Copy() + case *trie.TransitionTrie: + return t.Copy() default: panic(fmt.Errorf("unknown trie type %T", t)) } diff --git a/core/state/state_object.go b/core/state/state_object.go index 2938750503..fdeb4254c1 100644 --- a/core/state/state_object.go +++ b/core/state/state_object.go @@ -28,6 +28,7 @@ import ( "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/rlp" + "github.com/ethereum/go-ethereum/trie" "github.com/ethereum/go-ethereum/trie/trienode" "github.com/holiman/uint256" ) @@ -494,8 +495,20 @@ func (s *stateObject) deepCopy(db *StateDB) *stateObject { selfDestructed: s.selfDestructed, newContract: s.newContract, } - if s.trie != nil { + + switch s.trie.(type) { + case *trie.VerkleTrie: + // Verkle uses only one tree, and the copy has already been + // made in mustCopyTrie. + obj.trie = db.trie + case *trie.TransitionTrie: + // Same thing for the transition tree, since the MPT is + // read-only. 
+ obj.trie = db.trie + case *trie.StateTrie: obj.trie = mustCopyTrie(s.trie) + case nil: + // do nothing } return obj } diff --git a/trie/transition.go b/trie/transition.go index da49c6cdc2..c6eecd3937 100644 --- a/trie/transition.go +++ b/trie/transition.go @@ -211,7 +211,8 @@ func (t *TransitionTrie) UpdateStem(key []byte, values [][]byte) error { func (t *TransitionTrie) Copy() *TransitionTrie { return &TransitionTrie{ overlay: t.overlay.Copy(), - base: t.base.Copy(), + // base in immutable, so there is no need to copy it + base: t.base, storage: t.storage, } } From 0a2c21acd59767cb07c950ad86c67ee8b6db49ab Mon Sep 17 00:00:00 2001 From: ucwong Date: Fri, 17 Oct 2025 03:35:44 +0100 Subject: [PATCH 052/277] eth/ethconfig : fix eth generate config (#32929) --- eth/ethconfig/gen_config.go | 28 ++++++++++++++++++++-------- 1 file changed, 20 insertions(+), 8 deletions(-) diff --git a/eth/ethconfig/gen_config.go b/eth/ethconfig/gen_config.go index 6f6e541368..6f18dc34c5 100644 --- a/eth/ethconfig/gen_config.go +++ b/eth/ethconfig/gen_config.go @@ -58,10 +58,12 @@ func (c Config) MarshalTOML() (interface{}, error) { RPCGasCap uint64 RPCEVMTimeout time.Duration RPCTxFeeCap float64 - OverrideOsaka *uint64 `toml:",omitempty"` - OverrideBPO1 *uint64 `toml:",omitempty"` - OverrideBPO2 *uint64 `toml:",omitempty"` - OverrideVerkle *uint64 `toml:",omitempty"` + OverrideOsaka *uint64 `toml:",omitempty"` + OverrideBPO1 *uint64 `toml:",omitempty"` + OverrideBPO2 *uint64 `toml:",omitempty"` + OverrideVerkle *uint64 `toml:",omitempty"` + TxSyncDefaultTimeout time.Duration `toml:",omitempty"` + TxSyncMaxTimeout time.Duration `toml:",omitempty"` } var enc Config enc.Genesis = c.Genesis @@ -109,6 +111,8 @@ func (c Config) MarshalTOML() (interface{}, error) { enc.OverrideBPO1 = c.OverrideBPO1 enc.OverrideBPO2 = c.OverrideBPO2 enc.OverrideVerkle = c.OverrideVerkle + enc.TxSyncDefaultTimeout = c.TxSyncDefaultTimeout + enc.TxSyncMaxTimeout = c.TxSyncMaxTimeout return &enc, nil } @@ 
-156,10 +160,12 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error { RPCGasCap *uint64 RPCEVMTimeout *time.Duration RPCTxFeeCap *float64 - OverrideOsaka *uint64 `toml:",omitempty"` - OverrideBPO1 *uint64 `toml:",omitempty"` - OverrideBPO2 *uint64 `toml:",omitempty"` - OverrideVerkle *uint64 `toml:",omitempty"` + OverrideOsaka *uint64 `toml:",omitempty"` + OverrideBPO1 *uint64 `toml:",omitempty"` + OverrideBPO2 *uint64 `toml:",omitempty"` + OverrideVerkle *uint64 `toml:",omitempty"` + TxSyncDefaultTimeout *time.Duration `toml:",omitempty"` + TxSyncMaxTimeout *time.Duration `toml:",omitempty"` } var dec Config if err := unmarshal(&dec); err != nil { @@ -300,5 +306,11 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error { if dec.OverrideVerkle != nil { c.OverrideVerkle = dec.OverrideVerkle } + if dec.TxSyncDefaultTimeout != nil { + c.TxSyncDefaultTimeout = *dec.TxSyncDefaultTimeout + } + if dec.TxSyncMaxTimeout != nil { + c.TxSyncMaxTimeout = *dec.TxSyncMaxTimeout + } return nil } From 342285b13972da269160eec6239c76aa5a97aa35 Mon Sep 17 00:00:00 2001 From: rjl493456442 Date: Fri, 17 Oct 2025 13:34:35 +0800 Subject: [PATCH 053/277] eth, internal: add blob conversion for SendRawTransactionSync (#32930) --- internal/ethapi/api.go | 31 +++++++++++++++++++++---------- 1 file changed, 21 insertions(+), 10 deletions(-) diff --git a/internal/ethapi/api.go b/internal/ethapi/api.go index eb7a34474c..d7cf47468c 100644 --- a/internal/ethapi/api.go +++ b/internal/ethapi/api.go @@ -1675,9 +1675,20 @@ func (api *TransactionAPI) SendRawTransactionSync(ctx context.Context, input hex return nil, err } + // Convert legacy blob transaction proofs. 
+ // TODO: remove in go-ethereum v1.17.x + if sc := tx.BlobTxSidecar(); sc != nil { + exp := api.currentBlobSidecarVersion() + if sc.Version == types.BlobSidecarVersion0 && exp == types.BlobSidecarVersion1 { + if err := sc.ToV1(); err != nil { + return nil, fmt.Errorf("blob sidecar conversion failed: %v", err) + } + tx = tx.WithBlobTxSidecar(sc) + } + } + ch := make(chan core.ChainEvent, 128) sub := api.b.SubscribeChainEvent(ch) - subErrCh := sub.Err() defer sub.Unsubscribe() hash, err := SubmitTransaction(ctx, api.b, tx) @@ -1685,10 +1696,11 @@ func (api *TransactionAPI) SendRawTransactionSync(ctx context.Context, input hex return nil, err } - maxTimeout := api.b.RPCTxSyncMaxTimeout() - defaultTimeout := api.b.RPCTxSyncDefaultTimeout() - - timeout := defaultTimeout + var ( + maxTimeout = api.b.RPCTxSyncMaxTimeout() + defaultTimeout = api.b.RPCTxSyncDefaultTimeout() + timeout = defaultTimeout + ) if timeoutMs != nil && *timeoutMs > 0 { req := time.Duration(*timeoutMs) * time.Millisecond if req > maxTimeout { @@ -1697,7 +1709,6 @@ func (api *TransactionAPI) SendRawTransactionSync(ctx context.Context, input hex timeout = req } } - receiptCtx, cancel := context.WithTimeout(ctx, timeout) defer cancel() @@ -1706,19 +1717,20 @@ func (api *TransactionAPI) SendRawTransactionSync(ctx context.Context, input hex return r, nil } + // Monitor the receipts for { select { case <-receiptCtx.Done(): // If server-side wait window elapsed, return the structured timeout. 
if errors.Is(receiptCtx.Err(), context.DeadlineExceeded) { return nil, &txSyncTimeoutError{ - msg: fmt.Sprintf("The transaction was added to the transaction pool but wasn't processed in %v.", timeout), + msg: fmt.Sprintf("The transaction was added to the transaction pool but wasn't processed in %v", timeout), hash: hash, } } return nil, receiptCtx.Err() - case err, ok := <-subErrCh: + case err, ok := <-sub.Err(): if !ok { return nil, errSubClosed } @@ -1728,8 +1740,7 @@ func (api *TransactionAPI) SendRawTransactionSync(ctx context.Context, input hex if !ok { return nil, errSubClosed } - rs := ev.Receipts - txs := ev.Transactions + rs, txs := ev.Receipts, ev.Transactions if len(rs) == 0 || len(rs) != len(txs) { continue } From 0ec63272bf6107d0eb5d256a2f35071543693579 Mon Sep 17 00:00:00 2001 From: CertiK <138698582+CertiK-Geth@users.noreply.github.com> Date: Sat, 18 Oct 2025 19:54:56 +0800 Subject: [PATCH 054/277] cmd/utils: use maximum uint64 value for receipt chain insertion (#32934) --- cmd/utils/cmd.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/cmd/utils/cmd.go b/cmd/utils/cmd.go index db7bd691d8..3e337a3d00 100644 --- a/cmd/utils/cmd.go +++ b/cmd/utils/cmd.go @@ -25,6 +25,7 @@ import ( "errors" "fmt" "io" + "math" "math/big" "os" "os/signal" @@ -311,7 +312,7 @@ func ImportHistory(chain *core.BlockChain, dir string, network string) error { return fmt.Errorf("error reading receipts %d: %w", it.Number(), err) } encReceipts := types.EncodeBlockReceiptLists([]types.Receipts{receipts}) - if _, err := chain.InsertReceiptChain([]*types.Block{block}, encReceipts, 2^64-1); err != nil { + if _, err := chain.InsertReceiptChain([]*types.Block{block}, encReceipts, math.MaxUint64); err != nil { return fmt.Errorf("error inserting body %d: %w", it.Number(), err) } imported += 1 From a9e66262aff1d44511d585c570daffc9304616c6 Mon Sep 17 00:00:00 2001 From: Bosul Mun Date: Mon, 20 Oct 2025 11:10:58 +0900 Subject: [PATCH 055/277] eth/fetcher: add metrics for 
tracking slow peers (#32964) This PR introduces two new metrics to monitor slow peers - One tracks the number of slow peers. - The other measures the time it takes for those peers to become "unfrozen" These metrics help with monitoring and evaluating the need for future optimization of the transaction fetcher and peer management, for example i n peer scoring and prioritization. Additionally, this PR moves the fetcher metrics into a separate file, `eth/fetcher/metrics.go`. --- eth/fetcher/metrics.go | 59 +++++++++++++++++++++++++++++++++++++++ eth/fetcher/tx_fetcher.go | 39 ++++++-------------------- 2 files changed, 68 insertions(+), 30 deletions(-) create mode 100644 eth/fetcher/metrics.go diff --git a/eth/fetcher/metrics.go b/eth/fetcher/metrics.go new file mode 100644 index 0000000000..fd1678dd30 --- /dev/null +++ b/eth/fetcher/metrics.go @@ -0,0 +1,59 @@ +// Copyright 2025 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see Date: Mon, 20 Oct 2025 11:26:55 +0900 Subject: [PATCH 056/277] eth/fetcher: remove dangling peers from alternates (#32947) This PR removes dangling peers in `alternates` map In the current code, a dropped peer is removed from alternates for only the specific transaction hash it was requesting. 
If that peer is listed as an alternate for other transaction hashes, those entries still stick around in alternates/announced even though that peer already got dropped. --- eth/fetcher/tx_fetcher.go | 6 +++- eth/fetcher/tx_fetcher_test.go | 50 ++++++++++++++++++++++++++++++++++ 2 files changed, 55 insertions(+), 1 deletion(-) diff --git a/eth/fetcher/tx_fetcher.go b/eth/fetcher/tx_fetcher.go index 5ba72e6b01..d919ac8a5f 100644 --- a/eth/fetcher/tx_fetcher.go +++ b/eth/fetcher/tx_fetcher.go @@ -795,6 +795,10 @@ func (f *TxFetcher) loop() { if len(f.announced[hash]) == 0 { delete(f.announced, hash) } + delete(f.alternates[hash], drop.peer) + if len(f.alternates[hash]) == 0 { + delete(f.alternates, hash) + } } delete(f.announces, drop.peer) } @@ -858,7 +862,7 @@ func (f *TxFetcher) rescheduleWait(timer *mclock.Timer, trigger chan struct{}) { // This method is a bit "flaky" "by design". In theory the timeout timer only ever // should be rescheduled if some request is pending. In practice, a timeout will // cause the timer to be rescheduled every 5 secs (until the peer comes through or -// disconnects). This is a limitation of the fetcher code because we don't trac +// disconnects). This is a limitation of the fetcher code because we don't track // pending requests and timed out requests separately. Without double tracking, if // we simply didn't reschedule the timer on all-timeout then the timer would never // be set again since len(request) > 0 => something's running. 
diff --git a/eth/fetcher/tx_fetcher_test.go b/eth/fetcher/tx_fetcher_test.go index 0f05a1c995..bb41f62932 100644 --- a/eth/fetcher/tx_fetcher_test.go +++ b/eth/fetcher/tx_fetcher_test.go @@ -1858,6 +1858,56 @@ func TestBlobTransactionAnnounce(t *testing.T) { }) } +func TestTransactionFetcherDropAlternates(t *testing.T) { + testTransactionFetcherParallel(t, txFetcherTest{ + init: func() *TxFetcher { + return NewTxFetcher( + func(common.Hash) bool { return false }, + func(txs []*types.Transaction) []error { + return make([]error, len(txs)) + }, + func(string, []common.Hash) error { return nil }, + nil, + ) + }, + steps: []interface{}{ + doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[0]}, types: []byte{testTxs[0].Type()}, sizes: []uint32{uint32(testTxs[0].Size())}}, + doWait{time: txArriveTimeout, step: true}, + doTxNotify{peer: "B", hashes: []common.Hash{testTxsHashes[0]}, types: []byte{testTxs[0].Type()}, sizes: []uint32{uint32(testTxs[0].Size())}}, + + isScheduled{ + tracking: map[string][]announce{ + "A": { + {testTxsHashes[0], testTxs[0].Type(), uint32(testTxs[0].Size())}, + }, + "B": { + {testTxsHashes[0], testTxs[0].Type(), uint32(testTxs[0].Size())}, + }, + }, + fetching: map[string][]common.Hash{ + "A": {testTxsHashes[0]}, + }, + }, + doDrop("B"), + + isScheduled{ + tracking: map[string][]announce{ + "A": { + {testTxsHashes[0], testTxs[0].Type(), uint32(testTxs[0].Size())}, + }, + }, + fetching: map[string][]common.Hash{ + "A": {testTxsHashes[0]}, + }, + }, + doDrop("A"), + isScheduled{ + tracking: nil, fetching: nil, + }, + }, + }) +} + func testTransactionFetcherParallel(t *testing.T, tt txFetcherTest) { t.Parallel() testTransactionFetcher(t, tt) From 11c0fb98af8ba14deb6abe77b357cbe927ba05ba Mon Sep 17 00:00:00 2001 From: hero5512 Date: Sun, 19 Oct 2025 22:29:46 -0400 Subject: [PATCH 057/277] triedb/pathdb: fix index out of range panic in decodeSingle (#32937) Fixes TestCorruptedKeySection flaky test failure. 
https://github.com/ethereum/go-ethereum/actions/runs/18600235182/job/53037084761?pr=32920 --- triedb/pathdb/history_trienode.go | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/triedb/pathdb/history_trienode.go b/triedb/pathdb/history_trienode.go index f5eb590a9a..2f31238612 100644 --- a/triedb/pathdb/history_trienode.go +++ b/triedb/pathdb/history_trienode.go @@ -370,11 +370,15 @@ func decodeSingle(keySection []byte, onValue func([]byte, int, int) error) ([]st for keyOff < keyLimit { // Validate the key and value offsets within the single trie data chunk if items%trienodeDataBlockRestartLen == 0 { - if keyOff != int(keyOffsets[items/trienodeDataBlockRestartLen]) { - return nil, fmt.Errorf("key offset is not matched, recorded: %d, want: %d", keyOffsets[items/trienodeDataBlockRestartLen], keyOff) + restartIndex := items / trienodeDataBlockRestartLen + if restartIndex >= len(keyOffsets) { + return nil, fmt.Errorf("restart index out of range: %d, available restarts: %d", restartIndex, len(keyOffsets)) } - if valOff != int(valOffsets[items/trienodeDataBlockRestartLen]) { - return nil, fmt.Errorf("value offset is not matched, recorded: %d, want: %d", valOffsets[items/trienodeDataBlockRestartLen], valOff) + if keyOff != int(keyOffsets[restartIndex]) { + return nil, fmt.Errorf("key offset is not matched, recorded: %d, want: %d", keyOffsets[restartIndex], keyOff) + } + if valOff != int(valOffsets[restartIndex]) { + return nil, fmt.Errorf("value offset is not matched, recorded: %d, want: %d", valOffsets[restartIndex], valOff) } } // Resolve the entry from key section From 69df6bb8d59027f617c6bf0a24f7af17c06cae39 Mon Sep 17 00:00:00 2001 From: cui Date: Mon, 20 Oct 2025 10:35:14 +0800 Subject: [PATCH 058/277] core/types: prealloc map in HashDifference as in TxDifference (#32946) --- core/types/transaction.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/types/transaction.go b/core/types/transaction.go index 
be8e90364e..e98563b85f 100644 --- a/core/types/transaction.go +++ b/core/types/transaction.go @@ -648,7 +648,7 @@ func TxDifference(a, b Transactions) Transactions { func HashDifference(a, b []common.Hash) []common.Hash { keep := make([]common.Hash, 0, len(a)) - remove := make(map[common.Hash]struct{}) + remove := make(map[common.Hash]struct{}, len(b)) for _, hash := range b { remove[hash] = struct{}{} } From cfb311148c5227e5dbab750aaa37f1abcbfd3beb Mon Sep 17 00:00:00 2001 From: maskpp Date: Mon, 20 Oct 2025 16:18:17 +0800 Subject: [PATCH 059/277] eth/filters: avoid rebuild the hash map multi times (#32965) --- eth/filters/filter.go | 22 ++-------------------- eth/filters/filter_system.go | 12 ++++++++---- 2 files changed, 10 insertions(+), 24 deletions(-) diff --git a/eth/filters/filter.go b/eth/filters/filter.go index 02399bc801..422e5cd67b 100644 --- a/eth/filters/filter.go +++ b/eth/filters/filter.go @@ -563,7 +563,7 @@ type ReceiptWithTx struct { // In addition to returning receipts, it also returns the corresponding transactions. // This is because receipts only contain low-level data, while user-facing data // may require additional information from the Transaction. -func filterReceipts(txHashes []common.Hash, ev core.ChainEvent) []*ReceiptWithTx { +func filterReceipts(txHashes map[common.Hash]bool, ev core.ChainEvent) []*ReceiptWithTx { var ret []*ReceiptWithTx receipts := ev.Receipts @@ -583,27 +583,9 @@ func filterReceipts(txHashes []common.Hash, ev core.ChainEvent) []*ReceiptWithTx Transaction: txs[i], } } - } else if len(txHashes) == 1 { - // Filter by single transaction hash. - // This is a common case, so we distinguish it from filtering by multiple tx hashes and made a small optimization. - for i, receipt := range receipts { - if receipt.TxHash == txHashes[0] { - ret = append(ret, &ReceiptWithTx{ - Receipt: receipt, - Transaction: txs[i], - }) - break - } - } } else { - // Filter by multiple transaction hashes. 
- txHashMap := make(map[common.Hash]bool, len(txHashes)) - for _, hash := range txHashes { - txHashMap[hash] = true - } - for i, receipt := range receipts { - if txHashMap[receipt.TxHash] { + if txHashes[receipt.TxHash] { ret = append(ret, &ReceiptWithTx{ Receipt: receipt, Transaction: txs[i], diff --git a/eth/filters/filter_system.go b/eth/filters/filter_system.go index 02783fa5ec..f10e6a277b 100644 --- a/eth/filters/filter_system.go +++ b/eth/filters/filter_system.go @@ -185,9 +185,9 @@ type subscription struct { txs chan []*types.Transaction headers chan *types.Header receipts chan []*ReceiptWithTx - txHashes []common.Hash // contains transaction hashes for transactionReceipts subscription filtering - installed chan struct{} // closed when the filter is installed - err chan error // closed when the filter is uninstalled + txHashes map[common.Hash]bool // contains transaction hashes for transactionReceipts subscription filtering + installed chan struct{} // closed when the filter is installed + err chan error // closed when the filter is uninstalled } // EventSystem creates subscriptions, processes events and broadcasts them to the @@ -403,6 +403,10 @@ func (es *EventSystem) SubscribePendingTxs(txs chan []*types.Transaction) *Subsc // transactions when they are included in blocks. If txHashes is provided, only receipts // for those specific transaction hashes will be delivered. 
func (es *EventSystem) SubscribeTransactionReceipts(txHashes []common.Hash, receipts chan []*ReceiptWithTx) *Subscription { + hashSet := make(map[common.Hash]bool) + for _, h := range txHashes { + hashSet[h] = true + } sub := &subscription{ id: rpc.NewID(), typ: TransactionReceiptsSubscription, @@ -411,7 +415,7 @@ func (es *EventSystem) SubscribeTransactionReceipts(txHashes []common.Hash, rece txs: make(chan []*types.Transaction), headers: make(chan *types.Header), receipts: receipts, - txHashes: txHashes, + txHashes: hashSet, installed: make(chan struct{}), err: make(chan error), } From b6a4ac99610328410881a9ad57b4c02361b0056c Mon Sep 17 00:00:00 2001 From: jwasinger Date: Mon, 20 Oct 2025 17:51:29 +0800 Subject: [PATCH 060/277] core/vm: don't call SetCode after contract creation if initcode didn't return anything (#32916) The code change is a noop here, and the tracing hook shouldn't be invoked if the account code doesn't actually change. --- core/vm/evm.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/core/vm/evm.go b/core/vm/evm.go index 88ef1cf121..8975c791c8 100644 --- a/core/vm/evm.go +++ b/core/vm/evm.go @@ -601,7 +601,9 @@ func (evm *EVM) initNewContract(contract *Contract, address common.Address) ([]b } } - evm.StateDB.SetCode(address, ret, tracing.CodeChangeContractCreation) + if len(ret) > 0 { + evm.StateDB.SetCode(address, ret, tracing.CodeChangeContractCreation) + } return ret, nil } From b1809d13d14ee60f35dfdfec710f5baffaec0b98 Mon Sep 17 00:00:00 2001 From: Guillaume Ballet <3272758+gballet@users.noreply.github.com> Date: Mon, 20 Oct 2025 11:52:02 +0200 Subject: [PATCH 061/277] cmd/keeper: use the ziren keccak precompile (#32816) Uses the go module's `replace` directive to delegate keccak computation to precompiles. This is still in draft because it needs more testing. Also, it relies on a PR that I created, that hasn't been merged yet. 
_Note that this PR doesn't implement the stateful keccak state structure, and it reverts to the current behavior. This is a bit silly since this is what is used in the tree root computation. The runtime doesn't currently export the sponge. I will see if I can fix that in a further PR, but it is going to take more time. In the meantime, this is a useful first step_ --- cmd/keeper/getpayload_ziren.go | 2 +- cmd/keeper/go.mod | 7 ++-- cmd/keeper/go.sum | 4 +-- crypto/crypto.go | 48 ------------------------- crypto/keccak.go | 63 +++++++++++++++++++++++++++++++++ crypto/keccak_ziren.go | 64 ++++++++++++++++++++++++++++++++++ go.mod | 1 + go.sum | 2 ++ 8 files changed, 135 insertions(+), 56 deletions(-) create mode 100644 crypto/keccak.go create mode 100644 crypto/keccak_ziren.go diff --git a/cmd/keeper/getpayload_ziren.go b/cmd/keeper/getpayload_ziren.go index 11c5845bcc..bc373db94f 100644 --- a/cmd/keeper/getpayload_ziren.go +++ b/cmd/keeper/getpayload_ziren.go @@ -19,7 +19,7 @@ package main import ( - zkruntime "github.com/zkMIPS/zkMIPS/crates/go-runtime/zkm_runtime" + zkruntime "github.com/ProjectZKM/Ziren/crates/go-runtime/zkvm_runtime" ) // getInput reads the input payload from the zkVM runtime environment. 
diff --git a/cmd/keeper/go.mod b/cmd/keeper/go.mod index 16094d16b1..2b12297a7a 100644 --- a/cmd/keeper/go.mod +++ b/cmd/keeper/go.mod @@ -3,8 +3,8 @@ module github.com/ethereum/go-ethereum/cmd/keeper go 1.24.0 require ( + github.com/ProjectZKM/Ziren/crates/go-runtime/zkvm_runtime v0.0.0-20251001021608-1fe7b43fc4d6 github.com/ethereum/go-ethereum v0.0.0-00010101000000-000000000000 - github.com/zkMIPS/zkMIPS/crates/go-runtime/zkm_runtime v0.0.0-20250915074013-fbc07aa2c6f5 ) require ( @@ -43,7 +43,4 @@ require ( gopkg.in/yaml.v2 v2.4.0 // indirect ) -replace ( - github.com/ethereum/go-ethereum => ../../ - github.com/zkMIPS/zkMIPS/crates/go-runtime/zkm_runtime => github.com/weilzkm/zkMIPS/crates/go-runtime/zkvm_runtime v0.0.0-20250915074013-fbc07aa2c6f5 -) +replace github.com/ethereum/go-ethereum => ../../ diff --git a/cmd/keeper/go.sum b/cmd/keeper/go.sum index 3eaef469dc..b280a368d0 100644 --- a/cmd/keeper/go.sum +++ b/cmd/keeper/go.sum @@ -1,5 +1,7 @@ github.com/DataDog/zstd v1.4.5 h1:EndNeuB0l9syBZhut0wns3gV1hL8zX8LIu6ZiVHWLIQ= github.com/DataDog/zstd v1.4.5/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo= +github.com/ProjectZKM/Ziren/crates/go-runtime/zkvm_runtime v0.0.0-20251001021608-1fe7b43fc4d6 h1:1zYrtlhrZ6/b6SAjLSfKzWtdgqK0U+HtH/VcBWh1BaU= +github.com/ProjectZKM/Ziren/crates/go-runtime/zkvm_runtime v0.0.0-20251001021608-1fe7b43fc4d6/go.mod h1:ioLG6R+5bUSO1oeGSDxOV3FADARuMoytZCSX6MEMQkI= github.com/StackExchange/wmi v1.2.1 h1:VIkavFPXSjcnS+O8yTq7NI32k0R5Aj+v39y29VYDOSA= github.com/StackExchange/wmi v1.2.1/go.mod h1:rcmrprowKIVzvc+NUiLncP2uuArMWLCbu9SBzvHz7e8= github.com/VictoriaMetrics/fastcache v1.13.0 h1:AW4mheMR5Vd9FkAPUv+NH6Nhw+fmbTMGMsNAoA/+4G0= @@ -117,8 +119,6 @@ github.com/tklauser/go-sysconf v0.3.12 h1:0QaGUFOdQaIVdPgfITYzaTegZvdCjmYO52cSFA github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI= github.com/tklauser/numcpus v0.6.1 h1:ng9scYS7az0Bk4OZLvrNXNSAO2Pxr1XXRAPyjhIx+Fk= github.com/tklauser/numcpus 
v0.6.1/go.mod h1:1XfjsgE2zo8GVw7POkMbHENHzVg3GzmoZ9fESEdAacY= -github.com/weilzkm/zkMIPS/crates/go-runtime/zkvm_runtime v0.0.0-20250915074013-fbc07aa2c6f5 h1:MxKlbmI7Dta6O6Nsc9OAer/rOltjoL11CVLMqCiYnxU= -github.com/weilzkm/zkMIPS/crates/go-runtime/zkvm_runtime v0.0.0-20250915074013-fbc07aa2c6f5/go.mod h1:zk/SUgiiVz2U1ufZ+yM2MHPbD93W25KH5zK3qAxXbT4= golang.org/x/crypto v0.36.0 h1:AnAEvhDddvBdpY+uR+MyHmuZzzNqXSe/GvuDeob5L34= golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc= golang.org/x/exp v0.0.0-20230626212559-97b1e661b5df h1:UA2aFVmmsIlefxMk29Dp2juaUSth8Pyn3Tq5Y5mJGME= diff --git a/crypto/crypto.go b/crypto/crypto.go index 09596c05ce..db6b6ee071 100644 --- a/crypto/crypto.go +++ b/crypto/crypto.go @@ -28,12 +28,10 @@ import ( "io" "math/big" "os" - "sync" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/math" "github.com/ethereum/go-ethereum/rlp" - "golang.org/x/crypto/sha3" ) // SignatureLength indicates the byte length required to carry a signature with recovery id. @@ -69,17 +67,6 @@ type KeccakState interface { Read([]byte) (int, error) } -// NewKeccakState creates a new KeccakState -func NewKeccakState() KeccakState { - return sha3.NewLegacyKeccak256().(KeccakState) -} - -var hasherPool = sync.Pool{ - New: func() any { - return sha3.NewLegacyKeccak256().(KeccakState) - }, -} - // HashData hashes the provided data using the KeccakState and returns a 32 byte hash func HashData(kh KeccakState, data []byte) (h common.Hash) { kh.Reset() @@ -88,41 +75,6 @@ func HashData(kh KeccakState, data []byte) (h common.Hash) { return h } -// Keccak256 calculates and returns the Keccak256 hash of the input data. 
-func Keccak256(data ...[]byte) []byte { - b := make([]byte, 32) - d := hasherPool.Get().(KeccakState) - d.Reset() - for _, b := range data { - d.Write(b) - } - d.Read(b) - hasherPool.Put(d) - return b -} - -// Keccak256Hash calculates and returns the Keccak256 hash of the input data, -// converting it to an internal Hash data structure. -func Keccak256Hash(data ...[]byte) (h common.Hash) { - d := hasherPool.Get().(KeccakState) - d.Reset() - for _, b := range data { - d.Write(b) - } - d.Read(h[:]) - hasherPool.Put(d) - return h -} - -// Keccak512 calculates and returns the Keccak512 hash of the input data. -func Keccak512(data ...[]byte) []byte { - d := sha3.NewLegacyKeccak512() - for _, b := range data { - d.Write(b) - } - return d.Sum(nil) -} - // CreateAddress creates an ethereum address given the bytes and the nonce func CreateAddress(b common.Address, nonce uint64) common.Address { data, _ := rlp.EncodeToBytes([]interface{}{b, nonce}) diff --git a/crypto/keccak.go b/crypto/keccak.go new file mode 100644 index 0000000000..0ad79a63c1 --- /dev/null +++ b/crypto/keccak.go @@ -0,0 +1,63 @@ +// Copyright 2025 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . 
+ +//go:build !ziren + +package crypto + +import ( + "sync" + + "github.com/ethereum/go-ethereum/common" + "golang.org/x/crypto/sha3" +) + +// NewKeccakState creates a new KeccakState +func NewKeccakState() KeccakState { + return sha3.NewLegacyKeccak256().(KeccakState) +} + +var hasherPool = sync.Pool{ + New: func() any { + return sha3.NewLegacyKeccak256().(KeccakState) + }, +} + +// Keccak256 calculates and returns the Keccak256 hash of the input data. +func Keccak256(data ...[]byte) []byte { + b := make([]byte, 32) + d := hasherPool.Get().(KeccakState) + d.Reset() + for _, b := range data { + d.Write(b) + } + d.Read(b) + hasherPool.Put(d) + return b +} + +// Keccak256Hash calculates and returns the Keccak256 hash of the input data, +// converting it to an internal Hash data structure. +func Keccak256Hash(data ...[]byte) (h common.Hash) { + d := hasherPool.Get().(KeccakState) + d.Reset() + for _, b := range data { + d.Write(b) + } + d.Read(h[:]) + hasherPool.Put(d) + return h +} diff --git a/crypto/keccak_ziren.go b/crypto/keccak_ziren.go new file mode 100644 index 0000000000..033c0ec42c --- /dev/null +++ b/crypto/keccak_ziren.go @@ -0,0 +1,64 @@ +// Copyright 2025 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . 
+ +//go:build ziren + +package crypto + +import ( + "github.com/ProjectZKM/Ziren/crates/go-runtime/zkvm_runtime" + "github.com/ethereum/go-ethereum/common" + "golang.org/x/crypto/sha3" +) + +// NewKeccakState creates a new KeccakState +// For now, we fallback to the original implementation for the stateful interface. +// TODO: Implement a stateful wrapper around zkvm_runtime.Keccak256 if needed. +func NewKeccakState() KeccakState { + return sha3.NewLegacyKeccak256().(KeccakState) +} + +// Keccak256 calculates and returns the Keccak256 hash using the Ziren zkvm_runtime implementation. +func Keccak256(data ...[]byte) []byte { + // For multiple data chunks, concatenate them + if len(data) == 0 { + result := zkvm_runtime.Keccak256(nil) + return result[:] + } + if len(data) == 1 { + result := zkvm_runtime.Keccak256(data[0]) + return result[:] + } + + // Concatenate multiple data chunks + var totalLen int + for _, d := range data { + totalLen += len(d) + } + + combined := make([]byte, 0, totalLen) + for _, d := range data { + combined = append(combined, d...) + } + + result := zkvm_runtime.Keccak256(combined) + return result[:] +} + +// Keccak256Hash calculates and returns the Keccak256 hash as a Hash using the Ziren zkvm_runtime implementation. 
+func Keccak256Hash(data ...[]byte) common.Hash { + return common.Hash(Keccak256(data...)) +} diff --git a/go.mod b/go.mod index c91cc81d21..ae5e4cc114 100644 --- a/go.mod +++ b/go.mod @@ -81,6 +81,7 @@ require ( github.com/Azure/azure-sdk-for-go/sdk/azcore v1.7.0 // indirect github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0 // indirect github.com/DataDog/zstd v1.4.5 // indirect + github.com/ProjectZKM/Ziren/crates/go-runtime/zkvm_runtime v0.0.0-20251001021608-1fe7b43fc4d6 github.com/StackExchange/wmi v1.2.1 // indirect github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.13 // indirect github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.43 // indirect diff --git a/go.sum b/go.sum index 779bcde846..8122f4b548 100644 --- a/go.sum +++ b/go.sum @@ -14,6 +14,8 @@ github.com/DataDog/zstd v1.4.5 h1:EndNeuB0l9syBZhut0wns3gV1hL8zX8LIu6ZiVHWLIQ= github.com/DataDog/zstd v1.4.5/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo= github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= +github.com/ProjectZKM/Ziren/crates/go-runtime/zkvm_runtime v0.0.0-20251001021608-1fe7b43fc4d6 h1:1zYrtlhrZ6/b6SAjLSfKzWtdgqK0U+HtH/VcBWh1BaU= +github.com/ProjectZKM/Ziren/crates/go-runtime/zkvm_runtime v0.0.0-20251001021608-1fe7b43fc4d6/go.mod h1:ioLG6R+5bUSO1oeGSDxOV3FADARuMoytZCSX6MEMQkI= github.com/StackExchange/wmi v1.2.1 h1:VIkavFPXSjcnS+O8yTq7NI32k0R5Aj+v39y29VYDOSA= github.com/StackExchange/wmi v1.2.1/go.mod h1:rcmrprowKIVzvc+NUiLncP2uuArMWLCbu9SBzvHz7e8= github.com/VictoriaMetrics/fastcache v1.13.0 h1:AW4mheMR5Vd9FkAPUv+NH6Nhw+fmbTMGMsNAoA/+4G0= From b81f03e8ff375b7c8c444c6c4b2b7fce22bd0ac1 Mon Sep 17 00:00:00 2001 From: Marius van der Wijden Date: Mon, 20 Oct 2025 17:24:07 +0200 Subject: [PATCH 062/277] params: enable osaka on dev mode (#32917) enables the osaka fork on dev mode --------- Co-authored-by: Sina Mahmoodi --- core/genesis.go | 35 
++++++++++++++++++----------------- params/config.go | 2 ++ 2 files changed, 20 insertions(+), 17 deletions(-) diff --git a/core/genesis.go b/core/genesis.go index 2fd044c70a..d0d490874d 100644 --- a/core/genesis.go +++ b/core/genesis.go @@ -669,23 +669,24 @@ func DeveloperGenesisBlock(gasLimit uint64, faucet *common.Address) *Genesis { BaseFee: big.NewInt(params.InitialBaseFee), Difficulty: big.NewInt(0), Alloc: map[common.Address]types.Account{ - common.BytesToAddress([]byte{0x01}): {Balance: big.NewInt(1)}, // ECRecover - common.BytesToAddress([]byte{0x02}): {Balance: big.NewInt(1)}, // SHA256 - common.BytesToAddress([]byte{0x03}): {Balance: big.NewInt(1)}, // RIPEMD - common.BytesToAddress([]byte{0x04}): {Balance: big.NewInt(1)}, // Identity - common.BytesToAddress([]byte{0x05}): {Balance: big.NewInt(1)}, // ModExp - common.BytesToAddress([]byte{0x06}): {Balance: big.NewInt(1)}, // ECAdd - common.BytesToAddress([]byte{0x07}): {Balance: big.NewInt(1)}, // ECScalarMul - common.BytesToAddress([]byte{0x08}): {Balance: big.NewInt(1)}, // ECPairing - common.BytesToAddress([]byte{0x09}): {Balance: big.NewInt(1)}, // BLAKE2b - common.BytesToAddress([]byte{0x0a}): {Balance: big.NewInt(1)}, // KZGPointEval - common.BytesToAddress([]byte{0x0b}): {Balance: big.NewInt(1)}, // BLSG1Add - common.BytesToAddress([]byte{0x0c}): {Balance: big.NewInt(1)}, // BLSG1MultiExp - common.BytesToAddress([]byte{0x0d}): {Balance: big.NewInt(1)}, // BLSG2Add - common.BytesToAddress([]byte{0x0e}): {Balance: big.NewInt(1)}, // BLSG2MultiExp - common.BytesToAddress([]byte{0x0f}): {Balance: big.NewInt(1)}, // BLSG1Pairing - common.BytesToAddress([]byte{0x10}): {Balance: big.NewInt(1)}, // BLSG1MapG1 - common.BytesToAddress([]byte{0x11}): {Balance: big.NewInt(1)}, // BLSG2MapG2 + common.BytesToAddress([]byte{0x01}): {Balance: big.NewInt(1)}, // ECRecover + common.BytesToAddress([]byte{0x02}): {Balance: big.NewInt(1)}, // SHA256 + common.BytesToAddress([]byte{0x03}): {Balance: big.NewInt(1)}, // 
RIPEMD + common.BytesToAddress([]byte{0x04}): {Balance: big.NewInt(1)}, // Identity + common.BytesToAddress([]byte{0x05}): {Balance: big.NewInt(1)}, // ModExp + common.BytesToAddress([]byte{0x06}): {Balance: big.NewInt(1)}, // ECAdd + common.BytesToAddress([]byte{0x07}): {Balance: big.NewInt(1)}, // ECScalarMul + common.BytesToAddress([]byte{0x08}): {Balance: big.NewInt(1)}, // ECPairing + common.BytesToAddress([]byte{0x09}): {Balance: big.NewInt(1)}, // BLAKE2b + common.BytesToAddress([]byte{0x0a}): {Balance: big.NewInt(1)}, // KZGPointEval + common.BytesToAddress([]byte{0x0b}): {Balance: big.NewInt(1)}, // BLSG1Add + common.BytesToAddress([]byte{0x0c}): {Balance: big.NewInt(1)}, // BLSG1MultiExp + common.BytesToAddress([]byte{0x0d}): {Balance: big.NewInt(1)}, // BLSG2Add + common.BytesToAddress([]byte{0x0e}): {Balance: big.NewInt(1)}, // BLSG2MultiExp + common.BytesToAddress([]byte{0x0f}): {Balance: big.NewInt(1)}, // BLSG1Pairing + common.BytesToAddress([]byte{0x10}): {Balance: big.NewInt(1)}, // BLSG1MapG1 + common.BytesToAddress([]byte{0x11}): {Balance: big.NewInt(1)}, // BLSG2MapG2 + common.BytesToAddress([]byte{0x1, 00}): {Balance: big.NewInt(1)}, // P256Verify // Pre-deploy system contracts params.BeaconRootsAddress: {Nonce: 1, Code: params.BeaconRootsCode, Balance: common.Big0}, params.HistoryStorageAddress: {Nonce: 1, Code: params.HistoryStorageCode, Balance: common.Big0}, diff --git a/params/config.go b/params/config.go index e796d75535..06288575ae 100644 --- a/params/config.go +++ b/params/config.go @@ -225,9 +225,11 @@ var ( CancunTime: newUint64(0), TerminalTotalDifficulty: big.NewInt(0), PragueTime: newUint64(0), + OsakaTime: newUint64(0), BlobScheduleConfig: &BlobScheduleConfig{ Cancun: DefaultCancunBlobConfig, Prague: DefaultPragueBlobConfig, + Osaka: DefaultOsakaBlobConfig, }, } From d73bfeb3d91753f6692092650770a8ed79f8e270 Mon Sep 17 00:00:00 2001 From: Kyrin Date: Tue, 21 Oct 2025 15:41:38 +0800 Subject: [PATCH 063/277] core/txpool: Initialize 
journal writer for tx tracker (#32921) Previously, the journal writer is nil until the first time rejournal (default 1h), which means during this period, txs submitted to this node are not written into journal file (transactions.rlp). If this node is shutdown before the first time rejournal, then txs in pending or queue will get lost. Here, this PR initializes the journal writer soon after launch to solve this issue. --------- Co-authored-by: Gary Rong --- core/txpool/locals/journal.go | 20 +++++++++- core/txpool/locals/tx_tracker.go | 38 +++++++++++------- core/txpool/locals/tx_tracker_test.go | 55 ++++++++++++++++++++++++--- 3 files changed, 92 insertions(+), 21 deletions(-) diff --git a/core/txpool/locals/journal.go b/core/txpool/locals/journal.go index 46fd6de346..cd2be8a794 100644 --- a/core/txpool/locals/journal.go +++ b/core/txpool/locals/journal.go @@ -117,6 +117,25 @@ func (journal *journal) load(add func([]*types.Transaction) []error) error { return failure } +func (journal *journal) setupWriter() error { + if journal.writer != nil { + if err := journal.writer.Close(); err != nil { + return err + } + journal.writer = nil + } + + // Re-open the journal file for appending + // Use O_APPEND to ensure we always write to the end of the file + sink, err := os.OpenFile(journal.path, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0644) + if err != nil { + return err + } + journal.writer = sink + + return nil +} + // insert adds the specified transaction to the local disk journal. func (journal *journal) insert(tx *types.Transaction) error { if journal.writer == nil { @@ -177,7 +196,6 @@ func (journal *journal) rotate(all map[common.Address]types.Transactions) error // close flushes the transaction journal contents to disk and closes the file. 
func (journal *journal) close() error { var err error - if journal.writer != nil { err = journal.writer.Close() journal.writer = nil diff --git a/core/txpool/locals/tx_tracker.go b/core/txpool/locals/tx_tracker.go index e08384ce71..bb178f175e 100644 --- a/core/txpool/locals/tx_tracker.go +++ b/core/txpool/locals/tx_tracker.go @@ -114,13 +114,14 @@ func (tracker *TxTracker) TrackAll(txs []*types.Transaction) { } // recheck checks and returns any transactions that needs to be resubmitted. -func (tracker *TxTracker) recheck(journalCheck bool) (resubmits []*types.Transaction, rejournal map[common.Address]types.Transactions) { +func (tracker *TxTracker) recheck(journalCheck bool) []*types.Transaction { tracker.mu.Lock() defer tracker.mu.Unlock() var ( numStales = 0 numOk = 0 + resubmits []*types.Transaction ) for sender, txs := range tracker.byAddr { // Wipe the stales @@ -141,7 +142,7 @@ func (tracker *TxTracker) recheck(journalCheck bool) (resubmits []*types.Transac } if journalCheck { // rejournal - rejournal = make(map[common.Address]types.Transactions) + rejournal := make(map[common.Address]types.Transactions) for _, tx := range tracker.all { addr, _ := types.Sender(tracker.signer, tx) rejournal[addr] = append(rejournal[addr], tx) @@ -153,10 +154,18 @@ func (tracker *TxTracker) recheck(journalCheck bool) (resubmits []*types.Transac return int(a.Nonce() - b.Nonce()) }) } + // Rejournal the tracker while holding the lock. No new transactions will + // be added to the old journal during this period, preventing any potential + // transaction loss. 
+ if tracker.journal != nil { + if err := tracker.journal.rotate(rejournal); err != nil { + log.Warn("Transaction journal rotation failed", "err", err) + } + } } localGauge.Update(int64(len(tracker.all))) log.Debug("Tx tracker status", "need-resubmit", len(resubmits), "stale", numStales, "ok", numOk) - return resubmits, rejournal + return resubmits } // Start implements node.Lifecycle interface @@ -185,6 +194,12 @@ func (tracker *TxTracker) loop() { tracker.TrackAll(transactions) return nil }) + + // Setup the writer for the upcoming transactions + if err := tracker.journal.setupWriter(); err != nil { + log.Error("Failed to setup the journal writer", "err", err) + return + } defer tracker.journal.close() } var ( @@ -196,20 +211,15 @@ func (tracker *TxTracker) loop() { case <-tracker.shutdownCh: return case <-timer.C: - checkJournal := tracker.journal != nil && time.Since(lastJournal) > tracker.rejournal - resubmits, rejournal := tracker.recheck(checkJournal) + var rejournal bool + if tracker.journal != nil && time.Since(lastJournal) > tracker.rejournal { + rejournal, lastJournal = true, time.Now() + log.Debug("Rejournal the transaction tracker") + } + resubmits := tracker.recheck(rejournal) if len(resubmits) > 0 { tracker.pool.Add(resubmits, false) } - if checkJournal { - // Lock to prevent journal.rotate <-> journal.insert (via TrackAll) conflicts - tracker.mu.Lock() - lastJournal = time.Now() - if err := tracker.journal.rotate(rejournal); err != nil { - log.Warn("Transaction journal rotation failed", "err", err) - } - tracker.mu.Unlock() - } timer.Reset(recheckInterval) } } diff --git a/core/txpool/locals/tx_tracker_test.go b/core/txpool/locals/tx_tracker_test.go index 367fb6b6da..dde8754605 100644 --- a/core/txpool/locals/tx_tracker_test.go +++ b/core/txpool/locals/tx_tracker_test.go @@ -17,7 +17,11 @@ package locals import ( + "fmt" + "maps" "math/big" + "math/rand" + "path/filepath" "testing" "time" @@ -146,20 +150,59 @@ func TestResubmit(t *testing.T) { txsA 
:= txs[:len(txs)/2] txsB := txs[len(txs)/2:] env.pool.Add(txsA, true) + pending, queued := env.pool.ContentFrom(address) if len(pending) != len(txsA) || len(queued) != 0 { t.Fatalf("Unexpected txpool content: %d, %d", len(pending), len(queued)) } env.tracker.TrackAll(txs) - resubmit, all := env.tracker.recheck(true) + resubmit := env.tracker.recheck(true) if len(resubmit) != len(txsB) { t.Fatalf("Unexpected transactions to resubmit, got: %d, want: %d", len(resubmit), len(txsB)) } - if len(all) == 0 || len(all[address]) == 0 { - t.Fatalf("Unexpected transactions being tracked, got: %d, want: %d", 0, len(txs)) - } - if len(all[address]) != len(txs) { - t.Fatalf("Unexpected transactions being tracked, got: %d, want: %d", len(all[address]), len(txs)) + env.tracker.mu.Lock() + allCopy := maps.Clone(env.tracker.all) + env.tracker.mu.Unlock() + + if len(allCopy) != len(txs) { + t.Fatalf("Unexpected transactions being tracked, got: %d, want: %d", len(allCopy), len(txs)) + } +} + +func TestJournal(t *testing.T) { + journalPath := filepath.Join(t.TempDir(), fmt.Sprintf("%d", rand.Int63())) + env := newTestEnv(t, 10, 0, journalPath) + defer env.close() + + env.tracker.Start() + defer env.tracker.Stop() + + txs := env.makeTxs(10) + txsA := txs[:len(txs)/2] + txsB := txs[len(txs)/2:] + env.pool.Add(txsA, true) + + pending, queued := env.pool.ContentFrom(address) + if len(pending) != len(txsA) || len(queued) != 0 { + t.Fatalf("Unexpected txpool content: %d, %d", len(pending), len(queued)) + } + env.tracker.TrackAll(txsA) + env.tracker.TrackAll(txsB) + env.tracker.recheck(true) // manually rejournal the tracker + + // Make sure all the transactions are properly journalled + trackerB := New(journalPath, time.Minute, gspec.Config, env.pool) + trackerB.journal.load(func(transactions []*types.Transaction) []error { + trackerB.TrackAll(transactions) + return nil + }) + + trackerB.mu.Lock() + allCopy := maps.Clone(trackerB.all) + trackerB.mu.Unlock() + + if len(allCopy) != len(txs) { + 
t.Fatalf("Unexpected transactions being tracked, got: %d, want: %d", len(allCopy), len(txs)) } } From 79b6a56d3a3a5e831b8ca49b819ce487aebe3a22 Mon Sep 17 00:00:00 2001 From: rjl493456442 Date: Tue, 21 Oct 2025 16:03:56 +0800 Subject: [PATCH 064/277] core/state: prevent SetCode hook if contract code is not changed (#32980) This PR prevents the SetCode hook from being called when the contract code remains unchanged. This situation can occur in the following cases: - The deployed runtime code has zero length - An EIP-7702 authorization attempt tries to unset a non-delegated account - An EIP-7702 authorization attempt tries to delegate to the same account --- core/state/statedb_hooked.go | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/core/state/statedb_hooked.go b/core/state/statedb_hooked.go index d2595bcefe..9db201fc2b 100644 --- a/core/state/statedb_hooked.go +++ b/core/state/statedb_hooked.go @@ -191,17 +191,18 @@ func (s *hookedStateDB) SetNonce(address common.Address, nonce uint64, reason tr func (s *hookedStateDB) SetCode(address common.Address, code []byte, reason tracing.CodeChangeReason) []byte { prev := s.inner.SetCode(address, code, reason) + if s.hooks.OnCodeChangeV2 != nil || s.hooks.OnCodeChange != nil { - prevHash := types.EmptyCodeHash - if len(prev) != 0 { - prevHash = crypto.Keccak256Hash(prev) - } + prevHash := crypto.Keccak256Hash(prev) codeHash := crypto.Keccak256Hash(code) - if s.hooks.OnCodeChangeV2 != nil { - s.hooks.OnCodeChangeV2(address, prevHash, prev, codeHash, code, reason) - } else if s.hooks.OnCodeChange != nil { - s.hooks.OnCodeChange(address, prevHash, prev, codeHash, code) + // Invoke the hooks only if the contract code is changed + if prevHash != codeHash { + if s.hooks.OnCodeChangeV2 != nil { + s.hooks.OnCodeChangeV2(address, prevHash, prev, codeHash, code, reason) + } else if s.hooks.OnCodeChange != nil { + s.hooks.OnCodeChange(address, prevHash, prev, codeHash, code) + } } } return prev From 
0a8b8207251862a552904913c727b4e0c1701252 Mon Sep 17 00:00:00 2001 From: rjl493456442 Date: Tue, 21 Oct 2025 19:11:36 +0800 Subject: [PATCH 065/277] triedb/pathdb: make batch with pre-allocated size (#32914) In this PR, the database batch for writing the history index data is pre-allocated. It's observed that database batch repeatedly grows the size of the mega-batch, causing significant memory allocation pressure. This approach can effectively mitigate the overhead. --- triedb/pathdb/history_index_block.go | 8 ++++---- triedb/pathdb/history_indexer.go | 23 ++++++++++++++++++++++- 2 files changed, 26 insertions(+), 5 deletions(-) diff --git a/triedb/pathdb/history_index_block.go b/triedb/pathdb/history_index_block.go index 7648b99226..5abdee682a 100644 --- a/triedb/pathdb/history_index_block.go +++ b/triedb/pathdb/history_index_block.go @@ -25,10 +25,10 @@ import ( ) const ( - indexBlockDescSize = 14 // The size of index block descriptor - indexBlockEntriesCap = 4096 // The maximum number of entries can be grouped in a block - indexBlockRestartLen = 256 // The restart interval length of index block - historyIndexBatch = 1_000_000 // The number of state history indexes for constructing or deleting as batch + indexBlockDescSize = 14 // The size of index block descriptor + indexBlockEntriesCap = 4096 // The maximum number of entries can be grouped in a block + indexBlockRestartLen = 256 // The restart interval length of index block + historyIndexBatch = 512 * 1024 // The number of state history indexes for constructing or deleting as batch ) // indexBlockDesc represents a descriptor for an index block, which contains a diff --git a/triedb/pathdb/history_indexer.go b/triedb/pathdb/history_indexer.go index 368ff78d41..893ccd6523 100644 --- a/triedb/pathdb/history_indexer.go +++ b/triedb/pathdb/history_indexer.go @@ -40,6 +40,11 @@ const ( stateHistoryIndexVersion = stateHistoryIndexV0 // the current state index version trienodeHistoryIndexV0 = uint8(0) // initial version 
of trienode index structure trienodeHistoryIndexVersion = trienodeHistoryIndexV0 // the current trienode index version + + // estimations for calculating the batch size for atomic database commit + estimatedStateHistoryIndexSize = 3 // The average size of each state history index entry is approximately 2–3 bytes + estimatedTrienodeHistoryIndexSize = 3 // The average size of each trienode history index entry is approximately 2-3 bytes + estimatedIndexBatchSizeFactor = 32 // The factor counts for the write amplification for each entry ) // indexVersion returns the latest index version for the given history type. @@ -150,6 +155,22 @@ func (b *batchIndexer) process(h history, id uint64) error { return b.finish(false) } +// makeBatch constructs a database batch based on the number of pending entries. +// The batch size is roughly estimated to minimize repeated resizing rounds, +// as accurately predicting the exact size is technically challenging. +func (b *batchIndexer) makeBatch() ethdb.Batch { + var size int + switch b.typ { + case typeStateHistory: + size = estimatedStateHistoryIndexSize + case typeTrienodeHistory: + size = estimatedTrienodeHistoryIndexSize + default: + panic(fmt.Sprintf("unknown history type %d", b.typ)) + } + return b.db.NewBatchWithSize(size * estimatedIndexBatchSizeFactor * b.pending) +} + // finish writes the accumulated state indexes into the disk if either the // memory limitation is reached or it's requested forcibly. func (b *batchIndexer) finish(force bool) error { @@ -160,7 +181,7 @@ func (b *batchIndexer) finish(force bool) error { return nil } var ( - batch = b.db.NewBatch() + batch = b.makeBatch() batchMu sync.RWMutex start = time.Now() eg errgroup.Group From 407d9faf713fa8dfd93282502de89ce93ea66bbe Mon Sep 17 00:00:00 2001 From: lightclient <14004106+lightclient@users.noreply.github.com> Date: Tue, 21 Oct 2025 08:10:45 -0600 Subject: [PATCH 066/277] cmd/geth: add flag to set genesis (#32844) This PR is an alternative to #32556. 
Instead of trying to be smart and reuse `geth init`, we can introduce a new flag `--genesis` that loads the `genesis.json` from file into the `Genesis` object in the same path that the other network flags currently work in. Question: is something like `--genesis` enough to start deprecating `geth init`? -- ```console $ geth --datadir data --hoodi .. INFO [10-06|22:37:11.202] - BPO2: @1762955544 .. $ geth --datadir data --genesis genesis.json .. INFO [10-06|22:37:27.988] - BPO2: @1862955544 .. ``` Pull the genesis [from the specs](https://raw.githubusercontent.com/eth-clients/hoodi/refs/heads/main/metadata/genesis.json) and modify one of the BPO timestamps to simulate a shadow fork. --------- Co-authored-by: rjl493456442 --- cmd/geth/main.go | 1 + cmd/utils/flags.go | 19 ++++++++++++++++++- 2 files changed, 19 insertions(+), 1 deletion(-) diff --git a/cmd/geth/main.go b/cmd/geth/main.go index cc294b2f30..109b36836a 100644 --- a/cmd/geth/main.go +++ b/cmd/geth/main.go @@ -66,6 +66,7 @@ var ( utils.OverrideBPO1, utils.OverrideBPO2, utils.OverrideVerkle, + utils.OverrideGenesisFlag, utils.EnablePersonal, // deprecated utils.TxPoolLocalsFlag, utils.TxPoolNoLocalsFlag, diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index 0c5db9e6d8..2a3d6af062 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -262,6 +262,11 @@ var ( Usage: "Manually specify the Verkle fork timestamp, overriding the bundled setting", Category: flags.EthCategory, } + OverrideGenesisFlag = &cli.StringFlag{ + Name: "override.genesis", + Usage: "Load genesis block and configuration from file at this path", + Category: flags.EthCategory, + } SyncModeFlag = &cli.StringFlag{ Name: "syncmode", Usage: `Blockchain sync mode ("snap" or "full")`, @@ -1605,7 +1610,7 @@ func setRequiredBlocks(ctx *cli.Context, cfg *ethconfig.Config) { // SetEthConfig applies eth-related command line flags to the config. 
func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) { // Avoid conflicting network flags, don't allow network id override on preset networks - flags.CheckExclusive(ctx, MainnetFlag, DeveloperFlag, SepoliaFlag, HoleskyFlag, HoodiFlag, NetworkIdFlag) + flags.CheckExclusive(ctx, MainnetFlag, DeveloperFlag, SepoliaFlag, HoleskyFlag, HoodiFlag, NetworkIdFlag, OverrideGenesisFlag) flags.CheckExclusive(ctx, DeveloperFlag, ExternalSignerFlag) // Can't use both ephemeral unlocked and external signer // Set configurations from CLI flags @@ -1891,6 +1896,18 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) { if !ctx.IsSet(MinerGasPriceFlag.Name) { cfg.Miner.GasPrice = big.NewInt(1) } + case ctx.String(OverrideGenesisFlag.Name) != "": + f, err := os.Open(ctx.String(OverrideGenesisFlag.Name)) + if err != nil { + Fatalf("Failed to read genesis file: %v", err) + } + defer f.Close() + + genesis := new(core.Genesis) + if err := json.NewDecoder(f).Decode(genesis); err != nil { + Fatalf("Invalid genesis file: %v", err) + } + cfg.Genesis = genesis default: if cfg.NetworkId == 1 { SetDNSDiscoveryDefaults(cfg, params.MainnetGenesisHash) From 6608a2aafd3603afe59f95fa7b2a8ec00b8eaa19 Mon Sep 17 00:00:00 2001 From: cui Date: Tue, 21 Oct 2025 23:49:43 +0800 Subject: [PATCH 067/277] core/types: remove unused `ErrInvalidTxType` var (#32989) The var `ErrInvalidTxType` is never used in the code base. 
--- core/types/transaction.go | 1 - 1 file changed, 1 deletion(-) diff --git a/core/types/transaction.go b/core/types/transaction.go index e98563b85f..6af960b8c3 100644 --- a/core/types/transaction.go +++ b/core/types/transaction.go @@ -34,7 +34,6 @@ import ( var ( ErrInvalidSig = errors.New("invalid transaction v, r, s values") ErrUnexpectedProtection = errors.New("transaction type does not supported EIP-155 protected signatures") - ErrInvalidTxType = errors.New("transaction type not valid in this context") ErrTxTypeNotSupported = errors.New("transaction type not supported") ErrGasFeeCapTooLow = errors.New("fee cap less than base fee") ErrUint256Overflow = errors.New("bigint overflow, too large for uint256") From 3b8075234eb0d692ff6ac7eb11e9c204df309b6f Mon Sep 17 00:00:00 2001 From: Delweng Date: Wed, 22 Oct 2025 22:35:26 +0800 Subject: [PATCH 068/277] core/state: fix the flaky TestSizeTracker (#32993) --- core/state/state_sizer_test.go | 21 ++++++++++++++------- 1 file changed, 14 insertions(+), 7 deletions(-) diff --git a/core/state/state_sizer_test.go b/core/state/state_sizer_test.go index cab0c38163..65f652e424 100644 --- a/core/state/state_sizer_test.go +++ b/core/state/state_sizer_test.go @@ -94,6 +94,14 @@ func TestSizeTracker(t *testing.T) { } baselineRoot := currentRoot + // Close and reopen the trie database so all async flushes triggered by the + // baseline commits are written before we measure the baseline snapshot. 
+ if err := tdb.Close(); err != nil { + t.Fatalf("Failed to close triedb before baseline measurement: %v", err) + } + tdb = triedb.NewDatabase(db, &triedb.Config{PathDB: pathdb.Defaults}) + sdb = NewDatabase(tdb, nil) + // Wait for snapshot completion for !tdb.SnapshotCompleted() { time.Sleep(100 * time.Millisecond) @@ -215,13 +223,12 @@ func TestSizeTracker(t *testing.T) { if actualStats.ContractCodeBytes != expectedStats.ContractCodeBytes { t.Errorf("Contract code bytes mismatch: expected %d, got %d", expectedStats.ContractCodeBytes, actualStats.ContractCodeBytes) } - // TODO: failed on github actions, need to investigate - // if actualStats.AccountTrienodes != expectedStats.AccountTrienodes { - // t.Errorf("Account trie nodes mismatch: expected %d, got %d", expectedStats.AccountTrienodes, actualStats.AccountTrienodes) - // } - // if actualStats.AccountTrienodeBytes != expectedStats.AccountTrienodeBytes { - // t.Errorf("Account trie node bytes mismatch: expected %d, got %d", expectedStats.AccountTrienodeBytes, actualStats.AccountTrienodeBytes) - // } + if actualStats.AccountTrienodes != expectedStats.AccountTrienodes { + t.Errorf("Account trie nodes mismatch: expected %d, got %d", expectedStats.AccountTrienodes, actualStats.AccountTrienodes) + } + if actualStats.AccountTrienodeBytes != expectedStats.AccountTrienodeBytes { + t.Errorf("Account trie node bytes mismatch: expected %d, got %d", expectedStats.AccountTrienodeBytes, actualStats.AccountTrienodeBytes) + } if actualStats.StorageTrienodes != expectedStats.StorageTrienodes { t.Errorf("Storage trie nodes mismatch: expected %d, got %d", expectedStats.StorageTrienodes, actualStats.StorageTrienodes) } From 116c916753c5317cce6d62d18c6fb1a14020e447 Mon Sep 17 00:00:00 2001 From: Delweng Date: Thu, 23 Oct 2025 00:24:40 +0800 Subject: [PATCH 069/277] cmd/devp2p: distinguish the jwt in devp2p and geth (#32972) This PR fixes some docs for the devp2p suite and uses the CLI library's required value instead of manually 
checking if required flags are passed. --- cmd/devp2p/README.md | 4 ++-- cmd/devp2p/rlpxcmd.go | 12 ------------ cmd/devp2p/runtest.go | 7 +++++-- 3 files changed, 7 insertions(+), 16 deletions(-) diff --git a/cmd/devp2p/README.md b/cmd/devp2p/README.md index ad2985b4b0..b20d921dc4 100644 --- a/cmd/devp2p/README.md +++ b/cmd/devp2p/README.md @@ -121,7 +121,7 @@ with our test chain. The chain files are located in `./cmd/devp2p/internal/ethte --nat=none \ --networkid 3503995874084926 \ --verbosity 5 \ - --authrpc.jwtsecret 0x7365637265747365637265747365637265747365637265747365637265747365 + --authrpc.jwtsecret jwt.secret Note that the tests also require access to the engine API. The test suite can now be executed using the devp2p tool. @@ -130,7 +130,7 @@ The test suite can now be executed using the devp2p tool. --chain internal/ethtest/testdata \ --node enode://.... \ --engineapi http://127.0.0.1:8551 \ - --jwtsecret 0x7365637265747365637265747365637265747365637265747365637265747365 + --jwtsecret $(cat jwt.secret) Repeat the above process (re-initialising the node) in order to run the Eth Protocol test suite again. 
diff --git a/cmd/devp2p/rlpxcmd.go b/cmd/devp2p/rlpxcmd.go index 118731fd6c..1dc8f82460 100644 --- a/cmd/devp2p/rlpxcmd.go +++ b/cmd/devp2p/rlpxcmd.go @@ -143,9 +143,6 @@ type testParams struct { func cliTestParams(ctx *cli.Context) *testParams { nodeStr := ctx.String(testNodeFlag.Name) - if nodeStr == "" { - exit(fmt.Errorf("missing -%s", testNodeFlag.Name)) - } node, err := parseNode(nodeStr) if err != nil { exit(err) @@ -156,14 +153,5 @@ func cliTestParams(ctx *cli.Context) *testParams { jwt: ctx.String(testNodeJWTFlag.Name), chainDir: ctx.String(testChainDirFlag.Name), } - if p.engineAPI == "" { - exit(fmt.Errorf("missing -%s", testNodeEngineFlag.Name)) - } - if p.jwt == "" { - exit(fmt.Errorf("missing -%s", testNodeJWTFlag.Name)) - } - if p.chainDir == "" { - exit(fmt.Errorf("missing -%s", testChainDirFlag.Name)) - } return &p } diff --git a/cmd/devp2p/runtest.go b/cmd/devp2p/runtest.go index 7e3723c641..c40a4b8a01 100644 --- a/cmd/devp2p/runtest.go +++ b/cmd/devp2p/runtest.go @@ -39,26 +39,29 @@ var ( } // for eth/snap tests - testChainDirFlag = &cli.StringFlag{ + testChainDirFlag = &cli.PathFlag{ Name: "chain", Usage: "Test chain directory (required)", Category: flags.TestingCategory, + Required: true, } testNodeFlag = &cli.StringFlag{ Name: "node", Usage: "Peer-to-Peer endpoint (ENR) of the test node (required)", Category: flags.TestingCategory, + Required: true, } testNodeJWTFlag = &cli.StringFlag{ Name: "jwtsecret", Usage: "JWT secret for the engine API of the test node (required)", Category: flags.TestingCategory, - Value: "0x7365637265747365637265747365637265747365637265747365637265747365", + Required: true, } testNodeEngineFlag = &cli.StringFlag{ Name: "engineapi", Usage: "Engine API endpoint of the test node (required)", Category: flags.TestingCategory, + Required: true, } // These two are specific to the discovery tests. 
From 2bb3d9a330006b156ddf0835d3f00aec52678cc6 Mon Sep 17 00:00:00 2001 From: Delweng Date: Thu, 23 Oct 2025 16:44:54 +0800 Subject: [PATCH 070/277] p2p: silence on listener shutdown (#33001) Co-authored-by: Felix Lange --- p2p/server.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/p2p/server.go b/p2p/server.go index 1f859089af..ddd4f5d072 100644 --- a/p2p/server.go +++ b/p2p/server.go @@ -813,7 +813,9 @@ func (srv *Server) listenLoop() { time.Sleep(time.Millisecond * 200) continue } else if err != nil { - srv.log.Debug("Read error", "err", err) + if !errors.Is(err, net.ErrClosed) { + srv.log.Debug("Read error", "err", err) + } slots <- struct{}{} return } From 030cd2d1555c164a88af831d22c42742eca9b3b1 Mon Sep 17 00:00:00 2001 From: maskpp Date: Thu, 23 Oct 2025 17:56:47 +0800 Subject: [PATCH 071/277] cmd/utils: use IsHexAddress method (#32997) Using the `IsHexAddress` method will result in no gaps in the verification logic, making it simpler. --- cmd/utils/flags.go | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index 2a3d6af062..5a7e40767c 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -20,7 +20,6 @@ package utils import ( "context" "crypto/ecdsa" - "encoding/hex" "encoding/json" "errors" "fmt" @@ -1341,15 +1340,10 @@ func setEtherbase(ctx *cli.Context, cfg *ethconfig.Config) { return } addr := ctx.String(MinerPendingFeeRecipientFlag.Name) - if strings.HasPrefix(addr, "0x") || strings.HasPrefix(addr, "0X") { - addr = addr[2:] - } - b, err := hex.DecodeString(addr) - if err != nil || len(b) != common.AddressLength { + if !common.IsHexAddress(addr) { Fatalf("-%s: invalid pending block producer address %q", MinerPendingFeeRecipientFlag.Name, addr) - return } - cfg.Miner.PendingFeeRecipient = common.BytesToAddress(b) + cfg.Miner.PendingFeeRecipient = common.HexToAddress(addr) } func SetP2PConfig(ctx *cli.Context, cfg *p2p.Config) { From 
f1be21501f53f7ac79478739c985fae82a32d8c9 Mon Sep 17 00:00:00 2001 From: Guillaume Ballet <3272758+gballet@users.noreply.github.com> Date: Thu, 23 Oct 2025 14:02:13 +0200 Subject: [PATCH 072/277] crypto: implement ziren keccak state (#32996) The #32816 was only using the keccak precompile for some minor task. This PR implements a keccak state, which is what is used for hashing the tree. --- crypto/keccak_ziren.go | 66 +++++++++++++++++++++++++++++++++++++++--- 1 file changed, 62 insertions(+), 4 deletions(-) diff --git a/crypto/keccak_ziren.go b/crypto/keccak_ziren.go index 033c0ec42c..8e967c6dbf 100644 --- a/crypto/keccak_ziren.go +++ b/crypto/keccak_ziren.go @@ -21,14 +21,72 @@ package crypto import ( "github.com/ProjectZKM/Ziren/crates/go-runtime/zkvm_runtime" "github.com/ethereum/go-ethereum/common" - "golang.org/x/crypto/sha3" ) +// zirenKeccakState implements the KeccakState interface using the Ziren zkvm_runtime. +// It accumulates data written to it and uses the zkvm's Keccak256 system call for hashing. +type zirenKeccakState struct { + buf []byte // accumulated data + result []byte // cached result + dirty bool // whether new data has been written since last hash +} + +func newZirenKeccakState() KeccakState { + return &zirenKeccakState{ + buf: make([]byte, 0, 512), // pre-allocate reasonable capacity + } +} + +func (s *zirenKeccakState) Write(p []byte) (n int, err error) { + s.buf = append(s.buf, p...) + s.dirty = true + return len(p), nil +} + +func (s *zirenKeccakState) Sum(b []byte) []byte { + s.computeHashIfNeeded() + return append(b, s.result...) 
+} + +func (s *zirenKeccakState) Reset() { + s.buf = s.buf[:0] + s.result = nil + s.dirty = false +} + +func (s *zirenKeccakState) Size() int { + return 32 +} + +func (s *zirenKeccakState) BlockSize() int { + return 136 // Keccak256 rate +} + +func (s *zirenKeccakState) Read(p []byte) (n int, err error) { + s.computeHashIfNeeded() + + if len(p) == 0 { + return 0, nil + } + + // After computeHashIfNeeded(), s.result is always a 32-byte slice + n = copy(p, s.result) + return n, nil +} + +func (s *zirenKeccakState) computeHashIfNeeded() { + if s.dirty || s.result == nil { + // Use the zkvm_runtime Keccak256 which uses SyscallKeccakSponge + hashArray := zkvm_runtime.Keccak256(s.buf) + s.result = hashArray[:] + s.dirty = false + } +} + // NewKeccakState creates a new KeccakState -// For now, we fallback to the original implementation for the stateful interface. -// TODO: Implement a stateful wrapper around zkvm_runtime.Keccak256 if needed. +// This uses a Ziren-optimized implementation that leverages the zkvm_runtime.Keccak256 system call. func NewKeccakState() KeccakState { - return sha3.NewLegacyKeccak256().(KeccakState) + return newZirenKeccakState() } // Keccak256 calculates and returns the Keccak256 hash using the Ziren zkvm_runtime implementation. 
From 0413af40f60290cf689b4ecca4e51fef0ec11119 Mon Sep 17 00:00:00 2001 From: Delweng Date: Thu, 23 Oct 2025 20:58:33 +0800 Subject: [PATCH 073/277] rpc: fix a flaky test of the websocket (#33002) Found in https://github.com/ethereum/go-ethereum/actions/runs/17803828253/job/50611300621?pr=32585 ``` --- FAIL: TestClientCancelWebsocket (0.33s) panic: read tcp 127.0.0.1:36048->127.0.0.1:38643: read: connection reset by peer [recovered, repanicked] goroutine 15 [running]: testing.tRunner.func1.2({0x98dd20, 0xc0005b0100}) /opt/actions-runner/_work/_tool/go/1.25.1/x64/src/testing/testing.go:1872 +0x237 testing.tRunner.func1() /opt/actions-runner/_work/_tool/go/1.25.1/x64/src/testing/testing.go:1875 +0x35b panic({0x98dd20?, 0xc0005b0100?}) /opt/actions-runner/_work/_tool/go/1.25.1/x64/src/runtime/panic.go:783 +0x132 github.com/ethereum/go-ethereum/rpc.httpTestClient(0xc0001dc1c0?, {0x9d5e40, 0x2}, 0xc0002bc1c0) /opt/actions-runner/_work/go-ethereum/go-ethereum/rpc/client_test.go:932 +0x2b1 github.com/ethereum/go-ethereum/rpc.testClientCancel({0x9d5e40, 0x2}, 0xc0001dc1c0) /opt/actions-runner/_work/go-ethereum/go-ethereum/rpc/client_test.go:356 +0x15f github.com/ethereum/go-ethereum/rpc.TestClientCancelWebsocket(0xc0001dc1c0?) /opt/actions-runner/_work/go-ethereum/go-ethereum/rpc/client_test.go:319 +0x25 testing.tRunner(0xc0001dc1c0, 0xa07370) /opt/actions-runner/_work/_tool/go/1.25.1/x64/src/testing/testing.go:1934 +0xea created by testing.(*T).Run in goroutine 1 /opt/actions-runner/_work/_tool/go/1.25.1/x64/src/testing/testing.go:1997 +0x465 FAIL github.com/ethereum/go-ethereum/rpc 0.371s ``` In `testClientCancel` we wrap the server listener in `flakeyListener`, which schedules an unconditional close of every accepted connection after a random delay, if the random delay is zero then the timer fires immediately, and then the http client paniced of connection reset by peer. Here we add a minimum 10ms to ensure the timeout won't fire immediately. 
Signed-off-by: jsvisa --- rpc/client_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/rpc/client_test.go b/rpc/client_test.go index 6c1a4f8f6c..03a3410537 100644 --- a/rpc/client_test.go +++ b/rpc/client_test.go @@ -973,7 +973,7 @@ func (l *flakeyListener) Accept() (net.Conn, error) { c, err := l.Listener.Accept() if err == nil { - timeout := time.Duration(rand.Int63n(int64(l.maxKillTimeout))) + timeout := max(time.Millisecond*10, time.Duration(rand.Int63n(int64(l.maxKillTimeout)))) time.AfterFunc(timeout, func() { log.Debug(fmt.Sprintf("killing conn %v after %v", c.LocalAddr(), timeout)) c.Close() From 53c85da79670b58e484aea9d3467b0907266b0b9 Mon Sep 17 00:00:00 2001 From: hero5512 Date: Fri, 24 Oct 2025 11:04:09 -0400 Subject: [PATCH 074/277] eth/tracers: fix crasher in TraceCall with BlockOverrides (#33015) fix https://github.com/ethereum/go-ethereum/issues/33014 --------- Co-authored-by: lightclient --- eth/tracers/api.go | 2 +- eth/tracers/api_test.go | 14 ++++++++++++++ 2 files changed, 15 insertions(+), 1 deletion(-) diff --git a/eth/tracers/api.go b/eth/tracers/api.go index aebeb48463..5cfbc24b8e 100644 --- a/eth/tracers/api.go +++ b/eth/tracers/api.go @@ -959,7 +959,7 @@ func (api *API) TraceCall(ctx context.Context, args ethapi.TransactionArgs, bloc // Apply the customization rules if required. 
if config != nil { - if config.BlockOverrides != nil && config.BlockOverrides.Number.ToInt().Uint64() == h.Number.Uint64()+1 { + if config.BlockOverrides != nil && config.BlockOverrides.Number != nil && config.BlockOverrides.Number.ToInt().Uint64() == h.Number.Uint64()+1 { // Overriding the block number to n+1 is a common way for wallets to // simulate transactions, however without the following fix, a contract // can assert it is being simulated by checking if blockhash(n) == 0x0 and diff --git a/eth/tracers/api_test.go b/eth/tracers/api_test.go index 4173d2a791..609c3f4d8b 100644 --- a/eth/tracers/api_test.go +++ b/eth/tracers/api_test.go @@ -471,6 +471,20 @@ func TestTraceCall(t *testing.T) { {"pc":0,"op":"NUMBER","gas":24946984,"gasCost":2,"depth":1,"stack":[]}, {"pc":1,"op":"STOP","gas":24946982,"gasCost":0,"depth":1,"stack":["0x1337"]}]}`, }, + // Tests issue #33014 where accessing nil block number override panics. + { + blockNumber: rpc.BlockNumber(0), + call: ethapi.TransactionArgs{ + From: &accounts[0].addr, + To: &accounts[1].addr, + Value: (*hexutil.Big)(big.NewInt(1000)), + }, + config: &TraceCallConfig{ + BlockOverrides: &override.BlockOverrides{}, + }, + expectErr: nil, + expect: `{"gas":21000,"failed":false,"returnValue":"0x","structLogs":[]}`, + }, } for i, testspec := range testSuite { result, err := api.TraceCall(context.Background(), testspec.call, rpc.BlockNumberOrHash{BlockNumber: &testspec.blockNumber}, testspec.config) From 074d7b79c1461ccd77e93f3ec0d493674257dc91 Mon Sep 17 00:00:00 2001 From: Guillaume Ballet <3272758+gballet@users.noreply.github.com> Date: Fri, 24 Oct 2025 17:19:25 +0200 Subject: [PATCH 075/277] .gitea/workflows, build: add release build for keeper (#32632) --- .gitea/workflows/release.yml | 21 ++++++++ .gitignore | 3 +- build/ci.go | 98 ++++++++++++++++++++++++++++++++++-- 3 files changed, 118 insertions(+), 4 deletions(-) diff --git a/.gitea/workflows/release.yml b/.gitea/workflows/release.yml index 
f2dcc3ae96..41defedd00 100644 --- a/.gitea/workflows/release.yml +++ b/.gitea/workflows/release.yml @@ -122,6 +122,27 @@ jobs: LINUX_SIGNING_KEY: ${{ secrets.LINUX_SIGNING_KEY }} AZURE_BLOBSTORE_TOKEN: ${{ secrets.AZURE_BLOBSTORE_TOKEN }} + keeper: + name: Keeper Build + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + + - name: Set up Go + uses: actions/setup-go@v5 + with: + go-version: 1.24 + cache: false + + - name: Install cross toolchain + run: | + apt-get update + apt-get -yq --no-install-suggests --no-install-recommends install gcc-multilib + + - name: Build (amd64) + run: | + go run build/ci.go keeper -dlgo + windows: name: Windows Build runs-on: "win-11" diff --git a/.gitignore b/.gitignore index 269455db7a..293359a669 100644 --- a/.gitignore +++ b/.gitignore @@ -55,4 +55,5 @@ cmd/ethkey/ethkey cmd/evm/evm cmd/geth/geth cmd/rlpdump/rlpdump -cmd/workload/workload \ No newline at end of file +cmd/workload/workload +cmd/keeper/keeper diff --git a/build/ci.go b/build/ci.go index 905f6e4072..99a0e14f16 100644 --- a/build/ci.go +++ b/build/ci.go @@ -31,6 +31,9 @@ Available commands are: install [ -arch architecture ] [ -cc compiler ] [ packages... ] -- builds packages and executables test [ -coverage ] [ packages... 
] -- runs the tests + keeper [ -dlgo ] + keeper-archive [ -signer key-envvar ] [ -signify key-envvar ] [ -upload dest ] + archive [ -arch architecture ] [ -type zip|tar ] [ -signer key-envvar ] [ -signify key-envvar ] [ -upload dest ] -- archives build artifacts importkeys -- imports signing keys from env debsrc [ -signer key-id ] [ -upload dest ] -- creates a debian source package @@ -86,6 +89,30 @@ var ( executablePath("clef"), } + // Keeper build targets with their configurations + keeperTargets = []struct { + Name string + GOOS string + GOARCH string + CC string + Tags string + Env map[string]string + }{ + { + Name: "ziren", + GOOS: "linux", + GOARCH: "mipsle", + // enable when cgo works + // CC: "mipsel-linux-gnu-gcc", + Tags: "ziren", + Env: map[string]string{"GOMIPS": "softfloat", "CGO_ENABLED": "0"}, + }, + { + Name: "example", + Tags: "example", + }, + } + // A debian package is created for all executables listed here. debExecutables = []debExecutable{ { @@ -178,6 +205,10 @@ func main() { doPurge(os.Args[2:]) case "sanitycheck": doSanityCheck() + case "keeper": + doInstallKeeper(os.Args[2:]) + case "keeper-archive": + doKeeperArchive(os.Args[2:]) default: log.Fatal("unknown command ", os.Args[1]) } @@ -212,9 +243,6 @@ func doInstall(cmdline []string) { // Configure the build. gobuild := tc.Go("build", buildFlags(env, *staticlink, buildTags)...) - // We use -trimpath to avoid leaking local paths into the built executables. - gobuild.Args = append(gobuild.Args, "-trimpath") - // Show packages during build. gobuild.Args = append(gobuild.Args, "-v") @@ -234,6 +262,42 @@ func doInstall(cmdline []string) { } } +// doInstallKeeper builds keeper binaries for all supported targets. +func doInstallKeeper(cmdline []string) { + var dlgo = flag.Bool("dlgo", false, "Download Go and build with it") + + flag.CommandLine.Parse(cmdline) + env := build.Env() + + // Configure the toolchain. 
+ tc := build.GoToolchain{} + if *dlgo { + csdb := download.MustLoadChecksums("build/checksums.txt") + tc.Root = build.DownloadGo(csdb) + } + + for _, target := range keeperTargets { + log.Printf("Building keeper-%s", target.Name) + + // Configure the build. + tc.GOARCH = target.GOARCH + tc.GOOS = target.GOOS + tc.CC = target.CC + gobuild := tc.Go("build", buildFlags(env, true, []string{target.Tags})...) + gobuild.Args = append(gobuild.Args, "-v") + + for key, value := range target.Env { + gobuild.Env = append(gobuild.Env, key+"="+value) + } + outputName := fmt.Sprintf("keeper-%s", target.Name) + + args := slices.Clone(gobuild.Args) + args = append(args, "-o", executablePath(outputName)) + args = append(args, "./cmd/keeper") + build.MustRun(&exec.Cmd{Path: gobuild.Path, Args: args, Env: gobuild.Env}) + } +} + // buildFlags returns the go tool flags for building. func buildFlags(env build.Environment, staticLinking bool, buildTags []string) (flags []string) { var ld []string @@ -272,6 +336,8 @@ func buildFlags(env build.Environment, staticLinking bool, buildTags []string) ( if len(buildTags) > 0 { flags = append(flags, "-tags", strings.Join(buildTags, ",")) } + // We use -trimpath to avoid leaking local paths into the built executables. + flags = append(flags, "-trimpath") return flags } @@ -630,6 +696,32 @@ func doArchive(cmdline []string) { } } +func doKeeperArchive(cmdline []string) { + var ( + signer = flag.String("signer", "", `Environment variable holding the signing key (e.g. LINUX_SIGNING_KEY)`) + signify = flag.String("signify", "", `Environment variable holding the signify key (e.g. 
LINUX_SIGNIFY_KEY)`) + upload = flag.String("upload", "", `Destination to upload the archives (usually "gethstore/builds")`) + ) + flag.CommandLine.Parse(cmdline) + + var ( + env = build.Env() + vsn = version.Archive(env.Commit) + keeper = "keeper-" + vsn + ".tar.gz" + ) + maybeSkipArchive(env) + files := []string{"COPYING"} + for _, target := range keeperTargets { + files = append(files, executablePath(fmt.Sprintf("keeper-%s", target.Name))) + } + if err := build.WriteArchive(keeper, files); err != nil { + log.Fatal(err) + } + if err := archiveUpload(keeper, *upload, *signer, *signify); err != nil { + log.Fatal(err) + } +} + func archiveBasename(arch string, archiveVersion string) string { platform := runtime.GOOS + "-" + arch if arch == "arm" { From 17e5222997c325f8a93e69a3a9f8bc9cc9d91bd1 Mon Sep 17 00:00:00 2001 From: Felix Lange Date: Fri, 24 Oct 2025 18:25:54 +0200 Subject: [PATCH 076/277] build: fix keeper build (#33018) At the time keeper support was added into ci.go, we were using a go.work file to make ./cmd/keeper accessible from within the main go-ethereum module. The workspace file has since been removed, so we need to build keeper from within its own module instead. --- build/ci.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/build/ci.go b/build/ci.go index 99a0e14f16..156626a82d 100644 --- a/build/ci.go +++ b/build/ci.go @@ -284,6 +284,7 @@ func doInstallKeeper(cmdline []string) { tc.GOOS = target.GOOS tc.CC = target.CC gobuild := tc.Go("build", buildFlags(env, true, []string{target.Tags})...) 
+ gobuild.Dir = "./cmd/keeper" gobuild.Args = append(gobuild.Args, "-v") for key, value := range target.Env { @@ -293,7 +294,7 @@ func doInstallKeeper(cmdline []string) { args := slices.Clone(gobuild.Args) args = append(args, "-o", executablePath(outputName)) - args = append(args, "./cmd/keeper") + args = append(args, ".") build.MustRun(&exec.Cmd{Path: gobuild.Path, Args: args, Env: gobuild.Env}) } } From cfa3b96103f515dc6bc280d78ab3d4830e4ca8c7 Mon Sep 17 00:00:00 2001 From: rjl493456442 Date: Sat, 25 Oct 2025 16:16:16 +0800 Subject: [PATCH 077/277] core/rawdb, triedb/pathdb: re-structure the trienode history header (#32907) In this PR, several changes have been made: (a) restructure the trienode history header section Previously, the offsets of the key and value sections were recorded before encoding data into these sections. As a result, these offsets referred to the start position of each chunk rather than the end position. This caused an issue where the end position of the last chunk was unknown, making it incompatible with the freezer partial-read APIs. With this update, all offsets now refer to the end position, and the start position of the first chunk is always 0. (b) Enable partial freezer read for trienode data retrieval The partial freezer read feature is now utilized in trienode data retrieval, improving efficiency. --- core/rawdb/accessors_state.go | 8 +-- triedb/pathdb/history_trienode.go | 106 ++++++++++++------------------ 2 files changed, 47 insertions(+), 67 deletions(-) diff --git a/core/rawdb/accessors_state.go b/core/rawdb/accessors_state.go index 714c1f77d6..b97c7a07a1 100644 --- a/core/rawdb/accessors_state.go +++ b/core/rawdb/accessors_state.go @@ -313,13 +313,13 @@ func ReadTrienodeHistoryHeader(db ethdb.AncientReaderOp, id uint64) ([]byte, err } // ReadTrienodeHistoryKeySection retrieves the key section of trienode history. 
-func ReadTrienodeHistoryKeySection(db ethdb.AncientReaderOp, id uint64) ([]byte, error) { - return db.Ancient(trienodeHistoryKeySectionTable, id-1) +func ReadTrienodeHistoryKeySection(db ethdb.AncientReaderOp, id uint64, offset uint64, length uint64) ([]byte, error) { + return db.AncientBytes(trienodeHistoryKeySectionTable, id-1, offset, length) } // ReadTrienodeHistoryValueSection retrieves the value section of trienode history. -func ReadTrienodeHistoryValueSection(db ethdb.AncientReaderOp, id uint64) ([]byte, error) { - return db.Ancient(trienodeHistoryValueSectionTable, id-1) +func ReadTrienodeHistoryValueSection(db ethdb.AncientReaderOp, id uint64, offset uint64, length uint64) ([]byte, error) { + return db.AncientBytes(trienodeHistoryValueSectionTable, id-1, offset, length) } // ReadTrienodeHistoryList retrieves the a list of trienode history corresponding diff --git a/triedb/pathdb/history_trienode.go b/triedb/pathdb/history_trienode.go index 2f31238612..3f45b41117 100644 --- a/triedb/pathdb/history_trienode.go +++ b/triedb/pathdb/history_trienode.go @@ -22,7 +22,6 @@ import ( "fmt" "iter" "maps" - "math" "slices" "sort" "time" @@ -202,17 +201,6 @@ func (h *trienodeHistory) encode() ([]byte, []byte, []byte, error) { binary.Write(&headerSection, binary.BigEndian, h.meta.block) // 8 byte for _, owner := range h.owners { - // Fill the header section with offsets at key and value section - headerSection.Write(owner.Bytes()) // 32 bytes - binary.Write(&headerSection, binary.BigEndian, uint32(keySection.Len())) // 4 bytes - - // The offset to the value section is theoretically unnecessary, since the - // individual value offset is already tracked in the key section. However, - // we still keep it here for two reasons: - // - It's cheap to store (only 4 bytes for each trie). - // - It can be useful for decoding the trie data when key is not required (e.g., in hash mode). 
- binary.Write(&headerSection, binary.BigEndian, uint32(valueSection.Len())) // 4 bytes - // Fill the key section with node index var ( prevKey []byte @@ -266,6 +254,21 @@ func (h *trienodeHistory) encode() ([]byte, []byte, []byte, error) { if _, err := keySection.Write(trailer); err != nil { return nil, nil, nil, err } + + // Fill the header section with the offsets of the key and value sections. + // Note that the key/value offsets are intentionally tracked *after* encoding + // them into their respective sections, ensuring each offset refers to the end + // position. For n trie chunks, n offset pairs are sufficient to uniquely locate + // the corresponding data. + headerSection.Write(owner.Bytes()) // 32 bytes + binary.Write(&headerSection, binary.BigEndian, uint32(keySection.Len())) // 4 bytes + + // The offset to the value section is theoretically unnecessary, since the + // individual value offset is already tracked in the key section. However, + // we still keep it here for two reasons: + // - It's cheap to store (only 4 bytes for each trie). + // - It can be useful for decoding the trie data when key is not required (e.g., in hash mode). 
+ binary.Write(&headerSection, binary.BigEndian, uint32(valueSection.Len())) // 4 bytes } return headerSection.Bytes(), keySection.Bytes(), valueSection.Bytes(), nil } @@ -475,22 +478,22 @@ func (h *trienodeHistory) decode(header []byte, keySection []byte, valueSection for i := range len(owners) { // Resolve the boundary of key section - keyStart := keyOffsets[i] - keyLimit := len(keySection) - if i != len(owners)-1 { - keyLimit = int(keyOffsets[i+1]) + var keyStart, keyLimit uint32 + if i != 0 { + keyStart = keyOffsets[i-1] } - if int(keyStart) > len(keySection) || keyLimit > len(keySection) { + keyLimit = keyOffsets[i] + if int(keyStart) > len(keySection) || int(keyLimit) > len(keySection) { return fmt.Errorf("invalid key offsets: keyStart: %d, keyLimit: %d, size: %d", keyStart, keyLimit, len(keySection)) } // Resolve the boundary of value section - valStart := valueOffsets[i] - valLimit := len(valueSection) - if i != len(owners)-1 { - valLimit = int(valueOffsets[i+1]) + var valStart, valLimit uint32 + if i != 0 { + valStart = valueOffsets[i-1] } - if int(valStart) > len(valueSection) || valLimit > len(valueSection) { + valLimit = valueOffsets[i] + if int(valStart) > len(valueSection) || int(valLimit) > len(valueSection) { return fmt.Errorf("invalid value offsets: valueStart: %d, valueLimit: %d, size: %d", valStart, valLimit, len(valueSection)) } @@ -510,33 +513,27 @@ type iRange struct { limit uint32 } +func (ir iRange) len() uint32 { + return ir.limit - ir.start +} + // singleTrienodeHistoryReader provides read access to a single trie within the // trienode history. It stores an offset to the trie's position in the history, // along with a set of per-node offsets that can be resolved on demand. 
type singleTrienodeHistoryReader struct { id uint64 reader ethdb.AncientReader - valueRange iRange // value range within the total value section + valueRange iRange // value range within the global value section valueInternalOffsets map[string]iRange // value offset within the single trie data } func newSingleTrienodeHistoryReader(id uint64, reader ethdb.AncientReader, keyRange iRange, valueRange iRange) (*singleTrienodeHistoryReader, error) { - // TODO(rjl493456442) partial freezer read should be supported - keyData, err := rawdb.ReadTrienodeHistoryKeySection(reader, id) + keyData, err := rawdb.ReadTrienodeHistoryKeySection(reader, id, uint64(keyRange.start), uint64(keyRange.len())) if err != nil { return nil, err } - keyStart := int(keyRange.start) - keyLimit := int(keyRange.limit) - if keyRange.limit == math.MaxUint32 { - keyLimit = len(keyData) - } - if len(keyData) < keyStart || len(keyData) < keyLimit { - return nil, fmt.Errorf("key section too short, start: %d, limit: %d, size: %d", keyStart, keyLimit, len(keyData)) - } - valueOffsets := make(map[string]iRange) - _, err = decodeSingle(keyData[keyStart:keyLimit], func(key []byte, start int, limit int) error { + _, err = decodeSingle(keyData, func(key []byte, start int, limit int) error { valueOffsets[string(key)] = iRange{ start: uint32(start), limit: uint32(limit), @@ -560,20 +557,7 @@ func (sr *singleTrienodeHistoryReader) read(path string) ([]byte, error) { if !exists { return nil, fmt.Errorf("trienode %v not found", []byte(path)) } - // TODO(rjl493456442) partial freezer read should be supported - valueData, err := rawdb.ReadTrienodeHistoryValueSection(sr.reader, sr.id) - if err != nil { - return nil, err - } - if len(valueData) < int(sr.valueRange.start) { - return nil, fmt.Errorf("value section too short, start: %d, size: %d", sr.valueRange.start, len(valueData)) - } - entryStart := sr.valueRange.start + offset.start - entryLimit := sr.valueRange.start + offset.limit - if len(valueData) < 
int(entryStart) || len(valueData) < int(entryLimit) { - return nil, fmt.Errorf("value section too short, start: %d, limit: %d, size: %d", entryStart, entryLimit, len(valueData)) - } - return valueData[int(entryStart):int(entryLimit)], nil + return rawdb.ReadTrienodeHistoryValueSection(sr.reader, sr.id, uint64(sr.valueRange.start+offset.start), uint64(offset.len())) } // trienodeHistoryReader provides read access to node data in the trie node history. @@ -614,27 +598,23 @@ func (r *trienodeHistoryReader) decodeHeader() error { } for i, owner := range owners { // Decode the key range for this trie chunk - var keyLimit uint32 - if i == len(owners)-1 { - keyLimit = math.MaxUint32 - } else { - keyLimit = keyOffsets[i+1] + var keyStart uint32 + if i != 0 { + keyStart = keyOffsets[i-1] } r.keyRanges[owner] = iRange{ - start: keyOffsets[i], - limit: keyLimit, + start: keyStart, + limit: keyOffsets[i], } // Decode the value range for this trie chunk - var valLimit uint32 - if i == len(owners)-1 { - valLimit = math.MaxUint32 - } else { - valLimit = valOffsets[i+1] + var valStart uint32 + if i != 0 { + valStart = valOffsets[i-1] } r.valRanges[owner] = iRange{ - start: valOffsets[i], - limit: valLimit, + start: valStart, + limit: valOffsets[i], } } return nil From 7fb91f3cd520c351da742aa29f1560d24e4cb21a Mon Sep 17 00:00:00 2001 From: Rizky Ikwan Date: Sun, 26 Oct 2025 09:13:04 +0100 Subject: [PATCH 078/277] rpc: remove unused vars (#33012) --- rpc/client.go | 1 - 1 file changed, 1 deletion(-) diff --git a/rpc/client.go b/rpc/client.go index ba7e43eb5c..9dc36a6105 100644 --- a/rpc/client.go +++ b/rpc/client.go @@ -32,7 +32,6 @@ import ( ) var ( - ErrBadResult = errors.New("bad result in JSON-RPC response") ErrClientQuit = errors.New("client is closed") ErrNoResult = errors.New("JSON-RPC response has no result") ErrMissingBatchResponse = errors.New("response batch did not contain a response to this call") From 078a5ecb7d51f42f67d7ecf2450306e1ae661e72 Mon Sep 17 00:00:00 2001 
From: cui Date: Sun, 26 Oct 2025 16:13:59 +0800 Subject: [PATCH 079/277] core/state: improve accessList copy (#33024) --- core/state/access_list.go | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/core/state/access_list.go b/core/state/access_list.go index e3f1738864..0b830e7222 100644 --- a/core/state/access_list.go +++ b/core/state/access_list.go @@ -61,9 +61,10 @@ func newAccessList() *accessList { // Copy creates an independent copy of an accessList. func (al *accessList) Copy() *accessList { - cp := newAccessList() - cp.addresses = maps.Clone(al.addresses) - cp.slots = make([]map[common.Hash]struct{}, len(al.slots)) + cp := &accessList{ + addresses: maps.Clone(al.addresses), + slots: make([]map[common.Hash]struct{}, len(al.slots)), + } for i, slotMap := range al.slots { cp.slots[i] = maps.Clone(slotMap) } From 447b5f7e199d1d57f286faf8bfa5b2323d451be8 Mon Sep 17 00:00:00 2001 From: Delweng Date: Mon, 27 Oct 2025 16:53:26 +0800 Subject: [PATCH 080/277] core: don't modify the shared chainId between tests (#33020) --- core/verkle_witness_test.go | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/core/verkle_witness_test.go b/core/verkle_witness_test.go index ca0c928c3c..9495e325ca 100644 --- a/core/verkle_witness_test.go +++ b/core/verkle_witness_test.go @@ -455,7 +455,7 @@ func verkleTestGenesis(config *params.ChainConfig) *Genesis { func TestProcessVerkleContractWithEmptyCode(t *testing.T) { // The test txs were taken from a secondary testnet with chain id 69421 config := *testKaustinenLikeChainConfig - config.ChainID.SetUint64(69421) + config.ChainID = new(big.Int).SetUint64(69421) gspec := verkleTestGenesis(&config) genesisH, _, _, _, _, statediffs := GenerateVerkleChainWithGenesis(gspec, beacon.New(ethash.NewFaker()), 1, func(i int, gen *BlockGen) { @@ -511,7 +511,7 @@ func TestProcessVerkleContractWithEmptyCode(t *testing.T) { func TestProcessVerkleExtCodeHashOpcode(t *testing.T) { // The test txs were 
taken from a secondary testnet with chain id 69421 config := *testKaustinenLikeChainConfig - config.ChainID.SetUint64(69421) + config.ChainID = new(big.Int).SetUint64(69421) var ( signer = types.LatestSigner(&config) @@ -615,7 +615,7 @@ func TestProcessVerkleExtCodeHashOpcode(t *testing.T) { func TestProcessVerkleBalanceOpcode(t *testing.T) { // The test txs were taken from a secondary testnet with chain id 69421 config := *testKaustinenLikeChainConfig - config.ChainID.SetUint64(69421) + config.ChainID = new(big.Int).SetUint64(69421) var ( signer = types.LatestSigner(&config) @@ -672,7 +672,7 @@ func TestProcessVerkleBalanceOpcode(t *testing.T) { func TestProcessVerkleSelfDestructInSeparateTx(t *testing.T) { // The test txs were taken from a secondary testnet with chain id 69421 config := *testKaustinenLikeChainConfig - config.ChainID.SetUint64(69421) + config.ChainID = new(big.Int).SetUint64(69421) var ( signer = types.LatestSigner(&config) @@ -792,7 +792,7 @@ func TestProcessVerkleSelfDestructInSeparateTx(t *testing.T) { func TestProcessVerkleSelfDestructInSameTx(t *testing.T) { // The test txs were taken from a secondary testnet with chain id 69421 config := *testKaustinenLikeChainConfig - config.ChainID.SetUint64(69421) + config.ChainID = new(big.Int).SetUint64(69421) var ( signer = types.LatestSigner(&config) @@ -888,7 +888,7 @@ func TestProcessVerkleSelfDestructInSameTx(t *testing.T) { func TestProcessVerkleSelfDestructInSeparateTxWithSelfBeneficiary(t *testing.T) { // The test txs were taken from a secondary testnet with chain id 69421 config := *testKaustinenLikeChainConfig - config.ChainID.SetUint64(69421) + config.ChainID = new(big.Int).SetUint64(69421) var ( signer = types.LatestSigner(&config) @@ -978,7 +978,7 @@ func TestProcessVerkleSelfDestructInSeparateTxWithSelfBeneficiary(t *testing.T) func TestProcessVerkleSelfDestructInSameTxWithSelfBeneficiary(t *testing.T) { // The test txs were taken from a secondary testnet with chain id 69421 config := 
*testKaustinenLikeChainConfig - config.ChainID.SetUint64(69421) + config.ChainID = new(big.Int).SetUint64(69421) var ( signer = types.LatestSigner(&config) @@ -1042,7 +1042,7 @@ func TestProcessVerkleSelfDestructInSameTxWithSelfBeneficiary(t *testing.T) { func TestProcessVerkleSelfDestructInSameTxWithSelfBeneficiaryAndPrefundedAccount(t *testing.T) { // The test txs were taken from a secondary testnet with chain id 69421 config := *testKaustinenLikeChainConfig - config.ChainID.SetUint64(69421) + config.ChainID = new(big.Int).SetUint64(69421) var ( signer = types.LatestSigner(&config) From 33dbd64a23a14172c8d14ad198b0390e78e4bc02 Mon Sep 17 00:00:00 2001 From: cui Date: Mon, 27 Oct 2025 23:04:06 +0800 Subject: [PATCH 081/277] core/types: optimize modernSigner.Equal (#32971) Equal is called every time the transaction sender is accessed, even when the sender is cached, so it is worth optimizing. --------- Co-authored-by: Felix Lange --- core/types/transaction_signing.go | 33 +++++++++++++++++--------- core/types/transaction_signing_test.go | 12 ++++++++++ 2 files changed, 34 insertions(+), 11 deletions(-) diff --git a/core/types/transaction_signing.go b/core/types/transaction_signing.go index 01aa67c6ba..ef8fb194d5 100644 --- a/core/types/transaction_signing.go +++ b/core/types/transaction_signing.go @@ -20,7 +20,6 @@ import ( "crypto/ecdsa" "errors" "fmt" - "maps" "math/big" "github.com/ethereum/go-ethereum/common" @@ -183,18 +182,31 @@ type Signer interface { // modernSigner is the signer implementation that handles non-legacy transaction types. // For legacy transactions, it defers to one of the legacy signers (frontier, homestead, eip155). type modernSigner struct { - txtypes map[byte]struct{} + txtypes txtypeSet chainID *big.Int legacy Signer } +// txtypeSet is a bitmap for transaction types. 
+type txtypeSet [2]uint64 + +func (v *txtypeSet) set(txType byte) { + v[txType/64] |= 1 << (txType % 64) +} + +func (v *txtypeSet) has(txType byte) bool { + if txType >= byte(len(v)*64) { + return false + } + return v[txType/64]&(1<<(txType%64)) != 0 +} + func newModernSigner(chainID *big.Int, fork forks.Fork) Signer { if chainID == nil || chainID.Sign() <= 0 { panic(fmt.Sprintf("invalid chainID %v", chainID)) } s := &modernSigner{ chainID: chainID, - txtypes: make(map[byte]struct{}, 4), } // configure legacy signer switch { @@ -205,19 +217,19 @@ func newModernSigner(chainID *big.Int, fork forks.Fork) Signer { default: s.legacy = FrontierSigner{} } - s.txtypes[LegacyTxType] = struct{}{} + s.txtypes.set(LegacyTxType) // configure tx types if fork >= forks.Berlin { - s.txtypes[AccessListTxType] = struct{}{} + s.txtypes.set(AccessListTxType) } if fork >= forks.London { - s.txtypes[DynamicFeeTxType] = struct{}{} + s.txtypes.set(DynamicFeeTxType) } if fork >= forks.Cancun { - s.txtypes[BlobTxType] = struct{}{} + s.txtypes.set(BlobTxType) } if fork >= forks.Prague { - s.txtypes[SetCodeTxType] = struct{}{} + s.txtypes.set(SetCodeTxType) } return s } @@ -228,7 +240,7 @@ func (s *modernSigner) ChainID() *big.Int { func (s *modernSigner) Equal(s2 Signer) bool { other, ok := s2.(*modernSigner) - return ok && s.chainID.Cmp(other.chainID) == 0 && maps.Equal(s.txtypes, other.txtypes) && s.legacy.Equal(other.legacy) + return ok && s.chainID.Cmp(other.chainID) == 0 && s.txtypes == other.txtypes && s.legacy.Equal(other.legacy) } func (s *modernSigner) Hash(tx *Transaction) common.Hash { @@ -236,8 +248,7 @@ func (s *modernSigner) Hash(tx *Transaction) common.Hash { } func (s *modernSigner) supportsType(txtype byte) bool { - _, ok := s.txtypes[txtype] - return ok + return s.txtypes.has(txtype) } func (s *modernSigner) Sender(tx *Transaction) (common.Address, error) { diff --git a/core/types/transaction_signing_test.go b/core/types/transaction_signing_test.go index 
b66577f7ed..02a65fda13 100644 --- a/core/types/transaction_signing_test.go +++ b/core/types/transaction_signing_test.go @@ -25,6 +25,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/params" + "github.com/ethereum/go-ethereum/params/forks" "github.com/ethereum/go-ethereum/rlp" ) @@ -188,3 +189,14 @@ func createTestLegacyTxInner() *LegacyTx { Data: nil, } } + +func Benchmark_modernSigner_Equal(b *testing.B) { + signer1 := newModernSigner(big.NewInt(1), forks.Amsterdam) + signer2 := newModernSigner(big.NewInt(1), forks.Amsterdam) + + for b.Loop() { + if !signer1.Equal(signer2) { + b.Fatal("expected signers to be equal") + } + } +} From b1db341f7edeb312ed2c8ae2267851bdeb0ff696 Mon Sep 17 00:00:00 2001 From: Delweng Date: Tue, 28 Oct 2025 13:53:42 +0800 Subject: [PATCH 082/277] core: refine condition for using legacy chain freezer directory (#33032) --- common/path.go | 11 +++++++++++ core/rawdb/database.go | 2 +- node/defaults.go | 13 ++----------- 3 files changed, 14 insertions(+), 12 deletions(-) diff --git a/common/path.go b/common/path.go index 49c6a5efc2..19f24d426a 100644 --- a/common/path.go +++ b/common/path.go @@ -37,3 +37,14 @@ func AbsolutePath(datadir string, filename string) string { } return filepath.Join(datadir, filename) } + +// IsNonEmptyDir checks if a directory exists and is non-empty. 
+func IsNonEmptyDir(dir string) bool { + f, err := os.Open(dir) + if err != nil { + return false + } + defer f.Close() + names, _ := f.Readdirnames(1) + return len(names) > 0 +} diff --git a/core/rawdb/database.go b/core/rawdb/database.go index 724c90ead6..29483baa5f 100644 --- a/core/rawdb/database.go +++ b/core/rawdb/database.go @@ -177,7 +177,7 @@ func resolveChainFreezerDir(ancient string) string { // - chain freezer exists in legacy location (root ancient folder) freezer := filepath.Join(ancient, ChainFreezerName) if !common.FileExist(freezer) { - if !common.FileExist(ancient) { + if !common.FileExist(ancient) || !common.IsNonEmptyDir(ancient) { // The entire ancient store is not initialized, still use the sub // folder for initialization. } else { diff --git a/node/defaults.go b/node/defaults.go index 307d9e186a..6c643e2b54 100644 --- a/node/defaults.go +++ b/node/defaults.go @@ -22,6 +22,7 @@ import ( "path/filepath" "runtime" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/p2p" "github.com/ethereum/go-ethereum/p2p/nat" "github.com/ethereum/go-ethereum/rpc" @@ -90,7 +91,7 @@ func DefaultDataDir() string { // is non-empty, use it, otherwise DTRT and check %LOCALAPPDATA%. 
fallback := filepath.Join(home, "AppData", "Roaming", "Ethereum") appdata := windowsAppData() - if appdata == "" || isNonEmptyDir(fallback) { + if appdata == "" || common.IsNonEmptyDir(fallback) { return fallback } return filepath.Join(appdata, "Ethereum") @@ -113,16 +114,6 @@ func windowsAppData() string { return v } -func isNonEmptyDir(dir string) bool { - f, err := os.Open(dir) - if err != nil { - return false - } - names, _ := f.Readdir(1) - f.Close() - return len(names) > 0 -} - func homeDir() string { if home := os.Getenv("HOME"); home != "" { return home From 59d08c66ff31216fdb21834b2b3a47e5e8582f0b Mon Sep 17 00:00:00 2001 From: anim001k <140460766+anim001k@users.noreply.github.com> Date: Tue, 28 Oct 2025 12:34:14 +0100 Subject: [PATCH 083/277] internal/jsre: pass correct args to setTimeout/setInterval callbacks (#32936) ## Description - Summary: Correct the JS timer callback argument forwarding to match standard JS semantics. - What changed: In `internal/jsre/jsre.go`, the callback is now invoked with only the arguments after the callback and delay. - Why: Previously, the callback received the function and delay as parameters, causing unexpected behavior and logic bugs for consumers. --- internal/jsre/jsre.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/jsre/jsre.go b/internal/jsre/jsre.go index 0dfeae8e1b..4512115f16 100644 --- a/internal/jsre/jsre.go +++ b/internal/jsre/jsre.go @@ -201,7 +201,7 @@ loop: if !isFunc { panic(re.vm.ToValue("js error: timer/timeout callback is not a function")) } - call(goja.Null(), timer.call.Arguments...) + call(goja.Null(), timer.call.Arguments[2:]...) 
_, inreg := registry[timer] // when clearInterval is called from within the callback don't reset it if timer.interval && inreg { From 739f6f46a25f4ba3995c664a0702c736fb1067af Mon Sep 17 00:00:00 2001 From: Guillaume Ballet <3272758+gballet@users.noreply.github.com> Date: Tue, 28 Oct 2025 13:56:44 +0100 Subject: [PATCH 084/277] .github: add 32-bit CI targets (#32911) This adds two new CI targets. One is for building all supported keeper executables, the other is for running unit tests on 32-bit Linux. --------- Co-authored-by: Felix Lange --- .github/workflows/go.yml | 41 ++++++++++++++++++++++++++++++++++++++++ appveyor.yml | 2 +- beacon/params/config.go | 4 ++++ build/ci.go | 21 ++++++++++---------- 4 files changed, 56 insertions(+), 12 deletions(-) diff --git a/.github/workflows/go.yml b/.github/workflows/go.yml index b8cf7f75e0..50c9fe7f75 100644 --- a/.github/workflows/go.yml +++ b/.github/workflows/go.yml @@ -34,6 +34,47 @@ jobs: go run build/ci.go check_generate go run build/ci.go check_baddeps + keeper: + name: Keeper Builds + needs: test + runs-on: [self-hosted-ghr, size-l-x64] + steps: + - uses: actions/checkout@v4 + with: + submodules: true + + - name: Set up Go + uses: actions/setup-go@v5 + with: + go-version: '1.25' + cache: false + + - name: Build + run: go run build/ci.go keeper + + test-32bit: + name: "32bit tests" + needs: test + runs-on: [self-hosted-ghr, size-l-x64] + steps: + - uses: actions/checkout@v4 + with: + submodules: false + + - name: Set up Go + uses: actions/setup-go@v5 + with: + go-version: '1.25' + cache: false + + - name: Install cross toolchain + run: | + apt-get update + apt-get -yq --no-install-suggests --no-install-recommends install gcc-multilib + + - name: Build + run: go run build/ci.go test -arch 386 -short -p 8 + test: name: Test needs: lint diff --git a/appveyor.yml b/appveyor.yml index 8dce7f30a2..aeafcfc838 100644 --- a/appveyor.yml +++ b/appveyor.yml @@ -36,4 +36,4 @@ for: - go run build/ci.go archive -arch %GETH_ARCH% 
-type zip -signer WINDOWS_SIGNING_KEY -upload gethstore/builds - go run build/ci.go nsis -arch %GETH_ARCH% -signer WINDOWS_SIGNING_KEY -upload gethstore/builds test_script: - - go run build/ci.go test -dlgo -arch %GETH_ARCH% -cc %GETH_CC% -short -skip-spectests + - go run build/ci.go test -dlgo -arch %GETH_ARCH% -cc %GETH_CC% -short diff --git a/beacon/params/config.go b/beacon/params/config.go index 492ee53308..b01b739e07 100644 --- a/beacon/params/config.go +++ b/beacon/params/config.go @@ -108,6 +108,8 @@ func (c *ChainConfig) LoadForks(file []byte) error { switch version := value.(type) { case int: versions[name] = new(big.Int).SetUint64(uint64(version)).FillBytes(make([]byte, 4)) + case int64: + versions[name] = new(big.Int).SetUint64(uint64(version)).FillBytes(make([]byte, 4)) case uint64: versions[name] = new(big.Int).SetUint64(version).FillBytes(make([]byte, 4)) case string: @@ -125,6 +127,8 @@ func (c *ChainConfig) LoadForks(file []byte) error { switch epoch := value.(type) { case int: epochs[name] = uint64(epoch) + case int64: + epochs[name] = uint64(epoch) case uint64: epochs[name] = epoch case string: diff --git a/build/ci.go b/build/ci.go index 156626a82d..59c948acb3 100644 --- a/build/ci.go +++ b/build/ci.go @@ -348,16 +348,15 @@ func buildFlags(env build.Environment, staticLinking bool, buildTags []string) ( func doTest(cmdline []string) { var ( - dlgo = flag.Bool("dlgo", false, "Download Go and build with it") - arch = flag.String("arch", "", "Run tests for given architecture") - cc = flag.String("cc", "", "Sets C compiler binary") - coverage = flag.Bool("coverage", false, "Whether to record code coverage") - verbose = flag.Bool("v", false, "Whether to log verbosely") - race = flag.Bool("race", false, "Execute the race detector") - short = flag.Bool("short", false, "Pass the 'short'-flag to go test") - cachedir = flag.String("cachedir", "./build/cache", "directory for caching downloads") - skipspectests = flag.Bool("skip-spectests", false, "Skip 
downloading execution-spec-tests fixtures") - threads = flag.Int("p", 1, "Number of CPU threads to use for testing") + dlgo = flag.Bool("dlgo", false, "Download Go and build with it") + arch = flag.String("arch", "", "Run tests for given architecture") + cc = flag.String("cc", "", "Sets C compiler binary") + coverage = flag.Bool("coverage", false, "Whether to record code coverage") + verbose = flag.Bool("v", false, "Whether to log verbosely") + race = flag.Bool("race", false, "Execute the race detector") + short = flag.Bool("short", false, "Pass the 'short'-flag to go test") + cachedir = flag.String("cachedir", "./build/cache", "directory for caching downloads") + threads = flag.Int("p", 1, "Number of CPU threads to use for testing") ) flag.CommandLine.Parse(cmdline) @@ -365,7 +364,7 @@ func doTest(cmdline []string) { csdb := download.MustLoadChecksums("build/checksums.txt") // Get test fixtures. - if !*skipspectests { + if !*short { downloadSpecTestFixtures(csdb, *cachedir) } From ae37b4928c4eb594c237c41587760bee35799d8d Mon Sep 17 00:00:00 2001 From: Austin Larson <78000745+alarso16@users.noreply.github.com> Date: Wed, 29 Oct 2025 02:59:45 -0400 Subject: [PATCH 085/277] accounts/abi/bind/v2: fix error assertion in test (#33041) --- accounts/abi/bind/v2/util_test.go | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/accounts/abi/bind/v2/util_test.go b/accounts/abi/bind/v2/util_test.go index a9f5b4035c..5beb0a4fae 100644 --- a/accounts/abi/bind/v2/util_test.go +++ b/accounts/abi/bind/v2/util_test.go @@ -144,10 +144,9 @@ func TestWaitDeployedCornerCases(t *testing.T) { done := make(chan struct{}) go func() { defer close(done) - want := errors.New("context canceled") _, err := bind.WaitDeployed(ctx, backend.Client(), tx.Hash()) - if err == nil || errors.Is(want, err) { - t.Errorf("error mismatch: want %v, got %v", want, err) + if !errors.Is(err, context.Canceled) { + t.Errorf("error mismatch: want %v, got %v", context.Canceled, err) } }() From 
5dd0fe2f5380538733661fb5926c07d4e9f45546 Mon Sep 17 00:00:00 2001 From: Delweng Date: Wed, 29 Oct 2025 17:34:19 +0800 Subject: [PATCH 086/277] p2p: cleanup v4 if v5 failed (#33005) Clean the previous resource (v4) if the latter (v5) failed. --- p2p/server.go | 5 +++++ p2p/server_test.go | 27 +++++++++++++++++++++++++++ 2 files changed, 32 insertions(+) diff --git a/p2p/server.go b/p2p/server.go index ddd4f5d072..10c855f1c4 100644 --- a/p2p/server.go +++ b/p2p/server.go @@ -490,6 +490,11 @@ func (srv *Server) setupDiscovery() error { } srv.discv5, err = discover.ListenV5(sconn, srv.localnode, cfg) if err != nil { + // Clean up v4 if v5 setup fails. + if srv.discv4 != nil { + srv.discv4.Close() + srv.discv4 = nil + } return err } } diff --git a/p2p/server_test.go b/p2p/server_test.go index d42926cf4c..7bc7379099 100644 --- a/p2p/server_test.go +++ b/p2p/server_test.go @@ -579,6 +579,33 @@ func TestServerInboundThrottle(t *testing.T) { } } +func TestServerDiscoveryV5FailureRollsBackV4(t *testing.T) { + badBootstrap := enode.NewV4(&newkey().PublicKey, net.ParseIP("127.0.0.1"), 30303, 0) // invalid V5 of a V4 node + srv := &Server{ + Config: Config{ + PrivateKey: newkey(), + ListenAddr: "", + DiscAddr: "127.0.0.1:0", + MaxPeers: 5, + DiscoveryV4: true, + DiscoveryV5: true, + BootstrapNodesV5: []*enode.Node{badBootstrap}, + Logger: testlog.Logger(t, log.LvlTrace), + }, + } + err := srv.Start() + if err == nil { + t.Fatal("expected discovery v5 startup failure") + } + if !strings.Contains(err.Error(), "bad bootstrap node") { + t.Fatalf("unexpected error: %v", err) + } + if srv.DiscoveryV4() != nil { + t.Fatal("discovery v4 not cleaned after failure") + } + srv.Stop() +} + func listenFakeAddr(network, laddr string, remoteAddr net.Addr) (net.Listener, error) { l, err := net.Listen(network, laddr) if err == nil { From ccacbd1e3777893d4ba9add4c452530e29a3830b Mon Sep 17 00:00:00 2001 From: Coder <161350311+MamunC0der@users.noreply.github.com> Date: Thu, 30 Oct 2025 02:20:07 
+0100 Subject: [PATCH 087/277] common: simplify FileExist helper (#32969) --- common/path.go | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/common/path.go b/common/path.go index 19f24d426a..841946348e 100644 --- a/common/path.go +++ b/common/path.go @@ -17,6 +17,8 @@ package common import ( + "errors" + "io/fs" "os" "path/filepath" ) @@ -24,10 +26,7 @@ import ( // FileExist checks if a file exists at filePath. func FileExist(filePath string) bool { _, err := os.Stat(filePath) - if err != nil && os.IsNotExist(err) { - return false - } - return true + return !errors.Is(err, fs.ErrNotExist) } // AbsolutePath returns datadir + filename, or filename if it is absolute. From 243407a3aa7d2dd5c426eccb45a8571eb54dd100 Mon Sep 17 00:00:00 2001 From: wit liu Date: Thu, 30 Oct 2025 15:39:02 +0800 Subject: [PATCH 088/277] eth/downloader: fix incorrect waitgroup in test `XTestDelivery` (#33047) --- eth/downloader/queue_test.go | 1 + 1 file changed, 1 insertion(+) diff --git a/eth/downloader/queue_test.go b/eth/downloader/queue_test.go index 120e3f9d2d..ca71a769de 100644 --- a/eth/downloader/queue_test.go +++ b/eth/downloader/queue_test.go @@ -351,6 +351,7 @@ func XTestDelivery(t *testing.T) { } } }() + wg.Add(1) go func() { defer wg.Done() // reserve receiptfetch From e6d34c1fee407e77b1ea573346336a4b57c94a8b Mon Sep 17 00:00:00 2001 From: hero5512 Date: Fri, 31 Oct 2025 13:14:52 -0400 Subject: [PATCH 089/277] eth/tracers: fix prestateTracer for EIP-6780 SELFDESTRUCT (#33050) fix https://github.com/ethereum/go-ethereum/issues/33049 --- .../suicide_cancun.json | 101 ++++++++++++++++++ eth/tracers/native/prestate.go | 10 +- 2 files changed, 110 insertions(+), 1 deletion(-) create mode 100644 eth/tracers/internal/tracetest/testdata/prestate_tracer_with_diff_mode/suicide_cancun.json diff --git a/eth/tracers/internal/tracetest/testdata/prestate_tracer_with_diff_mode/suicide_cancun.json 
b/eth/tracers/internal/tracetest/testdata/prestate_tracer_with_diff_mode/suicide_cancun.json new file mode 100644 index 0000000000..cdabe66913 --- /dev/null +++ b/eth/tracers/internal/tracetest/testdata/prestate_tracer_with_diff_mode/suicide_cancun.json @@ -0,0 +1,101 @@ +{ + "context": { + "difficulty": "0", + "gasLimit": "8000000", + "miner": "0x0000000000000000000000000000000000000000", + "number": "1", + "timestamp": "1000", + "baseFeePerGas": "7" + }, + "genesis": { + "alloc": { + "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b": { + "balance": "0x10000000000000000", + "nonce": "0", + "code": "0x", + "storage": {} + }, + "0x1111111111111111111111111111111111111111": { + "balance": "0x0", + "nonce": "0", + "code": "0x", + "storage": {} + }, + "0x2222222222222222222222222222222222222222": { + "balance": "0xde0b6b3a7640000", + "nonce": "1", + "code": "0x6099600155731111111111111111111111111111111111111111ff", + "storage": { + "0x0000000000000000000000000000000000000000000000000000000000000001": "0x000000000000000000000000000000000000000000000000000000000000abcd", + "0x0000000000000000000000000000000000000000000000000000000000000002": "0x0000000000000000000000000000000000000000000000000000000000001234" + } + } + }, + "config": { + "chainId": 1, + "homesteadBlock": 0, + "eip150Block": 0, + "eip155Block": 0, + "eip158Block": 0, + "byzantiumBlock": 0, + "constantinopleBlock": 0, + "petersburgBlock": 0, + "istanbulBlock": 0, + "berlinBlock": 0, + "londonBlock": 0, + "mergeNetsplitBlock": 0, + "shanghaiTime": 0, + "cancunTime": 0, + "terminalTotalDifficulty": 0, + "terminalTotalDifficultyPassed": true + }, + "difficulty": "0", + "extraData": "0x", + "gasLimit": "8000000", + "hash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "miner": "0x0000000000000000000000000000000000000000", + "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "nonce": "0x0000000000000000", + "number": "0", + "stateRoot": 
"0x0000000000000000000000000000000000000000000000000000000000000000", + "timestamp": "0" + }, + "input": "0xf860800a830186a094222222222222222222222222222222222222222280801ba0c4829400221936e8016721406f84b4710ead5608f15c785a3cedc20a7aebaab2a033e8e6e12cc432098b5ce8a409691f977867249073a3fc7804e8676c4d159475", + "tracerConfig": { + "diffMode": true + }, + "result": { + "pre": { + "0x2222222222222222222222222222222222222222": { + "balance": "0xde0b6b3a7640000", + "nonce": 1, + "code": "0x6099600155731111111111111111111111111111111111111111ff", + "codeHash": "0x701bdb1d43777a9304905a100f758955d130e09c8e86d97e3f6becccdc001048", + "storage": { + "0x0000000000000000000000000000000000000000000000000000000000000001": "0x000000000000000000000000000000000000000000000000000000000000abcd" + } + }, + "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b": { + "balance": "0x10000000000000000" + } + }, + "post": { + "0x0000000000000000000000000000000000000000": { + "balance": "0x2aed3" + }, + "0x1111111111111111111111111111111111111111": { + "balance": "0xde0b6b3a7640000" + }, + "0x2222222222222222222222222222222222222222": { + "balance": "0x0", + "storage": { + "0x0000000000000000000000000000000000000000000000000000000000000001": "0x0000000000000000000000000000000000000000000000000000000000000099" + } + }, + "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b": { + "balance": "0xfffffffffff70e96", + "nonce": 1 + } + } + } +} diff --git a/eth/tracers/native/prestate.go b/eth/tracers/native/prestate.go index 49679d312f..2e446f729b 100644 --- a/eth/tracers/native/prestate.go +++ b/eth/tracers/native/prestate.go @@ -131,7 +131,15 @@ func (t *prestateTracer) OnOpcode(pc uint64, opcode byte, gas, cost uint64, scop addr := common.Address(stackData[stackLen-1].Bytes20()) t.lookupAccount(addr) if op == vm.SELFDESTRUCT { - t.deleted[caller] = true + if t.chainConfig.IsCancun(t.env.BlockNumber, t.env.Time) { + // EIP-6780: only delete if created in same transaction + if t.created[caller] { + t.deleted[caller] = 
true + } + } else { + // Pre-EIP-6780: always delete + t.deleted[caller] = true + } } case stackLen >= 5 && (op == vm.DELEGATECALL || op == vm.CALL || op == vm.STATICCALL || op == vm.CALLCODE): addr := common.Address(stackData[stackLen-2].Bytes20()) From 18a902799e50b8c0db94653bdae436573e4308a9 Mon Sep 17 00:00:00 2001 From: maradini77 <140460067+maradini77@users.noreply.github.com> Date: Sat, 1 Nov 2025 06:17:45 +0100 Subject: [PATCH 090/277] common: fix duration comparison in PrettyAge (#33064) This pull request updates `PrettyAge.String` so that the age formatter now treats exact unit boundaries (like a full day or week) as that unit instead of spilling into smaller components, keeping duration output aligned with human expectations. --- common/format.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/common/format.go b/common/format.go index 7af41f52d5..31e08831f5 100644 --- a/common/format.go +++ b/common/format.go @@ -69,7 +69,7 @@ func (t PrettyAge) String() string { result, prec := "", 0 for _, unit := range ageUnits { - if diff > unit.Size { + if diff >= unit.Size { result = fmt.Sprintf("%s%d%s", result, diff/unit.Size, unit.Symbol) diff %= unit.Size From 28c59b7a760f498c51604791791e194853ba36b6 Mon Sep 17 00:00:00 2001 From: rjl493456442 Date: Mon, 3 Nov 2025 21:11:14 +0800 Subject: [PATCH 091/277] core/rawdb: fix db inspector by supporting trienode history (#33087) --- core/rawdb/ancient_utils.go | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/core/rawdb/ancient_utils.go b/core/rawdb/ancient_utils.go index f4909d86e7..b940d91040 100644 --- a/core/rawdb/ancient_utils.go +++ b/core/rawdb/ancient_utils.go @@ -105,6 +105,23 @@ func inspectFreezers(db ethdb.Database) ([]freezerInfo, error) { } infos = append(infos, info) + case MerkleTrienodeFreezerName, VerkleTrienodeFreezerName: + datadir, err := db.AncientDatadir() + if err != nil { + return nil, err + } + f, err := NewTrienodeFreezer(datadir, freezer == 
VerkleTrienodeFreezerName, true) + if err != nil { + continue // might be possible the trienode freezer is not existent + } + defer f.Close() + + info, err := inspect(freezer, trienodeFreezerTableConfigs, f) + if err != nil { + return nil, err + } + infos = append(infos, info) + default: return nil, fmt.Errorf("unknown freezer, supported ones: %v", freezers) } From 025072427e78b3af3e9a8ddcc64007a38dd374ed Mon Sep 17 00:00:00 2001 From: Marius van der Wijden Date: Mon, 3 Nov 2025 17:41:22 +0100 Subject: [PATCH 092/277] params: set osaka and BPO1 & BPO2 mainnet dates (#33063) Sets the fusaka, bpo1, bpo2 timestamps for mainnet see: https://notes.ethereum.org/@bbusa/fusaka-bpo-timeline --- core/forkid/forkid_test.go | 19 ++++++++++++++----- core/txpool/blobpool/blobpool_test.go | 6 +++--- params/config.go | 6 ++++++ 3 files changed, 23 insertions(+), 8 deletions(-) diff --git a/core/forkid/forkid_test.go b/core/forkid/forkid_test.go index dc6e6fe817..c78ff23cd6 100644 --- a/core/forkid/forkid_test.go +++ b/core/forkid/forkid_test.go @@ -76,10 +76,16 @@ func TestCreation(t *testing.T) { {20000000, 1681338454, ID{Hash: checksumToBytes(0xf0afd0e3), Next: 1681338455}}, // Last Gray Glacier block {20000000, 1681338455, ID{Hash: checksumToBytes(0xdce96c2d), Next: 1710338135}}, // First Shanghai block {30000000, 1710338134, ID{Hash: checksumToBytes(0xdce96c2d), Next: 1710338135}}, // Last Shanghai block - {40000000, 1710338135, ID{Hash: checksumToBytes(0x9f3d2254), Next: 1746612311}}, // First Cancun block + {30000000, 1710338135, ID{Hash: checksumToBytes(0x9f3d2254), Next: 1746612311}}, // First Cancun block {30000000, 1746022486, ID{Hash: checksumToBytes(0x9f3d2254), Next: 1746612311}}, // Last Cancun block - {30000000, 1746612311, ID{Hash: checksumToBytes(0xc376cf8b), Next: 0}}, // First Prague block - {50000000, 2000000000, ID{Hash: checksumToBytes(0xc376cf8b), Next: 0}}, // Future Prague block + {30000000, 1746612311, ID{Hash: checksumToBytes(0xc376cf8b), Next: 
1764798551}}, // First Prague block + {30000000, 1764798550, ID{Hash: checksumToBytes(0xc376cf8b), Next: 1764798551}}, // Last Prague block + {30000000, 1764798551, ID{Hash: checksumToBytes(0x5167e2a6), Next: 1765290071}}, // First Osaka block + {30000000, 1765290070, ID{Hash: checksumToBytes(0x5167e2a6), Next: 1765290071}}, // Last Osaka block + {30000000, 1765290071, ID{Hash: checksumToBytes(0xcba2a1c0), Next: 1767747671}}, // First BPO1 block + {30000000, 1767747670, ID{Hash: checksumToBytes(0xcba2a1c0), Next: 1767747671}}, // Last BPO1 block + {30000000, 1767747671, ID{Hash: checksumToBytes(0x07c9462e), Next: 0}}, // First BPO2 block + {50000000, 2000000000, ID{Hash: checksumToBytes(0x07c9462e), Next: 0}}, // Future BPO2 block }, }, // Sepolia test cases @@ -162,6 +168,9 @@ func TestValidation(t *testing.T) { legacyConfig.ShanghaiTime = nil legacyConfig.CancunTime = nil legacyConfig.PragueTime = nil + legacyConfig.OsakaTime = nil + legacyConfig.BPO1Time = nil + legacyConfig.BPO2Time = nil tests := []struct { config *params.ChainConfig @@ -361,11 +370,11 @@ func TestValidation(t *testing.T) { // Local is mainnet Shanghai, remote is random Shanghai. {params.MainnetChainConfig, 20000000, 1681338455, ID{Hash: checksumToBytes(0x12345678), Next: 0}, ErrLocalIncompatibleOrStale}, - // Local is mainnet Prague, far in the future. Remote announces Gopherium (non existing fork) + // Local is mainnet BPO2, far in the future. Remote announces Gopherium (non existing fork) // at some future timestamp 8888888888, for itself, but past block for local. Local is incompatible. // // This case detects non-upgraded nodes with majority hash power (typical Ropsten mess). - {params.MainnetChainConfig, 88888888, 8888888888, ID{Hash: checksumToBytes(0xc376cf8b), Next: 8888888888}, ErrLocalIncompatibleOrStale}, + {params.MainnetChainConfig, 88888888, 8888888888, ID{Hash: checksumToBytes(0x07c9462e), Next: 8888888888}, ErrLocalIncompatibleOrStale}, // Local is mainnet Shanghai. 
Remote is also in Shanghai, but announces Gopherium (non existing // fork) at timestamp 1668000000, before Cancun. Local is incompatible. diff --git a/core/txpool/blobpool/blobpool_test.go b/core/txpool/blobpool/blobpool_test.go index f0f00c8055..f7d8ca209b 100644 --- a/core/txpool/blobpool/blobpool_test.go +++ b/core/txpool/blobpool/blobpool_test.go @@ -992,7 +992,7 @@ func TestOpenCap(t *testing.T) { storage := t.TempDir() os.MkdirAll(filepath.Join(storage, pendingTransactionStore), 0700) - store, _ := billy.Open(billy.Options{Path: filepath.Join(storage, pendingTransactionStore)}, newSlotter(testMaxBlobsPerBlock), nil) + store, _ := billy.Open(billy.Options{Path: filepath.Join(storage, pendingTransactionStore)}, newSlotterEIP7594(testMaxBlobsPerBlock), nil) // Insert a few transactions from a few accounts var ( @@ -1014,7 +1014,7 @@ func TestOpenCap(t *testing.T) { keep = []common.Address{addr1, addr3} drop = []common.Address{addr2} - size = uint64(2 * (txAvgSize + blobSize)) + size = 2 * (txAvgSize + blobSize + uint64(txBlobOverhead)) ) store.Put(blob1) store.Put(blob2) @@ -1023,7 +1023,7 @@ func TestOpenCap(t *testing.T) { // Verify pool capping twice: first by reducing the data cap, then restarting // with a high cap to ensure everything was persisted previously - for _, datacap := range []uint64{2 * (txAvgSize + blobSize), 100 * (txAvgSize + blobSize)} { + for _, datacap := range []uint64{2 * (txAvgSize + blobSize + uint64(txBlobOverhead)), 1000 * (txAvgSize + blobSize + uint64(txBlobOverhead))} { // Create a blob pool out of the pre-seeded data, but cap it to 2 blob transaction statedb, _ := state.New(types.EmptyRootHash, state.NewDatabaseForTesting()) statedb.AddBalance(addr1, uint256.NewInt(1_000_000_000), tracing.BalanceChangeUnspecified) diff --git a/params/config.go b/params/config.go index 06288575ae..c5dfad1cd3 100644 --- a/params/config.go +++ b/params/config.go @@ -61,11 +61,17 @@ var ( ShanghaiTime: newUint64(1681338455), CancunTime: 
newUint64(1710338135), PragueTime: newUint64(1746612311), + OsakaTime: newUint64(1764798551), + BPO1Time: newUint64(1765290071), + BPO2Time: newUint64(1767747671), DepositContractAddress: common.HexToAddress("0x00000000219ab540356cbb839cbe05303d7705fa"), Ethash: new(EthashConfig), BlobScheduleConfig: &BlobScheduleConfig{ Cancun: DefaultCancunBlobConfig, Prague: DefaultPragueBlobConfig, + Osaka: DefaultOsakaBlobConfig, + BPO1: DefaultBPO1BlobConfig, + BPO2: DefaultBPO2BlobConfig, }, } // HoleskyChainConfig contains the chain parameters to run a node on the Holesky test network. From 044828e6606e3368368884e249256a093bae4a6d Mon Sep 17 00:00:00 2001 From: Felix Lange Date: Mon, 3 Nov 2025 17:45:47 +0100 Subject: [PATCH 093/277] version: release go-ethereum v1.16.6 --- version/version.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/version/version.go b/version/version.go index ead2d04f2a..77089a5a86 100644 --- a/version/version.go +++ b/version/version.go @@ -17,8 +17,8 @@ package version const ( - Major = 1 // Major version component of the current release - Minor = 16 // Minor version component of the current release - Patch = 6 // Patch version component of the current release - Meta = "unstable" // Version metadata to append to the version string + Major = 1 // Major version component of the current release + Minor = 16 // Minor version component of the current release + Patch = 6 // Patch version component of the current release + Meta = "stable" // Version metadata to append to the version string ) From 5b77af394edd7d7384fbc7f112c8c183f86dcd4b Mon Sep 17 00:00:00 2001 From: Felix Lange Date: Mon, 3 Nov 2025 17:47:42 +0100 Subject: [PATCH 094/277] version: begin v1.16.7 release cycle --- version/version.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/version/version.go b/version/version.go index 77089a5a86..d4248013b0 100644 --- a/version/version.go +++ b/version/version.go @@ -17,8 +17,8 @@ package version 
const ( - Major = 1 // Major version component of the current release - Minor = 16 // Minor version component of the current release - Patch = 6 // Patch version component of the current release - Meta = "stable" // Version metadata to append to the version string + Major = 1 // Major version component of the current release + Minor = 16 // Minor version component of the current release + Patch = 7 // Patch version component of the current release + Meta = "unstable" // Version metadata to append to the version string ) From 653f8d499473c99e2e8ada6d3adea6ec95e97a69 Mon Sep 17 00:00:00 2001 From: Marius van der Wijden Date: Tue, 4 Nov 2025 12:48:13 +0100 Subject: [PATCH 095/277] go.mod: update to c-kzg v2.1.5 (#33093) We unfortunately missed this update for the Geth v1.16.6 release, but it is critical. --- cmd/keeper/go.mod | 2 +- cmd/keeper/go.sum | 4 ++-- go.mod | 2 +- go.sum | 4 ++-- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/cmd/keeper/go.mod b/cmd/keeper/go.mod index 2b12297a7a..9486347b1f 100644 --- a/cmd/keeper/go.mod +++ b/cmd/keeper/go.mod @@ -18,7 +18,7 @@ require ( github.com/deckarep/golang-set/v2 v2.6.0 // indirect github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1 // indirect github.com/emicklei/dot v1.6.2 // indirect - github.com/ethereum/c-kzg-4844/v2 v2.1.3 // indirect + github.com/ethereum/c-kzg-4844/v2 v2.1.5 // indirect github.com/ethereum/go-bigmodexpfix v0.0.0-20250911101455-f9e208c548ab // indirect github.com/ethereum/go-verkle v0.2.2 // indirect github.com/ferranbt/fastssz v0.1.4 // indirect diff --git a/cmd/keeper/go.sum b/cmd/keeper/go.sum index b280a368d0..ad4c98c4b3 100644 --- a/cmd/keeper/go.sum +++ b/cmd/keeper/go.sum @@ -42,8 +42,8 @@ github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1 h1:YLtO71vCjJRCBcrPMtQ9nqBsqpA1 github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1/go.mod h1:hyedUtir6IdtD/7lIxGeCxkaw7y45JueMRL4DIyJDKs= github.com/emicklei/dot v1.6.2 h1:08GN+DD79cy/tzN6uLCT84+2Wk9u+wvqP+Hkx/dIR8A= github.com/emicklei/dot 
v1.6.2/go.mod h1:DeV7GvQtIw4h2u73RKBkkFdvVAz0D9fzeJrgPW6gy/s= -github.com/ethereum/c-kzg-4844/v2 v2.1.3 h1:DQ21UU0VSsuGy8+pcMJHDS0CV1bKmJmxsJYK8l3MiLU= -github.com/ethereum/c-kzg-4844/v2 v2.1.3/go.mod h1:fyNcYI/yAuLWJxf4uzVtS8VDKeoAaRM8G/+ADz/pRdA= +github.com/ethereum/c-kzg-4844/v2 v2.1.5 h1:aVtoLK5xwJ6c5RiqO8g8ptJ5KU+2Hdquf6G3aXiHh5s= +github.com/ethereum/c-kzg-4844/v2 v2.1.5/go.mod h1:u59hRTTah4Co6i9fDWtiCjTrblJv0UwsqZKCc0GfgUs= github.com/ethereum/go-bigmodexpfix v0.0.0-20250911101455-f9e208c548ab h1:rvv6MJhy07IMfEKuARQ9TKojGqLVNxQajaXEp/BoqSk= github.com/ethereum/go-bigmodexpfix v0.0.0-20250911101455-f9e208c548ab/go.mod h1:IuLm4IsPipXKF7CW5Lzf68PIbZ5yl7FFd74l/E0o9A8= github.com/ethereum/go-verkle v0.2.2 h1:I2W0WjnrFUIzzVPwm8ykY+7pL2d4VhlsePn4j7cnFk8= diff --git a/go.mod b/go.mod index ae5e4cc114..3590a54929 100644 --- a/go.mod +++ b/go.mod @@ -22,7 +22,7 @@ require ( github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1 github.com/donovanhide/eventsource v0.0.0-20210830082556-c59027999da0 github.com/dop251/goja v0.0.0-20230605162241-28ee0ee714f3 - github.com/ethereum/c-kzg-4844/v2 v2.1.3 + github.com/ethereum/c-kzg-4844/v2 v2.1.5 github.com/ethereum/go-bigmodexpfix v0.0.0-20250911101455-f9e208c548ab github.com/ethereum/go-verkle v0.2.2 github.com/fatih/color v1.16.0 diff --git a/go.sum b/go.sum index 8122f4b548..6ecb0b7ec3 100644 --- a/go.sum +++ b/go.sum @@ -113,8 +113,8 @@ github.com/dop251/goja_nodejs v0.0.0-20210225215109-d91c329300e7/go.mod h1:hn7BA github.com/dop251/goja_nodejs v0.0.0-20211022123610-8dd9abb0616d/go.mod h1:DngW8aVqWbuLRMHItjPUyqdj+HWPvnQe8V8y1nDpIbM= github.com/emicklei/dot v1.6.2 h1:08GN+DD79cy/tzN6uLCT84+2Wk9u+wvqP+Hkx/dIR8A= github.com/emicklei/dot v1.6.2/go.mod h1:DeV7GvQtIw4h2u73RKBkkFdvVAz0D9fzeJrgPW6gy/s= -github.com/ethereum/c-kzg-4844/v2 v2.1.3 h1:DQ21UU0VSsuGy8+pcMJHDS0CV1bKmJmxsJYK8l3MiLU= -github.com/ethereum/c-kzg-4844/v2 v2.1.3/go.mod h1:fyNcYI/yAuLWJxf4uzVtS8VDKeoAaRM8G/+ADz/pRdA= +github.com/ethereum/c-kzg-4844/v2 v2.1.5 
h1:aVtoLK5xwJ6c5RiqO8g8ptJ5KU+2Hdquf6G3aXiHh5s= +github.com/ethereum/c-kzg-4844/v2 v2.1.5/go.mod h1:u59hRTTah4Co6i9fDWtiCjTrblJv0UwsqZKCc0GfgUs= github.com/ethereum/go-bigmodexpfix v0.0.0-20250911101455-f9e208c548ab h1:rvv6MJhy07IMfEKuARQ9TKojGqLVNxQajaXEp/BoqSk= github.com/ethereum/go-bigmodexpfix v0.0.0-20250911101455-f9e208c548ab/go.mod h1:IuLm4IsPipXKF7CW5Lzf68PIbZ5yl7FFd74l/E0o9A8= github.com/ethereum/go-verkle v0.2.2 h1:I2W0WjnrFUIzzVPwm8ykY+7pL2d4VhlsePn4j7cnFk8= From 07129d21c0fa0aa8b6f7426344cf9ec2f31bc427 Mon Sep 17 00:00:00 2001 From: Felix Lange Date: Tue, 4 Nov 2025 13:16:02 +0100 Subject: [PATCH 096/277] version: release go-ethereum v1.16.7 stable --- version/version.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/version/version.go b/version/version.go index d4248013b0..5a39ac6318 100644 --- a/version/version.go +++ b/version/version.go @@ -17,8 +17,8 @@ package version const ( - Major = 1 // Major version component of the current release - Minor = 16 // Minor version component of the current release - Patch = 7 // Patch version component of the current release - Meta = "unstable" // Version metadata to append to the version string + Major = 1 // Major version component of the current release + Minor = 16 // Minor version component of the current release + Patch = 7 // Patch version component of the current release + Meta = "stable" // Version metadata to append to the version string ) From d39af344dc63f44c115b8f4ffab75c8fec5a57ad Mon Sep 17 00:00:00 2001 From: Felix Lange Date: Tue, 4 Nov 2025 13:28:20 +0100 Subject: [PATCH 097/277] version: begin v1.16.8 release cycle --- version/version.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/version/version.go b/version/version.go index 5a39ac6318..a3aad5d398 100644 --- a/version/version.go +++ b/version/version.go @@ -17,8 +17,8 @@ package version const ( - Major = 1 // Major version component of the current release - Minor = 16 // Minor version 
component of the current release - Patch = 7 // Patch version component of the current release - Meta = "stable" // Version metadata to append to the version string + Major = 1 // Major version component of the current release + Minor = 16 // Minor version component of the current release + Patch = 8 // Patch version component of the current release + Meta = "unstable" // Version metadata to append to the version string ) From 19aa8020a914a7e2195bd7e3870a4784406e6a16 Mon Sep 17 00:00:00 2001 From: maskpp Date: Tue, 4 Nov 2025 21:09:36 +0800 Subject: [PATCH 098/277] common: introduce IsHexHash and use it (#32998) --- cmd/geth/config.go | 9 ++++----- cmd/geth/snapshot.go | 6 +++--- common/types.go | 9 +++++++++ 3 files changed, 16 insertions(+), 8 deletions(-) diff --git a/cmd/geth/config.go b/cmd/geth/config.go index fcb315af97..87627467d2 100644 --- a/cmd/geth/config.go +++ b/cmd/geth/config.go @@ -35,7 +35,6 @@ import ( "github.com/ethereum/go-ethereum/beacon/blsync" "github.com/ethereum/go-ethereum/cmd/utils" "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/eth/catalyst" "github.com/ethereum/go-ethereum/eth/ethconfig" @@ -273,11 +272,11 @@ func makeFullNode(ctx *cli.Context) *node.Node { // Configure synchronization override service var synctarget common.Hash if ctx.IsSet(utils.SyncTargetFlag.Name) { - hex := hexutil.MustDecode(ctx.String(utils.SyncTargetFlag.Name)) - if len(hex) != common.HashLength { - utils.Fatalf("invalid sync target length: have %d, want %d", len(hex), common.HashLength) + target := ctx.String(utils.SyncTargetFlag.Name) + if !common.IsHexHash(target) { + utils.Fatalf("sync target hash is not a valid hex hash: %s", target) } - synctarget = common.BytesToHash(hex) + synctarget = common.HexToHash(target) } utils.RegisterSyncOverrideService(stack, eth, synctarget, ctx.Bool(utils.ExitWhenSyncedFlag.Name)) diff --git 
a/cmd/geth/snapshot.go b/cmd/geth/snapshot.go index 7621dfa93c..fc0658a59c 100644 --- a/cmd/geth/snapshot.go +++ b/cmd/geth/snapshot.go @@ -639,11 +639,11 @@ func snapshotExportPreimages(ctx *cli.Context) error { var root common.Hash if ctx.NArg() > 1 { - rootBytes := common.FromHex(ctx.Args().Get(1)) - if len(rootBytes) != common.HashLength { + hash := ctx.Args().Get(1) + if !common.IsHexHash(hash) { return fmt.Errorf("invalid hash: %s", ctx.Args().Get(1)) } - root = common.BytesToHash(rootBytes) + root = common.HexToHash(hash) } else { headBlock := rawdb.ReadHeadBlock(chaindb) if headBlock == nil { diff --git a/common/types.go b/common/types.go index db4de8bcbd..a96d6c7c83 100644 --- a/common/types.go +++ b/common/types.go @@ -71,6 +71,15 @@ func BigToHash(b *big.Int) Hash { return BytesToHash(b.Bytes()) } // If b is larger than len(h), b will be cropped from the left. func HexToHash(s string) Hash { return BytesToHash(FromHex(s)) } +// IsHexHash verifies whether a string can represent a valid hex-encoded +// Ethereum hash or not. +func IsHexHash(s string) bool { + if has0xPrefix(s) { + s = s[2:] + } + return len(s) == 2*HashLength && isHex(s) +} + // Cmp compares two hashes. 
func (h Hash) Cmp(other Hash) int { return bytes.Compare(h[:], other[:]) From 395425902dca89d211b3e9bc1c2b0135ee3e32dd Mon Sep 17 00:00:00 2001 From: MozirDmitriy Date: Tue, 4 Nov 2025 15:09:57 +0200 Subject: [PATCH 099/277] core/rawdb: fix readOnly mode for database (#33025) --- core/rawdb/database.go | 1 + 1 file changed, 1 insertion(+) diff --git a/core/rawdb/database.go b/core/rawdb/database.go index 29483baa5f..d5c0f0aab2 100644 --- a/core/rawdb/database.go +++ b/core/rawdb/database.go @@ -330,6 +330,7 @@ func Open(db ethdb.KeyValueStore, opts OpenOptions) (ethdb.Database, error) { }() } return &freezerdb{ + readOnly: opts.ReadOnly, ancientRoot: opts.Ancient, KeyValueStore: db, chainFreezer: frdb, From 15ff378a8927eed211589bcf375aa5c528209b71 Mon Sep 17 00:00:00 2001 From: Rizky Ikwan Date: Thu, 6 Nov 2025 02:25:41 +0100 Subject: [PATCH 100/277] common: fix size comparison in StorageSize (#33105) --- common/size.go | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/common/size.go b/common/size.go index 097b6304a8..e7f504a082 100644 --- a/common/size.go +++ b/common/size.go @@ -26,13 +26,13 @@ type StorageSize float64 // String implements the stringer interface. func (s StorageSize) String() string { - if s > 1099511627776 { + if s >= 1099511627776 { return fmt.Sprintf("%.2f TiB", s/1099511627776) - } else if s > 1073741824 { + } else if s >= 1073741824 { return fmt.Sprintf("%.2f GiB", s/1073741824) - } else if s > 1048576 { + } else if s >= 1048576 { return fmt.Sprintf("%.2f MiB", s/1048576) - } else if s > 1024 { + } else if s >= 1024 { return fmt.Sprintf("%.2f KiB", s/1024) } else { return fmt.Sprintf("%.2f B", s) @@ -42,13 +42,13 @@ func (s StorageSize) String() string { // TerminalString implements log.TerminalStringer, formatting a string for console // output during logging. 
func (s StorageSize) TerminalString() string { - if s > 1099511627776 { + if s >= 1099511627776 { return fmt.Sprintf("%.2fTiB", s/1099511627776) - } else if s > 1073741824 { + } else if s >= 1073741824 { return fmt.Sprintf("%.2fGiB", s/1073741824) - } else if s > 1048576 { + } else if s >= 1048576 { return fmt.Sprintf("%.2fMiB", s/1048576) - } else if s > 1024 { + } else if s >= 1024 { return fmt.Sprintf("%.2fKiB", s/1024) } else { return fmt.Sprintf("%.2fB", s) From 6420ee35925f7701d2af783b70920d1d297efdcd Mon Sep 17 00:00:00 2001 From: maskpp Date: Fri, 7 Nov 2025 11:00:20 +0800 Subject: [PATCH 101/277] core/state: fix bug about getting stable LogsHash result. (#33082) Because the map iteration is unstable, we need to order logs by tx index and keep the same order with receipts and their logs, so we can still get the same `LogsHash` across runs. --------- Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> Co-authored-by: rjl493456442 --- core/state/statedb.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/core/state/statedb.go b/core/state/statedb.go index b770698255..364bc40850 100644 --- a/core/state/statedb.go +++ b/core/state/statedb.go @@ -22,6 +22,7 @@ import ( "fmt" "maps" "slices" + "sort" "sync" "sync/atomic" "time" @@ -264,6 +265,9 @@ func (s *StateDB) Logs() []*types.Log { for _, lgs := range s.logs { logs = append(logs, lgs...) } + sort.Slice(logs, func(i, j int) bool { + return logs[i].Index < logs[j].Index + }) return logs } From 7f9b06e7aae57eb295963b462b4c6ef1fdfe0eaf Mon Sep 17 00:00:00 2001 From: Rizky Ikwan Date: Fri, 7 Nov 2025 08:24:11 +0100 Subject: [PATCH 102/277] accounts/usbwallet: fix version check in SignTypedMessage (#33113) The version check incorrectly used `&&` instead of `||`, causing versions like v1.0.x through v1.4.x to be allowed when they should be rejected. These versions don't support EIP-712 signing which was introduced in firmware v1.5.0. 
--- accounts/usbwallet/ledger.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/accounts/usbwallet/ledger.go b/accounts/usbwallet/ledger.go index 52595a1621..80e63f1864 100644 --- a/accounts/usbwallet/ledger.go +++ b/accounts/usbwallet/ledger.go @@ -184,7 +184,7 @@ func (w *ledgerDriver) SignTypedMessage(path accounts.DerivationPath, domainHash return nil, accounts.ErrWalletClosed } // Ensure the wallet is capable of signing the given transaction - if w.version[0] < 1 && w.version[1] < 5 { + if w.version[0] < 1 || (w.version[0] == 1 && w.version[1] < 5) { //lint:ignore ST1005 brand name displayed on the console return nil, fmt.Errorf("Ledger version >= 1.5.0 required for EIP-712 signing (found version v%d.%d.%d)", w.version[0], w.version[1], w.version[2]) } From 982235f5e0a564798a930f2dd62d5fa938278a5f Mon Sep 17 00:00:00 2001 From: Marius van der Wijden Date: Fri, 7 Nov 2025 14:55:58 +0100 Subject: [PATCH 103/277] core/vm: remove todo (#33120) Removes an unnecessary todo. 
This case is handled, the comment was an artifact from Kev's refactor --- core/vm/contracts.go | 1 - 1 file changed, 1 deletion(-) diff --git a/core/vm/contracts.go b/core/vm/contracts.go index cae0be9f2d..00ddbebd6b 100644 --- a/core/vm/contracts.go +++ b/core/vm/contracts.go @@ -591,7 +591,6 @@ func (c *bigModExp) RequiredGas(input []byte) uint64 { if expLen > 32 { expHead.SetBytes(getData(input, baseLen, 32)) } else { - // TODO: Check that if expLen < baseLen, then getData will return an empty slice expHead.SetBytes(getData(input, baseLen, expLen)) } } From d2a5dba48f31031e8f9b1b4942a1e29e54d97079 Mon Sep 17 00:00:00 2001 From: Delweng Date: Sat, 8 Nov 2025 06:06:15 +0800 Subject: [PATCH 104/277] triedb/pathdb: fix 32-bit integer overflow in history trienode decoder (#33098) failed in 32bit: ``` --- FAIL: TestDecodeSingleCorruptedData (0.00s) panic: runtime error: slice bounds out of range [:-1501805520] [recovered, repanicked] goroutine 38872 [running]: testing.tRunner.func1.2({0x838db20, 0xa355620}) /opt/actions-runner/_work/_tool/go/1.25.3/x64/src/testing/testing.go:1872 +0x29b testing.tRunner.func1() /opt/actions-runner/_work/_tool/go/1.25.3/x64/src/testing/testing.go:1875 +0x414 panic({0x838db20, 0xa355620}) /opt/actions-runner/_work/_tool/go/1.25.3/x64/src/runtime/panic.go:783 +0x103 github.com/ethereum/go-ethereum/triedb/pathdb.decodeSingle({0x9e57500, 0x1432, 0x1432}, 0x0) /opt/actions-runner/_work/go-ethereum/go-ethereum/triedb/pathdb/history_trienode.go:399 +0x18d6 github.com/ethereum/go-ethereum/triedb/pathdb.TestDecodeSingleCorruptedData(0xa2db9e8) /opt/actions-runner/_work/go-ethereum/go-ethereum/triedb/pathdb/history_trienode_test.go:698 +0x180 testing.tRunner(0xa2db9e8, 0x83c86e8) /opt/actions-runner/_work/_tool/go/1.25.3/x64/src/testing/testing.go:1934 +0x114 created by testing.(*T).Run in goroutine 1 /opt/actions-runner/_work/_tool/go/1.25.3/x64/src/testing/testing.go:1997 +0x4b4 FAIL github.com/ethereum/go-ethereum/triedb/pathdb 41.453s ? 
github.com/ethereum/go-ethereum/version [no test files] FAIL ``` Found in https://github.com/ethereum/go-ethereum/actions/runs/18912701345/job/53990136071?pr=33052 --- triedb/pathdb/history_trienode.go | 16 ++++++++++++++++ triedb/pathdb/history_trienode_test.go | 5 ++++- 2 files changed, 20 insertions(+), 1 deletion(-) diff --git a/triedb/pathdb/history_trienode.go b/triedb/pathdb/history_trienode.go index 3f45b41117..1004106af9 100644 --- a/triedb/pathdb/history_trienode.go +++ b/triedb/pathdb/history_trienode.go @@ -19,9 +19,11 @@ package pathdb import ( "bytes" "encoding/binary" + "errors" "fmt" "iter" "maps" + "math" "slices" "sort" "time" @@ -386,12 +388,26 @@ func decodeSingle(keySection []byte, onValue func([]byte, int, int) error) ([]st } // Resolve the entry from key section nShared, nn := binary.Uvarint(keySection[keyOff:]) // key length shared (varint) + if nn <= 0 { + return nil, fmt.Errorf("corrupted varint encoding for nShared at offset %d", keyOff) + } keyOff += nn nUnshared, nn := binary.Uvarint(keySection[keyOff:]) // key length not shared (varint) + if nn <= 0 { + return nil, fmt.Errorf("corrupted varint encoding for nUnshared at offset %d", keyOff) + } keyOff += nn nValue, nn := binary.Uvarint(keySection[keyOff:]) // value length (varint) + if nn <= 0 { + return nil, fmt.Errorf("corrupted varint encoding for nValue at offset %d", keyOff) + } keyOff += nn + // Validate that the values can fit in an int to prevent overflow on 32-bit systems + if nShared > uint64(math.MaxUint32) || nUnshared > uint64(math.MaxUint32) || nValue > uint64(math.MaxUint32) { + return nil, errors.New("key size too large") + } + // Resolve unshared key if keyOff+int(nUnshared) > len(keySection) { return nil, fmt.Errorf("key length too long, unshared key length: %d, off: %d, section size: %d", nUnshared, keyOff, len(keySection)) diff --git a/triedb/pathdb/history_trienode_test.go b/triedb/pathdb/history_trienode_test.go index d6b80f61f5..be4740a904 100644 --- 
a/triedb/pathdb/history_trienode_test.go +++ b/triedb/pathdb/history_trienode_test.go @@ -694,7 +694,10 @@ func TestDecodeSingleCorruptedData(t *testing.T) { // Test with corrupted varint in key section corrupted := make([]byte, len(keySection)) copy(corrupted, keySection) - corrupted[5] = 0xFF // Corrupt varint + // Fill first 10 bytes with 0xFF to create a varint overflow (>64 bits) + for i := range 10 { + corrupted[i] = 0xFF + } _, err = decodeSingle(corrupted, nil) if err == nil { t.Fatal("Expected error for corrupted varint") From ebc7dc9e37a8f9b2ed54ca9578e88b7f2ae968e3 Mon Sep 17 00:00:00 2001 From: Lucia Date: Sat, 8 Nov 2025 23:25:53 +1300 Subject: [PATCH 105/277] crypto: validate hash length in no cgo Sign (#33104) - Replace hardcoded DigestLength - Add hash length validation --- crypto/signature_nocgo.go | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/crypto/signature_nocgo.go b/crypto/signature_nocgo.go index d76127c258..9dce1057fa 100644 --- a/crypto/signature_nocgo.go +++ b/crypto/signature_nocgo.go @@ -43,6 +43,9 @@ func sigToPub(hash, sig []byte) (*secp256k1.PublicKey, error) { if len(sig) != SignatureLength { return nil, errors.New("invalid signature") } + if len(hash) != DigestLength { + return nil, fmt.Errorf("hash is required to be exactly %d bytes (%d)", DigestLength, len(hash)) + } // Convert to secp256k1 input format with 'recovery id' v at the beginning. btcsig := make([]byte, SignatureLength) btcsig[0] = sig[RecoveryIDOffset] + 27 @@ -76,8 +79,8 @@ func SigToPub(hash, sig []byte) (*ecdsa.PublicKey, error) { // // The produced signature is in the [R || S || V] format where V is 0 or 1. 
func Sign(hash []byte, prv *ecdsa.PrivateKey) ([]byte, error) { - if len(hash) != 32 { - return nil, fmt.Errorf("hash is required to be exactly 32 bytes (%d)", len(hash)) + if len(hash) != DigestLength { + return nil, fmt.Errorf("hash is required to be exactly %d bytes (%d)", DigestLength, len(hash)) } if prv.Curve != S256() { return nil, errors.New("private key curve is not secp256k1") From 7755ee3e4f5ee534252a524a668e58714f944408 Mon Sep 17 00:00:00 2001 From: Tristan-Wilson <87238672+Tristan-Wilson@users.noreply.github.com> Date: Mon, 10 Nov 2025 09:38:28 +0100 Subject: [PATCH 106/277] consensus/misc/eip4844: expose TargetBlobsPerBlock (#32991) Rollups may want to use these to dynamically adjust blobs posted after BPO forks. --- consensus/misc/eip4844/eip4844.go | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/consensus/misc/eip4844/eip4844.go b/consensus/misc/eip4844/eip4844.go index e14d129561..c1a21195e3 100644 --- a/consensus/misc/eip4844/eip4844.go +++ b/consensus/misc/eip4844/eip4844.go @@ -200,6 +200,15 @@ func LatestMaxBlobsPerBlock(cfg *params.ChainConfig) int { return bcfg.Max } +// TargetBlobsPerBlock returns the target blobs per block for a block at the given timestamp. +func TargetBlobsPerBlock(cfg *params.ChainConfig, time uint64) int { + blobConfig := latestBlobConfig(cfg, time) + if blobConfig == nil { + return 0 + } + return blobConfig.Target +} + // fakeExponential approximates factor * e ** (numerator / denominator) using // Taylor expansion. 
func fakeExponential(factor, numerator, denominator *big.Int) *big.Int { From fbd89be0479f76ff56ccb6f8f2d882f3c38133cf Mon Sep 17 00:00:00 2001 From: sashass1315 Date: Mon, 10 Nov 2025 19:44:31 +0200 Subject: [PATCH 107/277] eth/catalyst: always reset timer after sealing error (#33146) The periodic sealing loop failed to reset its timer when sealBlock returned an error, causing the timer to never fire again and effectively halting block production in developer periodic mode after the first failure. This is a bug because the loop relies on the timer to trigger subsequent sealing attempts, and transient errors (e.g., pool races or chain rewinds) should not permanently stop the loop. The change moves timer.Reset after the sealing attempt unconditionally, ensuring the loop continues ticking and retrying even when sealing fails, which matches how other periodic timers in the codebase behave and preserves forward progress. --- eth/catalyst/simulated_beacon.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/eth/catalyst/simulated_beacon.go b/eth/catalyst/simulated_beacon.go index c10990c233..d9f01240a7 100644 --- a/eth/catalyst/simulated_beacon.go +++ b/eth/catalyst/simulated_beacon.go @@ -280,9 +280,8 @@ func (c *SimulatedBeacon) loop() { case <-timer.C: if err := c.sealBlock(c.withdrawals.pop(10), uint64(time.Now().Unix())); err != nil { log.Warn("Error performing sealing work", "err", err) - } else { - timer.Reset(time.Second * time.Duration(c.period)) } + timer.Reset(time.Second * time.Duration(c.period)) } } } From ca912542591033ab292d1a299c1048cdce9ecece Mon Sep 17 00:00:00 2001 From: Guillaume Ballet <3272758+gballet@users.noreply.github.com> Date: Tue, 11 Nov 2025 03:07:32 +0100 Subject: [PATCH 108/277] build: add wasm targets for keeper (#33124) [powdr](github.com/powdr-labs/powdr) has tested keeper in their womir system and managed to get it to work. This PR adds wasm as a keeper target. 
There's another plan by the zkevm team to support wasm with wasi as well, so these PR adds both targets. These currently uses the `example` tag, as there is no precompile intefrace defined for either target yet. Nonetheless, this is useful for testing these zkvms so it makes sense to support these experimental targets already. --- build/ci.go | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/build/ci.go b/build/ci.go index 59c948acb3..e589cd2b40 100644 --- a/build/ci.go +++ b/build/ci.go @@ -107,6 +107,18 @@ var ( Tags: "ziren", Env: map[string]string{"GOMIPS": "softfloat", "CGO_ENABLED": "0"}, }, + { + Name: "wasm-js", + GOOS: "js", + GOARCH: "wasm", + Tags: "example", + }, + { + Name: "wasm-wasi", + GOOS: "wasip1", + GOARCH: "wasm", + Tags: "example", + }, { Name: "example", Tags: "example", @@ -331,6 +343,10 @@ func buildFlags(env build.Environment, staticLinking bool, buildTags []string) ( } ld = append(ld, "-extldflags", "'"+strings.Join(extld, " ")+"'") } + // TODO(gballet): revisit after the input api has been defined + if runtime.GOARCH == "wasm" { + ld = append(ld, "-gcflags=all=-d=softfloat") + } if len(ld) > 0 { flags = append(flags, "-ldflags", strings.Join(ld, " ")) } From 5f4cc3f57d737ad124ddb8a23c97f04d7c60a079 Mon Sep 17 00:00:00 2001 From: Matthieu Vachon Date: Tue, 11 Nov 2025 01:57:52 -0500 Subject: [PATCH 109/277] core/state: fixed hooked StateDB handling of `OnCodeChangeV2` (#33148) While updating to latest Geth, I noticed `OnCodeChangeV2` was not properly handled in `SelfDestruct/6780`, this PR fixes this and bring a unit test. Let me know if it's deemed more approriate to merge the tests with the other one. 
--- core/state/statedb_hooked.go | 4 +-- core/state/statedb_hooked_test.go | 41 +++++++++++++++++++++++++++++++ eth/tracers/native/mux.go | 8 ++++++ 3 files changed, 51 insertions(+), 2 deletions(-) diff --git a/core/state/statedb_hooked.go b/core/state/statedb_hooked.go index 9db201fc2b..50acc03aa8 100644 --- a/core/state/statedb_hooked.go +++ b/core/state/statedb_hooked.go @@ -220,7 +220,7 @@ func (s *hookedStateDB) SelfDestruct(address common.Address) uint256.Int { var prevCode []byte var prevCodeHash common.Hash - if s.hooks.OnCodeChange != nil { + if s.hooks.OnCodeChange != nil || s.hooks.OnCodeChangeV2 != nil { prevCode = s.inner.GetCode(address) prevCodeHash = s.inner.GetCodeHash(address) } @@ -246,7 +246,7 @@ func (s *hookedStateDB) SelfDestruct6780(address common.Address) (uint256.Int, b var prevCode []byte var prevCodeHash common.Hash - if s.hooks.OnCodeChange != nil { + if s.hooks.OnCodeChange != nil || s.hooks.OnCodeChangeV2 != nil { prevCodeHash = s.inner.GetCodeHash(address) prevCode = s.inner.GetCode(address) } diff --git a/core/state/statedb_hooked_test.go b/core/state/statedb_hooked_test.go index bacb7baee1..4ff1023eb2 100644 --- a/core/state/statedb_hooked_test.go +++ b/core/state/statedb_hooked_test.go @@ -122,6 +122,47 @@ func TestHooks(t *testing.T) { sdb.AddLog(&types.Log{ Address: common.Address{0xbb}, }) + + if len(result) != len(wants) { + t.Fatalf("number of tracing events wrong, have %d want %d", len(result), len(wants)) + } + + for i, want := range wants { + if have := result[i]; have != want { + t.Fatalf("error event %d, have\n%v\nwant%v\n", i, have, want) + } + } +} + +func TestHooks_OnCodeChangeV2(t *testing.T) { + inner, _ := New(types.EmptyRootHash, NewDatabaseForTesting()) + + var result []string + var wants = []string{ + "0xaa00000000000000000000000000000000000000.code: (0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470) ->0x1325 (0xa12ae05590de0c93a00bc7ac773c2fdb621e44f814985e72194f921c0050f728) 
ContractCreation", + "0xaa00000000000000000000000000000000000000.code: 0x1325 (0xa12ae05590de0c93a00bc7ac773c2fdb621e44f814985e72194f921c0050f728) -> (0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470) SelfDestruct", + "0xbb00000000000000000000000000000000000000.code: (0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470) ->0x1326 (0x3c54516221d604e623f358bc95996ca3242aaa109bddabcebda13db9b3f90dcb) ContractCreation", + "0xbb00000000000000000000000000000000000000.code: 0x1326 (0x3c54516221d604e623f358bc95996ca3242aaa109bddabcebda13db9b3f90dcb) -> (0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470) SelfDestruct", + } + emitF := func(format string, a ...any) { + result = append(result, fmt.Sprintf(format, a...)) + } + sdb := NewHookedState(inner, &tracing.Hooks{ + OnCodeChangeV2: func(addr common.Address, prevCodeHash common.Hash, prevCode []byte, codeHash common.Hash, code []byte, reason tracing.CodeChangeReason) { + emitF("%v.code: %#x (%v) ->%#x (%v) %s", addr, prevCode, prevCodeHash, code, codeHash, reason) + }, + }) + sdb.SetCode(common.Address{0xaa}, []byte{0x13, 37}, tracing.CodeChangeContractCreation) + sdb.SelfDestruct(common.Address{0xaa}) + + sdb.SetCode(common.Address{0xbb}, []byte{0x13, 38}, tracing.CodeChangeContractCreation) + sdb.CreateContract(common.Address{0xbb}) + sdb.SelfDestruct6780(common.Address{0xbb}) + + if len(result) != len(wants) { + t.Fatalf("number of tracing events wrong, have %d want %d", len(result), len(wants)) + } + for i, want := range wants { if have := result[i]; have != want { t.Fatalf("error event %d, have\n%v\nwant%v\n", i, have, want) diff --git a/eth/tracers/native/mux.go b/eth/tracers/native/mux.go index 77ab254568..37fc64f3f5 100644 --- a/eth/tracers/native/mux.go +++ b/eth/tracers/native/mux.go @@ -156,6 +156,14 @@ func (t *muxTracer) OnCodeChange(a common.Address, prevCodeHash common.Hash, pre } } +func (t *muxTracer) OnCodeChangeV2(a common.Address, prevCodeHash 
common.Hash, prev []byte, codeHash common.Hash, code []byte, reason tracing.CodeChangeReason) { + for _, t := range t.tracers { + if t.OnCodeChangeV2 != nil { + t.OnCodeChangeV2(a, prevCodeHash, prev, codeHash, code, reason) + } + } +} + func (t *muxTracer) OnStorageChange(a common.Address, k, prev, new common.Hash) { for _, t := range t.tracers { if t.OnStorageChange != nil { From 7368b34a4beac8ccf2db3529821c9fb2d1a40378 Mon Sep 17 00:00:00 2001 From: Lucia Date: Tue, 11 Nov 2025 21:01:37 +1300 Subject: [PATCH 110/277] core/rawdb: capture open file error and fix resource leak (#33147) --- core/rawdb/eradb/eradb.go | 1 + core/rawdb/freezer_batch.go | 10 ++++++++-- core/rawdb/freezer_table.go | 7 ++++--- 3 files changed, 13 insertions(+), 5 deletions(-) diff --git a/core/rawdb/eradb/eradb.go b/core/rawdb/eradb/eradb.go index 29e658798e..a552b94da9 100644 --- a/core/rawdb/eradb/eradb.go +++ b/core/rawdb/eradb/eradb.go @@ -303,6 +303,7 @@ func (db *Store) openEraFile(epoch uint64) (*era.Era, error) { } // Sanity-check start block. if e.Start()%uint64(era.MaxEra1Size) != 0 { + e.Close() return nil, fmt.Errorf("pre-merge era1 file has invalid boundary. %d %% %d != 0", e.Start(), era.MaxEra1Size) } log.Debug("Opened era1 file", "epoch", epoch) diff --git a/core/rawdb/freezer_batch.go b/core/rawdb/freezer_batch.go index 7e46e49f43..080c0720a1 100644 --- a/core/rawdb/freezer_batch.go +++ b/core/rawdb/freezer_batch.go @@ -51,12 +51,18 @@ func newFreezerBatch(f *Freezer) *freezerBatch { // Append adds an RLP-encoded item of the given kind. func (batch *freezerBatch) Append(kind string, num uint64, item interface{}) error { - return batch.tables[kind].Append(num, item) + if table := batch.tables[kind]; table != nil { + return table.Append(num, item) + } + return errUnknownTable } // AppendRaw adds an item of the given kind. 
func (batch *freezerBatch) AppendRaw(kind string, num uint64, item []byte) error { - return batch.tables[kind].AppendRaw(num, item) + if table := batch.tables[kind]; table != nil { + return table.AppendRaw(num, item) + } + return errUnknownTable } // reset initializes the batch. diff --git a/core/rawdb/freezer_table.go b/core/rawdb/freezer_table.go index 01a754c5c8..aedb2d8eed 100644 --- a/core/rawdb/freezer_table.go +++ b/core/rawdb/freezer_table.go @@ -1196,8 +1196,7 @@ func (t *freezerTable) sizeNolock() (uint64, error) { } // advanceHead should be called when the current head file would outgrow the file limits, -// and a new file must be opened. The caller of this method must hold the write-lock -// before calling this method. +// and a new file must be opened. This method acquires the write-lock internally. func (t *freezerTable) advanceHead() error { t.lock.Lock() defer t.lock.Unlock() @@ -1218,7 +1217,9 @@ func (t *freezerTable) advanceHead() error { return err } t.releaseFile(t.headId) - t.openFile(t.headId, openFreezerFileForReadOnly) + if _, err := t.openFile(t.headId, openFreezerFileForReadOnly); err != nil { + return err + } // Swap out the current head. t.head = newHead From d8f9801305128711863fdc5657b883afb7075052 Mon Sep 17 00:00:00 2001 From: Michael Kaplan <55204436+michaelkaplan13@users.noreply.github.com> Date: Tue, 11 Nov 2025 09:54:36 -0500 Subject: [PATCH 111/277] rpc: avoid unnecessary RST_STREAM, PING frames sent by client (#33122) Context from Cloudflare blog: https://blog.cloudflare.com/go-and-enhance-your-calm/#reading-bodies-in-go-can-be-unintuitive We were able to reproduce the same issue discussed by Cloudflare in their recent blog post above using the `ethclient`. 
--- rpc/http.go | 14 +++++++++++--- rpc/http_test.go | 2 +- 2 files changed, 12 insertions(+), 4 deletions(-) diff --git a/rpc/http.go b/rpc/http.go index f4b99429ef..a74f36a1b0 100644 --- a/rpc/http.go +++ b/rpc/http.go @@ -168,13 +168,21 @@ func newClientTransportHTTP(endpoint string, cfg *clientConfig) reconnectFunc { } } +// cleanlyCloseBody avoids sending unnecessary RST_STREAM and PING frames by +// ensuring the whole body is read before being closed. +// See https://blog.cloudflare.com/go-and-enhance-your-calm/#reading-bodies-in-go-can-be-unintuitive +func cleanlyCloseBody(body io.ReadCloser) error { + io.Copy(io.Discard, body) + return body.Close() +} + func (c *Client) sendHTTP(ctx context.Context, op *requestOp, msg interface{}) error { hc := c.writeConn.(*httpConn) respBody, err := hc.doRequest(ctx, msg) if err != nil { return err } - defer respBody.Close() + defer cleanlyCloseBody(respBody) var resp jsonrpcMessage batch := [1]*jsonrpcMessage{&resp} @@ -191,7 +199,7 @@ func (c *Client) sendBatchHTTP(ctx context.Context, op *requestOp, msgs []*jsonr if err != nil { return err } - defer respBody.Close() + defer cleanlyCloseBody(respBody) var respmsgs []*jsonrpcMessage if err := json.NewDecoder(respBody).Decode(&respmsgs); err != nil { @@ -236,7 +244,7 @@ func (hc *httpConn) doRequest(ctx context.Context, msg interface{}) (io.ReadClos if _, err := buf.ReadFrom(resp.Body); err == nil { body = buf.Bytes() } - resp.Body.Close() + cleanlyCloseBody(resp.Body) return nil, HTTPError{ Status: resp.Status, StatusCode: resp.StatusCode, diff --git a/rpc/http_test.go b/rpc/http_test.go index 6c268b6292..15ddd59bd0 100644 --- a/rpc/http_test.go +++ b/rpc/http_test.go @@ -106,7 +106,7 @@ func confirmHTTPRequestYieldsStatusCode(t *testing.T, method, contentType, body if err != nil { t.Fatalf("request failed: %v", err) } - resp.Body.Close() + cleanlyCloseBody(resp.Body) confirmStatusCode(t, resp.StatusCode, expectedStatusCode) } From 
3d2a4cb0532c43c6e34845cd7f21f8e180fa92a8 Mon Sep 17 00:00:00 2001 From: oxBoni Date: Wed, 12 Nov 2025 08:30:16 +0100 Subject: [PATCH 112/277] core: remove unused peek function in insertIterator (#33155) --- core/blockchain_insert.go | 22 ---------------------- 1 file changed, 22 deletions(-) diff --git a/core/blockchain_insert.go b/core/blockchain_insert.go index ac6a156d3e..07a250a1bb 100644 --- a/core/blockchain_insert.go +++ b/core/blockchain_insert.go @@ -131,28 +131,6 @@ func (it *insertIterator) next() (*types.Block, error) { return it.chain[it.index], it.validator.ValidateBody(it.chain[it.index]) } -// peek returns the next block in the iterator, along with any potential validation -// error for that block, but does **not** advance the iterator. -// -// Both header and body validation errors (nil too) is cached into the iterator -// to avoid duplicating work on the following next() call. -// nolint:unused -func (it *insertIterator) peek() (*types.Block, error) { - // If we reached the end of the chain, abort - if it.index+1 >= len(it.chain) { - return nil, nil - } - // Wait for verification result if not yet done - if len(it.errors) <= it.index+1 { - it.errors = append(it.errors, <-it.results) - } - if it.errors[it.index+1] != nil { - return it.chain[it.index+1], it.errors[it.index+1] - } - // Block header valid, ignore body validation since we don't have a parent anyway - return it.chain[it.index+1], nil -} - // previous returns the previous header that was being processed, or nil. func (it *insertIterator) previous() *types.Header { if it.index < 1 { From 12a389f0650b62965ffd32cf86d854dadda12fae Mon Sep 17 00:00:00 2001 From: Csaba Kiraly Date: Thu, 13 Nov 2025 07:32:01 +0100 Subject: [PATCH 113/277] core/txpool/blobpool: fix benchmarkPoolPending (#33161) Add BlobTxs flag to filter. 
Signed-off-by: Csaba Kiraly --- core/txpool/blobpool/blobpool_test.go | 1 + 1 file changed, 1 insertion(+) diff --git a/core/txpool/blobpool/blobpool_test.go b/core/txpool/blobpool/blobpool_test.go index f7d8ca209b..2fa1927cae 100644 --- a/core/txpool/blobpool/blobpool_test.go +++ b/core/txpool/blobpool/blobpool_test.go @@ -2353,6 +2353,7 @@ func benchmarkPoolPending(b *testing.B, datacap uint64) { MinTip: uint256.NewInt(1), BaseFee: chain.basefee, BlobFee: chain.blobfee, + BlobTxs: true, }) if len(p) != int(capacity) { b.Fatalf("have %d want %d", len(p), capacity) From 48d708a194a498f1956d6cd0335c65ba73a23a12 Mon Sep 17 00:00:00 2001 From: Marcel <153717436+MonkeyMarcel@users.noreply.github.com> Date: Thu, 13 Nov 2025 14:48:26 +0800 Subject: [PATCH 114/277] eth/filters: further optimize tx hash map in #32965 (#33108) --- eth/filters/filter.go | 4 ++-- eth/filters/filter_system.go | 10 +++++----- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/eth/filters/filter.go b/eth/filters/filter.go index 422e5cd67b..a818f0b607 100644 --- a/eth/filters/filter.go +++ b/eth/filters/filter.go @@ -563,7 +563,7 @@ type ReceiptWithTx struct { // In addition to returning receipts, it also returns the corresponding transactions. // This is because receipts only contain low-level data, while user-facing data // may require additional information from the Transaction. 
-func filterReceipts(txHashes map[common.Hash]bool, ev core.ChainEvent) []*ReceiptWithTx { +func filterReceipts(txHashes map[common.Hash]struct{}, ev core.ChainEvent) []*ReceiptWithTx { var ret []*ReceiptWithTx receipts := ev.Receipts @@ -585,7 +585,7 @@ func filterReceipts(txHashes map[common.Hash]bool, ev core.ChainEvent) []*Receip } } else { for i, receipt := range receipts { - if txHashes[receipt.TxHash] { + if _, ok := txHashes[receipt.TxHash]; ok { ret = append(ret, &ReceiptWithTx{ Receipt: receipt, Transaction: txs[i], diff --git a/eth/filters/filter_system.go b/eth/filters/filter_system.go index f10e6a277b..8b9bce47b9 100644 --- a/eth/filters/filter_system.go +++ b/eth/filters/filter_system.go @@ -185,9 +185,9 @@ type subscription struct { txs chan []*types.Transaction headers chan *types.Header receipts chan []*ReceiptWithTx - txHashes map[common.Hash]bool // contains transaction hashes for transactionReceipts subscription filtering - installed chan struct{} // closed when the filter is installed - err chan error // closed when the filter is uninstalled + txHashes map[common.Hash]struct{} // contains transaction hashes for transactionReceipts subscription filtering + installed chan struct{} // closed when the filter is installed + err chan error // closed when the filter is uninstalled } // EventSystem creates subscriptions, processes events and broadcasts them to the @@ -403,9 +403,9 @@ func (es *EventSystem) SubscribePendingTxs(txs chan []*types.Transaction) *Subsc // transactions when they are included in blocks. If txHashes is provided, only receipts // for those specific transaction hashes will be delivered. 
func (es *EventSystem) SubscribeTransactionReceipts(txHashes []common.Hash, receipts chan []*ReceiptWithTx) *Subscription { - hashSet := make(map[common.Hash]bool) + hashSet := make(map[common.Hash]struct{}, len(txHashes)) for _, h := range txHashes { - hashSet[h] = true + hashSet[h] = struct{}{} } sub := &subscription{ id: rpc.NewID(), From eb8f32588b5712dcb96509f318a5cc815a783b5f Mon Sep 17 00:00:00 2001 From: Forostovec Date: Thu, 13 Nov 2025 08:51:41 +0200 Subject: [PATCH 115/277] triedb/pathdb: fix ID assignment in history inspection (#33103) --- triedb/pathdb/history_inspect.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/triedb/pathdb/history_inspect.go b/triedb/pathdb/history_inspect.go index 9b4eea27b4..a839a184ca 100644 --- a/triedb/pathdb/history_inspect.go +++ b/triedb/pathdb/history_inspect.go @@ -50,7 +50,7 @@ func sanitizeRange(start, end uint64, freezer ethdb.AncientReader) (uint64, uint if err != nil { return 0, 0, err } - last := head - 1 + last := head if end != 0 && end < last { last = end } @@ -143,7 +143,7 @@ func historyRange(freezer ethdb.AncientReader) (uint64, uint64, error) { if err != nil { return 0, 0, err } - last := head - 1 + last := head fh, err := readStateHistory(freezer, first) if err != nil { From f23d506b7db56f057d2320df92e728d369fbfb73 Mon Sep 17 00:00:00 2001 From: Delweng Date: Thu, 13 Nov 2025 15:06:27 +0800 Subject: [PATCH 116/277] eth/syncer: advance safe and finalized block (#33038) --- eth/syncer/syncer.go | 33 ++++++++++++++++++++++++++++----- 1 file changed, 28 insertions(+), 5 deletions(-) diff --git a/eth/syncer/syncer.go b/eth/syncer/syncer.go index 6b33ec54ba..83fe3ad230 100644 --- a/eth/syncer/syncer.go +++ b/eth/syncer/syncer.go @@ -22,6 +22,7 @@ import ( "sync" "time" + "github.com/ethereum/go-ethereum/beacon/params" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/eth" @@ -133,13 +134,35 @@ func (s *Syncer) run() { } 
case <-ticker.C: - if target == nil || !s.exitWhenSynced { + if target == nil { continue } - if block := s.backend.BlockChain().GetBlockByHash(target.Hash()); block != nil { - log.Info("Sync target reached", "number", block.NumberU64(), "hash", block.Hash()) - go s.stack.Close() // async since we need to close ourselves - return + + // Terminate the node if the target has been reached + if s.exitWhenSynced { + if block := s.backend.BlockChain().GetBlockByHash(target.Hash()); block != nil { + log.Info("Sync target reached", "number", block.NumberU64(), "hash", block.Hash()) + go s.stack.Close() // async since we need to close ourselves + return + } + } + + // Set the finalized and safe markers relative to the current head. + // The finalized marker is set two epochs behind the target, + // and the safe marker is set one epoch behind the target. + head := s.backend.BlockChain().CurrentHeader() + if head == nil { + continue + } + if header := s.backend.BlockChain().GetHeaderByNumber(head.Number.Uint64() - params.EpochLength*2); header != nil { + if final := s.backend.BlockChain().CurrentFinalBlock(); final == nil || final.Number.Cmp(header.Number) < 0 { + s.backend.BlockChain().SetFinalized(header) + } + } + if header := s.backend.BlockChain().GetHeaderByNumber(head.Number.Uint64() - params.EpochLength); header != nil { + if safe := s.backend.BlockChain().CurrentSafeBlock(); safe == nil || safe.Number.Cmp(header.Number) < 0 { + s.backend.BlockChain().SetSafe(header) + } } case <-s.closed: From fa16c89bfd9b923989fa8a3add530c391f30b309 Mon Sep 17 00:00:00 2001 From: Bashmunta Date: Thu, 13 Nov 2025 10:15:44 +0200 Subject: [PATCH 117/277] core: use scheme-aware empty root in flushAlloc (#33168) --- core/genesis.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/genesis.go b/core/genesis.go index d0d490874d..7d640c8cae 100644 --- a/core/genesis.go +++ b/core/genesis.go @@ -190,7 +190,7 @@ func flushAlloc(ga *types.GenesisAlloc, triedb 
*triedb.Database) (common.Hash, e return common.Hash{}, err } // Commit newly generated states into disk if it's not empty. - if root != types.EmptyRootHash { + if root != emptyRoot { if err := triedb.Commit(root, true); err != nil { return common.Hash{}, err } From 488d987fc4ea0d3ad66423430b85e0b043fb36d5 Mon Sep 17 00:00:00 2001 From: phrwlk Date: Thu, 13 Nov 2025 10:17:54 +0200 Subject: [PATCH 118/277] accounts/keystore: clear decrypted key after use (#33090) --- accounts/keystore/keystore.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/accounts/keystore/keystore.go b/accounts/keystore/keystore.go index 3e4266924f..29c4bdf2ca 100644 --- a/accounts/keystore/keystore.go +++ b/accounts/keystore/keystore.go @@ -418,6 +418,7 @@ func (ks *KeyStore) Export(a accounts.Account, passphrase, newPassphrase string) if err != nil { return nil, err } + defer zeroKey(key.PrivateKey) var N, P int if store, ok := ks.storage.(*keyStorePassphrase); ok { N, P = store.scryptN, store.scryptP @@ -477,6 +478,7 @@ func (ks *KeyStore) Update(a accounts.Account, passphrase, newPassphrase string) if err != nil { return err } + defer zeroKey(key.PrivateKey) return ks.storage.StoreKey(a.URL.Path, key, newPassphrase) } From aa36bcd0aa49a7ef1f08288006359e9d7f42c595 Mon Sep 17 00:00:00 2001 From: oooLowNeoNooo Date: Fri, 14 Nov 2025 08:16:03 +0100 Subject: [PATCH 119/277] graphql: add nil check in Transaction.Type() method (#33184) Add nil check before calling tx.Type() to prevent panic when transaction is not found. 
--- graphql/graphql.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/graphql/graphql.go b/graphql/graphql.go index 0b2a77a3c4..55da3185dd 100644 --- a/graphql/graphql.go +++ b/graphql/graphql.go @@ -575,6 +575,9 @@ func (t *Transaction) getLogs(ctx context.Context, hash common.Hash) (*[]*Log, e func (t *Transaction) Type(ctx context.Context) *hexutil.Uint64 { tx, _ := t.resolve(ctx) + if tx == nil { + return nil + } txType := hexutil.Uint64(tx.Type()) return &txType } From 95273afec4669f443dc9b1adc464b7f68a6d2dbf Mon Sep 17 00:00:00 2001 From: radik878 Date: Fri, 14 Nov 2025 14:55:41 +0200 Subject: [PATCH 120/277] core/rawdb: return iterator error in findTxInBlockBody (#33188) The iterator loop in findTxInBlockBody returned the outer-scoped err when iter.Err() was non-nil, which could incorrectly propagate a nil or stale error and hide actual RLP decoding issues. This patch returns iter.Err() as intended by the rlp list iterator API, matching established patterns elsewhere in the codebase and improving diagnostics when encountering malformed transaction entries. --- core/rawdb/accessors_indexes.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/rawdb/accessors_indexes.go b/core/rawdb/accessors_indexes.go index a725f144d4..10eb454015 100644 --- a/core/rawdb/accessors_indexes.go +++ b/core/rawdb/accessors_indexes.go @@ -148,7 +148,7 @@ func findTxInBlockBody(blockbody rlp.RawValue, target common.Hash) (*types.Trans txIndex := uint64(0) for iter.Next() { if iter.Err() != nil { - return nil, 0, err + return nil, 0, iter.Err() } // The preimage for the hash calculation of legacy transactions // is just their RLP encoding. 
For typed (EIP-2718) transactions, From 81c5b430291c16387b2e7f909e306ed1aafb5d2d Mon Sep 17 00:00:00 2001 From: Guillaume Ballet <3272758+gballet@users.noreply.github.com> Date: Fri, 14 Nov 2025 14:39:36 +0100 Subject: [PATCH 121/277] core/rawdb: delete dead code to avoid more useless AI submissions (#33186) Co-authored-by: Gary Rong --- core/rawdb/accessors_chain.go | 93 ------------------------------ core/rawdb/accessors_chain_test.go | 51 ---------------- 2 files changed, 144 deletions(-) diff --git a/core/rawdb/accessors_chain.go b/core/rawdb/accessors_chain.go index f20d675ff8..6ae64fb2fd 100644 --- a/core/rawdb/accessors_chain.go +++ b/core/rawdb/accessors_chain.go @@ -83,65 +83,6 @@ type NumberHash struct { Hash common.Hash } -// ReadAllHashesInRange retrieves all the hashes assigned to blocks at certain -// heights, both canonical and reorged forks included. -// This method considers both limits to be _inclusive_. -func ReadAllHashesInRange(db ethdb.Iteratee, first, last uint64) []*NumberHash { - var ( - start = encodeBlockNumber(first) - keyLength = len(headerPrefix) + 8 + 32 - hashes = make([]*NumberHash, 0, 1+last-first) - it = db.NewIterator(headerPrefix, start) - ) - defer it.Release() - for it.Next() { - key := it.Key() - if len(key) != keyLength { - continue - } - num := binary.BigEndian.Uint64(key[len(headerPrefix) : len(headerPrefix)+8]) - if num > last { - break - } - hash := common.BytesToHash(key[len(key)-32:]) - hashes = append(hashes, &NumberHash{num, hash}) - } - return hashes -} - -// ReadAllCanonicalHashes retrieves all canonical number and hash mappings at the -// certain chain range. If the accumulated entries reaches the given threshold, -// abort the iteration and return the semi-finish result. -func ReadAllCanonicalHashes(db ethdb.Iteratee, from uint64, to uint64, limit int) ([]uint64, []common.Hash) { - // Short circuit if the limit is 0. 
- if limit == 0 { - return nil, nil - } - var ( - numbers []uint64 - hashes []common.Hash - ) - // Construct the key prefix of start point. - start, end := headerHashKey(from), headerHashKey(to) - it := db.NewIterator(nil, start) - defer it.Release() - - for it.Next() { - if bytes.Compare(it.Key(), end) >= 0 { - break - } - if key := it.Key(); len(key) == len(headerPrefix)+8+1 && bytes.Equal(key[len(key)-1:], headerHashSuffix) { - numbers = append(numbers, binary.BigEndian.Uint64(key[len(headerPrefix):len(headerPrefix)+8])) - hashes = append(hashes, common.BytesToHash(it.Value())) - // If the accumulated entries reaches the limit threshold, return. - if len(numbers) >= limit { - break - } - } - } - return numbers, hashes -} - // ReadHeaderNumber returns the header number assigned to a hash. func ReadHeaderNumber(db ethdb.KeyValueReader, hash common.Hash) (uint64, bool) { data, _ := db.Get(headerNumberKey(hash)) @@ -886,40 +827,6 @@ func WriteBadBlock(db ethdb.KeyValueStore, block *types.Block) { } } -// DeleteBadBlocks deletes all the bad blocks from the database -func DeleteBadBlocks(db ethdb.KeyValueWriter) { - if err := db.Delete(badBlockKey); err != nil { - log.Crit("Failed to delete bad blocks", "err", err) - } -} - -// FindCommonAncestor returns the last common ancestor of two block headers -func FindCommonAncestor(db ethdb.Reader, a, b *types.Header) *types.Header { - for bn := b.Number.Uint64(); a.Number.Uint64() > bn; { - a = ReadHeader(db, a.ParentHash, a.Number.Uint64()-1) - if a == nil { - return nil - } - } - for an := a.Number.Uint64(); an < b.Number.Uint64(); { - b = ReadHeader(db, b.ParentHash, b.Number.Uint64()-1) - if b == nil { - return nil - } - } - for a.Hash() != b.Hash() { - a = ReadHeader(db, a.ParentHash, a.Number.Uint64()-1) - if a == nil { - return nil - } - b = ReadHeader(db, b.ParentHash, b.Number.Uint64()-1) - if b == nil { - return nil - } - } - return a -} - // ReadHeadHeader returns the current canonical head header. 
func ReadHeadHeader(db ethdb.Reader) *types.Header { headHeaderHash := ReadHeadHeaderHash(db) diff --git a/core/rawdb/accessors_chain_test.go b/core/rawdb/accessors_chain_test.go index 819788b4da..02d51f4dd2 100644 --- a/core/rawdb/accessors_chain_test.go +++ b/core/rawdb/accessors_chain_test.go @@ -23,7 +23,6 @@ import ( "math/big" "math/rand" "os" - "reflect" "testing" "github.com/ethereum/go-ethereum/common" @@ -249,13 +248,6 @@ func TestBadBlockStorage(t *testing.T) { t.Fatalf("The bad blocks are not sorted #[%d](%d) < #[%d](%d)", i, badBlocks[i].NumberU64(), i+1, badBlocks[i+1].NumberU64()) } } - - // Delete all bad blocks - DeleteBadBlocks(db) - badBlocks = ReadAllBadBlocks(db) - if len(badBlocks) != 0 { - t.Fatalf("Failed to delete bad blocks") - } } // Tests that canonical numbers can be mapped to hashes and retrieved. @@ -516,37 +508,6 @@ func TestWriteAncientHeaderChain(t *testing.T) { } } -func TestCanonicalHashIteration(t *testing.T) { - var cases = []struct { - from, to uint64 - limit int - expect []uint64 - }{ - {1, 8, 0, nil}, - {1, 8, 1, []uint64{1}}, - {1, 8, 10, []uint64{1, 2, 3, 4, 5, 6, 7}}, - {1, 9, 10, []uint64{1, 2, 3, 4, 5, 6, 7, 8}}, - {2, 9, 10, []uint64{2, 3, 4, 5, 6, 7, 8}}, - {9, 10, 10, nil}, - } - // Test empty db iteration - db := NewMemoryDatabase() - numbers, _ := ReadAllCanonicalHashes(db, 0, 10, 10) - if len(numbers) != 0 { - t.Fatalf("No entry should be returned to iterate an empty db") - } - // Fill database with testing data. 
- for i := uint64(1); i <= 8; i++ { - WriteCanonicalHash(db, common.Hash{}, i) - } - for i, c := range cases { - numbers, _ := ReadAllCanonicalHashes(db, c.from, c.to, c.limit) - if !reflect.DeepEqual(numbers, c.expect) { - t.Fatalf("Case %d failed, want %v, got %v", i, c.expect, numbers) - } - } -} - func TestHashesInRange(t *testing.T) { mkHeader := func(number, seq int) *types.Header { h := types.Header{ @@ -565,18 +526,6 @@ func TestHashesInRange(t *testing.T) { total++ } } - if have, want := len(ReadAllHashesInRange(db, 10, 10)), 10; have != want { - t.Fatalf("Wrong number of hashes read, want %d, got %d", want, have) - } - if have, want := len(ReadAllHashesInRange(db, 10, 9)), 0; have != want { - t.Fatalf("Wrong number of hashes read, want %d, got %d", want, have) - } - if have, want := len(ReadAllHashesInRange(db, 0, 100)), total; have != want { - t.Fatalf("Wrong number of hashes read, want %d, got %d", want, have) - } - if have, want := len(ReadAllHashesInRange(db, 9, 10)), 9+10; have != want { - t.Fatalf("Wrong number of hashes read, want %d, got %d", want, have) - } if have, want := len(ReadAllHashes(db, 10)), 10; have != want { t.Fatalf("Wrong number of hashes read, want %d, got %d", want, have) } From 2a2f106a0166ef2b3e20e7a4d0b2aa36f03cb7de Mon Sep 17 00:00:00 2001 From: Guillaume Ballet <3272758+gballet@users.noreply.github.com> Date: Fri, 14 Nov 2025 15:25:30 +0100 Subject: [PATCH 122/277] cmd/evm/internal/t8ntool, trie: support for verkle-at-genesis, use UBT, and move the transition tree to its own package (#32445) This is broken off of #31730 to only focus on testing networks that start with verkle at genesis. The PR has seen a lot of work since its creation, and it now targets creating and re-executing tests for a binary tree testnet without the transition (so it starts at genesis). The transition tree has been moved to its own package. It also replaces verkle with the binary tree for this specific application. 
--------- Co-authored-by: Gary Rong --- cmd/evm/internal/t8ntool/execution.go | 38 +- cmd/evm/internal/t8ntool/flags.go | 13 + cmd/evm/internal/t8ntool/transition.go | 225 ++++- cmd/evm/main.go | 48 + core/bintrie_witness_test.go | 237 +++++ core/chain_makers.go | 124 +-- core/genesis_test.go | 2 +- core/state/database.go | 10 +- core/state/dump.go | 24 + core/state/reader.go | 25 +- core/state/state_object.go | 3 +- core/verkle_witness_test.go | 1107 ----------------------- tests/block_test_util.go | 7 +- tests/init.go | 19 + trie/bintrie/binary_node.go | 44 +- trie/bintrie/binary_node_test.go | 18 +- trie/bintrie/hashed_node.go | 30 +- trie/bintrie/hashed_node_test.go | 83 +- trie/bintrie/internal_node.go | 82 +- trie/bintrie/iterator.go | 33 +- trie/bintrie/iterator_test.go | 83 -- trie/bintrie/key_encoding.go | 6 + trie/bintrie/stem_node.go | 31 +- trie/bintrie/stem_node_test.go | 14 +- trie/bintrie/trie.go | 120 ++- trie/bintrie/trie_test.go | 8 +- trie/{ => transitiontrie}/transition.go | 47 +- trie/utils/verkle.go | 114 +++ trie/verkle.go | 3 +- triedb/pathdb/database.go | 12 +- 30 files changed, 1142 insertions(+), 1468 deletions(-) create mode 100644 core/bintrie_witness_test.go delete mode 100644 core/verkle_witness_test.go delete mode 100644 trie/bintrie/iterator_test.go rename trie/{ => transitiontrie}/transition.go (87%) diff --git a/cmd/evm/internal/t8ntool/execution.go b/cmd/evm/internal/t8ntool/execution.go index 5303d432fb..44f15c322c 100644 --- a/cmd/evm/internal/t8ntool/execution.go +++ b/cmd/evm/internal/t8ntool/execution.go @@ -18,6 +18,7 @@ package t8ntool import ( "fmt" + stdmath "math" "math/big" "github.com/ethereum/go-ethereum/common" @@ -43,8 +44,9 @@ import ( ) type Prestate struct { - Env stEnv `json:"env"` - Pre types.GenesisAlloc `json:"pre"` + Env stEnv `json:"env"` + Pre types.GenesisAlloc `json:"pre"` + TreeLeaves map[common.Hash]hexutil.Bytes `json:"vkt,omitempty"` } //go:generate go run github.com/fjl/gencodec -type 
ExecutionResult -field-override executionResultMarshaling -out gen_execresult.go @@ -142,7 +144,8 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig, return h } var ( - statedb = MakePreState(rawdb.NewMemoryDatabase(), pre.Pre) + isEIP4762 = chainConfig.IsVerkle(big.NewInt(int64(pre.Env.Number)), pre.Env.Timestamp) + statedb = MakePreState(rawdb.NewMemoryDatabase(), pre.Pre, isEIP4762) signer = types.MakeSigner(chainConfig, new(big.Int).SetUint64(pre.Env.Number), pre.Env.Timestamp) gaspool = new(core.GasPool) blockHash = common.Hash{0x13, 0x37} @@ -301,6 +304,10 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig, // Amount is in gwei, turn into wei amount := new(big.Int).Mul(new(big.Int).SetUint64(w.Amount), big.NewInt(params.GWei)) statedb.AddBalance(w.Address, uint256.MustFromBig(amount), tracing.BalanceIncreaseWithdrawal) + + if isEIP4762 { + statedb.AccessEvents().AddAccount(w.Address, true, stdmath.MaxUint64) + } } // Gather the execution-layer triggered requests. @@ -361,8 +368,7 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig, execRs.Requests = requests } - // Re-create statedb instance with new root upon the updated database - // for accessing latest states. 
+ // Re-create statedb instance with new root for MPT mode statedb, err = state.New(root, statedb.Database()) if err != nil { return nil, nil, nil, NewError(ErrorEVM, fmt.Errorf("could not reopen state: %v", err)) @@ -371,12 +377,17 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig, return statedb, execRs, body, nil } -func MakePreState(db ethdb.Database, accounts types.GenesisAlloc) *state.StateDB { - tdb := triedb.NewDatabase(db, &triedb.Config{Preimages: true}) +func MakePreState(db ethdb.Database, accounts types.GenesisAlloc, isBintrie bool) *state.StateDB { + tdb := triedb.NewDatabase(db, &triedb.Config{Preimages: true, IsVerkle: isBintrie}) sdb := state.NewDatabase(tdb, nil) - statedb, err := state.New(types.EmptyRootHash, sdb) + + root := types.EmptyRootHash + if isBintrie { + root = types.EmptyBinaryHash + } + statedb, err := state.New(root, sdb) if err != nil { - panic(fmt.Errorf("failed to create initial state: %v", err)) + panic(fmt.Errorf("failed to create initial statedb: %v", err)) } for addr, a := range accounts { statedb.SetCode(addr, a.Code, tracing.CodeChangeUnspecified) @@ -387,10 +398,15 @@ func MakePreState(db ethdb.Database, accounts types.GenesisAlloc) *state.StateDB } } // Commit and re-open to start with a clean state. 
- root, err := statedb.Commit(0, false, false) + root, err = statedb.Commit(0, false, false) if err != nil { panic(fmt.Errorf("failed to commit initial state: %v", err)) } + // If bintrie mode started, check if conversion happened + if isBintrie { + return statedb + } + // For MPT mode, reopen the state with the committed root statedb, err = state.New(root, sdb) if err != nil { panic(fmt.Errorf("failed to reopen state after commit: %v", err)) @@ -398,7 +414,7 @@ func MakePreState(db ethdb.Database, accounts types.GenesisAlloc) *state.StateDB return statedb } -func rlpHash(x interface{}) (h common.Hash) { +func rlpHash(x any) (h common.Hash) { hw := sha3.NewLegacyKeccak256() rlp.Encode(hw, x) hw.Sum(h[:0]) diff --git a/cmd/evm/internal/t8ntool/flags.go b/cmd/evm/internal/t8ntool/flags.go index f2606c86d1..a6ec33eacf 100644 --- a/cmd/evm/internal/t8ntool/flags.go +++ b/cmd/evm/internal/t8ntool/flags.go @@ -88,6 +88,14 @@ var ( "\t - into the file ", Value: "block.json", } + OutputBTFlag = &cli.StringFlag{ + Name: "output.vkt", + Usage: "Determines where to put the `BT` of the post-state.\n" + + "\t`stdout` - into the stdout output\n" + + "\t`stderr` - into the stderr output\n" + + "\t - into the file ", + Value: "vkt.json", + } InputAllocFlag = &cli.StringFlag{ Name: "input.alloc", Usage: "`stdin` or file name of where to find the prestate alloc to use.", @@ -123,6 +131,11 @@ var ( Usage: "`stdin` or file name of where to find the transactions list in RLP form.", Value: "txs.rlp", } + // TODO(@CPerezz): rename `Name` of the file in a follow-up PR (relays on EEST -> https://github.com/ethereum/execution-spec-tests/tree/verkle/main) + InputBTFlag = &cli.StringFlag{ + Name: "input.vkt", + Usage: "`stdin` or file name of where to find the prestate BT.", + } SealCliqueFlag = &cli.StringFlag{ Name: "seal.clique", Usage: "Seal block with Clique. 
`stdin` or file name of where to find the Clique sealing data.", diff --git a/cmd/evm/internal/t8ntool/transition.go b/cmd/evm/internal/t8ntool/transition.go index e946ccddd5..af60333cbd 100644 --- a/cmd/evm/internal/t8ntool/transition.go +++ b/cmd/evm/internal/t8ntool/transition.go @@ -28,15 +28,22 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/consensus/misc/eip1559" + "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/tracing" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/vm" + "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/eth/tracers" "github.com/ethereum/go-ethereum/eth/tracers/logger" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/tests" + "github.com/ethereum/go-ethereum/trie/bintrie" + "github.com/ethereum/go-ethereum/triedb" + "github.com/ethereum/go-ethereum/triedb/database" + "github.com/holiman/uint256" "github.com/urfave/cli/v2" ) @@ -75,10 +82,11 @@ var ( ) type input struct { - Alloc types.GenesisAlloc `json:"alloc,omitempty"` - Env *stEnv `json:"env,omitempty"` - Txs []*txWithKey `json:"txs,omitempty"` - TxRlp string `json:"txsRlp,omitempty"` + Alloc types.GenesisAlloc `json:"alloc,omitempty"` + Env *stEnv `json:"env,omitempty"` + BT map[common.Hash]hexutil.Bytes `json:"vkt,omitempty"` + Txs []*txWithKey `json:"txs,omitempty"` + TxRlp string `json:"txsRlp,omitempty"` } func Transition(ctx *cli.Context) error { @@ -90,16 +98,16 @@ func Transition(ctx *cli.Context) error { // stdin input or in files. 
// Check if anything needs to be read from stdin var ( - prestate Prestate - txIt txIterator // txs to apply - allocStr = ctx.String(InputAllocFlag.Name) - + prestate Prestate + txIt txIterator // txs to apply + allocStr = ctx.String(InputAllocFlag.Name) + btStr = ctx.String(InputBTFlag.Name) envStr = ctx.String(InputEnvFlag.Name) txStr = ctx.String(InputTxsFlag.Name) inputData = &input{} ) // Figure out the prestate alloc - if allocStr == stdinSelector || envStr == stdinSelector || txStr == stdinSelector { + if allocStr == stdinSelector || btStr == stdinSelector || envStr == stdinSelector || txStr == stdinSelector { decoder := json.NewDecoder(os.Stdin) if err := decoder.Decode(inputData); err != nil { return NewError(ErrorJson, fmt.Errorf("failed unmarshalling stdin: %v", err)) @@ -112,6 +120,13 @@ func Transition(ctx *cli.Context) error { } prestate.Pre = inputData.Alloc + if btStr != stdinSelector && btStr != "" { + if err := readFile(btStr, "BT", &inputData.BT); err != nil { + return err + } + } + prestate.TreeLeaves = inputData.BT + // Set the block environment if envStr != stdinSelector { var env stEnv @@ -182,9 +197,21 @@ func Transition(ctx *cli.Context) error { return err } // Dump the execution result - collector := make(Alloc) - s.DumpToCollector(collector, nil) - return dispatchOutput(ctx, baseDir, result, collector, body) + var ( + collector = make(Alloc) + btleaves map[common.Hash]hexutil.Bytes + ) + isBinary := chainConfig.IsVerkle(big.NewInt(int64(prestate.Env.Number)), prestate.Env.Timestamp) + if !isBinary { + s.DumpToCollector(collector, nil) + } else { + btleaves = make(map[common.Hash]hexutil.Bytes) + if err := s.DumpBinTrieLeaves(btleaves); err != nil { + return err + } + } + + return dispatchOutput(ctx, baseDir, result, collector, body, btleaves) } func applyLondonChecks(env *stEnv, chainConfig *params.ChainConfig) error { @@ -306,7 +333,7 @@ func saveFile(baseDir, filename string, data interface{}) error { // dispatchOutput writes the output 
data to either stderr or stdout, or to the specified // files -func dispatchOutput(ctx *cli.Context, baseDir string, result *ExecutionResult, alloc Alloc, body hexutil.Bytes) error { +func dispatchOutput(ctx *cli.Context, baseDir string, result *ExecutionResult, alloc Alloc, body hexutil.Bytes, bt map[common.Hash]hexutil.Bytes) error { stdOutObject := make(map[string]interface{}) stdErrObject := make(map[string]interface{}) dispatch := func(baseDir, fName, name string, obj interface{}) error { @@ -333,6 +360,13 @@ func dispatchOutput(ctx *cli.Context, baseDir string, result *ExecutionResult, a if err := dispatch(baseDir, ctx.String(OutputBodyFlag.Name), "body", body); err != nil { return err } + // Only write bt output if we actually have binary trie leaves + if bt != nil { + if err := dispatch(baseDir, ctx.String(OutputBTFlag.Name), "vkt", bt); err != nil { + return err + } + } + if len(stdOutObject) > 0 { b, err := json.MarshalIndent(stdOutObject, "", " ") if err != nil { @@ -351,3 +385,168 @@ func dispatchOutput(ctx *cli.Context, baseDir string, result *ExecutionResult, a } return nil } + +// BinKey computes the tree key given an address and an optional slot number. +func BinKey(ctx *cli.Context) error { + if ctx.Args().Len() == 0 || ctx.Args().Len() > 2 { + return errors.New("invalid number of arguments: expecting an address and an optional slot number") + } + + addr, err := hexutil.Decode(ctx.Args().Get(0)) + if err != nil { + return fmt.Errorf("error decoding address: %w", err) + } + + if ctx.Args().Len() == 2 { + slot, err := hexutil.Decode(ctx.Args().Get(1)) + if err != nil { + return fmt.Errorf("error decoding slot: %w", err) + } + fmt.Printf("%#x\n", bintrie.GetBinaryTreeKeyStorageSlot(common.BytesToAddress(addr), slot)) + } else { + fmt.Printf("%#x\n", bintrie.GetBinaryTreeKeyBasicData(common.BytesToAddress(addr))) + } + return nil +} + +// BinKeys computes a set of tree keys given a genesis alloc. 
+func BinKeys(ctx *cli.Context) error { + var allocStr = ctx.String(InputAllocFlag.Name) + var alloc core.GenesisAlloc + // Figure out the prestate alloc + if allocStr == stdinSelector { + decoder := json.NewDecoder(os.Stdin) + if err := decoder.Decode(&alloc); err != nil { + return NewError(ErrorJson, fmt.Errorf("failed unmarshaling stdin: %v", err)) + } + } + if allocStr != stdinSelector { + if err := readFile(allocStr, "alloc", &alloc); err != nil { + return err + } + } + db := triedb.NewDatabase(rawdb.NewMemoryDatabase(), triedb.VerkleDefaults) + defer db.Close() + + bt, err := genBinTrieFromAlloc(alloc, db) + if err != nil { + return fmt.Errorf("error generating bt: %w", err) + } + + collector := make(map[common.Hash]hexutil.Bytes) + it, err := bt.NodeIterator(nil) + if err != nil { + panic(err) + } + for it.Next(true) { + if it.Leaf() { + collector[common.BytesToHash(it.LeafKey())] = it.LeafBlob() + } + } + + output, err := json.MarshalIndent(collector, "", "") + if err != nil { + return fmt.Errorf("error outputting tree: %w", err) + } + + fmt.Println(string(output)) + + return nil +} + +// BinTrieRoot computes the root of a Binary Trie from a genesis alloc. +func BinTrieRoot(ctx *cli.Context) error { + var allocStr = ctx.String(InputAllocFlag.Name) + var alloc core.GenesisAlloc + if allocStr == stdinSelector { + decoder := json.NewDecoder(os.Stdin) + if err := decoder.Decode(&alloc); err != nil { + return NewError(ErrorJson, fmt.Errorf("failed unmarshaling stdin: %v", err)) + } + } + if allocStr != stdinSelector { + if err := readFile(allocStr, "alloc", &alloc); err != nil { + return err + } + } + db := triedb.NewDatabase(rawdb.NewMemoryDatabase(), triedb.VerkleDefaults) + defer db.Close() + + bt, err := genBinTrieFromAlloc(alloc, db) + if err != nil { + return fmt.Errorf("error generating bt: %w", err) + } + fmt.Println(bt.Hash().Hex()) + + return nil +} + +// TODO(@CPerezz): Should this go to `bintrie` module? 
+func genBinTrieFromAlloc(alloc core.GenesisAlloc, db database.NodeDatabase) (*bintrie.BinaryTrie, error) { + bt, err := bintrie.NewBinaryTrie(types.EmptyBinaryHash, db) + if err != nil { + return nil, err + } + for addr, acc := range alloc { + for slot, value := range acc.Storage { + err := bt.UpdateStorage(addr, slot.Bytes(), value.Big().Bytes()) + if err != nil { + return nil, fmt.Errorf("error inserting storage: %w", err) + } + } + account := &types.StateAccount{ + Balance: uint256.MustFromBig(acc.Balance), + Nonce: acc.Nonce, + CodeHash: crypto.Keccak256Hash(acc.Code).Bytes(), + Root: common.Hash{}, + } + err := bt.UpdateAccount(addr, account, len(acc.Code)) + if err != nil { + return nil, fmt.Errorf("error inserting account: %w", err) + } + err = bt.UpdateContractCode(addr, common.BytesToHash(account.CodeHash), acc.Code) + if err != nil { + return nil, fmt.Errorf("error inserting code: %w", err) + } + } + return bt, nil +} + +// BinaryCodeChunkKey computes the tree key of a code-chunk for a given address. +func BinaryCodeChunkKey(ctx *cli.Context) error { + if ctx.Args().Len() == 0 || ctx.Args().Len() > 2 { + return errors.New("invalid number of arguments: expecting an address and an code-chunk number") + } + + addr, err := hexutil.Decode(ctx.Args().Get(0)) + if err != nil { + return fmt.Errorf("error decoding address: %w", err) + } + chunkNumberBytes, err := hexutil.Decode(ctx.Args().Get(1)) + if err != nil { + return fmt.Errorf("error decoding chunk number: %w", err) + } + var chunkNumber uint256.Int + chunkNumber.SetBytes(chunkNumberBytes) + + fmt.Printf("%#x\n", bintrie.GetBinaryTreeKeyCodeChunk(common.BytesToAddress(addr), &chunkNumber)) + + return nil +} + +// BinaryCodeChunkCode returns the code chunkification for a given code. 
+func BinaryCodeChunkCode(ctx *cli.Context) error { + if ctx.Args().Len() == 0 || ctx.Args().Len() > 1 { + return errors.New("invalid number of arguments: expecting a bytecode") + } + + bytecode, err := hexutil.Decode(ctx.Args().Get(0)) + if err != nil { + return fmt.Errorf("error decoding address: %w", err) + } + + chunkedCode := bintrie.ChunkifyCode(bytecode) + fmt.Printf("%#x\n", chunkedCode) + + return nil +} diff --git a/cmd/evm/main.go b/cmd/evm/main.go index bf5be9a359..5238d5920c 100644 --- a/cmd/evm/main.go +++ b/cmd/evm/main.go @@ -146,16 +146,63 @@ var ( t8ntool.TraceEnableCallFramesFlag, t8ntool.OutputBasedir, t8ntool.OutputAllocFlag, + t8ntool.OutputBTFlag, t8ntool.OutputResultFlag, t8ntool.OutputBodyFlag, t8ntool.InputAllocFlag, t8ntool.InputEnvFlag, + t8ntool.InputBTFlag, t8ntool.InputTxsFlag, t8ntool.ForknameFlag, t8ntool.ChainIDFlag, t8ntool.RewardFlag, }, } + + verkleCommand = &cli.Command{ + Name: "verkle", + Aliases: []string{"vkt"}, + Usage: "Binary Trie helpers", + Subcommands: []*cli.Command{ + { + Name: "tree-keys", + Aliases: []string{"v"}, + Usage: "compute a set of binary trie keys, given their source addresses and optional slot numbers", + Action: t8ntool.BinKeys, + Flags: []cli.Flag{ + t8ntool.InputAllocFlag, + }, + }, + { + Name: "single-key", + Aliases: []string{"vk"}, + Usage: "compute the binary trie key given an address and optional slot number", + Action: t8ntool.BinKey, + }, + { + Name: "code-chunk-key", + Aliases: []string{"vck"}, + Usage: "compute the binary trie key given an address and chunk number", + Action: t8ntool.BinaryCodeChunkKey, + }, + { + Name: "chunkify-code", + Aliases: []string{"vcc"}, + Usage: "chunkify a given bytecode for a binary trie", + Action: t8ntool.BinaryCodeChunkCode, + }, + { + Name: "state-root", + Aliases: []string{"vsr"}, + Usage: "compute the state-root of a binary trie for the given alloc", + Action: t8ntool.BinTrieRoot, + Flags: []cli.Flag{ + t8ntool.InputAllocFlag, + }, + }, + }, + } + 
transactionCommand = &cli.Command{ Name: "transaction", Aliases: []string{"t9n"}, @@ -210,6 +257,7 @@ func init() { stateTransitionCommand, transactionCommand, blockBuilderCommand, + verkleCommand, } app.Before = func(ctx *cli.Context) error { flags.MigrateGlobalFlags(ctx) diff --git a/core/bintrie_witness_test.go b/core/bintrie_witness_test.go new file mode 100644 index 0000000000..7704ba41fb --- /dev/null +++ b/core/bintrie_witness_test.go @@ -0,0 +1,237 @@ +// Copyright 2024 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . 
+ +package core + +import ( + "encoding/binary" + "fmt" + "math/big" + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/consensus/beacon" + "github.com/ethereum/go-ethereum/consensus/ethash" + "github.com/ethereum/go-ethereum/core/rawdb" + "github.com/ethereum/go-ethereum/core/state" + "github.com/ethereum/go-ethereum/core/tracing" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/core/vm" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/params" + "github.com/ethereum/go-ethereum/triedb" +) + +var ( + testVerkleChainConfig = ¶ms.ChainConfig{ + ChainID: big.NewInt(1), + HomesteadBlock: big.NewInt(0), + EIP150Block: big.NewInt(0), + EIP155Block: big.NewInt(0), + EIP158Block: big.NewInt(0), + ByzantiumBlock: big.NewInt(0), + ConstantinopleBlock: big.NewInt(0), + PetersburgBlock: big.NewInt(0), + IstanbulBlock: big.NewInt(0), + MuirGlacierBlock: big.NewInt(0), + BerlinBlock: big.NewInt(0), + LondonBlock: big.NewInt(0), + Ethash: new(params.EthashConfig), + ShanghaiTime: u64(0), + VerkleTime: u64(0), + TerminalTotalDifficulty: common.Big0, + EnableVerkleAtGenesis: true, + BlobScheduleConfig: ¶ms.BlobScheduleConfig{ + Verkle: params.DefaultPragueBlobConfig, + }, + } +) + +func TestProcessVerkle(t *testing.T) { + var ( + code = common.FromHex(`6060604052600a8060106000396000f360606040526008565b00`) + intrinsicContractCreationGas, _ = IntrinsicGas(code, nil, nil, true, true, true, true) + // A contract creation that calls EXTCODECOPY in the constructor. Used to ensure that the witness + // will not contain that copied data. 
+ // Source: https://gist.github.com/gballet/a23db1e1cb4ed105616b5920feb75985 + codeWithExtCodeCopy = common.FromHex(`0x60806040526040516100109061017b565b604051809103906000f08015801561002c573d6000803e3d6000fd5b506000806101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff16021790555034801561007857600080fd5b5060008067ffffffffffffffff8111156100955761009461024a565b5b6040519080825280601f01601f1916602001820160405280156100c75781602001600182028036833780820191505090505b50905060008060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1690506020600083833c81610101906101e3565b60405161010d90610187565b61011791906101a3565b604051809103906000f080158015610133573d6000803e3d6000fd5b50600160006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff160217905550505061029b565b60d58061046783390190565b6102068061053c83390190565b61019d816101d9565b82525050565b60006020820190506101b86000830184610194565b92915050565b6000819050602082019050919050565b600081519050919050565b6000819050919050565b60006101ee826101ce565b826101f8846101be565b905061020381610279565b925060208210156102435761023e7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff8360200360080261028e565b831692505b5050919050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b600061028582516101d9565b80915050919050565b600082821b905092915050565b6101bd806102aa6000396000f3fe608060405234801561001057600080fd5b506004361061002b5760003560e01c8063f566852414610030575b600080fd5b61003861004e565b6040516100459190610146565b60405180910390f35b6000600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff166381ca91d36040518163ffffffff1660e01b815260040160206040518083038186803b1580156100b857600080fd5b505afa1580156100cc573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906100f0919061010a565b905090565b600081519050610104816101
70565b92915050565b6000602082840312156101205761011f61016b565b5b600061012e848285016100f5565b91505092915050565b61014081610161565b82525050565b600060208201905061015b6000830184610137565b92915050565b6000819050919050565b600080fd5b61017981610161565b811461018457600080fd5b5056fea2646970667358221220a6a0e11af79f176f9c421b7b12f441356b25f6489b83d38cc828a701720b41f164736f6c63430008070033608060405234801561001057600080fd5b5060b68061001f6000396000f3fe6080604052348015600f57600080fd5b506004361060285760003560e01c8063ab5ed15014602d575b600080fd5b60336047565b604051603e9190605d565b60405180910390f35b60006001905090565b6057816076565b82525050565b6000602082019050607060008301846050565b92915050565b600081905091905056fea26469706673582212203a14eb0d5cd07c277d3e24912f110ddda3e553245a99afc4eeefb2fbae5327aa64736f6c63430008070033608060405234801561001057600080fd5b5060405161020638038061020683398181016040528101906100329190610063565b60018160001c6100429190610090565b60008190555050610145565b60008151905061005d8161012e565b92915050565b60006020828403121561007957610078610129565b5b60006100878482850161004e565b91505092915050565b600061009b826100f0565b91506100a6836100f0565b9250827fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff038211156100db576100da6100fa565b5b828201905092915050565b6000819050919050565b6000819050919050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b600080fd5b610137816100e6565b811461014257600080fd5b50565b60b3806101536000396000f3fe6080604052348015600f57600080fd5b506004361060285760003560e01c806381ca91d314602d575b600080fd5b60336047565b604051603e9190605a565b60405180910390f35b60005481565b6054816073565b82525050565b6000602082019050606d6000830184604d565b92915050565b600081905091905056fea26469706673582212209bff7098a2f526de1ad499866f27d6d0d6f17b74a413036d6063ca6a0998ca4264736f6c63430008070033`) + intrinsicCodeWithExtCodeCopyGas, _ = IntrinsicGas(codeWithExtCodeCopy, nil, nil, true, true, true, true) + signer = types.LatestSigner(testVerkleChainConfig) 
+ testKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") + bcdb = rawdb.NewMemoryDatabase() // Database for the blockchain + coinbase = common.HexToAddress("0x71562b71999873DB5b286dF957af199Ec94617F7") + gspec = &Genesis{ + Config: testVerkleChainConfig, + Alloc: GenesisAlloc{ + coinbase: { + Balance: big.NewInt(1000000000000000000), // 1 ether + Nonce: 0, + }, + params.BeaconRootsAddress: {Nonce: 1, Code: params.BeaconRootsCode, Balance: common.Big0}, + params.HistoryStorageAddress: {Nonce: 1, Code: params.HistoryStorageCode, Balance: common.Big0}, + params.WithdrawalQueueAddress: {Nonce: 1, Code: params.WithdrawalQueueCode, Balance: common.Big0}, + params.ConsolidationQueueAddress: {Nonce: 1, Code: params.ConsolidationQueueCode, Balance: common.Big0}, + }, + } + ) + // Verkle trees use the snapshot, which must be enabled before the + // data is saved into the tree+database. + // genesis := gspec.MustCommit(bcdb, triedb) + options := DefaultConfig().WithStateScheme(rawdb.PathScheme) + options.SnapshotLimit = 0 + blockchain, _ := NewBlockChain(bcdb, gspec, beacon.New(ethash.NewFaker()), options) + defer blockchain.Stop() + + txCost1 := params.TxGas + txCost2 := params.TxGas + contractCreationCost := intrinsicContractCreationGas + + params.WitnessChunkReadCost + params.WitnessChunkWriteCost + params.WitnessBranchReadCost + params.WitnessBranchWriteCost + /* creation */ + params.WitnessChunkReadCost + params.WitnessChunkWriteCost + /* creation with value */ + 739 /* execution costs */ + codeWithExtCodeCopyGas := intrinsicCodeWithExtCodeCopyGas + + params.WitnessChunkReadCost + params.WitnessChunkWriteCost + params.WitnessBranchReadCost + params.WitnessBranchWriteCost + /* creation (tx) */ + params.WitnessChunkReadCost + params.WitnessChunkWriteCost + params.WitnessBranchReadCost + params.WitnessBranchWriteCost + /* creation (CREATE at pc=0x20) */ + params.WitnessChunkReadCost + params.WitnessChunkWriteCost + /* write code 
hash */ + params.WitnessChunkReadCost + params.WitnessChunkWriteCost + /* code chunk #0 */ + params.WitnessChunkReadCost + params.WitnessChunkWriteCost + /* code chunk #1 */ + params.WitnessChunkReadCost + params.WitnessChunkWriteCost + /* code chunk #2 */ + params.WitnessChunkReadCost + params.WitnessChunkWriteCost + /* code chunk #3 */ + params.WitnessChunkReadCost + params.WitnessChunkWriteCost + /* code chunk #4 */ + params.WitnessChunkReadCost + params.WitnessChunkWriteCost + /* code chunk #5 */ + params.WitnessChunkReadCost + /* SLOAD in constructor */ + params.WitnessChunkWriteCost + /* SSTORE in constructor */ + params.WitnessChunkReadCost + params.WitnessChunkWriteCost + params.WitnessBranchReadCost + params.WitnessBranchWriteCost + /* creation (CREATE at PC=0x121) */ + params.WitnessChunkReadCost + params.WitnessChunkWriteCost + /* write code hash */ + params.WitnessChunkReadCost + params.WitnessChunkWriteCost + /* code chunk #0 */ + params.WitnessChunkReadCost + params.WitnessChunkWriteCost + /* code chunk #1 */ + params.WitnessChunkReadCost + params.WitnessChunkWriteCost + /* code chunk #2 */ + params.WitnessChunkReadCost + params.WitnessChunkWriteCost + /* code chunk #3 */ + params.WitnessChunkReadCost + params.WitnessChunkWriteCost + /* code chunk #4 */ + params.WitnessChunkReadCost + params.WitnessChunkWriteCost + /* code chunk #5 */ + params.WitnessChunkReadCost + /* SLOAD in constructor */ + params.WitnessChunkWriteCost + /* SSTORE in constructor */ + params.WitnessChunkReadCost + params.WitnessChunkWriteCost + /* write code hash for tx creation */ + 15*(params.WitnessChunkReadCost+params.WitnessChunkWriteCost) + /* code chunks #0..#14 */ + uint64(4844) /* execution costs */ + blockGasUsagesExpected := []uint64{ + txCost1*2 + txCost2, + txCost1*2 + txCost2 + contractCreationCost + codeWithExtCodeCopyGas, + } + _, chain, _ := GenerateChainWithGenesis(gspec, beacon.New(ethash.NewFaker()), 2, func(i int, gen *BlockGen) { + gen.SetPoS() + + // TODO 
need to check that the tx cost provided is the exact amount used (no remaining left-over) + tx, _ := types.SignTx(types.NewTransaction(uint64(i)*3, common.Address{byte(i), 2, 3}, big.NewInt(999), txCost1, big.NewInt(875000000), nil), signer, testKey) + gen.AddTx(tx) + tx, _ = types.SignTx(types.NewTransaction(uint64(i)*3+1, common.Address{}, big.NewInt(999), txCost1, big.NewInt(875000000), nil), signer, testKey) + gen.AddTx(tx) + tx, _ = types.SignTx(types.NewTransaction(uint64(i)*3+2, common.Address{}, big.NewInt(0), txCost2, big.NewInt(875000000), nil), signer, testKey) + gen.AddTx(tx) + + // Add two contract creations in block #2 + if i == 1 { + tx, _ = types.SignNewTx(testKey, signer, &types.LegacyTx{Nonce: 6, + Value: big.NewInt(16), + Gas: 3000000, + GasPrice: big.NewInt(875000000), + Data: code, + }) + gen.AddTx(tx) + + tx, _ = types.SignNewTx(testKey, signer, &types.LegacyTx{Nonce: 7, + Value: big.NewInt(0), + Gas: 3000000, + GasPrice: big.NewInt(875000000), + Data: codeWithExtCodeCopy, + }) + gen.AddTx(tx) + } + }) + + for i, b := range chain { + fmt.Printf("%d %x\n", i, b.Root()) + } + endnum, err := blockchain.InsertChain(chain) + if err != nil { + t.Fatalf("block %d imported with error: %v", endnum, err) + } + + for i := range 2 { + b := blockchain.GetBlockByNumber(uint64(i) + 1) + if b == nil { + t.Fatalf("expected block %d to be present in chain", i+1) + } + if b.Hash() != chain[i].Hash() { + t.Fatalf("block #%d not found at expected height", b.NumberU64()) + } + if b.GasUsed() != blockGasUsagesExpected[i] { + t.Fatalf("expected block #%d txs to use %d, got %d\n", b.NumberU64(), blockGasUsagesExpected[i], b.GasUsed()) + } + } +} + +func TestProcessParentBlockHash(t *testing.T) { + // This test uses blocks where, + // block 1 parent hash is 0x0100.... + // block 2 parent hash is 0x0200.... 
+ // etc + checkBlockHashes := func(statedb *state.StateDB, isVerkle bool) { + statedb.SetNonce(params.HistoryStorageAddress, 1, tracing.NonceChangeUnspecified) + statedb.SetCode(params.HistoryStorageAddress, params.HistoryStorageCode, tracing.CodeChangeUnspecified) + // Process n blocks, from 1 .. num + var num = 2 + for i := 1; i <= num; i++ { + header := &types.Header{ParentHash: common.Hash{byte(i)}, Number: big.NewInt(int64(i)), Difficulty: new(big.Int)} + chainConfig := params.MergedTestChainConfig + if isVerkle { + chainConfig = testVerkleChainConfig + } + vmContext := NewEVMBlockContext(header, nil, new(common.Address)) + evm := vm.NewEVM(vmContext, statedb, chainConfig, vm.Config{}) + ProcessParentBlockHash(header.ParentHash, evm) + } + // Read block hashes for block 0 .. num-1 + for i := 0; i < num; i++ { + have, want := getContractStoredBlockHash(statedb, uint64(i), isVerkle), common.Hash{byte(i + 1)} + if have != want { + t.Errorf("block %d, verkle=%v, have parent hash %v, want %v", i, isVerkle, have, want) + } + } + } + t.Run("MPT", func(t *testing.T) { + statedb, _ := state.New(types.EmptyRootHash, state.NewDatabaseForTesting()) + checkBlockHashes(statedb, false) + }) + t.Run("Verkle", func(t *testing.T) { + db := rawdb.NewMemoryDatabase() + cacheConfig := DefaultConfig().WithStateScheme(rawdb.PathScheme) + cacheConfig.SnapshotLimit = 0 + triedb := triedb.NewDatabase(db, cacheConfig.triedbConfig(true)) + statedb, _ := state.New(types.EmptyVerkleHash, state.NewDatabase(triedb, nil)) + checkBlockHashes(statedb, true) + }) +} + +// getContractStoredBlockHash is a utility method which reads the stored parent blockhash for block 'number' +func getContractStoredBlockHash(statedb *state.StateDB, number uint64, isVerkle bool) common.Hash { + ringIndex := number % params.HistoryServeWindow + var key common.Hash + binary.BigEndian.PutUint64(key[24:], ringIndex) + if isVerkle { + return statedb.GetState(params.HistoryStorageAddress, key) + } + return 
statedb.GetState(params.HistoryStorageAddress, key) +} diff --git a/core/chain_makers.go b/core/chain_makers.go index af55716cca..a1e07becba 100644 --- a/core/chain_makers.go +++ b/core/chain_makers.go @@ -32,7 +32,6 @@ import ( "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/triedb" - "github.com/ethereum/go-verkle" "github.com/holiman/uint256" ) @@ -427,7 +426,11 @@ func GenerateChain(config *params.ChainConfig, parent *types.Block, engine conse } // Forcibly use hash-based state scheme for retaining all nodes in disk. - triedb := triedb.NewDatabase(db, triedb.HashDefaults) + var triedbConfig *triedb.Config = triedb.HashDefaults + if config.IsVerkle(config.ChainID, 0) { + triedbConfig = triedb.VerkleDefaults + } + triedb := triedb.NewDatabase(db, triedbConfig) defer triedb.Close() for i := 0; i < n; i++ { @@ -472,7 +475,11 @@ func GenerateChain(config *params.ChainConfig, parent *types.Block, engine conse // then generate chain on top. 
func GenerateChainWithGenesis(genesis *Genesis, engine consensus.Engine, n int, gen func(int, *BlockGen)) (ethdb.Database, []*types.Block, []types.Receipts) { db := rawdb.NewMemoryDatabase() - triedb := triedb.NewDatabase(db, triedb.HashDefaults) + var triedbConfig *triedb.Config = triedb.HashDefaults + if genesis.Config != nil && genesis.Config.IsVerkle(genesis.Config.ChainID, 0) { + triedbConfig = triedb.VerkleDefaults + } + triedb := triedb.NewDatabase(db, triedbConfig) defer triedb.Close() _, err := genesis.Commit(db, triedb) if err != nil { @@ -482,117 +489,6 @@ func GenerateChainWithGenesis(genesis *Genesis, engine consensus.Engine, n int, return db, blocks, receipts } -func GenerateVerkleChain(config *params.ChainConfig, parent *types.Block, engine consensus.Engine, db ethdb.Database, trdb *triedb.Database, n int, gen func(int, *BlockGen)) ([]*types.Block, []types.Receipts, []*verkle.VerkleProof, []verkle.StateDiff) { - if config == nil { - config = params.TestChainConfig - } - proofs := make([]*verkle.VerkleProof, 0, n) - keyvals := make([]verkle.StateDiff, 0, n) - cm := newChainMaker(parent, config, engine) - - genblock := func(i int, parent *types.Block, triedb *triedb.Database, statedb *state.StateDB) (*types.Block, types.Receipts) { - b := &BlockGen{i: i, cm: cm, parent: parent, statedb: statedb, engine: engine} - b.header = cm.makeHeader(parent, statedb, b.engine) - - // TODO uncomment when proof generation is merged - // Save pre state for proof generation - // preState := statedb.Copy() - - // EIP-2935 / 7709 - blockContext := NewEVMBlockContext(b.header, cm, &b.header.Coinbase) - blockContext.Random = &common.Hash{} // enable post-merge instruction set - evm := vm.NewEVM(blockContext, statedb, cm.config, vm.Config{}) - ProcessParentBlockHash(b.header.ParentHash, evm) - - // Execute any user modifications to the block. 
- if gen != nil { - gen(i, b) - } - - requests := b.collectRequests(false) - if requests != nil { - reqHash := types.CalcRequestsHash(requests) - b.header.RequestsHash = &reqHash - } - - body := &types.Body{ - Transactions: b.txs, - Uncles: b.uncles, - Withdrawals: b.withdrawals, - } - block, err := b.engine.FinalizeAndAssemble(cm, b.header, statedb, body, b.receipts) - if err != nil { - panic(err) - } - - // Write state changes to DB. - root, err := statedb.Commit(b.header.Number.Uint64(), config.IsEIP158(b.header.Number), config.IsCancun(b.header.Number, b.header.Time)) - if err != nil { - panic(fmt.Sprintf("state write error: %v", err)) - } - if err = triedb.Commit(root, false); err != nil { - panic(fmt.Sprintf("trie write error: %v", err)) - } - - proofs = append(proofs, block.ExecutionWitness().VerkleProof) - keyvals = append(keyvals, block.ExecutionWitness().StateDiff) - - return block, b.receipts - } - - sdb := state.NewDatabase(trdb, nil) - - for i := 0; i < n; i++ { - statedb, err := state.New(parent.Root(), sdb) - if err != nil { - panic(err) - } - block, receipts := genblock(i, parent, trdb, statedb) - - // Post-process the receipts. - // Here we assign the final block hash and other info into the receipt. - // In order for DeriveFields to work, the transaction and receipt lists need to be - // of equal length. If AddUncheckedTx or AddUncheckedReceipt are used, there will be - // extra ones, so we just trim the lists here. - receiptsCount := len(receipts) - txs := block.Transactions() - if len(receipts) > len(txs) { - receipts = receipts[:len(txs)] - } else if len(receipts) < len(txs) { - txs = txs[:len(receipts)] - } - var blobGasPrice *big.Int - if block.ExcessBlobGas() != nil { - blobGasPrice = eip4844.CalcBlobFee(cm.config, block.Header()) - } - if err := receipts.DeriveFields(config, block.Hash(), block.NumberU64(), block.Time(), block.BaseFee(), blobGasPrice, txs); err != nil { - panic(err) - } - - // Re-expand to ensure all receipts are returned. 
- receipts = receipts[:receiptsCount] - - // Advance the chain. - cm.add(block, receipts) - parent = block - } - return cm.chain, cm.receipts, proofs, keyvals -} - -func GenerateVerkleChainWithGenesis(genesis *Genesis, engine consensus.Engine, n int, gen func(int, *BlockGen)) (common.Hash, ethdb.Database, []*types.Block, []types.Receipts, []*verkle.VerkleProof, []verkle.StateDiff) { - db := rawdb.NewMemoryDatabase() - cacheConfig := DefaultConfig().WithStateScheme(rawdb.PathScheme) - cacheConfig.SnapshotLimit = 0 - triedb := triedb.NewDatabase(db, cacheConfig.triedbConfig(true)) - defer triedb.Close() - genesisBlock, err := genesis.Commit(db, triedb) - if err != nil { - panic(err) - } - blocks, receipts, proofs, keyvals := GenerateVerkleChain(genesis.Config, genesisBlock, engine, db, triedb, n, gen) - return genesisBlock.Hash(), db, blocks, receipts, proofs, keyvals -} - func (cm *chainMaker) makeHeader(parent *types.Block, state *state.StateDB, engine consensus.Engine) *types.Header { time := parent.Time() + 10 // block time is fixed at 10 seconds parentHeader := parent.Header() diff --git a/core/genesis_test.go b/core/genesis_test.go index a41dfce578..1ed475695d 100644 --- a/core/genesis_test.go +++ b/core/genesis_test.go @@ -308,7 +308,7 @@ func TestVerkleGenesisCommit(t *testing.T) { }, } - expected := common.FromHex("018d20eebb130b5e2b796465fe36aafab650650729a92435aec071bf2386f080") + expected := common.FromHex("19056b480530799a4fdaa9fd9407043b965a3a5c37b4d2a1a9a4f3395a327561") got := genesis.ToBlock().Root().Bytes() if !bytes.Equal(got, expected) { t.Fatalf("invalid genesis state root, expected %x, got %x", expected, got) diff --git a/core/state/database.go b/core/state/database.go index 58d0ccfe82..ae177d964f 100644 --- a/core/state/database.go +++ b/core/state/database.go @@ -28,6 +28,8 @@ import ( "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/trie" + 
"github.com/ethereum/go-ethereum/trie/bintrie" + "github.com/ethereum/go-ethereum/trie/transitiontrie" "github.com/ethereum/go-ethereum/trie/trienode" "github.com/ethereum/go-ethereum/trie/utils" "github.com/ethereum/go-ethereum/triedb" @@ -239,10 +241,12 @@ func (db *CachingDB) OpenTrie(root common.Hash) (Trie, error) { if db.triedb.IsVerkle() { ts := overlay.LoadTransitionState(db.TrieDB().Disk(), root, db.triedb.IsVerkle()) if ts.InTransition() { - panic("transition isn't supported yet") + panic("state tree transition isn't supported yet") } if ts.Transitioned() { - return trie.NewVerkleTrie(root, db.triedb, db.pointCache) + // Use BinaryTrie instead of VerkleTrie when IsVerkle is set + // (IsVerkle actually means Binary Trie mode in this codebase) + return bintrie.NewBinaryTrie(root, db.triedb) } } tr, err := trie.NewStateTrie(trie.StateTrieID(root), db.triedb) @@ -302,7 +306,7 @@ func mustCopyTrie(t Trie) Trie { return t.Copy() case *trie.VerkleTrie: return t.Copy() - case *trie.TransitionTrie: + case *transitiontrie.TransitionTrie: return t.Copy() default: panic(fmt.Errorf("unknown trie type %T", t)) diff --git a/core/state/dump.go b/core/state/dump.go index a4abc33733..829d106ed3 100644 --- a/core/state/dump.go +++ b/core/state/dump.go @@ -18,6 +18,7 @@ package state import ( "encoding/json" + "errors" "fmt" "time" @@ -27,6 +28,7 @@ import ( "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/trie" + "github.com/ethereum/go-ethereum/trie/bintrie" ) // DumpConfig is a set of options to control what portions of the state will be @@ -221,6 +223,28 @@ func (s *StateDB) DumpToCollector(c DumpCollector, conf *DumpConfig) (nextKey [] return nextKey } +// DumpBinTrieLeaves collects all binary trie leaf nodes into the provided map. 
+func (s *StateDB) DumpBinTrieLeaves(collector map[common.Hash]hexutil.Bytes) error { + tr, err := s.db.OpenTrie(s.originalRoot) + if err != nil { + return err + } + btr, ok := tr.(*bintrie.BinaryTrie) + if !ok { + return errors.New("trie is not a binary trie") + } + it, err := btr.NodeIterator(nil) + if err != nil { + return err + } + for it.Next(true) { + if it.Leaf() { + collector[common.BytesToHash(it.LeafKey())] = it.LeafBlob() + } + } + return nil +} + // RawDump returns the state. If the processing is aborted e.g. due to options // reaching Max, the `Next` key is set on the returned Dump. func (s *StateDB) RawDump(opts *DumpConfig) Dump { diff --git a/core/state/reader.go b/core/state/reader.go index 3e8b31b6be..93083c8ae2 100644 --- a/core/state/reader.go +++ b/core/state/reader.go @@ -30,6 +30,8 @@ import ( "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/trie" + "github.com/ethereum/go-ethereum/trie/bintrie" + "github.com/ethereum/go-ethereum/trie/transitiontrie" "github.com/ethereum/go-ethereum/trie/utils" "github.com/ethereum/go-ethereum/triedb" "github.com/ethereum/go-ethereum/triedb/database" @@ -242,7 +244,11 @@ func newTrieReader(root common.Hash, db *triedb.Database, cache *utils.PointCach if !db.IsVerkle() { tr, err = trie.NewStateTrie(trie.StateTrieID(root), db) } else { - tr, err = trie.NewVerkleTrie(root, db, cache) + // When IsVerkle() is true, create a BinaryTrie wrapped in TransitionTrie + binTrie, binErr := bintrie.NewBinaryTrie(root, db) + if binErr != nil { + return nil, binErr + } // Based on the transition status, determine if the overlay // tree needs to be created, or if a single, target tree is @@ -253,7 +259,22 @@ func newTrieReader(root common.Hash, db *triedb.Database, cache *utils.PointCach if err != nil { return nil, err } - tr = trie.NewTransitionTrie(mpt, tr.(*trie.VerkleTrie), false) + tr = transitiontrie.NewTransitionTrie(mpt, binTrie, false) + } else { + // HACK: 
Use TransitionTrie with nil base as a wrapper to make BinaryTrie + // satisfy the Trie interface. This works around the import cycle between + // trie and trie/bintrie packages. + // + // TODO: In future PRs, refactor the package structure to avoid this hack: + // - Option 1: Move common interfaces (Trie, NodeIterator) to a separate + // package that both trie and trie/bintrie can import + // - Option 2: Create a factory function in the trie package that returns + // BinaryTrie as a Trie interface without direct import + // - Option 3: Move BinaryTrie to the main trie package + // + // The current approach works but adds unnecessary overhead and complexity + // by using TransitionTrie when there's no actual transition happening. + tr = transitiontrie.NewTransitionTrie(nil, binTrie, false) } } if err != nil { diff --git a/core/state/state_object.go b/core/state/state_object.go index fdeb4254c1..8f2f323327 100644 --- a/core/state/state_object.go +++ b/core/state/state_object.go @@ -29,6 +29,7 @@ import ( "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/trie" + "github.com/ethereum/go-ethereum/trie/transitiontrie" "github.com/ethereum/go-ethereum/trie/trienode" "github.com/holiman/uint256" ) @@ -501,7 +502,7 @@ func (s *stateObject) deepCopy(db *StateDB) *stateObject { // Verkle uses only one tree, and the copy has already been // made in mustCopyTrie. obj.trie = db.trie - case *trie.TransitionTrie: + case *transitiontrie.TransitionTrie: // Same thing for the transition tree, since the MPT is // read-only. obj.trie = db.trie diff --git a/core/verkle_witness_test.go b/core/verkle_witness_test.go deleted file mode 100644 index 9495e325ca..0000000000 --- a/core/verkle_witness_test.go +++ /dev/null @@ -1,1107 +0,0 @@ -// Copyright 2024 The go-ethereum Authors -// This file is part of the go-ethereum library. 
-// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package core - -import ( - "bytes" - "encoding/binary" - "encoding/hex" - "fmt" - "math/big" - "slices" - "testing" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/consensus/beacon" - "github.com/ethereum/go-ethereum/consensus/ethash" - "github.com/ethereum/go-ethereum/core/rawdb" - "github.com/ethereum/go-ethereum/core/state" - "github.com/ethereum/go-ethereum/core/tracing" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/core/vm" - "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/params" - "github.com/ethereum/go-ethereum/trie/utils" - "github.com/ethereum/go-ethereum/triedb" - "github.com/ethereum/go-verkle" - "github.com/holiman/uint256" -) - -var ( - testVerkleChainConfig = ¶ms.ChainConfig{ - ChainID: big.NewInt(1), - HomesteadBlock: big.NewInt(0), - EIP150Block: big.NewInt(0), - EIP155Block: big.NewInt(0), - EIP158Block: big.NewInt(0), - ByzantiumBlock: big.NewInt(0), - ConstantinopleBlock: big.NewInt(0), - PetersburgBlock: big.NewInt(0), - IstanbulBlock: big.NewInt(0), - MuirGlacierBlock: big.NewInt(0), - BerlinBlock: big.NewInt(0), - LondonBlock: big.NewInt(0), - Ethash: new(params.EthashConfig), - ShanghaiTime: u64(0), - VerkleTime: u64(0), - TerminalTotalDifficulty: common.Big0, - 
EnableVerkleAtGenesis: true, - BlobScheduleConfig: ¶ms.BlobScheduleConfig{ - Verkle: params.DefaultPragueBlobConfig, - }, - // TODO uncomment when proof generation is merged - // ProofInBlocks: true, - } - testKaustinenLikeChainConfig = ¶ms.ChainConfig{ - ChainID: big.NewInt(69420), - HomesteadBlock: big.NewInt(0), - EIP150Block: big.NewInt(0), - EIP155Block: big.NewInt(0), - EIP158Block: big.NewInt(0), - ByzantiumBlock: big.NewInt(0), - ConstantinopleBlock: big.NewInt(0), - PetersburgBlock: big.NewInt(0), - IstanbulBlock: big.NewInt(0), - MuirGlacierBlock: big.NewInt(0), - BerlinBlock: big.NewInt(0), - LondonBlock: big.NewInt(0), - Ethash: new(params.EthashConfig), - ShanghaiTime: u64(0), - VerkleTime: u64(0), - TerminalTotalDifficulty: common.Big0, - EnableVerkleAtGenesis: true, - BlobScheduleConfig: ¶ms.BlobScheduleConfig{ - Verkle: params.DefaultPragueBlobConfig, - }, - } -) - -func TestProcessVerkle(t *testing.T) { - var ( - code = common.FromHex(`6060604052600a8060106000396000f360606040526008565b00`) - intrinsicContractCreationGas, _ = IntrinsicGas(code, nil, nil, true, true, true, true) - // A contract creation that calls EXTCODECOPY in the constructor. Used to ensure that the witness - // will not contain that copied data. 
- // Source: https://gist.github.com/gballet/a23db1e1cb4ed105616b5920feb75985 - codeWithExtCodeCopy = common.FromHex(`0x60806040526040516100109061017b565b604051809103906000f08015801561002c573d6000803e3d6000fd5b506000806101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff16021790555034801561007857600080fd5b5060008067ffffffffffffffff8111156100955761009461024a565b5b6040519080825280601f01601f1916602001820160405280156100c75781602001600182028036833780820191505090505b50905060008060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1690506020600083833c81610101906101e3565b60405161010d90610187565b61011791906101a3565b604051809103906000f080158015610133573d6000803e3d6000fd5b50600160006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff160217905550505061029b565b60d58061046783390190565b6102068061053c83390190565b61019d816101d9565b82525050565b60006020820190506101b86000830184610194565b92915050565b6000819050602082019050919050565b600081519050919050565b6000819050919050565b60006101ee826101ce565b826101f8846101be565b905061020381610279565b925060208210156102435761023e7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff8360200360080261028e565b831692505b5050919050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b600061028582516101d9565b80915050919050565b600082821b905092915050565b6101bd806102aa6000396000f3fe608060405234801561001057600080fd5b506004361061002b5760003560e01c8063f566852414610030575b600080fd5b61003861004e565b6040516100459190610146565b60405180910390f35b6000600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff166381ca91d36040518163ffffffff1660e01b815260040160206040518083038186803b1580156100b857600080fd5b505afa1580156100cc573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906100f0919061010a565b905090565b600081519050610104816101
70565b92915050565b6000602082840312156101205761011f61016b565b5b600061012e848285016100f5565b91505092915050565b61014081610161565b82525050565b600060208201905061015b6000830184610137565b92915050565b6000819050919050565b600080fd5b61017981610161565b811461018457600080fd5b5056fea2646970667358221220a6a0e11af79f176f9c421b7b12f441356b25f6489b83d38cc828a701720b41f164736f6c63430008070033608060405234801561001057600080fd5b5060b68061001f6000396000f3fe6080604052348015600f57600080fd5b506004361060285760003560e01c8063ab5ed15014602d575b600080fd5b60336047565b604051603e9190605d565b60405180910390f35b60006001905090565b6057816076565b82525050565b6000602082019050607060008301846050565b92915050565b600081905091905056fea26469706673582212203a14eb0d5cd07c277d3e24912f110ddda3e553245a99afc4eeefb2fbae5327aa64736f6c63430008070033608060405234801561001057600080fd5b5060405161020638038061020683398181016040528101906100329190610063565b60018160001c6100429190610090565b60008190555050610145565b60008151905061005d8161012e565b92915050565b60006020828403121561007957610078610129565b5b60006100878482850161004e565b91505092915050565b600061009b826100f0565b91506100a6836100f0565b9250827fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff038211156100db576100da6100fa565b5b828201905092915050565b6000819050919050565b6000819050919050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b600080fd5b610137816100e6565b811461014257600080fd5b50565b60b3806101536000396000f3fe6080604052348015600f57600080fd5b506004361060285760003560e01c806381ca91d314602d575b600080fd5b60336047565b604051603e9190605a565b60405180910390f35b60005481565b6054816073565b82525050565b6000602082019050606d6000830184604d565b92915050565b600081905091905056fea26469706673582212209bff7098a2f526de1ad499866f27d6d0d6f17b74a413036d6063ca6a0998ca4264736f6c63430008070033`) - intrinsicCodeWithExtCodeCopyGas, _ = IntrinsicGas(codeWithExtCodeCopy, nil, nil, true, true, true, true) - signer = types.LatestSigner(testVerkleChainConfig) 
- testKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") - bcdb = rawdb.NewMemoryDatabase() // Database for the blockchain - coinbase = common.HexToAddress("0x71562b71999873DB5b286dF957af199Ec94617F7") - gspec = &Genesis{ - Config: testVerkleChainConfig, - Alloc: GenesisAlloc{ - coinbase: { - Balance: big.NewInt(1000000000000000000), // 1 ether - Nonce: 0, - }, - params.BeaconRootsAddress: {Nonce: 1, Code: params.BeaconRootsCode, Balance: common.Big0}, - params.HistoryStorageAddress: {Nonce: 1, Code: params.HistoryStorageCode, Balance: common.Big0}, - params.WithdrawalQueueAddress: {Nonce: 1, Code: params.WithdrawalQueueCode, Balance: common.Big0}, - params.ConsolidationQueueAddress: {Nonce: 1, Code: params.ConsolidationQueueCode, Balance: common.Big0}, - }, - } - ) - // Verkle trees use the snapshot, which must be enabled before the - // data is saved into the tree+database. - // genesis := gspec.MustCommit(bcdb, triedb) - options := DefaultConfig().WithStateScheme(rawdb.PathScheme) - options.SnapshotLimit = 0 - blockchain, _ := NewBlockChain(bcdb, gspec, beacon.New(ethash.NewFaker()), options) - defer blockchain.Stop() - - txCost1 := params.TxGas - txCost2 := params.TxGas - contractCreationCost := intrinsicContractCreationGas + - params.WitnessChunkReadCost + params.WitnessChunkWriteCost + params.WitnessBranchReadCost + params.WitnessBranchWriteCost + /* creation */ - params.WitnessChunkReadCost + params.WitnessChunkWriteCost + /* creation with value */ - 739 /* execution costs */ - codeWithExtCodeCopyGas := intrinsicCodeWithExtCodeCopyGas + - params.WitnessChunkReadCost + params.WitnessChunkWriteCost + params.WitnessBranchReadCost + params.WitnessBranchWriteCost + /* creation (tx) */ - params.WitnessChunkReadCost + params.WitnessChunkWriteCost + params.WitnessBranchReadCost + params.WitnessBranchWriteCost + /* creation (CREATE at pc=0x20) */ - params.WitnessChunkReadCost + params.WitnessChunkWriteCost + /* write code 
hash */ - params.WitnessChunkReadCost + params.WitnessChunkWriteCost + /* code chunk #0 */ - params.WitnessChunkReadCost + params.WitnessChunkWriteCost + /* code chunk #1 */ - params.WitnessChunkReadCost + params.WitnessChunkWriteCost + /* code chunk #2 */ - params.WitnessChunkReadCost + params.WitnessChunkWriteCost + /* code chunk #3 */ - params.WitnessChunkReadCost + params.WitnessChunkWriteCost + /* code chunk #4 */ - params.WitnessChunkReadCost + params.WitnessChunkWriteCost + /* code chunk #5 */ - params.WitnessChunkReadCost + /* SLOAD in constructor */ - params.WitnessChunkWriteCost + /* SSTORE in constructor */ - params.WitnessChunkReadCost + params.WitnessChunkWriteCost + params.WitnessBranchReadCost + params.WitnessBranchWriteCost + /* creation (CREATE at PC=0x121) */ - params.WitnessChunkReadCost + params.WitnessChunkWriteCost + /* write code hash */ - params.WitnessChunkReadCost + params.WitnessChunkWriteCost + /* code chunk #0 */ - params.WitnessChunkReadCost + params.WitnessChunkWriteCost + /* code chunk #1 */ - params.WitnessChunkReadCost + params.WitnessChunkWriteCost + /* code chunk #2 */ - params.WitnessChunkReadCost + params.WitnessChunkWriteCost + /* code chunk #3 */ - params.WitnessChunkReadCost + params.WitnessChunkWriteCost + /* code chunk #4 */ - params.WitnessChunkReadCost + params.WitnessChunkWriteCost + /* code chunk #5 */ - params.WitnessChunkReadCost + /* SLOAD in constructor */ - params.WitnessChunkWriteCost + /* SSTORE in constructor */ - params.WitnessChunkReadCost + params.WitnessChunkWriteCost + /* write code hash for tx creation */ - 15*(params.WitnessChunkReadCost+params.WitnessChunkWriteCost) + /* code chunks #0..#14 */ - uint64(4844) /* execution costs */ - blockGasUsagesExpected := []uint64{ - txCost1*2 + txCost2, - txCost1*2 + txCost2 + contractCreationCost + codeWithExtCodeCopyGas, - } - _, _, chain, _, proofs, statediffs := GenerateVerkleChainWithGenesis(gspec, beacon.New(ethash.NewFaker()), 2, func(i int, gen *BlockGen) { - 
gen.SetPoS() - - // TODO need to check that the tx cost provided is the exact amount used (no remaining left-over) - tx, _ := types.SignTx(types.NewTransaction(uint64(i)*3, common.Address{byte(i), 2, 3}, big.NewInt(999), txCost1, big.NewInt(875000000), nil), signer, testKey) - gen.AddTx(tx) - tx, _ = types.SignTx(types.NewTransaction(uint64(i)*3+1, common.Address{}, big.NewInt(999), txCost1, big.NewInt(875000000), nil), signer, testKey) - gen.AddTx(tx) - tx, _ = types.SignTx(types.NewTransaction(uint64(i)*3+2, common.Address{}, big.NewInt(0), txCost2, big.NewInt(875000000), nil), signer, testKey) - gen.AddTx(tx) - - // Add two contract creations in block #2 - if i == 1 { - tx, _ = types.SignNewTx(testKey, signer, &types.LegacyTx{Nonce: 6, - Value: big.NewInt(16), - Gas: 3000000, - GasPrice: big.NewInt(875000000), - Data: code, - }) - gen.AddTx(tx) - - tx, _ = types.SignNewTx(testKey, signer, &types.LegacyTx{Nonce: 7, - Value: big.NewInt(0), - Gas: 3000000, - GasPrice: big.NewInt(875000000), - Data: codeWithExtCodeCopy, - }) - gen.AddTx(tx) - } - }) - - // Check proof for both blocks - err := verkle.Verify(proofs[0], gspec.ToBlock().Root().Bytes(), chain[0].Root().Bytes(), statediffs[0]) - if err != nil { - t.Fatal(err) - } - err = verkle.Verify(proofs[1], chain[0].Root().Bytes(), chain[1].Root().Bytes(), statediffs[1]) - if err != nil { - t.Fatal(err) - } - - t.Log("verified verkle proof, inserting blocks into the chain") - - for i, b := range chain { - fmt.Printf("%d %x\n", i, b.Root()) - } - endnum, err := blockchain.InsertChain(chain) - if err != nil { - t.Fatalf("block %d imported with error: %v", endnum, err) - } - - for i := range 2 { - b := blockchain.GetBlockByNumber(uint64(i) + 1) - if b == nil { - t.Fatalf("expected block %d to be present in chain", i+1) - } - if b.Hash() != chain[i].Hash() { - t.Fatalf("block #%d not found at expected height", b.NumberU64()) - } - if b.GasUsed() != blockGasUsagesExpected[i] { - t.Fatalf("expected block #%d txs to use %d, 
got %d\n", b.NumberU64(), blockGasUsagesExpected[i], b.GasUsed()) - } - } -} - -func TestProcessParentBlockHash(t *testing.T) { - // This test uses blocks where, - // block 1 parent hash is 0x0100.... - // block 2 parent hash is 0x0200.... - // etc - checkBlockHashes := func(statedb *state.StateDB, isVerkle bool) { - statedb.SetNonce(params.HistoryStorageAddress, 1, tracing.NonceChangeUnspecified) - statedb.SetCode(params.HistoryStorageAddress, params.HistoryStorageCode, tracing.CodeChangeUnspecified) - // Process n blocks, from 1 .. num - var num = 2 - for i := 1; i <= num; i++ { - header := &types.Header{ParentHash: common.Hash{byte(i)}, Number: big.NewInt(int64(i)), Difficulty: new(big.Int)} - chainConfig := params.MergedTestChainConfig - if isVerkle { - chainConfig = testVerkleChainConfig - } - vmContext := NewEVMBlockContext(header, nil, new(common.Address)) - evm := vm.NewEVM(vmContext, statedb, chainConfig, vm.Config{}) - ProcessParentBlockHash(header.ParentHash, evm) - } - // Read block hashes for block 0 .. 
num-1 - for i := 0; i < num; i++ { - have, want := getContractStoredBlockHash(statedb, uint64(i), isVerkle), common.Hash{byte(i + 1)} - if have != want { - t.Errorf("block %d, verkle=%v, have parent hash %v, want %v", i, isVerkle, have, want) - } - } - } - t.Run("MPT", func(t *testing.T) { - statedb, _ := state.New(types.EmptyRootHash, state.NewDatabaseForTesting()) - checkBlockHashes(statedb, false) - }) - t.Run("Verkle", func(t *testing.T) { - db := rawdb.NewMemoryDatabase() - cacheConfig := DefaultConfig().WithStateScheme(rawdb.PathScheme) - cacheConfig.SnapshotLimit = 0 - triedb := triedb.NewDatabase(db, cacheConfig.triedbConfig(true)) - statedb, _ := state.New(types.EmptyVerkleHash, state.NewDatabase(triedb, nil)) - checkBlockHashes(statedb, true) - }) -} - -// getContractStoredBlockHash is a utility method which reads the stored parent blockhash for block 'number' -func getContractStoredBlockHash(statedb *state.StateDB, number uint64, isVerkle bool) common.Hash { - ringIndex := number % params.HistoryServeWindow - var key common.Hash - binary.BigEndian.PutUint64(key[24:], ringIndex) - if isVerkle { - return statedb.GetState(params.HistoryStorageAddress, key) - } - return statedb.GetState(params.HistoryStorageAddress, key) -} - -// TestProcessVerkleInvalidContractCreation checks for several modes of contract creation failures -func TestProcessVerkleInvalidContractCreation(t *testing.T) { - var ( - account1 = common.HexToAddress("0x687704DB07e902e9A8B3754031D168D46E3D586e") - account2 = common.HexToAddress("0x6177843db3138ae69679A54b95cf345ED759450d") - gspec = verkleTestGenesis(testKaustinenLikeChainConfig) - ) - // slightly modify it to suit the live txs from the testnet - gspec.Alloc[account2] = types.Account{ - Balance: big.NewInt(1000000000000000000), // 1 ether - Nonce: 1, - } - - // Create two blocks that reproduce what is happening on kaustinen. 
- // - The first block contains two failing contract creation transactions, that - // write to storage before they revert. - // - // - The second block contains a single failing contract creation transaction, - // that fails right off the bat. - genesisH, _, chain, _, _, statediffs := GenerateVerkleChainWithGenesis(gspec, beacon.New(ethash.NewFaker()), 2, func(i int, gen *BlockGen) { - gen.SetPoS() - - if i == 0 { - for _, rlpData := range []string{ - // SSTORE at slot 41 and reverts - "f8d48084479c2c18830186a08080b8806000602955bda3f9600060ca55600060695523b360006039551983576000601255b0620c2fde2c592ac2600060bc55e0ac6000606455a63e22600060e655eb607e605c5360a2605d5360c7605e53601d605f5360eb606053606b606153608e60625360816063536079606453601e60655360fc60665360b7606753608b60685383021e7ca0cc20c65a97d2e526b8ec0f4266e8b01bdcde43b9aeb59d8bfb44e8eb8119c109a07a8e751813ae1b2ce734960dbc39a4f954917d7822a2c5d1dca18b06c584131f", - // SSTORE at slot 133 and reverts - "02f8db83010f2c01843b9aca0084479c2c18830186a08080b88060006085553fad6000600a55600060565555600060b55506600060cf557f1b8b38183e7bd1bdfaa7123c5a4976e54cce0e42049d841411978fd3595e25c66019527f0538943712953cf08900aae40222a40b2d5a4ac8075ad8cf0870e2be307edbb96039527f9f3174ff85024747041ae7a611acffb987c513c088d90ab288aec080a0cd6ac65ce2cb0a912371f6b5a551ba8caffc22ec55ad4d3cb53de41d05eb77b6a02e0dfe8513dfa6ec7bfd7eda6f5c0dac21b39b982436045e128cec46cfd3f960", - // this one is a simple transfer that succeeds, necessary to get the correct nonce in the other block. 
- "f8e80184479c2c18830186a094bbbbde4ca27f83fc18aa108170547ff57675936a80b8807ff71f7c15faadb969a76a5f54a81a0117e1e743cb7f24e378eda28442ea4c6eb6604a527fb5409e5718d44e23bfffac926e5ea726067f772772e7e19446acba0c853f62f5606a526020608a536088608b536039608c536004608d5360af608e537f7f7675d9f210e0a61564e6d11e7cd75f5bc9009ac9f6b94a0fc63035441a83021e7ba04a4a172d81ebb02847829b76a387ac09749c8b65668083699abe20c887fb9efca07c5b1a990702ec7b31a5e8e3935cd9a77649f8c25a84131229e24ab61aec6093", - } { - var tx = new(types.Transaction) - if err := tx.UnmarshalBinary(common.Hex2Bytes(rlpData)); err != nil { - t.Fatal(err) - } - gen.AddTx(tx) - } - } else { - var tx = new(types.Transaction) - // immediately reverts - if err := tx.UnmarshalBinary(common.Hex2Bytes("01f8d683010f2c028443ad7d0e830186a08080b880b00e7fa3c849dce891cce5fae8a4c46cbb313d6aec0c0ffe7863e05fb7b22d4807674c6055527ffbfcb0938f3e18f7937aa8fa95d880afebd5c4cec0d85186095832d03c85cf8a60755260ab60955360cf6096536066609753606e60985360fa609953609e609a53608e609b536024609c5360f6609d536072609e5360a4609fc080a08fc6f7101f292ff1fb0de8ac69c2d320fbb23bfe61cf327173786ea5daee6e37a044c42d91838ef06646294bf4f9835588aee66243b16a66a2da37641fae4c045f")); err != nil { - t.Fatal(err) - } - gen.AddTx(tx) - } - }) - - tx1ContractAddress := crypto.CreateAddress(account1, 0) - tx1ContractStem := utils.GetTreeKey(tx1ContractAddress[:], uint256.NewInt(0), 105) - tx1ContractStem = tx1ContractStem[:31] - - tx2ContractAddress := crypto.CreateAddress(account2, 1) - tx2SlotKey := [32]byte{} - tx2SlotKey[31] = 133 - tx2ContractStem := utils.StorageSlotKey(tx2ContractAddress[:], tx2SlotKey[:]) - tx2ContractStem = tx2ContractStem[:31] - - eip2935Stem := utils.GetTreeKey(params.HistoryStorageAddress[:], uint256.NewInt(0), 0) - eip2935Stem = eip2935Stem[:31] - - // Check that the witness contains what we expect: a storage entry for each of the two contract - // creations that failed: one at 133 for the 2nd tx, and one at 105 for the first tx. 
- for _, stemStateDiff := range statediffs[0] { - // Check that the slot number 133, which is overflowing the account header, - // is present. Note that the offset of the 2nd group (first group after the - // header) is skipping the first 64 values, hence we still have an offset - // of 133, and not 133 - 64. - if bytes.Equal(stemStateDiff.Stem[:], tx2ContractStem[:]) { - for _, suffixDiff := range stemStateDiff.SuffixDiffs { - if suffixDiff.Suffix != 133 { - t.Fatalf("invalid suffix diff found for %x in block #1: %d\n", stemStateDiff.Stem, suffixDiff.Suffix) - } - if suffixDiff.CurrentValue != nil { - t.Fatalf("invalid prestate value found for %x in block #1: %v != nil\n", stemStateDiff.Stem, suffixDiff.CurrentValue) - } - if suffixDiff.NewValue != nil { - t.Fatalf("invalid poststate value found for %x in block #1: %v != nil\n", stemStateDiff.Stem, suffixDiff.NewValue) - } - } - } else if bytes.Equal(stemStateDiff.Stem[:], tx1ContractStem) { - // For this contract creation, check that only the account header and storage slot 41 - // are found in the witness. - for _, suffixDiff := range stemStateDiff.SuffixDiffs { - if suffixDiff.Suffix != 105 && suffixDiff.Suffix != 0 && suffixDiff.Suffix != 1 { - t.Fatalf("invalid suffix diff found for %x in block #1: %d\n", stemStateDiff.Stem, suffixDiff.Suffix) - } - } - } else if bytes.Equal(stemStateDiff.Stem[:], eip2935Stem) { - // Check the eip 2935 group of leaves. - // Check that only one leaf was accessed, and is present in the witness. - if len(stemStateDiff.SuffixDiffs) > 1 { - t.Fatalf("invalid suffix diff count found for BLOCKHASH contract: %d != 1", len(stemStateDiff.SuffixDiffs)) - } - // Check that this leaf is the first storage slot - if stemStateDiff.SuffixDiffs[0].Suffix != 64 { - t.Fatalf("invalid suffix diff value found for BLOCKHASH contract: %d != 64", stemStateDiff.SuffixDiffs[0].Suffix) - } - // check that the prestate value is nil and that the poststate value isn't. 
- if stemStateDiff.SuffixDiffs[0].CurrentValue != nil { - t.Fatalf("non-nil current value in BLOCKHASH contract insert: %x", stemStateDiff.SuffixDiffs[0].CurrentValue) - } - if stemStateDiff.SuffixDiffs[0].NewValue == nil { - t.Fatalf("nil new value in BLOCKHASH contract insert") - } - if *stemStateDiff.SuffixDiffs[0].NewValue != genesisH { - t.Fatalf("invalid BLOCKHASH value: %x != %x", *stemStateDiff.SuffixDiffs[0].NewValue, genesisH) - } - } else { - // For all other entries present in the witness, check that nothing beyond - // the account header was accessed. - for _, suffixDiff := range stemStateDiff.SuffixDiffs { - if suffixDiff.Suffix > 2 { - t.Fatalf("invalid suffix diff found for %x in block #1: %d\n", stemStateDiff.Stem, suffixDiff.Suffix) - } - } - } - } - - // Check that no account has a value above 4 in the 2nd block as no storage nor - // code should make it to the witness. - for _, stemStateDiff := range statediffs[1] { - for _, suffixDiff := range stemStateDiff.SuffixDiffs { - if bytes.Equal(stemStateDiff.Stem[:], eip2935Stem) { - // BLOCKHASH contract stem - if len(stemStateDiff.SuffixDiffs) > 1 { - t.Fatalf("invalid suffix diff count found for BLOCKHASH contract at block #2: %d != 1", len(stemStateDiff.SuffixDiffs)) - } - if stemStateDiff.SuffixDiffs[0].Suffix != 65 { - t.Fatalf("invalid suffix diff value found for BLOCKHASH contract at block #2: %d != 65", stemStateDiff.SuffixDiffs[0].Suffix) - } - if stemStateDiff.SuffixDiffs[0].NewValue == nil { - t.Fatalf("missing post state value for BLOCKHASH contract at block #2") - } - if *stemStateDiff.SuffixDiffs[0].NewValue != chain[0].Hash() { - t.Fatalf("invalid post state value for BLOCKHASH contract at block #2: %x != %x", chain[0].Hash(), (*stemStateDiff.SuffixDiffs[0].NewValue)[:]) - } - } else if suffixDiff.Suffix > 4 { - t.Fatalf("invalid suffix diff found for %x in block #2: %d\n", stemStateDiff.Stem, suffixDiff.Suffix) - } - } - } -} - -func verkleTestGenesis(config *params.ChainConfig) 
*Genesis { - var ( - coinbase = common.HexToAddress("0x71562b71999873DB5b286dF957af199Ec94617F7") - account1 = common.HexToAddress("0x687704DB07e902e9A8B3754031D168D46E3D586e") - account2 = common.HexToAddress("0x6177843db3138ae69679A54b95cf345ED759450d") - ) - return &Genesis{ - Config: config, - Alloc: GenesisAlloc{ - coinbase: GenesisAccount{ - Balance: big.NewInt(1000000000000000000), // 1 ether - Nonce: 0, - }, - account1: GenesisAccount{ - Balance: big.NewInt(1000000000000000000), // 1 ether - Nonce: 0, - }, - account2: GenesisAccount{ - Balance: big.NewInt(1000000000000000000), // 1 ether - Nonce: 3, - }, - params.BeaconRootsAddress: {Nonce: 1, Code: params.BeaconRootsCode, Balance: common.Big0}, - params.HistoryStorageAddress: {Nonce: 1, Code: params.HistoryStorageCode, Balance: common.Big0}, - params.WithdrawalQueueAddress: {Nonce: 1, Code: params.WithdrawalQueueCode, Balance: common.Big0}, - params.ConsolidationQueueAddress: {Nonce: 1, Code: params.ConsolidationQueueCode, Balance: common.Big0}, - }, - } -} - -// TestProcessVerkleContractWithEmptyCode checks that the witness contains all valid -// entries, if the initcode returns an empty code. 
-func TestProcessVerkleContractWithEmptyCode(t *testing.T) { - // The test txs were taken from a secondary testnet with chain id 69421 - config := *testKaustinenLikeChainConfig - config.ChainID = new(big.Int).SetUint64(69421) - gspec := verkleTestGenesis(&config) - - genesisH, _, _, _, _, statediffs := GenerateVerkleChainWithGenesis(gspec, beacon.New(ethash.NewFaker()), 1, func(i int, gen *BlockGen) { - gen.SetPoS() - var tx types.Transaction - // a transaction that does some PUSH1n but returns a 0-sized contract - txpayload := common.Hex2Bytes("02f8db83010f2d03843b9aca008444cf6a05830186a08080b8807fdfbbb59f2371a76485ce557fd0de00c298d3ede52a3eab56d35af674eb49ec5860335260826053536001605453604c60555360f3605653606060575360446058536096605953600c605a5360df605b5360f3605c5360fb605d53600c605e53609a605f53607f60605360fe606153603d60625360f4606353604b60645360cac001a0486b6dc55b8a311568b7239a2cae1d77e7446dba71df61eaafd53f73820a138fa010bd48a45e56133ac4c5645142c2ea48950d40eb35050e9510b6bad9e15c5865") - if err := tx.UnmarshalBinary(txpayload); err != nil { - t.Fatal(err) - } - gen.AddTx(&tx) - }) - - eip2935Stem := utils.GetTreeKey(params.HistoryStorageAddress[:], uint256.NewInt(0), 0) - eip2935Stem = eip2935Stem[:31] - - for _, stemStateDiff := range statediffs[0] { - // Handle the case of the history contract: make sure only the correct - // slots are added to the witness. - if bytes.Equal(stemStateDiff.Stem[:], eip2935Stem) { - // BLOCKHASH contract stem - if len(stemStateDiff.SuffixDiffs) > 1 { - t.Fatalf("invalid suffix diff count found for BLOCKHASH contract: %d != 1", len(stemStateDiff.SuffixDiffs)) - } - if stemStateDiff.SuffixDiffs[0].Suffix != 64 { - t.Fatalf("invalid suffix diff value found for BLOCKHASH contract: %d != 64", stemStateDiff.SuffixDiffs[0].Suffix) - } - // check that the "current value" is nil and that the new value isn't. 
- if stemStateDiff.SuffixDiffs[0].CurrentValue != nil { - t.Fatalf("non-nil current value in BLOCKHASH contract insert: %x", stemStateDiff.SuffixDiffs[0].CurrentValue) - } - if stemStateDiff.SuffixDiffs[0].NewValue == nil { - t.Fatalf("nil new value in BLOCKHASH contract insert") - } - if *stemStateDiff.SuffixDiffs[0].NewValue != genesisH { - t.Fatalf("invalid BLOCKHASH value: %x != %x", *stemStateDiff.SuffixDiffs[0].NewValue, genesisH) - } - } else { - for _, suffixDiff := range stemStateDiff.SuffixDiffs { - if suffixDiff.Suffix > 2 { - // if d8898012c484fb48610ecb7963886339207dab004bce968b007b616ffa18e0 shows up, it means that the PUSHn - // in the transaction above added entries into the witness, when they should not have since they are - // part of a contract deployment. - t.Fatalf("invalid suffix diff found for %x in block #1: %d\n", stemStateDiff.Stem, suffixDiff.Suffix) - } - } - } - } -} - -// TestProcessVerkleExtCodeHashOpcode verifies that calling EXTCODEHASH on another -// deployed contract, creates all the right entries in the witness. 
-func TestProcessVerkleExtCodeHashOpcode(t *testing.T) { - // The test txs were taken from a secondary testnet with chain id 69421 - config := *testKaustinenLikeChainConfig - config.ChainID = new(big.Int).SetUint64(69421) - - var ( - signer = types.LatestSigner(&config) - testKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") - gspec = verkleTestGenesis(&config) - ) - dummyContract := []byte{ - byte(vm.PUSH1), 2, - byte(vm.PUSH1), 12, - byte(vm.PUSH1), 0x00, - byte(vm.CODECOPY), - - byte(vm.PUSH1), 2, - byte(vm.PUSH1), 0x00, - byte(vm.RETURN), - - byte(vm.PUSH1), 42, - } - deployer := crypto.PubkeyToAddress(testKey.PublicKey) - dummyContractAddr := crypto.CreateAddress(deployer, 0) - - // contract that calls EXTCODEHASH on the dummy contract - extCodeHashContract := []byte{ - byte(vm.PUSH1), 22, - byte(vm.PUSH1), 12, - byte(vm.PUSH1), 0x00, - byte(vm.CODECOPY), - - byte(vm.PUSH1), 22, - byte(vm.PUSH1), 0x00, - byte(vm.RETURN), - - byte(vm.PUSH20), - 0x3a, 0x22, 0x0f, 0x35, 0x12, 0x52, 0x08, 0x9d, 0x38, 0x5b, 0x29, 0xbe, 0xca, 0x14, 0xe2, 0x7f, 0x20, 0x4c, 0x29, 0x6a, - byte(vm.EXTCODEHASH), - } - extCodeHashContractAddr := crypto.CreateAddress(deployer, 1) - - _, _, _, _, _, statediffs := GenerateVerkleChainWithGenesis(gspec, beacon.New(ethash.NewFaker()), 2, func(i int, gen *BlockGen) { - gen.SetPoS() - - if i == 0 { - // Create dummy contract. - tx, _ := types.SignNewTx(testKey, signer, &types.LegacyTx{Nonce: 0, - Value: big.NewInt(0), - Gas: 100_000, - GasPrice: big.NewInt(875000000), - Data: dummyContract, - }) - gen.AddTx(tx) - - // Create contract with EXTCODEHASH opcode. 
- tx, _ = types.SignNewTx(testKey, signer, &types.LegacyTx{Nonce: 1, - Value: big.NewInt(0), - Gas: 100_000, - GasPrice: big.NewInt(875000000), - Data: extCodeHashContract}) - gen.AddTx(tx) - } else { - tx, _ := types.SignTx(types.NewTransaction(2, extCodeHashContractAddr, big.NewInt(0), 100_000, big.NewInt(875000000), nil), signer, testKey) - gen.AddTx(tx) - } - }) - - contractKeccakTreeKey := utils.CodeHashKey(dummyContractAddr[:]) - - var stateDiffIdx = -1 - for i, stemStateDiff := range statediffs[1] { - if bytes.Equal(stemStateDiff.Stem[:], contractKeccakTreeKey[:31]) { - stateDiffIdx = i - break - } - } - if stateDiffIdx == -1 { - t.Fatalf("no state diff found for stem") - } - - codeHashStateDiff := statediffs[1][stateDiffIdx].SuffixDiffs[0] - // Check location of code hash was accessed - if codeHashStateDiff.Suffix != utils.CodeHashLeafKey { - t.Fatalf("code hash invalid suffix") - } - // check the code hash wasn't present in the prestate, as - // the contract was deployed in this block. - if codeHashStateDiff.CurrentValue == nil { - t.Fatalf("codeHash.CurrentValue must not be empty") - } - // check the poststate value corresponds to the code hash - // of the deployed contract. - expCodeHash := crypto.Keccak256Hash(dummyContract[12:]) - if *codeHashStateDiff.CurrentValue != expCodeHash { - t.Fatalf("codeHash.CurrentValue unexpected code hash") - } - if codeHashStateDiff.NewValue != nil { - t.Fatalf("codeHash.NewValue must be nil") - } -} - -// TestProcessVerkleBalanceOpcode checks that calling balance -// on another contract will add the correct entries to the witness. 
-func TestProcessVerkleBalanceOpcode(t *testing.T) { - // The test txs were taken from a secondary testnet with chain id 69421 - config := *testKaustinenLikeChainConfig - config.ChainID = new(big.Int).SetUint64(69421) - - var ( - signer = types.LatestSigner(&config) - testKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") - account2 = common.HexToAddress("0x6177843db3138ae69679A54b95cf345ED759450d") - gspec = verkleTestGenesis(&config) - ) - _, _, _, _, _, statediffs := GenerateVerkleChainWithGenesis(gspec, beacon.New(ethash.NewFaker()), 1, func(i int, gen *BlockGen) { - gen.SetPoS() - txData := slices.Concat( - []byte{byte(vm.PUSH20)}, - common.HexToAddress("0x6177843db3138ae69679A54b95cf345ED759450d").Bytes(), - []byte{byte(vm.BALANCE)}) - - tx, _ := types.SignNewTx(testKey, signer, &types.LegacyTx{Nonce: 0, - Value: big.NewInt(0), - Gas: 100_000, - GasPrice: big.NewInt(875000000), - Data: txData}) - gen.AddTx(tx) - }) - - account2BalanceTreeKey := utils.BasicDataKey(account2[:]) - - var stateDiffIdx = -1 - for i, stemStateDiff := range statediffs[0] { - if bytes.Equal(stemStateDiff.Stem[:], account2BalanceTreeKey[:31]) { - stateDiffIdx = i - break - } - } - if stateDiffIdx == -1 { - t.Fatalf("no state diff found for stem") - } - - var zero [32]byte - balanceStateDiff := statediffs[0][stateDiffIdx].SuffixDiffs[0] - if balanceStateDiff.Suffix != utils.BasicDataLeafKey { - t.Fatalf("invalid suffix diff") - } - // check the prestate balance wasn't 0 or missing - if balanceStateDiff.CurrentValue == nil || *balanceStateDiff.CurrentValue == zero { - t.Fatalf("invalid current value %v", *balanceStateDiff.CurrentValue) - } - // check that the poststate witness value for the balance is nil, - // meaning that it didn't get updated. 
- if balanceStateDiff.NewValue != nil { - t.Fatalf("invalid new value") - } -} - -// TestProcessVerkleSelfDestructInSeparateTx controls the contents of the witness after -// a non-eip6780-compliant selfdestruct occurs. -func TestProcessVerkleSelfDestructInSeparateTx(t *testing.T) { - // The test txs were taken from a secondary testnet with chain id 69421 - config := *testKaustinenLikeChainConfig - config.ChainID = new(big.Int).SetUint64(69421) - - var ( - signer = types.LatestSigner(&config) - testKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") - account2 = common.HexToAddress("0x6177843db3138ae69679A54b95cf345ED759450d") - gspec = verkleTestGenesis(&config) - ) - - // runtime code: selfdestruct ( 0x6177843db3138ae69679A54b95cf345ED759450d ) - runtimeCode := slices.Concat( - []byte{byte(vm.PUSH20)}, - account2.Bytes(), - []byte{byte(vm.SELFDESTRUCT)}) - - //The goal of this test is to test SELFDESTRUCT that happens in a contract - // execution which is created in a previous transaction. - selfDestructContract := slices.Concat([]byte{ - byte(vm.PUSH1), byte(len(runtimeCode)), - byte(vm.PUSH1), 12, - byte(vm.PUSH1), 0x00, - byte(vm.CODECOPY), // Codecopy( to-offset: 0, code offset: 12, length: 22 ) - - byte(vm.PUSH1), byte(len(runtimeCode)), - byte(vm.PUSH1), 0x00, - byte(vm.RETURN), // Return ( 0 : len(runtimecode) - }, - runtimeCode) - - deployer := crypto.PubkeyToAddress(testKey.PublicKey) - contract := crypto.CreateAddress(deployer, 0) - - _, _, _, _, _, statediffs := GenerateVerkleChainWithGenesis(gspec, beacon.New(ethash.NewFaker()), 2, func(i int, gen *BlockGen) { - gen.SetPoS() - - if i == 0 { - // Create selfdestruct contract, sending 42 wei. - tx, _ := types.SignNewTx(testKey, signer, &types.LegacyTx{Nonce: 0, - Value: big.NewInt(42), - Gas: 100_000, - GasPrice: big.NewInt(875000000), - Data: selfDestructContract, - }) - gen.AddTx(tx) - } else { - // Call it. 
- tx, _ := types.SignTx(types.NewTransaction(1, contract, big.NewInt(0), 100_000, big.NewInt(875000000), nil), signer, testKey) - gen.AddTx(tx) - } - }) - - var zero [32]byte - { // Check self-destructed contract in the witness - selfDestructContractTreeKey := utils.CodeHashKey(contract[:]) - - var stateDiffIdx = -1 - for i, stemStateDiff := range statediffs[1] { - if bytes.Equal(stemStateDiff.Stem[:], selfDestructContractTreeKey[:31]) { - stateDiffIdx = i - break - } - } - if stateDiffIdx == -1 { - t.Fatalf("no state diff found for stem") - } - - balanceStateDiff := statediffs[1][stateDiffIdx].SuffixDiffs[0] - if balanceStateDiff.Suffix != utils.BasicDataLeafKey { - t.Fatalf("balance invalid suffix") - } - - // The original balance was 42. - var oldBalance [16]byte - oldBalance[15] = 42 - if !bytes.Equal((*balanceStateDiff.CurrentValue)[utils.BasicDataBalanceOffset:], oldBalance[:]) { - t.Fatalf("the pre-state balance before self-destruct must be %x, got %x", oldBalance, *balanceStateDiff.CurrentValue) - } - - // The new balance must be 0. - if !bytes.Equal((*balanceStateDiff.NewValue)[utils.BasicDataBalanceOffset:], zero[utils.BasicDataBalanceOffset:]) { - t.Fatalf("the post-state balance after self-destruct must be 0") - } - } - { // Check self-destructed target in the witness. 
- selfDestructTargetTreeKey := utils.CodeHashKey(account2[:]) - - var stateDiffIdx = -1 - for i, stemStateDiff := range statediffs[1] { - if bytes.Equal(stemStateDiff.Stem[:], selfDestructTargetTreeKey[:31]) { - stateDiffIdx = i - break - } - } - if stateDiffIdx == -1 { - t.Fatalf("no state diff found for stem") - } - - balanceStateDiff := statediffs[1][stateDiffIdx].SuffixDiffs[0] - if balanceStateDiff.Suffix != utils.BasicDataLeafKey { - t.Fatalf("balance invalid suffix") - } - if balanceStateDiff.CurrentValue == nil { - t.Fatalf("codeHash.CurrentValue must not be empty") - } - if balanceStateDiff.NewValue == nil { - t.Fatalf("codeHash.NewValue must not be empty") - } - preStateBalance := binary.BigEndian.Uint64(balanceStateDiff.CurrentValue[utils.BasicDataBalanceOffset+8:]) - postStateBalance := binary.BigEndian.Uint64(balanceStateDiff.NewValue[utils.BasicDataBalanceOffset+8:]) - if postStateBalance-preStateBalance != 42 { - t.Fatalf("the post-state balance after self-destruct must be 42, got %d-%d=%d", postStateBalance, preStateBalance, postStateBalance-preStateBalance) - } - } -} - -// TestProcessVerkleSelfDestructInSameTx controls the contents of the witness after -// a eip6780-compliant selfdestruct occurs. -func TestProcessVerkleSelfDestructInSameTx(t *testing.T) { - // The test txs were taken from a secondary testnet with chain id 69421 - config := *testKaustinenLikeChainConfig - config.ChainID = new(big.Int).SetUint64(69421) - - var ( - signer = types.LatestSigner(&config) - testKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") - account2 = common.HexToAddress("0x6177843db3138ae69679A54b95cf345ED759450d") - gspec = verkleTestGenesis(&config) - ) - - // The goal of this test is to test SELFDESTRUCT that happens in a contract - // execution which is created in **the same** transaction sending the remaining - // balance to an external (i.e: not itself) account. 
- - selfDestructContract := slices.Concat( - []byte{byte(vm.PUSH20)}, - account2.Bytes(), - []byte{byte(vm.SELFDESTRUCT)}) - deployer := crypto.PubkeyToAddress(testKey.PublicKey) - contract := crypto.CreateAddress(deployer, 0) - - _, _, _, _, _, statediffs := GenerateVerkleChainWithGenesis(gspec, beacon.New(ethash.NewFaker()), 1, func(i int, gen *BlockGen) { - gen.SetPoS() - tx, _ := types.SignNewTx(testKey, signer, &types.LegacyTx{Nonce: 0, - Value: big.NewInt(42), - Gas: 100_000, - GasPrice: big.NewInt(875000000), - Data: selfDestructContract, - }) - gen.AddTx(tx) - }) - - { // Check self-destructed contract in the witness - selfDestructContractTreeKey := utils.CodeHashKey(contract[:]) - - var stateDiffIdx = -1 - for i, stemStateDiff := range statediffs[0] { - if bytes.Equal(stemStateDiff.Stem[:], selfDestructContractTreeKey[:31]) { - stateDiffIdx = i - break - } - } - if stateDiffIdx == -1 { - t.Fatalf("no state diff found for stem") - } - - balanceStateDiff := statediffs[0][stateDiffIdx].SuffixDiffs[0] - if balanceStateDiff.Suffix != utils.BasicDataLeafKey { - t.Fatalf("balance invalid suffix") - } - - if balanceStateDiff.CurrentValue != nil { - t.Fatalf("the pre-state balance before must be nil, since the contract didn't exist") - } - - if balanceStateDiff.NewValue != nil { - t.Fatalf("the post-state balance after self-destruct must be nil since the contract shouldn't be created at all") - } - } - { // Check self-destructed target in the witness. 
- selfDestructTargetTreeKey := utils.CodeHashKey(account2[:]) - - var stateDiffIdx = -1 - for i, stemStateDiff := range statediffs[0] { - if bytes.Equal(stemStateDiff.Stem[:], selfDestructTargetTreeKey[:31]) { - stateDiffIdx = i - break - } - } - if stateDiffIdx == -1 { - t.Fatalf("no state diff found for stem") - } - - balanceStateDiff := statediffs[0][stateDiffIdx].SuffixDiffs[0] - if balanceStateDiff.Suffix != utils.BasicDataLeafKey { - t.Fatalf("balance invalid suffix") - } - if balanceStateDiff.CurrentValue == nil { - t.Fatalf("codeHash.CurrentValue must not be empty") - } - if balanceStateDiff.NewValue == nil { - t.Fatalf("codeHash.NewValue must not be empty") - } - preStateBalance := binary.BigEndian.Uint64(balanceStateDiff.CurrentValue[utils.BasicDataBalanceOffset+8:]) - postStateBalance := binary.BigEndian.Uint64(balanceStateDiff.NewValue[utils.BasicDataBalanceOffset+8:]) - if postStateBalance-preStateBalance != 42 { - t.Fatalf("the post-state balance after self-destruct must be 42. got %d", postStateBalance) - } - } -} - -// TestProcessVerkleSelfDestructInSeparateTxWithSelfBeneficiary checks the content of the witness -// if a selfdestruct occurs in a different tx than the one that created it, but the beneficiary -// is the selfdestructed account. -func TestProcessVerkleSelfDestructInSeparateTxWithSelfBeneficiary(t *testing.T) { - // The test txs were taken from a secondary testnet with chain id 69421 - config := *testKaustinenLikeChainConfig - config.ChainID = new(big.Int).SetUint64(69421) - - var ( - signer = types.LatestSigner(&config) - testKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") - gspec = verkleTestGenesis(&config) - ) - // The goal of this test is to test SELFDESTRUCT that happens in a contract - // execution which is created in a *previous* transaction sending the remaining - // balance to itself. 
- selfDestructContract := []byte{ - byte(vm.PUSH1), 2, // PUSH1 2 - byte(vm.PUSH1), 10, // PUSH1 12 - byte(vm.PUSH0), // PUSH0 - byte(vm.CODECOPY), // Codecopy ( to offset 0, code@offset: 10, length: 2) - - byte(vm.PUSH1), 22, - byte(vm.PUSH0), - byte(vm.RETURN), // RETURN( memory[0:2] ) - - // Deployed code - byte(vm.ADDRESS), - byte(vm.SELFDESTRUCT), - } - deployer := crypto.PubkeyToAddress(testKey.PublicKey) - contract := crypto.CreateAddress(deployer, 0) - - _, _, _, _, _, statediffs := GenerateVerkleChainWithGenesis(gspec, beacon.New(ethash.NewFaker()), 2, func(i int, gen *BlockGen) { - gen.SetPoS() - if i == 0 { - // Create self-destruct contract, sending 42 wei. - tx, _ := types.SignNewTx(testKey, signer, &types.LegacyTx{Nonce: 0, - Value: big.NewInt(42), - Gas: 100_000, - GasPrice: big.NewInt(875000000), - Data: selfDestructContract, - }) - gen.AddTx(tx) - } else { - // Call it. - tx, _ := types.SignTx(types.NewTransaction(1, contract, big.NewInt(0), 100_000, big.NewInt(875000000), nil), signer, testKey) - gen.AddTx(tx) - } - }) - - { - // Check self-destructed contract in the witness. - // The way 6780 is implemented today, it always SubBalance from the self-destructed contract, and AddBalance - // to the beneficiary. In this case both addresses are the same, thus this might be optimizable from a gas - // perspective. But until that happens, we need to honor this "balance reading" adding it to the witness. - - selfDestructContractTreeKey := utils.CodeHashKey(contract[:]) - - var stateDiffIdx = -1 - for i, stemStateDiff := range statediffs[1] { - if bytes.Equal(stemStateDiff.Stem[:], selfDestructContractTreeKey[:31]) { - stateDiffIdx = i - break - } - } - if stateDiffIdx == -1 { - t.Fatal("no state diff found for stem") - } - - balanceStateDiff := statediffs[1][stateDiffIdx].SuffixDiffs[0] - if balanceStateDiff.Suffix != utils.BasicDataLeafKey { - t.Fatal("balance invalid suffix") - } - - // The original balance was 42. 
- var oldBalance [16]byte - oldBalance[15] = 42 - if !bytes.Equal((*balanceStateDiff.CurrentValue)[utils.BasicDataBalanceOffset:], oldBalance[:]) { - t.Fatal("the pre-state balance before self-destruct must be 42") - } - - // Note that the SubBalance+AddBalance net effect is a 0 change, so NewValue - // must be nil. - if balanceStateDiff.NewValue != nil { - t.Fatal("the post-state balance after self-destruct must be empty") - } - } -} - -// TestProcessVerkleSelfDestructInSameTxWithSelfBeneficiary checks the content of the witness -// if a selfdestruct occurs in the same tx as the one that created it, but the beneficiary -// is the selfdestructed account. -func TestProcessVerkleSelfDestructInSameTxWithSelfBeneficiary(t *testing.T) { - // The test txs were taken from a secondary testnet with chain id 69421 - config := *testKaustinenLikeChainConfig - config.ChainID = new(big.Int).SetUint64(69421) - - var ( - signer = types.LatestSigner(&config) - testKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") - gspec = verkleTestGenesis(&config) - deployer = crypto.PubkeyToAddress(testKey.PublicKey) - contract = crypto.CreateAddress(deployer, 0) - ) - - // The goal of this test is to test SELFDESTRUCT that happens while executing - // the init code of a contract creation, that occurs in **the same** transaction. - // The balance is sent to itself. 
- t.Logf("Contract: %v", contract.String()) - - selfDestructContract := []byte{byte(vm.ADDRESS), byte(vm.SELFDESTRUCT)} - - _, _, _, _, _, stateDiffs := GenerateVerkleChainWithGenesis(gspec, beacon.New(ethash.NewFaker()), 1, func(i int, gen *BlockGen) { - gen.SetPoS() - tx, _ := types.SignNewTx(testKey, signer, &types.LegacyTx{Nonce: 0, - Value: big.NewInt(42), - Gas: 100_000, - GasPrice: big.NewInt(875000000), - Data: selfDestructContract, - }) - gen.AddTx(tx) - }) - stateDiff := stateDiffs[0] // state difference of block 1 - - { // Check self-destructed contract in the witness - selfDestructContractTreeKey := utils.CodeHashKey(contract[:]) - - var stateDiffIdx = -1 - for i, stemStateDiff := range stateDiff { - if bytes.Equal(stemStateDiff.Stem[:], selfDestructContractTreeKey[:31]) { - stateDiffIdx = i - break - } - } - if stateDiffIdx == -1 { - t.Fatal("no state diff found for stem") - } - balanceStateDiff := stateDiff[stateDiffIdx].SuffixDiffs[0] - if balanceStateDiff.Suffix != utils.BasicDataLeafKey { - t.Fatal("balance invalid suffix") - } - if balanceStateDiff.CurrentValue != nil { - t.Fatal("the pre-state balance before must be nil, since the contract didn't exist") - } - // Ensure that the value is burnt, and therefore that the balance of the self-destructed - // contract isn't modified (it should remain missing from the state) - if balanceStateDiff.NewValue != nil { - t.Fatal("the post-state balance after self-destruct must be nil since the contract shouldn't be created at all") - } - } -} - -// TestProcessVerkleSelfDestructInSameTxWithSelfBeneficiaryAndPrefundedAccount checks the -// content of the witness if a selfdestruct occurs in the same tx as the one that created it, -// it, but the beneficiary is the selfdestructed account. The difference with the test above, -// is that the created account is prefunded and so the final value should be 0. 
-func TestProcessVerkleSelfDestructInSameTxWithSelfBeneficiaryAndPrefundedAccount(t *testing.T) { - // The test txs were taken from a secondary testnet with chain id 69421 - config := *testKaustinenLikeChainConfig - config.ChainID = new(big.Int).SetUint64(69421) - - var ( - signer = types.LatestSigner(&config) - testKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") - gspec = verkleTestGenesis(&config) - deployer = crypto.PubkeyToAddress(testKey.PublicKey) - contract = crypto.CreateAddress(deployer, 0) - ) - // Prefund the account, at an address that the contract will be deployed at, - // before it selfdestrucs. We can therefore check that the account itseld is - // NOT destroyed, which is what the current version of the spec requires. - // TODO(gballet) revisit after the spec has been modified. - gspec.Alloc[contract] = types.Account{ - Balance: big.NewInt(100), - } - - selfDestructContract := []byte{byte(vm.ADDRESS), byte(vm.SELFDESTRUCT)} - - _, _, _, _, _, stateDiffs := GenerateVerkleChainWithGenesis(gspec, beacon.New(ethash.NewFaker()), 1, func(i int, gen *BlockGen) { - gen.SetPoS() - tx, _ := types.SignNewTx(testKey, signer, &types.LegacyTx{Nonce: 0, - Value: big.NewInt(42), - Gas: 100_000, - GasPrice: big.NewInt(875000000), - Data: selfDestructContract, - }) - gen.AddTx(tx) - }) - stateDiff := stateDiffs[0] // state difference of block 1 - - { // Check self-destructed contract in the witness - selfDestructContractTreeKey := utils.CodeHashKey(contract[:]) - - var stateDiffIdx = -1 - for i, stemStateDiff := range stateDiff { - if bytes.Equal(stemStateDiff.Stem[:], selfDestructContractTreeKey[:31]) { - stateDiffIdx = i - break - } - } - if stateDiffIdx == -1 { - t.Fatal("no state diff found for stem") - } - balanceStateDiff := stateDiff[stateDiffIdx].SuffixDiffs[0] - if balanceStateDiff.Suffix != utils.BasicDataLeafKey { - t.Fatal("balance invalid suffix") - } - expected, _ := 
hex.DecodeString("0000000000000000000000000000000000000000000000000000000000000064") - if balanceStateDiff.CurrentValue == nil || !bytes.Equal(balanceStateDiff.CurrentValue[:], expected) { - t.Fatalf("incorrect prestate balance: %x != %x", *balanceStateDiff.CurrentValue, expected) - } - // Ensure that the value is burnt, and therefore that the balance of the self-destructed - // contract isn't modified (it should remain missing from the state) - expected = make([]byte, 32) - if balanceStateDiff.NewValue == nil { - t.Fatal("incorrect nil poststate balance") - } - if !bytes.Equal(balanceStateDiff.NewValue[:], expected[:]) { - t.Fatalf("incorrect poststate balance: %x != %x", *balanceStateDiff.NewValue, expected[:]) - } - } -} diff --git a/tests/block_test_util.go b/tests/block_test_util.go index 3b88753b1c..2ced18787a 100644 --- a/tests/block_test_util.go +++ b/tests/block_test_util.go @@ -117,19 +117,20 @@ func (t *BlockTest) Run(snapshotter bool, scheme string, witness bool, tracer *t return UnsupportedForkError{t.json.Network} } // import pre accounts & construct test genesis block & state root + // Commit genesis state var ( + gspec = t.genesis(config) db = rawdb.NewMemoryDatabase() tconf = &triedb.Config{ Preimages: true, + IsVerkle: gspec.Config.VerkleTime != nil && *gspec.Config.VerkleTime <= gspec.Timestamp, } ) - if scheme == rawdb.PathScheme { + if scheme == rawdb.PathScheme || tconf.IsVerkle { tconf.PathDB = pathdb.Defaults } else { tconf.HashDB = hashdb.Defaults } - // Commit genesis state - gspec := t.genesis(config) // if ttd is not specified, set an arbitrary huge value if gspec.Config.TerminalTotalDifficulty == nil { diff --git a/tests/init.go b/tests/init.go index 705e929ae9..d10b47986c 100644 --- a/tests/init.go +++ b/tests/init.go @@ -720,6 +720,25 @@ var Forks = map[string]*params.ChainConfig{ BPO4: params.DefaultBPO4BlobConfig, }, }, + "Verkle": { + ChainID: big.NewInt(1), + HomesteadBlock: big.NewInt(0), + EIP150Block: big.NewInt(0), + 
EIP155Block: big.NewInt(0), + EIP158Block: big.NewInt(0), + ByzantiumBlock: big.NewInt(0), + ConstantinopleBlock: big.NewInt(0), + PetersburgBlock: big.NewInt(0), + IstanbulBlock: big.NewInt(0), + MuirGlacierBlock: big.NewInt(0), + BerlinBlock: big.NewInt(0), + LondonBlock: big.NewInt(0), + ArrowGlacierBlock: big.NewInt(0), + MergeNetsplitBlock: big.NewInt(0), + TerminalTotalDifficulty: big.NewInt(0), + ShanghaiTime: u64(0), + VerkleTime: u64(0), + }, } var bpo1BlobConfig = ¶ms.BlobConfig{ diff --git a/trie/bintrie/binary_node.go b/trie/bintrie/binary_node.go index 1c003a6c8f..690489b2aa 100644 --- a/trie/bintrie/binary_node.go +++ b/trie/bintrie/binary_node.go @@ -31,8 +31,11 @@ type ( var zero [32]byte const ( - NodeWidth = 256 // Number of child per leaf node - StemSize = 31 // Number of bytes to travel before reaching a group of leaves + StemNodeWidth = 256 // Number of child per leaf node + StemSize = 31 // Number of bytes to travel before reaching a group of leaves + NodeTypeBytes = 1 // Size of node type prefix in serialization + HashSize = 32 // Size of a hash in bytes + BitmapSize = 32 // Size of the bitmap in a stem node ) const ( @@ -58,25 +61,28 @@ type BinaryNode interface { func SerializeNode(node BinaryNode) []byte { switch n := (node).(type) { case *InternalNode: - var serialized [65]byte + // InternalNode: 1 byte type + 32 bytes left hash + 32 bytes right hash + var serialized [NodeTypeBytes + HashSize + HashSize]byte serialized[0] = nodeTypeInternal copy(serialized[1:33], n.left.Hash().Bytes()) copy(serialized[33:65], n.right.Hash().Bytes()) return serialized[:] case *StemNode: - var serialized [32 + 32 + 256*32]byte + // StemNode: 1 byte type + 31 bytes stem + 32 bytes bitmap + 256*32 bytes values + var serialized [NodeTypeBytes + StemSize + BitmapSize + StemNodeWidth*HashSize]byte serialized[0] = nodeTypeStem - copy(serialized[1:32], node.(*StemNode).Stem) - bitmap := serialized[32:64] - offset := 64 - for i, v := range node.(*StemNode).Values { 
+ copy(serialized[NodeTypeBytes:NodeTypeBytes+StemSize], n.Stem) + bitmap := serialized[NodeTypeBytes+StemSize : NodeTypeBytes+StemSize+BitmapSize] + offset := NodeTypeBytes + StemSize + BitmapSize + for i, v := range n.Values { if v != nil { bitmap[i/8] |= 1 << (7 - (i % 8)) - copy(serialized[offset:offset+32], v) - offset += 32 + copy(serialized[offset:offset+HashSize], v) + offset += HashSize } } - return serialized[:] + // Only return the actual data, not the entire array + return serialized[:offset] default: panic("invalid node type") } @@ -104,21 +110,21 @@ func DeserializeNode(serialized []byte, depth int) (BinaryNode, error) { if len(serialized) < 64 { return nil, invalidSerializedLength } - var values [256][]byte - bitmap := serialized[32:64] - offset := 64 + var values [StemNodeWidth][]byte + bitmap := serialized[NodeTypeBytes+StemSize : NodeTypeBytes+StemSize+BitmapSize] + offset := NodeTypeBytes + StemSize + BitmapSize - for i := range 256 { + for i := range StemNodeWidth { if bitmap[i/8]>>(7-(i%8))&1 == 1 { - if len(serialized) < offset+32 { + if len(serialized) < offset+HashSize { return nil, invalidSerializedLength } - values[i] = serialized[offset : offset+32] - offset += 32 + values[i] = serialized[offset : offset+HashSize] + offset += HashSize } } return &StemNode{ - Stem: serialized[1:32], + Stem: serialized[NodeTypeBytes : NodeTypeBytes+StemSize], Values: values[:], depth: depth, }, nil diff --git a/trie/bintrie/binary_node_test.go b/trie/bintrie/binary_node_test.go index b21daaab69..242743ba53 100644 --- a/trie/bintrie/binary_node_test.go +++ b/trie/bintrie/binary_node_test.go @@ -77,12 +77,12 @@ func TestSerializeDeserializeInternalNode(t *testing.T) { // TestSerializeDeserializeStemNode tests serialization and deserialization of StemNode func TestSerializeDeserializeStemNode(t *testing.T) { // Create a stem node with some values - stem := make([]byte, 31) + stem := make([]byte, StemSize) for i := range stem { stem[i] = byte(i) } - var values 
[256][]byte + var values [StemNodeWidth][]byte // Add some values at different indices values[0] = common.HexToHash("0x0101010101010101010101010101010101010101010101010101010101010101").Bytes() values[10] = common.HexToHash("0x0202020202020202020202020202020202020202020202020202020202020202").Bytes() @@ -103,7 +103,7 @@ func TestSerializeDeserializeStemNode(t *testing.T) { } // Check the stem is correctly serialized - if !bytes.Equal(serialized[1:32], stem) { + if !bytes.Equal(serialized[1:1+StemSize], stem) { t.Errorf("Stem mismatch in serialized data") } @@ -136,7 +136,7 @@ func TestSerializeDeserializeStemNode(t *testing.T) { } // Check that other values are nil - for i := range NodeWidth { + for i := range StemNodeWidth { if i == 0 || i == 10 || i == 255 { continue } @@ -218,15 +218,15 @@ func TestKeyToPath(t *testing.T) { }, { name: "max valid depth", - depth: 31 * 8, - key: make([]byte, 32), - expected: make([]byte, 31*8+1), + depth: StemSize * 8, + key: make([]byte, HashSize), + expected: make([]byte, StemSize*8+1), wantErr: false, }, { name: "depth too large", - depth: 31*8 + 1, - key: make([]byte, 32), + depth: StemSize*8 + 1, + key: make([]byte, HashSize), wantErr: true, }, } diff --git a/trie/bintrie/hashed_node.go b/trie/bintrie/hashed_node.go index 8f9fd66a59..e4d8c2e7ac 100644 --- a/trie/bintrie/hashed_node.go +++ b/trie/bintrie/hashed_node.go @@ -46,8 +46,31 @@ func (h HashedNode) GetValuesAtStem(_ []byte, _ NodeResolverFn) ([][]byte, error return nil, errors.New("attempted to get values from an unresolved node") } -func (h HashedNode) InsertValuesAtStem(key []byte, values [][]byte, resolver NodeResolverFn, depth int) (BinaryNode, error) { - return nil, errors.New("insertValuesAtStem not implemented for hashed node") +func (h HashedNode) InsertValuesAtStem(stem []byte, values [][]byte, resolver NodeResolverFn, depth int) (BinaryNode, error) { + // Step 1: Generate the path for this node's position in the tree + path, err := keyToPath(depth, stem) + 
if err != nil { + return nil, fmt.Errorf("InsertValuesAtStem path generation error: %w", err) + } + + if resolver == nil { + return nil, errors.New("InsertValuesAtStem resolve error: resolver is nil") + } + + // Step 2: Resolve the hashed node to get the actual node data + data, err := resolver(path, common.Hash(h)) + if err != nil { + return nil, fmt.Errorf("InsertValuesAtStem resolve error: %w", err) + } + + // Step 3: Deserialize the resolved data into a concrete node + node, err := DeserializeNode(data, depth) + if err != nil { + return nil, fmt.Errorf("InsertValuesAtStem node deserialization error: %w", err) + } + + // Step 4: Call InsertValuesAtStem on the resolved concrete node + return node.InsertValuesAtStem(stem, values, resolver, depth) } func (h HashedNode) toDot(parent string, path string) string { @@ -58,7 +81,8 @@ func (h HashedNode) toDot(parent string, path string) string { } func (h HashedNode) CollectNodes([]byte, NodeFlushFn) error { - return errors.New("collectNodes not implemented for hashed node") + // HashedNodes are already persisted in the database and don't need to be collected. 
+ return nil } func (h HashedNode) GetHeight() int { diff --git a/trie/bintrie/hashed_node_test.go b/trie/bintrie/hashed_node_test.go index 0c19ae0c57..f9e6984888 100644 --- a/trie/bintrie/hashed_node_test.go +++ b/trie/bintrie/hashed_node_test.go @@ -17,6 +17,7 @@ package bintrie import ( + "bytes" "testing" "github.com/ethereum/go-ethereum/common" @@ -59,8 +60,8 @@ func TestHashedNodeCopy(t *testing.T) { func TestHashedNodeInsert(t *testing.T) { node := HashedNode(common.HexToHash("0x1234")) - key := make([]byte, 32) - value := make([]byte, 32) + key := make([]byte, HashSize) + value := make([]byte, HashSize) _, err := node.Insert(key, value, nil, 0) if err == nil { @@ -76,7 +77,7 @@ func TestHashedNodeInsert(t *testing.T) { func TestHashedNodeGetValuesAtStem(t *testing.T) { node := HashedNode(common.HexToHash("0x1234")) - stem := make([]byte, 31) + stem := make([]byte, StemSize) _, err := node.GetValuesAtStem(stem, nil) if err == nil { t.Fatal("Expected error for GetValuesAtStem on HashedNode") @@ -91,17 +92,85 @@ func TestHashedNodeGetValuesAtStem(t *testing.T) { func TestHashedNodeInsertValuesAtStem(t *testing.T) { node := HashedNode(common.HexToHash("0x1234")) - stem := make([]byte, 31) - values := make([][]byte, 256) + stem := make([]byte, StemSize) + values := make([][]byte, StemNodeWidth) + // Test 1: nil resolver should return an error _, err := node.InsertValuesAtStem(stem, values, nil, 0) if err == nil { - t.Fatal("Expected error for InsertValuesAtStem on HashedNode") + t.Fatal("Expected error for InsertValuesAtStem on HashedNode with nil resolver") } - if err.Error() != "insertValuesAtStem not implemented for hashed node" { + if err.Error() != "InsertValuesAtStem resolve error: resolver is nil" { t.Errorf("Unexpected error message: %v", err) } + + // Test 2: mock resolver returning invalid data should return deserialization error + mockResolver := func(path []byte, hash common.Hash) ([]byte, error) { + // Return invalid/nonsense data that cannot be 
deserialized + return []byte{0xff, 0xff, 0xff}, nil + } + + _, err = node.InsertValuesAtStem(stem, values, mockResolver, 0) + if err == nil { + t.Fatal("Expected error for InsertValuesAtStem on HashedNode with invalid resolver data") + } + + expectedPrefix := "InsertValuesAtStem node deserialization error:" + if len(err.Error()) < len(expectedPrefix) || err.Error()[:len(expectedPrefix)] != expectedPrefix { + t.Errorf("Expected deserialization error, got: %v", err) + } + + // Test 3: mock resolver returning valid serialized node should succeed + stem = make([]byte, StemSize) + stem[0] = 0xaa + var originalValues [StemNodeWidth][]byte + originalValues[0] = common.HexToHash("0x1111111111111111111111111111111111111111111111111111111111111111").Bytes() + originalValues[1] = common.HexToHash("0x2222222222222222222222222222222222222222222222222222222222222222").Bytes() + + originalNode := &StemNode{ + Stem: stem, + Values: originalValues[:], + depth: 0, + } + + // Serialize the node + serialized := SerializeNode(originalNode) + + // Create a mock resolver that returns the serialized node + validResolver := func(path []byte, hash common.Hash) ([]byte, error) { + return serialized, nil + } + + var newValues [StemNodeWidth][]byte + newValues[2] = common.HexToHash("0x3333333333333333333333333333333333333333333333333333333333333333").Bytes() + + resolvedNode, err := node.InsertValuesAtStem(stem, newValues[:], validResolver, 0) + if err != nil { + t.Fatalf("Expected successful resolution and insertion, got error: %v", err) + } + + resultStem, ok := resolvedNode.(*StemNode) + if !ok { + t.Fatalf("Expected resolved node to be *StemNode, got %T", resolvedNode) + } + + if !bytes.Equal(resultStem.Stem, stem) { + t.Errorf("Stem mismatch: expected %x, got %x", stem, resultStem.Stem) + } + + // Verify the original values are preserved + if !bytes.Equal(resultStem.Values[0], originalValues[0]) { + t.Errorf("Original value at index 0 not preserved: expected %x, got %x", 
originalValues[0], resultStem.Values[0]) + } + if !bytes.Equal(resultStem.Values[1], originalValues[1]) { + t.Errorf("Original value at index 1 not preserved: expected %x, got %x", originalValues[1], resultStem.Values[1]) + } + + // Verify the new value was inserted + if !bytes.Equal(resultStem.Values[2], newValues[2]) { + t.Errorf("New value at index 2 not inserted correctly: expected %x, got %x", newValues[2], resultStem.Values[2]) + } } // TestHashedNodeToDot tests the toDot method for visualization diff --git a/trie/bintrie/internal_node.go b/trie/bintrie/internal_node.go index f3ddd1aab0..0a7bece521 100644 --- a/trie/bintrie/internal_node.go +++ b/trie/bintrie/internal_node.go @@ -49,14 +49,26 @@ func (bt *InternalNode) GetValuesAtStem(stem []byte, resolver NodeResolverFn) ([ } bit := stem[bt.depth/8] >> (7 - (bt.depth % 8)) & 1 - var child *BinaryNode if bit == 0 { - child = &bt.left - } else { - child = &bt.right + if hn, ok := bt.left.(HashedNode); ok { + path, err := keyToPath(bt.depth, stem) + if err != nil { + return nil, fmt.Errorf("GetValuesAtStem resolve error: %w", err) + } + data, err := resolver(path, common.Hash(hn)) + if err != nil { + return nil, fmt.Errorf("GetValuesAtStem resolve error: %w", err) + } + node, err := DeserializeNode(data, bt.depth+1) + if err != nil { + return nil, fmt.Errorf("GetValuesAtStem node deserialization error: %w", err) + } + bt.left = node + } + return bt.left.GetValuesAtStem(stem, resolver) } - if hn, ok := (*child).(HashedNode); ok { + if hn, ok := bt.right.(HashedNode); ok { path, err := keyToPath(bt.depth, stem) if err != nil { return nil, fmt.Errorf("GetValuesAtStem resolve error: %w", err) @@ -69,9 +81,9 @@ func (bt *InternalNode) GetValuesAtStem(stem []byte, resolver NodeResolverFn) ([ if err != nil { return nil, fmt.Errorf("GetValuesAtStem node deserialization error: %w", err) } - *child = node + bt.right = node } - return (*child).GetValuesAtStem(stem, resolver) + return bt.right.GetValuesAtStem(stem, 
resolver) } // Get retrieves the value for the given key. @@ -80,6 +92,9 @@ func (bt *InternalNode) Get(key []byte, resolver NodeResolverFn) ([]byte, error) if err != nil { return nil, fmt.Errorf("get error: %w", err) } + if values == nil { + return nil, nil + } return values[key[31]], nil } @@ -118,17 +133,54 @@ func (bt *InternalNode) Hash() common.Hash { // InsertValuesAtStem inserts a full value group at the given stem in the internal node. // Already-existing values will be overwritten. func (bt *InternalNode) InsertValuesAtStem(stem []byte, values [][]byte, resolver NodeResolverFn, depth int) (BinaryNode, error) { - var ( - child *BinaryNode - err error - ) + var err error bit := stem[bt.depth/8] >> (7 - (bt.depth % 8)) & 1 if bit == 0 { - child = &bt.left - } else { - child = &bt.right + if bt.left == nil { + bt.left = Empty{} + } + + if hn, ok := bt.left.(HashedNode); ok { + path, err := keyToPath(bt.depth, stem) + if err != nil { + return nil, fmt.Errorf("InsertValuesAtStem resolve error: %w", err) + } + data, err := resolver(path, common.Hash(hn)) + if err != nil { + return nil, fmt.Errorf("InsertValuesAtStem resolve error: %w", err) + } + node, err := DeserializeNode(data, bt.depth+1) + if err != nil { + return nil, fmt.Errorf("InsertValuesAtStem node deserialization error: %w", err) + } + bt.left = node + } + + bt.left, err = bt.left.InsertValuesAtStem(stem, values, resolver, depth+1) + return bt, err } - *child, err = (*child).InsertValuesAtStem(stem, values, resolver, depth+1) + + if bt.right == nil { + bt.right = Empty{} + } + + if hn, ok := bt.right.(HashedNode); ok { + path, err := keyToPath(bt.depth, stem) + if err != nil { + return nil, fmt.Errorf("InsertValuesAtStem resolve error: %w", err) + } + data, err := resolver(path, common.Hash(hn)) + if err != nil { + return nil, fmt.Errorf("InsertValuesAtStem resolve error: %w", err) + } + node, err := DeserializeNode(data, bt.depth+1) + if err != nil { + return nil, fmt.Errorf("InsertValuesAtStem node 
deserialization error: %w", err) + } + bt.right = node + } + + bt.right, err = bt.right.InsertValuesAtStem(stem, values, resolver, depth+1) return bt, err } diff --git a/trie/bintrie/iterator.go b/trie/bintrie/iterator.go index a6bab2bcfa..9b863ed1e3 100644 --- a/trie/bintrie/iterator.go +++ b/trie/bintrie/iterator.go @@ -108,6 +108,11 @@ func (it *binaryNodeIterator) Next(descend bool) bool { } // go back to parent to get the next leaf + // Check if we're at the root before popping + if len(it.stack) == 1 { + it.lastErr = errIteratorEnd + return false + } it.stack = it.stack[:len(it.stack)-1] it.current = it.stack[len(it.stack)-1].Node it.stack[len(it.stack)-1].Index++ @@ -183,9 +188,31 @@ func (it *binaryNodeIterator) NodeBlob() []byte { } // Leaf returns true iff the current node is a leaf node. +// In a Binary Trie, a StemNode contains up to 256 leaf values. +// The iterator is only considered to be "at a leaf" when it's positioned +// at a specific non-nil value within the StemNode, not just at the StemNode itself. func (it *binaryNodeIterator) Leaf() bool { - _, ok := it.current.(*StemNode) - return ok + sn, ok := it.current.(*StemNode) + if !ok { + return false + } + + // Check if we have a valid stack position + if len(it.stack) == 0 { + return false + } + + // The Index in the stack state points to the NEXT position after the current value. + // So if Index is 0, we haven't started iterating through the values yet. + // If Index is 5, we're currently at value[4] (the 5th value, 0-indexed). + idx := it.stack[len(it.stack)-1].Index + if idx == 0 || idx > 256 { + return false + } + + // Check if there's actually a value at the current position + currentValueIndex := idx - 1 + return sn.Values[currentValueIndex] != nil } // LeafKey returns the key of the leaf. 
The method panics if the iterator is not @@ -219,7 +246,7 @@ func (it *binaryNodeIterator) LeafProof() [][]byte { panic("LeafProof() called on an binary node iterator not at a leaf location") } - proof := make([][]byte, 0, len(it.stack)+NodeWidth) + proof := make([][]byte, 0, len(it.stack)+StemNodeWidth) // Build proof by walking up the stack and collecting sibling hashes for i := range it.stack[:len(it.stack)-2] { diff --git a/trie/bintrie/iterator_test.go b/trie/bintrie/iterator_test.go deleted file mode 100644 index 8773e9e0c5..0000000000 --- a/trie/bintrie/iterator_test.go +++ /dev/null @@ -1,83 +0,0 @@ -// Copyright 2025 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . 
- -package bintrie - -import ( - "testing" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core/rawdb" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/ethdb" - "github.com/ethereum/go-ethereum/triedb" - "github.com/ethereum/go-ethereum/triedb/hashdb" - "github.com/ethereum/go-ethereum/triedb/pathdb" - "github.com/holiman/uint256" -) - -func newTestDatabase(diskdb ethdb.Database, scheme string) *triedb.Database { - config := &triedb.Config{Preimages: true} - if scheme == rawdb.HashScheme { - config.HashDB = &hashdb.Config{CleanCacheSize: 0} - } else { - config.PathDB = &pathdb.Config{TrieCleanSize: 0, StateCleanSize: 0} - } - return triedb.NewDatabase(diskdb, config) -} - -func TestBinaryIterator(t *testing.T) { - trie, err := NewBinaryTrie(types.EmptyVerkleHash, newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.PathScheme)) - if err != nil { - t.Fatal(err) - } - account0 := &types.StateAccount{ - Nonce: 1, - Balance: uint256.NewInt(2), - Root: types.EmptyRootHash, - CodeHash: nil, - } - // NOTE: the code size isn't written to the trie via TryUpdateAccount - // so it will be missing from the test nodes. - trie.UpdateAccount(common.Address{}, account0, 0) - account1 := &types.StateAccount{ - Nonce: 1337, - Balance: uint256.NewInt(2000), - Root: types.EmptyRootHash, - CodeHash: nil, - } - // This address is meant to hash to a value that has the same first byte as 0xbf - var clash = common.HexToAddress("69fd8034cdb20934dedffa7dccb4fb3b8062a8be") - trie.UpdateAccount(clash, account1, 0) - - // Manually go over every node to check that we get all - // the correct nodes. 
- it, err := trie.NodeIterator(nil) - if err != nil { - t.Fatal(err) - } - var leafcount int - for it.Next(true) { - t.Logf("Node: %x", it.Path()) - if it.Leaf() { - leafcount++ - t.Logf("\tLeaf: %x", it.LeafKey()) - } - } - if leafcount != 2 { - t.Fatalf("invalid leaf count: %d != 6", leafcount) - } -} diff --git a/trie/bintrie/key_encoding.go b/trie/bintrie/key_encoding.go index 13c2057371..cda797521a 100644 --- a/trie/bintrie/key_encoding.go +++ b/trie/bintrie/key_encoding.go @@ -47,6 +47,12 @@ func GetBinaryTreeKey(addr common.Address, key []byte) []byte { return k } +func GetBinaryTreeKeyBasicData(addr common.Address) []byte { + var k [32]byte + k[31] = BasicDataLeafKey + return GetBinaryTreeKey(addr, k[:]) +} + func GetBinaryTreeKeyCodeHash(addr common.Address) []byte { var k [32]byte k[31] = CodeHashLeafKey diff --git a/trie/bintrie/stem_node.go b/trie/bintrie/stem_node.go index 50c06c9761..60856b42ce 100644 --- a/trie/bintrie/stem_node.go +++ b/trie/bintrie/stem_node.go @@ -28,7 +28,7 @@ import ( // StemNode represents a group of `NodeWith` values sharing the same stem. type StemNode struct { - Stem []byte // Stem path to get to 256 values + Stem []byte // Stem path to get to StemNodeWidth values Values [][]byte // All values, indexed by the last byte of the key. depth int // Depth of the node } @@ -40,7 +40,7 @@ func (bt *StemNode) Get(key []byte, _ NodeResolverFn) ([]byte, error) { // Insert inserts a new key-value pair into the node. 
func (bt *StemNode) Insert(key []byte, value []byte, _ NodeResolverFn, depth int) (BinaryNode, error) { - if !bytes.Equal(bt.Stem, key[:31]) { + if !bytes.Equal(bt.Stem, key[:StemSize]) { bitStem := bt.Stem[bt.depth/8] >> (7 - (bt.depth % 8)) & 1 n := &InternalNode{depth: bt.depth} @@ -65,26 +65,26 @@ func (bt *StemNode) Insert(key []byte, value []byte, _ NodeResolverFn, depth int } *other = Empty{} } else { - var values [256][]byte - values[key[31]] = value + var values [StemNodeWidth][]byte + values[key[StemSize]] = value *other = &StemNode{ - Stem: slices.Clone(key[:31]), + Stem: slices.Clone(key[:StemSize]), Values: values[:], depth: depth + 1, } } return n, nil } - if len(value) != 32 { + if len(value) != HashSize { return bt, errors.New("invalid insertion: value length") } - bt.Values[key[31]] = value + bt.Values[key[StemSize]] = value return bt, nil } // Copy creates a deep copy of the node. func (bt *StemNode) Copy() BinaryNode { - var values [256][]byte + var values [StemNodeWidth][]byte for i, v := range bt.Values { values[i] = slices.Clone(v) } @@ -102,7 +102,7 @@ func (bt *StemNode) GetHeight() int { // Hash returns the hash of the node. func (bt *StemNode) Hash() common.Hash { - var data [NodeWidth]common.Hash + var data [StemNodeWidth]common.Hash for i, v := range bt.Values { if v != nil { h := sha256.Sum256(v) @@ -112,7 +112,7 @@ func (bt *StemNode) Hash() common.Hash { h := sha256.New() for level := 1; level <= 8; level++ { - for i := range NodeWidth / (1 << level) { + for i := range StemNodeWidth / (1 << level) { h.Reset() if data[i*2] == (common.Hash{}) && data[i*2+1] == (common.Hash{}) { @@ -141,14 +141,17 @@ func (bt *StemNode) CollectNodes(path []byte, flush NodeFlushFn) error { } // GetValuesAtStem retrieves the group of values located at the given stem key. 
-func (bt *StemNode) GetValuesAtStem(_ []byte, _ NodeResolverFn) ([][]byte, error) { +func (bt *StemNode) GetValuesAtStem(stem []byte, _ NodeResolverFn) ([][]byte, error) { + if !bytes.Equal(bt.Stem, stem) { + return nil, nil + } return bt.Values[:], nil } // InsertValuesAtStem inserts a full value group at the given stem in the internal node. // Already-existing values will be overwritten. func (bt *StemNode) InsertValuesAtStem(key []byte, values [][]byte, _ NodeResolverFn, depth int) (BinaryNode, error) { - if !bytes.Equal(bt.Stem, key[:31]) { + if !bytes.Equal(bt.Stem, key[:StemSize]) { bitStem := bt.Stem[bt.depth/8] >> (7 - (bt.depth % 8)) & 1 n := &InternalNode{depth: bt.depth} @@ -174,7 +177,7 @@ func (bt *StemNode) InsertValuesAtStem(key []byte, values [][]byte, _ NodeResolv *other = Empty{} } else { *other = &StemNode{ - Stem: slices.Clone(key[:31]), + Stem: slices.Clone(key[:StemSize]), Values: values, depth: n.depth + 1, } @@ -206,7 +209,7 @@ func (bt *StemNode) toDot(parent, path string) string { // Key returns the full key for the given index. 
func (bt *StemNode) Key(i int) []byte { - var ret [32]byte + var ret [HashSize]byte copy(ret[:], bt.Stem) ret[StemSize] = byte(i) return ret[:] diff --git a/trie/bintrie/stem_node_test.go b/trie/bintrie/stem_node_test.go index e0ffd5c3c8..d8d6844427 100644 --- a/trie/bintrie/stem_node_test.go +++ b/trie/bintrie/stem_node_test.go @@ -251,27 +251,23 @@ func TestStemNodeGetValuesAtStem(t *testing.T) { } // Check that all values match - for i := 0; i < 256; i++ { + for i := range 256 { if !bytes.Equal(retrievedValues[i], values[i]) { t.Errorf("Value mismatch at index %d", i) } } - // GetValuesAtStem with different stem also returns the same values - // (implementation ignores the stem parameter) + // GetValuesAtStem with different stem should return nil differentStem := make([]byte, 31) differentStem[0] = 0xFF - retrievedValues2, err := node.GetValuesAtStem(differentStem, nil) + shouldBeNil, err := node.GetValuesAtStem(differentStem, nil) if err != nil { t.Fatalf("Failed to get values with different stem: %v", err) } - // Should still return the same values (stem is ignored) - for i := 0; i < 256; i++ { - if !bytes.Equal(retrievedValues2[i], values[i]) { - t.Errorf("Value mismatch at index %d with different stem", i) - } + if shouldBeNil != nil { + t.Error("Expected nil for different stem, got non-nil") } } diff --git a/trie/bintrie/trie.go b/trie/bintrie/trie.go index 0a8bd325f5..a7ee342b74 100644 --- a/trie/bintrie/trie.go +++ b/trie/bintrie/trie.go @@ -33,6 +33,84 @@ import ( var errInvalidRootType = errors.New("invalid root type") +// ChunkedCode represents a sequence of HashSize-byte chunks of code (StemSize bytes of which +// are actual code, and NodeTypeBytes byte is the pushdata offset). +type ChunkedCode []byte + +// Copy the values here so as to avoid an import cycle +const ( + PUSH1 = byte(0x60) + PUSH32 = byte(0x7f) +) + +// ChunkifyCode generates the chunked version of an array representing EVM bytecode +// according to EIP-7864 specification. 
+// +// The code is divided into HashSize-byte chunks, where each chunk contains: +// - Byte 0: Metadata byte indicating the number of leading bytes that are PUSHDATA (0-StemSize) +// - Bytes 1-StemSize: Actual code bytes +// +// This format enables stateless clients to validate jump destinations within a chunk +// without requiring additional context. When a PUSH instruction's data spans multiple +// chunks, the metadata byte tells us how many bytes at the start of the chunk are +// part of the previous chunk's PUSH instruction data. +// +// For example: +// - If a chunk starts with regular code: metadata byte = 0 +// - If a PUSH32 instruction starts at byte 30 of chunk N: +// - Chunk N: normal, contains PUSH32 opcode + 1 byte of data +// - Chunk N+1: metadata = StemSize (entire chunk is PUSH data) +// - Chunk N+2: metadata = 1 (first byte is PUSH data, then normal code resumes) +// +// This chunking approach ensures that jump destination validity can be determined +// by examining only the chunk containing the potential JUMPDEST, making it ideal +// for stateless execution and verkle/binary tries. +// +// Reference: https://eips.ethereum.org/EIPS/eip-7864 +func ChunkifyCode(code []byte) ChunkedCode { + var ( + chunkOffset = 0 // offset in the chunk + chunkCount = len(code) / StemSize + codeOffset = 0 // offset in the code + ) + if len(code)%StemSize != 0 { + chunkCount++ + } + chunks := make([]byte, chunkCount*HashSize) + for i := 0; i < chunkCount; i++ { + // number of bytes to copy, StemSize unless the end of the code has been reached. + end := StemSize * (i + 1) + if len(code) < end { + end = len(code) + } + copy(chunks[i*HashSize+1:], code[StemSize*i:end]) // copy the code itself + + // chunk offset = taken from the last chunk. 
+ if chunkOffset > StemSize { + // skip offset calculation if push data covers the whole chunk + chunks[i*HashSize] = StemSize + chunkOffset = 1 + continue + } + chunks[HashSize*i] = byte(chunkOffset) + chunkOffset = 0 + + // Check each instruction and update the offset it should be 0 unless + // a PUSH-N overflows. + for ; codeOffset < end; codeOffset++ { + if code[codeOffset] >= PUSH1 && code[codeOffset] <= PUSH32 { + codeOffset += int(code[codeOffset] - PUSH1 + 1) + if codeOffset+1 >= StemSize*(i+1) { + codeOffset++ + chunkOffset = codeOffset - StemSize*(i+1) + break + } + } + } + } + return chunks +} + // NewBinaryNode creates a new empty binary trie func NewBinaryNode() BinaryNode { return Empty{} @@ -114,7 +192,7 @@ func (t *BinaryTrie) GetAccount(addr common.Address) (*types.StateAccount, error ) switch r := t.root.(type) { case *InternalNode: - values, err = r.GetValuesAtStem(key[:31], t.nodeResolver) + values, err = r.GetValuesAtStem(key[:StemSize], t.nodeResolver) case *StemNode: values = r.Values case Empty: @@ -168,8 +246,8 @@ func (t *BinaryTrie) GetStorage(addr common.Address, key []byte) ([]byte, error) func (t *BinaryTrie) UpdateAccount(addr common.Address, acc *types.StateAccount, codeLen int) error { var ( err error - basicData [32]byte - values = make([][]byte, NodeWidth) + basicData [HashSize]byte + values = make([][]byte, StemNodeWidth) stem = GetBinaryTreeKey(addr, zero[:]) ) binary.BigEndian.PutUint32(basicData[BasicDataCodeSizeOffset-1:], uint32(codeLen)) @@ -177,14 +255,14 @@ func (t *BinaryTrie) UpdateAccount(addr common.Address, acc *types.StateAccount, // Because the balance is a max of 16 bytes, truncate // the extra values. This happens in devmode, where - // 0xff**32 is allocated to the developer account. + // 0xff**HashSize is allocated to the developer account. balanceBytes := acc.Balance.Bytes() // TODO: reduce the size of the allocation in devmode, then panic instead // of truncating. 
if len(balanceBytes) > 16 { balanceBytes = balanceBytes[16:] } - copy(basicData[32-len(balanceBytes):], balanceBytes[:]) + copy(basicData[HashSize-len(balanceBytes):], balanceBytes[:]) values[BasicDataLeafKey] = basicData[:] values[CodeHashLeafKey] = acc.CodeHash[:] @@ -205,11 +283,11 @@ func (t *BinaryTrie) UpdateStem(key []byte, values [][]byte) error { // database, a trie.MissingNodeError is returned. func (t *BinaryTrie) UpdateStorage(address common.Address, key, value []byte) error { k := GetBinaryTreeKeyStorageSlot(address, key) - var v [32]byte - if len(value) >= 32 { - copy(v[:], value[:32]) + var v [HashSize]byte + if len(value) >= HashSize { + copy(v[:], value[:HashSize]) } else { - copy(v[32-len(value):], value[:]) + copy(v[HashSize-len(value):], value[:]) } root, err := t.root.Insert(k, v[:], t.nodeResolver, 0) if err != nil { @@ -228,7 +306,7 @@ func (t *BinaryTrie) DeleteAccount(addr common.Address) error { // found in the database, a trie.MissingNodeError is returned. func (t *BinaryTrie) DeleteStorage(addr common.Address, key []byte) error { k := GetBinaryTreeKey(addr, key) - var zero [32]byte + var zero [HashSize]byte root, err := t.root.Insert(k, zero[:], t.nodeResolver, 0) if err != nil { return fmt.Errorf("DeleteStorage (%x) error: %v", addr, err) @@ -246,12 +324,12 @@ func (t *BinaryTrie) Hash() common.Hash { // Commit writes all nodes to the trie's memory database, tracking the internal // and external (for account tries) references. func (t *BinaryTrie) Commit(_ bool) (common.Hash, *trienode.NodeSet) { - root := t.root.(*InternalNode) nodeset := trienode.NewNodeSet(common.Hash{}) - err := root.CollectNodes(nil, func(path []byte, node BinaryNode) { + // The root can be any type of BinaryNode (InternalNode, StemNode, etc.) 
+ err := t.root.CollectNodes(nil, func(path []byte, node BinaryNode) { serialized := SerializeNode(node) - nodeset.AddNode(path, trienode.NewNodeWithPrev(common.Hash{}, serialized, t.tracer.Get(path))) + nodeset.AddNode(path, trienode.NewNodeWithPrev(node.Hash(), serialized, t.tracer.Get(path))) }) if err != nil { panic(fmt.Errorf("CollectNodes failed: %v", err)) @@ -299,23 +377,23 @@ func (t *BinaryTrie) IsVerkle() bool { // Note: the basic data leaf needs to have been previously created for this to work func (t *BinaryTrie) UpdateContractCode(addr common.Address, codeHash common.Hash, code []byte) error { var ( - chunks = trie.ChunkifyCode(code) + chunks = ChunkifyCode(code) values [][]byte key []byte err error ) - for i, chunknr := 0, uint64(0); i < len(chunks); i, chunknr = i+32, chunknr+1 { - groupOffset := (chunknr + 128) % 256 + for i, chunknr := 0, uint64(0); i < len(chunks); i, chunknr = i+HashSize, chunknr+1 { + groupOffset := (chunknr + 128) % StemNodeWidth if groupOffset == 0 /* start of new group */ || chunknr == 0 /* first chunk in header group */ { - values = make([][]byte, NodeWidth) - var offset [32]byte + values = make([][]byte, StemNodeWidth) + var offset [HashSize]byte binary.LittleEndian.PutUint64(offset[24:], chunknr+128) key = GetBinaryTreeKey(addr, offset[:]) } - values[groupOffset] = chunks[i : i+32] + values[groupOffset] = chunks[i : i+HashSize] - if groupOffset == 255 || len(chunks)-i <= 32 { - err = t.UpdateStem(key[:31], values) + if groupOffset == StemNodeWidth-1 || len(chunks)-i <= HashSize { + err = t.UpdateStem(key[:StemSize], values) if err != nil { return fmt.Errorf("UpdateContractCode (addr=%x) error: %w", addr[:], err) diff --git a/trie/bintrie/trie_test.go b/trie/bintrie/trie_test.go index 84f7689549..ca02cfaa1f 100644 --- a/trie/bintrie/trie_test.go +++ b/trie/bintrie/trie_test.go @@ -25,7 +25,7 @@ import ( ) var ( - zeroKey = [32]byte{} + zeroKey = [HashSize]byte{} oneKey = 
common.HexToHash("0101010101010101010101010101010101010101010101010101010101010101") twoKey = common.HexToHash("0202020202020202020202020202020202020202020202020202020202020202") threeKey = common.HexToHash("0303030303030303030303030303030303030303030303030303030303030303") @@ -158,8 +158,8 @@ func TestInsertDuplicateKey(t *testing.T) { func TestLargeNumberOfEntries(t *testing.T) { var err error tree := NewBinaryNode() - for i := range 256 { - var key [32]byte + for i := range StemNodeWidth { + var key [HashSize]byte key[0] = byte(i) tree, err = tree.Insert(key[:], ffKey[:], nil, 0) if err != nil { @@ -182,7 +182,7 @@ func TestMerkleizeMultipleEntries(t *testing.T) { common.HexToHash("8100000000000000000000000000000000000000000000000000000000000000").Bytes(), } for i, key := range keys { - var v [32]byte + var v [HashSize]byte binary.LittleEndian.PutUint64(v[:8], uint64(i)) tree, err = tree.Insert(key, v[:], nil, 0) if err != nil { diff --git a/trie/transition.go b/trie/transitiontrie/transition.go similarity index 87% rename from trie/transition.go rename to trie/transitiontrie/transition.go index c6eecd3937..4c73022082 100644 --- a/trie/transition.go +++ b/trie/transitiontrie/transition.go @@ -14,7 +14,7 @@ // You should have received a copy of the GNU Lesser General Public License // along with the go-ethereum library. If not, see . -package trie +package transitiontrie import ( "fmt" @@ -22,8 +22,9 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/ethdb" + "github.com/ethereum/go-ethereum/trie" + "github.com/ethereum/go-ethereum/trie/bintrie" "github.com/ethereum/go-ethereum/trie/trienode" - "github.com/ethereum/go-verkle" ) // TransitionTrie is a trie that implements a façade design pattern, presenting @@ -31,13 +32,16 @@ import ( // first from the overlay trie, and falls back to the base trie if the key isn't // found. All writes go to the overlay trie. 
type TransitionTrie struct { - overlay *VerkleTrie - base *SecureTrie + overlay *bintrie.BinaryTrie + base *trie.SecureTrie storage bool } // NewTransitionTrie creates a new TransitionTrie. -func NewTransitionTrie(base *SecureTrie, overlay *VerkleTrie, st bool) *TransitionTrie { +// Note: base can be nil when using TransitionTrie as a wrapper for BinaryTrie +// to work around import cycles. This is a temporary hack that should be +// refactored in future PRs (see core/state/reader.go for details). +func NewTransitionTrie(base *trie.SecureTrie, overlay *bintrie.BinaryTrie, st bool) *TransitionTrie { return &TransitionTrie{ overlay: overlay, base: base, @@ -46,12 +50,12 @@ func NewTransitionTrie(base *SecureTrie, overlay *VerkleTrie, st bool) *Transiti } // Base returns the base trie. -func (t *TransitionTrie) Base() *SecureTrie { +func (t *TransitionTrie) Base() *trie.SecureTrie { return t.base } // Overlay returns the overlay trie. -func (t *TransitionTrie) Overlay() *VerkleTrie { +func (t *TransitionTrie) Overlay() *bintrie.BinaryTrie { return t.overlay } @@ -61,7 +65,10 @@ func (t *TransitionTrie) GetKey(key []byte) []byte { if key := t.overlay.GetKey(key); key != nil { return key } - return t.base.GetKey(key) + if t.base != nil { + return t.base.GetKey(key) + } + return nil } // GetStorage returns the value for key stored in the trie. 
The value bytes must @@ -74,8 +81,11 @@ func (t *TransitionTrie) GetStorage(addr common.Address, key []byte) ([]byte, er if len(val) != 0 { return val, nil } - // TODO also insert value into overlay - return t.base.GetStorage(addr, key) + if t.base != nil { + // TODO also insert value into overlay + return t.base.GetStorage(addr, key) + } + return nil, nil } // PrefetchStorage attempts to resolve specific storage slots from the database @@ -102,7 +112,10 @@ func (t *TransitionTrie) GetAccount(address common.Address) (*types.StateAccount if data != nil { return data, nil } - return t.base.GetAccount(address) + if t.base != nil { + return t.base.GetAccount(address) + } + return nil, nil } // PrefetchAccount attempts to resolve specific accounts from the database @@ -174,7 +187,7 @@ func (t *TransitionTrie) Commit(collectLeaf bool) (common.Hash, *trienode.NodeSe // NodeIterator returns an iterator that returns nodes of the trie. Iteration // starts at the key after the given start key. -func (t *TransitionTrie) NodeIterator(startKey []byte) (NodeIterator, error) { +func (t *TransitionTrie) NodeIterator(startKey []byte) (trie.NodeIterator, error) { panic("not implemented") // TODO: Implement } @@ -197,14 +210,10 @@ func (t *TransitionTrie) IsVerkle() bool { // UpdateStem updates a group of values, given the stem they are using. If // a value already exists, it is overwritten. +// TODO: This is Verkle-specific and requires access to private fields. +// Not currently used in the codebase. func (t *TransitionTrie) UpdateStem(key []byte, values [][]byte) error { - trie := t.overlay - switch root := trie.root.(type) { - case *verkle.InternalNode: - return root.InsertValuesAtStem(key, values, t.overlay.nodeResolver) - default: - panic("invalid root type") - } + panic("UpdateStem is not implemented for TransitionTrie") } // Copy creates a deep copy of the transition trie. 
diff --git a/trie/utils/verkle.go b/trie/utils/verkle.go index dea210c046..2e42477b8d 100644 --- a/trie/utils/verkle.go +++ b/trie/utils/verkle.go @@ -45,6 +45,10 @@ var ( verkleNodeWidth = uint256.NewInt(256) codeStorageDelta = uint256.NewInt(0).Sub(codeOffset, headerStorageOffset) mainStorageOffsetLshVerkleNodeWidth = new(uint256.Int).Lsh(uint256.NewInt(1), 248-uint(verkleNodeWidthLog2)) + CodeOffset = uint256.NewInt(128) + VerkleNodeWidth = uint256.NewInt(256) + HeaderStorageOffset = uint256.NewInt(64) + VerkleNodeWidthLog2 = 8 index0Point *verkle.Point // pre-computed commitment of polynomial [2+256*64] @@ -200,6 +204,22 @@ func CodeChunkKey(address []byte, chunk *uint256.Int) []byte { return GetTreeKey(address, treeIndex, subIndex) } +func GetTreeKeyCodeChunkIndices(chunk *uint256.Int) (*uint256.Int, byte) { + chunkOffset := new(uint256.Int).Add(CodeOffset, chunk) + treeIndex := new(uint256.Int).Div(chunkOffset, VerkleNodeWidth) + subIndexMod := new(uint256.Int).Mod(chunkOffset, VerkleNodeWidth) + var subIndex byte + if len(subIndexMod) != 0 { + subIndex = byte(subIndexMod[0]) + } + return treeIndex, subIndex +} + +func GetTreeKeyCodeChunk(address []byte, chunk *uint256.Int) []byte { + treeIndex, subIndex := GetTreeKeyCodeChunkIndices(chunk) + return GetTreeKey(address, treeIndex, subIndex) +} + func StorageIndex(storageKey []byte) (*uint256.Int, byte) { // If the storage slot is in the header, we need to add the header offset. var key uint256.Int @@ -297,3 +317,97 @@ func evaluateAddressPoint(address []byte) *verkle.Point { ret.Add(ret, index0Point) return ret } + +func EvaluateAddressPoint(address []byte) *verkle.Point { + if len(address) < 32 { + var aligned [32]byte + address = append(aligned[:32-len(address)], address...) + } + var poly [3]fr.Element + + poly[0].SetZero() + + // 32-byte address, interpreted as two little endian + // 16-byte numbers. 
+ verkle.FromLEBytes(&poly[1], address[:16]) + verkle.FromLEBytes(&poly[2], address[16:]) + + cfg := verkle.GetConfig() + ret := cfg.CommitToPoly(poly[:], 0) + + // add a constant point + ret.Add(ret, index0Point) + + return ret +} + +func GetTreeKeyStorageSlotWithEvaluatedAddress(evaluated *verkle.Point, storageKey []byte) []byte { + treeIndex, subIndex := GetTreeKeyStorageSlotTreeIndexes(storageKey) + return GetTreeKeyWithEvaluatedAddess(evaluated, treeIndex, subIndex) +} + +func GetTreeKeyStorageSlotTreeIndexes(storageKey []byte) (*uint256.Int, byte) { + var pos uint256.Int + pos.SetBytes(storageKey) + + // If the storage slot is in the header, we need to add the header offset. + if pos.Cmp(codeStorageDelta) < 0 { + // This addition is always safe; it can't ever overflow since pos Date: Sat, 15 Nov 2025 16:04:21 +0200 Subject: [PATCH 123/277] ethapi: deref gasUsed pointer in eth_simulate log (#33192) Show the actual gas used in the block limit error so RPC clients see useful numbers. 
--- internal/ethapi/simulate.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/ethapi/simulate.go b/internal/ethapi/simulate.go index 0d1a59b371..e0732c327a 100644 --- a/internal/ethapi/simulate.go +++ b/internal/ethapi/simulate.go @@ -377,7 +377,7 @@ func (sim *simulator) sanitizeCall(call *TransactionArgs, state vm.StateDB, head call.Gas = (*hexutil.Uint64)(&remaining) } if *gasUsed+uint64(*call.Gas) > blockContext.GasLimit { - return &blockGasLimitReachedError{fmt.Sprintf("block gas limit reached: %d >= %d", gasUsed, blockContext.GasLimit)} + return &blockGasLimitReachedError{fmt.Sprintf("block gas limit reached: %d >= %d", *gasUsed, blockContext.GasLimit)} } if err := call.CallDefaults(sim.gp.Gas(), header.BaseFee, sim.chainConfig.ChainID); err != nil { return err From e0d81d1e993ad6dc3e618cd06e56b7be916efd8e Mon Sep 17 00:00:00 2001 From: maradini77 <140460067+maradini77@users.noreply.github.com> Date: Tue, 18 Nov 2025 18:54:53 +0100 Subject: [PATCH 124/277] eth: fix panic in randomDuration when min equals max (#33193) Fixes a potential panic in `randomDuration` when `min == max` by handling the edge case explicitly. 
--- eth/dropper.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/eth/dropper.go b/eth/dropper.go index 51f2a7a95a..dada5d07c0 100644 --- a/eth/dropper.go +++ b/eth/dropper.go @@ -145,6 +145,9 @@ func randomDuration(min, max time.Duration) time.Duration { if min > max { panic("min duration must be less than or equal to max duration") } + if min == max { + return min + } return time.Duration(mrand.Int63n(int64(max-min)) + int64(min)) } From f4817b7a5326a14b5648904a5881396f22fcbc37 Mon Sep 17 00:00:00 2001 From: wit liu Date: Thu, 20 Nov 2025 02:00:31 +0800 Subject: [PATCH 125/277] core: initialize tracer before DAO fork logic (#33214) `StateDB` lacks recording functionality, so it has been replaced with `tractStateDB` and advanced --- core/state_processor.go | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/core/state_processor.go b/core/state_processor.go index b66046f501..b4b22e4318 100644 --- a/core/state_processor.go +++ b/core/state_processor.go @@ -69,9 +69,14 @@ func (p *StateProcessor) Process(block *types.Block, statedb *state.StateDB, cfg gp = new(GasPool).AddGas(block.GasLimit()) ) + var tracingStateDB = vm.StateDB(statedb) + if hooks := cfg.Tracer; hooks != nil { + tracingStateDB = state.NewHookedState(statedb, hooks) + } + // Mutate the block and state according to any hard-fork specs if config.DAOForkSupport && config.DAOForkBlock != nil && config.DAOForkBlock.Cmp(block.Number()) == 0 { - misc.ApplyDAOHardFork(statedb) + misc.ApplyDAOHardFork(tracingStateDB) } var ( context vm.BlockContext @@ -79,10 +84,6 @@ func (p *StateProcessor) Process(block *types.Block, statedb *state.StateDB, cfg ) // Apply pre-execution system calls. 
- var tracingStateDB = vm.StateDB(statedb) - if hooks := cfg.Tracer; hooks != nil { - tracingStateDB = state.NewHookedState(statedb, hooks) - } context = NewEVMBlockContext(header, p.chain, nil) evm := vm.NewEVM(context, tracingStateDB, config, cfg) From f8e5b53f887effb68ac0a9ac517432bba38c19ff Mon Sep 17 00:00:00 2001 From: phrwlk Date: Mon, 24 Nov 2025 15:01:00 +0200 Subject: [PATCH 126/277] cmd/utils: make datadir.minfreedisk an IntFlag (#33252) --- cmd/utils/flags.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index 5a7e40767c..c95efd9fd7 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -114,7 +114,7 @@ var ( Usage: "Root directory for era1 history (default = inside ancient/chain)", Category: flags.EthCategory, } - MinFreeDiskSpaceFlag = &flags.DirectoryFlag{ + MinFreeDiskSpaceFlag = &cli.IntFlag{ Name: "datadir.minfreedisk", Usage: "Minimum free disk space in MB, once reached triggers auto shut down (default = --cache.gc converted to MB, 0 = disabled)", Category: flags.EthCategory, From 495a1d2b1ab77121b64a0c4e8173033db8c628e5 Mon Sep 17 00:00:00 2001 From: Cedrick AH Date: Mon, 24 Nov 2025 14:50:48 +0100 Subject: [PATCH 127/277] core, cmd: removed tablewriter from the dependencies (#33218) Fix #33212. This PR remove `github.com/olekukonko/tablewriter` from dependencies and use a naive stub implementation. `github.com/olekukonko/tablewriter` is used to format database inspection output neatly. However, it requires custom adjustments for TinyGo and is incompatible with the latest version. 
--------- Co-authored-by: MariusVanDerWijden --- cmd/geth/dbcmd.go | 3 +- cmd/keeper/go.mod | 3 -- cmd/keeper/go.sum | 7 ---- core/rawdb/database.go | 2 +- ...iter_tinygo.go => database_tablewriter.go} | 17 +++++----- ...o_test.go => database_tablewriter_test.go} | 13 +++----- core/rawdb/database_tablewriter_unix.go | 33 ------------------- go.mod | 1 - go.sum | 3 -- 9 files changed, 16 insertions(+), 66 deletions(-) rename core/rawdb/{database_tablewriter_tinygo.go => database_tablewriter.go} (93%) rename core/rawdb/{database_tablewriter_tinygo_test.go => database_tablewriter_test.go} (94%) delete mode 100644 core/rawdb/database_tablewriter_unix.go diff --git a/cmd/geth/dbcmd.go b/cmd/geth/dbcmd.go index c57add0656..fb688793e3 100644 --- a/cmd/geth/dbcmd.go +++ b/cmd/geth/dbcmd.go @@ -41,7 +41,6 @@ import ( "github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/trie" "github.com/ethereum/go-ethereum/triedb" - "github.com/olekukonko/tablewriter" "github.com/urfave/cli/v2" ) @@ -760,7 +759,7 @@ func showMetaData(ctx *cli.Context) error { data = append(data, []string{"headHeader.Root", fmt.Sprintf("%v", h.Root)}) data = append(data, []string{"headHeader.Number", fmt.Sprintf("%d (%#x)", h.Number, h.Number)}) } - table := tablewriter.NewWriter(os.Stdout) + table := rawdb.NewTableWriter(os.Stdout) table.SetHeader([]string{"Field", "Value"}) table.AppendBulk(data) table.Render() diff --git a/cmd/keeper/go.mod b/cmd/keeper/go.mod index 9486347b1f..f47dc54c06 100644 --- a/cmd/keeper/go.mod +++ b/cmd/keeper/go.mod @@ -28,11 +28,8 @@ require ( github.com/holiman/bloomfilter/v2 v2.0.3 // indirect github.com/holiman/uint256 v1.3.2 // indirect github.com/klauspost/cpuid/v2 v2.0.9 // indirect - github.com/mattn/go-runewidth v0.0.13 // indirect github.com/minio/sha256-simd v1.0.0 // indirect github.com/mitchellh/mapstructure v1.4.1 // indirect - github.com/olekukonko/tablewriter v0.0.5 // indirect - github.com/rivo/uniseg v0.2.0 // indirect 
github.com/shirou/gopsutil v3.21.4-0.20210419000835-c7a38de76ee5+incompatible // indirect github.com/supranational/blst v0.3.16-0.20250831170142-f48500c1fdbe // indirect github.com/tklauser/go-sysconf v0.3.12 // indirect diff --git a/cmd/keeper/go.sum b/cmd/keeper/go.sum index ad4c98c4b3..5744ae2093 100644 --- a/cmd/keeper/go.sum +++ b/cmd/keeper/go.sum @@ -80,17 +80,12 @@ github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0 github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/leanovate/gopter v0.2.11 h1:vRjThO1EKPb/1NsDXuDrzldR28RLkBflWYcU9CvzWu4= github.com/leanovate/gopter v0.2.11/go.mod h1:aK3tzZP/C+p1m3SPRE4SYZFGP7jjkuSI4f7Xvpt0S9c= -github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= -github.com/mattn/go-runewidth v0.0.13 h1:lTGmDsbAYt5DmK6OnoV7EuIF1wEIFAcxld6ypU4OSgU= -github.com/mattn/go-runewidth v0.0.13/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= github.com/minio/sha256-simd v1.0.0 h1:v1ta+49hkWZyvaKwrQB8elexRqm6Y0aMLjCNsrYxo6g= github.com/minio/sha256-simd v1.0.0/go.mod h1:OuYzVNI5vcoYIAmbIvHPl3N3jUzVedXbKy5RFepssQM= github.com/mitchellh/mapstructure v1.4.1 h1:CpVNEelQCZBooIPDn+AR3NpivK/TIKU8bDxdASFVQag= github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec= -github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 
h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= @@ -105,8 +100,6 @@ github.com/prometheus/procfs v0.9.0 h1:wzCHvIvM5SxWqYvwgVL7yJY8Lz3PKn49KQtpgMYJf github.com/prometheus/procfs v0.9.0/go.mod h1:+pB4zwohETzFnmlpe6yd2lSc+0/46IYZRB/chUwxUZY= github.com/prysmaticlabs/gohashtree v0.0.4-beta h1:H/EbCuXPeTV3lpKeXGPpEV9gsUpkqOOVnWapUyeWro4= github.com/prysmaticlabs/gohashtree v0.0.4-beta/go.mod h1:BFdtALS+Ffhg3lGQIHv9HDWuHS8cTvHZzrHWxwOtGOs= -github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY= -github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= github.com/shirou/gopsutil v3.21.4-0.20210419000835-c7a38de76ee5+incompatible h1:Bn1aCHHRnjv4Bl16T8rcaFjYSrGrIZvpiGO6P3Q4GpU= diff --git a/core/rawdb/database.go b/core/rawdb/database.go index d5c0f0aab2..131e370650 100644 --- a/core/rawdb/database.go +++ b/core/rawdb/database.go @@ -650,7 +650,7 @@ func InspectDatabase(db ethdb.Database, keyPrefix, keyStart []byte) error { total.Add(uint64(ancient.size())) } - table := newTableWriter(os.Stdout) + table := NewTableWriter(os.Stdout) table.SetHeader([]string{"Database", "Category", "Size", "Items"}) table.SetFooter([]string{"", "Total", common.StorageSize(total.Load()).String(), fmt.Sprintf("%d", count.Load())}) table.AppendBulk(stats) diff --git a/core/rawdb/database_tablewriter_tinygo.go b/core/rawdb/database_tablewriter.go similarity index 93% rename from core/rawdb/database_tablewriter_tinygo.go rename to core/rawdb/database_tablewriter.go index 2f8e456fd5..e1cda5c93f 100644 --- a/core/rawdb/database_tablewriter_tinygo.go +++ b/core/rawdb/database_tablewriter.go @@ -14,10 +14,7 @@ // You should have received a copy of the GNU Lesser General Public License // along with the go-ethereum library. If not, see . 
-// TODO: naive stub implementation for tablewriter - -//go:build tinygo -// +build tinygo +// Naive stub implementation for tablewriter package rawdb @@ -40,7 +37,7 @@ type Table struct { rows [][]string } -func newTableWriter(w io.Writer) *Table { +func NewTableWriter(w io.Writer) *Table { return &Table{out: w} } @@ -89,6 +86,7 @@ func (t *Table) render() error { rowSeparator := t.buildRowSeparator(widths) if len(t.headers) > 0 { + fmt.Fprintln(t.out, rowSeparator) t.printRow(t.headers, widths) fmt.Fprintln(t.out, rowSeparator) } @@ -100,6 +98,7 @@ func (t *Table) render() error { if len(t.footer) > 0 { fmt.Fprintln(t.out, rowSeparator) t.printRow(t.footer, widths) + fmt.Fprintln(t.out, rowSeparator) } return nil @@ -172,21 +171,22 @@ func (t *Table) calculateColumnWidths() []int { // // It generates a string with dashes (-) for each column width, joined by plus signs (+). // -// Example output: "----------+--------+-----------" +// Example output: "+----------+--------+-----------+" func (t *Table) buildRowSeparator(widths []int) string { parts := make([]string, len(widths)) for i, w := range widths { parts[i] = strings.Repeat("-", w) } - return strings.Join(parts, "+") + return "+" + strings.Join(parts, "+") + "+" } // printRow outputs a single row to the table writer. // // Each cell is padded with spaces and separated by pipe characters (|). 
// -// Example output: " Database | Size | Items " +// Example output: "| Database | Size | Items |" func (t *Table) printRow(row []string, widths []int) { + fmt.Fprintf(t.out, "|") for i, cell := range row { if i > 0 { fmt.Fprint(t.out, "|") @@ -204,5 +204,6 @@ func (t *Table) printRow(row []string, widths []int) { fmt.Fprintf(t.out, "%s%s%s", leftPadding, cell, rightPadding) } + fmt.Fprintf(t.out, "|") fmt.Fprintln(t.out) } diff --git a/core/rawdb/database_tablewriter_tinygo_test.go b/core/rawdb/database_tablewriter_test.go similarity index 94% rename from core/rawdb/database_tablewriter_tinygo_test.go rename to core/rawdb/database_tablewriter_test.go index 3bcf93832b..e9de5d8ce8 100644 --- a/core/rawdb/database_tablewriter_tinygo_test.go +++ b/core/rawdb/database_tablewriter_test.go @@ -14,9 +14,6 @@ // You should have received a copy of the GNU Lesser General Public License // along with the go-ethereum library. If not, see . -//go:build tinygo -// +build tinygo - package rawdb import ( @@ -27,7 +24,7 @@ import ( func TestTableWriterTinyGo(t *testing.T) { var buf bytes.Buffer - table := newTableWriter(&buf) + table := NewTableWriter(&buf) headers := []string{"Database", "Size", "Items", "Status"} rows := [][]string{ @@ -51,7 +48,7 @@ func TestTableWriterValidationErrors(t *testing.T) { // Test missing headers t.Run("MissingHeaders", func(t *testing.T) { var buf bytes.Buffer - table := newTableWriter(&buf) + table := NewTableWriter(&buf) rows := [][]string{{"x", "y", "z"}} @@ -66,7 +63,7 @@ func TestTableWriterValidationErrors(t *testing.T) { t.Run("NotEnoughRowColumns", func(t *testing.T) { var buf bytes.Buffer - table := newTableWriter(&buf) + table := NewTableWriter(&buf) headers := []string{"A", "B", "C"} badRows := [][]string{ @@ -85,7 +82,7 @@ func TestTableWriterValidationErrors(t *testing.T) { t.Run("TooManyRowColumns", func(t *testing.T) { var buf bytes.Buffer - table := newTableWriter(&buf) + table := NewTableWriter(&buf) headers := []string{"A", "B", 
"C"} badRows := [][]string{ @@ -105,7 +102,7 @@ func TestTableWriterValidationErrors(t *testing.T) { // Test mismatched footer columns t.Run("MismatchedFooterColumns", func(t *testing.T) { var buf bytes.Buffer - table := newTableWriter(&buf) + table := NewTableWriter(&buf) headers := []string{"A", "B", "C"} rows := [][]string{{"x", "y", "z"}} diff --git a/core/rawdb/database_tablewriter_unix.go b/core/rawdb/database_tablewriter_unix.go deleted file mode 100644 index 8bec5396e8..0000000000 --- a/core/rawdb/database_tablewriter_unix.go +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright 2025 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . 
- -//go:build !tinygo -// +build !tinygo - -package rawdb - -import ( - "io" - - "github.com/olekukonko/tablewriter" -) - -// Re-export the real tablewriter types and functions -type Table = tablewriter.Table - -func newTableWriter(w io.Writer) *Table { - return tablewriter.NewWriter(w) -} diff --git a/go.mod b/go.mod index 3590a54929..9c9e873a5e 100644 --- a/go.mod +++ b/go.mod @@ -50,7 +50,6 @@ require ( github.com/mattn/go-colorable v0.1.13 github.com/mattn/go-isatty v0.0.20 github.com/naoina/toml v0.1.2-0.20170918210437-9fafd6967416 - github.com/olekukonko/tablewriter v0.0.5 github.com/peterh/liner v1.1.1-0.20190123174540-a2c9a5303de7 github.com/pion/stun/v2 v2.0.0 github.com/protolambda/bls12-381-util v0.1.0 diff --git a/go.sum b/go.sum index 6ecb0b7ec3..89dd9af52a 100644 --- a/go.sum +++ b/go.sum @@ -257,7 +257,6 @@ github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/ github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= -github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= github.com/mattn/go-runewidth v0.0.13 h1:lTGmDsbAYt5DmK6OnoV7EuIF1wEIFAcxld6ypU4OSgU= github.com/mattn/go-runewidth v0.0.13/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= @@ -274,8 +273,6 @@ github.com/naoina/toml v0.1.2-0.20170918210437-9fafd6967416 h1:shk/vn9oCoOTmwcou github.com/naoina/toml v0.1.2-0.20170918210437-9fafd6967416/go.mod h1:NBIhNtsFMo3G2szEBne+bO4gS192HuIYRqfvOWb4i1E= github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= -github.com/olekukonko/tablewriter v0.0.5 
h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec= -github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= github.com/onsi/ginkgo v1.14.0 h1:2mOpI4JVVPBN+WQRa0WKH2eXR+Ey+uK4n7Zj0aYpIQA= From b5c3b32eeb9d27f3d699759a1efcd8a22ffcc24f Mon Sep 17 00:00:00 2001 From: georgehao Date: Mon, 24 Nov 2025 22:02:13 +0800 Subject: [PATCH 128/277] eth/catalyst: remove the outdated comments of ForkchoiceUpdatedV1 (#33251) --- eth/catalyst/api.go | 3 --- 1 file changed, 3 deletions(-) diff --git a/eth/catalyst/api.go b/eth/catalyst/api.go index 75b263bf6b..f88acb5cff 100644 --- a/eth/catalyst/api.go +++ b/eth/catalyst/api.go @@ -163,9 +163,6 @@ func newConsensusAPIWithoutHeartbeat(eth *eth.Ethereum) *ConsensusAPI { // // We try to set our blockchain to the headBlock. // -// If the method is called with an empty head block: we return success, which can be used -// to check if the engine API is enabled. -// // If the total difficulty was not reached: we return INVALID. 
// // If the finalizedBlockHash is set: we check if we have the finalizedBlockHash in our db, From 5748dd18e738e1e6de6e31d1809b3fd6689bfdc7 Mon Sep 17 00:00:00 2001 From: Rizky Ikwan Date: Mon, 24 Nov 2025 15:30:44 +0100 Subject: [PATCH 129/277] consensus/beacon: fix blob gas error message formatting (#33201) --- consensus/beacon/consensus.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/consensus/beacon/consensus.go b/consensus/beacon/consensus.go index 84926c3d0b..dbba73947f 100644 --- a/consensus/beacon/consensus.go +++ b/consensus/beacon/consensus.go @@ -258,11 +258,11 @@ func (beacon *Beacon) verifyHeader(chain consensus.ChainHeaderReader, header, pa if !cancun { switch { case header.ExcessBlobGas != nil: - return fmt.Errorf("invalid excessBlobGas: have %d, expected nil", header.ExcessBlobGas) + return fmt.Errorf("invalid excessBlobGas: have %d, expected nil", *header.ExcessBlobGas) case header.BlobGasUsed != nil: - return fmt.Errorf("invalid blobGasUsed: have %d, expected nil", header.BlobGasUsed) + return fmt.Errorf("invalid blobGasUsed: have %d, expected nil", *header.BlobGasUsed) case header.ParentBeaconRoot != nil: - return fmt.Errorf("invalid parentBeaconRoot, have %#x, expected nil", header.ParentBeaconRoot) + return fmt.Errorf("invalid parentBeaconRoot, have %#x, expected nil", *header.ParentBeaconRoot) } } else { if header.ParentBeaconRoot == nil { From a6191d8272a189a5e551446101f9f5e6888a8494 Mon Sep 17 00:00:00 2001 From: sashass1315 Date: Tue, 25 Nov 2025 04:23:30 +0200 Subject: [PATCH 130/277] core/txpool/blobpool: drain and signal pending conversion tasks on shutdown (#33260) --- core/txpool/blobpool/conversion.go | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/core/txpool/blobpool/conversion.go b/core/txpool/blobpool/conversion.go index 95828d83b2..80b97af5d7 100644 --- a/core/txpool/blobpool/conversion.go +++ b/core/txpool/blobpool/conversion.go @@ -183,6 +183,15 @@ func (q *conversionQueue) loop() { 
log.Debug("Waiting for blobpool billy conversion to exit") <-q.billyTaskDone } + // Signal any tasks that were queued for the next batch but never started + // so callers blocked in convert() receive an error instead of hanging. + for _, t := range txTasks { + // Best-effort notify; t.done is a buffered channel of size 1 + // created by convert(), and we send exactly once per task. + t.done <- errors.New("conversion queue closed") + } + // Drop references to allow GC of the backing array. + txTasks = txTasks[:0] return } } From 2a4847a7d1462cdda375429b447e7162514721e0 Mon Sep 17 00:00:00 2001 From: VolodymyrBg Date: Tue, 25 Nov 2025 04:24:17 +0200 Subject: [PATCH 131/277] core/rawdb: fix underflow in freezer inspect for empty ancients (#33203) --- core/rawdb/ancient_utils.go | 18 ++++++++++++------ core/rawdb/database.go | 2 +- 2 files changed, 13 insertions(+), 7 deletions(-) diff --git a/core/rawdb/ancient_utils.go b/core/rawdb/ancient_utils.go index b940d91040..7af3d2e197 100644 --- a/core/rawdb/ancient_utils.go +++ b/core/rawdb/ancient_utils.go @@ -34,14 +34,10 @@ type freezerInfo struct { name string // The identifier of freezer head uint64 // The number of last stored item in the freezer tail uint64 // The number of first stored item in the freezer + count uint64 // The number of stored items in the freezer sizes []tableSize // The storage size per table } -// count returns the number of stored items in the freezer. -func (info *freezerInfo) count() uint64 { - return info.head - info.tail + 1 -} - // size returns the storage size of the entire freezer. 
func (info *freezerInfo) size() common.StorageSize { var total common.StorageSize @@ -65,7 +61,11 @@ func inspect(name string, order map[string]freezerTableConfig, reader ethdb.Anci if err != nil { return freezerInfo{}, err } - info.head = ancients - 1 + if ancients > 0 { + info.head = ancients - 1 + } else { + info.head = 0 + } // Retrieve the number of first stored item tail, err := reader.Tail() @@ -73,6 +73,12 @@ func inspect(name string, order map[string]freezerTableConfig, reader ethdb.Anci return freezerInfo{}, err } info.tail = tail + + if ancients == 0 { + info.count = 0 + } else { + info.count = info.head - info.tail + 1 + } return info, nil } diff --git a/core/rawdb/database.go b/core/rawdb/database.go index 131e370650..8260391802 100644 --- a/core/rawdb/database.go +++ b/core/rawdb/database.go @@ -644,7 +644,7 @@ func InspectDatabase(db ethdb.Database, keyPrefix, keyStart []byte) error { fmt.Sprintf("Ancient store (%s)", strings.Title(ancient.name)), strings.Title(table.name), table.size.String(), - fmt.Sprintf("%d", ancient.count()), + fmt.Sprintf("%d", ancient.count), }) } total.Add(uint64(ancient.size())) From b04df226fa142525cdc4bf0defe31bf977d35fb9 Mon Sep 17 00:00:00 2001 From: Bashmunta Date: Tue, 25 Nov 2025 14:34:58 +0200 Subject: [PATCH 132/277] cmd/geth: skip resolver for zero-commitment verkle children (#33265) --- cmd/geth/verkle.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/geth/verkle.go b/cmd/geth/verkle.go index 67dc7257c0..c064d70aba 100644 --- a/cmd/geth/verkle.go +++ b/cmd/geth/verkle.go @@ -76,10 +76,10 @@ func checkChildren(root verkle.VerkleNode, resolver verkle.NodeResolverFn) error for i, child := range node.Children() { childC := child.Commit().Bytes() - childS, err := resolver(childC[:]) if bytes.Equal(childC[:], zero[:]) { continue } + childS, err := resolver(childC[:]) if err != nil { return fmt.Errorf("could not find child %x in db: %w", childC, err) } From ebf93555b13e072ad6b92dcbe854ebbef308f6fe 
Mon Sep 17 00:00:00 2001 From: Rizky Ikwan Date: Wed, 26 Nov 2025 09:05:30 +0100 Subject: [PATCH 133/277] consensus/misc: fix blob gas error message formatting (#33275) Dereference the `header.BlobGasUsed` pointer when formatting the error message in `VerifyEIP4844Header`. --- consensus/misc/eip4844/eip4844.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/consensus/misc/eip4844/eip4844.go b/consensus/misc/eip4844/eip4844.go index c1a21195e3..47b54d2e85 100644 --- a/consensus/misc/eip4844/eip4844.go +++ b/consensus/misc/eip4844/eip4844.go @@ -115,7 +115,7 @@ func VerifyEIP4844Header(config *params.ChainConfig, parent, header *types.Heade return fmt.Errorf("blob gas used %d exceeds maximum allowance %d", *header.BlobGasUsed, bcfg.maxBlobGas()) } if *header.BlobGasUsed%params.BlobTxBlobGasPerBlob != 0 { - return fmt.Errorf("blob gas used %d not a multiple of blob gas per blob %d", header.BlobGasUsed, params.BlobTxBlobGasPerBlob) + return fmt.Errorf("blob gas used %d not a multiple of blob gas per blob %d", *header.BlobGasUsed, params.BlobTxBlobGasPerBlob) } // Verify the excessBlobGas is correct based on the parent header From 960c87a9442d566132a9d233e3fbc6dfa5aa4372 Mon Sep 17 00:00:00 2001 From: rjl493456442 Date: Wed, 26 Nov 2025 16:07:16 +0800 Subject: [PATCH 134/277] triedb/pathdb: implement iterator of history index (#32981) This change introduces an iterator for the history index in the pathdb. It provides sequential access to historical entries, enabling efficient scanning and future features built on top of historical state traversal. 
--- triedb/pathdb/history_index.go | 43 +-- triedb/pathdb/history_index_block.go | 54 +-- triedb/pathdb/history_index_iterator.go | 359 +++++++++++++++++++ triedb/pathdb/history_index_iterator_test.go | 297 +++++++++++++++ 4 files changed, 684 insertions(+), 69 deletions(-) create mode 100644 triedb/pathdb/history_index_iterator.go create mode 100644 triedb/pathdb/history_index_iterator_test.go diff --git a/triedb/pathdb/history_index.go b/triedb/pathdb/history_index.go index 5b4c91d7e6..87b6e377af 100644 --- a/triedb/pathdb/history_index.go +++ b/triedb/pathdb/history_index.go @@ -20,7 +20,6 @@ import ( "errors" "fmt" "math" - "sort" "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/ethdb" @@ -119,30 +118,34 @@ func (r *indexReader) refresh() error { return nil } +// newIterator creates an iterator for traversing the index entries. +func (r *indexReader) newIterator() *indexIterator { + return newIndexIterator(r.descList, func(id uint32) (*blockReader, error) { + br, ok := r.readers[id] + if !ok { + var err error + br, err = newBlockReader(readStateIndexBlock(r.state, r.db, id)) + if err != nil { + return nil, err + } + r.readers[id] = br + } + return br, nil + }) +} + // readGreaterThan locates the first element that is greater than the specified // id. If no such element is found, MaxUint64 is returned. 
func (r *indexReader) readGreaterThan(id uint64) (uint64, error) { - index := sort.Search(len(r.descList), func(i int) bool { - return id < r.descList[i].max - }) - if index == len(r.descList) { + it := r.newIterator() + found := it.SeekGT(id) + if err := it.Error(); err != nil { + return 0, err + } + if !found { return math.MaxUint64, nil } - desc := r.descList[index] - - br, ok := r.readers[desc.id] - if !ok { - var err error - blob := readStateIndexBlock(r.state, r.db, desc.id) - br, err = newBlockReader(blob) - if err != nil { - return 0, err - } - r.readers[desc.id] = br - } - // The supplied ID is not greater than block.max, ensuring that an element - // satisfying the condition can be found. - return br.readGreaterThan(id) + return it.ID(), nil } // indexWriter is responsible for writing index data for a specific state (either diff --git a/triedb/pathdb/history_index_block.go b/triedb/pathdb/history_index_block.go index 5abdee682a..7b59c8e882 100644 --- a/triedb/pathdb/history_index_block.go +++ b/triedb/pathdb/history_index_block.go @@ -21,7 +21,6 @@ import ( "errors" "fmt" "math" - "sort" ) const ( @@ -164,58 +163,15 @@ func newBlockReader(blob []byte) (*blockReader, error) { // readGreaterThan locates the first element in the block that is greater than // the specified value. If no such element is found, MaxUint64 is returned. 
func (br *blockReader) readGreaterThan(id uint64) (uint64, error) { - var err error - index := sort.Search(len(br.restarts), func(i int) bool { - item, n := binary.Uvarint(br.data[br.restarts[i]:]) - if n <= 0 { - err = fmt.Errorf("failed to decode item at restart %d", br.restarts[i]) - } - return item > id - }) - if err != nil { + it := newBlockIterator(br.data, br.restarts) + found := it.SeekGT(id) + if err := it.Error(); err != nil { return 0, err } - if index == 0 { - item, _ := binary.Uvarint(br.data[br.restarts[0]:]) - return item, nil - } - var ( - start int - limit int - result uint64 - ) - if index == len(br.restarts) { - // The element being searched falls within the last restart section, - // there is no guarantee such element can be found. - start = int(br.restarts[len(br.restarts)-1]) - limit = len(br.data) - } else { - // The element being searched falls within the non-last restart section, - // such element can be found for sure. - start = int(br.restarts[index-1]) - limit = int(br.restarts[index]) - } - pos := start - for pos < limit { - x, n := binary.Uvarint(br.data[pos:]) - if pos == start { - result = x - } else { - result += x - } - if result > id { - return result, nil - } - pos += n - } - // The element which is greater than specified id is not found. - if index == len(br.restarts) { + if !found { return math.MaxUint64, nil } - // The element which is the first one greater than the specified id - // is exactly the one located at the restart point. - item, _ := binary.Uvarint(br.data[br.restarts[index]:]) - return item, nil + return it.ID(), nil } type blockWriter struct { diff --git a/triedb/pathdb/history_index_iterator.go b/triedb/pathdb/history_index_iterator.go new file mode 100644 index 0000000000..1ccb39ad09 --- /dev/null +++ b/triedb/pathdb/history_index_iterator.go @@ -0,0 +1,359 @@ +// Copyright 2025 The go-ethereum Authors +// This file is part of the go-ethereum library. 
+// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see id + }) + if err != nil { + it.setErr(err) + return false + } + if index == 0 { + item, n := binary.Uvarint(it.data[it.restarts[0]:]) + + // If the restart size is 1, then the restart pointer shouldn't be 0. + // It's not practical and should be denied in the first place. + it.set(int(it.restarts[0])+n, 0, item) + return true + } + var ( + start int + limit int + restartIndex int // The restart section being searched below + ) + if index == len(it.restarts) { + // The element being searched falls within the last restart section, + // there is no guarantee such element can be found. + start = int(it.restarts[len(it.restarts)-1]) + limit = len(it.data) + restartIndex = len(it.restarts) - 1 + } else { + // The element being searched falls within the non-last restart section, + // such element can be found for sure. 
+ start = int(it.restarts[index-1]) + limit = int(it.restarts[index]) + restartIndex = index - 1 + } + var ( + result uint64 + pos = start + ) + for pos < limit { + x, n := binary.Uvarint(it.data[pos:]) + if n <= 0 { + it.setErr(fmt.Errorf("failed to decode item at pos %d", pos)) + return false + } + if pos == start { + result = x + } else { + result += x + } + pos += n + + if result > id { + if pos == limit { + it.set(pos, restartIndex+1, result) + } else { + it.set(pos, restartIndex, result) + } + return true + } + } + // The element which is greater than specified id is not found. + if index == len(it.restarts) { + it.reset() + return false + } + // The element which is the first one greater than the specified id + // is exactly the one located at the restart point. + item, n := binary.Uvarint(it.data[it.restarts[index]:]) + it.set(int(it.restarts[index])+n, index, item) + return true +} + +func (it *blockIterator) init() { + if it.dataPtr != -1 { + return + } + it.dataPtr = 0 + it.restartPtr = 0 +} + +// Next implements the HistoryIndexIterator, moving the iterator to the next +// element. If the iterator has been exhausted, and boolean with false should +// be returned. 
+func (it *blockIterator) Next() bool { + if it.exhausted || it.err != nil { + return false + } + it.init() + + // Decode the next element pointed by the iterator + v, n := binary.Uvarint(it.data[it.dataPtr:]) + if n <= 0 { + it.setErr(fmt.Errorf("failed to decode item at pos %d", it.dataPtr)) + return false + } + + var val uint64 + if it.dataPtr == int(it.restarts[it.restartPtr]) { + val = v + } else { + val = it.id + v + } + + // Move to the next restart section if the data pointer crosses the boundary + nextRestartPtr := it.restartPtr + if it.restartPtr < len(it.restarts)-1 && it.dataPtr+n == int(it.restarts[it.restartPtr+1]) { + nextRestartPtr = it.restartPtr + 1 + } + it.set(it.dataPtr+n, nextRestartPtr, val) + + return true +} + +// ID implements HistoryIndexIterator, returning the id of the element where the +// iterator is positioned at. +func (it *blockIterator) ID() uint64 { + return it.id +} + +// Error implements HistoryIndexIterator, returning any accumulated error. +// Exhausting all the elements is not considered to be an error. +func (it *blockIterator) Error() error { return it.err } + +// blockLoader defines the method to retrieve the specific block for reading. +type blockLoader func(id uint32) (*blockReader, error) + +// indexIterator is an iterator to traverse the history indices belonging to the +// specific state entry. 
+type indexIterator struct { + // immutable fields + descList []*indexBlockDesc + loader blockLoader + + // mutable fields + blockIt *blockIterator + blockPtr int + exhausted bool + err error +} + +func newIndexIterator(descList []*indexBlockDesc, loader blockLoader) *indexIterator { + it := &indexIterator{ + descList: descList, + loader: loader, + } + it.reset() + return it +} + +func (it *indexIterator) setErr(err error) { + if it.err != nil { + return + } + it.err = err +} + +func (it *indexIterator) reset() { + it.blockIt = nil + it.blockPtr = -1 + it.exhausted = false + it.err = nil + + if len(it.descList) == 0 { + it.exhausted = true + } +} + +func (it *indexIterator) open(blockPtr int) error { + id := it.descList[blockPtr].id + br, err := it.loader(id) + if err != nil { + return err + } + it.blockIt = newBlockIterator(br.data, br.restarts) + it.blockPtr = blockPtr + return nil +} + +// SeekGT moves the iterator to the first element whose id is greater than the +// given number. It returns whether such element exists. +// +// Note, this operation will unset the exhausted status and subsequent traversal +// is allowed. +func (it *indexIterator) SeekGT(id uint64) bool { + if it.err != nil { + return false + } + index := sort.Search(len(it.descList), func(i int) bool { + return id < it.descList[i].max + }) + if index == len(it.descList) { + return false + } + it.exhausted = false + + if it.blockIt == nil || it.blockPtr != index { + if err := it.open(index); err != nil { + it.setErr(err) + return false + } + } + return it.blockIt.SeekGT(id) +} + +func (it *indexIterator) init() error { + if it.blockIt != nil { + return nil + } + return it.open(0) +} + +// Next implements the HistoryIndexIterator, moving the iterator to the next +// element. If the iterator has been exhausted, and boolean with false should +// be returned. 
+func (it *indexIterator) Next() bool { + if it.exhausted || it.err != nil { + return false + } + if err := it.init(); err != nil { + it.setErr(err) + return false + } + + if it.blockIt.Next() { + return true + } + if it.blockPtr == len(it.descList)-1 { + it.exhausted = true + return false + } + if err := it.open(it.blockPtr + 1); err != nil { + it.setErr(err) + return false + } + return it.blockIt.Next() +} + +// Error implements HistoryIndexIterator, returning any accumulated error. +// Exhausting all the elements is not considered to be an error. +func (it *indexIterator) Error() error { + if it.err != nil { + return it.err + } + if it.blockIt != nil { + return it.blockIt.Error() + } + return nil +} + +// ID implements HistoryIndexIterator, returning the id of the element where the +// iterator is positioned at. +func (it *indexIterator) ID() uint64 { + return it.blockIt.ID() +} diff --git a/triedb/pathdb/history_index_iterator_test.go b/triedb/pathdb/history_index_iterator_test.go new file mode 100644 index 0000000000..da60dc6e8f --- /dev/null +++ b/triedb/pathdb/history_index_iterator_test.go @@ -0,0 +1,297 @@ +// Copyright 2025 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. 
If not, see input + }) + var ( + exp bool + expVal uint64 + remains []uint64 + ) + if index == len(elements) { + exp = false + } else { + exp = true + expVal = elements[index] + if index < len(elements) { + remains = elements[index+1:] + } + } + if err := checkSeekGT(it, input, exp, expVal); err != nil { + t.Fatal(err) + } + if exp { + if err := checkNext(it, remains); err != nil { + t.Fatal(err) + } + } + } +} + +func TestIndexIteratorSeekGT(t *testing.T) { + ident := newAccountIdent(common.Hash{0x1}) + + dbA := rawdb.NewMemoryDatabase() + testIndexIterator(t, ident, dbA, makeTestIndexBlocks(dbA, ident, 1)) + + dbB := rawdb.NewMemoryDatabase() + testIndexIterator(t, ident, dbB, makeTestIndexBlocks(dbB, ident, 3*indexBlockEntriesCap)) + + dbC := rawdb.NewMemoryDatabase() + testIndexIterator(t, ident, dbC, makeTestIndexBlocks(dbC, ident, indexBlockEntriesCap-1)) + + dbD := rawdb.NewMemoryDatabase() + testIndexIterator(t, ident, dbD, makeTestIndexBlocks(dbD, ident, indexBlockEntriesCap+1)) +} + +func testIndexIterator(t *testing.T, stateIdent stateIdent, db ethdb.Database, elements []uint64) { + ir, err := newIndexReader(db, stateIdent) + if err != nil { + t.Fatalf("Failed to open the index reader, %v", err) + } + it := newIndexIterator(ir.descList, func(id uint32) (*blockReader, error) { + return newBlockReader(readStateIndexBlock(stateIdent, db, id)) + }) + + for i := 0; i < 128; i++ { + var input uint64 + if rand.Intn(2) == 0 { + input = elements[rand.Intn(len(elements))] + } else { + input = uint64(rand.Uint32()) + } + index := sort.Search(len(elements), func(i int) bool { + return elements[i] > input + }) + var ( + exp bool + expVal uint64 + remains []uint64 + ) + if index == len(elements) { + exp = false + } else { + exp = true + expVal = elements[index] + if index < len(elements) { + remains = elements[index+1:] + } + } + if err := checkSeekGT(it, input, exp, expVal); err != nil { + t.Fatal(err) + } + if exp { + if err := checkNext(it, remains); err != nil { + 
t.Fatal(err) + } + } + } +} + +func TestBlockIteratorTraversal(t *testing.T) { + /* 0-size index block is not allowed + + data, elements := makeTestIndexBlock(0) + testBlockIterator(t, data, elements) + */ + + data, elements := makeTestIndexBlock(1) + testBlockIteratorTraversal(t, data, elements) + + data, elements = makeTestIndexBlock(indexBlockRestartLen) + testBlockIteratorTraversal(t, data, elements) + + data, elements = makeTestIndexBlock(3 * indexBlockRestartLen) + testBlockIteratorTraversal(t, data, elements) + + data, elements = makeTestIndexBlock(indexBlockEntriesCap) + testBlockIteratorTraversal(t, data, elements) +} + +func testBlockIteratorTraversal(t *testing.T, data []byte, elements []uint64) { + br, err := newBlockReader(data) + if err != nil { + t.Fatalf("Failed to open the block for reading, %v", err) + } + it := newBlockIterator(br.data, br.restarts) + + if err := checkNext(it, elements); err != nil { + t.Fatal(err) + } +} + +func TestIndexIteratorTraversal(t *testing.T) { + ident := newAccountIdent(common.Hash{0x1}) + + dbA := rawdb.NewMemoryDatabase() + testIndexIteratorTraversal(t, ident, dbA, makeTestIndexBlocks(dbA, ident, 1)) + + dbB := rawdb.NewMemoryDatabase() + testIndexIteratorTraversal(t, ident, dbB, makeTestIndexBlocks(dbB, ident, 3*indexBlockEntriesCap)) + + dbC := rawdb.NewMemoryDatabase() + testIndexIteratorTraversal(t, ident, dbC, makeTestIndexBlocks(dbC, ident, indexBlockEntriesCap-1)) + + dbD := rawdb.NewMemoryDatabase() + testIndexIteratorTraversal(t, ident, dbD, makeTestIndexBlocks(dbD, ident, indexBlockEntriesCap+1)) +} + +func testIndexIteratorTraversal(t *testing.T, stateIdent stateIdent, db ethdb.KeyValueReader, elements []uint64) { + ir, err := newIndexReader(db, stateIdent) + if err != nil { + t.Fatalf("Failed to open the index reader, %v", err) + } + it := newIndexIterator(ir.descList, func(id uint32) (*blockReader, error) { + return newBlockReader(readStateIndexBlock(stateIdent, db, id)) + }) + if err := checkNext(it, 
elements); err != nil { + t.Fatal(err) + } +} From 62334a9d460d49777012894abd899698c21051e3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Felf=C3=B6ldi=20Zsolt?= Date: Wed, 26 Nov 2025 11:07:56 +0100 Subject: [PATCH 135/277] beacon: update beacon light client for fusaka (#33272) This PR adds the "FULU" beacon chain config entries for all networks and fixes the select statements that choose the appropriate engine API call versions (no new version there but the "default" was always the first version; now it's the latest version so no need to change unless there is actually a new version). New beacon checkpoints are also added for mainnet, sepolia and hoodi (not for holesky because it's not finalizing at the moment). Note that though unrelated to fusaka, the log indexer checkpoints are also updated for mainnet (not for the other testnets, mainly because I only have mainnet synced here on my travel SSD; this should be fine though because the index is also reverse generated for a year by default so it does not really affect the indexing time) Links for the new checkpoints: https://beaconcha.in/slot/13108192 https://light-sepolia.beaconcha.in/slot/9032384 https://hoodi.beaconcha.in/slot/1825728 --- beacon/blsync/engineclient.go | 28 ++++++++-------- beacon/params/checkpoint_hoodi.hex | 2 +- beacon/params/checkpoint_mainnet.hex | 2 +- beacon/params/checkpoint_sepolia.hex | 2 +- beacon/params/networks.go | 41 +++++++++++++----------- core/filtermaps/checkpoints_mainnet.json | 29 ++++++++++++++++- 6 files changed, 67 insertions(+), 37 deletions(-) diff --git a/beacon/blsync/engineclient.go b/beacon/blsync/engineclient.go index f9821fc6f3..9fc6a18a57 100644 --- a/beacon/blsync/engineclient.go +++ b/beacon/blsync/engineclient.go @@ -101,7 +101,16 @@ func (ec *engineClient) callNewPayload(fork string, event types.ChainHeadEvent) params = []any{execData} ) switch fork { - case "electra": + case "altair", "bellatrix": + method = "engine_newPayloadV1" + case "capella": + method = 
"engine_newPayloadV2" + case "deneb": + method = "engine_newPayloadV3" + parentBeaconRoot := event.BeaconHead.ParentRoot + blobHashes := collectBlobHashes(event.Block) + params = append(params, blobHashes, parentBeaconRoot) + default: // electra, fulu and above method = "engine_newPayloadV4" parentBeaconRoot := event.BeaconHead.ParentRoot blobHashes := collectBlobHashes(event.Block) @@ -110,15 +119,6 @@ func (ec *engineClient) callNewPayload(fork string, event types.ChainHeadEvent) hexRequests[i] = hexutil.Bytes(event.ExecRequests[i]) } params = append(params, blobHashes, parentBeaconRoot, hexRequests) - case "deneb": - method = "engine_newPayloadV3" - parentBeaconRoot := event.BeaconHead.ParentRoot - blobHashes := collectBlobHashes(event.Block) - params = append(params, blobHashes, parentBeaconRoot) - case "capella": - method = "engine_newPayloadV2" - default: - method = "engine_newPayloadV1" } ctx, cancel := context.WithTimeout(ec.rootCtx, time.Second*5) @@ -145,12 +145,12 @@ func (ec *engineClient) callForkchoiceUpdated(fork string, event types.ChainHead var method string switch fork { - case "deneb", "electra": - method = "engine_forkchoiceUpdatedV3" + case "altair", "bellatrix": + method = "engine_forkchoiceUpdatedV1" case "capella": method = "engine_forkchoiceUpdatedV2" - default: - method = "engine_forkchoiceUpdatedV1" + default: // deneb, electra, fulu and above + method = "engine_forkchoiceUpdatedV3" } ctx, cancel := context.WithTimeout(ec.rootCtx, time.Second*5) diff --git a/beacon/params/checkpoint_hoodi.hex b/beacon/params/checkpoint_hoodi.hex index 2885d7c996..7bac591f96 100644 --- a/beacon/params/checkpoint_hoodi.hex +++ b/beacon/params/checkpoint_hoodi.hex @@ -1 +1 @@ -0x1bbf958008172591b6cbdb3d8d52e26998258e83d4bdb9eec10969d84519a6bd \ No newline at end of file +0xbb7a7f3c40d8ea0b450f91587db65d0f1c079669277e01a0426c8911702a863a \ No newline at end of file diff --git a/beacon/params/checkpoint_mainnet.hex b/beacon/params/checkpoint_mainnet.hex index 
417e69a24b..c2886cc564 100644 --- a/beacon/params/checkpoint_mainnet.hex +++ b/beacon/params/checkpoint_mainnet.hex @@ -1 +1 @@ -0x2fe39a39b6f7cbd549e0f74d259de6db486005a65bd3bd92840dd6ce21d6f4c8 \ No newline at end of file +0x2af778d703186526a1b6304b423f338f11556206f618643c3f7fa0d7b1ef5c9b \ No newline at end of file diff --git a/beacon/params/checkpoint_sepolia.hex b/beacon/params/checkpoint_sepolia.hex index 02faf72187..55842f8ac0 100644 --- a/beacon/params/checkpoint_sepolia.hex +++ b/beacon/params/checkpoint_sepolia.hex @@ -1 +1 @@ -0x86686b2b366e24134e0e3969a9c5f3759f92e5d2b04785b42e22cc7d468c2107 \ No newline at end of file +0x48a89c9ea7ba19de2931797974cf8722344ab231c0edada278b108ef74125478 \ No newline at end of file diff --git a/beacon/params/networks.go b/beacon/params/networks.go index b35db34fd6..5dcf08cc5d 100644 --- a/beacon/params/networks.go +++ b/beacon/params/networks.go @@ -40,36 +40,39 @@ var ( GenesisTime: 1606824023, Checkpoint: common.HexToHash(checkpointMainnet), }). - AddFork("GENESIS", 0, []byte{0, 0, 0, 0}). - AddFork("ALTAIR", 74240, []byte{1, 0, 0, 0}). - AddFork("BELLATRIX", 144896, []byte{2, 0, 0, 0}). - AddFork("CAPELLA", 194048, []byte{3, 0, 0, 0}). - AddFork("DENEB", 269568, []byte{4, 0, 0, 0}). - AddFork("ELECTRA", 364032, []byte{5, 0, 0, 0}) + AddFork("GENESIS", 0, common.FromHex("0x00000000")). + AddFork("ALTAIR", 74240, common.FromHex("0x01000000")). + AddFork("BELLATRIX", 144896, common.FromHex("0x02000000")). + AddFork("CAPELLA", 194048, common.FromHex("0x03000000")). + AddFork("DENEB", 269568, common.FromHex("0x04000000")). + AddFork("ELECTRA", 364032, common.FromHex("0x05000000")). + AddFork("FULU", 411392, common.FromHex("0x06000000")) SepoliaLightConfig = (&ChainConfig{ GenesisValidatorsRoot: common.HexToHash("0xd8ea171f3c94aea21ebc42a1ed61052acf3f9209c00e4efbaaddac09ed9b8078"), GenesisTime: 1655733600, Checkpoint: common.HexToHash(checkpointSepolia), }). - AddFork("GENESIS", 0, []byte{144, 0, 0, 105}). 
- AddFork("ALTAIR", 50, []byte{144, 0, 0, 112}). - AddFork("BELLATRIX", 100, []byte{144, 0, 0, 113}). - AddFork("CAPELLA", 56832, []byte{144, 0, 0, 114}). - AddFork("DENEB", 132608, []byte{144, 0, 0, 115}). - AddFork("ELECTRA", 222464, []byte{144, 0, 0, 116}) + AddFork("GENESIS", 0, common.FromHex("0x90000069")). + AddFork("ALTAIR", 50, common.FromHex("0x90000070")). + AddFork("BELLATRIX", 100, common.FromHex("0x90000071")). + AddFork("CAPELLA", 56832, common.FromHex("0x90000072")). + AddFork("DENEB", 132608, common.FromHex("0x90000073")). + AddFork("ELECTRA", 222464, common.FromHex("0x90000074")). + AddFork("FULU", 272640, common.FromHex("0x90000075")) HoleskyLightConfig = (&ChainConfig{ GenesisValidatorsRoot: common.HexToHash("0x9143aa7c615a7f7115e2b6aac319c03529df8242ae705fba9df39b79c59fa8b1"), GenesisTime: 1695902400, Checkpoint: common.HexToHash(checkpointHolesky), }). - AddFork("GENESIS", 0, []byte{1, 1, 112, 0}). - AddFork("ALTAIR", 0, []byte{2, 1, 112, 0}). - AddFork("BELLATRIX", 0, []byte{3, 1, 112, 0}). - AddFork("CAPELLA", 256, []byte{4, 1, 112, 0}). - AddFork("DENEB", 29696, []byte{5, 1, 112, 0}). - AddFork("ELECTRA", 115968, []byte{6, 1, 112, 0}) + AddFork("GENESIS", 0, common.FromHex("0x01017000")). + AddFork("ALTAIR", 0, common.FromHex("0x02017000")). + AddFork("BELLATRIX", 0, common.FromHex("0x03017000")). + AddFork("CAPELLA", 256, common.FromHex("0x04017000")). + AddFork("DENEB", 29696, common.FromHex("0x05017000")). + AddFork("ELECTRA", 115968, common.FromHex("0x06017000")). + AddFork("FULU", 165120, common.FromHex("0x07017000")) HoodiLightConfig = (&ChainConfig{ GenesisValidatorsRoot: common.HexToHash("0x212f13fc4df078b6cb7db228f1c8307566dcecf900867401a92023d7ba99cb5f"), @@ -82,5 +85,5 @@ var ( AddFork("CAPELLA", 0, common.FromHex("0x40000910")). AddFork("DENEB", 0, common.FromHex("0x50000910")). AddFork("ELECTRA", 2048, common.FromHex("0x60000910")). 
- AddFork("FULU", 18446744073709551615, common.FromHex("0x70000910")) + AddFork("FULU", 50688, common.FromHex("0x70000910")) ) diff --git a/core/filtermaps/checkpoints_mainnet.json b/core/filtermaps/checkpoints_mainnet.json index 2ea065ddb7..795967405d 100644 --- a/core/filtermaps/checkpoints_mainnet.json +++ b/core/filtermaps/checkpoints_mainnet.json @@ -288,5 +288,32 @@ {"blockNumber": 22958100, "blockId": "0xe38e0ff7b0c4065ca42ea577bc32f2566ca46f2ddeedcc4bc1f8fb00e7f26329", "firstIndex": 19260242424}, {"blockNumber": 22988600, "blockId": "0x04ca74758b22e0ea54b8c992022ff21c16a2af9c45144c3b0f80de921a7eee82", "firstIndex": 19327351273}, {"blockNumber": 23018392, "blockId": "0x61cc979b00bc97b48356f986a5b9ec997d674bc904c2a2e4b0f17de08e50b3bb", "firstIndex": 19394459627}, -{"blockNumber": 23048524, "blockId": "0x489de15d95739ede4ab15e8b5151d80d4dc85ae10e7be800b1a4723094a678df", "firstIndex": 19461570073} +{"blockNumber": 23048524, "blockId": "0x489de15d95739ede4ab15e8b5151d80d4dc85ae10e7be800b1a4723094a678df", "firstIndex": 19461570073}, +{"blockNumber": 23078983, "blockId": "0xd64da4fe45a0a349101b8ce1a6336fb099d8b00cc274d0eb59356e134190b8f2", "firstIndex": 19528679292}, +{"blockNumber": 23109224, "blockId": "0xbb6c29f91820fcf6caef881bdfe61eb690f9796f8f139c56eaf27aa601fe5ed2", "firstIndex": 19595787850}, +{"blockNumber": 23136690, "blockId": "0xc1915739edff731d469ec1500ad05102c0d68cc1ef062411d2e9741b8ebdc571", "firstIndex": 19662896969}, +{"blockNumber": 23164324, "blockId": "0x97d0078f2a22a8fbde4660d5f11846d00a669c606e42291df55e863190914a9f", "firstIndex": 19730004997}, +{"blockNumber": 23192975, "blockId": "0x8463749ec09f55fccdd962498f93d6468d744faedc35da8097642cd4609e08f2", "firstIndex": 19797111604}, +{"blockNumber": 23219805, "blockId": "0x76ca51d01b5724f1b0f6c3acb50dfcdb00e1648a475dd7599174b301c25e5517", "firstIndex": 19864222312}, +{"blockNumber": 23236331, "blockId": "0x388f9e36b3ec2120d2b4adfdcdb0c0b8de139eef107e327670dd77fc581c2c5f", "firstIndex": 
19931331831}, +{"blockNumber": 23260849, "blockId": "0xe4a467164dbc8f9beebf0c8478a9e7ee16dfce65e2da423b1048601831222ba7", "firstIndex": 19998441286}, +{"blockNumber": 23282795, "blockId": "0x74ad210aa1bfdd4bcf61298ebff622da758c36c38d62d001f2440d09e73ef6c7", "firstIndex": 20065548083}, +{"blockNumber": 23300759, "blockId": "0xa405f5ea21a5207d3cde718a1e3fb7f0ce3dd87ac6040a0db52da0e9488e63f6", "firstIndex": 20132623334}, +{"blockNumber": 23319772, "blockId": "0xafae645dd057af450eddf69c7604bde0136524abc5b7d6697426427ef2d30724", "firstIndex": 20199767615}, +{"blockNumber": 23342113, "blockId": "0x8482c4be13294cfd862d500051c8b4efb95b50f4a79717da6aeabb2a6ff3e199", "firstIndex": 20266874986}, +{"blockNumber": 23366974, "blockId": "0xf6047cafea5da7aaf20691df700d759fe84f5aa2176d9dd42c6ae138899a29ea", "firstIndex": 20333983351}, +{"blockNumber": 23397579, "blockId": "0xe94815fe0278659a26e15b023c0c3877abf6f1265710c5dfddf161ee8af01b40", "firstIndex": 20401094335}, +{"blockNumber": 23425940, "blockId": "0xa98d6d48b93d9ef848c340b47cf9d191f36e76375dd747d8adcb39d72251822d", "firstIndex": 20468200506}, +{"blockNumber": 23452402, "blockId": "0x5f26ff308c0883a04f4a92411bcd05e50538d95a96532452f43b71fde4af908d", "firstIndex": 20535311178}, +{"blockNumber": 23478884, "blockId": "0x23a81186c21218f7034ee81dcd72db71edcbc0e97da26703b8026b6d4a81f693", "firstIndex": 20602384106}, +{"blockNumber": 23507136, "blockId": "0xb1d4a2bd836c5d78b678f0bfffd651bdfeae8b7965639b640f8745193b142323", "firstIndex": 20669529935}, +{"blockNumber": 23535463, "blockId": "0x1886a719a6376352753242ec5093c434938b9a90356facdbdaafd2670da97d82", "firstIndex": 20736637374}, +{"blockNumber": 23564278, "blockId": "0xcdc5a1349f45430771fa2080555f20b80b2d84f575728dbb8bd3b633fbb0a00b", "firstIndex": 20803747399}, +{"blockNumber": 23594711, "blockId": "0xbbf41e407367feeb865b305e38aee8bef84ba62b66efbd6fcd3b16a5d50cc055", "firstIndex": 20870854523}, +{"blockNumber": 23627417, "blockId": 
"0x1b3c6bd39c5aa7c73101e7e946964b3d7afecf3f253efda8de5610dd99cbee82", "firstIndex": 20937963176}, +{"blockNumber": 23660347, "blockId": "0xadc8ac88c281f50c3aaf1d08149100e8f91a16bbe09b28ac86665bcea681d41b", "firstIndex": 21005072865}, +{"blockNumber": 23692228, "blockId": "0xeb4040468161d9d5b6863eb62e4f21f72a1944f910ecfa95ee6a9dbc39e92ef0", "firstIndex": 21072181753}, +{"blockNumber": 23722331, "blockId": "0xdfc9be1b43488148868da0c8ac56e99e0ffde18bcc418755c4765e745fe74024", "firstIndex": 21139291342}, +{"blockNumber": 23752866, "blockId": "0xb9cf0dfee429a1450f5fb7a237729cf7d0c49e7e35c00dc3709f197c091f8b39", "firstIndex": 21206399901}, +{"blockNumber": 23784485, "blockId": "0x92f5f119078b4ef7ad99273e2ae6f874cfb663e80d741a821e0bb7c25c0369c7", "firstIndex": 21273505141} ] From cf93077fab7984d868437ffcb4b28720fd119b08 Mon Sep 17 00:00:00 2001 From: radik878 Date: Wed, 26 Nov 2025 12:30:05 +0200 Subject: [PATCH 136/277] rlp: finalize listIterator on parse error to prevent non-advancing loops (#33245) The list iterator previously returned true on parse errors without advancing the input, which could lead to non-advancing infinite loops for callers that do not check Err() inside the loop; to make iteration safe while preserving error visibility, Next() now marks the iterator as finished when readKind fails, returning true for the error step so existing users that check Err() can handle it, and then false on subsequent calls, and the function comment was updated to document this behavior and the need to check Err(). --- rlp/iterator.go | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/rlp/iterator.go b/rlp/iterator.go index 95bd3f2582..05567fa05b 100644 --- a/rlp/iterator.go +++ b/rlp/iterator.go @@ -37,15 +37,24 @@ func NewListIterator(data RawValue) (*listIterator, error) { return it, nil } -// Next forwards the iterator one step, returns true if it was not at end yet +// Next forwards the iterator one step. 
+// Returns true if there is a next item or an error occurred on this step (check Err()). +// On parse error, the iterator is marked finished and subsequent calls return false. func (it *listIterator) Next() bool { if len(it.data) == 0 { return false } _, t, c, err := readKind(it.data) + if err != nil { + it.next = nil + it.err = err + // Mark iteration as finished to avoid potential infinite loops on subsequent Next calls. + it.data = nil + return true + } it.next = it.data[:t+c] it.data = it.data[t+c:] - it.err = err + it.err = nil return true } From 3f7cd905b0880c12a65efbe4450f16c24a6bf65d Mon Sep 17 00:00:00 2001 From: Lucia Date: Thu, 27 Nov 2025 02:58:15 +1300 Subject: [PATCH 137/277] accounts/usbwallet: fix double hashing in SignTextWithPassphrase (#33138) SignTextWithPassphrase calls SignText, which already performs TextHash. --- accounts/usbwallet/wallet.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/accounts/usbwallet/wallet.go b/accounts/usbwallet/wallet.go index 0fd0415a9e..f1597ca1a7 100644 --- a/accounts/usbwallet/wallet.go +++ b/accounts/usbwallet/wallet.go @@ -632,7 +632,7 @@ func (w *wallet) SignTx(account accounts.Account, tx *types.Transaction, chainID // data is not supported for Ledger wallets, so this method will always return // an error. func (w *wallet) SignTextWithPassphrase(account accounts.Account, passphrase string, text []byte) ([]byte, error) { - return w.SignText(account, accounts.TextHash(text)) + return w.SignText(account, text) } // SignTxWithPassphrase implements accounts.Wallet, attempting to sign the given From 1468331f9d8e8c78125c202c909f6f5fccf601df Mon Sep 17 00:00:00 2001 From: oxBoni Date: Wed, 26 Nov 2025 15:34:11 +0100 Subject: [PATCH 138/277] p2p/discover/v5wire: remove redundant bytes clone in WHOAREYOU encoding (#33180) head.AuthData is assigned later in the function, so the earlier assignment can safely be removed. 
--- p2p/discover/v5wire/encoding.go | 1 - 1 file changed, 1 deletion(-) diff --git a/p2p/discover/v5wire/encoding.go b/p2p/discover/v5wire/encoding.go index 08292a70ba..d6a30a17ca 100644 --- a/p2p/discover/v5wire/encoding.go +++ b/p2p/discover/v5wire/encoding.go @@ -326,7 +326,6 @@ func (c *Codec) encodeWhoareyou(toID enode.ID, packet *Whoareyou) (Header, error // Create header. head := c.makeHeader(toID, flagWhoareyou, 0) - head.AuthData = slices.Clone(c.buf.Bytes()) head.Nonce = packet.Nonce // Encode auth data. From ed4d00fd83f33e56b7bdb16e14735dd0f846b717 Mon Sep 17 00:00:00 2001 From: Marius van der Wijden Date: Wed, 26 Nov 2025 15:53:03 +0100 Subject: [PATCH 139/277] miner: add --miner.maxblobs flag (#33129) Adds a flag to specify how many blobs a node is willing to include in their locally build block as specified in https://eips.ethereum.org/EIPS/eip-7872 I deviated from the EIP in one case, I allowed for specifying 0 as the minimum blobs/block --- cmd/geth/main.go | 1 + cmd/utils/flags.go | 9 +++++++++ miner/miner.go | 1 + miner/worker.go | 16 +++++++++++++--- 4 files changed, 24 insertions(+), 3 deletions(-) diff --git a/cmd/geth/main.go b/cmd/geth/main.go index 109b36836a..851ae1ce0b 100644 --- a/cmd/geth/main.go +++ b/cmd/geth/main.go @@ -118,6 +118,7 @@ var ( utils.MinerGasPriceFlag, utils.MinerEtherbaseFlag, // deprecated utils.MinerExtraDataFlag, + utils.MinerMaxBlobsFlag, utils.MinerRecommitIntervalFlag, utils.MinerPendingFeeRecipientFlag, utils.MinerNewPayloadTimeoutFlag, // deprecated diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index c95efd9fd7..536f8c9e65 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -553,6 +553,11 @@ var ( Usage: "0x prefixed public address for the pending block producer (not used for actual block production)", Category: flags.MinerCategory, } + MinerMaxBlobsFlag = &cli.IntFlag{ + Name: "miner.maxblobs", + Usage: "Maximum number of blobs per block (falls back to protocol maximum if unspecified)", + 
Category: flags.MinerCategory, + } // Account settings PasswordFileFlag = &cli.PathFlag{ @@ -1571,6 +1576,10 @@ func setMiner(ctx *cli.Context, cfg *miner.Config) { log.Warn("The flag --miner.newpayload-timeout is deprecated and will be removed, please use --miner.recommit") cfg.Recommit = ctx.Duration(MinerNewPayloadTimeoutFlag.Name) } + if ctx.IsSet(MinerMaxBlobsFlag.Name) { + maxBlobs := ctx.Int(MinerMaxBlobsFlag.Name) + cfg.MaxBlobsPerBlock = &maxBlobs + } } func setRequiredBlocks(ctx *cli.Context, cfg *ethconfig.Config) { diff --git a/miner/miner.go b/miner/miner.go index 810cc20a6c..4c40b0c4f8 100644 --- a/miner/miner.go +++ b/miner/miner.go @@ -48,6 +48,7 @@ type Config struct { GasCeil uint64 // Target gas ceiling for mined blocks. GasPrice *big.Int // Minimum gas price for mining a transaction Recommit time.Duration // The time interval for miner to re-create mining work. + MaxBlobsPerBlock *int // Maximum number of blobs per block (unset uses protocol default) } // DefaultConfig contains default settings for miner. diff --git a/miner/worker.go b/miner/worker.go index c0574eac23..e0dcdca456 100644 --- a/miner/worker.go +++ b/miner/worker.go @@ -43,6 +43,16 @@ var ( errBlockInterruptedByTimeout = errors.New("timeout while building block") ) +// maxBlobsPerBlock returns the maximum number of blobs per block. +// Users can specify the maximum number of blobs per block if necessary. +func (miner *Miner) maxBlobsPerBlock(time uint64) int { + maxBlobs := eip4844.MaxBlobsPerBlock(miner.chainConfig, time) + if miner.config.MaxBlobsPerBlock != nil { + maxBlobs = *miner.config.MaxBlobsPerBlock + } + return maxBlobs +} + // environment is the worker's current environment and holds all // information of the sealing block generation. type environment struct { @@ -309,7 +319,7 @@ func (miner *Miner) commitBlobTransaction(env *environment, tx *types.Transactio // isn't really a better place right now. 
The blob gas limit is checked at block validation time // and not during execution. This means core.ApplyTransaction will not return an error if the // tx has too many blobs. So we have to explicitly check it here. - maxBlobs := eip4844.MaxBlobsPerBlock(miner.chainConfig, env.header.Time) + maxBlobs := miner.maxBlobsPerBlock(env.header.Time) if env.blobs+len(sc.Blobs) > maxBlobs { return errors.New("max data blobs reached") } @@ -364,7 +374,7 @@ func (miner *Miner) commitTransactions(env *environment, plainTxs, blobTxs *tran } // If we don't have enough blob space for any further blob transactions, // skip that list altogether - if !blobTxs.Empty() && env.blobs >= eip4844.MaxBlobsPerBlock(miner.chainConfig, env.header.Time) { + if !blobTxs.Empty() && env.blobs >= miner.maxBlobsPerBlock(env.header.Time) { log.Trace("Not enough blob space for further blob transactions") blobTxs.Clear() // Fall though to pick up any plain txs @@ -403,7 +413,7 @@ func (miner *Miner) commitTransactions(env *environment, plainTxs, blobTxs *tran // blobs or not, however the max check panics when called on a chain without // a defined schedule, so we need to verify it's safe to call. if isCancun { - left := eip4844.MaxBlobsPerBlock(miner.chainConfig, env.header.Time) - env.blobs + left := miner.maxBlobsPerBlock(env.header.Time) - env.blobs if left < int(ltx.BlobGas/params.BlobTxBlobGasPerBlob) { log.Trace("Not enough blob space left for transaction", "hash", ltx.Hash, "left", left, "needed", ltx.BlobGas/params.BlobTxBlobGasPerBlob) txs.Pop() From 689ea10f3516fcf62ebd5570af232b3e4ef266f7 Mon Sep 17 00:00:00 2001 From: Jonny Rhea <5555162+jrhea@users.noreply.github.com> Date: Wed, 26 Nov 2025 08:58:59 -0600 Subject: [PATCH 140/277] core/vm: implement EIP-8024 (#33095) EIP-8024: Backward compatible SWAPN, DUPN, EXCHANGE Introduces additional instructions for manipulating the stack which allow accessing the stack at higher depths. 
This is an initial implementation of the EIP, which is still in Review stage. --- core/vm/eips.go | 23 +++++ core/vm/instructions.go | 109 ++++++++++++++++++++++++ core/vm/instructions_test.go | 161 +++++++++++++++++++++++++++++++++++ 3 files changed, 293 insertions(+) diff --git a/core/vm/eips.go b/core/vm/eips.go index d7ed18648e..dfcac4b930 100644 --- a/core/vm/eips.go +++ b/core/vm/eips.go @@ -42,6 +42,7 @@ var activators = map[int]func(*JumpTable){ 4762: enable4762, 7702: enable7702, 7939: enable7939, + 8024: enable8024, } // EnableEIP enables the given EIP on the config. @@ -342,6 +343,28 @@ func enable6780(jt *JumpTable) { } } +// enable8024 applies EIP-8024 (DUPN, SWAPN, EXCHANGE) +func enable8024(jt *JumpTable) { + jt[DUPN] = &operation{ + execute: opDupN, + constantGas: GasFastestStep, + minStack: minStack(1, 0), + maxStack: maxStack(0, 1), + } + jt[SWAPN] = &operation{ + execute: opSwapN, + constantGas: GasFastestStep, + minStack: minStack(2, 0), + maxStack: maxStack(0, 0), + } + jt[EXCHANGE] = &operation{ + execute: opExchange, + constantGas: GasFastestStep, + minStack: minStack(2, 0), + maxStack: maxStack(0, 0), + } +} + func opExtCodeCopyEIP4762(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) { var ( stack = scope.Stack diff --git a/core/vm/instructions.go b/core/vm/instructions.go index 44d3e81a9c..29f1f79c49 100644 --- a/core/vm/instructions.go +++ b/core/vm/instructions.go @@ -920,6 +920,115 @@ func opSelfdestruct6780(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, erro return nil, errStopToken } +func decodeSingle(x byte) int { + if x <= 90 { + return int(x) + 17 + } + return int(x) - 20 +} + +func decodePair(x byte) (int, int) { + var k int + if x <= 79 { + k = int(x) + } else { + k = int(x) - 48 + } + q, r := k/16, k%16 + if q < r { + return q + 1, r + 1 + } + return r + 1, 29 - q +} + +func opDupN(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) { + code := scope.Contract.Code + i := *pc + 1 + + // Ensure an immediate 
byte exists after DUPN + if i >= uint64(len(code)) { + return nil, &ErrInvalidOpCode{opcode: INVALID} + } + x := code[i] + + // This range is excluded to preserve compatibility with existing opcodes. + if x > 90 && x < 128 { + return nil, &ErrInvalidOpCode{opcode: OpCode(x)} + } + n := decodeSingle(x) + + // DUPN duplicates the n'th stack item, so the stack must contain at least n elements. + if scope.Stack.len() < n { + return nil, &ErrStackUnderflow{stackLen: scope.Stack.len(), required: n} + } + + //The n‘th stack item is duplicated at the top of the stack. + scope.Stack.push(scope.Stack.Back(n - 1)) + *pc += 2 + return nil, nil +} + +func opSwapN(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) { + code := scope.Contract.Code + i := *pc + 1 + + // Ensure an immediate byte exists after SWAPN + if i >= uint64(len(code)) { + return nil, &ErrInvalidOpCode{opcode: INVALID} + } + x := code[i] + + // This range is excluded to preserve compatibility with existing opcodes. + if x > 90 && x < 128 { + return nil, &ErrInvalidOpCode{opcode: OpCode(x)} + } + n := decodeSingle(x) + + // SWAPN operates on the top and n+1 stack items, so the stack must contain at least n+1 elements. + if scope.Stack.len() < n+1 { + return nil, &ErrStackUnderflow{stackLen: scope.Stack.len(), required: n + 1} + } + + // The (n+1)‘th stack item is swapped with the top of the stack. 
+ indexTop := scope.Stack.len() - 1 + indexN := scope.Stack.len() - 1 - n + scope.Stack.data[indexTop], scope.Stack.data[indexN] = scope.Stack.data[indexN], scope.Stack.data[indexTop] + *pc += 2 + return nil, nil +} + +func opExchange(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) { + code := scope.Contract.Code + i := *pc + 1 + + // Ensure an immediate byte exists after EXCHANGE + if i >= uint64(len(code)) { + return nil, &ErrInvalidOpCode{opcode: INVALID} + } + x := code[i] + + // This range is excluded both to preserve compatibility with existing opcodes + // and to keep decode_pair’s 16-aligned arithmetic mapping valid (0–79, 128–255). + if x > 79 && x < 128 { + return nil, &ErrInvalidOpCode{opcode: OpCode(x)} + } + n, m := decodePair(x) + need := max(n, m) + 1 + + // EXCHANGE operates on the (n+1)'th and (m+1)'th stack items, + // so the stack must contain at least max(n, m)+1 elements. + if scope.Stack.len() < need { + return nil, &ErrStackUnderflow{stackLen: scope.Stack.len(), required: need} + } + + // The (n+1)‘th stack item is swapped with the (m+1)‘th stack item. 
+ indexN := scope.Stack.len() - 1 - n + indexM := scope.Stack.len() - 1 - m + scope.Stack.data[indexN], scope.Stack.data[indexM] = scope.Stack.data[indexM], scope.Stack.data[indexN] + *pc += 2 + return nil, nil +} + // following functions are used by the instruction jump table // make log instruction function diff --git a/core/vm/instructions_test.go b/core/vm/instructions_test.go index 72f561f4bf..0f91a205f5 100644 --- a/core/vm/instructions_test.go +++ b/core/vm/instructions_test.go @@ -1008,3 +1008,164 @@ func TestOpCLZ(t *testing.T) { } } } + +func TestEIP8024_Execution(t *testing.T) { + evm := NewEVM(BlockContext{}, nil, params.TestChainConfig, Config{}) + + tests := []struct { + name string + codeHex string + wantErr bool + wantVals []uint64 + }{ + { + name: "DUPN", + codeHex: "60016000808080808080808080808080808080e600", + wantVals: []uint64{ + 1, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 1, + }, + }, + { + name: "SWAPN", + codeHex: "600160008080808080808080808080808080806002e700", + wantVals: []uint64{ + 1, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 2, + }, + }, + { + name: "EXCHANGE", + codeHex: "600060016002e801", + wantVals: []uint64{2, 0, 1}, + }, + { + name: "INVALID_SWAPN_LOW", + codeHex: "e75b", + wantErr: true, + }, + { + name: "JUMP over INVALID_DUPN", + codeHex: "600456e65b", + wantErr: false, + }, + // Additional test cases + { + name: "INVALID_DUPN_LOW", + codeHex: "e65b", + wantErr: true, + }, + { + name: "INVALID_EXCHANGE_LOW", + codeHex: "e850", + wantErr: true, + }, + { + name: "INVALID_DUPN_HIGH", + codeHex: "e67f", + wantErr: true, + }, + { + name: "INVALID_SWAPN_HIGH", + codeHex: "e77f", + wantErr: true, + }, + { + name: "INVALID_EXCHANGE_HIGH", + codeHex: "e87f", + wantErr: true, + }, + { + name: "UNDERFLOW_DUPN", + codeHex: "5f5f5f5f5f5f5f5f5f5f5f5f5f5f5f5fe600", // (n=17, need 17 items, have 16) + wantErr: true, + }, + { + name: "UNDERFLOW_SWAPN", + codeHex: "5f5f5f5f5f5f5f5f5f5f5f5f5f5f5f5f5fe700", // (n=17, need 18 
items, have 17) + wantErr: true, + }, + { + name: "UNDERFLOW_EXCHANGE", + codeHex: "60016002e801", // (n,m)=(1,2), need 3 items, have 2 + wantErr: true, + }, + { + name: "MISSING_IMMEDIATE_DUPN", + codeHex: "e6", // no operand + wantErr: true, + }, + { + name: "MISSING_IMMEDIATE_SWAPN", + codeHex: "e7", // no operand + wantErr: true, + }, + { + name: "MISSING_IMMEDIATE_EXCHANGE", + codeHex: "e8", // no operand + wantErr: true, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + code := common.FromHex(tc.codeHex) + stack := newstack() + pc := uint64(0) + scope := &ScopeContext{Stack: stack, Contract: &Contract{Code: code}} + var err error + for pc < uint64(len(code)) && err == nil { + op := code[pc] + switch op { + case 0x00: + return + case 0x60: + _, err = opPush1(&pc, evm, scope) + pc++ + case 0x80: + dup1 := makeDup(1) + _, err = dup1(&pc, evm, scope) + pc++ + case 0x56: + _, err = opJump(&pc, evm, scope) + pc++ + case 0x5b: + _, err = opJumpdest(&pc, evm, scope) + pc++ + case 0xe6: + _, err = opDupN(&pc, evm, scope) + case 0xe7: + _, err = opSwapN(&pc, evm, scope) + case 0xe8: + _, err = opExchange(&pc, evm, scope) + default: + err = &ErrInvalidOpCode{opcode: OpCode(op)} + } + } + if tc.wantErr { + if err == nil { + t.Fatalf("expected error, got nil") + } + return + } + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + got := make([]uint64, 0, stack.len()) + for i := stack.len() - 1; i >= 0; i-- { + got = append(got, stack.data[i].Uint64()) + } + if len(got) != len(tc.wantVals) { + t.Fatalf("stack len=%d; want %d", len(got), len(tc.wantVals)) + } + for i := range got { + if got[i] != tc.wantVals[i] { + t.Fatalf("[%s] stack[%d]=%d; want %d\nstack=%v", + tc.name, i, got[i], tc.wantVals[i], got) + } + } + }) + } +} From 3e48e0779cee923ad7a385e0492cceb6eb5581a1 Mon Sep 17 00:00:00 2001 From: Klimov Sergei Date: Wed, 26 Nov 2025 23:15:28 +0800 Subject: [PATCH 141/277] beacon/config: add ELECTRA, FULU to knownForks (#32674) --- 
beacon/params/config.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/beacon/params/config.go b/beacon/params/config.go index b01b739e07..5336220efd 100644 --- a/beacon/params/config.go +++ b/beacon/params/config.go @@ -38,7 +38,7 @@ import ( // across signing different data structures. const syncCommitteeDomain = 7 -var knownForks = []string{"GENESIS", "ALTAIR", "BELLATRIX", "CAPELLA", "DENEB"} +var knownForks = []string{"GENESIS", "ALTAIR", "BELLATRIX", "CAPELLA", "DENEB", "ELECTRA", "FULU"} // ClientConfig contains beacon light client configuration. type ClientConfig struct { From c55a12197e0495a0c5d3e5f049cdffd4e696aece Mon Sep 17 00:00:00 2001 From: Klimov Sergei Date: Wed, 26 Nov 2025 23:19:33 +0800 Subject: [PATCH 142/277] beacon/config: ignore nil values in config file (#33065) YAML supports leaving out the value, so we should handle this condition in our limited parser. --- beacon/params/config.go | 3 +++ beacon/params/config_test.go | 3 +++ 2 files changed, 6 insertions(+) diff --git a/beacon/params/config.go b/beacon/params/config.go index 5336220efd..437aa53788 100644 --- a/beacon/params/config.go +++ b/beacon/params/config.go @@ -103,6 +103,9 @@ func (c *ChainConfig) LoadForks(file []byte) error { epochs["GENESIS"] = 0 for key, value := range config { + if value == nil { + continue + } if strings.HasSuffix(key, "_FORK_VERSION") { name := key[:len(key)-len("_FORK_VERSION")] switch version := value.(type) { diff --git a/beacon/params/config_test.go b/beacon/params/config_test.go index 41e120469b..0b569b604c 100644 --- a/beacon/params/config_test.go +++ b/beacon/params/config_test.go @@ -15,6 +15,9 @@ ALTAIR_FORK_EPOCH: 1 EIP7928_FORK_VERSION: 0xb0000038 EIP7928_FORK_EPOCH: 18446744073709551615 +EIP7XXX_FORK_VERSION: +EIP7XXX_FORK_EPOCH: + BLOB_SCHEDULE: [] ` c := &ChainConfig{} From 3bbf5f5b6a9cd5ba998f6580586ddf208217e915 Mon Sep 17 00:00:00 2001 From: Marius van der Wijden Date: Wed, 26 Nov 2025 16:50:16 +0100 Subject: [PATCH 
143/277] core/vm: improve memory resize (#33056) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Looks like (in some very EVM specific tests) we spent a lot of time resizing memory. If the underlying array is big enough, we can speed it up a bit by simply slicing the memory. goos: linux goarch: amd64 pkg: github.com/ethereum/go-ethereum/core/vm cpu: Intel(R) Core(TM) Ultra 7 155U │ /tmp/old.txt │ /tmp/new.txt │ │ sec/op │ sec/op vs base │ Resize-14 6.145n ± 9% 1.854n ± 14% -69.83% (p=0.000 n=10) │ /tmp/old.txt │ /tmp/new.txt │ │ B/op │ B/op vs base │ Resize-14 5.000 ± 0% 5.000 ± 0% ~ (p=1.000 n=10) │ /tmp/old.txt │ /tmp/new.txt │ │ allocs/op │ allocs/op vs base │ Resize-14 0.000 ± 0% 0.000 ± 0% ~ (p=1.000 n=10) ¹ From the blocktest benchmark: 620ms 10.93s (flat, cum) 9.92% of Total . . 80:func (m *Memory) Resize(size uint64) { 30ms 60ms 81: if uint64(m.Len()) < size { 590ms 10.87s 82: m.store = append(m.store, make([]byte, size-uint64(m.Len()))...) . . 83: } . . 84:} --------- Co-authored-by: Felix Lange --- core/vm/memory.go | 11 ++++++++--- core/vm/memory_test.go | 7 +++++++ 2 files changed, 15 insertions(+), 3 deletions(-) diff --git a/core/vm/memory.go b/core/vm/memory.go index 5e11e83748..54bc2b2849 100644 --- a/core/vm/memory.go +++ b/core/vm/memory.go @@ -44,6 +44,7 @@ func (m *Memory) Free() { // To reduce peak allocation, return only smaller memory instances to the pool. const maxBufferSize = 16 << 10 if cap(m.store) <= maxBufferSize { + clear(m.store) m.store = m.store[:0] m.lastGasCost = 0 memoryPool.Put(m) @@ -76,10 +77,14 @@ func (m *Memory) Set32(offset uint64, val *uint256.Int) { val.PutUint256(m.store[offset:]) } -// Resize resizes the memory to size +// Resize grows the memory to the requested size. func (m *Memory) Resize(size uint64) { - if uint64(m.Len()) < size { - m.store = append(m.store, make([]byte, size-uint64(m.Len()))...) 
+ if uint64(len(m.store)) < size { + if uint64(cap(m.store)) >= size { + m.store = m.store[:size] + } else { + m.store = append(m.store, make([]byte, size-uint64(len(m.store)))...) + } } } diff --git a/core/vm/memory_test.go b/core/vm/memory_test.go index 41389b729a..3890d18cb5 100644 --- a/core/vm/memory_test.go +++ b/core/vm/memory_test.go @@ -83,3 +83,10 @@ func TestMemoryCopy(t *testing.T) { } } } + +func BenchmarkResize(b *testing.B) { + memory := NewMemory() + for i := range b.N { + memory.Resize(uint64(i)) + } +} From 7805e203f03ca129035d068ad31c12a979bd96a0 Mon Sep 17 00:00:00 2001 From: Guillaume Ballet <3272758+gballet@users.noreply.github.com> Date: Wed, 26 Nov 2025 23:36:35 +0100 Subject: [PATCH 144/277] .github/workflows: validate that the directories exist (#33289) A new pointless fad appeared recently where people just create a fairly low information tag at the beginning of their github PR titles. Something like `feat` or other keywords. This seems to originate from the angular community and to be used for automation scripts over there. We do not use any of those scripts and if we did we would be using the github labels, which offer strictly equivalent functionalities without wasting useful PR title space. In order for these keywords to fail the validation, I am adding a check that these directories listed indeed exist in the repository. 
--- .github/workflows/validate_pr.yml | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/.github/workflows/validate_pr.yml b/.github/workflows/validate_pr.yml index 0719ca2e3d..57e8c12b5e 100644 --- a/.github/workflows/validate_pr.yml +++ b/.github/workflows/validate_pr.yml @@ -8,10 +8,15 @@ jobs: validate-pr: runs-on: ubuntu-latest steps: + - name: Checkout repository + uses: actions/checkout@v4 + - name: Check PR Title Format uses: actions/github-script@v7 with: script: | + const fs = require('fs'); + const path = require('path'); const prTitle = context.payload.pull_request.title; const titleRegex = /^([\w\s,{}/.]+): .+/; @@ -19,5 +24,21 @@ jobs: core.setFailed(`PR title "${prTitle}" does not match required format: directory, ...: description`); return; } + + const match = prTitle.match(titleRegex); + const dirPart = match[1]; + const directories = dirPart.split(',').map(d => d.trim()); + const missingDirs = []; + for (const dir of directories) { + const fullPath = path.join(process.env.GITHUB_WORKSPACE, dir); + if (!fs.existsSync(fullPath)) { + missingDirs.push(dir); + } + } + + if (missingDirs.length > 0) { + core.setFailed(`The following directories in the PR title do not exist: ${missingDirs.join(', ')}`); + return; + } console.log('✅ PR title format is valid'); From 6452b7ad0581963672f71bf8d4351430a22adb07 Mon Sep 17 00:00:00 2001 From: Justin Date: Thu, 27 Nov 2025 03:35:22 +0100 Subject: [PATCH 145/277] beacon/light: optimize database key assembling (#33292) --- beacon/light/canonical.go | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/beacon/light/canonical.go b/beacon/light/canonical.go index b5371493b4..56622425b2 100644 --- a/beacon/light/canonical.go +++ b/beacon/light/canonical.go @@ -69,7 +69,10 @@ func newCanonicalStore[T any](db ethdb.Iteratee, keyPrefix []byte) (*canonicalSt // databaseKey returns the database key belonging to the given period. 
func (cs *canonicalStore[T]) databaseKey(period uint64) []byte { - return binary.BigEndian.AppendUint64(append([]byte{}, cs.keyPrefix...), period) + key := make([]byte, len(cs.keyPrefix)+8) + copy(key, cs.keyPrefix) + binary.BigEndian.PutUint64(key[len(cs.keyPrefix):], period) + return key } // add adds the given item to the database. It also ensures that the range remains From 9bab01bee4babb34717b98f4655d1282fe9f277f Mon Sep 17 00:00:00 2001 From: Rizky Ikwan Date: Thu, 27 Nov 2025 09:03:21 +0100 Subject: [PATCH 146/277] consensus/clique: fix blob gas error message formatting (#33296) Fixes error messages to print the actual blob gas value instead of the pointer address by dereferencing `ExcessBlobGas`, `BlobGasUsed` and `ParentBeaconRoot`. --- consensus/clique/clique.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/consensus/clique/clique.go b/consensus/clique/clique.go index b593d2117d..a6f02c8c2b 100644 --- a/consensus/clique/clique.go +++ b/consensus/clique/clique.go @@ -305,11 +305,11 @@ func (c *Clique) verifyHeader(chain consensus.ChainHeaderReader, header *types.H // Verify the non-existence of cancun-specific header fields switch { case header.ExcessBlobGas != nil: - return fmt.Errorf("invalid excessBlobGas: have %d, expected nil", header.ExcessBlobGas) + return fmt.Errorf("invalid excessBlobGas: have %d, expected nil", *header.ExcessBlobGas) case header.BlobGasUsed != nil: - return fmt.Errorf("invalid blobGasUsed: have %d, expected nil", header.BlobGasUsed) + return fmt.Errorf("invalid blobGasUsed: have %d, expected nil", *header.BlobGasUsed) case header.ParentBeaconRoot != nil: - return fmt.Errorf("invalid parentBeaconRoot, have %#x, expected nil", header.ParentBeaconRoot) + return fmt.Errorf("invalid parentBeaconRoot, have %#x, expected nil", *header.ParentBeaconRoot) } // All basic checks passed, verify cascading fields return c.verifyCascadingFields(chain, header, parents) From 6426257c0ffe866362d366ee0eacdd5961cca346 Mon 
Sep 17 00:00:00 2001 From: David Klank <155117116+davidjsonn@users.noreply.github.com> Date: Thu, 27 Nov 2025 10:05:24 +0200 Subject: [PATCH 147/277] eth/tracers/logger: rename WriteTo to Write (#33227) --- eth/tracers/logger/logger.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/eth/tracers/logger/logger.go b/eth/tracers/logger/logger.go index 824a5e0c3e..67e07f78d0 100644 --- a/eth/tracers/logger/logger.go +++ b/eth/tracers/logger/logger.go @@ -94,8 +94,8 @@ func (s *StructLog) ErrorString() string { return "" } -// WriteTo writes the human-readable log data into the supplied writer. -func (s *StructLog) WriteTo(writer io.Writer) { +// Write writes the human-readable log data into the supplied writer. +func (s *StructLog) Write(writer io.Writer) { fmt.Fprintf(writer, "%-16spc=%08d gas=%v cost=%v", s.Op, s.Pc, s.Gas, s.GasCost) if s.Err != nil { fmt.Fprintf(writer, " ERROR: %v", s.Err) @@ -324,7 +324,7 @@ func (l *StructLogger) OnOpcode(pc uint64, opcode byte, gas, cost uint64, scope l.logs = append(l.logs, entry) return } - log.WriteTo(l.writer) + log.Write(l.writer) } // OnExit is called a call frame finishes processing. @@ -405,7 +405,7 @@ func (l *StructLogger) Output() []byte { return l.output } // @deprecated func WriteTrace(writer io.Writer, logs []StructLog) { for _, log := range logs { - log.WriteTo(writer) + log.Write(writer) } } From 8d1b1c20d00d326bc7c4f1a42c1493e6f4b897d7 Mon Sep 17 00:00:00 2001 From: radik878 Date: Thu, 27 Nov 2025 13:43:37 +0200 Subject: [PATCH 148/277] core/txpool/blobpool: auto-start next conversion batch after completion (#33301) This change fixes a stall in the legacy blob sidecar conversion pipeline where tasks that arrived during an active batch could remain unprocessed indefinitely after that batch completed, unless a new external event arrived. 
The root cause was that the loop did not restart processing in the case <-done: branch even when txTasks had accumulated work, relying instead on a future event to retrigger the scheduler. This behavior is inconsistent with the billy task pipeline, which immediately chains to the next task via runNextBillyTask() without requiring an external trigger. The fix adds a symmetric restart path in `case <-done`: that checks `len(txTasks) > 0`, clones the accumulated tasks, clears the queue, and launches a new run with a fresh done and interrupt. This preserves batching semantics, prevents indefinite blocking of callers of convert(), and remains safe during shutdown since the quit path still interrupts and awaits the active batch. No public interfaces or logging were changed. --- core/txpool/blobpool/conversion.go | 6 +++ core/txpool/blobpool/conversion_test.go | 70 +++++++++++++++++++++++++ 2 files changed, 76 insertions(+) diff --git a/core/txpool/blobpool/conversion.go b/core/txpool/blobpool/conversion.go index 80b97af5d7..afdc10554f 100644 --- a/core/txpool/blobpool/conversion.go +++ b/core/txpool/blobpool/conversion.go @@ -161,6 +161,12 @@ func (q *conversionQueue) loop() { case <-done: done, interrupt = nil, nil + if len(txTasks) > 0 { + done, interrupt = make(chan struct{}), new(atomic.Int32) + tasks := slices.Clone(txTasks) + txTasks = txTasks[:0] + go q.run(tasks, done, interrupt) + } case fn := <-q.startBilly: q.billyQueue = append(q.billyQueue, fn) diff --git a/core/txpool/blobpool/conversion_test.go b/core/txpool/blobpool/conversion_test.go index a9fd26dbaf..7ffffb2e4d 100644 --- a/core/txpool/blobpool/conversion_test.go +++ b/core/txpool/blobpool/conversion_test.go @@ -19,7 +19,9 @@ package blobpool import ( "crypto/ecdsa" "crypto/sha256" + "sync" "testing" + "time" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" @@ -99,3 +101,71 @@ func TestConversionQueueDoubleClose(t *testing.T) { queue.close() queue.close() // Should 
not panic } + +func TestConversionQueueAutoRestartBatch(t *testing.T) { + queue := newConversionQueue() + defer queue.close() + + key, _ := crypto.GenerateKey() + + // Create a heavy transaction to ensure the first batch runs long enough + // for subsequent tasks to be queued while it is active. + heavy := makeMultiBlobTx(0, 1, 1, 1, int(params.BlobTxMaxBlobs), 0, key, types.BlobSidecarVersion0) + + var wg sync.WaitGroup + wg.Add(1) + heavyDone := make(chan error, 1) + go func() { + defer wg.Done() + heavyDone <- queue.convert(heavy) + }() + + // Give the conversion worker a head start so that the following tasks are + // enqueued while the first batch is running. + time.Sleep(200 * time.Millisecond) + + tx1 := makeTx(1, 1, 1, 1, key) + tx2 := makeTx(2, 1, 1, 1, key) + + wg.Add(2) + done1 := make(chan error, 1) + done2 := make(chan error, 1) + go func() { defer wg.Done(); done1 <- queue.convert(tx1) }() + go func() { defer wg.Done(); done2 <- queue.convert(tx2) }() + + select { + case err := <-done1: + if err != nil { + t.Fatalf("tx1 conversion error: %v", err) + } + case <-time.After(30 * time.Second): + t.Fatal("timeout waiting for tx1 conversion") + } + + select { + case err := <-done2: + if err != nil { + t.Fatalf("tx2 conversion error: %v", err) + } + case <-time.After(30 * time.Second): + t.Fatal("timeout waiting for tx2 conversion") + } + + select { + case err := <-heavyDone: + if err != nil { + t.Fatalf("heavy conversion error: %v", err) + } + case <-time.After(30 * time.Second): + t.Fatal("timeout waiting for heavy conversion") + } + + wg.Wait() + + if tx1.BlobTxSidecar().Version != types.BlobSidecarVersion1 { + t.Fatalf("tx1 sidecar version mismatch: have %d, want %d", tx1.BlobTxSidecar().Version, types.BlobSidecarVersion1) + } + if tx2.BlobTxSidecar().Version != types.BlobSidecarVersion1 { + t.Fatalf("tx2 sidecar version mismatch: have %d, want %d", tx2.BlobTxSidecar().Version, types.BlobSidecarVersion1) + } +} From 
aa1a8dacaeef8a37e8e382d404a5a9709f654107 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 27 Nov 2025 14:27:49 +0100 Subject: [PATCH 149/277] cmd/keeper/go.mod: bump github.com/consensys/gnark-crypto from 0.18.0 to 0.18.1 in /cmd/keeper (#33256) Bumps [github.com/consensys/gnark-crypto](https://github.com/consensys/gnark-crypto) from 0.18.0 to 0.18.1.

Release notes

Sourced from github.com/consensys/gnark-crypto's releases.

v0.18.1

Full Changelog: https://github.com/Consensys/gnark-crypto/compare/v0.18.0...v0.18.1

Changelog

Sourced from github.com/consensys/gnark-crypto's changelog.

[v0.18.1] - 2025-10-28

Docs

  • add CHANGELOG for 0.18.1

Perf

  • limit memory allocation during Vector deserialization (#759)

Commits

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=github.com/consensys/gnark-crypto&package-manager=go_modules&previous-version=0.18.0&new-version=0.18.1)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot show ignore conditions` will show all of the ignore conditions of the specified dependency - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself) You can disable automated security fix PRs for this repo from the [Security Alerts page](https://github.com/ethereum/go-ethereum/network/alerts).
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- cmd/keeper/go.mod | 2 +- cmd/keeper/go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/cmd/keeper/go.mod b/cmd/keeper/go.mod index f47dc54c06..8402382a9b 100644 --- a/cmd/keeper/go.mod +++ b/cmd/keeper/go.mod @@ -12,7 +12,7 @@ require ( github.com/VictoriaMetrics/fastcache v1.13.0 // indirect github.com/bits-and-blooms/bitset v1.20.0 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect - github.com/consensys/gnark-crypto v0.18.0 // indirect + github.com/consensys/gnark-crypto v0.18.1 // indirect github.com/crate-crypto/go-eth-kzg v1.4.0 // indirect github.com/crate-crypto/go-ipa v0.0.0-20240724233137-53bbb0ceb27a // indirect github.com/deckarep/golang-set/v2 v2.6.0 // indirect diff --git a/cmd/keeper/go.sum b/cmd/keeper/go.sum index 5744ae2093..4f4c0dbba0 100644 --- a/cmd/keeper/go.sum +++ b/cmd/keeper/go.sum @@ -26,8 +26,8 @@ github.com/cockroachdb/redact v1.1.5 h1:u1PMllDkdFfPWaNGMyLD1+so+aq3uUItthCFqzwP github.com/cockroachdb/redact v1.1.5/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg= github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 h1:zuQyyAKVxetITBuuhv3BI9cMrmStnpT18zmgmTxunpo= github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06/go.mod h1:7nc4anLGjupUW/PeY5qiNYsdNXj7zopG+eqsS7To5IQ= -github.com/consensys/gnark-crypto v0.18.0 h1:vIye/FqI50VeAr0B3dx+YjeIvmc3LWz4yEfbWBpTUf0= -github.com/consensys/gnark-crypto v0.18.0/go.mod h1:L3mXGFTe1ZN+RSJ+CLjUt9x7PNdx8ubaYfDROyp2Z8c= +github.com/consensys/gnark-crypto v0.18.1 h1:RyLV6UhPRoYYzaFnPQA4qK3DyuDgkTgskDdoGqFt3fI= +github.com/consensys/gnark-crypto v0.18.1/go.mod h1:L3mXGFTe1ZN+RSJ+CLjUt9x7PNdx8ubaYfDROyp2Z8c= github.com/crate-crypto/go-eth-kzg v1.4.0 h1:WzDGjHk4gFg6YzV0rJOAsTK4z3Qkz5jd4RE3DAvPFkg= github.com/crate-crypto/go-eth-kzg v1.4.0/go.mod h1:J9/u5sWfznSObptgfa92Jq8rTswn6ahQWEuiLHOjCUI= 
github.com/crate-crypto/go-ipa v0.0.0-20240724233137-53bbb0ceb27a h1:W8mUrRp6NOVl3J+MYp5kPMoUZPp7aOYHtaua31lwRHg= From 795a7ab58a6a59cca524d025f5bc4dce6a6d4e01 Mon Sep 17 00:00:00 2001 From: Guillaume Ballet <3272758+gballet@users.noreply.github.com> Date: Thu, 27 Nov 2025 20:43:05 +0100 Subject: [PATCH 150/277] go.mod: bump gnark-crypto to 0.18.1 (#33305) Fix for https://github.com/ethereum/go-ethereum/pull/33060 which I can't directly fix in the branch. --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 9c9e873a5e..aff1d53923 100644 --- a/go.mod +++ b/go.mod @@ -13,7 +13,7 @@ require ( github.com/cespare/cp v0.1.0 github.com/cloudflare/cloudflare-go v0.114.0 github.com/cockroachdb/pebble v1.1.5 - github.com/consensys/gnark-crypto v0.18.0 + github.com/consensys/gnark-crypto v0.18.1 github.com/crate-crypto/go-eth-kzg v1.4.0 github.com/crate-crypto/go-ipa v0.0.0-20240724233137-53bbb0ceb27a github.com/davecgh/go-spew v1.1.1 diff --git a/go.sum b/go.sum index 89dd9af52a..503e0975d6 100644 --- a/go.sum +++ b/go.sum @@ -75,8 +75,8 @@ github.com/cockroachdb/redact v1.1.5 h1:u1PMllDkdFfPWaNGMyLD1+so+aq3uUItthCFqzwP github.com/cockroachdb/redact v1.1.5/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg= github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 h1:zuQyyAKVxetITBuuhv3BI9cMrmStnpT18zmgmTxunpo= github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06/go.mod h1:7nc4anLGjupUW/PeY5qiNYsdNXj7zopG+eqsS7To5IQ= -github.com/consensys/gnark-crypto v0.18.0 h1:vIye/FqI50VeAr0B3dx+YjeIvmc3LWz4yEfbWBpTUf0= -github.com/consensys/gnark-crypto v0.18.0/go.mod h1:L3mXGFTe1ZN+RSJ+CLjUt9x7PNdx8ubaYfDROyp2Z8c= +github.com/consensys/gnark-crypto v0.18.1 h1:RyLV6UhPRoYYzaFnPQA4qK3DyuDgkTgskDdoGqFt3fI= +github.com/consensys/gnark-crypto v0.18.1/go.mod h1:L3mXGFTe1ZN+RSJ+CLjUt9x7PNdx8ubaYfDROyp2Z8c= github.com/cpuguy83/go-md2man/v2 v2.0.5 h1:ZtcqGrnekaHpVLArFSe4HK5DoKx1T0rq2DwVB0alcyc= 
github.com/cpuguy83/go-md2man/v2 v2.0.5/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/crate-crypto/go-eth-kzg v1.4.0 h1:WzDGjHk4gFg6YzV0rJOAsTK4z3Qkz5jd4RE3DAvPFkg= From b3b46ce4351851e50c5e3d9cd0f7141885ca3df3 Mon Sep 17 00:00:00 2001 From: ANtutov Date: Fri, 28 Nov 2025 04:42:22 +0200 Subject: [PATCH 151/277] eth/downloader: remove dead proc counter (#33309) --- eth/downloader/queue.go | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/eth/downloader/queue.go b/eth/downloader/queue.go index 9fe169d5f7..76a14345e5 100644 --- a/eth/downloader/queue.go +++ b/eth/downloader/queue.go @@ -418,7 +418,7 @@ func (q *queue) reserveHeaders(p *peerConnection, count int, taskPool map[common skip := make([]*types.Header, 0) progress := false throttled := false - for proc := 0; len(send) < count && !taskQueue.Empty(); proc++ { + for len(send) < count && !taskQueue.Empty() { // the task queue will pop items in order, so the highest prio block // is also the lowest block number. header, _ := taskQueue.Peek() @@ -433,7 +433,6 @@ func (q *queue) reserveHeaders(p *peerConnection, count int, taskPool map[common taskQueue.PopItem() progress = true delete(taskPool, header.Hash()) - proc = proc - 1 log.Error("Fetch reservation already delivered", "number", header.Number.Uint64()) continue } @@ -455,7 +454,6 @@ func (q *queue) reserveHeaders(p *peerConnection, count int, taskPool map[common // If it's a noop, we can skip this task delete(taskPool, header.Hash()) taskQueue.PopItem() - proc = proc - 1 progress = true continue } From 446fdebdc38144ac948629b9c67c5803956b6fb9 Mon Sep 17 00:00:00 2001 From: Rizky Ikwan Date: Fri, 28 Nov 2025 05:14:30 +0100 Subject: [PATCH 152/277] consensus/ethash: fix blob gas error message formatting (#33300) Fixes error messages to print the actual blob gas value instead of the pointer address by dereferencing `ExcessBlobGas`, `BlobGasUsed` and `ParentBeaconRoot`. 
--- consensus/ethash/consensus.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/consensus/ethash/consensus.go b/consensus/ethash/consensus.go index 4f92f1282b..376cbac8c0 100644 --- a/consensus/ethash/consensus.go +++ b/consensus/ethash/consensus.go @@ -278,11 +278,11 @@ func (ethash *Ethash) verifyHeader(chain consensus.ChainHeaderReader, header, pa // Verify the non-existence of cancun-specific header fields switch { case header.ExcessBlobGas != nil: - return fmt.Errorf("invalid excessBlobGas: have %d, expected nil", header.ExcessBlobGas) + return fmt.Errorf("invalid excessBlobGas: have %d, expected nil", *header.ExcessBlobGas) case header.BlobGasUsed != nil: - return fmt.Errorf("invalid blobGasUsed: have %d, expected nil", header.BlobGasUsed) + return fmt.Errorf("invalid blobGasUsed: have %d, expected nil", *header.BlobGasUsed) case header.ParentBeaconRoot != nil: - return fmt.Errorf("invalid parentBeaconRoot, have %#x, expected nil", header.ParentBeaconRoot) + return fmt.Errorf("invalid parentBeaconRoot, have %#x, expected nil", *header.ParentBeaconRoot) } // Add some fake checks for tests if ethash.fakeDelay != nil { From f43228152b361c18e4f75388950ef7647f94a7f9 Mon Sep 17 00:00:00 2001 From: cui Date: Fri, 28 Nov 2025 17:13:01 +0800 Subject: [PATCH 153/277] cmd/utils: fix dumpconfig (#33302) --- cmd/utils/flags.go | 3 +-- miner/miner.go | 2 +- miner/worker.go | 4 ++-- 3 files changed, 4 insertions(+), 5 deletions(-) diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index 536f8c9e65..072a1d607b 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -1577,8 +1577,7 @@ func setMiner(ctx *cli.Context, cfg *miner.Config) { cfg.Recommit = ctx.Duration(MinerNewPayloadTimeoutFlag.Name) } if ctx.IsSet(MinerMaxBlobsFlag.Name) { - maxBlobs := ctx.Int(MinerMaxBlobsFlag.Name) - cfg.MaxBlobsPerBlock = &maxBlobs + cfg.MaxBlobsPerBlock = ctx.Int(MinerMaxBlobsFlag.Name) } } diff --git a/miner/miner.go b/miner/miner.go index 
4c40b0c4f8..ee890b5e54 100644 --- a/miner/miner.go +++ b/miner/miner.go @@ -48,7 +48,7 @@ type Config struct { GasCeil uint64 // Target gas ceiling for mined blocks. GasPrice *big.Int // Minimum gas price for mining a transaction Recommit time.Duration // The time interval for miner to re-create mining work. - MaxBlobsPerBlock *int // Maximum number of blobs per block (unset uses protocol default) + MaxBlobsPerBlock int // Maximum number of blobs per block (0 for unset uses protocol default) } // DefaultConfig contains default settings for miner. diff --git a/miner/worker.go b/miner/worker.go index e0dcdca456..45d7073ed7 100644 --- a/miner/worker.go +++ b/miner/worker.go @@ -47,8 +47,8 @@ var ( // Users can specify the maximum number of blobs per block if necessary. func (miner *Miner) maxBlobsPerBlock(time uint64) int { maxBlobs := eip4844.MaxBlobsPerBlock(miner.chainConfig, time) - if miner.config.MaxBlobsPerBlock != nil { - maxBlobs = *miner.config.MaxBlobsPerBlock + if miner.config.MaxBlobsPerBlock != 0 { + maxBlobs = miner.config.MaxBlobsPerBlock } return maxBlobs } From f691d661c4ade255894cb570f55e8c30b888290c Mon Sep 17 00:00:00 2001 From: cui Date: Fri, 28 Nov 2025 17:52:21 +0800 Subject: [PATCH 154/277] cmd/utils: fix disabling discovery through config file (#33279) No matter what value of P2P.DiscoveryV4 or DiscoveryV5 is set in config file, it will be overwritten by the CLI flag, even if the flag is not set. This fixes it to apply the flag only if set. 
--- cmd/utils/flags.go | 12 ++++++++---- node/defaults.go | 8 +++++--- 2 files changed, 13 insertions(+), 7 deletions(-) diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index 072a1d607b..9f69581754 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -864,14 +864,14 @@ var ( Aliases: []string{"discv4"}, Usage: "Enables the V4 discovery mechanism", Category: flags.NetworkingCategory, - Value: true, + Value: node.DefaultConfig.P2P.DiscoveryV4, } DiscoveryV5Flag = &cli.BoolFlag{ Name: "discovery.v5", Aliases: []string{"discv5"}, Usage: "Enables the V5 discovery mechanism", Category: flags.NetworkingCategory, - Value: true, + Value: node.DefaultConfig.P2P.DiscoveryV5, } NetrestrictFlag = &cli.StringFlag{ Name: "netrestrict", @@ -1373,8 +1373,12 @@ func SetP2PConfig(ctx *cli.Context, cfg *p2p.Config) { flags.CheckExclusive(ctx, DiscoveryV4Flag, NoDiscoverFlag) flags.CheckExclusive(ctx, DiscoveryV5Flag, NoDiscoverFlag) - cfg.DiscoveryV4 = ctx.Bool(DiscoveryV4Flag.Name) - cfg.DiscoveryV5 = ctx.Bool(DiscoveryV5Flag.Name) + if ctx.IsSet(DiscoveryV4Flag.Name) { + cfg.DiscoveryV4 = ctx.Bool(DiscoveryV4Flag.Name) + } + if ctx.IsSet(DiscoveryV5Flag.Name) { + cfg.DiscoveryV5 = ctx.Bool(DiscoveryV5Flag.Name) + } if netrestrict := ctx.String(NetrestrictFlag.Name); netrestrict != "" { list, err := netutil.ParseNetlist(netrestrict) diff --git a/node/defaults.go b/node/defaults.go index 6c643e2b54..403a7f88a3 100644 --- a/node/defaults.go +++ b/node/defaults.go @@ -69,9 +69,11 @@ var DefaultConfig = Config{ BatchResponseMaxSize: 25 * 1000 * 1000, GraphQLVirtualHosts: []string{"localhost"}, P2P: p2p.Config{ - ListenAddr: ":30303", - MaxPeers: 50, - NAT: nat.Any(), + ListenAddr: ":30303", + MaxPeers: 50, + NAT: nat.Any(), + DiscoveryV4: true, + DiscoveryV5: true, }, DBEngine: "", // Use whatever exists, will default to Pebble if non-existent and supported } From fed8e09ab09e4500cb8f7a917e50654f7c20fd0e Mon Sep 17 00:00:00 2001 From: Bhargava Shastry Date: Fri, 28 Nov 2025 
11:22:36 +0100 Subject: [PATCH 155/277] cmd/evm: add stdin support to blocktest command (#32824) Enable blocktest to read filenames from stdin when no path argument is provided, matching the existing statetest behavior. This allows efficient batch processing of blockchain tests. Usage: - Single file: evm blocktest - Batch mode: find tests/ -name "*.json" | evm blocktest --------- Co-authored-by: Claude Co-authored-by: MariusVanDerWijden Co-authored-by: Felix Lange --- cmd/evm/blockrunner.go | 71 ++++++++++++++++++++++++++++++++-------- cmd/evm/main.go | 5 +++ tests/block_test_util.go | 11 ++++++- 3 files changed, 73 insertions(+), 14 deletions(-) diff --git a/cmd/evm/blockrunner.go b/cmd/evm/blockrunner.go index f6538b1356..c6fac5396e 100644 --- a/cmd/evm/blockrunner.go +++ b/cmd/evm/blockrunner.go @@ -17,16 +17,18 @@ package main import ( + "bufio" "encoding/json" - "errors" "fmt" "maps" "os" "regexp" "slices" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/rawdb" + "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/tests" "github.com/urfave/cli/v2" ) @@ -34,33 +36,52 @@ import ( var blockTestCommand = &cli.Command{ Action: blockTestCmd, Name: "blocktest", - Usage: "Executes the given blockchain tests", + Usage: "Executes the given blockchain tests. Filenames can be fed via standard input (batch mode) or as an argument (one-off execution).", ArgsUsage: "", Flags: slices.Concat([]cli.Flag{ DumpFlag, HumanReadableFlag, RunFlag, WitnessCrossCheckFlag, + FuzzFlag, }, traceFlags), } func blockTestCmd(ctx *cli.Context) error { path := ctx.Args().First() - if len(path) == 0 { - return errors.New("path argument required") + + // If path is provided, run the tests at that path. 
+ if len(path) != 0 { + var ( + collected = collectFiles(path) + results []testResult + ) + for _, fname := range collected { + r, err := runBlockTest(ctx, fname) + if err != nil { + return err + } + results = append(results, r...) + } + report(ctx, results) + return nil } - var ( - collected = collectFiles(path) - results []testResult - ) - for _, fname := range collected { - r, err := runBlockTest(ctx, fname) + // Otherwise, read filenames from stdin and execute back-to-back. + scanner := bufio.NewScanner(os.Stdin) + for scanner.Scan() { + fname := scanner.Text() + if len(fname) == 0 { + return nil + } + results, err := runBlockTest(ctx, fname) if err != nil { return err } - results = append(results, r...) + // During fuzzing, we report the result after every block + if !ctx.IsSet(FuzzFlag.Name) { + report(ctx, results) + } } - report(ctx, results) return nil } @@ -79,6 +100,11 @@ func runBlockTest(ctx *cli.Context, fname string) ([]testResult, error) { } tracer := tracerFromFlags(ctx) + // Suppress INFO logs during fuzzing + if ctx.IsSet(FuzzFlag.Name) { + log.SetDefault(log.NewLogger(log.DiscardHandler())) + } + // Pull out keys to sort and ensure tests are run in order. 
keys := slices.Sorted(maps.Keys(tests)) @@ -88,16 +114,35 @@ func runBlockTest(ctx *cli.Context, fname string) ([]testResult, error) { if !re.MatchString(name) { continue } + test := tests[name] result := &testResult{Name: name, Pass: true} - if err := tests[name].Run(false, rawdb.PathScheme, ctx.Bool(WitnessCrossCheckFlag.Name), tracer, func(res error, chain *core.BlockChain) { + var finalRoot *common.Hash + if err := test.Run(false, rawdb.PathScheme, ctx.Bool(WitnessCrossCheckFlag.Name), tracer, func(res error, chain *core.BlockChain) { if ctx.Bool(DumpFlag.Name) { if s, _ := chain.State(); s != nil { result.State = dump(s) } } + // Capture final state root for end marker + if chain != nil { + root := chain.CurrentBlock().Root + finalRoot = &root + } }); err != nil { result.Pass, result.Error = false, err.Error() } + + // Always assign fork (regardless of pass/fail or tracer) + result.Fork = test.Network() + // Assign root if test succeeded + if result.Pass && finalRoot != nil { + result.Root = finalRoot + } + + // When fuzzing, write results after every block + if ctx.IsSet(FuzzFlag.Name) { + report(ctx, []testResult{*result}) + } results = append(results, *result) } return results, nil diff --git a/cmd/evm/main.go b/cmd/evm/main.go index 5238d5920c..57741b5f9c 100644 --- a/cmd/evm/main.go +++ b/cmd/evm/main.go @@ -55,6 +55,11 @@ var ( Usage: "benchmark the execution", Category: flags.VMCategory, } + FuzzFlag = &cli.BoolFlag{ + Name: "fuzz", + Usage: "adapts output format for fuzzing", + Category: flags.VMCategory, + } WitnessCrossCheckFlag = &cli.BoolFlag{ Name: "cross-check", Aliases: []string{"xc"}, diff --git a/tests/block_test_util.go b/tests/block_test_util.go index 2ced18787a..52fe58e702 100644 --- a/tests/block_test_util.go +++ b/tests/block_test_util.go @@ -116,6 +116,15 @@ func (t *BlockTest) Run(snapshotter bool, scheme string, witness bool, tracer *t if !ok { return UnsupportedForkError{t.json.Network} } + return t.run(config, snapshotter, scheme, 
witness, tracer, postCheck) +} + +// Network returns the network/fork name for this test. +func (t *BlockTest) Network() string { + return t.json.Network +} + +func (t *BlockTest) run(config *params.ChainConfig, snapshotter bool, scheme string, witness bool, tracer *tracing.Hooks, postCheck func(error, *core.BlockChain)) (result error) { // import pre accounts & construct test genesis block & state root // Commit genesis state var ( @@ -260,7 +269,7 @@ func (t *BlockTest) insertBlocks(blockchain *core.BlockChain) ([]btBlock, error) } if b.BlockHeader == nil { if data, err := json.MarshalIndent(cb.Header(), "", " "); err == nil { - fmt.Fprintf(os.Stderr, "block (index %d) insertion should have failed due to: %v:\n%v\n", + fmt.Fprintf(os.Stdout, "block (index %d) insertion should have failed due to: %v:\n%v\n", bi, b.ExpectException, string(data)) } return nil, fmt.Errorf("block (index %d) insertion should have failed due to: %v", From a122dbe459932868a73fb937376c6badd01d364f Mon Sep 17 00:00:00 2001 From: Felix Lange Date: Fri, 28 Nov 2025 11:28:31 +0100 Subject: [PATCH 156/277] internal/ethapi: return error code -32602 for invalid storage key (#33282) This was found because other clients are failing RPC tests generated by Geth. Nethermind and Besu return the correct error code, -32602, in this situation. --- internal/ethapi/api.go | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/internal/ethapi/api.go b/internal/ethapi/api.go index d7cf47468c..997ed47926 100644 --- a/internal/ethapi/api.go +++ b/internal/ethapi/api.go @@ -374,9 +374,9 @@ func (api *BlockChainAPI) GetProof(ctx context.Context, address common.Address, // Deserialize all keys. This prevents state access on invalid input. 
for i, hexKey := range storageKeys { var err error - keys[i], keyLengths[i], err = decodeHash(hexKey) + keys[i], keyLengths[i], err = decodeStorageKey(hexKey) if err != nil { - return nil, err + return nil, &invalidParamsError{fmt.Sprintf("%v: %q", err, hexKey)} } } statedb, header, err := api.b.StateAndHeaderByNumberOrHash(ctx, blockNrOrHash) @@ -441,9 +441,10 @@ func (api *BlockChainAPI) GetProof(ctx context.Context, address common.Address, }, statedb.Error() } -// decodeHash parses a hex-encoded 32-byte hash. The input may optionally -// be prefixed by 0x and can have a byte length up to 32. -func decodeHash(s string) (h common.Hash, inputLength int, err error) { +// decodeStorageKey parses a hex-encoded 32-byte hash. +// For legacy compatibility reasons, we parse these keys leniently, +// with the 0x prefix being optional. +func decodeStorageKey(s string) (h common.Hash, inputLength int, err error) { if strings.HasPrefix(s, "0x") || strings.HasPrefix(s, "0X") { s = s[2:] } @@ -451,11 +452,11 @@ func decodeHash(s string) (h common.Hash, inputLength int, err error) { s = "0" + s } if len(s) > 64 { - return common.Hash{}, len(s) / 2, errors.New("hex string too long, want at most 32 bytes") + return common.Hash{}, len(s) / 2, errors.New("storage key too long (want at most 32 bytes)") } b, err := hex.DecodeString(s) if err != nil { - return common.Hash{}, 0, errors.New("hex string invalid") + return common.Hash{}, 0, errors.New("invalid hex in storage key") } return common.BytesToHash(b), len(b), nil } @@ -589,9 +590,9 @@ func (api *BlockChainAPI) GetStorageAt(ctx context.Context, address common.Addre if state == nil || err != nil { return nil, err } - key, _, err := decodeHash(hexKey) + key, _, err := decodeStorageKey(hexKey) if err != nil { - return nil, fmt.Errorf("unable to decode storage key: %s", err) + return nil, &invalidParamsError{fmt.Sprintf("%v: %q", err, hexKey)} } res := state.GetState(address, key) return res[:], state.Error() From 
28376aea788a95e078ff800989e01b9c43cba3fc Mon Sep 17 00:00:00 2001 From: lightclient <14004106+lightclient@users.noreply.github.com> Date: Fri, 28 Nov 2025 10:02:24 -0300 Subject: [PATCH 157/277] eth/catalyst: check fork timestamps during `engine_getPayload` (#32754) This adds checks into getPayload to ensure the correct version is called for the fork which applies to the payload. --------- Co-authored-by: jsvisa --- eth/catalyst/api.go | 74 ++++++++++++++++++++++++++++----------------- 1 file changed, 46 insertions(+), 28 deletions(-) diff --git a/eth/catalyst/api.go b/eth/catalyst/api.go index f88acb5cff..12013485e5 100644 --- a/eth/catalyst/api.go +++ b/eth/catalyst/api.go @@ -402,10 +402,12 @@ func (api *ConsensusAPI) ExchangeTransitionConfigurationV1(config engine.Transit // GetPayloadV1 returns a cached payload by id. func (api *ConsensusAPI) GetPayloadV1(payloadID engine.PayloadID) (*engine.ExecutableData, error) { - if !payloadID.Is(engine.PayloadV1) { - return nil, engine.UnsupportedFork - } - data, err := api.getPayload(payloadID, false) + data, err := api.getPayload( + payloadID, + false, + []engine.PayloadVersion{engine.PayloadV1}, + nil, + ) if err != nil { return nil, err } @@ -414,35 +416,34 @@ func (api *ConsensusAPI) GetPayloadV1(payloadID engine.PayloadID) (*engine.Execu // GetPayloadV2 returns a cached payload by id. 
func (api *ConsensusAPI) GetPayloadV2(payloadID engine.PayloadID) (*engine.ExecutionPayloadEnvelope, error) { - // executionPayload: ExecutionPayloadV1 | ExecutionPayloadV2 where: - // - // - ExecutionPayloadV1 MUST be returned if the payload timestamp is lower - // than the Shanghai timestamp - // - // - ExecutionPayloadV2 MUST be returned if the payload timestamp is greater - // or equal to the Shanghai timestamp - if !payloadID.Is(engine.PayloadV1, engine.PayloadV2) { - return nil, engine.UnsupportedFork - } - return api.getPayload(payloadID, false) + return api.getPayload( + payloadID, + false, + []engine.PayloadVersion{engine.PayloadV1, engine.PayloadV2}, + []forks.Fork{forks.Shanghai}, + ) } // GetPayloadV3 returns a cached payload by id. This endpoint should only // be used for the Cancun fork. func (api *ConsensusAPI) GetPayloadV3(payloadID engine.PayloadID) (*engine.ExecutionPayloadEnvelope, error) { - if !payloadID.Is(engine.PayloadV3) { - return nil, engine.UnsupportedFork - } - return api.getPayload(payloadID, false) + return api.getPayload( + payloadID, + false, + []engine.PayloadVersion{engine.PayloadV3}, + []forks.Fork{forks.Cancun}, + ) } // GetPayloadV4 returns a cached payload by id. This endpoint should only // be used for the Prague fork. func (api *ConsensusAPI) GetPayloadV4(payloadID engine.PayloadID) (*engine.ExecutionPayloadEnvelope, error) { - if !payloadID.Is(engine.PayloadV3) { - return nil, engine.UnsupportedFork - } - return api.getPayload(payloadID, false) + return api.getPayload( + payloadID, + false, + []engine.PayloadVersion{engine.PayloadV3}, + []forks.Fork{forks.Prague}, + ) } // GetPayloadV5 returns a cached payload by id. This endpoint should only @@ -451,18 +452,35 @@ func (api *ConsensusAPI) GetPayloadV4(payloadID engine.PayloadID) (*engine.Execu // This method follows the same specification as engine_getPayloadV4 with // changes of returning BlobsBundleV2 with BlobSidecar version 1. 
func (api *ConsensusAPI) GetPayloadV5(payloadID engine.PayloadID) (*engine.ExecutionPayloadEnvelope, error) { - if !payloadID.Is(engine.PayloadV3) { - return nil, engine.UnsupportedFork - } - return api.getPayload(payloadID, false) + return api.getPayload( + payloadID, + false, + []engine.PayloadVersion{engine.PayloadV3}, + []forks.Fork{ + forks.Osaka, + forks.BPO1, + forks.BPO2, + forks.BPO3, + forks.BPO4, + forks.BPO5, + }) } -func (api *ConsensusAPI) getPayload(payloadID engine.PayloadID, full bool) (*engine.ExecutionPayloadEnvelope, error) { +// getPayload will retreive the specified payload and verify it conforms to the +// endpoint's allowed payload versions and forks. +func (api *ConsensusAPI) getPayload(payloadID engine.PayloadID, full bool, versions []engine.PayloadVersion, forks []forks.Fork) (*engine.ExecutionPayloadEnvelope, error) { log.Trace("Engine API request received", "method", "GetPayload", "id", payloadID) + if !payloadID.Is(versions...) { + return nil, engine.UnsupportedFork + } data := api.localBlocks.get(payloadID, full) if data == nil { return nil, engine.UnknownPayload } + if forks != nil && !api.checkFork(data.ExecutionPayload.Timestamp, forks...) 
{ + return nil, engine.UnsupportedFork + } + return data, nil } From fbbaa3c84923a2557abc8ebcb77b3a6664133625 Mon Sep 17 00:00:00 2001 From: Felix Lange Date: Fri, 28 Nov 2025 15:06:11 +0100 Subject: [PATCH 158/277] eth/catalyst: fix tests for getPayload change (#33322) Fixes a test/lint regression introduced by #32754 --- eth/catalyst/api.go | 6 ++++-- eth/catalyst/api_test.go | 8 ++++---- eth/catalyst/simulated_beacon.go | 2 +- 3 files changed, 9 insertions(+), 7 deletions(-) diff --git a/eth/catalyst/api.go b/eth/catalyst/api.go index 12013485e5..0386bac556 100644 --- a/eth/catalyst/api.go +++ b/eth/catalyst/api.go @@ -466,11 +466,13 @@ func (api *ConsensusAPI) GetPayloadV5(payloadID engine.PayloadID) (*engine.Execu }) } -// getPayload will retreive the specified payload and verify it conforms to the +// getPayload will retrieve the specified payload and verify it conforms to the // endpoint's allowed payload versions and forks. +// +// Note passing nil `forks`, `versions` disables the respective check. func (api *ConsensusAPI) getPayload(payloadID engine.PayloadID, full bool, versions []engine.PayloadVersion, forks []forks.Fork) (*engine.ExecutionPayloadEnvelope, error) { log.Trace("Engine API request received", "method", "GetPayload", "id", payloadID) - if !payloadID.Is(versions...) { + if versions != nil && !payloadID.Is(versions...) 
{ return nil, engine.UnsupportedFork } data := api.localBlocks.get(payloadID, full) diff --git a/eth/catalyst/api_test.go b/eth/catalyst/api_test.go index a29fee1a06..2284a33453 100644 --- a/eth/catalyst/api_test.go +++ b/eth/catalyst/api_test.go @@ -203,7 +203,7 @@ func TestEth2PrepareAndGetPayload(t *testing.T) { BeaconRoot: blockParams.BeaconRoot, Version: engine.PayloadV1, }).Id() - execData, err := api.getPayload(payloadID, true) + execData, err := api.getPayload(payloadID, true, nil, nil) if err != nil { t.Fatalf("error getting payload, err=%v", err) } @@ -636,7 +636,7 @@ func TestNewPayloadOnInvalidChain(t *testing.T) { t.Fatalf("error preparing payload, invalid status: %v", resp.PayloadStatus.Status) } // give the payload some time to be built - if payload, err = api.getPayload(*resp.PayloadID, true); err != nil { + if payload, err = api.getPayload(*resp.PayloadID, true, nil, nil); err != nil { t.Fatalf("can't get payload: %v", err) } if len(payload.ExecutionPayload.Transactions) > 0 { @@ -1219,7 +1219,7 @@ func TestNilWithdrawals(t *testing.T) { Random: test.blockParams.Random, Version: payloadVersion, }).Id() - execData, err := api.GetPayloadV2(payloadID) + execData, err := api.getPayload(payloadID, false, nil, nil) if err != nil { t.Fatalf("error getting payload, err=%v", err) } @@ -1674,7 +1674,7 @@ func TestWitnessCreationAndConsumption(t *testing.T) { BeaconRoot: blockParams.BeaconRoot, Version: engine.PayloadV3, }).Id() - envelope, err := api.getPayload(payloadID, true) + envelope, err := api.getPayload(payloadID, true, nil, nil) if err != nil { t.Fatalf("error getting payload, err=%v", err) } diff --git a/eth/catalyst/simulated_beacon.go b/eth/catalyst/simulated_beacon.go index d9f01240a7..92f9798e71 100644 --- a/eth/catalyst/simulated_beacon.go +++ b/eth/catalyst/simulated_beacon.go @@ -214,7 +214,7 @@ func (c *SimulatedBeacon) sealBlock(withdrawals []*types.Withdrawal, timestamp u return nil } - envelope, err := 
c.engineAPI.getPayload(*fcResponse.PayloadID, true) + envelope, err := c.engineAPI.getPayload(*fcResponse.PayloadID, true, nil, nil) if err != nil { return err } From f12f0ec0cda9f9a1a57c78f8a302815afafb2adf Mon Sep 17 00:00:00 2001 From: lightclient <14004106+lightclient@users.noreply.github.com> Date: Fri, 28 Nov 2025 11:24:21 -0300 Subject: [PATCH 159/277] cmd/utils: allow --networkid to override other config options (#32999) Recently in #31630 we removed support for overriding the network id in preset networks. While this feature is niche, it is useful for shadow forks. This PR proposes we add the functionality back, but in a simpler way. Instead of checking whether the flag is set in each branch of the network switch statement, simply apply the network flag after the switch statement is complete. This retains the following behavior: 1. Auto network id based on chain id still works, because `IsSet` only returns true if the flag is _actually_ set. Not if it just has a default set. 2. The preset networks will set their network id directly and only if the network id flag is set is it overridden. This, combined with the override genesis flag is what allows the shadow forks. 3. Setting the network id to the same network id that the preset _would have_ set causes no issues and simply emits the `WARN` that the flag is being set explicitly. I don't think people explicitly set the network id flag often. ``` WARN [10-22|09:36:15.052] Setting network id with flag id=10 ``` --------- Co-authored-by: Felix Lange --- cmd/geth/consolecmd_test.go | 3 ++- cmd/utils/flags.go | 19 ++++++++++++------- 2 files changed, 14 insertions(+), 8 deletions(-) diff --git a/cmd/geth/consolecmd_test.go b/cmd/geth/consolecmd_test.go index 4e1f6340a0..871e8c175f 100644 --- a/cmd/geth/consolecmd_test.go +++ b/cmd/geth/consolecmd_test.go @@ -39,8 +39,9 @@ const ( // child g gets a temporary data directory. 
func runMinimalGeth(t *testing.T, args ...string) *testgeth { // --holesky to make the 'writing genesis to disk' faster (no accounts) + // --networkid=1337 to avoid cache bump // --syncmode=full to avoid allocating fast sync bloom - allArgs := []string{"--holesky", "--authrpc.port", "0", "--syncmode=full", "--port", "0", + allArgs := []string{"--holesky", "--networkid", "1337", "--authrpc.port", "0", "--syncmode=full", "--port", "0", "--nat", "none", "--nodiscover", "--maxpeers", "0", "--cache", "64", "--datadir.minfreedisk", "0"} return runGeth(t, append(allArgs, args...)...) diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index 9f69581754..49d8677ca2 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -137,7 +137,7 @@ var ( } NetworkIdFlag = &cli.Uint64Flag{ Name: "networkid", - Usage: "Explicitly set network id (integer)(For testnets: use --sepolia, --holesky, --hoodi instead)", + Usage: "Explicitly set network ID (integer)(For testnets: use --sepolia, --holesky, --hoodi instead)", Value: ethconfig.Defaults.NetworkId, Category: flags.EthCategory, } @@ -1615,8 +1615,8 @@ func setRequiredBlocks(ctx *cli.Context, cfg *ethconfig.Config) { // SetEthConfig applies eth-related command line flags to the config. 
func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) { - // Avoid conflicting network flags, don't allow network id override on preset networks - flags.CheckExclusive(ctx, MainnetFlag, DeveloperFlag, SepoliaFlag, HoleskyFlag, HoodiFlag, NetworkIdFlag, OverrideGenesisFlag) + // Avoid conflicting network flags + flags.CheckExclusive(ctx, MainnetFlag, DeveloperFlag, SepoliaFlag, HoleskyFlag, HoodiFlag, OverrideGenesisFlag) flags.CheckExclusive(ctx, DeveloperFlag, ExternalSignerFlag) // Can't use both ephemeral unlocked and external signer // Set configurations from CLI flags @@ -1663,9 +1663,6 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) { } } - if ctx.IsSet(NetworkIdFlag.Name) { - cfg.NetworkId = ctx.Uint64(NetworkIdFlag.Name) - } if ctx.IsSet(CacheFlag.Name) || ctx.IsSet(CacheDatabaseFlag.Name) { cfg.DatabaseCache = ctx.Int(CacheFlag.Name) * ctx.Int(CacheDatabaseFlag.Name) / 100 } @@ -1915,10 +1912,18 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) { } cfg.Genesis = genesis default: - if cfg.NetworkId == 1 { + if ctx.Uint64(NetworkIdFlag.Name) == 1 { SetDNSDiscoveryDefaults(cfg, params.MainnetGenesisHash) } } + if ctx.IsSet(NetworkIdFlag.Name) { + // Typically it's best to automatically set the network ID to the chainID, + // by not passing the --networkid flag at all. Emit a warning when set + // explicitly in case overriding the network ID is not the user's intention. 
+ id := ctx.Uint64(NetworkIdFlag.Name) + log.Warn("Setting network ID with command-line flag", "id", id) + cfg.NetworkId = id + } // Set any dangling config values if ctx.String(CryptoKZGFlag.Name) != "gokzg" && ctx.String(CryptoKZGFlag.Name) != "ckzg" { Fatalf("--%s flag must be 'gokzg' or 'ckzg'", CryptoKZGFlag.Name) From 5d512083347d7bdd1c9d92416dfd1d20dfc01544 Mon Sep 17 00:00:00 2001 From: Sina M <1591639+s1na@users.noreply.github.com> Date: Fri, 28 Nov 2025 15:32:40 +0100 Subject: [PATCH 160/277] internal/ethapi: change default tx type to 0x2 (#33058) We still default to legacy txes for methods like eth_sendTransaction, eth_signTransaction. We can default to 0x2 and if someone would like to stay on legacy they can do so by setting the `gasPrice` field. cc @deffrian --- eth/tracers/api.go | 2 +- internal/ethapi/api.go | 10 +++++----- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/eth/tracers/api.go b/eth/tracers/api.go index 5cfbc24b8e..5f2f16627a 100644 --- a/eth/tracers/api.go +++ b/eth/tracers/api.go @@ -986,7 +986,7 @@ func (api *API) TraceCall(ctx context.Context, args ethapi.TransactionArgs, bloc } var ( msg = args.ToMessage(blockContext.BaseFee, true) - tx = args.ToTransaction(types.LegacyTxType) + tx = args.ToTransaction(types.DynamicFeeTxType) traceConfig *TraceConfig ) // Lower the basefee to 0 to avoid breaking EVM diff --git a/internal/ethapi/api.go b/internal/ethapi/api.go index 997ed47926..eb437201d5 100644 --- a/internal/ethapi/api.go +++ b/internal/ethapi/api.go @@ -1607,7 +1607,7 @@ func (api *TransactionAPI) SendTransaction(ctx context.Context, args Transaction return common.Hash{}, err } // Assemble the transaction and sign with the wallet - tx := args.ToTransaction(types.LegacyTxType) + tx := args.ToTransaction(types.DynamicFeeTxType) signed, err := wallet.SignTx(account, tx, api.b.ChainConfig().ChainID) if err != nil { @@ -1629,7 +1629,7 @@ func (api *TransactionAPI) FillTransaction(ctx context.Context, args Transaction 
return nil, err } // Assemble the transaction and obtain rlp - tx := args.ToTransaction(types.LegacyTxType) + tx := args.ToTransaction(types.DynamicFeeTxType) data, err := tx.MarshalBinary() if err != nil { return nil, err @@ -1825,7 +1825,7 @@ func (api *TransactionAPI) SignTransaction(ctx context.Context, args Transaction return nil, err } // Before actually sign the transaction, ensure the transaction fee is reasonable. - tx := args.ToTransaction(types.LegacyTxType) + tx := args.ToTransaction(types.DynamicFeeTxType) if err := checkTxFee(tx.GasPrice(), tx.Gas(), api.b.RPCTxFeeCap()); err != nil { return nil, err } @@ -1881,7 +1881,7 @@ func (api *TransactionAPI) Resend(ctx context.Context, sendArgs TransactionArgs, if err := sendArgs.setDefaults(ctx, api.b, sidecarConfig{}); err != nil { return common.Hash{}, err } - matchTx := sendArgs.ToTransaction(types.LegacyTxType) + matchTx := sendArgs.ToTransaction(types.DynamicFeeTxType) // Before replacing the old transaction, ensure the _new_ transaction fee is reasonable. 
price := matchTx.GasPrice() @@ -1911,7 +1911,7 @@ func (api *TransactionAPI) Resend(ctx context.Context, sendArgs TransactionArgs, if gasLimit != nil && *gasLimit != 0 { sendArgs.Gas = gasLimit } - signedTx, err := api.sign(sendArgs.from(), sendArgs.ToTransaction(types.LegacyTxType)) + signedTx, err := api.sign(sendArgs.from(), sendArgs.ToTransaction(types.DynamicFeeTxType)) if err != nil { return common.Hash{}, err } From cd3f9b24e964a2dfca036387e51452798a59dfea Mon Sep 17 00:00:00 2001 From: cui Date: Mon, 1 Dec 2025 10:12:59 +0800 Subject: [PATCH 161/277] cmd/utils: fix disabling cache preimages through config file (#33330) --- cmd/utils/flags.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index 49d8677ca2..996cb276ee 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -1683,8 +1683,9 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) { if ctx.IsSet(CacheNoPrefetchFlag.Name) { cfg.NoPrefetch = ctx.Bool(CacheNoPrefetchFlag.Name) } - // Read the value from the flag no matter if it's set or not. 
- cfg.Preimages = ctx.Bool(CachePreimagesFlag.Name) + if ctx.IsSet(CachePreimagesFlag.Name) { + cfg.Preimages = ctx.Bool(CachePreimagesFlag.Name) + } if cfg.NoPruning && !cfg.Preimages { cfg.Preimages = true log.Info("Enabling recording of key preimages since archive mode is used") From 6f2cbb7a27ba7e62b0bdb2090755ef0d271714be Mon Sep 17 00:00:00 2001 From: Forostovec Date: Mon, 1 Dec 2025 04:19:21 +0200 Subject: [PATCH 162/277] triedb/pathdb: allow single-element history ranges (#33329) --- triedb/pathdb/history_inspect.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/triedb/pathdb/history_inspect.go b/triedb/pathdb/history_inspect.go index a839a184ca..74b8bb8df2 100644 --- a/triedb/pathdb/history_inspect.go +++ b/triedb/pathdb/history_inspect.go @@ -55,7 +55,7 @@ func sanitizeRange(start, end uint64, freezer ethdb.AncientReader) (uint64, uint last = end } // Make sure the range is valid - if first >= last { + if first > last { return 0, 0, fmt.Errorf("range is invalid, first: %d, last: %d", first, last) } return first, last, nil From da3822dcec13f30de5ae0f0b71a4bf61f0e2bac7 Mon Sep 17 00:00:00 2001 From: Fallengirl <155266340+Fallengirl@users.noreply.github.com> Date: Mon, 1 Dec 2025 14:50:03 +0100 Subject: [PATCH 163/277] internal/debug: fix log memory limit format (#33336) --- internal/debug/api.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/debug/api.go b/internal/debug/api.go index 1bac36e908..5a2781cc77 100644 --- a/internal/debug/api.go +++ b/internal/debug/api.go @@ -252,7 +252,7 @@ func (*HandlerT) SetGCPercent(v int) int { // - Geth also allocates memory off-heap, particularly for fastCache and Pebble, // which can be non-trivial (a few gigabytes by default). 
func (*HandlerT) SetMemoryLimit(limit int64) int64 { - log.Info("Setting memory limit", "size", common.PrettyDuration(limit)) + log.Info("Setting memory limit", "size", common.StorageSize(limit)) return debug.SetMemoryLimit(limit) } From 042c47ce1a089ab15b7b0eed58e9d0b629b6dde6 Mon Sep 17 00:00:00 2001 From: rjl493456442 Date: Tue, 2 Dec 2025 21:43:51 +0800 Subject: [PATCH 164/277] core: log detailed statistics for slow block (#32812) This PR introduces a new debug feature, logging the slow blocks with detailed performance statistics, such as state read, EVM execution and so on. Notably, the detailed performance statistics of slow blocks won't be logged during the sync to not overwhelm users. Specifically, the statistics are only logged if there is a single block processed. Example output ``` ########## SLOW BLOCK ######### Block: 23537063 (0xa7f878611c2dd27f245fc41107d12ebcf06b4e289f1d6acf44d49a169554ee09) txs: 248, mgasps: 202.99 EVM execution: 63.295ms Validation: 1.130ms Account read: 6.634ms(648) Storage read: 17.391ms(1434) State hash: 6.722ms DB commit: 3.260ms Block write: 1.954ms Total: 99.094ms State read cache: account (hit: 622, miss: 26), storage (hit: 1325, miss: 109) ############################## ``` --- cmd/geth/chaincmd.go | 1 + cmd/geth/main.go | 1 + cmd/utils/flags.go | 13 ++++ core/blockchain.go | 143 +++++++++++++++++++++--------------- core/blockchain_stats.go | 138 ++++++++++++++++++++++++++++++++++ core/blockchain_test.go | 4 +- core/state/reader.go | 53 +++++++++---- core/state/statedb.go | 9 ++- eth/api_debug.go | 6 -- eth/backend.go | 1 + eth/ethconfig/config.go | 5 ++ eth/ethconfig/gen_config.go | 6 ++ 12 files changed, 293 insertions(+), 87 deletions(-) create mode 100644 core/blockchain_stats.go diff --git a/cmd/geth/chaincmd.go b/cmd/geth/chaincmd.go index c5145bbfb7..e535d7d892 100644 --- a/cmd/geth/chaincmd.go +++ b/cmd/geth/chaincmd.go @@ -96,6 +96,7 @@ if one is set. 
Otherwise it prints the genesis from the datadir.`, utils.CacheNoPrefetchFlag, utils.CachePreimagesFlag, utils.NoCompactionFlag, + utils.LogSlowBlockFlag, utils.MetricsEnabledFlag, utils.MetricsEnabledExpensiveFlag, utils.MetricsHTTPFlag, diff --git a/cmd/geth/main.go b/cmd/geth/main.go index 851ae1ce0b..b294ee593e 100644 --- a/cmd/geth/main.go +++ b/cmd/geth/main.go @@ -157,6 +157,7 @@ var ( utils.BeaconGenesisTimeFlag, utils.BeaconCheckpointFlag, utils.BeaconCheckpointFileFlag, + utils.LogSlowBlockFlag, }, utils.NetworkFlags, utils.DatabaseFlags) rpcFlags = []cli.Flag{ diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index 996cb276ee..0d53716f6c 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -672,6 +672,12 @@ var ( Usage: "Disables db compaction after import", Category: flags.LoggingCategory, } + LogSlowBlockFlag = &cli.DurationFlag{ + Name: "debug.logslowblock", + Usage: "Block execution time threshold beyond which detailed statistics will be logged (0 means disable)", + Value: ethconfig.Defaults.SlowBlockThreshold, + Category: flags.LoggingCategory, + } // MISC settings SyncTargetFlag = &cli.StringFlag{ @@ -1720,6 +1726,9 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) { if ctx.IsSet(LogNoHistoryFlag.Name) { cfg.LogNoHistory = true } + if ctx.IsSet(LogSlowBlockFlag.Name) { + cfg.SlowBlockThreshold = ctx.Duration(LogSlowBlockFlag.Name) + } if ctx.IsSet(LogExportCheckpointsFlag.Name) { cfg.LogExportCheckpoints = ctx.String(LogExportCheckpointsFlag.Name) } @@ -2299,6 +2308,7 @@ func MakeChain(ctx *cli.Context, stack *node.Node, readonly bool) (*core.BlockCh Preimages: ctx.Bool(CachePreimagesFlag.Name), StateScheme: scheme, StateHistory: ctx.Uint64(StateHistoryFlag.Name), + // Disable transaction indexing/unindexing. 
TxLookupLimit: -1, @@ -2310,6 +2320,9 @@ func MakeChain(ctx *cli.Context, stack *node.Node, readonly bool) (*core.BlockCh // Enable state size tracking if enabled StateSizeTracking: ctx.Bool(StateSizeTrackingFlag.Name), + + // Configure the slow block statistic logger + SlowBlockThreshold: ctx.Duration(LogSlowBlockFlag.Name), } if options.ArchiveMode && !options.Preimages { options.Preimages = true diff --git a/core/blockchain.go b/core/blockchain.go index b7acd12aca..7fe39e2b65 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -198,6 +198,10 @@ type BlockChainConfig struct { // StateSizeTracking indicates whether the state size tracking is enabled. StateSizeTracking bool + + // SlowBlockThreshold is the block execution time threshold beyond which + // detailed statistics will be logged. + SlowBlockThreshold time.Duration } // DefaultConfig returns the default config. @@ -337,7 +341,8 @@ type BlockChain struct { logger *tracing.Hooks stateSizer *state.SizeTracker // State size tracking - lastForkReadyAlert time.Time // Last time there was a fork readiness print out + lastForkReadyAlert time.Time // Last time there was a fork readiness print out + slowBlockThreshold time.Duration // Block execution time threshold beyond which detailed statistics will be logged } // NewBlockChain returns a fully initialised block chain using information @@ -372,19 +377,20 @@ func NewBlockChain(db ethdb.Database, genesis *Genesis, engine consensus.Engine, log.Info("") bc := &BlockChain{ - chainConfig: chainConfig, - cfg: cfg, - db: db, - triedb: triedb, - triegc: prque.New[int64, common.Hash](nil), - chainmu: syncx.NewClosableMutex(), - bodyCache: lru.NewCache[common.Hash, *types.Body](bodyCacheLimit), - bodyRLPCache: lru.NewCache[common.Hash, rlp.RawValue](bodyCacheLimit), - receiptsCache: lru.NewCache[common.Hash, []*types.Receipt](receiptsCacheLimit), - blockCache: lru.NewCache[common.Hash, *types.Block](blockCacheLimit), - txLookupCache: lru.NewCache[common.Hash, 
txLookup](txLookupCacheLimit), - engine: engine, - logger: cfg.VmConfig.Tracer, + chainConfig: chainConfig, + cfg: cfg, + db: db, + triedb: triedb, + triegc: prque.New[int64, common.Hash](nil), + chainmu: syncx.NewClosableMutex(), + bodyCache: lru.NewCache[common.Hash, *types.Body](bodyCacheLimit), + bodyRLPCache: lru.NewCache[common.Hash, rlp.RawValue](bodyCacheLimit), + receiptsCache: lru.NewCache[common.Hash, []*types.Receipt](receiptsCacheLimit), + blockCache: lru.NewCache[common.Hash, *types.Block](blockCacheLimit), + txLookupCache: lru.NewCache[common.Hash, txLookup](txLookupCacheLimit), + engine: engine, + logger: cfg.VmConfig.Tracer, + slowBlockThreshold: cfg.SlowBlockThreshold, } bc.hc, err = NewHeaderChain(db, chainConfig, engine, bc.insertStopped) if err != nil { @@ -1847,7 +1853,7 @@ func (bc *BlockChain) insertChain(chain types.Blocks, setHead bool, makeWitness // still need re-execution to generate snapshots that are missing case err != nil && !errors.Is(err, ErrKnownBlock): stats.ignored += len(it.chain) - bc.reportBlock(block, nil, err) + bc.reportBadBlock(block, nil, err) return nil, it.index, err } // Track the singleton witness from this chain insertion (if any) @@ -1915,6 +1921,14 @@ func (bc *BlockChain) insertChain(chain types.Blocks, setHead bool, makeWitness if err != nil { return nil, it.index, err } + res.stats.reportMetrics() + + // Log slow block only if a single block is inserted (usually after the + // initial sync) to not overwhelm the users. 
+ if len(chain) == 1 { + res.stats.logSlow(block, bc.slowBlockThreshold) + } + // Report the import stats before returning the various results stats.processed++ stats.usedGas += res.usedGas @@ -1975,15 +1989,20 @@ type blockProcessingResult struct { procTime time.Duration status WriteStatus witness *stateless.Witness + stats *ExecuteStats } func (bpr *blockProcessingResult) Witness() *stateless.Witness { return bpr.witness } +func (bpr *blockProcessingResult) Stats() *ExecuteStats { + return bpr.stats +} + // ProcessBlock executes and validates the given block. If there was no error // it writes the block and associated state to database. -func (bc *BlockChain) ProcessBlock(parentRoot common.Hash, block *types.Block, setHead bool, makeWitness bool) (_ *blockProcessingResult, blockEndErr error) { +func (bc *BlockChain) ProcessBlock(parentRoot common.Hash, block *types.Block, setHead bool, makeWitness bool) (result *blockProcessingResult, blockEndErr error) { var ( err error startTime = time.Now() @@ -2017,16 +2036,22 @@ func (bc *BlockChain) ProcessBlock(parentRoot common.Hash, block *types.Block, s } // Upload the statistics of reader at the end defer func() { - stats := prefetch.GetStats() - accountCacheHitPrefetchMeter.Mark(stats.AccountHit) - accountCacheMissPrefetchMeter.Mark(stats.AccountMiss) - storageCacheHitPrefetchMeter.Mark(stats.StorageHit) - storageCacheMissPrefetchMeter.Mark(stats.StorageMiss) - stats = process.GetStats() - accountCacheHitMeter.Mark(stats.AccountHit) - accountCacheMissMeter.Mark(stats.AccountMiss) - storageCacheHitMeter.Mark(stats.StorageHit) - storageCacheMissMeter.Mark(stats.StorageMiss) + pStat := prefetch.GetStats() + accountCacheHitPrefetchMeter.Mark(pStat.AccountCacheHit) + accountCacheMissPrefetchMeter.Mark(pStat.AccountCacheMiss) + storageCacheHitPrefetchMeter.Mark(pStat.StorageCacheHit) + storageCacheMissPrefetchMeter.Mark(pStat.StorageCacheMiss) + + rStat := process.GetStats() + 
accountCacheHitMeter.Mark(rStat.AccountCacheHit) + accountCacheMissMeter.Mark(rStat.AccountCacheMiss) + storageCacheHitMeter.Mark(rStat.StorageCacheHit) + storageCacheMissMeter.Mark(rStat.StorageCacheMiss) + + if result != nil { + result.stats.StatePrefetchCacheStats = pStat + result.stats.StateReadCacheStats = rStat + } }() go func(start time.Time, throwaway *state.StateDB, block *types.Block) { @@ -2083,14 +2108,14 @@ func (bc *BlockChain) ProcessBlock(parentRoot common.Hash, block *types.Block, s pstart := time.Now() res, err := bc.processor.Process(block, statedb, bc.cfg.VmConfig) if err != nil { - bc.reportBlock(block, res, err) + bc.reportBadBlock(block, res, err) return nil, err } ptime := time.Since(pstart) vstart := time.Now() if err := bc.validator.ValidateState(block, statedb, res, false); err != nil { - bc.reportBlock(block, res, err) + bc.reportBadBlock(block, res, err) return nil, err } vtime := time.Since(vstart) @@ -2124,26 +2149,28 @@ func (bc *BlockChain) ProcessBlock(parentRoot common.Hash, block *types.Block, s } } - xvtime := time.Since(xvstart) - proctime := time.Since(startTime) // processing + validation + cross validation - + var ( + xvtime = time.Since(xvstart) + proctime = time.Since(startTime) // processing + validation + cross validation + stats = &ExecuteStats{} + ) // Update the metrics touched during block processing and validation - accountReadTimer.Update(statedb.AccountReads) // Account reads are complete(in processing) - storageReadTimer.Update(statedb.StorageReads) // Storage reads are complete(in processing) - if statedb.AccountLoaded != 0 { - accountReadSingleTimer.Update(statedb.AccountReads / time.Duration(statedb.AccountLoaded)) - } - if statedb.StorageLoaded != 0 { - storageReadSingleTimer.Update(statedb.StorageReads / time.Duration(statedb.StorageLoaded)) - } - accountUpdateTimer.Update(statedb.AccountUpdates) // Account updates are complete(in validation) - storageUpdateTimer.Update(statedb.StorageUpdates) // Storage 
updates are complete(in validation) - accountHashTimer.Update(statedb.AccountHashes) // Account hashes are complete(in validation) - triehash := statedb.AccountHashes // The time spent on tries hashing - trieUpdate := statedb.AccountUpdates + statedb.StorageUpdates // The time spent on tries update - blockExecutionTimer.Update(ptime - (statedb.AccountReads + statedb.StorageReads)) // The time spent on EVM processing - blockValidationTimer.Update(vtime - (triehash + trieUpdate)) // The time spent on block validation - blockCrossValidationTimer.Update(xvtime) // The time spent on stateless cross validation + stats.AccountReads = statedb.AccountReads // Account reads are complete(in processing) + stats.StorageReads = statedb.StorageReads // Storage reads are complete(in processing) + stats.AccountUpdates = statedb.AccountUpdates // Account updates are complete(in validation) + stats.StorageUpdates = statedb.StorageUpdates // Storage updates are complete(in validation) + stats.AccountHashes = statedb.AccountHashes // Account hashes are complete(in validation) + + stats.AccountLoaded = statedb.AccountLoaded + stats.AccountUpdated = statedb.AccountUpdated + stats.AccountDeleted = statedb.AccountDeleted + stats.StorageLoaded = statedb.StorageLoaded + stats.StorageUpdated = int(statedb.StorageUpdated.Load()) + stats.StorageDeleted = int(statedb.StorageDeleted.Load()) + + stats.Execution = ptime - (statedb.AccountReads + statedb.StorageReads) // The time spent on EVM processing + stats.Validation = vtime - (statedb.AccountHashes + statedb.AccountUpdates + statedb.StorageUpdates) // The time spent on block validation + stats.CrossValidation = xvtime // The time spent on stateless cross validation // Write the block to the chain and get the status. 
var ( @@ -2165,24 +2192,22 @@ func (bc *BlockChain) ProcessBlock(parentRoot common.Hash, block *types.Block, s } // Update the metrics touched during block commit - accountCommitTimer.Update(statedb.AccountCommits) // Account commits are complete, we can mark them - storageCommitTimer.Update(statedb.StorageCommits) // Storage commits are complete, we can mark them - snapshotCommitTimer.Update(statedb.SnapshotCommits) // Snapshot commits are complete, we can mark them - triedbCommitTimer.Update(statedb.TrieDBCommits) // Trie database commits are complete, we can mark them + stats.AccountCommits = statedb.AccountCommits // Account commits are complete, we can mark them + stats.StorageCommits = statedb.StorageCommits // Storage commits are complete, we can mark them + stats.SnapshotCommit = statedb.SnapshotCommits // Snapshot commits are complete, we can mark them + stats.TrieDBCommit = statedb.TrieDBCommits // Trie database commits are complete, we can mark them + stats.BlockWrite = time.Since(wstart) - max(statedb.AccountCommits, statedb.StorageCommits) /* concurrent */ - statedb.SnapshotCommits - statedb.TrieDBCommits - blockWriteTimer.Update(time.Since(wstart) - max(statedb.AccountCommits, statedb.StorageCommits) /* concurrent */ - statedb.SnapshotCommits - statedb.TrieDBCommits) elapsed := time.Since(startTime) + 1 // prevent zero division - blockInsertTimer.Update(elapsed) - - // TODO(rjl493456442) generalize the ResettingTimer - mgasps := float64(res.GasUsed) * 1000 / float64(elapsed) - chainMgaspsMeter.Update(time.Duration(mgasps)) + stats.TotalTime = elapsed + stats.MgasPerSecond = float64(res.GasUsed) * 1000 / float64(elapsed) return &blockProcessingResult{ usedGas: res.GasUsed, procTime: proctime, status: status, witness: witness, + stats: stats, }, nil } @@ -2667,8 +2692,8 @@ func (bc *BlockChain) skipBlock(err error, it *insertIterator) bool { return false } -// reportBlock logs a bad block error. 
-func (bc *BlockChain) reportBlock(block *types.Block, res *ProcessResult, err error) { +// reportBadBlock logs a bad block error. +func (bc *BlockChain) reportBadBlock(block *types.Block, res *ProcessResult, err error) { var receipts types.Receipts if res != nil { receipts = res.Receipts diff --git a/core/blockchain_stats.go b/core/blockchain_stats.go new file mode 100644 index 0000000000..0cebebc20a --- /dev/null +++ b/core/blockchain_stats.go @@ -0,0 +1,138 @@ +// Copyright 2025 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package core + +import ( + "fmt" + "strings" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/state" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/log" +) + +// ExecuteStats includes all the statistics of a block execution in details. 
+type ExecuteStats struct { + // State read times + AccountReads time.Duration // Time spent on the account reads + StorageReads time.Duration // Time spent on the storage reads + AccountHashes time.Duration // Time spent on the account trie hash + AccountUpdates time.Duration // Time spent on the account trie update + AccountCommits time.Duration // Time spent on the account trie commit + StorageUpdates time.Duration // Time spent on the storage trie update + StorageCommits time.Duration // Time spent on the storage trie commit + + AccountLoaded int // Number of accounts loaded + AccountUpdated int // Number of accounts updated + AccountDeleted int // Number of accounts deleted + StorageLoaded int // Number of storage slots loaded + StorageUpdated int // Number of storage slots updated + StorageDeleted int // Number of storage slots deleted + + Execution time.Duration // Time spent on the EVM execution + Validation time.Duration // Time spent on the block validation + CrossValidation time.Duration // Optional, time spent on the block cross validation + SnapshotCommit time.Duration // Time spent on snapshot commit + TrieDBCommit time.Duration // Time spent on database commit + BlockWrite time.Duration // Time spent on block write + TotalTime time.Duration // The total time spent on block execution + MgasPerSecond float64 // The million gas processed per second + + // Cache hit rates + StateReadCacheStats state.ReaderStats + StatePrefetchCacheStats state.ReaderStats +} + +// reportMetrics uploads execution statistics to the metrics system. 
+func (s *ExecuteStats) reportMetrics() { + accountReadTimer.Update(s.AccountReads) // Account reads are complete(in processing) + storageReadTimer.Update(s.StorageReads) // Storage reads are complete(in processing) + if s.AccountLoaded != 0 { + accountReadSingleTimer.Update(s.AccountReads / time.Duration(s.AccountLoaded)) + } + if s.StorageLoaded != 0 { + storageReadSingleTimer.Update(s.StorageReads / time.Duration(s.StorageLoaded)) + } + + accountUpdateTimer.Update(s.AccountUpdates) // Account updates are complete(in validation) + storageUpdateTimer.Update(s.StorageUpdates) // Storage updates are complete(in validation) + accountHashTimer.Update(s.AccountHashes) // Account hashes are complete(in validation) + + accountCommitTimer.Update(s.AccountCommits) // Account commits are complete, we can mark them + storageCommitTimer.Update(s.StorageCommits) // Storage commits are complete, we can mark them + + blockExecutionTimer.Update(s.Execution) // The time spent on EVM processing + blockValidationTimer.Update(s.Validation) // The time spent on block validation + blockCrossValidationTimer.Update(s.CrossValidation) // The time spent on stateless cross validation + snapshotCommitTimer.Update(s.SnapshotCommit) // Snapshot commits are complete, we can mark them + triedbCommitTimer.Update(s.TrieDBCommit) // Trie database commits are complete, we can mark them + blockWriteTimer.Update(s.BlockWrite) // The time spent on block write + blockInsertTimer.Update(s.TotalTime) // The total time spent on block execution + chainMgaspsMeter.Update(time.Duration(s.MgasPerSecond)) // TODO(rjl493456442) generalize the ResettingTimer + + // Cache hit rates + accountCacheHitPrefetchMeter.Mark(s.StatePrefetchCacheStats.AccountCacheHit) + accountCacheMissPrefetchMeter.Mark(s.StatePrefetchCacheStats.AccountCacheMiss) + storageCacheHitPrefetchMeter.Mark(s.StatePrefetchCacheStats.StorageCacheHit) + storageCacheMissPrefetchMeter.Mark(s.StatePrefetchCacheStats.StorageCacheMiss) + + 
accountCacheHitMeter.Mark(s.StateReadCacheStats.AccountCacheHit) + accountCacheMissMeter.Mark(s.StateReadCacheStats.AccountCacheMiss) + storageCacheHitMeter.Mark(s.StateReadCacheStats.StorageCacheHit) + storageCacheMissMeter.Mark(s.StateReadCacheStats.StorageCacheMiss) +} + +// logSlow prints the detailed execution statistics if the block is regarded as slow. +func (s *ExecuteStats) logSlow(block *types.Block, slowBlockThreshold time.Duration) { + if slowBlockThreshold == 0 { + return + } + if s.TotalTime < slowBlockThreshold { + return + } + msg := fmt.Sprintf(` +########## SLOW BLOCK ######### +Block: %v (%#x) txs: %d, mgasps: %.2f, elapsed: %v + +EVM execution: %v +Validation: %v +Account read: %v(%d) +Storage read: %v(%d) +Account hash: %v +Storage hash: %v +DB commit: %v +Block write: %v + +%s +############################## +`, block.Number(), block.Hash(), len(block.Transactions()), s.MgasPerSecond, common.PrettyDuration(s.TotalTime), + common.PrettyDuration(s.Execution), common.PrettyDuration(s.Validation+s.CrossValidation), + common.PrettyDuration(s.AccountReads), s.AccountLoaded, + common.PrettyDuration(s.StorageReads), s.StorageLoaded, + common.PrettyDuration(s.AccountHashes+s.AccountCommits+s.AccountUpdates), + common.PrettyDuration(s.StorageCommits+s.StorageUpdates), + common.PrettyDuration(s.TrieDBCommit+s.SnapshotCommit), common.PrettyDuration(s.BlockWrite), + s.StateReadCacheStats) + for _, line := range strings.Split(msg, "\n") { + if line == "" { + continue + } + log.Info(line) + } +} diff --git a/core/blockchain_test.go b/core/blockchain_test.go index b749798f9c..3e3053d9bf 100644 --- a/core/blockchain_test.go +++ b/core/blockchain_test.go @@ -162,12 +162,12 @@ func testBlockChainImport(chain types.Blocks, blockchain *BlockChain) error { } res, err := blockchain.processor.Process(block, statedb, vm.Config{}) if err != nil { - blockchain.reportBlock(block, res, err) + blockchain.reportBadBlock(block, res, err) return err } err = 
blockchain.validator.ValidateState(block, statedb, res, false) if err != nil { - blockchain.reportBlock(block, res, err) + blockchain.reportBadBlock(block, res, err) return err } diff --git a/core/state/reader.go b/core/state/reader.go index 93083c8ae2..21e76c5b66 100644 --- a/core/state/reader.go +++ b/core/state/reader.go @@ -18,6 +18,7 @@ package state import ( "errors" + "fmt" "sync" "sync/atomic" @@ -88,10 +89,29 @@ type Reader interface { // ReaderStats wraps the statistics of reader. type ReaderStats struct { - AccountHit int64 - AccountMiss int64 - StorageHit int64 - StorageMiss int64 + // Cache stats + AccountCacheHit int64 + AccountCacheMiss int64 + StorageCacheHit int64 + StorageCacheMiss int64 +} + +// String implements fmt.Stringer, returning string format statistics. +func (s ReaderStats) String() string { + var ( + accountCacheHitRate float64 + storageCacheHitRate float64 + ) + if s.AccountCacheHit > 0 { + accountCacheHitRate = float64(s.AccountCacheHit) / float64(s.AccountCacheHit+s.AccountCacheMiss) * 100 + } + if s.StorageCacheHit > 0 { + storageCacheHitRate = float64(s.StorageCacheHit) / float64(s.StorageCacheHit+s.StorageCacheMiss) * 100 + } + msg := fmt.Sprintf("Reader statistics\n") + msg += fmt.Sprintf("account: hit: %d, miss: %d, rate: %.2f\n", s.AccountCacheHit, s.AccountCacheMiss, accountCacheHitRate) + msg += fmt.Sprintf("storage: hit: %d, miss: %d, rate: %.2f\n", s.StorageCacheHit, s.StorageCacheMiss, storageCacheHitRate) + return msg } // ReaderWithStats wraps the additional method to retrieve the reader statistics from. 
@@ -544,10 +564,11 @@ func (r *readerWithCache) Storage(addr common.Address, slot common.Hash) (common type readerWithCacheStats struct { *readerWithCache - accountHit atomic.Int64 - accountMiss atomic.Int64 - storageHit atomic.Int64 - storageMiss atomic.Int64 + + accountCacheHit atomic.Int64 + accountCacheMiss atomic.Int64 + storageCacheHit atomic.Int64 + storageCacheMiss atomic.Int64 } // newReaderWithCacheStats constructs the reader with additional statistics tracked. @@ -567,9 +588,9 @@ func (r *readerWithCacheStats) Account(addr common.Address) (*types.StateAccount return nil, err } if incache { - r.accountHit.Add(1) + r.accountCacheHit.Add(1) } else { - r.accountMiss.Add(1) + r.accountCacheMiss.Add(1) } return account, nil } @@ -585,9 +606,9 @@ func (r *readerWithCacheStats) Storage(addr common.Address, slot common.Hash) (c return common.Hash{}, err } if incache { - r.storageHit.Add(1) + r.storageCacheHit.Add(1) } else { - r.storageMiss.Add(1) + r.storageCacheMiss.Add(1) } return value, nil } @@ -595,9 +616,9 @@ func (r *readerWithCacheStats) Storage(addr common.Address, slot common.Hash) (c // GetStats implements ReaderWithStats, returning the statistics of state reader. 
func (r *readerWithCacheStats) GetStats() ReaderStats { return ReaderStats{ - AccountHit: r.accountHit.Load(), - AccountMiss: r.accountMiss.Load(), - StorageHit: r.storageHit.Load(), - StorageMiss: r.storageMiss.Load(), + AccountCacheHit: r.accountCacheHit.Load(), + AccountCacheMiss: r.accountCacheMiss.Load(), + StorageCacheHit: r.storageCacheHit.Load(), + StorageCacheMiss: r.storageCacheMiss.Load(), } } diff --git a/core/state/statedb.go b/core/state/statedb.go index 364bc40850..73d4af7dcf 100644 --- a/core/state/statedb.go +++ b/core/state/statedb.go @@ -141,10 +141,11 @@ type StateDB struct { witnessStats *stateless.WitnessStats // Measurements gathered during execution for debugging purposes - AccountReads time.Duration - AccountHashes time.Duration - AccountUpdates time.Duration - AccountCommits time.Duration + AccountReads time.Duration + AccountHashes time.Duration + AccountUpdates time.Duration + AccountCommits time.Duration + StorageReads time.Duration StorageUpdates time.Duration StorageCommits time.Duration diff --git a/eth/api_debug.go b/eth/api_debug.go index 892e103213..db1b842e90 100644 --- a/eth/api_debug.go +++ b/eth/api_debug.go @@ -499,17 +499,14 @@ func (api *DebugAPI) ExecutionWitness(bn rpc.BlockNumber) (*stateless.ExtWitness if err != nil { return &stateless.ExtWitness{}, fmt.Errorf("block number %v not found", bn) } - parent := bc.GetHeader(block.ParentHash(), block.NumberU64()-1) if parent == nil { return &stateless.ExtWitness{}, fmt.Errorf("block number %v found, but parent missing", bn) } - result, err := bc.ProcessBlock(parent.Root, block, false, true) if err != nil { return nil, err } - return result.Witness().ToExtWitness(), nil } @@ -519,16 +516,13 @@ func (api *DebugAPI) ExecutionWitnessByHash(hash common.Hash) (*stateless.ExtWit if block == nil { return &stateless.ExtWitness{}, fmt.Errorf("block hash %x not found", hash) } - parent := bc.GetHeader(block.ParentHash(), block.NumberU64()-1) if parent == nil { return 
&stateless.ExtWitness{}, fmt.Errorf("block number %x found, but parent missing", hash) } - result, err := bc.ProcessBlock(parent.Root, block, false, true) if err != nil { return nil, err } - return result.Witness().ToExtWitness(), nil } diff --git a/eth/backend.go b/eth/backend.go index 8509561822..95ae9d4a41 100644 --- a/eth/backend.go +++ b/eth/backend.go @@ -244,6 +244,7 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) { // - DATADIR/triedb/verkle.journal TrieJournalDirectory: stack.ResolvePath("triedb"), StateSizeTracking: config.EnableStateSizeTracking, + SlowBlockThreshold: config.SlowBlockThreshold, } ) if config.VMTrace != "" { diff --git a/eth/ethconfig/config.go b/eth/ethconfig/config.go index c4a0956b3b..d6ed2c2576 100644 --- a/eth/ethconfig/config.go +++ b/eth/ethconfig/config.go @@ -72,6 +72,7 @@ var Defaults = Config{ RPCTxFeeCap: 1, // 1 ether TxSyncDefaultTimeout: 20 * time.Second, TxSyncMaxTimeout: 1 * time.Minute, + SlowBlockThreshold: time.Second * 2, } //go:generate go run github.com/fjl/gencodec -type Config -formats toml -out gen_config.go @@ -118,6 +119,10 @@ type Config struct { // presence of these blocks for every new peer connection. RequiredBlocks map[uint64]common.Hash `toml:"-"` + // SlowBlockThreshold is the block execution speed threshold (Mgas/s) + // below which detailed statistics are logged. 
+ SlowBlockThreshold time.Duration `toml:",omitempty"` + // Database options SkipBcVersionCheck bool `toml:"-"` DatabaseHandles int `toml:"-"` diff --git a/eth/ethconfig/gen_config.go b/eth/ethconfig/gen_config.go index 6f18dc34c5..97c5db3ecd 100644 --- a/eth/ethconfig/gen_config.go +++ b/eth/ethconfig/gen_config.go @@ -33,6 +33,7 @@ func (c Config) MarshalTOML() (interface{}, error) { StateHistory uint64 `toml:",omitempty"` StateScheme string `toml:",omitempty"` RequiredBlocks map[uint64]common.Hash `toml:"-"` + SlowBlockThreshold time.Duration `toml:",omitempty"` SkipBcVersionCheck bool `toml:"-"` DatabaseHandles int `toml:"-"` DatabaseCache int @@ -82,6 +83,7 @@ func (c Config) MarshalTOML() (interface{}, error) { enc.StateHistory = c.StateHistory enc.StateScheme = c.StateScheme enc.RequiredBlocks = c.RequiredBlocks + enc.SlowBlockThreshold = c.SlowBlockThreshold enc.SkipBcVersionCheck = c.SkipBcVersionCheck enc.DatabaseHandles = c.DatabaseHandles enc.DatabaseCache = c.DatabaseCache @@ -135,6 +137,7 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error { StateHistory *uint64 `toml:",omitempty"` StateScheme *string `toml:",omitempty"` RequiredBlocks map[uint64]common.Hash `toml:"-"` + SlowBlockThreshold *time.Duration `toml:",omitempty"` SkipBcVersionCheck *bool `toml:"-"` DatabaseHandles *int `toml:"-"` DatabaseCache *int @@ -219,6 +222,9 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error { if dec.RequiredBlocks != nil { c.RequiredBlocks = dec.RequiredBlocks } + if dec.SlowBlockThreshold != nil { + c.SlowBlockThreshold = *dec.SlowBlockThreshold + } if dec.SkipBcVersionCheck != nil { c.SkipBcVersionCheck = *dec.SkipBcVersionCheck } From be94ea1c4028f82b90de5f1e08a40f0b0a73d5da Mon Sep 17 00:00:00 2001 From: cui Date: Tue, 2 Dec 2025 23:11:56 +0800 Subject: [PATCH 165/277] cmd/utils: fix handling of boolean flags when they are set to false (#33338) geth --nodiscover=false may result in ctx.IsSet(NoDiscoverFlag.Name) is 
true, but cfg. NoDiscovery should be false, not true. --- cmd/utils/flags.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index 0d53716f6c..b73fa80b17 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -1374,7 +1374,7 @@ func SetP2PConfig(ctx *cli.Context, cfg *p2p.Config) { cfg.MaxPendingPeers = ctx.Int(MaxPendingPeersFlag.Name) } if ctx.IsSet(NoDiscoverFlag.Name) { - cfg.NoDiscovery = true + cfg.NoDiscovery = ctx.Bool(NoDiscoverFlag.Name) } flags.CheckExclusive(ctx, DiscoveryV4Flag, NoDiscoverFlag) @@ -1724,7 +1724,7 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) { cfg.LogHistory = ctx.Uint64(LogHistoryFlag.Name) } if ctx.IsSet(LogNoHistoryFlag.Name) { - cfg.LogNoHistory = true + cfg.LogNoHistory = ctx.Bool(LogNoHistoryFlag.Name) } if ctx.IsSet(LogSlowBlockFlag.Name) { cfg.SlowBlockThreshold = ctx.Duration(LogSlowBlockFlag.Name) From 212967d0e18e456a532c9ba8c8883d8cb1f3f895 Mon Sep 17 00:00:00 2001 From: rjl493456442 Date: Tue, 2 Dec 2025 23:19:20 +0800 Subject: [PATCH 166/277] ethdb/pebble: add configuration changes (#33315) This introduces two main changes to Pebble's configuration: (a) Remove the Bloom filter at Level 6 The Bloom filter is never used at the bottom-most level, so keeping it serves no purpose. Removing it saves storage without affecting read performance. (b) Re-enable read-sampling compaction Read-sampling compaction was previously disabled in the hash-based scheme because all data was identified by hashes and basically no data overwrite. Read sampling compaction makes no sense. After switching to the path-based scheme, data overwrites are much more common, making read-sampling compaction beneficial and reasonable to re-enable. 
--- ethdb/pebble/pebble.go | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/ethdb/pebble/pebble.go b/ethdb/pebble/pebble.go index 8abe7d4bc7..94311fc12b 100644 --- a/ethdb/pebble/pebble.go +++ b/ethdb/pebble/pebble.go @@ -263,7 +263,9 @@ func New(file string, cache int, handles int, namespace string, readonly bool) ( {TargetFileSize: 16 * 1024 * 1024, FilterPolicy: bloom.FilterPolicy(10)}, {TargetFileSize: 32 * 1024 * 1024, FilterPolicy: bloom.FilterPolicy(10)}, {TargetFileSize: 64 * 1024 * 1024, FilterPolicy: bloom.FilterPolicy(10)}, - {TargetFileSize: 128 * 1024 * 1024, FilterPolicy: bloom.FilterPolicy(10)}, + + // Pebble doesn't use the Bloom filter at level6 for read efficiency. + {TargetFileSize: 128 * 1024 * 1024}, }, ReadOnly: readonly, EventListener: &pebble.EventListener{ @@ -294,10 +296,6 @@ func New(file string, cache int, handles int, namespace string, readonly bool) ( // debt will be less than 1GB, but with more frequent compactions scheduled. L0CompactionThreshold: 2, } - // Disable seek compaction explicitly. Check https://github.com/ethereum/go-ethereum/pull/20130 - // for more details. - opt.Experimental.ReadSamplingMultiplier = -1 - // Open the db and recover any potential corruptions innerDB, err := pebble.Open(file, opt) if err != nil { From d3679c2f2e9d7f6d354375bfad0165016e6d059c Mon Sep 17 00:00:00 2001 From: rjl493456442 Date: Tue, 2 Dec 2025 23:28:51 +0800 Subject: [PATCH 167/277] core/state: export statistics to metrics (#33254) This PR exposes the state size statistics to the metrics, making them easier to demonstrate. Note that the contract code included in the metrics is not de-duplicated, so the reported size will appear larger than the actual storage footprint. 
--- core/state/state_sizer.go | 34 ++++++++++++++++++++++++++++++++++ 1 file changed, 34 insertions(+) diff --git a/core/state/state_sizer.go b/core/state/state_sizer.go index 2066c94845..636b158da6 100644 --- a/core/state/state_sizer.go +++ b/core/state/state_sizer.go @@ -31,6 +31,7 @@ import ( "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/metrics" "github.com/ethereum/go-ethereum/triedb" "golang.org/x/sync/errgroup" ) @@ -48,6 +49,21 @@ var ( codeKeySize = int64(len(rawdb.CodePrefix) + common.HashLength) ) +// State size metrics +var ( + stateSizeChainHeightGauge = metrics.NewRegisteredGauge("state/height", nil) + stateSizeAccountsCountGauge = metrics.NewRegisteredGauge("state/accounts/count", nil) + stateSizeAccountsBytesGauge = metrics.NewRegisteredGauge("state/accounts/bytes", nil) + stateSizeStoragesCountGauge = metrics.NewRegisteredGauge("state/storages/count", nil) + stateSizeStoragesBytesGauge = metrics.NewRegisteredGauge("state/storages/bytes", nil) + stateSizeAccountTrieNodesCountGauge = metrics.NewRegisteredGauge("state/trienodes/account/count", nil) + stateSizeAccountTrieNodesBytesGauge = metrics.NewRegisteredGauge("state/trienodes/account/bytes", nil) + stateSizeStorageTrieNodesCountGauge = metrics.NewRegisteredGauge("state/trienodes/storage/count", nil) + stateSizeStorageTrieNodesBytesGauge = metrics.NewRegisteredGauge("state/trienodes/storage/bytes", nil) + stateSizeContractsCountGauge = metrics.NewRegisteredGauge("state/contracts/count", nil) + stateSizeContractsBytesGauge = metrics.NewRegisteredGauge("state/contracts/bytes", nil) +) + // SizeStats represents either the current state size statistics or the size // differences resulting from a state transition. 
type SizeStats struct { @@ -76,6 +92,20 @@ func (s SizeStats) String() string { ) } +func (s SizeStats) publish() { + stateSizeChainHeightGauge.Update(int64(s.BlockNumber)) + stateSizeAccountsCountGauge.Update(s.Accounts) + stateSizeAccountsBytesGauge.Update(s.AccountBytes) + stateSizeStoragesCountGauge.Update(s.Storages) + stateSizeStoragesBytesGauge.Update(s.StorageBytes) + stateSizeAccountTrieNodesCountGauge.Update(s.AccountTrienodes) + stateSizeAccountTrieNodesBytesGauge.Update(s.AccountTrienodeBytes) + stateSizeStorageTrieNodesCountGauge.Update(s.StorageTrienodes) + stateSizeStorageTrieNodesBytesGauge.Update(s.StorageTrienodeBytes) + stateSizeContractsCountGauge.Update(s.ContractCodes) + stateSizeContractsBytesGauge.Update(s.ContractCodeBytes) +} + // add applies the given state diffs and produces a new version of the statistics. func (s SizeStats) add(diff SizeStats) SizeStats { s.StateRoot = diff.StateRoot @@ -309,6 +339,10 @@ func (t *SizeTracker) run() { stats[u.root] = stat last = u.root + // Publish statistics to metric system + stat.publish() + + // Evict the stale statistics heap.Push(&h, stats[u.root]) for u.blockNumber-h[0].BlockNumber > statEvictThreshold { delete(stats, h[0].StateRoot) From 129c562900b8fe52d6d67df7f19287a494732538 Mon Sep 17 00:00:00 2001 From: Csaba Kiraly Date: Wed, 3 Dec 2025 22:35:00 +0100 Subject: [PATCH 168/277] eth/catalyst: benchmark GetBlobsV2 at API level (#33196) This is to benchmark how much the internal parts of GetBlobsV2 take. This is not an RPC-level benchmark, so JSON-RPC overhead is not included. 
Signed-off-by: Csaba Kiraly --- eth/catalyst/api_test.go | 87 +++++++++++++++++++++++++--------------- 1 file changed, 54 insertions(+), 33 deletions(-) diff --git a/eth/catalyst/api_test.go b/eth/catalyst/api_test.go index 2284a33453..a023962b81 100644 --- a/eth/catalyst/api_test.go +++ b/eth/catalyst/api_test.go @@ -426,7 +426,7 @@ func TestEth2DeepReorg(t *testing.T) { } // startEthService creates a full node instance for testing. -func startEthService(t *testing.T, genesis *core.Genesis, blocks []*types.Block) (*node.Node, *eth.Ethereum) { +func startEthService(t testing.TB, genesis *core.Genesis, blocks []*types.Block) (*node.Node, *eth.Ethereum) { t.Helper() n, err := node.New(&node.Config{ @@ -1873,7 +1873,7 @@ func makeMultiBlobTx(chainConfig *params.ChainConfig, nonce uint64, blobCount in return types.MustSignNewTx(key, types.LatestSigner(chainConfig), blobtx) } -func newGetBlobEnv(t *testing.T, version byte) (*node.Node, *ConsensusAPI) { +func newGetBlobEnv(t testing.TB, version byte) (*node.Node, *ConsensusAPI) { var ( // Create a database pre-initialize with a genesis block config = *params.MergedTestChainConfig @@ -2045,36 +2045,57 @@ func TestGetBlobsV2(t *testing.T) { }, } for i, suite := range suites { - // Fill the request for retrieving blobs - var ( - vhashes []common.Hash - expect []*engine.BlobAndProofV2 - ) - // fill missing blob - if suite.fillRandom { - vhashes = append(vhashes, testrand.Hash()) - } - for j := suite.start; j < suite.limit; j++ { - vhashes = append(vhashes, testBlobVHashes[j]) - var cellProofs []hexutil.Bytes - for _, proof := range testBlobCellProofs[j] { - cellProofs = append(cellProofs, proof[:]) - } - expect = append(expect, &engine.BlobAndProofV2{ - Blob: testBlobs[j][:], - CellProofs: cellProofs, - }) - } - result, err := api.GetBlobsV2(vhashes) - if err != nil { - t.Errorf("Unexpected error for case %d, %v", i, err) - } - // null is responded if any blob is missing - if suite.fillRandom { - expect = nil - } - if 
!reflect.DeepEqual(result, expect) { - t.Fatalf("Unexpected result for case %d", i) - } + runGetBlobsV2(t, api, suite.start, suite.limit, suite.fillRandom, fmt.Sprintf("suite=%d", i)) + } +} + +// Benchmark GetBlobsV2 internals +// Note that this is not an RPC-level benchmark, so JSON-RPC overhead is not included. +func BenchmarkGetBlobsV2(b *testing.B) { + n, api := newGetBlobEnv(b, 1) + defer n.Close() + + // for blobs in [1, 2, 4, 6], print string and run benchmark + for _, blobs := range []int{1, 2, 4, 6} { + name := fmt.Sprintf("blobs=%d", blobs) + b.Run(name, func(b *testing.B) { + for b.Loop() { + runGetBlobsV2(b, api, 0, blobs, false, name) + } + }) + } +} + +func runGetBlobsV2(t testing.TB, api *ConsensusAPI, start, limit int, fillRandom bool, name string) { + // Fill the request for retrieving blobs + var ( + vhashes []common.Hash + expect []*engine.BlobAndProofV2 + ) + // fill missing blob + if fillRandom { + vhashes = append(vhashes, testrand.Hash()) + } + for j := start; j < limit; j++ { + vhashes = append(vhashes, testBlobVHashes[j]) + var cellProofs []hexutil.Bytes + for _, proof := range testBlobCellProofs[j] { + cellProofs = append(cellProofs, proof[:]) + } + expect = append(expect, &engine.BlobAndProofV2{ + Blob: testBlobs[j][:], + CellProofs: cellProofs, + }) + } + result, err := api.GetBlobsV2(vhashes) + if err != nil { + t.Errorf("Unexpected error for case %s, %v", name, err) + } + // null is responded if any blob is missing + if fillRandom { + expect = nil + } + if !reflect.DeepEqual(result, expect) { + t.Fatalf("Unexpected result for case %s", name) } } From 657c99f116d5d224e859eb42c584077026a55efe Mon Sep 17 00:00:00 2001 From: Felix Lange Date: Wed, 3 Dec 2025 23:17:19 +0100 Subject: [PATCH 169/277] beacon/types: update for fulu (#33349) Should fix decoding JSON blocks in the Fulu fork. This diff was missing from https://github.com/ethereum/go-ethereum/pull/33349. 
--- beacon/types/beacon_block.go | 2 +- beacon/types/exec_header.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/beacon/types/beacon_block.go b/beacon/types/beacon_block.go index a2e31d5abf..82a0814a9f 100644 --- a/beacon/types/beacon_block.go +++ b/beacon/types/beacon_block.go @@ -52,7 +52,7 @@ func BlockFromJSON(forkName string, data []byte) (*BeaconBlock, error) { obj = new(capella.BeaconBlock) case "deneb": obj = new(deneb.BeaconBlock) - case "electra": + case "electra", "fulu": obj = new(electra.BeaconBlock) default: return nil, fmt.Errorf("unsupported fork: %s", forkName) diff --git a/beacon/types/exec_header.go b/beacon/types/exec_header.go index ae79b00841..dbf5c54b36 100644 --- a/beacon/types/exec_header.go +++ b/beacon/types/exec_header.go @@ -45,7 +45,7 @@ func ExecutionHeaderFromJSON(forkName string, data []byte) (*ExecutionHeader, er switch forkName { case "capella": obj = new(capella.ExecutionPayloadHeader) - case "deneb", "electra": // note: the payload type was not changed in electra + case "deneb", "electra", "fulu": // note: the payload type was not changed in electra/fulu obj = new(deneb.ExecutionPayloadHeader) default: return nil, fmt.Errorf("unsupported fork: %s", forkName) From 73a2df2b0a7d096714af66ff4ef544f93480894f Mon Sep 17 00:00:00 2001 From: Felix Lange Date: Thu, 4 Dec 2025 11:02:42 +0100 Subject: [PATCH 170/277] eth/filters: change error code for invalid parameter errors (#33320) This improves the error code for cases where invalid query parameters are submitted to `eth_getLogs`. I also improved the error message that is emitted when querying into the future. 
--- eth/filters/api.go | 22 +++++++++++++++++----- eth/filters/filter.go | 5 ++++- 2 files changed, 21 insertions(+), 6 deletions(-) diff --git a/eth/filters/api.go b/eth/filters/api.go index 58baf2c3aa..4ed7e5be0a 100644 --- a/eth/filters/api.go +++ b/eth/filters/api.go @@ -35,17 +35,29 @@ import ( ) var ( - errInvalidTopic = errors.New("invalid topic(s)") - errFilterNotFound = errors.New("filter not found") - errInvalidBlockRange = errors.New("invalid block range params") + errInvalidTopic = invalidParamsErr("invalid topic(s)") + errInvalidBlockRange = invalidParamsErr("invalid block range params") + errBlockRangeIntoFuture = invalidParamsErr("block range extends beyond current head block") + errBlockHashWithRange = invalidParamsErr("can't specify fromBlock/toBlock with blockHash") + errPendingLogsUnsupported = invalidParamsErr("pending logs are not supported") errUnknownBlock = errors.New("unknown block") - errBlockHashWithRange = errors.New("can't specify fromBlock/toBlock with blockHash") - errPendingLogsUnsupported = errors.New("pending logs are not supported") + errFilterNotFound = errors.New("filter not found") errExceedMaxTopics = errors.New("exceed max topics") errExceedLogQueryLimit = errors.New("exceed max addresses or topics per search position") errExceedMaxTxHashes = errors.New("exceed max number of transaction hashes allowed per transactionReceipts subscription") ) +type invalidParamsError struct { + err error +} + +func (e invalidParamsError) Error() string { return e.err.Error() } +func (e invalidParamsError) ErrorCode() int { return -32602 } + +func invalidParamsErr(format string, args ...any) error { + return invalidParamsError{fmt.Errorf(format, args...)} +} + const ( // The maximum number of topic criteria allowed, vm.LOG4 - vm.LOG0 maxTopics = 4 diff --git a/eth/filters/filter.go b/eth/filters/filter.go index a818f0b607..10afc84fe9 100644 --- a/eth/filters/filter.go +++ b/eth/filters/filter.go @@ -221,9 +221,12 @@ func (s *searchSession) 
updateChainView() error { if lastBlock == math.MaxUint64 { lastBlock = head } - if firstBlock > lastBlock || lastBlock > head { + if firstBlock > lastBlock { return errInvalidBlockRange } + if lastBlock > head { + return errBlockRangeIntoFuture + } s.searchRange = common.NewRange(firstBlock, lastBlock+1-firstBlock) // Trim existing match set in case a reorg may have invalidated some results From e63e37be5e48a7869abf3f17e2a8cfff1fbfbcbf Mon Sep 17 00:00:00 2001 From: David Klank <155117116+davidjsonn@users.noreply.github.com> Date: Sat, 6 Dec 2025 05:21:38 +0200 Subject: [PATCH 171/277] core/filtermaps: fix operator precedence in delete logging condition (#33280) The original condition `deleted && !logPrinted || time.Since(...)` was incorrectly grouping due to operator precedence, causing logs to print every 10 seconds even when no deletion was happening (deleted=false). According to SafeDeleteRange documentation, the 'deleted' parameter is "true if entries have actually been deleted already". The logging should only happen when deletion is active. Fixed by adding parentheses: `deleted && (!logPrinted || time.Since(...))`Now logs print only when items are being deleted AND either it's the first log or 10+ seconds have passed since the last one. 
--- core/filtermaps/filtermaps.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/filtermaps/filtermaps.go b/core/filtermaps/filtermaps.go index fede54df57..f6b1ef26d0 100644 --- a/core/filtermaps/filtermaps.go +++ b/core/filtermaps/filtermaps.go @@ -434,7 +434,7 @@ func (f *FilterMaps) safeDeleteWithLogs(deleteFn func(db ethdb.KeyValueStore, ha lastLogPrinted = start ) switch err := deleteFn(f.db, f.hashScheme, func(deleted bool) bool { - if deleted && !logPrinted || time.Since(lastLogPrinted) > time.Second*10 { + if deleted && (!logPrinted || time.Since(lastLogPrinted) > time.Second*10) { log.Info(action+" in progress...", "elapsed", common.PrettyDuration(time.Since(start))) logPrinted, lastLogPrinted = true, time.Now() } From dbca85869f6d699441a913df9871dc6ac48b1d34 Mon Sep 17 00:00:00 2001 From: rjl493456442 Date: Mon, 8 Dec 2025 20:53:40 +0800 Subject: [PATCH 172/277] ethdb/pebble: change the Pebble database configuration (#33353) This PR changes the Pebble configurations as below: - increase the MemTableStopWritesThreshold for handling temporary spike - decrease the L0CompactionConcurrency and CompactionDebtConcurrency to scale up compaction readily --- ethdb/pebble/pebble.go | 22 ++++++++++++++++++---- 1 file changed, 18 insertions(+), 4 deletions(-) diff --git a/ethdb/pebble/pebble.go b/ethdb/pebble/pebble.go index 94311fc12b..800559ab4b 100644 --- a/ethdb/pebble/pebble.go +++ b/ethdb/pebble/pebble.go @@ -205,8 +205,8 @@ func New(file string, cache int, handles int, namespace string, readonly bool) ( // limit unchanged allows writes to be flushed more smoothly. This helps // avoid compaction spikes and mitigates write stalls caused by heavy // compaction workloads. 
- memTableLimit := 4 - memTableSize := cache * 1024 * 1024 / 2 / memTableLimit + memTableNumber := 4 + memTableSize := cache * 1024 * 1024 / 2 / memTableNumber // The memory table size is currently capped at maxMemTableSize-1 due to a // known bug in the pebble where maxMemTableSize is not recognized as a @@ -243,12 +243,16 @@ func New(file string, cache int, handles int, namespace string, readonly bool) ( // Note, there may have more than two memory tables in the system. MemTableSize: uint64(memTableSize), - // MemTableStopWritesThreshold places a hard limit on the size + // MemTableStopWritesThreshold places a hard limit on the number // of the existent MemTables(including the frozen one). + // // Note, this must be the number of tables not the size of all memtables // according to https://github.com/cockroachdb/pebble/blob/master/options.go#L738-L742 // and to https://github.com/cockroachdb/pebble/blob/master/db.go#L1892-L1903. - MemTableStopWritesThreshold: memTableLimit, + // + // MemTableStopWritesThreshold is set to twice the maximum number of + // allowed memtables to accommodate temporary spikes. + MemTableStopWritesThreshold: memTableNumber * 2, // The default compaction concurrency(1 thread), // Here use all available CPUs for faster compaction. @@ -296,6 +300,16 @@ func New(file string, cache int, handles int, namespace string, readonly bool) ( // debt will be less than 1GB, but with more frequent compactions scheduled. L0CompactionThreshold: 2, } + // These two settings define the conditions under which compaction concurrency + // is increased. Specifically, one additional compaction job will be enabled when: + // - there is one more overlapping sub-level0; + // - there is an additional 512 MB of compaction debt; + // + // The maximum concurrency is still capped by MaxConcurrentCompactions, but with + // these settings compactions can scale up more readily. 
+ opt.Experimental.L0CompactionConcurrency = 1 + opt.Experimental.CompactionDebtConcurrency = 1 << 28 // 256MB + // Open the db and recover any potential corruptions innerDB, err := pebble.Open(file, opt) if err != nil { From af47d9b472000d3ddf3d89b59ad75fb4c73c06ba Mon Sep 17 00:00:00 2001 From: Snezhkko Date: Mon, 8 Dec 2025 16:02:24 +0200 Subject: [PATCH 173/277] p2p/nat: fix err shadowing in UPnP addAnyPortMapping (#33355) The random-port retry loop in addAnyPortMapping shadowed the err variable, causing the function to return (0, nil) when all attempts failed. This change removes the shadowing and preserves the last error across both the fixed-port and random-port retries, ensuring failures are reported to callers correctly. --- p2p/nat/natupnp.go | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/p2p/nat/natupnp.go b/p2p/nat/natupnp.go index d79677db55..b9570e561f 100644 --- a/p2p/nat/natupnp.go +++ b/p2p/nat/natupnp.go @@ -107,30 +107,30 @@ func (n *upnp) addAnyPortMapping(protocol string, extport, intport int, ip net.I }) } // For IGDv1 and v1 services we should first try to add with extport. + var lastErr error for i := 0; i < retryCount+1; i++ { - err := n.withRateLimit(func() error { + lastErr = n.withRateLimit(func() error { return n.client.AddPortMapping("", uint16(extport), protocol, uint16(intport), ip.String(), true, desc, lifetimeS) }) - if err == nil { + if lastErr == nil { return uint16(extport), nil } - log.Debug("Failed to add port mapping", "protocol", protocol, "extport", extport, "intport", intport, "err", err) + log.Debug("Failed to add port mapping", "protocol", protocol, "extport", extport, "intport", intport, "err", lastErr) } // If above fails, we retry with a random port. // We retry several times because of possible port conflicts. 
- var err error for i := 0; i < randomCount; i++ { extport = n.randomPort() - err := n.withRateLimit(func() error { + lastErr = n.withRateLimit(func() error { return n.client.AddPortMapping("", uint16(extport), protocol, uint16(intport), ip.String(), true, desc, lifetimeS) }) - if err == nil { + if lastErr == nil { return uint16(extport), nil } - log.Debug("Failed to add random port mapping", "protocol", protocol, "extport", extport, "intport", intport, "err", err) + log.Debug("Failed to add random port mapping", "protocol", protocol, "extport", extport, "intport", intport, "err", lastErr) } - return 0, err + return 0, lastErr } func (n *upnp) randomPort() int { From 31f9c9ff75af85be24b386ab28399206467f2586 Mon Sep 17 00:00:00 2001 From: cui Date: Tue, 9 Dec 2025 00:40:59 +0800 Subject: [PATCH 174/277] common/bitutil: deprecate XORBytes in favor of stdlib crypto/subtle (#33331) XORBytes was added to package crypto/subtle in Go 1.20, and it's faster than our bitutil.XORBytes. There is only one use of this function across go-ethereum so we can simply deprecate the custom implementation. --------- Co-authored-by: Felix Lange --- common/bitutil/bitutil.go | 49 +++++++--------------------------- common/bitutil/bitutil_test.go | 16 +++++++++-- p2p/rlpx/rlpx.go | 4 +-- 3 files changed, 26 insertions(+), 43 deletions(-) diff --git a/common/bitutil/bitutil.go b/common/bitutil/bitutil.go index a18a6d18ee..578da1cf49 100644 --- a/common/bitutil/bitutil.go +++ b/common/bitutil/bitutil.go @@ -8,6 +8,7 @@ package bitutil import ( + "crypto/subtle" "runtime" "unsafe" ) @@ -17,46 +18,16 @@ const supportsUnaligned = runtime.GOARCH == "386" || runtime.GOARCH == "amd64" | // XORBytes xors the bytes in a and b. The destination is assumed to have enough // space. Returns the number of bytes xor'd. +// +// If dst does not have length at least n, +// XORBytes panics without writing anything to dst. 
+// +// dst and x or y may overlap exactly or not at all, +// otherwise XORBytes may panic. +// +// Deprecated: use crypto/subtle.XORBytes func XORBytes(dst, a, b []byte) int { - if supportsUnaligned { - return fastXORBytes(dst, a, b) - } - return safeXORBytes(dst, a, b) -} - -// fastXORBytes xors in bulk. It only works on architectures that support -// unaligned read/writes. -func fastXORBytes(dst, a, b []byte) int { - n := len(a) - if len(b) < n { - n = len(b) - } - w := n / wordSize - if w > 0 { - dw := *(*[]uintptr)(unsafe.Pointer(&dst)) - aw := *(*[]uintptr)(unsafe.Pointer(&a)) - bw := *(*[]uintptr)(unsafe.Pointer(&b)) - for i := 0; i < w; i++ { - dw[i] = aw[i] ^ bw[i] - } - } - for i := n - n%wordSize; i < n; i++ { - dst[i] = a[i] ^ b[i] - } - return n -} - -// safeXORBytes xors one by one. It works on all architectures, independent if -// it supports unaligned read/writes or not. -func safeXORBytes(dst, a, b []byte) int { - n := len(a) - if len(b) < n { - n = len(b) - } - for i := 0; i < n; i++ { - dst[i] = a[i] ^ b[i] - } - return n + return subtle.XORBytes(dst, a, b) } // ANDBytes ands the bytes in a and b. The destination is assumed to have enough diff --git a/common/bitutil/bitutil_test.go b/common/bitutil/bitutil_test.go index 12f3fe24a6..1748029794 100644 --- a/common/bitutil/bitutil_test.go +++ b/common/bitutil/bitutil_test.go @@ -29,7 +29,7 @@ func TestXOR(t *testing.T) { d2 := make([]byte, 1023+alignD)[alignD:] XORBytes(d1, p, q) - safeXORBytes(d2, p, q) + naiveXOR(d2, p, q) if !bytes.Equal(d1, d2) { t.Error("not equal", d1, d2) } @@ -38,6 +38,18 @@ func TestXOR(t *testing.T) { } } +// naiveXOR xors bytes one by one. +func naiveXOR(dst, a, b []byte) int { + n := len(a) + if len(b) < n { + n = len(b) + } + for i := 0; i < n; i++ { + dst[i] = a[i] ^ b[i] + } + return n +} + // Tests that bitwise AND works for various alignments. 
func TestAND(t *testing.T) { for alignP := 0; alignP < 2; alignP++ { @@ -134,7 +146,7 @@ func benchmarkBaseXOR(b *testing.B, size int) { p, q := make([]byte, size), make([]byte, size) for i := 0; i < b.N; i++ { - safeXORBytes(p, p, q) + naiveXOR(p, p, q) } } diff --git a/p2p/rlpx/rlpx.go b/p2p/rlpx/rlpx.go index c074534d4d..0dc4ecbe2d 100644 --- a/p2p/rlpx/rlpx.go +++ b/p2p/rlpx/rlpx.go @@ -24,6 +24,7 @@ import ( "crypto/ecdsa" "crypto/hmac" "crypto/rand" + "crypto/subtle" "encoding/binary" "errors" "fmt" @@ -33,7 +34,6 @@ import ( "net" "time" - "github.com/ethereum/go-ethereum/common/bitutil" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto/ecies" "github.com/ethereum/go-ethereum/rlp" @@ -677,6 +677,6 @@ func exportPubkey(pub *ecies.PublicKey) []byte { func xor(one, other []byte) (xor []byte) { xor = make([]byte, len(one)) - bitutil.XORBytes(xor, one, other) + subtle.XORBytes(xor, one, other) return xor } From 66134b35dfde8cc1867d5f70756d8f07332e7ed3 Mon Sep 17 00:00:00 2001 From: Francisco Giordano Date: Mon, 8 Dec 2025 13:45:40 -0300 Subject: [PATCH 175/277] core/vm: fix PC increment for EIP-8024 opcodes (#33361) The EIP says to increment PC by 2 _instead of_ the standard increment by 1. The opcode handlers added in #33095 result in incrementing PC by 3, because they ignored the increment already present in `interpreter.go`. Does this need to be better specified in the EIP? I've added a [new test case](https://github.com/ethereum/EIPs/pull/10859) for it anyway. Found by @0xriptide. --- core/vm/instructions.go | 6 +++--- core/vm/instructions_test.go | 12 ++++++++---- 2 files changed, 11 insertions(+), 7 deletions(-) diff --git a/core/vm/instructions.go b/core/vm/instructions.go index 29f1f79c49..6b04a2daff 100644 --- a/core/vm/instructions.go +++ b/core/vm/instructions.go @@ -964,7 +964,7 @@ func opDupN(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) { //The n‘th stack item is duplicated at the top of the stack. 
scope.Stack.push(scope.Stack.Back(n - 1)) - *pc += 2 + *pc += 1 return nil, nil } @@ -993,7 +993,7 @@ func opSwapN(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) { indexTop := scope.Stack.len() - 1 indexN := scope.Stack.len() - 1 - n scope.Stack.data[indexTop], scope.Stack.data[indexN] = scope.Stack.data[indexN], scope.Stack.data[indexTop] - *pc += 2 + *pc += 1 return nil, nil } @@ -1025,7 +1025,7 @@ func opExchange(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) { indexN := scope.Stack.len() - 1 - n indexM := scope.Stack.len() - 1 - m scope.Stack.data[indexN], scope.Stack.data[indexM] = scope.Stack.data[indexM], scope.Stack.data[indexN] - *pc += 2 + *pc += 1 return nil, nil } diff --git a/core/vm/instructions_test.go b/core/vm/instructions_test.go index 0f91a205f5..f38da7fb22 100644 --- a/core/vm/instructions_test.go +++ b/core/vm/instructions_test.go @@ -1107,6 +1107,11 @@ func TestEIP8024_Execution(t *testing.T) { codeHex: "e8", // no operand wantErr: true, }, + { + name: "PC_INCREMENT", + codeHex: "600060006000e80115", + wantVals: []uint64{1, 0, 0}, + }, } for _, tc := range tests { @@ -1123,17 +1128,15 @@ func TestEIP8024_Execution(t *testing.T) { return case 0x60: _, err = opPush1(&pc, evm, scope) - pc++ case 0x80: dup1 := makeDup(1) _, err = dup1(&pc, evm, scope) - pc++ case 0x56: _, err = opJump(&pc, evm, scope) - pc++ case 0x5b: _, err = opJumpdest(&pc, evm, scope) - pc++ + case 0x15: + _, err = opIszero(&pc, evm, scope) case 0xe6: _, err = opDupN(&pc, evm, scope) case 0xe7: @@ -1143,6 +1146,7 @@ func TestEIP8024_Execution(t *testing.T) { default: err = &ErrInvalidOpCode{opcode: OpCode(op)} } + pc++ } if tc.wantErr { if err == nil { From 228933a6606936ead106b34377c208e5a1b36632 Mon Sep 17 00:00:00 2001 From: rjl493456442 Date: Tue, 9 Dec 2025 05:49:57 +0800 Subject: [PATCH 176/277] eth/downloader: keep current syncmode in downloader only (#33157) This moves the tracking of the current syncmode into the downloader, fixing an issue 
where the syncmode being requested through the engine API could go out-of-sync with the actual mode being performed by downloader. Fixes #32629 --------- Co-authored-by: Felix Lange --- eth/backend.go | 26 ------- eth/catalyst/api.go | 8 +-- eth/downloader/beacondevsync.go | 4 +- eth/downloader/beaconsync.go | 48 ++----------- eth/downloader/downloader.go | 28 ++++++-- eth/downloader/downloader_test.go | 39 ++++++----- eth/downloader/syncmode.go | 111 ++++++++++++++++++++++++++++++ eth/handler.go | 51 ++------------ eth/handler_eth_test.go | 9 ++- eth/handler_test.go | 8 +-- eth/sync_test.go | 14 ++-- eth/syncer/syncer.go | 6 +- 12 files changed, 192 insertions(+), 160 deletions(-) create mode 100644 eth/downloader/syncmode.go diff --git a/eth/backend.go b/eth/backend.go index 95ae9d4a41..cae2aabe30 100644 --- a/eth/backend.go +++ b/eth/backend.go @@ -592,29 +592,3 @@ func (s *Ethereum) Stop() error { return nil } - -// SyncMode retrieves the current sync mode, either explicitly set, or derived -// from the chain status. -func (s *Ethereum) SyncMode() ethconfig.SyncMode { - // If we're in snap sync mode, return that directly - if s.handler.snapSync.Load() { - return ethconfig.SnapSync - } - // We are probably in full sync, but we might have rewound to before the - // snap sync pivot, check if we should re-enable snap sync. - head := s.blockchain.CurrentBlock() - if pivot := rawdb.ReadLastPivotNumber(s.chainDb); pivot != nil { - if head.Number.Uint64() < *pivot { - return ethconfig.SnapSync - } - } - // We are in a full sync, but the associated head state is missing. To complete - // the head state, forcefully rerun the snap sync. Note it doesn't mean the - // persistent state is corrupted, just mismatch with the head block. 
- if !s.blockchain.HasState(head.Root) { - log.Info("Reenabled snap sync as chain is stateless") - return ethconfig.SnapSync - } - // Nope, we're really full syncing - return ethconfig.FullSync -} diff --git a/eth/catalyst/api.go b/eth/catalyst/api.go index 0386bac556..d6d3f57936 100644 --- a/eth/catalyst/api.go +++ b/eth/catalyst/api.go @@ -270,7 +270,7 @@ func (api *ConsensusAPI) forkchoiceUpdated(update engine.ForkchoiceStateV1, payl } } log.Info("Forkchoice requested sync to new head", context...) - if err := api.eth.Downloader().BeaconSync(api.eth.SyncMode(), header, finalized); err != nil { + if err := api.eth.Downloader().BeaconSync(header, finalized); err != nil { return engine.STATUS_SYNCING, err } return engine.STATUS_SYNCING, nil @@ -764,7 +764,7 @@ func (api *ConsensusAPI) newPayload(params engine.ExecutableData, versionedHashe // tries to make it import a block. That should be denied as pushing something // into the database directly will conflict with the assumptions of snap sync // that it has an empty db that it can fill itself. - if api.eth.SyncMode() != ethconfig.FullSync { + if api.eth.Downloader().ConfigSyncMode() == ethconfig.SnapSync { return api.delayPayloadImport(block), nil } if !api.eth.BlockChain().HasBlockAndState(block.ParentHash(), block.NumberU64()-1) { @@ -812,7 +812,7 @@ func (api *ConsensusAPI) delayPayloadImport(block *types.Block) engine.PayloadSt // Although we don't want to trigger a sync, if there is one already in // progress, try to extend it with the current payload request to relieve // some strain from the forkchoice update. 
- err := api.eth.Downloader().BeaconExtend(api.eth.SyncMode(), block.Header()) + err := api.eth.Downloader().BeaconExtend(block.Header()) if err == nil { log.Debug("Payload accepted for sync extension", "number", block.NumberU64(), "hash", block.Hash()) return engine.PayloadStatusV1{Status: engine.SYNCING} @@ -821,7 +821,7 @@ func (api *ConsensusAPI) delayPayloadImport(block *types.Block) engine.PayloadSt // payload as non-integratable on top of the existing sync. We'll just // have to rely on the beacon client to forcefully update the head with // a forkchoice update request. - if api.eth.SyncMode() == ethconfig.FullSync { + if api.eth.Downloader().ConfigSyncMode() == ethconfig.FullSync { // In full sync mode, failure to import a well-formed block can only mean // that the parent state is missing and the syncer rejected extending the // current cycle with the new payload. diff --git a/eth/downloader/beacondevsync.go b/eth/downloader/beacondevsync.go index 03f17b1a52..52e43f86b4 100644 --- a/eth/downloader/beacondevsync.go +++ b/eth/downloader/beacondevsync.go @@ -33,14 +33,14 @@ import ( // Note, this must not be used in live code. If the forkchcoice endpoint where // to use this instead of giving us the payload first, then essentially nobody // in the network would have the block yet that we'd attempt to retrieve. 
-func (d *Downloader) BeaconDevSync(mode SyncMode, header *types.Header) error { +func (d *Downloader) BeaconDevSync(header *types.Header) error { // Be very loud that this code should not be used in a live node log.Warn("----------------------------------") log.Warn("Beacon syncing with hash as target", "number", header.Number, "hash", header.Hash()) log.Warn("This is unhealthy for a live node!") log.Warn("This is incompatible with the consensus layer!") log.Warn("----------------------------------") - return d.BeaconSync(mode, header, header) + return d.BeaconSync(header, header) } // GetHeader tries to retrieve the header with a given hash from a random peer. diff --git a/eth/downloader/beaconsync.go b/eth/downloader/beaconsync.go index 12b74a1ba9..405643e576 100644 --- a/eth/downloader/beaconsync.go +++ b/eth/downloader/beaconsync.go @@ -34,7 +34,6 @@ import ( // directed by the skeleton sync's head/tail events. type beaconBackfiller struct { downloader *Downloader // Downloader to direct via this callback implementation - syncMode SyncMode // Sync mode to use for backfilling the skeleton chains success func() // Callback to run on successful sync cycle completion filling bool // Flag whether the downloader is backfilling or not filled *types.Header // Last header filled by the last terminated sync loop @@ -92,7 +91,6 @@ func (b *beaconBackfiller) resume() { b.filling = true b.filled = nil b.started = make(chan struct{}) - mode := b.syncMode b.lock.Unlock() // Start the backfilling on its own thread since the downloader does not have @@ -107,7 +105,7 @@ func (b *beaconBackfiller) resume() { }() // If the downloader fails, report an error as in beacon chain mode there // should be no errors as long as the chain we're syncing to is valid. 
- if err := b.downloader.synchronise(mode, b.started); err != nil { + if err := b.downloader.synchronise(b.started); err != nil { log.Error("Beacon backfilling failed", "err", err) return } @@ -119,27 +117,6 @@ func (b *beaconBackfiller) resume() { }() } -// setMode updates the sync mode from the current one to the requested one. If -// there's an active sync in progress, it will be cancelled and restarted. -func (b *beaconBackfiller) setMode(mode SyncMode) { - // Update the old sync mode and track if it was changed - b.lock.Lock() - oldMode := b.syncMode - updated := oldMode != mode - filling := b.filling - b.syncMode = mode - b.lock.Unlock() - - // If the sync mode was changed mid-sync, restart. This should never ever - // really happen, we just handle it to detect programming errors. - if !updated || !filling { - return - } - log.Error("Downloader sync mode changed mid-run", "old", oldMode.String(), "new", mode.String()) - b.suspend() - b.resume() -} - // SetBadBlockCallback sets the callback to run when a bad block is hit by the // block processor. This method is not thread safe and should be set only once // on startup before system events are fired. @@ -153,8 +130,8 @@ func (d *Downloader) SetBadBlockCallback(onBadBlock badBlockFn) { // // Internally backfilling and state sync is done the same way, but the header // retrieval and scheduling is replaced. -func (d *Downloader) BeaconSync(mode SyncMode, head *types.Header, final *types.Header) error { - return d.beaconSync(mode, head, final, true) +func (d *Downloader) BeaconSync(head *types.Header, final *types.Header) error { + return d.beaconSync(head, final, true) } // BeaconExtend is an optimistic version of BeaconSync, where an attempt is made @@ -163,8 +140,8 @@ func (d *Downloader) BeaconSync(mode SyncMode, head *types.Header, final *types. // // This is useful if a beacon client is feeding us large chunks of payloads to run, // but is not setting the head after each. 
-func (d *Downloader) BeaconExtend(mode SyncMode, head *types.Header) error { - return d.beaconSync(mode, head, nil, false) +func (d *Downloader) BeaconExtend(head *types.Header) error { + return d.beaconSync(head, nil, false) } // beaconSync is the post-merge version of the chain synchronization, where the @@ -173,20 +150,9 @@ func (d *Downloader) BeaconExtend(mode SyncMode, head *types.Header) error { // // Internally backfilling and state sync is done the same way, but the header // retrieval and scheduling is replaced. -func (d *Downloader) beaconSync(mode SyncMode, head *types.Header, final *types.Header, force bool) error { - // When the downloader starts a sync cycle, it needs to be aware of the sync - // mode to use (full, snap). To keep the skeleton chain oblivious, inject the - // mode into the backfiller directly. - // - // Super crazy dangerous type cast. Should be fine (TM), we're only using a - // different backfiller implementation for skeleton tests. - d.skeleton.filler.(*beaconBackfiller).setMode(mode) - +func (d *Downloader) beaconSync(head *types.Header, final *types.Header, force bool) error { // Signal the skeleton sync to switch to a new head, however it wants - if err := d.skeleton.Sync(head, final, force); err != nil { - return err - } - return nil + return d.skeleton.Sync(head, final, force) } // findBeaconAncestor tries to locate the common ancestor link of the local chain diff --git a/eth/downloader/downloader.go b/eth/downloader/downloader.go index 09837a3045..020dd7314b 100644 --- a/eth/downloader/downloader.go +++ b/eth/downloader/downloader.go @@ -97,8 +97,9 @@ type headerTask struct { } type Downloader struct { - mode atomic.Uint32 // Synchronisation mode defining the strategy used (per sync cycle), use d.getMode() to get the SyncMode - mux *event.TypeMux // Event multiplexer to announce sync operation events + mode atomic.Uint32 // Synchronisation mode defining the strategy used (per sync cycle), use d.getMode() to get the SyncMode 
+ moder *syncModer // Sync mode management, deliver the appropriate sync mode choice for each cycle + mux *event.TypeMux // Event multiplexer to announce sync operation events queue *queue // Scheduler for selecting the hashes to download peers *peerSet // Set of active peers from which download can proceed @@ -165,6 +166,9 @@ type BlockChain interface { // HasHeader verifies a header's presence in the local chain. HasHeader(common.Hash, uint64) bool + // HasState checks if state trie is fully present in the database or not. + HasState(root common.Hash) bool + // GetHeaderByHash retrieves a header from the local chain. GetHeaderByHash(common.Hash) *types.Header @@ -221,10 +225,11 @@ type BlockChain interface { } // New creates a new downloader to fetch hashes and blocks from remote peers. -func New(stateDb ethdb.Database, mux *event.TypeMux, chain BlockChain, dropPeer peerDropFn, success func()) *Downloader { +func New(stateDb ethdb.Database, mode ethconfig.SyncMode, mux *event.TypeMux, chain BlockChain, dropPeer peerDropFn, success func()) *Downloader { cutoffNumber, cutoffHash := chain.HistoryPruningCutoff() dl := &Downloader{ stateDB: stateDb, + moder: newSyncModer(mode, chain, stateDb), mux: mux, queue: newQueue(blockCacheMaxItems, blockCacheInitialItems), peers: newPeerSet(), @@ -331,7 +336,7 @@ func (d *Downloader) UnregisterPeer(id string) error { // synchronise will select the peer and use it for synchronising. If an empty string is given // it will use the best peer possible and synchronize if its TD is higher than our own. If any of the // checks fail an error will be returned. This method is synchronous -func (d *Downloader) synchronise(mode SyncMode, beaconPing chan struct{}) error { +func (d *Downloader) synchronise(beaconPing chan struct{}) (err error) { // The beacon header syncer is async. It will start this synchronization and // will continue doing other tasks. 
However, if synchronization needs to be // cancelled, the syncer needs to know if we reached the startup point (and @@ -356,6 +361,13 @@ func (d *Downloader) synchronise(mode SyncMode, beaconPing chan struct{}) error if d.notified.CompareAndSwap(false, true) { log.Info("Block synchronisation started") } + mode := d.moder.get() + defer func() { + if err == nil && mode == ethconfig.SnapSync { + d.moder.disableSnap() + log.Info("Disabled snap-sync after the initial sync cycle") + } + }() if mode == ethconfig.SnapSync { // Snap sync will directly modify the persistent state, making the entire // trie database unusable until the state is fully synced. To prevent any @@ -399,6 +411,7 @@ func (d *Downloader) synchronise(mode SyncMode, beaconPing chan struct{}) error // Atomically set the requested sync mode d.mode.Store(uint32(mode)) + defer d.mode.Store(0) if beaconPing != nil { close(beaconPing) @@ -406,10 +419,17 @@ func (d *Downloader) synchronise(mode SyncMode, beaconPing chan struct{}) error return d.syncToHead() } +// getMode returns the sync mode used within current cycle. func (d *Downloader) getMode() SyncMode { return SyncMode(d.mode.Load()) } +// ConfigSyncMode returns the sync mode configured for the node. +// The actual running sync mode can differ from this. +func (d *Downloader) ConfigSyncMode() SyncMode { + return d.moder.get() +} + // syncToHead starts a block synchronization based on the hash chain from // the specified head hash. 
func (d *Downloader) syncToHead() (err error) { diff --git a/eth/downloader/downloader_test.go b/eth/downloader/downloader_test.go index c1a31d6e1c..7fa2522a3d 100644 --- a/eth/downloader/downloader_test.go +++ b/eth/downloader/downloader_test.go @@ -30,6 +30,7 @@ import ( "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/eth/ethconfig" "github.com/ethereum/go-ethereum/eth/protocols/eth" "github.com/ethereum/go-ethereum/eth/protocols/snap" "github.com/ethereum/go-ethereum/event" @@ -49,12 +50,12 @@ type downloadTester struct { } // newTester creates a new downloader test mocker. -func newTester(t *testing.T) *downloadTester { - return newTesterWithNotification(t, nil) +func newTester(t *testing.T, mode ethconfig.SyncMode) *downloadTester { + return newTesterWithNotification(t, mode, nil) } // newTesterWithNotification creates a new downloader test mocker. -func newTesterWithNotification(t *testing.T, success func()) *downloadTester { +func newTesterWithNotification(t *testing.T, mode ethconfig.SyncMode, success func()) *downloadTester { db, err := rawdb.Open(rawdb.NewMemoryDatabase(), rawdb.OpenOptions{}) if err != nil { panic(err) @@ -75,7 +76,7 @@ func newTesterWithNotification(t *testing.T, success func()) *downloadTester { chain: chain, peers: make(map[string]*downloadTesterPeer), } - tester.downloader = New(db, new(event.TypeMux), tester.chain, tester.dropPeer, success) + tester.downloader = New(db, mode, new(event.TypeMux), tester.chain, tester.dropPeer, success) return tester } @@ -393,7 +394,7 @@ func TestCanonicalSynchronisation68Snap(t *testing.T) { testCanonSync(t, eth.ETH func testCanonSync(t *testing.T, protocol uint, mode SyncMode) { success := make(chan struct{}) - tester := newTesterWithNotification(t, func() { + tester := newTesterWithNotification(t, mode, func() { close(success) }) defer tester.terminate() @@ -403,7 +404,7 @@ func 
testCanonSync(t *testing.T, protocol uint, mode SyncMode) { tester.newPeer("peer", protocol, chain.blocks[1:]) // Synchronise with the peer and make sure all relevant data was retrieved - if err := tester.downloader.BeaconSync(mode, chain.blocks[len(chain.blocks)-1].Header(), nil); err != nil { + if err := tester.downloader.BeaconSync(chain.blocks[len(chain.blocks)-1].Header(), nil); err != nil { t.Fatalf("failed to beacon-sync chain: %v", err) } select { @@ -420,7 +421,7 @@ func TestThrottling68Full(t *testing.T) { testThrottling(t, eth.ETH68, FullSync) func TestThrottling68Snap(t *testing.T) { testThrottling(t, eth.ETH68, SnapSync) } func testThrottling(t *testing.T, protocol uint, mode SyncMode) { - tester := newTester(t) + tester := newTester(t, mode) defer tester.terminate() // Create a long block chain to download and the tester @@ -437,7 +438,7 @@ func testThrottling(t *testing.T, protocol uint, mode SyncMode) { // Start a synchronisation concurrently errc := make(chan error, 1) go func() { - errc <- tester.downloader.BeaconSync(mode, testChainBase.blocks[len(testChainBase.blocks)-1].Header(), nil) + errc <- tester.downloader.BeaconSync(testChainBase.blocks[len(testChainBase.blocks)-1].Header(), nil) }() // Iteratively take some blocks, always checking the retrieval count for { @@ -502,7 +503,7 @@ func testCancel(t *testing.T, protocol uint, mode SyncMode) { success := func() { close(complete) } - tester := newTesterWithNotification(t, success) + tester := newTesterWithNotification(t, mode, success) defer tester.terminate() chain := testChainBase.shorten(MaxHeaderFetch) @@ -514,7 +515,7 @@ func testCancel(t *testing.T, protocol uint, mode SyncMode) { t.Errorf("download queue not idle") } // Synchronise with the peer, but cancel afterwards - if err := tester.downloader.BeaconSync(mode, chain.blocks[len(chain.blocks)-1].Header(), nil); err != nil { + if err := tester.downloader.BeaconSync(chain.blocks[len(chain.blocks)-1].Header(), nil); err != nil { 
t.Fatalf("failed to synchronise blocks: %v", err) } <-complete @@ -534,7 +535,7 @@ func testMultiProtoSync(t *testing.T, protocol uint, mode SyncMode) { success := func() { close(complete) } - tester := newTesterWithNotification(t, success) + tester := newTesterWithNotification(t, mode, success) defer tester.terminate() // Create a small enough block chain to download @@ -543,7 +544,7 @@ func testMultiProtoSync(t *testing.T, protocol uint, mode SyncMode) { // Create peers of every type tester.newPeer("peer 68", eth.ETH68, chain.blocks[1:]) - if err := tester.downloader.BeaconSync(mode, chain.blocks[len(chain.blocks)-1].Header(), nil); err != nil { + if err := tester.downloader.BeaconSync(chain.blocks[len(chain.blocks)-1].Header(), nil); err != nil { t.Fatalf("failed to start beacon sync: %v", err) } select { @@ -570,7 +571,7 @@ func TestEmptyShortCircuit68Snap(t *testing.T) { testEmptyShortCircuit(t, eth.ET func testEmptyShortCircuit(t *testing.T, protocol uint, mode SyncMode) { success := make(chan struct{}) - tester := newTesterWithNotification(t, func() { + tester := newTesterWithNotification(t, mode, func() { close(success) }) defer tester.terminate() @@ -588,7 +589,7 @@ func testEmptyShortCircuit(t *testing.T, protocol uint, mode SyncMode) { receiptsHave.Add(int32(len(headers))) } - if err := tester.downloader.BeaconSync(mode, chain.blocks[len(chain.blocks)-1].Header(), nil); err != nil { + if err := tester.downloader.BeaconSync(chain.blocks[len(chain.blocks)-1].Header(), nil); err != nil { t.Fatalf("failed to synchronise blocks: %v", err) } select { @@ -650,7 +651,7 @@ func testBeaconSync(t *testing.T, protocol uint, mode SyncMode) { for _, c := range cases { t.Run(c.name, func(t *testing.T) { success := make(chan struct{}) - tester := newTesterWithNotification(t, func() { + tester := newTesterWithNotification(t, mode, func() { close(success) }) defer tester.terminate() @@ -662,7 +663,7 @@ func testBeaconSync(t *testing.T, protocol uint, mode SyncMode) { if 
c.local > 0 { tester.chain.InsertChain(chain.blocks[1 : c.local+1]) } - if err := tester.downloader.BeaconSync(mode, chain.blocks[len(chain.blocks)-1].Header(), nil); err != nil { + if err := tester.downloader.BeaconSync(chain.blocks[len(chain.blocks)-1].Header(), nil); err != nil { t.Fatalf("Failed to beacon sync chain %v %v", c.name, err) } select { @@ -685,7 +686,7 @@ func TestSyncProgress68Snap(t *testing.T) { testSyncProgress(t, eth.ETH68, SnapS func testSyncProgress(t *testing.T, protocol uint, mode SyncMode) { success := make(chan struct{}) - tester := newTesterWithNotification(t, func() { + tester := newTesterWithNotification(t, mode, func() { success <- struct{}{} }) defer tester.terminate() @@ -700,7 +701,7 @@ func testSyncProgress(t *testing.T, protocol uint, mode SyncMode) { faultyPeer.withholdBodies[header.Hash()] = struct{}{} } - if err := tester.downloader.BeaconSync(mode, chain.blocks[len(chain.blocks)/2-1].Header(), nil); err != nil { + if err := tester.downloader.BeaconSync(chain.blocks[len(chain.blocks)/2-1].Header(), nil); err != nil { t.Fatalf("failed to beacon-sync chain: %v", err) } select { @@ -716,7 +717,7 @@ func testSyncProgress(t *testing.T, protocol uint, mode SyncMode) { // Synchronise all the blocks and check continuation progress tester.newPeer("peer-full", protocol, chain.blocks[1:]) - if err := tester.downloader.BeaconSync(mode, chain.blocks[len(chain.blocks)-1].Header(), nil); err != nil { + if err := tester.downloader.BeaconSync(chain.blocks[len(chain.blocks)-1].Header(), nil); err != nil { t.Fatalf("failed to beacon-sync chain: %v", err) } startingBlock := uint64(len(chain.blocks)/2 - 1) diff --git a/eth/downloader/syncmode.go b/eth/downloader/syncmode.go new file mode 100644 index 0000000000..7983d39e3a --- /dev/null +++ b/eth/downloader/syncmode.go @@ -0,0 +1,111 @@ +// Copyright 2025 The go-ethereum Authors +// This file is part of the go-ethereum library. 
+// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package downloader + +import ( + "sync" + + "github.com/ethereum/go-ethereum/core/rawdb" + "github.com/ethereum/go-ethereum/eth/ethconfig" + "github.com/ethereum/go-ethereum/ethdb" + "github.com/ethereum/go-ethereum/log" +) + +// syncModer is responsible for managing the downloader's sync mode. It takes the +// user's preference at startup and then determines the appropriate sync mode +// based on the current chain status. +type syncModer struct { + mode ethconfig.SyncMode + chain BlockChain + disk ethdb.KeyValueReader + lock sync.Mutex +} + +func newSyncModer(mode ethconfig.SyncMode, chain BlockChain, disk ethdb.KeyValueReader) *syncModer { + if mode == ethconfig.FullSync { + // The database seems empty as the current block is the genesis. Yet the snap + // block is ahead, so snap sync was enabled for this node at a certain point. + // The scenarios where this can happen is + // * if the user manually (or via a bad block) rolled back a snap sync node + // below the sync point. + // * the last snap sync is not finished while user specifies a full sync this + // time. But we don't have any recent state for full sync. + // In these cases however it's safe to reenable snap sync. 
+ fullBlock, snapBlock := chain.CurrentBlock(), chain.CurrentSnapBlock() + if fullBlock.Number.Uint64() == 0 && snapBlock.Number.Uint64() > 0 { + mode = ethconfig.SnapSync + log.Warn("Switching from full-sync to snap-sync", "reason", "snap-sync incomplete") + } else if !chain.HasState(fullBlock.Root) { + mode = ethconfig.SnapSync + log.Warn("Switching from full-sync to snap-sync", "reason", "head state missing") + } else { + // Grant the full sync mode + log.Info("Enabled full-sync", "head", fullBlock.Number, "hash", fullBlock.Hash()) + } + } else { + head := chain.CurrentBlock() + if head.Number.Uint64() > 0 && chain.HasState(head.Root) { + mode = ethconfig.FullSync + log.Info("Switching from snap-sync to full-sync", "reason", "snap-sync complete") + } else { + // If snap sync was requested and our database is empty, grant it + log.Info("Enabled snap-sync", "head", head.Number, "hash", head.Hash()) + } + } + return &syncModer{ + mode: mode, + chain: chain, + disk: disk, + } +} + +// get retrieves the current sync mode, either explicitly set, or derived +// from the chain status. +func (m *syncModer) get() ethconfig.SyncMode { + m.lock.Lock() + defer m.lock.Unlock() + + // If we're in snap sync mode, return that directly + if m.mode == ethconfig.SnapSync { + return ethconfig.SnapSync + } + // We are probably in full sync, but we might have rewound to before the + // snap sync pivot, check if we should re-enable snap sync. + head := m.chain.CurrentBlock() + if pivot := rawdb.ReadLastPivotNumber(m.disk); pivot != nil { + if head.Number.Uint64() < *pivot { + log.Info("Reenabled snap-sync as chain is lagging behind the pivot", "head", head.Number, "pivot", pivot) + return ethconfig.SnapSync + } + } + // We are in a full sync, but the associated head state is missing. To complete + // the head state, forcefully rerun the snap sync. Note it doesn't mean the + // persistent state is corrupted, just mismatch with the head block. 
+ if !m.chain.HasState(head.Root) { + log.Info("Reenabled snap-sync as chain is stateless") + return ethconfig.SnapSync + } + // Nope, we're really full syncing + return ethconfig.FullSync +} + +// disableSnap disables the snap sync mode, usually it's called after a successful snap sync. +func (m *syncModer) disableSnap() { + m.lock.Lock() + m.mode = ethconfig.FullSync + m.lock.Unlock() +} diff --git a/eth/handler.go b/eth/handler.go index ff970e2ba6..4510dd32f0 100644 --- a/eth/handler.go +++ b/eth/handler.go @@ -111,9 +111,7 @@ type handlerConfig struct { type handler struct { nodeID enode.ID networkID uint64 - - snapSync atomic.Bool // Flag whether snap sync is enabled (gets disabled if we already have blocks) - synced atomic.Bool // Flag whether we're considered synchronised (enables transaction processing) + synced atomic.Bool // Flag whether we're considered synchronised (enables transaction processing) database ethdb.Database txpool txPool @@ -161,40 +159,13 @@ func newHandler(config *handlerConfig) (*handler, error) { handlerDoneCh: make(chan struct{}), handlerStartCh: make(chan struct{}), } - if config.Sync == ethconfig.FullSync { - // The database seems empty as the current block is the genesis. Yet the snap - // block is ahead, so snap sync was enabled for this node at a certain point. - // The scenarios where this can happen is - // * if the user manually (or via a bad block) rolled back a snap sync node - // below the sync point. - // * the last snap sync is not finished while user specifies a full sync this - // time. But we don't have any recent state for full sync. - // In these cases however it's safe to reenable snap sync. 
- fullBlock, snapBlock := h.chain.CurrentBlock(), h.chain.CurrentSnapBlock() - if fullBlock.Number.Uint64() == 0 && snapBlock.Number.Uint64() > 0 { - h.snapSync.Store(true) - log.Warn("Switch sync mode from full sync to snap sync", "reason", "snap sync incomplete") - } else if !h.chain.HasState(fullBlock.Root) { - h.snapSync.Store(true) - log.Warn("Switch sync mode from full sync to snap sync", "reason", "head state missing") - } - } else { - head := h.chain.CurrentBlock() - if head.Number.Uint64() > 0 && h.chain.HasState(head.Root) { - log.Info("Switch sync mode from snap sync to full sync", "reason", "snap sync complete") - } else { - // If snap sync was requested and our database is empty, grant it - h.snapSync.Store(true) - log.Info("Enabled snap sync", "head", head.Number, "hash", head.Hash()) - } - } + // Construct the downloader (long sync) + h.downloader = downloader.New(config.Database, config.Sync, h.eventMux, h.chain, h.removePeer, h.enableSyncedFeatures) + // If snap sync is requested but snapshots are disabled, fail loudly - if h.snapSync.Load() && (config.Chain.Snapshots() == nil && config.Chain.TrieDB().Scheme() == rawdb.HashScheme) { + if h.downloader.ConfigSyncMode() == ethconfig.SnapSync && (config.Chain.Snapshots() == nil && config.Chain.TrieDB().Scheme() == rawdb.HashScheme) { return nil, errors.New("snap sync not supported with snapshots disabled") } - // Construct the downloader (long sync) - h.downloader = downloader.New(config.Database, h.eventMux, h.chain, h.removePeer, h.enableSyncedFeatures) - fetchTx := func(peer string, hashes []common.Hash) error { p := h.peers.peer(peer) if p == nil { @@ -267,7 +238,7 @@ func (h *handler) runEthPeer(peer *eth.Peer, handler eth.Handler) error { return err } reject := false // reserved peer slots - if h.snapSync.Load() { + if h.downloader.ConfigSyncMode() == ethconfig.SnapSync { if snap == nil { // If we are running snap-sync, we want to reserve roughly half the peer // slots for peers supporting the 
snap protocol. @@ -544,15 +515,7 @@ func (h *handler) txBroadcastLoop() { // enableSyncedFeatures enables the post-sync functionalities when the initial // sync is finished. func (h *handler) enableSyncedFeatures() { - // Mark the local node as synced. h.synced.Store(true) - - // If we were running snap sync and it finished, disable doing another - // round on next sync cycle - if h.snapSync.Load() { - log.Info("Snap sync complete, auto disabling") - h.snapSync.Store(false) - } } // blockRangeState holds the state of the block range update broadcasting mechanism. @@ -590,7 +553,7 @@ func (h *handler) blockRangeLoop(st *blockRangeState) { if ev == nil { continue } - if _, ok := ev.Data.(downloader.StartEvent); ok && h.snapSync.Load() { + if _, ok := ev.Data.(downloader.StartEvent); ok && h.downloader.ConfigSyncMode() == ethconfig.SnapSync { h.blockRangeWhileSnapSyncing(st) } case <-st.headCh: diff --git a/eth/handler_eth_test.go b/eth/handler_eth_test.go index 058a0d5949..1343cae03e 100644 --- a/eth/handler_eth_test.go +++ b/eth/handler_eth_test.go @@ -232,7 +232,7 @@ func testRecvTransactions(t *testing.T, protocol uint) { t.Parallel() // Create a message handler, configure it to accept transactions and watch them - handler := newTestHandler() + handler := newTestHandler(ethconfig.FullSync) defer handler.close() handler.handler.synced.Store(true) // mark synced to accept transactions @@ -284,7 +284,7 @@ func testSendTransactions(t *testing.T, protocol uint) { t.Parallel() // Create a message handler and fill the pool with big transactions - handler := newTestHandler() + handler := newTestHandler(ethconfig.FullSync) defer handler.close() insert := make([]*types.Transaction, 100) @@ -365,13 +365,12 @@ func testTransactionPropagation(t *testing.T, protocol uint) { // Create a source handler to send transactions from and a number of sinks // to receive them. We need multiple sinks since a one-to-one peering would // broadcast all transactions without announcement. 
- source := newTestHandler() - source.handler.snapSync.Store(false) // Avoid requiring snap, otherwise some will be dropped below + source := newTestHandler(ethconfig.FullSync) defer source.close() sinks := make([]*testHandler, 10) for i := 0; i < len(sinks); i++ { - sinks[i] = newTestHandler() + sinks[i] = newTestHandler(ethconfig.FullSync) defer sinks[i].close() sinks[i].handler.synced.Store(true) // mark synced to accept transactions diff --git a/eth/handler_test.go b/eth/handler_test.go index b37e6227f4..312e5625ba 100644 --- a/eth/handler_test.go +++ b/eth/handler_test.go @@ -174,13 +174,13 @@ type testHandler struct { } // newTestHandler creates a new handler for testing purposes with no blocks. -func newTestHandler() *testHandler { - return newTestHandlerWithBlocks(0) +func newTestHandler(mode ethconfig.SyncMode) *testHandler { + return newTestHandlerWithBlocks(0, mode) } // newTestHandlerWithBlocks creates a new handler for testing purposes, with a // given number of initial blocks. 
-func newTestHandlerWithBlocks(blocks int) *testHandler { +func newTestHandlerWithBlocks(blocks int, mode ethconfig.SyncMode) *testHandler { // Create a database pre-initialize with a genesis block db := rawdb.NewMemoryDatabase() gspec := &core.Genesis{ @@ -200,7 +200,7 @@ func newTestHandlerWithBlocks(blocks int) *testHandler { Chain: chain, TxPool: txpool, Network: 1, - Sync: ethconfig.SnapSync, + Sync: mode, BloomCache: 1, }) handler.Start(1000) diff --git a/eth/sync_test.go b/eth/sync_test.go index dc295f2790..509b836f82 100644 --- a/eth/sync_test.go +++ b/eth/sync_test.go @@ -36,17 +36,11 @@ func testSnapSyncDisabling(t *testing.T, ethVer uint, snapVer uint) { t.Parallel() // Create an empty handler and ensure it's in snap sync mode - empty := newTestHandler() - if !empty.handler.snapSync.Load() { - t.Fatalf("snap sync disabled on pristine blockchain") - } + empty := newTestHandler(ethconfig.SnapSync) defer empty.close() // Create a full handler and ensure snap sync ends up disabled - full := newTestHandlerWithBlocks(1024) - if full.handler.snapSync.Load() { - t.Fatalf("snap sync not disabled on non-empty blockchain") - } + full := newTestHandlerWithBlocks(1024, ethconfig.SnapSync) defer full.close() // Sync up the two handlers via both `eth` and `snap` @@ -85,7 +79,7 @@ func testSnapSyncDisabling(t *testing.T, ethVer uint, snapVer uint) { time.Sleep(250 * time.Millisecond) // Check that snap sync was disabled - if err := empty.handler.downloader.BeaconSync(ethconfig.SnapSync, full.chain.CurrentBlock(), nil); err != nil { + if err := empty.handler.downloader.BeaconSync(full.chain.CurrentBlock(), nil); err != nil { t.Fatal("sync failed:", err) } // Downloader internally has to wait for a timer (3s) to be expired before @@ -96,7 +90,7 @@ func testSnapSyncDisabling(t *testing.T, ethVer uint, snapVer uint) { case <-timeout: t.Fatalf("snap sync not disabled after successful synchronisation") case <-time.After(100 * time.Millisecond): - if 
!empty.handler.snapSync.Load() { + if empty.handler.downloader.ConfigSyncMode() == ethconfig.FullSync { return } } diff --git a/eth/syncer/syncer.go b/eth/syncer/syncer.go index 83fe3ad230..c0d54b953b 100644 --- a/eth/syncer/syncer.go +++ b/eth/syncer/syncer.go @@ -130,7 +130,11 @@ func (s *Syncer) run() { break } if resync { - req.errc <- s.backend.Downloader().BeaconDevSync(ethconfig.FullSync, target) + if mode := s.backend.Downloader().ConfigSyncMode(); mode != ethconfig.FullSync { + req.errc <- fmt.Errorf("unsupported syncmode %v, please relaunch geth with --syncmode full", mode) + } else { + req.errc <- s.backend.Downloader().BeaconDevSync(target) + } } case <-ticker.C: From e58c785424f3f60cbeaff980f546a9d6f228d07b Mon Sep 17 00:00:00 2001 From: wit liu Date: Tue, 9 Dec 2025 05:58:26 +0800 Subject: [PATCH 177/277] build: fix check_generate not printing changed files (#33299) Fixes an issue where HashFolder skipped the root directory upon hitting the first file in the excludes list. This happened because the walk function returned SkipDir even for regular files. 
--- build/ci.go | 11 ++++++++--- internal/build/file.go | 13 +++++++------ 2 files changed, 15 insertions(+), 9 deletions(-) diff --git a/build/ci.go b/build/ci.go index e589cd2b40..abb7c4997f 100644 --- a/build/ci.go +++ b/build/ci.go @@ -343,7 +343,7 @@ func buildFlags(env build.Environment, staticLinking bool, buildTags []string) ( } ld = append(ld, "-extldflags", "'"+strings.Join(extld, " ")+"'") } - // TODO(gballet): revisit after the input api has been defined + // TODO(gballet): revisit after the input api has been defined if runtime.GOARCH == "wasm" { ld = append(ld, "-gcflags=all=-d=softfloat") } @@ -462,9 +462,14 @@ func doCheckGenerate() { ) pathList := []string{filepath.Join(protocPath, "bin"), protocGenGoPath, os.Getenv("PATH")} + excludes := []string{"tests/testdata", "build/cache", ".git"} + for i := range excludes { + excludes[i] = filepath.FromSlash(excludes[i]) + } + for _, mod := range goModules { // Compute the origin hashes of all the files - hashes, err := build.HashFolder(mod, []string{"tests/testdata", "build/cache", ".git"}) + hashes, err := build.HashFolder(mod, excludes) if err != nil { log.Fatal("Error computing hashes", "err", err) } @@ -474,7 +479,7 @@ func doCheckGenerate() { c.Dir = mod build.MustRun(c) // Check if generate file hashes have changed - generated, err := build.HashFolder(mod, []string{"tests/testdata", "build/cache", ".git"}) + generated, err := build.HashFolder(mod, excludes) if err != nil { log.Fatalf("Error re-computing hashes: %v", err) } diff --git a/internal/build/file.go b/internal/build/file.go index 2cd090c42c..b7c00eb842 100644 --- a/internal/build/file.go +++ b/internal/build/file.go @@ -21,20 +21,21 @@ import ( "io" "os" "path/filepath" - "sort" - "strings" + "slices" ) // HashFolder iterates all files under the given directory, computing the hash // of each. 
-func HashFolder(folder string, exlude []string) (map[string][32]byte, error) { +func HashFolder(folder string, excludes []string) (map[string][32]byte, error) { res := make(map[string][32]byte) err := filepath.WalkDir(folder, func(path string, d os.DirEntry, _ error) error { // Skip anything that's exluded or not a regular file - for _, skip := range exlude { - if strings.HasPrefix(path, filepath.FromSlash(skip)) { + // Skip anything that's excluded or not a regular file + if slices.Contains(excludes, path) { + if d.IsDir() { return filepath.SkipDir } + return nil } if !d.Type().IsRegular() { return nil @@ -71,6 +72,6 @@ func DiffHashes(a map[string][32]byte, b map[string][32]byte) []string { updates = append(updates, file) } } - sort.Strings(updates) + slices.Sort(updates) return updates } From 9a346873b8d3b8e7c1bec9e88370aac7a924a26d Mon Sep 17 00:00:00 2001 From: Ng Wei Han <47109095+weiihann@users.noreply.github.com> Date: Wed, 10 Dec 2025 11:33:59 +0800 Subject: [PATCH 178/277] core/state: fix incorrect contract code state metrics (#33376) ## Description This PR fixes incorrect contract code state metrics by ensuring duplicate codes are not counted towards the reported results. ## Rationale The contract code metrics don't consider database deduplication. The current implementation assumes that the results are only **slightly inaccurate**, but this is not true, especially for data collection efforts that started from the genesis block. 
--- core/blockchain.go | 19 +++++++++++++------ core/state/reader.go | 11 +++++++++++ core/state/state_sizer.go | 8 +++++--- core/state/state_sizer_test.go | 15 ++++++++------- core/state/statedb.go | 21 +++++++++++++-------- core/state/statedb_fuzz_test.go | 2 +- core/state/stateupdate.go | 24 ++++++++++++++++++++++-- 7 files changed, 73 insertions(+), 27 deletions(-) diff --git a/core/blockchain.go b/core/blockchain.go index 7fe39e2b65..ae92386dc2 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -1609,15 +1609,22 @@ func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types. if err := blockBatch.Write(); err != nil { log.Crit("Failed to write block into disk", "err", err) } - // Commit all cached state changes into underlying memory database. - root, stateUpdate, err := statedb.CommitWithUpdate(block.NumberU64(), bc.chainConfig.IsEIP158(block.Number()), bc.chainConfig.IsCancun(block.Number(), block.Time())) + + var ( + err error + root common.Hash + isEIP158 = bc.chainConfig.IsEIP158(block.Number()) + isCancun = bc.chainConfig.IsCancun(block.Number(), block.Time()) + ) + if bc.stateSizer == nil { + root, err = statedb.Commit(block.NumberU64(), isEIP158, isCancun) + } else { + root, err = statedb.CommitAndTrack(block.NumberU64(), isEIP158, isCancun, bc.stateSizer) + } if err != nil { return err } - // Emit the state update to the state sizestats if it's active - if bc.stateSizer != nil { - bc.stateSizer.Notify(stateUpdate) - } + // If node is running in path mode, skip explicit gc operation // which is unnecessary in this mode. if bc.triedb.Scheme() == rawdb.PathScheme { diff --git a/core/state/reader.go b/core/state/reader.go index 21e76c5b66..c912ca28da 100644 --- a/core/state/reader.go +++ b/core/state/reader.go @@ -40,6 +40,10 @@ import ( // ContractCodeReader defines the interface for accessing contract code. 
type ContractCodeReader interface { + // Has returns the flag indicating whether the contract code with + // specified address and hash exists or not. + Has(addr common.Address, codeHash common.Hash) bool + // Code retrieves a particular contract's code. // // - Returns nil code along with nil error if the requested contract code @@ -170,6 +174,13 @@ func (r *cachingCodeReader) CodeSize(addr common.Address, codeHash common.Hash) return len(code), nil } +// Has returns the flag indicating whether the contract code with +// specified address and hash exists or not. +func (r *cachingCodeReader) Has(addr common.Address, codeHash common.Hash) bool { + code, _ := r.Code(addr, codeHash) + return len(code) > 0 +} + // flatReader wraps a database state reader and is safe for concurrent access. type flatReader struct { reader database.StateReader diff --git a/core/state/state_sizer.go b/core/state/state_sizer.go index 636b158da6..3faa750906 100644 --- a/core/state/state_sizer.go +++ b/core/state/state_sizer.go @@ -243,12 +243,14 @@ func calSizeStats(update *stateUpdate) (SizeStats, error) { } } - // Measure code changes. Note that the reported contract code size may be slightly - // inaccurate due to database deduplication (code is stored by its hash). However, - // this deviation is negligible and acceptable for measurement purposes. 
+ codeExists := make(map[common.Hash]struct{}) for _, code := range update.codes { + if _, ok := codeExists[code.hash]; ok || code.exists { + continue + } stats.ContractCodes += 1 stats.ContractCodeBytes += codeKeySize + int64(len(code.blob)) + codeExists[code.hash] = struct{}{} } return stats, nil } diff --git a/core/state/state_sizer_test.go b/core/state/state_sizer_test.go index 65f652e424..b3203afd74 100644 --- a/core/state/state_sizer_test.go +++ b/core/state/state_sizer_test.go @@ -58,7 +58,7 @@ func TestSizeTracker(t *testing.T) { state.AddBalance(addr3, uint256.NewInt(3000), tracing.BalanceChangeUnspecified) state.SetNonce(addr3, 3, tracing.NonceChangeUnspecified) - currentRoot, _, err := state.CommitWithUpdate(1, true, false) + currentRoot, err := state.Commit(1, true, false) if err != nil { t.Fatalf("Failed to commit initial state: %v", err) } @@ -83,7 +83,7 @@ func TestSizeTracker(t *testing.T) { if i%3 == 0 { newState.SetCode(testAddr, []byte{byte(i), 0x60, 0x80, byte(i + 1), 0x52}, tracing.CodeChangeUnspecified) } - root, _, err := newState.CommitWithUpdate(blockNum, true, false) + root, err := newState.Commit(blockNum, true, false) if err != nil { t.Fatalf("Failed to commit state at block %d: %v", blockNum, err) } @@ -154,21 +154,22 @@ func TestSizeTracker(t *testing.T) { if i%3 == 0 { newState.SetCode(testAddr, []byte{byte(i), 0x60, 0x80, byte(i + 1), 0x52}, tracing.CodeChangeUnspecified) } - root, update, err := newState.CommitWithUpdate(blockNum, true, false) + ret, err := newState.commitAndFlush(blockNum, true, false, true) if err != nil { t.Fatalf("Failed to commit state at block %d: %v", blockNum, err) } - if err := tdb.Commit(root, false); err != nil { + tracker.Notify(ret) + + if err := tdb.Commit(ret.root, false); err != nil { t.Fatalf("Failed to commit trie at block %d: %v", blockNum, err) } - diff, err := calSizeStats(update) + diff, err := calSizeStats(ret) if err != nil { t.Fatalf("Failed to calculate size stats for block %d: %v", 
blockNum, err) } trackedUpdates = append(trackedUpdates, diff) - tracker.Notify(update) - currentRoot = root + currentRoot = ret.root } finalRoot := rawdb.ReadSnapshotRoot(db) diff --git a/core/state/statedb.go b/core/state/statedb.go index 73d4af7dcf..8d8ab00e48 100644 --- a/core/state/statedb.go +++ b/core/state/statedb.go @@ -1317,11 +1317,16 @@ func (s *StateDB) commit(deleteEmptyObjects bool, noStorageWiping bool, blockNum // commitAndFlush is a wrapper of commit which also commits the state mutations // to the configured data stores. -func (s *StateDB) commitAndFlush(block uint64, deleteEmptyObjects bool, noStorageWiping bool) (*stateUpdate, error) { +func (s *StateDB) commitAndFlush(block uint64, deleteEmptyObjects bool, noStorageWiping bool, dedupCode bool) (*stateUpdate, error) { ret, err := s.commit(deleteEmptyObjects, noStorageWiping, block) if err != nil { return nil, err } + + if dedupCode { + ret.markCodeExistence(s.reader) + } + // Commit dirty contract code if any exists if db := s.db.TrieDB().Disk(); db != nil && len(ret.codes) > 0 { batch := db.NewBatch() @@ -1376,21 +1381,21 @@ func (s *StateDB) commitAndFlush(block uint64, deleteEmptyObjects bool, noStorag // no empty accounts left that could be deleted by EIP-158, storage wiping // should not occur. func (s *StateDB) Commit(block uint64, deleteEmptyObjects bool, noStorageWiping bool) (common.Hash, error) { - ret, err := s.commitAndFlush(block, deleteEmptyObjects, noStorageWiping) + ret, err := s.commitAndFlush(block, deleteEmptyObjects, noStorageWiping, false) if err != nil { return common.Hash{}, err } return ret.root, nil } -// CommitWithUpdate writes the state mutations and returns both the root hash and the state update. -// This is useful for tracking state changes at the blockchain level. 
-func (s *StateDB) CommitWithUpdate(block uint64, deleteEmptyObjects bool, noStorageWiping bool) (common.Hash, *stateUpdate, error) { - ret, err := s.commitAndFlush(block, deleteEmptyObjects, noStorageWiping) +// CommitAndTrack writes the state mutations and notifies the size tracker of the state changes. +func (s *StateDB) CommitAndTrack(block uint64, deleteEmptyObjects bool, noStorageWiping bool, sizer *SizeTracker) (common.Hash, error) { + ret, err := s.commitAndFlush(block, deleteEmptyObjects, noStorageWiping, true) if err != nil { - return common.Hash{}, nil, err + return common.Hash{}, err } - return ret.root, ret, nil + sizer.Notify(ret) + return ret.root, nil } // Prepare handles the preparatory steps for executing a state transition with. diff --git a/core/state/statedb_fuzz_test.go b/core/state/statedb_fuzz_test.go index f4761bd10c..8b6ac0ba64 100644 --- a/core/state/statedb_fuzz_test.go +++ b/core/state/statedb_fuzz_test.go @@ -228,7 +228,7 @@ func (test *stateTest) run() bool { } else { state.IntermediateRoot(true) // call intermediateRoot at the transaction boundary } - ret, err := state.commitAndFlush(0, true, false) // call commit at the block boundary + ret, err := state.commitAndFlush(0, true, false, false) // call commit at the block boundary if err != nil { panic(err) } diff --git a/core/state/stateupdate.go b/core/state/stateupdate.go index a62e2b2d2d..853ed09dad 100644 --- a/core/state/stateupdate.go +++ b/core/state/stateupdate.go @@ -26,8 +26,9 @@ import ( // contractCode represents a contract code with associated metadata. type contractCode struct { - hash common.Hash // hash is the cryptographic hash of the contract code. - blob []byte // blob is the binary representation of the contract code. + hash common.Hash // hash is the cryptographic hash of the contract code. + blob []byte // blob is the binary representation of the contract code. 
+ exists bool // flag whether the code has been existent } // accountDelete represents an operation for deleting an Ethereum account. @@ -190,3 +191,22 @@ func (sc *stateUpdate) stateSet() *triedb.StateSet { RawStorageKey: sc.rawStorageKey, } } + +// markCodeExistence determines whether each piece of contract code referenced +// in this state update actually exists. +// +// Note: This operation is expensive and not needed during normal state transitions. +// It is only required when SizeTracker is enabled to produce accurate state +// statistics. +func (sc *stateUpdate) markCodeExistence(reader ContractCodeReader) { + cache := make(map[common.Hash]bool) + for addr, code := range sc.codes { + if exists, ok := cache[code.hash]; ok { + code.exists = exists + continue + } + res := reader.Has(addr, code.hash) + cache[code.hash] = res + code.exists = res + } +} From c21fe5475ffd931c0c15c9addb99dcf909ccc7fa Mon Sep 17 00:00:00 2001 From: jwasinger Date: Wed, 10 Dec 2025 01:26:27 -0500 Subject: [PATCH 179/277] tests: integrate BlockTest.run into BlockTest.Run (#33383) --- tests/block_test_util.go | 13 +++++-------- 1 file changed, 5 insertions(+), 8 deletions(-) diff --git a/tests/block_test_util.go b/tests/block_test_util.go index 52fe58e702..72fd955c8f 100644 --- a/tests/block_test_util.go +++ b/tests/block_test_util.go @@ -116,15 +116,7 @@ func (t *BlockTest) Run(snapshotter bool, scheme string, witness bool, tracer *t if !ok { return UnsupportedForkError{t.json.Network} } - return t.run(config, snapshotter, scheme, witness, tracer, postCheck) -} -// Network returns the network/fork name for this test. 
-func (t *BlockTest) Network() string { - return t.json.Network -} - -func (t *BlockTest) run(config *params.ChainConfig, snapshotter bool, scheme string, witness bool, tracer *tracing.Hooks, postCheck func(error, *core.BlockChain)) (result error) { // import pre accounts & construct test genesis block & state root // Commit genesis state var ( @@ -212,6 +204,11 @@ func (t *BlockTest) run(config *params.ChainConfig, snapshotter bool, scheme str return t.validateImportedHeaders(chain, validBlocks) } +// Network returns the network/fork name for this test. +func (t *BlockTest) Network() string { + return t.json.Network +} + func (t *BlockTest) genesis(config *params.ChainConfig) *core.Genesis { return &core.Genesis{ Config: config, From 215ee6ac1882c7de749e3d2ab1906df19333adb7 Mon Sep 17 00:00:00 2001 From: Fibonacci747 Date: Wed, 10 Dec 2025 07:56:56 +0100 Subject: [PATCH 180/277] internal/ethapi: select precompiles using the simulated header (#33363) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The simulator computed active precompiles from the base header, which is incorrect when simulations cross fork boundaries. This change selects precompiles using the current simulated header so the precompile set matches the block’s number/time. It brings simulate in line with doCall, tracing, and mining, and keeps precompile state overrides applied on the correct epoch set. 
--- internal/ethapi/simulate.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/ethapi/simulate.go b/internal/ethapi/simulate.go index e0732c327a..3c08061313 100644 --- a/internal/ethapi/simulate.go +++ b/internal/ethapi/simulate.go @@ -233,7 +233,7 @@ func (sim *simulator) processBlock(ctx context.Context, block *simBlock, header, if block.BlockOverrides.BlobBaseFee != nil { blockContext.BlobBaseFee = block.BlockOverrides.BlobBaseFee.ToInt() } - precompiles := sim.activePrecompiles(sim.base) + precompiles := sim.activePrecompiles(header) // State overrides are applied prior to execution of a block if err := block.StateOverrides.Apply(sim.state, precompiles); err != nil { return nil, nil, nil, err From 1ce71a1895039d09b166771125dd24e693827d63 Mon Sep 17 00:00:00 2001 From: SashaMalysehko Date: Wed, 10 Dec 2025 09:13:47 +0200 Subject: [PATCH 181/277] eth/tracers/native: include SWAP16 in default ignored opcodes (#33381) --- eth/tracers/native/erc7562.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/eth/tracers/native/erc7562.go b/eth/tracers/native/erc7562.go index 3ab98c7132..34e202f667 100644 --- a/eth/tracers/native/erc7562.go +++ b/eth/tracers/native/erc7562.go @@ -513,7 +513,7 @@ func defaultIgnoredOpcodes() []hexutil.Uint64 { ignored := make([]hexutil.Uint64, 0, 64) // Allow all PUSHx, DUPx and SWAPx opcodes as they have sequential codes - for op := vm.PUSH0; op < vm.SWAP16; op++ { + for op := vm.PUSH0; op <= vm.SWAP16; op++ { ignored = append(ignored, hexutil.Uint64(op)) } From 13a8798fa322b16441b819404e3f66b4ea22c3c4 Mon Sep 17 00:00:00 2001 From: kurahin Date: Wed, 10 Dec 2025 10:09:07 +0200 Subject: [PATCH 182/277] p2p/tracker: fix head detection in Fulfil to avoid unnecessary timer reschedules (#33370) --- p2p/tracker/tracker.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/p2p/tracker/tracker.go b/p2p/tracker/tracker.go index 5b72eb2b88..a1cf6f1119 100644 --- a/p2p/tracker/tracker.go 
+++ b/p2p/tracker/tracker.go @@ -185,9 +185,10 @@ func (t *Tracker) Fulfil(peer string, version uint, code uint64, id uint64) { return } // Everything matches, mark the request serviced and meter it + wasHead := req.expire.Prev() == nil t.expire.Remove(req.expire) delete(t.pending, id) - if req.expire.Prev() == nil { + if wasHead { if t.wake.Stop() { t.schedule() } From b98b25544949cb0d9caa3e61ce5fa0cc12445bde Mon Sep 17 00:00:00 2001 From: Bashmunta Date: Wed, 10 Dec 2025 10:09:24 +0200 Subject: [PATCH 183/277] core/rawdb: fix size counting in memory freezer (#33344) --- core/rawdb/freezer_memory.go | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/core/rawdb/freezer_memory.go b/core/rawdb/freezer_memory.go index 8cb4cc2006..a0d308f896 100644 --- a/core/rawdb/freezer_memory.go +++ b/core/rawdb/freezer_memory.go @@ -91,6 +91,13 @@ func (t *memoryTable) truncateHead(items uint64) error { if items < t.offset { return errors.New("truncation below tail") } + for i := int(items - t.offset); i < len(t.data); i++ { + if t.size > uint64(len(t.data[i])) { + t.size -= uint64(len(t.data[i])) + } else { + t.size = 0 + } + } t.data = t.data[:items-t.offset] t.items = items return nil @@ -108,6 +115,13 @@ func (t *memoryTable) truncateTail(items uint64) error { if t.items < items { return errors.New("truncation above head") } + for i := uint64(0); i < items-t.offset; i++ { + if t.size > uint64(len(t.data[i])) { + t.size -= uint64(len(t.data[i])) + } else { + t.size = 0 + } + } t.data = t.data[items-t.offset:] t.offset = items return nil From 1b702f71d933af8cedb534d511bad723dd349f1f Mon Sep 17 00:00:00 2001 From: Delweng Date: Thu, 11 Dec 2025 09:37:16 +0800 Subject: [PATCH 184/277] triedb/pathdb: use copy instead of append to reduce memory alloc (#33044) --- triedb/pathdb/disklayer.go | 2 +- triedb/pathdb/flush.go | 5 +++-- triedb/pathdb/lookup.go | 7 +++++++ 3 files changed, 11 insertions(+), 3 deletions(-) diff --git a/triedb/pathdb/disklayer.go 
b/triedb/pathdb/disklayer.go index 76f3f5a46e..b9c308c5b6 100644 --- a/triedb/pathdb/disklayer.go +++ b/triedb/pathdb/disklayer.go @@ -275,7 +275,7 @@ func (dl *diskLayer) storage(accountHash, storageHash common.Hash, depth int) ([ // If the layer is being generated, ensure the requested storage slot // has already been covered by the generator. - key := append(accountHash[:], storageHash[:]...) + key := storageKeySlice(accountHash, storageHash) marker := dl.genMarker() if marker != nil && bytes.Compare(key, marker) > 0 { return nil, errNotCoveredYet diff --git a/triedb/pathdb/flush.go b/triedb/pathdb/flush.go index 6563dbccff..4f816cf6a6 100644 --- a/triedb/pathdb/flush.go +++ b/triedb/pathdb/flush.go @@ -116,15 +116,16 @@ func writeStates(batch ethdb.Batch, genMarker []byte, accountData map[common.Has continue } slots += 1 + key := storageKeySlice(addrHash, storageHash) if len(blob) == 0 { rawdb.DeleteStorageSnapshot(batch, addrHash, storageHash) if clean != nil { - clean.Set(append(addrHash[:], storageHash[:]...), nil) + clean.Set(key, nil) } } else { rawdb.WriteStorageSnapshot(batch, addrHash, storageHash, blob) if clean != nil { - clean.Set(append(addrHash[:], storageHash[:]...), blob) + clean.Set(key, blob) } } } diff --git a/triedb/pathdb/lookup.go b/triedb/pathdb/lookup.go index 8b092730f8..719546f410 100644 --- a/triedb/pathdb/lookup.go +++ b/triedb/pathdb/lookup.go @@ -33,6 +33,13 @@ func storageKey(accountHash common.Hash, slotHash common.Hash) [64]byte { return key } +// storageKeySlice returns a key for uniquely identifying the storage slot in +// the slice format. +func storageKeySlice(accountHash common.Hash, slotHash common.Hash) []byte { + key := storageKey(accountHash, slotHash) + return key[:] +} + // lookup is an internal structure used to efficiently determine the layer in // which a state entry resides. 
type lookup struct { From 56d201b0feb90b3e4a863349d0883c5502bc792f Mon Sep 17 00:00:00 2001 From: Bosul Mun Date: Thu, 11 Dec 2025 13:11:52 +0900 Subject: [PATCH 185/277] eth/fetcher: add metadata validation in tx announcement (#33378) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This PR fixes the bug reported in #33365. The impact of the bug is not catastrophic. After a transaction is ultimately fetched, validation and propagation will be performed based on the fetched body, and any response with a mismatched type is treated as a protocol violation. An attacker could only waste the limited portion of victim’s bandwidth at most. However, the reasons for submitting this PR are as follows 1. Fetching a transaction announced with an arbitrary type is a weird behavior. 2. It aligns with efforts such as EIP-8077 and #33119 to make the fetcher smarter and reduce bandwidth waste. Regarding the `FilterType` function, it could potentially be implemented by modifying the Filter function's parameter itself, but I wasn’t sure whether changing that function is acceptable, so I left it as is. --- core/txpool/blobpool/blobpool.go | 7 +- core/txpool/legacypool/legacypool.go | 7 +- core/txpool/subpool.go | 3 + core/txpool/txpool.go | 11 +++ eth/fetcher/tx_fetcher.go | 83 +++++++++++--------- eth/fetcher/tx_fetcher_test.go | 85 ++++++++++++++------- eth/handler.go | 16 +++- eth/handler_test.go | 9 +++ tests/fuzzers/txfetcher/txfetcher_fuzzer.go | 2 +- 9 files changed, 153 insertions(+), 70 deletions(-) diff --git a/core/txpool/blobpool/blobpool.go b/core/txpool/blobpool/blobpool.go index bfaf4d5b8e..e49fe7bb61 100644 --- a/core/txpool/blobpool/blobpool.go +++ b/core/txpool/blobpool/blobpool.go @@ -377,7 +377,12 @@ func New(config Config, chain BlockChain, hasPendingAuth func(common.Address) bo // Filter returns whether the given transaction can be consumed by the blob pool. 
func (p *BlobPool) Filter(tx *types.Transaction) bool { - return tx.Type() == types.BlobTxType + return p.FilterType(tx.Type()) +} + +// FilterType returns whether the blob pool supports the given transaction type. +func (p *BlobPool) FilterType(kind byte) bool { + return kind == types.BlobTxType } // Init sets the gas price needed to keep a transaction in the pool and the chain diff --git a/core/txpool/legacypool/legacypool.go b/core/txpool/legacypool/legacypool.go index ceedc74a53..5f8dd4fac8 100644 --- a/core/txpool/legacypool/legacypool.go +++ b/core/txpool/legacypool/legacypool.go @@ -288,7 +288,12 @@ func New(config Config, chain BlockChain) *LegacyPool { // Filter returns whether the given transaction can be consumed by the legacy // pool, specifically, whether it is a Legacy, AccessList or Dynamic transaction. func (pool *LegacyPool) Filter(tx *types.Transaction) bool { - switch tx.Type() { + return pool.FilterType(tx.Type()) +} + +// FilterType returns whether the legacy pool supports the given transaction type. +func (pool *LegacyPool) FilterType(kind byte) bool { + switch kind { case types.LegacyTxType, types.AccessListTxType, types.DynamicFeeTxType, types.SetCodeTxType: return true default: diff --git a/core/txpool/subpool.go b/core/txpool/subpool.go index 519ae7b989..db099ddf98 100644 --- a/core/txpool/subpool.go +++ b/core/txpool/subpool.go @@ -100,6 +100,9 @@ type SubPool interface { // to this particular subpool. Filter(tx *types.Transaction) bool + // FilterType returns whether the subpool supports the given transaction type. + FilterType(kind byte) bool + // Init sets the base parameters of the subpool, allowing it to load any saved // transactions from disk and also permitting internal maintenance routines to // start up. 
diff --git a/core/txpool/txpool.go b/core/txpool/txpool.go index 437861efca..a314a83f1b 100644 --- a/core/txpool/txpool.go +++ b/core/txpool/txpool.go @@ -489,3 +489,14 @@ func (p *TxPool) Clear() { subpool.Clear() } } + +// FilterType returns whether a transaction with the given type is supported +// (can be added) by the pool. +func (p *TxPool) FilterType(kind byte) bool { + for _, subpool := range p.subpools { + if subpool.FilterType(kind) { + return true + } + } + return false +} diff --git a/eth/fetcher/tx_fetcher.go b/eth/fetcher/tx_fetcher.go index d919ac8a5f..f024f3aeba 100644 --- a/eth/fetcher/tx_fetcher.go +++ b/eth/fetcher/tx_fetcher.go @@ -170,10 +170,10 @@ type TxFetcher struct { alternates map[common.Hash]map[string]struct{} // In-flight transaction alternate origins if retrieval fails // Callbacks - hasTx func(common.Hash) bool // Retrieves a tx from the local txpool - addTxs func([]*types.Transaction) []error // Insert a batch of transactions into local txpool - fetchTxs func(string, []common.Hash) error // Retrieves a set of txs from a remote peer - dropPeer func(string) // Drops a peer in case of announcement violation + validateMeta func(common.Hash, byte) error // Validate a tx metadata based on the local txpool + addTxs func([]*types.Transaction) []error // Insert a batch of transactions into local txpool + fetchTxs func(string, []common.Hash) error // Retrieves a set of txs from a remote peer + dropPeer func(string) // Drops a peer in case of announcement violation step chan struct{} // Notification channel when the fetcher loop iterates clock mclock.Clock // Monotonic clock or simulated clock for tests @@ -183,36 +183,36 @@ type TxFetcher struct { // NewTxFetcher creates a transaction fetcher to retrieve transaction // based on hash announcements. 
-func NewTxFetcher(hasTx func(common.Hash) bool, addTxs func([]*types.Transaction) []error, fetchTxs func(string, []common.Hash) error, dropPeer func(string)) *TxFetcher { - return NewTxFetcherForTests(hasTx, addTxs, fetchTxs, dropPeer, mclock.System{}, time.Now, nil) +func NewTxFetcher(validateMeta func(common.Hash, byte) error, addTxs func([]*types.Transaction) []error, fetchTxs func(string, []common.Hash) error, dropPeer func(string)) *TxFetcher { + return NewTxFetcherForTests(validateMeta, addTxs, fetchTxs, dropPeer, mclock.System{}, time.Now, nil) } // NewTxFetcherForTests is a testing method to mock out the realtime clock with // a simulated version and the internal randomness with a deterministic one. func NewTxFetcherForTests( - hasTx func(common.Hash) bool, addTxs func([]*types.Transaction) []error, fetchTxs func(string, []common.Hash) error, dropPeer func(string), + validateMeta func(common.Hash, byte) error, addTxs func([]*types.Transaction) []error, fetchTxs func(string, []common.Hash) error, dropPeer func(string), clock mclock.Clock, realTime func() time.Time, rand *mrand.Rand) *TxFetcher { return &TxFetcher{ - notify: make(chan *txAnnounce), - cleanup: make(chan *txDelivery), - drop: make(chan *txDrop), - quit: make(chan struct{}), - waitlist: make(map[common.Hash]map[string]struct{}), - waittime: make(map[common.Hash]mclock.AbsTime), - waitslots: make(map[string]map[common.Hash]*txMetadataWithSeq), - announces: make(map[string]map[common.Hash]*txMetadataWithSeq), - announced: make(map[common.Hash]map[string]struct{}), - fetching: make(map[common.Hash]string), - requests: make(map[string]*txRequest), - alternates: make(map[common.Hash]map[string]struct{}), - underpriced: lru.NewCache[common.Hash, time.Time](maxTxUnderpricedSetSize), - hasTx: hasTx, - addTxs: addTxs, - fetchTxs: fetchTxs, - dropPeer: dropPeer, - clock: clock, - realTime: realTime, - rand: rand, + notify: make(chan *txAnnounce), + cleanup: make(chan *txDelivery), + drop: make(chan 
*txDrop), + quit: make(chan struct{}), + waitlist: make(map[common.Hash]map[string]struct{}), + waittime: make(map[common.Hash]mclock.AbsTime), + waitslots: make(map[string]map[common.Hash]*txMetadataWithSeq), + announces: make(map[string]map[common.Hash]*txMetadataWithSeq), + announced: make(map[common.Hash]map[string]struct{}), + fetching: make(map[common.Hash]string), + requests: make(map[string]*txRequest), + alternates: make(map[common.Hash]map[string]struct{}), + underpriced: lru.NewCache[common.Hash, time.Time](maxTxUnderpricedSetSize), + validateMeta: validateMeta, + addTxs: addTxs, + fetchTxs: fetchTxs, + dropPeer: dropPeer, + clock: clock, + realTime: realTime, + rand: rand, } } @@ -235,19 +235,26 @@ func (f *TxFetcher) Notify(peer string, types []byte, sizes []uint32, hashes []c underpriced int64 ) for i, hash := range hashes { - switch { - case f.hasTx(hash): + err := f.validateMeta(hash, types[i]) + if errors.Is(err, txpool.ErrAlreadyKnown) { duplicate++ - case f.isKnownUnderpriced(hash): - underpriced++ - default: - unknownHashes = append(unknownHashes, hash) - - // Transaction metadata has been available since eth68, and all - // legacy eth protocols (prior to eth68) have been deprecated. - // Therefore, metadata is always expected in the announcement. - unknownMetas = append(unknownMetas, txMetadata{kind: types[i], size: sizes[i]}) + continue } + if err != nil { + continue + } + + if f.isKnownUnderpriced(hash) { + underpriced++ + continue + } + + unknownHashes = append(unknownHashes, hash) + + // Transaction metadata has been available since eth68, and all + // legacy eth protocols (prior to eth68) have been deprecated. + // Therefore, metadata is always expected in the announcement. 
+ unknownMetas = append(unknownMetas, txMetadata{kind: types[i], size: sizes[i]}) } txAnnounceKnownMeter.Mark(duplicate) txAnnounceUnderpricedMeter.Mark(underpriced) diff --git a/eth/fetcher/tx_fetcher_test.go b/eth/fetcher/tx_fetcher_test.go index bb41f62932..d6d5a8692e 100644 --- a/eth/fetcher/tx_fetcher_test.go +++ b/eth/fetcher/tx_fetcher_test.go @@ -93,7 +93,7 @@ func TestTransactionFetcherWaiting(t *testing.T) { testTransactionFetcherParallel(t, txFetcherTest{ init: func() *TxFetcher { return NewTxFetcher( - func(common.Hash) bool { return false }, + func(common.Hash, byte) error { return nil }, nil, func(string, []common.Hash) error { return nil }, nil, @@ -295,7 +295,7 @@ func TestTransactionFetcherSkipWaiting(t *testing.T) { testTransactionFetcherParallel(t, txFetcherTest{ init: func() *TxFetcher { return NewTxFetcher( - func(common.Hash) bool { return false }, + func(common.Hash, byte) error { return nil }, nil, func(string, []common.Hash) error { return nil }, nil, @@ -385,7 +385,7 @@ func TestTransactionFetcherSingletonRequesting(t *testing.T) { testTransactionFetcherParallel(t, txFetcherTest{ init: func() *TxFetcher { return NewTxFetcher( - func(common.Hash) bool { return false }, + func(common.Hash, byte) error { return nil }, nil, func(string, []common.Hash) error { return nil }, nil, @@ -490,7 +490,7 @@ func TestTransactionFetcherFailedRescheduling(t *testing.T) { testTransactionFetcherParallel(t, txFetcherTest{ init: func() *TxFetcher { return NewTxFetcher( - func(common.Hash) bool { return false }, + func(common.Hash, byte) error { return nil }, nil, func(origin string, hashes []common.Hash) error { <-proceed @@ -574,7 +574,7 @@ func TestTransactionFetcherCleanup(t *testing.T) { testTransactionFetcherParallel(t, txFetcherTest{ init: func() *TxFetcher { return NewTxFetcher( - func(common.Hash) bool { return false }, + func(common.Hash, byte) error { return nil }, func(txs []*types.Transaction) []error { return make([]error, len(txs)) }, @@ -618,7 
+618,7 @@ func TestTransactionFetcherCleanupEmpty(t *testing.T) { testTransactionFetcherParallel(t, txFetcherTest{ init: func() *TxFetcher { return NewTxFetcher( - func(common.Hash) bool { return false }, + func(common.Hash, byte) error { return nil }, func(txs []*types.Transaction) []error { return make([]error, len(txs)) }, @@ -661,7 +661,7 @@ func TestTransactionFetcherMissingRescheduling(t *testing.T) { testTransactionFetcherParallel(t, txFetcherTest{ init: func() *TxFetcher { return NewTxFetcher( - func(common.Hash) bool { return false }, + func(common.Hash, byte) error { return nil }, func(txs []*types.Transaction) []error { return make([]error, len(txs)) }, @@ -722,7 +722,7 @@ func TestTransactionFetcherMissingCleanup(t *testing.T) { testTransactionFetcherParallel(t, txFetcherTest{ init: func() *TxFetcher { return NewTxFetcher( - func(common.Hash) bool { return false }, + func(common.Hash, byte) error { return nil }, func(txs []*types.Transaction) []error { return make([]error, len(txs)) }, @@ -771,7 +771,7 @@ func TestTransactionFetcherBroadcasts(t *testing.T) { testTransactionFetcherParallel(t, txFetcherTest{ init: func() *TxFetcher { return NewTxFetcher( - func(common.Hash) bool { return false }, + func(common.Hash, byte) error { return nil }, func(txs []*types.Transaction) []error { return make([]error, len(txs)) }, @@ -827,7 +827,7 @@ func TestTransactionFetcherWaitTimerResets(t *testing.T) { testTransactionFetcherParallel(t, txFetcherTest{ init: func() *TxFetcher { return NewTxFetcher( - func(common.Hash) bool { return false }, + func(common.Hash, byte) error { return nil }, nil, func(string, []common.Hash) error { return nil }, nil, @@ -897,7 +897,7 @@ func TestTransactionFetcherTimeoutRescheduling(t *testing.T) { testTransactionFetcherParallel(t, txFetcherTest{ init: func() *TxFetcher { return NewTxFetcher( - func(common.Hash) bool { return false }, + func(common.Hash, byte) error { return nil }, func(txs []*types.Transaction) []error { return 
make([]error, len(txs)) }, @@ -975,7 +975,7 @@ func TestTransactionFetcherTimeoutTimerResets(t *testing.T) { testTransactionFetcherParallel(t, txFetcherTest{ init: func() *TxFetcher { return NewTxFetcher( - func(common.Hash) bool { return false }, + func(common.Hash, byte) error { return nil }, nil, func(string, []common.Hash) error { return nil }, nil, @@ -1053,7 +1053,7 @@ func TestTransactionFetcherRateLimiting(t *testing.T) { testTransactionFetcherParallel(t, txFetcherTest{ init: func() *TxFetcher { return NewTxFetcher( - func(common.Hash) bool { return false }, + func(common.Hash, byte) error { return nil }, nil, func(string, []common.Hash) error { return nil }, nil, @@ -1083,7 +1083,7 @@ func TestTransactionFetcherBandwidthLimiting(t *testing.T) { testTransactionFetcherParallel(t, txFetcherTest{ init: func() *TxFetcher { return NewTxFetcher( - func(common.Hash) bool { return false }, + func(common.Hash, byte) error { return nil }, nil, func(string, []common.Hash) error { return nil }, nil, @@ -1200,7 +1200,7 @@ func TestTransactionFetcherDoSProtection(t *testing.T) { testTransactionFetcherParallel(t, txFetcherTest{ init: func() *TxFetcher { return NewTxFetcher( - func(common.Hash) bool { return false }, + func(common.Hash, byte) error { return nil }, nil, func(string, []common.Hash) error { return nil }, nil, @@ -1267,7 +1267,7 @@ func TestTransactionFetcherUnderpricedDedup(t *testing.T) { testTransactionFetcherParallel(t, txFetcherTest{ init: func() *TxFetcher { return NewTxFetcher( - func(common.Hash) bool { return false }, + func(common.Hash, byte) error { return nil }, func(txs []*types.Transaction) []error { errs := make([]error, len(txs)) for i := 0; i < len(errs); i++ { @@ -1368,7 +1368,7 @@ func TestTransactionFetcherUnderpricedDoSProtection(t *testing.T) { testTransactionFetcher(t, txFetcherTest{ init: func() *TxFetcher { return NewTxFetcher( - func(common.Hash) bool { return false }, + func(common.Hash, byte) error { return nil }, func(txs 
[]*types.Transaction) []error { errs := make([]error, len(txs)) for i := 0; i < len(errs); i++ { @@ -1400,7 +1400,7 @@ func TestTransactionFetcherOutOfBoundDeliveries(t *testing.T) { testTransactionFetcherParallel(t, txFetcherTest{ init: func() *TxFetcher { return NewTxFetcher( - func(common.Hash) bool { return false }, + func(common.Hash, byte) error { return nil }, func(txs []*types.Transaction) []error { return make([]error, len(txs)) }, @@ -1459,7 +1459,7 @@ func TestTransactionFetcherDrop(t *testing.T) { testTransactionFetcherParallel(t, txFetcherTest{ init: func() *TxFetcher { return NewTxFetcher( - func(common.Hash) bool { return false }, + func(common.Hash, byte) error { return nil }, func(txs []*types.Transaction) []error { return make([]error, len(txs)) }, @@ -1533,7 +1533,7 @@ func TestTransactionFetcherDropRescheduling(t *testing.T) { testTransactionFetcherParallel(t, txFetcherTest{ init: func() *TxFetcher { return NewTxFetcher( - func(common.Hash) bool { return false }, + func(common.Hash, byte) error { return nil }, func(txs []*types.Transaction) []error { return make([]error, len(txs)) }, @@ -1579,7 +1579,7 @@ func TestInvalidAnnounceMetadata(t *testing.T) { testTransactionFetcherParallel(t, txFetcherTest{ init: func() *TxFetcher { return NewTxFetcher( - func(common.Hash) bool { return false }, + func(common.Hash, byte) error { return nil }, func(txs []*types.Transaction) []error { return make([]error, len(txs)) }, @@ -1662,7 +1662,7 @@ func TestTransactionFetcherFuzzCrash01(t *testing.T) { testTransactionFetcherParallel(t, txFetcherTest{ init: func() *TxFetcher { return NewTxFetcher( - func(common.Hash) bool { return false }, + func(common.Hash, byte) error { return nil }, func(txs []*types.Transaction) []error { return make([]error, len(txs)) }, @@ -1690,7 +1690,7 @@ func TestTransactionFetcherFuzzCrash02(t *testing.T) { testTransactionFetcherParallel(t, txFetcherTest{ init: func() *TxFetcher { return NewTxFetcher( - func(common.Hash) bool { return 
false }, + func(common.Hash, byte) error { return nil }, func(txs []*types.Transaction) []error { return make([]error, len(txs)) }, @@ -1720,7 +1720,7 @@ func TestTransactionFetcherFuzzCrash03(t *testing.T) { testTransactionFetcherParallel(t, txFetcherTest{ init: func() *TxFetcher { return NewTxFetcher( - func(common.Hash) bool { return false }, + func(common.Hash, byte) error { return nil }, func(txs []*types.Transaction) []error { return make([]error, len(txs)) }, @@ -1759,7 +1759,7 @@ func TestTransactionFetcherFuzzCrash04(t *testing.T) { testTransactionFetcherParallel(t, txFetcherTest{ init: func() *TxFetcher { return NewTxFetcher( - func(common.Hash) bool { return false }, + func(common.Hash, byte) error { return nil }, func(txs []*types.Transaction) []error { return make([]error, len(txs)) }, @@ -1794,7 +1794,7 @@ func TestBlobTransactionAnnounce(t *testing.T) { testTransactionFetcherParallel(t, txFetcherTest{ init: func() *TxFetcher { return NewTxFetcher( - func(common.Hash) bool { return false }, + func(common.Hash, byte) error { return nil }, nil, func(string, []common.Hash) error { return nil }, nil, @@ -1862,7 +1862,7 @@ func TestTransactionFetcherDropAlternates(t *testing.T) { testTransactionFetcherParallel(t, txFetcherTest{ init: func() *TxFetcher { return NewTxFetcher( - func(common.Hash) bool { return false }, + func(common.Hash, byte) error { return nil }, func(txs []*types.Transaction) []error { return make([]error, len(txs)) }, @@ -1908,6 +1908,35 @@ func TestTransactionFetcherDropAlternates(t *testing.T) { }) } +func TestTransactionFetcherWrongMetadata(t *testing.T) { + testTransactionFetcherParallel(t, txFetcherTest{ + init: func() *TxFetcher { + return NewTxFetcher( + func(_ common.Hash, kind byte) error { + switch kind { + case types.LegacyTxType, types.AccessListTxType, types.DynamicFeeTxType, types.BlobTxType, types.SetCodeTxType: + return nil + } + return types.ErrTxTypeNotSupported + }, + func(txs []*types.Transaction) []error { + return 
make([]error, len(txs)) + }, + func(string, []common.Hash) error { return nil }, + nil, + ) + }, + steps: []interface{}{ + doTxNotify{peer: "A", hashes: []common.Hash{{0x01}, {0x02}}, types: []byte{0xff, types.LegacyTxType}, sizes: []uint32{111, 222}}, + isWaiting(map[string][]announce{ + "A": { + {common.Hash{0x02}, types.LegacyTxType, 222}, + }, + }), + }, + }) +} + func testTransactionFetcherParallel(t *testing.T, tt txFetcherTest) { t.Parallel() testTransactionFetcher(t, tt) @@ -2245,7 +2274,7 @@ func TestTransactionForgotten(t *testing.T) { } fetcher := NewTxFetcherForTests( - func(common.Hash) bool { return false }, + func(common.Hash, byte) error { return nil }, func(txs []*types.Transaction) []error { errs := make([]error, len(txs)) for i := 0; i < len(errs); i++ { diff --git a/eth/handler.go b/eth/handler.go index 4510dd32f0..0d07e88c7a 100644 --- a/eth/handler.go +++ b/eth/handler.go @@ -92,6 +92,9 @@ type txPool interface { // can decide whether to receive notifications only for newly seen transactions // or also for reorged out ones. SubscribeTransactions(ch chan<- core.NewTxsEvent, reorgs bool) event.Subscription + + // FilterType returns whether the given tx type is supported by the txPool. 
+ FilterType(kind byte) bool } // handlerConfig is the collection of initialization parameters to create a full @@ -176,7 +179,18 @@ func newHandler(config *handlerConfig) (*handler, error) { addTxs := func(txs []*types.Transaction) []error { return h.txpool.Add(txs, false) } - h.txFetcher = fetcher.NewTxFetcher(h.txpool.Has, addTxs, fetchTx, h.removePeer) + + validateMeta := func(tx common.Hash, kind byte) error { + if h.txpool.Has(tx) { + return txpool.ErrAlreadyKnown + } + if !h.txpool.FilterType(kind) { + return types.ErrTxTypeNotSupported + } + return nil + } + + h.txFetcher = fetcher.NewTxFetcher(validateMeta, addTxs, fetchTx, h.removePeer) return h, nil } diff --git a/eth/handler_test.go b/eth/handler_test.go index 312e5625ba..3470452980 100644 --- a/eth/handler_test.go +++ b/eth/handler_test.go @@ -163,6 +163,15 @@ func (p *testTxPool) SubscribeTransactions(ch chan<- core.NewTxsEvent, reorgs bo return p.txFeed.Subscribe(ch) } +// FilterType should check whether the pool supports the given type of transactions. +func (p *testTxPool) FilterType(kind byte) bool { + switch kind { + case types.LegacyTxType, types.AccessListTxType, types.DynamicFeeTxType, types.BlobTxType, types.SetCodeTxType: + return true + } + return false +} + // testHandler is a live implementation of the Ethereum protocol handler, just // preinitialized with some sane testing defaults and the transaction pool mocked // out. diff --git a/tests/fuzzers/txfetcher/txfetcher_fuzzer.go b/tests/fuzzers/txfetcher/txfetcher_fuzzer.go index c136253a62..3baff33dcc 100644 --- a/tests/fuzzers/txfetcher/txfetcher_fuzzer.go +++ b/tests/fuzzers/txfetcher/txfetcher_fuzzer.go @@ -78,7 +78,7 @@ func fuzz(input []byte) int { rand := rand.New(rand.NewSource(0x3a29)) // Same used in package tests!!! 
f := fetcher.NewTxFetcherForTests( - func(common.Hash) bool { return false }, + func(common.Hash, byte) error { return nil }, func(txs []*types.Transaction) []error { return make([]error, len(txs)) }, From 472e3a24ac17626cac07c52704ff7f5706260792 Mon Sep 17 00:00:00 2001 From: yyhrnk Date: Thu, 11 Dec 2025 09:42:32 +0200 Subject: [PATCH 186/277] core/stateless: cap witness depth metrics buckets (#33389) --- core/stateless/stats.go | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/core/stateless/stats.go b/core/stateless/stats.go index 94f5587f99..73ce031bff 100644 --- a/core/stateless/stats.go +++ b/core/stateless/stats.go @@ -62,10 +62,17 @@ func (s *WitnessStats) Add(nodes map[string][]byte, owner common.Hash) { // If current path is a prefix of the next path, it's not a leaf. // The last path is always a leaf. if i == len(paths)-1 || !strings.HasPrefix(paths[i+1], paths[i]) { + depth := len(path) if owner == (common.Hash{}) { - s.accountTrieLeaves[len(path)] += 1 + if depth >= len(s.accountTrieLeaves) { + depth = len(s.accountTrieLeaves) - 1 + } + s.accountTrieLeaves[depth] += 1 } else { - s.storageTrieLeaves[len(path)] += 1 + if depth >= len(s.storageTrieLeaves) { + depth = len(s.storageTrieLeaves) - 1 + } + s.storageTrieLeaves[depth] += 1 } } } From 16f50285b75533977566b3fb14511fb51deb8195 Mon Sep 17 00:00:00 2001 From: Rizky Ikwan Date: Fri, 12 Dec 2025 06:46:53 +0100 Subject: [PATCH 187/277] cmd/utils: fix DeveloperFlag handling when set to false (#33379) geth --dev=false now correctly respects the false value, instead of incorrectly enabling UseLightweightKDF. 
--- cmd/utils/flags.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index b73fa80b17..2b64761e00 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -1430,7 +1430,7 @@ func SetNodeConfig(ctx *cli.Context, cfg *node.Config) { cfg.KeyStoreDir = ctx.String(KeyStoreDirFlag.Name) } if ctx.IsSet(DeveloperFlag.Name) { - cfg.UseLightweightKDF = true + cfg.UseLightweightKDF = ctx.Bool(DeveloperFlag.Name) } if ctx.IsSet(LightKDFFlag.Name) { cfg.UseLightweightKDF = ctx.Bool(LightKDFFlag.Name) From 3a5560fa987c0b2d1a25ddace14111e8397120d2 Mon Sep 17 00:00:00 2001 From: Daniel Liu Date: Sat, 13 Dec 2025 11:27:00 +0800 Subject: [PATCH 188/277] core/state: make test output message readable (#33400) --- core/state/statedb_hooked_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/core/state/statedb_hooked_test.go b/core/state/statedb_hooked_test.go index 4ff1023eb2..4d85e61679 100644 --- a/core/state/statedb_hooked_test.go +++ b/core/state/statedb_hooked_test.go @@ -129,7 +129,7 @@ func TestHooks(t *testing.T) { for i, want := range wants { if have := result[i]; have != want { - t.Fatalf("error event %d, have\n%v\nwant%v\n", i, have, want) + t.Fatalf("error event %d\nhave: %v\nwant: %v", i, have, want) } } } @@ -165,7 +165,7 @@ func TestHooks_OnCodeChangeV2(t *testing.T) { for i, want := range wants { if have := result[i]; have != want { - t.Fatalf("error event %d, have\n%v\nwant%v\n", i, have, want) + t.Fatalf("error event %d\nhave: %v\nwant: %v", i, have, want) } } } From a9eaf2ffd8596d58433b7816f4607c0c9faf60d1 Mon Sep 17 00:00:00 2001 From: David Klank <155117116+davidjsonn@users.noreply.github.com> Date: Sat, 13 Dec 2025 06:09:07 +0200 Subject: [PATCH 189/277] crypto/signify: fix fuzz test compilation (#33402) The fuzz test file has been broken for a while - it doesn't compile with the `gofuzz` build tag. 
Two issues: - Line 59: called `SignifySignFile` which doesn't exist (should be `SignFile`) - Line 71: used `:=` instead of `=` for already declared `err` variable --- crypto/signify/signify_fuzz.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/crypto/signify/signify_fuzz.go b/crypto/signify/signify_fuzz.go index 239a2134df..d11125c697 100644 --- a/crypto/signify/signify_fuzz.go +++ b/crypto/signify/signify_fuzz.go @@ -56,7 +56,7 @@ func Fuzz(data []byte) int { fmt.Printf("untrusted: %v\n", untrustedComment) fmt.Printf("trusted: %v\n", trustedComment) - err = SignifySignFile(tmpFile.Name(), tmpFile.Name()+".sig", testSecKey, untrustedComment, trustedComment) + err = SignFile(tmpFile.Name(), tmpFile.Name()+".sig", testSecKey, untrustedComment, trustedComment) if err != nil { panic(err) } @@ -68,7 +68,7 @@ func Fuzz(data []byte) int { signify = path } - _, err := exec.LookPath(signify) + _, err = exec.LookPath(signify) if err != nil { panic(err) } From e20b05ec7fd1b1f8be5b0185e6f1698b3dfa87c4 Mon Sep 17 00:00:00 2001 From: Galoretka Date: Sun, 14 Dec 2025 22:51:13 +0200 Subject: [PATCH 190/277] core/overlay: fix incorrect debug log key/value in LoadTransitionState (#32637) --- core/overlay/state_transition.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/overlay/state_transition.go b/core/overlay/state_transition.go index 67ca0f9671..a52d9139c9 100644 --- a/core/overlay/state_transition.go +++ b/core/overlay/state_transition.go @@ -97,7 +97,7 @@ func LoadTransitionState(db ethdb.KeyValueReader, root common.Hash, isVerkle boo // Initialize the first transition state, with the "ended" // field set to true if the database was created // as a verkle database. 
- log.Debug("no transition state found, starting fresh", "is verkle", db) + log.Debug("no transition state found, starting fresh", "verkle", isVerkle) // Start with a fresh state ts = &TransitionState{Ended: isVerkle} From 15f52a29370cf44256b15ec2fc31c6eb1a227cfd Mon Sep 17 00:00:00 2001 From: Ng Wei Han <47109095+weiihann@users.noreply.github.com> Date: Mon, 15 Dec 2025 13:54:26 +0800 Subject: [PATCH 191/277] core/state: fix code existence not marked correctly (#33415) When iterating over a map with value types in Go, the loop variable is a copy. In `markCodeExistence`, assigning to `code.exists` modified only the local copy, not the actual map entry, causing the existence flag to always remain false. This resulted in overcounting contract codes in state size statistics, as codes that already existed in the database were incorrectly counted as new. Fix by changing `codes` from `map[common.Address]contractCode` to `map[common.Address]*contractCode`, so mutations apply directly to the struct. --- core/state/stateupdate.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/core/state/stateupdate.go b/core/state/stateupdate.go index 853ed09dad..c043166cf2 100644 --- a/core/state/stateupdate.go +++ b/core/state/stateupdate.go @@ -83,8 +83,8 @@ type stateUpdate struct { storagesOrigin map[common.Address]map[common.Hash][]byte rawStorageKey bool - codes map[common.Address]contractCode // codes contains the set of dirty codes - nodes *trienode.MergedNodeSet // Aggregated dirty nodes caused by state changes + codes map[common.Address]*contractCode // codes contains the set of dirty codes + nodes *trienode.MergedNodeSet // Aggregated dirty nodes caused by state changes } // empty returns a flag indicating the state transition is empty or not. 
@@ -104,7 +104,7 @@ func newStateUpdate(rawStorageKey bool, originRoot common.Hash, root common.Hash accountsOrigin = make(map[common.Address][]byte) storages = make(map[common.Hash]map[common.Hash][]byte) storagesOrigin = make(map[common.Address]map[common.Hash][]byte) - codes = make(map[common.Address]contractCode) + codes = make(map[common.Address]*contractCode) ) // Since some accounts might be destroyed and recreated within the same // block, deletions must be aggregated first. @@ -126,7 +126,7 @@ func newStateUpdate(rawStorageKey bool, originRoot common.Hash, root common.Hash // Aggregate dirty contract codes if they are available. addr := op.address if op.code != nil { - codes[addr] = *op.code + codes[addr] = op.code } accounts[addrHash] = op.data From 6978ab48aa411fa9618648df9e0c4e2a84f2f7a0 Mon Sep 17 00:00:00 2001 From: rjl493456442 Date: Mon, 15 Dec 2025 15:35:02 +0800 Subject: [PATCH 192/277] cmd/workload, eth/tracers/native: introduce state proof tests (#32247) This pull request introduces a new workload command, providing the features for `eth_getProof` endpoint test generation and execution. --- cmd/workload/README.md | 1 + cmd/workload/main.go | 1 + cmd/workload/prooftest.go | 105 +++++++++ cmd/workload/prooftestgen.go | 355 +++++++++++++++++++++++++++++ cmd/workload/testsuite.go | 18 ++ eth/tracers/native/prestate.go | 6 +- ethclient/gethclient/gethclient.go | 7 +- 7 files changed, 488 insertions(+), 5 deletions(-) create mode 100644 cmd/workload/prooftest.go create mode 100644 cmd/workload/prooftestgen.go diff --git a/cmd/workload/README.md b/cmd/workload/README.md index 1b84dd05db..ee1d6acbc9 100644 --- a/cmd/workload/README.md +++ b/cmd/workload/README.md @@ -34,4 +34,5 @@ the following commands (in this directory) against a synced mainnet node: > go run . filtergen --queries queries/filter_queries_mainnet.json http://host:8545 > go run . historygen --history-tests queries/history_mainnet.json http://host:8545 > go run . 
tracegen --trace-tests queries/trace_mainnet.json --trace-start 4000000 --trace-end 4000100 http://host:8545 +> go run . proofgen --proof-tests queries/proof_mainnet.json --proof-states 3000 http://host:8545 ``` diff --git a/cmd/workload/main.go b/cmd/workload/main.go index 8ac0e5b6cb..4ee894e962 100644 --- a/cmd/workload/main.go +++ b/cmd/workload/main.go @@ -48,6 +48,7 @@ func init() { historyGenerateCommand, filterGenerateCommand, traceGenerateCommand, + proofGenerateCommand, filterPerfCommand, filterFuzzCommand, } diff --git a/cmd/workload/prooftest.go b/cmd/workload/prooftest.go new file mode 100644 index 0000000000..dcc063d30e --- /dev/null +++ b/cmd/workload/prooftest.go @@ -0,0 +1,105 @@ +// Copyright 2025 The go-ethereum Authors +// This file is part of go-ethereum. +// +// go-ethereum is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// go-ethereum is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with go-ethereum. If not, see . + +package main + +import ( + "context" + "encoding/json" + "fmt" + "math/big" + "os" + "strings" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/internal/utesting" + "github.com/urfave/cli/v2" +) + +// proofTest is the content of a state-proof test. 
+type proofTest struct { + BlockNumbers []uint64 `json:"blockNumbers"` + Addresses [][]common.Address `json:"addresses"` + StorageKeys [][][]string `json:"storageKeys"` + Results [][]common.Hash `json:"results"` +} + +type proofTestSuite struct { + cfg testConfig + tests proofTest + invalidDir string +} + +func newProofTestSuite(cfg testConfig, ctx *cli.Context) *proofTestSuite { + s := &proofTestSuite{ + cfg: cfg, + invalidDir: ctx.String(proofTestInvalidOutputFlag.Name), + } + if err := s.loadTests(); err != nil { + exit(err) + } + return s +} + +func (s *proofTestSuite) loadTests() error { + file, err := s.cfg.fsys.Open(s.cfg.proofTestFile) + if err != nil { + // If not found in embedded FS, try to load it from disk + if !os.IsNotExist(err) { + return err + } + file, err = os.OpenFile(s.cfg.proofTestFile, os.O_RDONLY, 0666) + if err != nil { + return fmt.Errorf("can't open proofTestFile: %v", err) + } + } + defer file.Close() + if err := json.NewDecoder(file).Decode(&s.tests); err != nil { + return fmt.Errorf("invalid JSON in %s: %v", s.cfg.proofTestFile, err) + } + if len(s.tests.BlockNumbers) == 0 { + return fmt.Errorf("proofTestFile %s has no test data", s.cfg.proofTestFile) + } + return nil +} + +func (s *proofTestSuite) allTests() []workloadTest { + return []workloadTest{ + newArchiveWorkloadTest("Proof/GetProof", s.getProof), + } +} + +func (s *proofTestSuite) getProof(t *utesting.T) { + ctx := context.Background() + for i, blockNumber := range s.tests.BlockNumbers { + for j := 0; j < len(s.tests.Addresses[i]); j++ { + res, err := s.cfg.client.Geth.GetProof(ctx, s.tests.Addresses[i][j], s.tests.StorageKeys[i][j], big.NewInt(int64(blockNumber))) + if err != nil { + t.Errorf("State proving fails, blockNumber: %d, address: %x, keys: %v, err: %v\n", blockNumber, s.tests.Addresses[i][j], strings.Join(s.tests.StorageKeys[i][j], " "), err) + continue + } + blob, err := json.Marshal(res) + if err != nil { + t.Fatalf("State proving fails: error %v", err) + continue 
+ } + if crypto.Keccak256Hash(blob) != s.tests.Results[i][j] { + t.Errorf("State proof mismatch, %d, number: %d, address: %x, keys: %v: invalid result", i, blockNumber, s.tests.Addresses[i][j], strings.Join(s.tests.StorageKeys[i][j], " ")) + } + } + } +} diff --git a/cmd/workload/prooftestgen.go b/cmd/workload/prooftestgen.go new file mode 100644 index 0000000000..5d92eea114 --- /dev/null +++ b/cmd/workload/prooftestgen.go @@ -0,0 +1,355 @@ +// Copyright 2025 The go-ethereum Authors +// This file is part of go-ethereum. +// +// go-ethereum is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// go-ethereum is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with go-ethereum. 
If not, see + +package main + +import ( + "context" + "encoding/json" + "fmt" + "math/big" + "math/rand" + "os" + "path/filepath" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/eth/tracers" + "github.com/ethereum/go-ethereum/eth/tracers/native" + "github.com/ethereum/go-ethereum/internal/flags" + "github.com/ethereum/go-ethereum/internal/testrand" + "github.com/ethereum/go-ethereum/log" + "github.com/urfave/cli/v2" +) + +var ( + proofGenerateCommand = &cli.Command{ + Name: "proofgen", + Usage: "Generates tests for state proof verification", + ArgsUsage: "", + Action: generateProofTests, + Flags: []cli.Flag{ + proofTestFileFlag, + proofTestResultOutputFlag, + proofTestStatesFlag, + proofTestStartBlockFlag, + proofTestEndBlockFlag, + }, + } + + proofTestFileFlag = &cli.StringFlag{ + Name: "proof-tests", + Usage: "JSON file containing proof test queries", + Value: "proof_tests.json", + Category: flags.TestingCategory, + } + proofTestResultOutputFlag = &cli.StringFlag{ + Name: "proof-output", + Usage: "Folder containing detailed trace output files", + Value: "", + Category: flags.TestingCategory, + } + proofTestStatesFlag = &cli.Int64Flag{ + Name: "proof-states", + Usage: "Number of states to generate proof against", + Value: 10000, + Category: flags.TestingCategory, + } + proofTestInvalidOutputFlag = &cli.StringFlag{ + Name: "proof-invalid", + Usage: "Folder containing the mismatched state proof output files", + Value: "", + Category: flags.TestingCategory, + } + proofTestStartBlockFlag = &cli.Uint64Flag{ + Name: "proof-start", + Usage: "The number of starting block for proof verification (included)", + Category: flags.TestingCategory, + } + proofTestEndBlockFlag = &cli.Uint64Flag{ + Name: "proof-end", + Usage: "The number of ending block for proof verification (excluded)", + Category: flags.TestingCategory, + } +) + +type proofGenerator 
func(cli *client, startBlock uint64, endBlock uint64, number int) ([]uint64, [][]common.Address, [][][]string, error) + +func genAccountProof(cli *client, startBlock uint64, endBlock uint64, number int) ([]uint64, [][]common.Address, [][][]string, error) { + var ( + blockNumbers []uint64 + accountAddresses [][]common.Address + storageKeys [][][]string + nAccounts int + ctx = context.Background() + start = time.Now() + ) + chainID, err := cli.Eth.ChainID(ctx) + if err != nil { + return nil, nil, nil, err + } + signer := types.LatestSignerForChainID(chainID) + + for { + if nAccounts >= number { + break + } + blockNumber := uint64(rand.Intn(int(endBlock-startBlock))) + startBlock + + block, err := cli.Eth.BlockByNumber(context.Background(), big.NewInt(int64(blockNumber))) + if err != nil { + continue + } + var ( + addresses []common.Address + keys [][]string + gather = func(address common.Address) { + addresses = append(addresses, address) + keys = append(keys, nil) + nAccounts++ + } + ) + for _, tx := range block.Transactions() { + if nAccounts >= number { + break + } + sender, err := signer.Sender(tx) + if err != nil { + log.Error("Failed to resolve the sender address", "hash", tx.Hash(), "err", err) + continue + } + gather(sender) + + if tx.To() != nil { + gather(*tx.To()) + } + } + blockNumbers = append(blockNumbers, blockNumber) + accountAddresses = append(accountAddresses, addresses) + storageKeys = append(storageKeys, keys) + } + log.Info("Generated tests for account proof", "blocks", len(blockNumbers), "accounts", nAccounts, "elapsed", common.PrettyDuration(time.Since(start))) + return blockNumbers, accountAddresses, storageKeys, nil +} + +func genNonExistentAccountProof(cli *client, startBlock uint64, endBlock uint64, number int) ([]uint64, [][]common.Address, [][][]string, error) { + var ( + blockNumbers []uint64 + accountAddresses [][]common.Address + storageKeys [][][]string + total int + ) + for i := 0; i < number/5; i++ { + var ( + addresses 
[]common.Address + keys [][]string + blockNumber = uint64(rand.Intn(int(endBlock-startBlock))) + startBlock + ) + for j := 0; j < 5; j++ { + addresses = append(addresses, testrand.Address()) + keys = append(keys, nil) + } + total += len(addresses) + blockNumbers = append(blockNumbers, blockNumber) + accountAddresses = append(accountAddresses, addresses) + storageKeys = append(storageKeys, keys) + } + log.Info("Generated tests for non-existing account proof", "blocks", len(blockNumbers), "accounts", total) + return blockNumbers, accountAddresses, storageKeys, nil +} + +func genStorageProof(cli *client, startBlock uint64, endBlock uint64, number int) ([]uint64, [][]common.Address, [][][]string, error) { + var ( + blockNumbers []uint64 + accountAddresses [][]common.Address + storageKeys [][][]string + + nAccounts int + nStorages int + start = time.Now() + ) + for { + if nAccounts+nStorages >= number { + break + } + blockNumber := uint64(rand.Intn(int(endBlock-startBlock))) + startBlock + + block, err := cli.Eth.BlockByNumber(context.Background(), big.NewInt(int64(blockNumber))) + if err != nil { + continue + } + var ( + addresses []common.Address + slots [][]string + tracer = "prestateTracer" + configBlob, _ = json.Marshal(native.PrestateTracerConfig{ + DiffMode: false, + DisableCode: true, + DisableStorage: false, + }) + ) + for _, tx := range block.Transactions() { + if nAccounts+nStorages >= number { + break + } + if tx.To() == nil { + continue + } + ret, err := cli.Geth.TraceTransaction(context.Background(), tx.Hash(), &tracers.TraceConfig{ + Tracer: &tracer, + TracerConfig: configBlob, + }) + if err != nil { + log.Error("Failed to trace the transaction", "blockNumber", blockNumber, "hash", tx.Hash(), "err", err) + continue + } + blob, err := json.Marshal(ret) + if err != nil { + log.Error("Failed to marshal data", "err", err) + continue + } + var accounts map[common.Address]*types.Account + if err := json.Unmarshal(blob, &accounts); err != nil { + 
log.Error("Failed to decode trace result", "blockNumber", blockNumber, "hash", tx.Hash(), "err", err) + continue + } + for addr, account := range accounts { + if len(account.Storage) == 0 { + continue + } + addresses = append(addresses, addr) + nAccounts += 1 + + var keys []string + for k := range account.Storage { + keys = append(keys, k.Hex()) + } + nStorages += len(keys) + + var emptyKeys []string + for i := 0; i < 3; i++ { + emptyKeys = append(emptyKeys, testrand.Hash().Hex()) + } + nStorages += len(emptyKeys) + + slots = append(slots, append(keys, emptyKeys...)) + } + } + blockNumbers = append(blockNumbers, blockNumber) + accountAddresses = append(accountAddresses, addresses) + storageKeys = append(storageKeys, slots) + } + log.Info("Generated tests for storage proof", "blocks", len(blockNumbers), "accounts", nAccounts, "storages", nStorages, "elapsed", common.PrettyDuration(time.Since(start))) + return blockNumbers, accountAddresses, storageKeys, nil +} + +func genProofRequests(cli *client, startBlock, endBlock uint64, states int) (*proofTest, error) { + var ( + blockNumbers []uint64 + accountAddresses [][]common.Address + storageKeys [][][]string + ) + ratio := []float64{0.2, 0.1, 0.7} + for i, fn := range []proofGenerator{genAccountProof, genNonExistentAccountProof, genStorageProof} { + numbers, addresses, keys, err := fn(cli, startBlock, endBlock, int(float64(states)*ratio[i])) + if err != nil { + return nil, err + } + blockNumbers = append(blockNumbers, numbers...) + accountAddresses = append(accountAddresses, addresses...) + storageKeys = append(storageKeys, keys...) 
+ } + return &proofTest{ + BlockNumbers: blockNumbers, + Addresses: accountAddresses, + StorageKeys: storageKeys, + }, nil +} + +func generateProofTests(clictx *cli.Context) error { + var ( + client = makeClient(clictx) + ctx = context.Background() + states = clictx.Int(proofTestStatesFlag.Name) + outputFile = clictx.String(proofTestFileFlag.Name) + outputDir = clictx.String(proofTestResultOutputFlag.Name) + startBlock = clictx.Uint64(proofTestStartBlockFlag.Name) + endBlock = clictx.Uint64(proofTestEndBlockFlag.Name) + ) + head, err := client.Eth.BlockNumber(ctx) + if err != nil { + exit(err) + } + if startBlock > head || endBlock > head { + return fmt.Errorf("chain is out of proof range, head %d, start: %d, limit: %d", head, startBlock, endBlock) + } + if endBlock == 0 { + endBlock = head + } + log.Info("Generating proof states", "startBlock", startBlock, "endBlock", endBlock, "states", states) + + test, err := genProofRequests(client, startBlock, endBlock, states) + if err != nil { + exit(err) + } + for i, blockNumber := range test.BlockNumbers { + var hashes []common.Hash + for j := 0; j < len(test.Addresses[i]); j++ { + res, err := client.Geth.GetProof(ctx, test.Addresses[i][j], test.StorageKeys[i][j], big.NewInt(int64(blockNumber))) + if err != nil { + log.Error("Failed to prove the state", "number", blockNumber, "address", test.Addresses[i][j], "slots", len(test.StorageKeys[i][j]), "err", err) + continue + } + blob, err := json.Marshal(res) + if err != nil { + return err + } + hashes = append(hashes, crypto.Keccak256Hash(blob)) + + writeStateProof(outputDir, blockNumber, test.Addresses[i][j], res) + } + test.Results = append(test.Results, hashes) + } + writeJSON(outputFile, test) + return nil +} + +func writeStateProof(dir string, blockNumber uint64, address common.Address, result any) { + if dir == "" { + return + } + // Ensure the directory exists + if err := os.MkdirAll(dir, os.ModePerm); err != nil { + exit(fmt.Errorf("failed to create directories: %w", 
err)) + } + fname := fmt.Sprintf("%d-%x", blockNumber, address) + name := filepath.Join(dir, fname) + file, err := os.Create(name) + if err != nil { + exit(fmt.Errorf("error creating %s: %v", name, err)) + return + } + defer file.Close() + + data, _ := json.MarshalIndent(result, "", " ") + _, err = file.Write(data) + if err != nil { + exit(fmt.Errorf("error writing %s: %v", name, err)) + return + } +} diff --git a/cmd/workload/testsuite.go b/cmd/workload/testsuite.go index 25dc17a49e..80cbd15352 100644 --- a/cmd/workload/testsuite.go +++ b/cmd/workload/testsuite.go @@ -50,7 +50,9 @@ var ( filterQueryFileFlag, historyTestFileFlag, traceTestFileFlag, + proofTestFileFlag, traceTestInvalidOutputFlag, + proofTestInvalidOutputFlag, }, } testPatternFlag = &cli.StringFlag{ @@ -95,6 +97,7 @@ type testConfig struct { historyTestFile string historyPruneBlock *uint64 traceTestFile string + proofTestFile string } var errPrunedHistory = errors.New("attempt to access pruned history") @@ -145,6 +148,12 @@ func testConfigFromCLI(ctx *cli.Context) (cfg testConfig) { } else { cfg.traceTestFile = "queries/trace_mainnet.json" } + if ctx.IsSet(proofTestFileFlag.Name) { + cfg.proofTestFile = ctx.String(proofTestFileFlag.Name) + } else { + cfg.proofTestFile = "queries/proof_mainnet.json" + } + cfg.historyPruneBlock = new(uint64) *cfg.historyPruneBlock = history.PrunePoints[params.MainnetGenesisHash].BlockNumber case ctx.Bool(testSepoliaFlag.Name): @@ -164,6 +173,12 @@ func testConfigFromCLI(ctx *cli.Context) (cfg testConfig) { } else { cfg.traceTestFile = "queries/trace_sepolia.json" } + if ctx.IsSet(proofTestFileFlag.Name) { + cfg.proofTestFile = ctx.String(proofTestFileFlag.Name) + } else { + cfg.proofTestFile = "queries/proof_sepolia.json" + } + cfg.historyPruneBlock = new(uint64) *cfg.historyPruneBlock = history.PrunePoints[params.SepoliaGenesisHash].BlockNumber default: @@ -171,6 +186,7 @@ func testConfigFromCLI(ctx *cli.Context) (cfg testConfig) { cfg.filterQueryFile = 
ctx.String(filterQueryFileFlag.Name) cfg.historyTestFile = ctx.String(historyTestFileFlag.Name) cfg.traceTestFile = ctx.String(traceTestFileFlag.Name) + cfg.proofTestFile = ctx.String(proofTestFileFlag.Name) } return cfg } @@ -222,11 +238,13 @@ func runTestCmd(ctx *cli.Context) error { filterSuite := newFilterTestSuite(cfg) historySuite := newHistoryTestSuite(cfg) traceSuite := newTraceTestSuite(cfg, ctx) + proofSuite := newProofTestSuite(cfg, ctx) // Filter test cases. tests := filterSuite.allTests() tests = append(tests, historySuite.allTests()...) tests = append(tests, traceSuite.allTests()...) + tests = append(tests, proofSuite.allTests()...) utests := filterTests(tests, ctx.String(testPatternFlag.Name), func(t workloadTest) bool { if t.Slow && !ctx.Bool(testSlowFlag.Name) { diff --git a/eth/tracers/native/prestate.go b/eth/tracers/native/prestate.go index 2e446f729b..159a91b310 100644 --- a/eth/tracers/native/prestate.go +++ b/eth/tracers/native/prestate.go @@ -66,7 +66,7 @@ type prestateTracer struct { pre stateMap post stateMap to common.Address - config prestateTracerConfig + config PrestateTracerConfig chainConfig *params.ChainConfig interrupt atomic.Bool // Atomic flag to signal execution interruption reason error // Textual reason for the interruption @@ -74,7 +74,7 @@ type prestateTracer struct { deleted map[common.Address]bool } -type prestateTracerConfig struct { +type PrestateTracerConfig struct { DiffMode bool `json:"diffMode"` // If true, this tracer will return state modifications DisableCode bool `json:"disableCode"` // If true, this tracer will not return the contract code DisableStorage bool `json:"disableStorage"` // If true, this tracer will not return the contract storage @@ -82,7 +82,7 @@ type prestateTracerConfig struct { } func newPrestateTracer(ctx *tracers.Context, cfg json.RawMessage, chainConfig *params.ChainConfig) (*tracers.Tracer, error) { - var config prestateTracerConfig + var config PrestateTracerConfig if err := 
json.Unmarshal(cfg, &config); err != nil { return nil, err } diff --git a/ethclient/gethclient/gethclient.go b/ethclient/gethclient/gethclient.go index 6a0f5eb312..c2013bca2c 100644 --- a/ethclient/gethclient/gethclient.go +++ b/ethclient/gethclient/gethclient.go @@ -104,7 +104,10 @@ func (ec *Client) GetProof(ctx context.Context, account common.Address, keys []s var res accountResult err := ec.c.CallContext(ctx, &res, "eth_getProof", account, keys, toBlockNumArg(blockNumber)) - // Turn hexutils back to normal datatypes + if err != nil { + return nil, err + } + // Turn hexutils back to normal data types storageResults := make([]StorageResult, 0, len(res.StorageProof)) for _, st := range res.StorageProof { storageResults = append(storageResults, StorageResult{ @@ -122,7 +125,7 @@ func (ec *Client) GetProof(ctx context.Context, account common.Address, keys []s StorageHash: res.StorageHash, StorageProof: storageResults, } - return &result, err + return &result, nil } // CallContract executes a message call transaction, which is directly executed in the VM From 7aae33eacf523ab55b8e8fce253bdedae7553077 Mon Sep 17 00:00:00 2001 From: Jonny Rhea <5555162+jrhea@users.noreply.github.com> Date: Wed, 17 Dec 2025 17:24:10 -0600 Subject: [PATCH 193/277] eth/catalyst: fix invalid timestamp log message (#33440) Fixes a typo in the NewPayload invalid timestamp warning where the parent timestamp was incorrectly logged as the block timestamp. 
--- eth/catalyst/api.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/eth/catalyst/api.go b/eth/catalyst/api.go index d6d3f57936..7a8ba6a07a 100644 --- a/eth/catalyst/api.go +++ b/eth/catalyst/api.go @@ -757,7 +757,7 @@ func (api *ConsensusAPI) newPayload(params engine.ExecutableData, versionedHashe return api.delayPayloadImport(block), nil } if block.Time() <= parent.Time() { - log.Warn("Invalid timestamp", "parent", block.Time(), "block", block.Time()) + log.Warn("Invalid timestamp", "parent", parent.Time(), "block", block.Time()) return api.invalid(errors.New("invalid timestamp"), parent.Header()), nil } // Another corner case: if the node is in snap sync mode, but the CL client From ffe9dc97e5e04994ebcd72059c38e6e9ee4bd7fc Mon Sep 17 00:00:00 2001 From: rjl493456442 Date: Thu, 18 Dec 2025 17:24:02 +0800 Subject: [PATCH 194/277] core: add code read statistics (#33442) --- core/blockchain.go | 20 +++++++++----- core/blockchain_stats.go | 54 ++++++++++++++++++++++++++++---------- core/state/state_object.go | 10 +++++++ core/state/statedb.go | 2 ++ 4 files changed, 66 insertions(+), 20 deletions(-) diff --git a/core/blockchain.go b/core/blockchain.go index ae92386dc2..9e4562eb44 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -75,6 +75,7 @@ var ( storageReadTimer = metrics.NewRegisteredResettingTimer("chain/storage/reads", nil) storageUpdateTimer = metrics.NewRegisteredResettingTimer("chain/storage/updates", nil) storageCommitTimer = metrics.NewRegisteredResettingTimer("chain/storage/commits", nil) + codeReadTimer = metrics.NewRegisteredResettingTimer("chain/code/reads", nil) accountCacheHitMeter = metrics.NewRegisteredMeter("chain/account/reads/cache/process/hit", nil) accountCacheMissMeter = metrics.NewRegisteredMeter("chain/account/reads/cache/process/miss", nil) @@ -88,6 +89,7 @@ var ( accountReadSingleTimer = metrics.NewRegisteredResettingTimer("chain/account/single/reads", nil) storageReadSingleTimer = 
metrics.NewRegisteredResettingTimer("chain/storage/single/reads", nil) + codeReadSingleTimer = metrics.NewRegisteredResettingTimer("chain/code/single/reads", nil) snapshotCommitTimer = metrics.NewRegisteredResettingTimer("chain/snapshot/commits", nil) triedbCommitTimer = metrics.NewRegisteredResettingTimer("chain/triedb/commits", nil) @@ -1602,13 +1604,17 @@ func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types. // // Note all the components of block(hash->number map, header, body, receipts) // should be written atomically. BlockBatch is used for containing all components. - blockBatch := bc.db.NewBatch() - rawdb.WriteBlock(blockBatch, block) - rawdb.WriteReceipts(blockBatch, block.Hash(), block.NumberU64(), receipts) - rawdb.WritePreimages(blockBatch, statedb.Preimages()) - if err := blockBatch.Write(); err != nil { + var ( + batch = bc.db.NewBatch() + start = time.Now() + ) + rawdb.WriteBlock(batch, block) + rawdb.WriteReceipts(batch, block.Hash(), block.NumberU64(), receipts) + rawdb.WritePreimages(batch, statedb.Preimages()) + if err := batch.Write(); err != nil { log.Crit("Failed to write block into disk", "err", err) } + log.Debug("Committed block data", "size", common.StorageSize(batch.ValueSize()), "elapsed", common.PrettyDuration(time.Since(start))) var ( err error @@ -2167,6 +2173,7 @@ func (bc *BlockChain) ProcessBlock(parentRoot common.Hash, block *types.Block, s stats.AccountUpdates = statedb.AccountUpdates // Account updates are complete(in validation) stats.StorageUpdates = statedb.StorageUpdates // Storage updates are complete(in validation) stats.AccountHashes = statedb.AccountHashes // Account hashes are complete(in validation) + stats.CodeReads = statedb.CodeReads stats.AccountLoaded = statedb.AccountLoaded stats.AccountUpdated = statedb.AccountUpdated @@ -2174,8 +2181,9 @@ func (bc *BlockChain) ProcessBlock(parentRoot common.Hash, block *types.Block, s stats.StorageLoaded = statedb.StorageLoaded stats.StorageUpdated = 
int(statedb.StorageUpdated.Load()) stats.StorageDeleted = int(statedb.StorageDeleted.Load()) + stats.CodeLoaded = statedb.CodeLoaded - stats.Execution = ptime - (statedb.AccountReads + statedb.StorageReads) // The time spent on EVM processing + stats.Execution = ptime - (statedb.AccountReads + statedb.StorageReads + statedb.CodeReads) // The time spent on EVM processing stats.Validation = vtime - (statedb.AccountHashes + statedb.AccountUpdates + statedb.StorageUpdates) // The time spent on block validation stats.CrossValidation = xvtime // The time spent on stateless cross validation diff --git a/core/blockchain_stats.go b/core/blockchain_stats.go index 0cebebc20a..d52426d574 100644 --- a/core/blockchain_stats.go +++ b/core/blockchain_stats.go @@ -37,6 +37,7 @@ type ExecuteStats struct { AccountCommits time.Duration // Time spent on the account trie commit StorageUpdates time.Duration // Time spent on the storage trie update StorageCommits time.Duration // Time spent on the storage trie commit + CodeReads time.Duration // Time spent on the contract code read AccountLoaded int // Number of accounts loaded AccountUpdated int // Number of accounts updated @@ -44,6 +45,7 @@ type ExecuteStats struct { StorageLoaded int // Number of storage slots loaded StorageUpdated int // Number of storage slots updated StorageDeleted int // Number of storage slots deleted + CodeLoaded int // Number of contract code loaded Execution time.Duration // Time spent on the EVM execution Validation time.Duration // Time spent on the block validation @@ -61,19 +63,21 @@ type ExecuteStats struct { // reportMetrics uploads execution statistics to the metrics system. 
func (s *ExecuteStats) reportMetrics() { - accountReadTimer.Update(s.AccountReads) // Account reads are complete(in processing) - storageReadTimer.Update(s.StorageReads) // Storage reads are complete(in processing) if s.AccountLoaded != 0 { + accountReadTimer.Update(s.AccountReads) accountReadSingleTimer.Update(s.AccountReads / time.Duration(s.AccountLoaded)) } if s.StorageLoaded != 0 { + storageReadTimer.Update(s.StorageReads) storageReadSingleTimer.Update(s.StorageReads / time.Duration(s.StorageLoaded)) } - + if s.CodeLoaded != 0 { + codeReadTimer.Update(s.CodeReads) + codeReadSingleTimer.Update(s.CodeReads / time.Duration(s.CodeLoaded)) + } accountUpdateTimer.Update(s.AccountUpdates) // Account updates are complete(in validation) storageUpdateTimer.Update(s.StorageUpdates) // Storage updates are complete(in validation) accountHashTimer.Update(s.AccountHashes) // Account hashes are complete(in validation) - accountCommitTimer.Update(s.AccountCommits) // Account commits are complete, we can mark them storageCommitTimer.Update(s.StorageCommits) // Storage commits are complete, we can mark them @@ -112,22 +116,44 @@ Block: %v (%#x) txs: %d, mgasps: %.2f, elapsed: %v EVM execution: %v Validation: %v -Account read: %v(%d) -Storage read: %v(%d) -Account hash: %v -Storage hash: %v -DB commit: %v -Block write: %v +State read: %v + Account read: %v(%d) + Storage read: %v(%d) + Code read: %v(%d) + +State hash: %v + Account hash: %v + Storage hash: %v + Trie commit: %v + +DB write: %v + State write: %v + Block write: %v %s ############################## `, block.Number(), block.Hash(), len(block.Transactions()), s.MgasPerSecond, common.PrettyDuration(s.TotalTime), - common.PrettyDuration(s.Execution), common.PrettyDuration(s.Validation+s.CrossValidation), + common.PrettyDuration(s.Execution), + common.PrettyDuration(s.Validation+s.CrossValidation), + + // State read + common.PrettyDuration(s.AccountReads+s.StorageReads+s.CodeReads), common.PrettyDuration(s.AccountReads), 
s.AccountLoaded, common.PrettyDuration(s.StorageReads), s.StorageLoaded, - common.PrettyDuration(s.AccountHashes+s.AccountCommits+s.AccountUpdates), - common.PrettyDuration(s.StorageCommits+s.StorageUpdates), - common.PrettyDuration(s.TrieDBCommit+s.SnapshotCommit), common.PrettyDuration(s.BlockWrite), + common.PrettyDuration(s.CodeReads), s.CodeLoaded, + + // State hash + common.PrettyDuration(s.AccountHashes+s.AccountUpdates+s.StorageUpdates+max(s.AccountCommits, s.StorageCommits)), + common.PrettyDuration(s.AccountHashes+s.AccountUpdates), + common.PrettyDuration(s.StorageUpdates), + common.PrettyDuration(max(s.AccountCommits, s.StorageCommits)), + + // Database commit + common.PrettyDuration(s.TrieDBCommit+s.SnapshotCommit+s.BlockWrite), + common.PrettyDuration(s.TrieDBCommit+s.SnapshotCommit), + common.PrettyDuration(s.BlockWrite), + + // cache statistics s.StateReadCacheStats) for _, line := range strings.Split(msg, "\n") { if line == "" { diff --git a/core/state/state_object.go b/core/state/state_object.go index 8f2f323327..91623a838b 100644 --- a/core/state/state_object.go +++ b/core/state/state_object.go @@ -531,6 +531,11 @@ func (s *stateObject) Code() []byte { if bytes.Equal(s.CodeHash(), types.EmptyCodeHash.Bytes()) { return nil } + defer func(start time.Time) { + s.db.CodeLoaded += 1 + s.db.CodeReads += time.Since(start) + }(time.Now()) + code, err := s.db.reader.Code(s.address, common.BytesToHash(s.CodeHash())) if err != nil { s.db.setError(fmt.Errorf("can't load code hash %x: %v", s.CodeHash(), err)) @@ -552,6 +557,11 @@ func (s *stateObject) CodeSize() int { if bytes.Equal(s.CodeHash(), types.EmptyCodeHash.Bytes()) { return 0 } + defer func(start time.Time) { + s.db.CodeLoaded += 1 + s.db.CodeReads += time.Since(start) + }(time.Now()) + size, err := s.db.reader.CodeSize(s.address, common.BytesToHash(s.CodeHash())) if err != nil { s.db.setError(fmt.Errorf("can't load code size %x: %v", s.CodeHash(), err)) diff --git a/core/state/statedb.go 
b/core/state/statedb.go index 8d8ab00e48..7c6b8bbdfc 100644 --- a/core/state/statedb.go +++ b/core/state/statedb.go @@ -151,6 +151,7 @@ type StateDB struct { StorageCommits time.Duration SnapshotCommits time.Duration TrieDBCommits time.Duration + CodeReads time.Duration AccountLoaded int // Number of accounts retrieved from the database during the state transition AccountUpdated int // Number of accounts updated during the state transition @@ -158,6 +159,7 @@ type StateDB struct { StorageLoaded int // Number of storage slots retrieved from the database during the state transition StorageUpdated atomic.Int64 // Number of storage slots updated during the state transition StorageDeleted atomic.Int64 // Number of storage slots deleted during the state transition + CodeLoaded int // Number of contract code loaded during the state transition } // New creates a new state from a given trie. From bd77b77ede8a019a8cb7a3a4d9aae797fa330dd6 Mon Sep 17 00:00:00 2001 From: rjl493456442 Date: Fri, 19 Dec 2025 03:33:07 +0800 Subject: [PATCH 195/277] core/txpool/blobpool: remove legacy sidecar conversion (#33352) This PR removes the legacy sidecar conversion logic. After the Osaka fork, the blobpool will accept only blob sidecar version 1. Any remaining version 0 blob transactions, if they still exist, will no longer be eligible for inclusion. Note that conversion at the RPC layer is still supported, and version 0 blob transactions will be automatically converted to version 1 there. 
--- core/txpool/blobpool/blobpool.go | 261 ++--------------------- core/txpool/blobpool/blobpool_test.go | 262 +----------------------- core/txpool/blobpool/conversion.go | 218 -------------------- core/txpool/blobpool/conversion_test.go | 171 ---------------- core/txpool/blobpool/lookup.go | 10 - core/txpool/validation.go | 11 +- eth/catalyst/api.go | 4 +- 7 files changed, 30 insertions(+), 907 deletions(-) delete mode 100644 core/txpool/blobpool/conversion.go delete mode 100644 core/txpool/blobpool/conversion_test.go diff --git a/core/txpool/blobpool/blobpool.go b/core/txpool/blobpool/blobpool.go index e49fe7bb61..e1a4960c8e 100644 --- a/core/txpool/blobpool/blobpool.go +++ b/core/txpool/blobpool/blobpool.go @@ -21,12 +21,10 @@ import ( "container/heap" "errors" "fmt" - "maps" "math" "math/big" "os" "path/filepath" - "slices" "sort" "sync" "sync/atomic" @@ -96,11 +94,6 @@ const ( // storeVersion is the current slotter layout used for the billy.Database // store. storeVersion = 1 - - // conversionTimeWindow defines the period after the Osaka fork during which - // the pool will still accept and convert legacy blob transactions. After this - // window, all legacy blob transactions will be rejected. 
- conversionTimeWindow = time.Hour * 2 ) // blobTxMeta is the minimal subset of types.BlobTx necessary to validate and @@ -337,9 +330,8 @@ type BlobPool struct { stored uint64 // Useful data size of all transactions on disk limbo *limbo // Persistent data store for the non-finalized blobs - signer types.Signer // Transaction signer to use for sender recovery - chain BlockChain // Chain object to access the state through - cQueue *conversionQueue // The queue for performing legacy sidecar conversion (TODO: remove after Osaka) + signer types.Signer // Transaction signer to use for sender recovery + chain BlockChain // Chain object to access the state through head atomic.Pointer[types.Header] // Current head of the chain state *state.StateDB // Current state at the head of the chain @@ -368,7 +360,6 @@ func New(config Config, chain BlockChain, hasPendingAuth func(common.Address) bo hasPendingAuth: hasPendingAuth, signer: types.LatestSigner(chain.Config()), chain: chain, - cQueue: newConversionQueue(), // Deprecate it after the osaka fork lookup: newLookup(), index: make(map[common.Address][]*blobTxMeta), spent: make(map[common.Address]*uint256.Int), @@ -490,9 +481,6 @@ func (p *BlobPool) Init(gasTip uint64, head *types.Header, reserver txpool.Reser // Close closes down the underlying persistent store. func (p *BlobPool) Close() error { - // Terminate the conversion queue - p.cQueue.close() - var errs []error if p.limbo != nil { // Close might be invoked due to error in constructor, before p,limbo is set if err := p.limbo.Close(); err != nil { @@ -890,172 +878,6 @@ func (p *BlobPool) Reset(oldHead, newHead *types.Header) { basefeeGauge.Update(int64(basefee.Uint64())) blobfeeGauge.Update(int64(blobfee.Uint64())) p.updateStorageMetrics() - - // Perform the conversion logic at the fork boundary - if !p.chain.Config().IsOsaka(oldHead.Number, oldHead.Time) && p.chain.Config().IsOsaka(newHead.Number, newHead.Time) { - // Deep copy all indexed transaction metadata. 
- var ( - ids = make(map[common.Address]map[uint64]uint64) - txs = make(map[common.Address]map[uint64]common.Hash) - ) - for sender, list := range p.index { - ids[sender] = make(map[uint64]uint64) - txs[sender] = make(map[uint64]common.Hash) - for _, m := range list { - ids[sender][m.nonce] = m.id - txs[sender][m.nonce] = m.hash - } - } - // Initiate the background conversion thread. - p.cQueue.launchBillyConversion(func() { - p.convertLegacySidecars(ids, txs) - }) - } -} - -// compareAndSwap checks if the specified transaction is still tracked in the pool -// and replace the metadata accordingly. It should only be used in the fork boundary -// bulk conversion. If it fails for some reason, the subsequent txs won't be dropped -// for simplicity which we assume it's very likely to happen. -// -// The returned flag indicates whether the replacement succeeded. -func (p *BlobPool) compareAndSwap(address common.Address, hash common.Hash, blob []byte, oldID uint64, oldStorageSize uint32) bool { - p.lock.Lock() - defer p.lock.Unlock() - - newId, err := p.store.Put(blob) - if err != nil { - log.Error("Failed to store transaction", "hash", hash, "err", err) - return false - } - newSize := uint64(len(blob)) - newStorageSize := p.store.Size(newId) - - // Terminate the procedure if the transaction was already evicted. The - // newly added blob should be removed before return. 
- if !p.lookup.update(hash, newId, newSize) { - if derr := p.store.Delete(newId); derr != nil { - log.Error("Failed to delete the dangling blob tx", "err", derr) - } else { - log.Warn("Deleted the dangling blob tx", "id", newId) - } - return false - } - // Update the metadata of blob transaction - for _, meta := range p.index[address] { - if meta.hash == hash { - meta.id = newId - meta.version = types.BlobSidecarVersion1 - meta.storageSize = newStorageSize - meta.size = newSize - - p.stored += uint64(newStorageSize) - p.stored -= uint64(oldStorageSize) - break - } - } - if err := p.store.Delete(oldID); err != nil { - log.Error("Failed to delete the legacy transaction", "hash", hash, "id", oldID, "err", err) - } - return true -} - -// convertLegacySidecar fetches transaction data from the store, performs an -// on-the-fly conversion. This function is intended for use only during the -// Osaka fork transition period. -// -// The returned flag indicates whether the replacement succeeds or not. -func (p *BlobPool) convertLegacySidecar(sender common.Address, hash common.Hash, id uint64) bool { - start := time.Now() - - // Retrieves the legacy blob transaction from the underlying store with - // read lock held, preventing any potential data race around the slot - // specified by the id. - p.lock.RLock() - data, err := p.store.Get(id) - if err != nil { - p.lock.RUnlock() - // The transaction may have been evicted simultaneously, safe to skip conversion. - log.Debug("Blob transaction is missing", "hash", hash, "id", id, "err", err) - return false - } - oldStorageSize := p.store.Size(id) - p.lock.RUnlock() - - // Decode the transaction, the failure is not expected and report the error - // loudly if possible. If the blob transaction in this slot is corrupted, - // leave it in the store, it will be dropped during the next pool - // initialization. 
- var tx types.Transaction - if err = rlp.DecodeBytes(data, &tx); err != nil { - log.Error("Blob transaction is corrupted", "hash", hash, "id", id, "err", err) - return false - } - - // Skip conversion if the transaction does not match the expected hash, or if it was - // already converted. This can occur if the original transaction was evicted from the - // pool and the slot was reused by a new one. - if tx.Hash() != hash { - log.Warn("Blob transaction was replaced", "hash", hash, "id", id, "stored", tx.Hash()) - return false - } - sc := tx.BlobTxSidecar() - if sc.Version >= types.BlobSidecarVersion1 { - log.Debug("Skipping conversion of blob tx", "hash", hash, "id", id) - return false - } - - // Perform the sidecar conversion, the failure is not expected and report the error - // loudly if possible. - if err := tx.BlobTxSidecar().ToV1(); err != nil { - log.Error("Failed to convert blob transaction", "hash", hash, "err", err) - return false - } - - // Encode the converted transaction, the failure is not expected and report - // the error loudly if possible. - blob, err := rlp.EncodeToBytes(&tx) - if err != nil { - log.Error("Failed to encode blob transaction", "hash", tx.Hash(), "err", err) - return false - } - - // Replace the legacy blob transaction with the converted format. - if !p.compareAndSwap(sender, hash, blob, id, oldStorageSize) { - log.Error("Failed to replace the legacy transaction", "hash", hash) - return false - } - log.Debug("Converted legacy blob transaction", "hash", hash, "elapsed", common.PrettyDuration(time.Since(start))) - return true -} - -// convertLegacySidecars converts all given transactions to sidecar version 1. -// -// If any of them fails to be converted, the subsequent transactions will still -// be processed, as we assume the failure is very unlikely to happen. If happens, -// these transactions will be stuck in the pool until eviction. 
-func (p *BlobPool) convertLegacySidecars(ids map[common.Address]map[uint64]uint64, txs map[common.Address]map[uint64]common.Hash) { - var ( - start = time.Now() - success int - failure int - ) - for addr, list := range txs { - // Transactions evicted from the pool must be contiguous, if in any case, - // the transactions are gapped with each other, they will be discarded. - nonces := slices.Collect(maps.Keys(list)) - slices.Sort(nonces) - - // Convert the txs with nonce order - for _, nonce := range nonces { - if p.convertLegacySidecar(addr, list[nonce], ids[addr][nonce]) { - success++ - } else { - failure++ - } - } - } - log.Info("Completed blob transaction conversion", "discarded", failure, "injected", success, "elapsed", common.PrettyDuration(time.Since(start))) } // reorg assembles all the transactors and missing transactions between an old @@ -1535,8 +1357,8 @@ func (p *BlobPool) GetMetadata(hash common.Hash) *txpool.TxMetadata { // // The version argument specifies the type of proofs to return, either the // blob proofs (version 0) or the cell proofs (version 1). Proofs conversion is -// CPU intensive, so only done if explicitly requested with the convert flag. -func (p *BlobPool) GetBlobs(vhashes []common.Hash, version byte, convert bool) ([]*kzg4844.Blob, []kzg4844.Commitment, [][]kzg4844.Proof, error) { +// CPU intensive and prohibited in the blobpool explicitly. +func (p *BlobPool) GetBlobs(vhashes []common.Hash, version byte) ([]*kzg4844.Blob, []kzg4844.Commitment, [][]kzg4844.Proof, error) { var ( blobs = make([]*kzg4844.Blob, len(vhashes)) commitments = make([]kzg4844.Commitment, len(vhashes)) @@ -1587,7 +1409,7 @@ func (p *BlobPool) GetBlobs(vhashes []common.Hash, version byte, convert bool) ( } // Mark hash as seen. filled[hash] = struct{}{} - if sidecar.Version != version && !convert { + if sidecar.Version != version { // Skip blobs with incompatible version. 
Note we still track the blob hash // in `filled` here, ensuring that we do not resolve this tx another time. continue @@ -1596,29 +1418,13 @@ func (p *BlobPool) GetBlobs(vhashes []common.Hash, version byte, convert bool) ( var pf []kzg4844.Proof switch version { case types.BlobSidecarVersion0: - if sidecar.Version == types.BlobSidecarVersion0 { - pf = []kzg4844.Proof{sidecar.Proofs[i]} - } else { - proof, err := kzg4844.ComputeBlobProof(&sidecar.Blobs[i], sidecar.Commitments[i]) - if err != nil { - return nil, nil, nil, err - } - pf = []kzg4844.Proof{proof} - } + pf = []kzg4844.Proof{sidecar.Proofs[i]} case types.BlobSidecarVersion1: - if sidecar.Version == types.BlobSidecarVersion0 { - cellProofs, err := kzg4844.ComputeCellProofs(&sidecar.Blobs[i]) - if err != nil { - return nil, nil, nil, err - } - pf = cellProofs - } else { - cellProofs, err := sidecar.CellProofsAt(i) - if err != nil { - return nil, nil, nil, err - } - pf = cellProofs + cellProofs, err := sidecar.CellProofsAt(i) + if err != nil { + return nil, nil, nil, err } + pf = cellProofs } for _, index := range list { blobs[index] = &sidecar.Blobs[i] @@ -1645,56 +1451,15 @@ func (p *BlobPool) AvailableBlobs(vhashes []common.Hash) int { return available } -// preCheck performs the static validation upon the provided tx and converts -// the legacy sidecars if Osaka fork has been activated with a short time window. -// -// This function is pure static and lock free. -func (p *BlobPool) preCheck(tx *types.Transaction) error { - var ( - head = p.head.Load() - isOsaka = p.chain.Config().IsOsaka(head.Number, head.Time) - deadline time.Time - ) - if isOsaka { - deadline = time.Unix(int64(*p.chain.Config().OsakaTime), 0).Add(conversionTimeWindow) - } - // Validate the transaction statically at first to avoid unnecessary - // conversion. This step doesn't require lock protection. 
- if err := p.ValidateTxBasics(tx); err != nil { - return err - } - // Before the Osaka fork, reject the blob txs with cell proofs - if !isOsaka { - if tx.BlobTxSidecar().Version == types.BlobSidecarVersion0 { - return nil - } else { - return errors.New("cell proof is not supported yet") - } - } - // After the Osaka fork, reject the legacy blob txs if the conversion - // time window is passed. - if tx.BlobTxSidecar().Version == types.BlobSidecarVersion1 { - return nil - } - if head.Time > uint64(deadline.Unix()) { - return errors.New("legacy blob tx is not supported") - } - // Convert the legacy sidecar after Osaka fork. This could be a long - // procedure which takes a few seconds, even minutes if there is a long - // queue. Fortunately it will only block the routine of the source peer - // announcing the tx, without affecting other parts. - return p.cQueue.convert(tx) -} - // Add inserts a set of blob transactions into the pool if they pass validation (both // consensus validity and pool restrictions). 
func (p *BlobPool) Add(txs []*types.Transaction, sync bool) []error { var ( - errs []error = make([]error, len(txs)) - adds = make([]*types.Transaction, 0, len(txs)) + errs = make([]error, len(txs)) + adds = make([]*types.Transaction, 0, len(txs)) ) for i, tx := range txs { - if errs[i] = p.preCheck(tx); errs[i] != nil { + if errs[i] = p.ValidateTxBasics(tx); errs[i] != nil { continue } if errs[i] = p.add(tx); errs[i] == nil { diff --git a/core/txpool/blobpool/blobpool_test.go b/core/txpool/blobpool/blobpool_test.go index 2fa1927cae..eda87008c3 100644 --- a/core/txpool/blobpool/blobpool_test.go +++ b/core/txpool/blobpool/blobpool_test.go @@ -92,10 +92,6 @@ type testBlockChain struct { blockTime *uint64 } -func (bc *testBlockChain) setHeadTime(time uint64) { - bc.blockTime = &time -} - func (bc *testBlockChain) Config() *params.ChainConfig { return bc.config } @@ -433,11 +429,11 @@ func verifyBlobRetrievals(t *testing.T, pool *BlobPool) { hashes = append(hashes, tx.vhashes...) } } - blobs1, _, proofs1, err := pool.GetBlobs(hashes, types.BlobSidecarVersion0, false) + blobs1, _, proofs1, err := pool.GetBlobs(hashes, types.BlobSidecarVersion0) if err != nil { t.Fatal(err) } - blobs2, _, proofs2, err := pool.GetBlobs(hashes, types.BlobSidecarVersion1, false) + blobs2, _, proofs2, err := pool.GetBlobs(hashes, types.BlobSidecarVersion1) if err != nil { t.Fatal(err) } @@ -1329,7 +1325,7 @@ func TestBlobCountLimit(t *testing.T) { // Check that first succeeds second fails. if errs[0] != nil { - t.Fatalf("expected tx with 7 blobs to succeed") + t.Fatalf("expected tx with 7 blobs to succeed, got %v", errs[0]) } if !errors.Is(errs[1], txpool.ErrTxBlobLimitExceeded) { t.Fatalf("expected tx with 8 blobs to fail, got: %v", errs[1]) @@ -1806,66 +1802,6 @@ func TestAdd(t *testing.T) { } } -// Tests that transactions with legacy sidecars are accepted within the -// conversion window but rejected after it has passed. 
-func TestAddLegacyBlobTx(t *testing.T) { - testAddLegacyBlobTx(t, true) // conversion window has not yet passed - testAddLegacyBlobTx(t, false) // conversion window passed -} - -func testAddLegacyBlobTx(t *testing.T, accept bool) { - var ( - key1, _ = crypto.GenerateKey() - key2, _ = crypto.GenerateKey() - - addr1 = crypto.PubkeyToAddress(key1.PublicKey) - addr2 = crypto.PubkeyToAddress(key2.PublicKey) - ) - - statedb, _ := state.New(types.EmptyRootHash, state.NewDatabaseForTesting()) - statedb.AddBalance(addr1, uint256.NewInt(1_000_000_000), tracing.BalanceChangeUnspecified) - statedb.AddBalance(addr2, uint256.NewInt(1_000_000_000), tracing.BalanceChangeUnspecified) - statedb.Commit(0, true, false) - - chain := &testBlockChain{ - config: params.MergedTestChainConfig, - basefee: uint256.NewInt(1050), - blobfee: uint256.NewInt(105), - statedb: statedb, - } - var timeDiff uint64 - if accept { - timeDiff = uint64(conversionTimeWindow.Seconds()) - 1 - } else { - timeDiff = uint64(conversionTimeWindow.Seconds()) + 1 - } - time := *params.MergedTestChainConfig.OsakaTime + timeDiff - chain.setHeadTime(time) - - pool := New(Config{Datadir: t.TempDir()}, chain, nil) - if err := pool.Init(1, chain.CurrentBlock(), newReserver()); err != nil { - t.Fatalf("failed to create blob pool: %v", err) - } - - // Attempt to add legacy blob transactions. 
- var ( - tx1 = makeMultiBlobTx(0, 1, 1000, 100, 6, 0, key1, types.BlobSidecarVersion0) - tx2 = makeMultiBlobTx(0, 1, 800, 70, 6, 6, key2, types.BlobSidecarVersion0) - txs = []*types.Transaction{tx1, tx2} - ) - errs := pool.Add(txs, true) - for _, err := range errs { - if accept && err != nil { - t.Fatalf("expected tx add to succeed, %v", err) - } - if !accept && err == nil { - t.Fatal("expected tx add to fail") - } - } - verifyPoolInternals(t, pool) - pool.Close() -} - func TestGetBlobs(t *testing.T) { //log.SetDefault(log.NewLogger(log.NewTerminalHandlerWithLevel(os.Stderr, log.LevelTrace, true))) @@ -1952,7 +1888,6 @@ func TestGetBlobs(t *testing.T) { limit int fillRandom bool // Whether to randomly fill some of the requested blobs with unknowns version byte // Blob sidecar version to request - convert bool // Whether to convert version on retrieval }{ { start: 0, limit: 6, @@ -2018,11 +1953,6 @@ func TestGetBlobs(t *testing.T) { start: 0, limit: 18, fillRandom: true, version: types.BlobSidecarVersion1, }, - { - start: 0, limit: 18, fillRandom: true, - version: types.BlobSidecarVersion1, - convert: true, // Convert some version 0 blobs to version 1 while retrieving - }, } for i, c := range cases { var ( @@ -2044,7 +1974,7 @@ func TestGetBlobs(t *testing.T) { filled[len(vhashes)] = struct{}{} vhashes = append(vhashes, testrand.Hash()) } - blobs, _, proofs, err := pool.GetBlobs(vhashes, c.version, c.convert) + blobs, _, proofs, err := pool.GetBlobs(vhashes, c.version) if err != nil { t.Errorf("Unexpected error for case %d, %v", i, err) } @@ -2070,8 +2000,7 @@ func TestGetBlobs(t *testing.T) { // If an item is missing, but shouldn't, error if blobs[j] == nil || proofs[j] == nil { // This is only an error if there was no version mismatch - if c.convert || - (c.version == types.BlobSidecarVersion1 && 6 <= testBlobIndex && testBlobIndex < 12) || + if (c.version == types.BlobSidecarVersion1 && 6 <= testBlobIndex && testBlobIndex < 12) || (c.version == 
types.BlobSidecarVersion0 && (testBlobIndex < 6 || 12 <= testBlobIndex)) { t.Errorf("tracked blob retrieval failed: item %d, hash %x", j, vhashes[j]) } @@ -2098,185 +2027,6 @@ func TestGetBlobs(t *testing.T) { pool.Close() } -// TestSidecarConversion will verify that after the Osaka fork, all legacy -// sidecars in the pool are successfully convert to v1 sidecars. -func TestSidecarConversion(t *testing.T) { - // log.SetDefault(log.NewLogger(log.NewTerminalHandlerWithLevel(os.Stderr, log.LevelTrace, true))) - - // Create a temporary folder for the persistent backend - storage := t.TempDir() - os.MkdirAll(filepath.Join(storage, pendingTransactionStore), 0700) - - var ( - preOsakaTxs = make(types.Transactions, 10) - postOsakaTxs = make(types.Transactions, 3) - keys = make([]*ecdsa.PrivateKey, len(preOsakaTxs)+len(postOsakaTxs)) - addrs = make([]common.Address, len(preOsakaTxs)+len(postOsakaTxs)) - statedb, _ = state.New(types.EmptyRootHash, state.NewDatabaseForTesting()) - ) - for i := range keys { - keys[i], _ = crypto.GenerateKey() - addrs[i] = crypto.PubkeyToAddress(keys[i].PublicKey) - statedb.AddBalance(addrs[i], uint256.NewInt(1_000_000_000), tracing.BalanceChangeUnspecified) - } - for i := range preOsakaTxs { - preOsakaTxs[i] = makeMultiBlobTx(0, 1, 1000, 100, 2, 0, keys[i], types.BlobSidecarVersion0) - } - for i := range postOsakaTxs { - if i == 0 { - // First has a v0 sidecar. - postOsakaTxs[i] = makeMultiBlobTx(0, 1, 1000, 100, 1, 0, keys[len(preOsakaTxs)+i], types.BlobSidecarVersion0) - } - postOsakaTxs[i] = makeMultiBlobTx(0, 1, 1000, 100, 1, 0, keys[len(preOsakaTxs)+i], types.BlobSidecarVersion1) - } - statedb.Commit(0, true, false) - - // Test plan: - // 1) Create a bunch v0 sidecar txs and add to pool before Osaka. - // 2) Pass in new Osaka header to activate the conversion thread. - // 3) Continue adding both v0 and v1 transactions to the pool. 
- // 4) Verify that as additional blocks come in, transactions involved in the - // migration are correctly discarded. - - config := ¶ms.ChainConfig{ - ChainID: big.NewInt(1), - LondonBlock: big.NewInt(0), - BerlinBlock: big.NewInt(0), - CancunTime: newUint64(0), - PragueTime: newUint64(0), - OsakaTime: newUint64(1), - BlobScheduleConfig: params.DefaultBlobSchedule, - } - chain := &testBlockChain{ - config: config, - basefee: uint256.NewInt(1050), - blobfee: uint256.NewInt(105), - statedb: statedb, - blocks: make(map[uint64]*types.Block), - } - - // Create 3 blocks: - // - the current block, before Osaka - // - the first block after Osaka - // - another post-Osaka block with several transactions in it - header0 := chain.CurrentBlock() - header0.Time = 0 - chain.blocks[0] = types.NewBlockWithHeader(header0) - - header1 := chain.CurrentBlock() - header1.Number = big.NewInt(1) - header1.Time = 1 - chain.blocks[1] = types.NewBlockWithHeader(header1) - - header2 := chain.CurrentBlock() - header2.Time = 2 - header2.Number = big.NewInt(2) - - // Make a copy of one of the pre-Osaka transactions and convert it to v1 here - // so that we can add it to the pool later and ensure a duplicate is not added - // by the conversion queue. - tx := preOsakaTxs[len(preOsakaTxs)-1] - sc := *tx.BlobTxSidecar() // copy sidecar - sc.ToV1() - tx.WithBlobTxSidecar(&sc) - - block2 := types.NewBlockWithHeader(header2).WithBody(types.Body{Transactions: append(postOsakaTxs, tx)}) - chain.blocks[2] = block2 - - pool := New(Config{Datadir: storage}, chain, nil) - if err := pool.Init(1, header0, newReserver()); err != nil { - t.Fatalf("failed to create blob pool: %v", err) - } - - errs := pool.Add(preOsakaTxs, true) - for i, err := range errs { - if err != nil { - t.Errorf("failed to insert blob tx from %s: %s", addrs[i], errs[i]) - } - } - - // Kick off migration. 
- pool.Reset(header0, header1) - - // Add the v0 sidecar tx, but don't block so we can keep doing other stuff - // while it converts the sidecar. - addDone := make(chan struct{}) - go func() { - pool.Add(types.Transactions{postOsakaTxs[0]}, false) - close(addDone) - }() - - // Add the post-Osaka v1 sidecar txs. - errs = pool.Add(postOsakaTxs[1:], false) - for _, err := range errs { - if err != nil { - t.Fatalf("expected tx add to succeed: %v", err) - } - } - - // Wait for the first tx's conversion to complete, then check that all - // transactions added after Osaka can be accounted for in the pool. - <-addDone - pending := pool.Pending(txpool.PendingFilter{BlobTxs: true, BlobVersion: types.BlobSidecarVersion1}) - for _, tx := range postOsakaTxs { - from, _ := pool.signer.Sender(tx) - if len(pending[from]) != 1 || pending[from][0].Hash != tx.Hash() { - t.Fatalf("expected post-Osaka txs to be pending") - } - } - - // Now update the pool with the next block. This should cause the pool to - // clear out the post-Osaka txs since they were included in block 2. Since the - // test blockchain doesn't manage nonces, we'll just do that manually before - // the reset is called. Don't forget about the pre-Osaka transaction we also - // added to block 2! - for i := range postOsakaTxs { - statedb.SetNonce(addrs[len(preOsakaTxs)+i], 1, tracing.NonceChangeEoACall) - } - statedb.SetNonce(addrs[len(preOsakaTxs)-1], 1, tracing.NonceChangeEoACall) - pool.Reset(header1, block2.Header()) - - // Now verify no post-Osaka transactions are tracked by the pool. - for i, tx := range postOsakaTxs { - if pool.Get(tx.Hash()) != nil { - t.Fatalf("expected txs added post-osaka to have been placed in limbo due to inclusion in a block: index %d, hash %s", i, tx.Hash()) - } - } - - // Wait for the pool migration to complete. - <-pool.cQueue.anyBillyConversionDone - - // Verify all transactions in the pool were converted and verify the - // subsequent cell proofs. 
- count, _ := pool.Stats() - if count != len(preOsakaTxs)-1 { - t.Errorf("expected pending count to match initial tx count: pending=%d, expected=%d", count, len(preOsakaTxs)-1) - } - for addr, acc := range pool.index { - for _, m := range acc { - if m.version != types.BlobSidecarVersion1 { - t.Errorf("expected sidecar to have been converted: from %s, hash %s", addr, m.hash) - } - tx := pool.Get(m.hash) - if tx == nil { - t.Errorf("failed to get tx by hash: %s", m.hash) - } - sc := tx.BlobTxSidecar() - if err := kzg4844.VerifyCellProofs(sc.Blobs, sc.Commitments, sc.Proofs); err != nil { - t.Errorf("failed to verify cell proofs for tx %s after conversion: %s", m.hash, err) - } - } - } - - verifyPoolInternals(t, pool) - - // Launch conversion a second time. - // This is just a sanity check to ensure we can handle it. - pool.Reset(header0, header1) - - pool.Close() -} - // fakeBilly is a billy.Database implementation which just drops data on the floor. type fakeBilly struct { billy.Database @@ -2360,5 +2110,3 @@ func benchmarkPoolPending(b *testing.B, datacap uint64) { } } } - -func newUint64(val uint64) *uint64 { return &val } diff --git a/core/txpool/blobpool/conversion.go b/core/txpool/blobpool/conversion.go deleted file mode 100644 index afdc10554f..0000000000 --- a/core/txpool/blobpool/conversion.go +++ /dev/null @@ -1,218 +0,0 @@ -// Copyright 2025 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. 
-// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package blobpool - -import ( - "errors" - "slices" - "sync/atomic" - "time" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/log" -) - -// maxPendingConversionTasks caps the number of pending conversion tasks. This -// prevents excessive memory usage; the worst-case scenario (2k transactions -// with 6 blobs each) would consume approximately 1.5GB of memory. -const maxPendingConversionTasks = 2048 - -// txConvert represents a conversion task with an attached legacy blob transaction. -type txConvert struct { - tx *types.Transaction // Legacy blob transaction - done chan error // Channel for signaling back if the conversion succeeds -} - -// conversionQueue is a dedicated queue for converting legacy blob transactions -// received from the network after the Osaka fork. Since conversion is expensive, -// it is performed in the background by a single thread, ensuring the main Geth -// process is not overloaded. -type conversionQueue struct { - tasks chan *txConvert - startBilly chan func() - quit chan struct{} - closed chan struct{} - - billyQueue []func() - billyTaskDone chan struct{} - - // This channel will be closed when the first billy conversion finishes. - // It's added for unit tests to synchronize with the conversion progress. - anyBillyConversionDone chan struct{} -} - -// newConversionQueue constructs the conversion queue. -func newConversionQueue() *conversionQueue { - q := &conversionQueue{ - tasks: make(chan *txConvert), - startBilly: make(chan func()), - quit: make(chan struct{}), - closed: make(chan struct{}), - anyBillyConversionDone: make(chan struct{}), - } - go q.loop() - return q -} - -// convert accepts a legacy blob transaction with version-0 blobs and queues it -// for conversion. 
-// -// This function may block for a long time until the transaction is processed. -func (q *conversionQueue) convert(tx *types.Transaction) error { - done := make(chan error, 1) - select { - case q.tasks <- &txConvert{tx: tx, done: done}: - return <-done - case <-q.closed: - return errors.New("conversion queue closed") - } -} - -// launchBillyConversion starts a conversion task in the background. -func (q *conversionQueue) launchBillyConversion(fn func()) error { - select { - case q.startBilly <- fn: - return nil - case <-q.closed: - return errors.New("conversion queue closed") - } -} - -// close terminates the conversion queue. -func (q *conversionQueue) close() { - select { - case <-q.closed: - return - default: - close(q.quit) - <-q.closed - } -} - -// run converts a batch of legacy blob txs to the new cell proof format. -func (q *conversionQueue) run(tasks []*txConvert, done chan struct{}, interrupt *atomic.Int32) { - defer close(done) - - for _, t := range tasks { - if interrupt != nil && interrupt.Load() != 0 { - t.done <- errors.New("conversion is interrupted") - continue - } - sidecar := t.tx.BlobTxSidecar() - if sidecar == nil { - t.done <- errors.New("tx without sidecar") - continue - } - // Run the conversion, the original sidecar will be mutated in place - start := time.Now() - err := sidecar.ToV1() - t.done <- err - log.Trace("Converted legacy blob tx", "hash", t.tx.Hash(), "err", err, "elapsed", common.PrettyDuration(time.Since(start))) - } -} - -func (q *conversionQueue) loop() { - defer close(q.closed) - - var ( - done chan struct{} // Non-nil if background routine is active - interrupt *atomic.Int32 // Flag to signal conversion interruption - - // The pending tasks for sidecar conversion. We assume the number of legacy - // blob transactions requiring conversion will not be excessive. However, - // a hard cap is applied as a protective measure. 
- txTasks []*txConvert - - firstBilly = true - ) - - for { - select { - case t := <-q.tasks: - if len(txTasks) >= maxPendingConversionTasks { - t.done <- errors.New("conversion queue is overloaded") - continue - } - txTasks = append(txTasks, t) - - // Launch the background conversion thread if it's idle - if done == nil { - done, interrupt = make(chan struct{}), new(atomic.Int32) - - tasks := slices.Clone(txTasks) - txTasks = txTasks[:0] - go q.run(tasks, done, interrupt) - } - - case <-done: - done, interrupt = nil, nil - if len(txTasks) > 0 { - done, interrupt = make(chan struct{}), new(atomic.Int32) - tasks := slices.Clone(txTasks) - txTasks = txTasks[:0] - go q.run(tasks, done, interrupt) - } - - case fn := <-q.startBilly: - q.billyQueue = append(q.billyQueue, fn) - q.runNextBillyTask() - - case <-q.billyTaskDone: - if firstBilly { - close(q.anyBillyConversionDone) - firstBilly = false - } - q.runNextBillyTask() - - case <-q.quit: - if done != nil { - log.Debug("Waiting for blob proof conversion to exit") - interrupt.Store(1) - <-done - } - if q.billyTaskDone != nil { - log.Debug("Waiting for blobpool billy conversion to exit") - <-q.billyTaskDone - } - // Signal any tasks that were queued for the next batch but never started - // so callers blocked in convert() receive an error instead of hanging. - for _, t := range txTasks { - // Best-effort notify; t.done is a buffered channel of size 1 - // created by convert(), and we send exactly once per task. - t.done <- errors.New("conversion queue closed") - } - // Drop references to allow GC of the backing array. - txTasks = txTasks[:0] - return - } - } -} - -func (q *conversionQueue) runNextBillyTask() { - if len(q.billyQueue) == 0 { - q.billyTaskDone = nil - return - } - - fn := q.billyQueue[0] - q.billyQueue = append(q.billyQueue[:0], q.billyQueue[1:]...) 
- - done := make(chan struct{}) - go func() { defer close(done); fn() }() - q.billyTaskDone = done -} diff --git a/core/txpool/blobpool/conversion_test.go b/core/txpool/blobpool/conversion_test.go deleted file mode 100644 index 7ffffb2e4d..0000000000 --- a/core/txpool/blobpool/conversion_test.go +++ /dev/null @@ -1,171 +0,0 @@ -// Copyright 2025 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package blobpool - -import ( - "crypto/ecdsa" - "crypto/sha256" - "sync" - "testing" - "time" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/crypto/kzg4844" - "github.com/ethereum/go-ethereum/params" - "github.com/holiman/uint256" -) - -// createV1BlobTx creates a blob transaction with version 1 sidecar for testing. 
-func createV1BlobTx(nonce uint64, key *ecdsa.PrivateKey) *types.Transaction { - blob := &kzg4844.Blob{byte(nonce)} - commitment, _ := kzg4844.BlobToCommitment(blob) - cellProofs, _ := kzg4844.ComputeCellProofs(blob) - - blobtx := &types.BlobTx{ - ChainID: uint256.MustFromBig(params.MainnetChainConfig.ChainID), - Nonce: nonce, - GasTipCap: uint256.NewInt(1), - GasFeeCap: uint256.NewInt(1000), - Gas: 21000, - BlobFeeCap: uint256.NewInt(100), - BlobHashes: []common.Hash{kzg4844.CalcBlobHashV1(sha256.New(), &commitment)}, - Value: uint256.NewInt(100), - Sidecar: types.NewBlobTxSidecar(types.BlobSidecarVersion1, []kzg4844.Blob{*blob}, []kzg4844.Commitment{commitment}, cellProofs), - } - return types.MustSignNewTx(key, types.LatestSigner(params.MainnetChainConfig), blobtx) -} - -func TestConversionQueueBasic(t *testing.T) { - queue := newConversionQueue() - defer queue.close() - - key, _ := crypto.GenerateKey() - tx := makeTx(0, 1, 1, 1, key) - if err := queue.convert(tx); err != nil { - t.Fatalf("Expected successful conversion, got error: %v", err) - } - if tx.BlobTxSidecar().Version != types.BlobSidecarVersion1 { - t.Errorf("Expected sidecar version to be %d, got %d", types.BlobSidecarVersion1, tx.BlobTxSidecar().Version) - } -} - -func TestConversionQueueV1BlobTx(t *testing.T) { - queue := newConversionQueue() - defer queue.close() - - key, _ := crypto.GenerateKey() - tx := createV1BlobTx(0, key) - version := tx.BlobTxSidecar().Version - - err := queue.convert(tx) - if err != nil { - t.Fatalf("Expected successful conversion, got error: %v", err) - } - if tx.BlobTxSidecar().Version != version { - t.Errorf("Expected sidecar version to remain %d, got %d", version, tx.BlobTxSidecar().Version) - } -} - -func TestConversionQueueClosed(t *testing.T) { - queue := newConversionQueue() - - // Close the queue first - queue.close() - key, _ := crypto.GenerateKey() - tx := makeTx(0, 1, 1, 1, key) - - err := queue.convert(tx) - if err == nil { - t.Fatal("Expected error when 
converting on closed queue, got nil") - } -} - -func TestConversionQueueDoubleClose(t *testing.T) { - queue := newConversionQueue() - queue.close() - queue.close() // Should not panic -} - -func TestConversionQueueAutoRestartBatch(t *testing.T) { - queue := newConversionQueue() - defer queue.close() - - key, _ := crypto.GenerateKey() - - // Create a heavy transaction to ensure the first batch runs long enough - // for subsequent tasks to be queued while it is active. - heavy := makeMultiBlobTx(0, 1, 1, 1, int(params.BlobTxMaxBlobs), 0, key, types.BlobSidecarVersion0) - - var wg sync.WaitGroup - wg.Add(1) - heavyDone := make(chan error, 1) - go func() { - defer wg.Done() - heavyDone <- queue.convert(heavy) - }() - - // Give the conversion worker a head start so that the following tasks are - // enqueued while the first batch is running. - time.Sleep(200 * time.Millisecond) - - tx1 := makeTx(1, 1, 1, 1, key) - tx2 := makeTx(2, 1, 1, 1, key) - - wg.Add(2) - done1 := make(chan error, 1) - done2 := make(chan error, 1) - go func() { defer wg.Done(); done1 <- queue.convert(tx1) }() - go func() { defer wg.Done(); done2 <- queue.convert(tx2) }() - - select { - case err := <-done1: - if err != nil { - t.Fatalf("tx1 conversion error: %v", err) - } - case <-time.After(30 * time.Second): - t.Fatal("timeout waiting for tx1 conversion") - } - - select { - case err := <-done2: - if err != nil { - t.Fatalf("tx2 conversion error: %v", err) - } - case <-time.After(30 * time.Second): - t.Fatal("timeout waiting for tx2 conversion") - } - - select { - case err := <-heavyDone: - if err != nil { - t.Fatalf("heavy conversion error: %v", err) - } - case <-time.After(30 * time.Second): - t.Fatal("timeout waiting for heavy conversion") - } - - wg.Wait() - - if tx1.BlobTxSidecar().Version != types.BlobSidecarVersion1 { - t.Fatalf("tx1 sidecar version mismatch: have %d, want %d", tx1.BlobTxSidecar().Version, types.BlobSidecarVersion1) - } - if tx2.BlobTxSidecar().Version != 
types.BlobSidecarVersion1 { - t.Fatalf("tx2 sidecar version mismatch: have %d, want %d", tx2.BlobTxSidecar().Version, types.BlobSidecarVersion1) - } -} diff --git a/core/txpool/blobpool/lookup.go b/core/txpool/blobpool/lookup.go index 874ca85b8c..7607cd487a 100644 --- a/core/txpool/blobpool/lookup.go +++ b/core/txpool/blobpool/lookup.go @@ -110,13 +110,3 @@ func (l *lookup) untrack(tx *blobTxMeta) { } } } - -// update updates the transaction index. It should only be used in the conversion. -func (l *lookup) update(hash common.Hash, id uint64, size uint64) bool { - meta, exists := l.txIndex[hash] - if !exists { - return false - } - meta.id, meta.size = id, size - return true -} diff --git a/core/txpool/validation.go b/core/txpool/validation.go index 4b54eac50d..4f985a8bd0 100644 --- a/core/txpool/validation.go +++ b/core/txpool/validation.go @@ -130,7 +130,7 @@ func ValidateTransaction(tx *types.Transaction, head *types.Header, signer types return fmt.Errorf("%w: gas %v, minimum needed %v", core.ErrIntrinsicGas, tx.Gas(), intrGas) } // Ensure the transaction can cover floor data gas. - if opts.Config.IsPrague(head.Number, head.Time) { + if rules.IsPrague { floorDataGas, err := core.FloorDataGas(tx.Data()) if err != nil { return err @@ -160,6 +160,15 @@ func validateBlobTx(tx *types.Transaction, head *types.Header, opts *ValidationO if sidecar == nil { return errors.New("missing sidecar in blob transaction") } + // Ensure the sidecar is constructed with the correct version, consistent + // with the current fork. 
+ version := types.BlobSidecarVersion0 + if opts.Config.IsOsaka(head.Number, head.Time) { + version = types.BlobSidecarVersion1 + } + if sidecar.Version != version { + return fmt.Errorf("unexpected sidecar version, want: %d, got: %d", version, sidecar.Version) + } // Ensure the blob fee cap satisfies the minimum blob gas price if tx.BlobGasFeeCapIntCmp(blobTxMinBlobGasPrice) < 0 { return fmt.Errorf("%w: blob fee cap %v, minimum needed %v", ErrTxGasPriceTooLow, tx.BlobGasFeeCap(), blobTxMinBlobGasPrice) diff --git a/eth/catalyst/api.go b/eth/catalyst/api.go index 7a8ba6a07a..0ad87ec496 100644 --- a/eth/catalyst/api.go +++ b/eth/catalyst/api.go @@ -517,7 +517,7 @@ func (api *ConsensusAPI) GetBlobsV1(hashes []common.Hash) ([]*engine.BlobAndProo if len(hashes) > 128 { return nil, engine.TooLargeRequest.With(fmt.Errorf("requested blob count too large: %v", len(hashes))) } - blobs, _, proofs, err := api.eth.BlobTxPool().GetBlobs(hashes, types.BlobSidecarVersion0, false) + blobs, _, proofs, err := api.eth.BlobTxPool().GetBlobs(hashes, types.BlobSidecarVersion0) if err != nil { return nil, engine.InvalidParams.With(err) } @@ -578,7 +578,7 @@ func (api *ConsensusAPI) GetBlobsV2(hashes []common.Hash) ([]*engine.BlobAndProo return nil, nil } - blobs, _, proofs, err := api.eth.BlobTxPool().GetBlobs(hashes, types.BlobSidecarVersion1, false) + blobs, _, proofs, err := api.eth.BlobTxPool().GetBlobs(hashes, types.BlobSidecarVersion1) if err != nil { return nil, engine.InvalidParams.With(err) } From 5dfcffcf3c505d0380ae7f02b702d03f4dcbc7f8 Mon Sep 17 00:00:00 2001 From: 0xFloki Date: Fri, 19 Dec 2025 02:13:00 +0100 Subject: [PATCH 196/277] tests/fuzzers: remove unused field from kv struct in rangeproof fuzzer (#33447) Removes the unused `t bool` field from the `kv` struct in the rangeproof fuzzer. 
--- tests/fuzzers/rangeproof/rangeproof-fuzzer.go | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/tests/fuzzers/rangeproof/rangeproof-fuzzer.go b/tests/fuzzers/rangeproof/rangeproof-fuzzer.go index 4d94d31c0c..c60c9cb6e6 100644 --- a/tests/fuzzers/rangeproof/rangeproof-fuzzer.go +++ b/tests/fuzzers/rangeproof/rangeproof-fuzzer.go @@ -32,7 +32,6 @@ import ( type kv struct { k, v []byte - t bool } type fuzzer struct { @@ -62,8 +61,8 @@ func (f *fuzzer) randomTrie(n int) (*trie.Trie, map[string]*kv) { size := f.readInt() // Fill it with some fluff for i := byte(0); i < byte(size); i++ { - value := &kv{common.LeftPadBytes([]byte{i}, 32), []byte{i}, false} - value2 := &kv{common.LeftPadBytes([]byte{i + 10}, 32), []byte{i}, false} + value := &kv{common.LeftPadBytes([]byte{i}, 32), []byte{i}} + value2 := &kv{common.LeftPadBytes([]byte{i + 10}, 32), []byte{i}} trie.MustUpdate(value.k, value.v) trie.MustUpdate(value2.k, value2.v) vals[string(value.k)] = value @@ -76,7 +75,7 @@ func (f *fuzzer) randomTrie(n int) (*trie.Trie, map[string]*kv) { for i := 0; i < n; i++ { k := f.randBytes(32) v := f.randBytes(20) - value := &kv{k, v, false} + value := &kv{k, v} trie.MustUpdate(k, v) vals[string(k)] = value if f.exhausted { From dd7daace9ddc5e02bd23aeb5fbced9fda20b1ab5 Mon Sep 17 00:00:00 2001 From: Jeevan Date: Fri, 19 Dec 2025 12:12:51 +0530 Subject: [PATCH 197/277] eth/catalyst: return empty response for GetBlobsV2 before Osaka (#33444) Fix #33420 --- eth/catalyst/api.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/eth/catalyst/api.go b/eth/catalyst/api.go index 0ad87ec496..7ab9cd57fd 100644 --- a/eth/catalyst/api.go +++ b/eth/catalyst/api.go @@ -563,7 +563,7 @@ func (api *ConsensusAPI) GetBlobsV1(hashes []common.Hash) ([]*engine.BlobAndProo func (api *ConsensusAPI) GetBlobsV2(hashes []common.Hash) ([]*engine.BlobAndProofV2, error) { head := api.eth.BlockChain().CurrentHeader() if api.config().LatestFork(head.Time) < forks.Osaka { - 
return nil, unsupportedForkErr("engine_getBlobsV2 is not available before Osaka fork") + return nil, nil } if len(hashes) > 128 { return nil, engine.TooLargeRequest.With(fmt.Errorf("requested blob count too large: %v", len(hashes))) From bf141fbfb114e18b6203e495ebb0442f632454df Mon Sep 17 00:00:00 2001 From: rjl493456442 Date: Fri, 19 Dec 2025 16:36:48 +0800 Subject: [PATCH 198/277] core, eth: add lock protection in snap sync (#33428) Fixes #33396, #33397, #33398 --- core/blockchain.go | 56 ++++++++++++++++++++++++++++-------- eth/catalyst/api.go | 22 +++----------- eth/catalyst/metrics.go | 33 +++++++++++++++++++++ eth/downloader/beaconsync.go | 5 ++++ eth/downloader/downloader.go | 35 +++++++++++----------- eth/downloader/skeleton.go | 40 +++++++++++++++++++++++++- eth/downloader/syncmode.go | 10 +++++-- triedb/pathdb/generate.go | 1 + 8 files changed, 149 insertions(+), 53 deletions(-) create mode 100644 eth/catalyst/metrics.go diff --git a/core/blockchain.go b/core/blockchain.go index 9e4562eb44..858eceb630 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -953,7 +953,8 @@ func (bc *BlockChain) rewindPathHead(head *types.Header, root common.Hash) (*typ // Recover if the target state if it's not available yet. if !bc.HasState(head.Root) { if err := bc.triedb.Recover(head.Root); err != nil { - log.Crit("Failed to rollback state", "err", err) + log.Error("Failed to rollback state, resetting to genesis", "err", err) + return bc.genesisBlock.Header(), rootNumber } } log.Info("Rewound to block with state", "number", head.Number, "hash", head.Hash()) @@ -1115,14 +1116,48 @@ func (bc *BlockChain) setHeadBeyondRoot(head uint64, time uint64, root common.Ha return rootNumber, bc.loadLastState() } -// SnapSyncCommitHead sets the current head block to the one defined by the hash -// irrelevant what the chain contents were prior. 
-func (bc *BlockChain) SnapSyncCommitHead(hash common.Hash) error { +// SnapSyncStart disables the underlying databases (such as the trie DB and the +// optional state snapshot) to prevent potential concurrent mutations between +// snap sync and other chain operations. +func (bc *BlockChain) SnapSyncStart() error { + if !bc.chainmu.TryLock() { + return errChainStopped + } + defer bc.chainmu.Unlock() + + // Snap sync will directly modify the persistent state, making the entire + // trie database unusable until the state is fully synced. To prevent any + // subsequent state reads, explicitly disable the trie database and state + // syncer is responsible to address and correct any state missing. + if bc.TrieDB().Scheme() == rawdb.PathScheme { + if err := bc.TrieDB().Disable(); err != nil { + return err + } + } + // Snap sync uses the snapshot namespace to store potentially flaky data until + // sync completely heals and finishes. Pause snapshot maintenance in the mean- + // time to prevent access. + if snapshots := bc.Snapshots(); snapshots != nil { // Only nil in tests + snapshots.Disable() + } + return nil +} + +// SnapSyncComplete sets the current head block to the block identified by the +// given hash, regardless of the chain contents prior to snap sync. It is +// invoked once snap sync completes and assumes that SnapSyncStart was called +// previously. +func (bc *BlockChain) SnapSyncComplete(hash common.Hash) error { // Make sure that both the block as well at its state trie exists block := bc.GetBlockByHash(hash) if block == nil { return fmt.Errorf("non existent block [%x..]", hash[:4]) } + if !bc.chainmu.TryLock() { + return errChainStopped + } + defer bc.chainmu.Unlock() + // Reset the trie database with the fresh snap synced state. 
root := block.Root() if bc.triedb.Scheme() == rawdb.PathScheme { @@ -1133,19 +1168,16 @@ func (bc *BlockChain) SnapSyncCommitHead(hash common.Hash) error { if !bc.HasState(root) { return fmt.Errorf("non existent state [%x..]", root[:4]) } - // If all checks out, manually set the head block. - if !bc.chainmu.TryLock() { - return errChainStopped - } - bc.currentBlock.Store(block.Header()) - headBlockGauge.Update(int64(block.NumberU64())) - bc.chainmu.Unlock() - // Destroy any existing state snapshot and regenerate it in the background, // also resuming the normal maintenance of any previously paused snapshot. if bc.snaps != nil { bc.snaps.Rebuild(root) } + + // If all checks out, manually set the head block. + bc.currentBlock.Store(block.Header()) + headBlockGauge.Update(int64(block.NumberU64())) + log.Info("Committed new head block", "number", block.Number(), "hash", hash) return nil } diff --git a/eth/catalyst/api.go b/eth/catalyst/api.go index 7ab9cd57fd..cc9086b091 100644 --- a/eth/catalyst/api.go +++ b/eth/catalyst/api.go @@ -36,7 +36,6 @@ import ( "github.com/ethereum/go-ethereum/eth/ethconfig" "github.com/ethereum/go-ethereum/internal/version" "github.com/ethereum/go-ethereum/log" - "github.com/ethereum/go-ethereum/metrics" "github.com/ethereum/go-ethereum/miner" "github.com/ethereum/go-ethereum/node" "github.com/ethereum/go-ethereum/params" @@ -81,20 +80,6 @@ const ( beaconUpdateWarnFrequency = 5 * time.Minute ) -var ( - // Number of blobs requested via getBlobsV2 - getBlobsRequestedCounter = metrics.NewRegisteredCounter("engine/getblobs/requested", nil) - - // Number of blobs requested via getBlobsV2 that are present in the blobpool - getBlobsAvailableCounter = metrics.NewRegisteredCounter("engine/getblobs/available", nil) - - // Number of times getBlobsV2 responded with “hit” - getBlobsV2RequestHit = metrics.NewRegisteredCounter("engine/getblobs/hit", nil) - - // Number of times getBlobsV2 responded with “miss” - getBlobsV2RequestMiss = 
metrics.NewRegisteredCounter("engine/getblobs/miss", nil) -) - type ConsensusAPI struct { eth *eth.Ethereum @@ -137,6 +122,9 @@ type ConsensusAPI struct { // NewConsensusAPI creates a new consensus api for the given backend. // The underlying blockchain needs to have a valid terminal total difficulty set. +// +// This function creates a long-lived object with an attached background thread. +// For testing or other short-term use cases, please use newConsensusAPIWithoutHeartbeat. func NewConsensusAPI(eth *eth.Ethereum) *ConsensusAPI { api := newConsensusAPIWithoutHeartbeat(eth) go api.heartbeat() @@ -818,7 +806,7 @@ func (api *ConsensusAPI) delayPayloadImport(block *types.Block) engine.PayloadSt return engine.PayloadStatusV1{Status: engine.SYNCING} } // Either no beacon sync was started yet, or it rejected the delivered - // payload as non-integratable on top of the existing sync. We'll just + // payload as non-integrate on top of the existing sync. We'll just // have to rely on the beacon client to forcefully update the head with // a forkchoice update request. if api.eth.Downloader().ConfigSyncMode() == ethconfig.FullSync { @@ -916,8 +904,6 @@ func (api *ConsensusAPI) invalid(err error, latestValid *types.Header) engine.Pa // heartbeat loops indefinitely, and checks if there have been beacon client updates // received in the last while. If not - or if they but strange ones - it warns the // user that something might be off with their consensus node. -// -// TODO(karalabe): Spin this goroutine down somehow func (api *ConsensusAPI) heartbeat() { // Sleep a bit on startup since there's obviously no beacon client yet // attached, so no need to print scary warnings to the user. diff --git a/eth/catalyst/metrics.go b/eth/catalyst/metrics.go new file mode 100644 index 0000000000..d0a733a22b --- /dev/null +++ b/eth/catalyst/metrics.go @@ -0,0 +1,33 @@ +// Copyright 2025 The go-ethereum Authors +// This file is part of the go-ethereum library. 
+// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package catalyst + +import "github.com/ethereum/go-ethereum/metrics" + +var ( + // Number of blobs requested via getBlobsV2 + getBlobsRequestedCounter = metrics.NewRegisteredCounter("engine/getblobs/requested", nil) + + // Number of blobs requested via getBlobsV2 that are present in the blobpool + getBlobsAvailableCounter = metrics.NewRegisteredCounter("engine/getblobs/available", nil) + + // Number of times getBlobsV2 responded with “hit” + getBlobsV2RequestHit = metrics.NewRegisteredCounter("engine/getblobs/hit", nil) + + // Number of times getBlobsV2 responded with “miss” + getBlobsV2RequestMiss = metrics.NewRegisteredCounter("engine/getblobs/miss", nil) +) diff --git a/eth/downloader/beaconsync.go b/eth/downloader/beaconsync.go index 405643e576..750c224230 100644 --- a/eth/downloader/beaconsync.go +++ b/eth/downloader/beaconsync.go @@ -61,6 +61,7 @@ func (b *beaconBackfiller) suspend() *types.Header { b.lock.Unlock() if !filling { + log.Debug("Backfiller was inactive") return filled // Return the filled header on the previous sync completion } // A previous filling should be running, though it may happen that it hasn't @@ -73,6 +74,7 @@ func (b *beaconBackfiller) suspend() *types.Header { // Now that we're sure the downloader successfully started up, we can cancel // it safely without 
running the risk of data races. b.downloader.Cancel() + log.Debug("Backfiller has been suspended") // Sync cycle was just terminated, retrieve and return the last filled header. // Can't use `filled` as that contains a stale value from before cancellation. @@ -86,6 +88,7 @@ func (b *beaconBackfiller) resume() { // If a previous filling cycle is still running, just ignore this start // request. // TODO(karalabe): We should make this channel driven b.lock.Unlock() + log.Debug("Backfiller is running") return } b.filling = true @@ -114,7 +117,9 @@ func (b *beaconBackfiller) resume() { if b.success != nil { b.success() } + log.Debug("Backfilling completed") }() + log.Debug("Backfilling started") } // SetBadBlockCallback sets the callback to run when a bad block is hit by the diff --git a/eth/downloader/downloader.go b/eth/downloader/downloader.go index 020dd7314b..e16014be95 100644 --- a/eth/downloader/downloader.go +++ b/eth/downloader/downloader.go @@ -193,8 +193,12 @@ type BlockChain interface { // CurrentSnapBlock retrieves the head snap block from the local chain. CurrentSnapBlock() *types.Header - // SnapSyncCommitHead directly commits the head block to a certain entity. - SnapSyncCommitHead(common.Hash) error + // SnapSyncStart explicitly notifies the chain that snap sync is scheduled and + // marks chain mutations as disallowed. + SnapSyncStart() error + + // SnapSyncComplete directly commits the head block to a certain entity. + SnapSyncComplete(common.Hash) error // InsertHeadersBeforeCutoff inserts a batch of headers before the configured // chain cutoff into the ancient store. 
@@ -361,28 +365,21 @@ func (d *Downloader) synchronise(beaconPing chan struct{}) (err error) { if d.notified.CompareAndSwap(false, true) { log.Info("Block synchronisation started") } - mode := d.moder.get() + + // Obtain the synchronized used in this cycle + mode := d.moder.get(true) defer func() { if err == nil && mode == ethconfig.SnapSync { d.moder.disableSnap() log.Info("Disabled snap-sync after the initial sync cycle") } }() + + // Disable chain mutations when snap sync is selected, ensuring the + // downloader is the sole mutator. if mode == ethconfig.SnapSync { - // Snap sync will directly modify the persistent state, making the entire - // trie database unusable until the state is fully synced. To prevent any - // subsequent state reads, explicitly disable the trie database and state - // syncer is responsible to address and correct any state missing. - if d.blockchain.TrieDB().Scheme() == rawdb.PathScheme { - if err := d.blockchain.TrieDB().Disable(); err != nil { - return err - } - } - // Snap sync uses the snapshot namespace to store potentially flaky data until - // sync completely heals and finishes. Pause snapshot maintenance in the mean- - // time to prevent access. - if snapshots := d.blockchain.Snapshots(); snapshots != nil { // Only nil in tests - snapshots.Disable() + if err := d.blockchain.SnapSyncStart(); err != nil { + return err } } // Reset the queue, peer set and wake channels to clean any internal leftover state @@ -427,7 +424,7 @@ func (d *Downloader) getMode() SyncMode { // ConfigSyncMode returns the sync mode configured for the node. // The actual running sync mode can differ from this. 
func (d *Downloader) ConfigSyncMode() SyncMode { - return d.moder.get() + return d.moder.get(false) } // syncToHead starts a block synchronization based on the hash chain from @@ -1086,7 +1083,7 @@ func (d *Downloader) commitPivotBlock(result *fetchResult) error { if _, err := d.blockchain.InsertReceiptChain([]*types.Block{block}, []rlp.RawValue{result.Receipts}, d.ancientLimit); err != nil { return err } - if err := d.blockchain.SnapSyncCommitHead(block.Hash()); err != nil { + if err := d.blockchain.SnapSyncComplete(block.Hash()); err != nil { return err } d.committed.Store(true) diff --git a/eth/downloader/skeleton.go b/eth/downloader/skeleton.go index 2cf9c4672b..c498ac84ec 100644 --- a/eth/downloader/skeleton.go +++ b/eth/downloader/skeleton.go @@ -64,6 +64,12 @@ var errSyncMerged = errors.New("sync merged") // should abort and restart with the new state. var errSyncReorged = errors.New("sync reorged") +// errSyncTrimmed is an internal helper error to signal that the local chain +// has been trimmed (e.g, via debug_setHead explicitly) and the skeleton chain +// is no longer linked with the local chain. In this case, the skeleton sync +// should be re-scheduled again. +var errSyncTrimmed = errors.New("sync trimmed") + // errTerminated is returned if the sync mechanism was terminated for this run of // the process. This is usually the case when Geth is shutting down and some events // might still be propagating. @@ -296,6 +302,11 @@ func (s *skeleton) startup() { // head to force a cleanup. head = newhead + case err == errSyncTrimmed: + // The skeleton chain is not linked with the local chain anymore, + // restart the sync. + head = nil + case err == errTerminated: // Sync was requested to be terminated from within, stop and // return (no need to pass a message, was already done internally) @@ -486,7 +497,22 @@ func (s *skeleton) sync(head *types.Header) (*types.Header, error) { // is still running, it will pick it up. 
If it already terminated, // a new cycle needs to be spun up. if linked { - s.filler.resume() + linked = len(s.progress.Subchains) == 1 && + rawdb.HasHeader(s.db, s.progress.Subchains[0].Next, s.scratchHead) && + rawdb.HasBody(s.db, s.progress.Subchains[0].Next, s.scratchHead) && + rawdb.HasReceipts(s.db, s.progress.Subchains[0].Next, s.scratchHead) + + if linked { + // The skeleton chain has been extended and is still linked with the local + // chain, try to re-schedule the backfiller if it's already terminated. + s.filler.resume() + } else { + // The skeleton chain is no longer linked to the local chain for some reason + // (e.g. debug_setHead was used to trim the local chain). Re-schedule the + // skeleton sync to fill the chain gap. + log.Warn("Local chain has been trimmed", "tailnumber", s.scratchHead, "tailhash", s.progress.Subchains[0].Next) + return nil, errSyncTrimmed + } } case req := <-requestFails: @@ -649,9 +675,19 @@ func (s *skeleton) processNewHead(head *types.Header, final *types.Header) error // Not a noop / double head announce, abort with a reorg return fmt.Errorf("%w, tail: %d, head: %d, newHead: %d", errChainReorged, lastchain.Tail, lastchain.Head, number) } + // Terminate the sync if the chain head is gapped if lastchain.Head+1 < number { return fmt.Errorf("%w, head: %d, newHead: %d", errChainGapped, lastchain.Head, number) } + // Ignore the duplicated beacon header announcement + if lastchain.Head == number { + local := rawdb.ReadSkeletonHeader(s.db, number) + if local != nil && local.Hash() == head.Hash() { + log.Debug("Ignored the identical beacon header", "number", number, "hash", local.Hash()) + return nil + } + } + // Terminate the sync if the chain head is forked if parent := rawdb.ReadSkeletonHeader(s.db, number-1); parent.Hash() != head.ParentHash { return fmt.Errorf("%w, ancestor: %d, hash: %s, want: %s", errChainForked, number-1, parent.Hash(), head.ParentHash) } @@ -669,6 +705,7 @@ func (s *skeleton) processNewHead(head 
*types.Header, final *types.Header) error if err := batch.Write(); err != nil { log.Crit("Failed to write skeleton sync status", "err", err) } + log.Debug("Extended beacon header chain", "number", head.Number, "hash", head.Hash()) return nil } @@ -1206,6 +1243,7 @@ func (s *skeleton) cleanStales(filled *types.Header) error { if err := batch.Write(); err != nil { log.Crit("Failed to write beacon trim data", "err", err) } + log.Debug("Cleaned stale beacon headers", "start", start, "end", end) return nil } diff --git a/eth/downloader/syncmode.go b/eth/downloader/syncmode.go index 7983d39e3a..036119ce3d 100644 --- a/eth/downloader/syncmode.go +++ b/eth/downloader/syncmode.go @@ -75,7 +75,7 @@ func newSyncModer(mode ethconfig.SyncMode, chain BlockChain, disk ethdb.KeyValue // get retrieves the current sync mode, either explicitly set, or derived // from the chain status. -func (m *syncModer) get() ethconfig.SyncMode { +func (m *syncModer) get(report bool) ethconfig.SyncMode { m.lock.Lock() defer m.lock.Unlock() @@ -83,12 +83,16 @@ func (m *syncModer) get() ethconfig.SyncMode { if m.mode == ethconfig.SnapSync { return ethconfig.SnapSync } + logger := log.Debug + if report { + logger = log.Info + } // We are probably in full sync, but we might have rewound to before the // snap sync pivot, check if we should re-enable snap sync. head := m.chain.CurrentBlock() if pivot := rawdb.ReadLastPivotNumber(m.disk); pivot != nil { if head.Number.Uint64() < *pivot { - log.Info("Reenabled snap-sync as chain is lagging behind the pivot", "head", head.Number, "pivot", pivot) + logger("Reenabled snap-sync as chain is lagging behind the pivot", "head", head.Number, "pivot", pivot) return ethconfig.SnapSync } } @@ -96,7 +100,7 @@ func (m *syncModer) get() ethconfig.SyncMode { // the head state, forcefully rerun the snap sync. Note it doesn't mean the // persistent state is corrupted, just mismatch with the head block. 
if !m.chain.HasState(head.Root) { - log.Info("Reenabled snap-sync as chain is stateless") + logger("Reenabled snap-sync as chain is stateless") return ethconfig.SnapSync } // Nope, we're really full syncing diff --git a/triedb/pathdb/generate.go b/triedb/pathdb/generate.go index 2efbbbb4e1..d3d26fff26 100644 --- a/triedb/pathdb/generate.go +++ b/triedb/pathdb/generate.go @@ -148,6 +148,7 @@ func (g *generator) stop() { g.abort <- ch <-ch g.running = false + log.Debug("Snapshot generation has been terminated") } // completed returns the flag indicating if the whole generation is done. From 2e5cd21edf21175fbbb0e6a95d2a110b97508d20 Mon Sep 17 00:00:00 2001 From: Rizky Ikwan Date: Fri, 19 Dec 2025 12:29:41 +0100 Subject: [PATCH 199/277] graphql: add nil check in block resolver (#33225) Add nil checks for header and block in Block resolver methods to prevent panic when querying non-existent blocks. --- graphql/graphql.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/graphql/graphql.go b/graphql/graphql.go index 55da3185dd..0013abf26f 100644 --- a/graphql/graphql.go +++ b/graphql/graphql.go @@ -707,6 +707,9 @@ func (b *Block) resolveHeader(ctx context.Context) (*types.Header, error) { if err != nil { return nil, err } + if b.header == nil { + return nil, nil + } if b.hash == (common.Hash{}) { b.hash = b.header.Hash() } From 27b3a6087e23a477b74e617641ff2378faf2a970 Mon Sep 17 00:00:00 2001 From: Csaba Kiraly Date: Wed, 24 Dec 2025 02:44:17 +0100 Subject: [PATCH 200/277] core/txpool/blobpool: fix slotter size limit (#33474) Blobs are stored per transaction in the pool, so we need billy to handle up to the per-tx limit, not to the per-block limit. The per-block limit was larger than the per-tx limit, so it not a bug, we just created and handled a few billy files for no reason. 
Signed-off-by: Csaba Kiraly --- core/txpool/blobpool/blobpool.go | 2 +- core/txpool/blobpool/limbo.go | 3 +-- core/txpool/blobpool/slotter.go | 5 ++--- 3 files changed, 4 insertions(+), 6 deletions(-) diff --git a/core/txpool/blobpool/blobpool.go b/core/txpool/blobpool/blobpool.go index e1a4960c8e..28326ae605 100644 --- a/core/txpool/blobpool/blobpool.go +++ b/core/txpool/blobpool/blobpool.go @@ -410,7 +410,7 @@ func (p *BlobPool) Init(gasTip uint64, head *types.Header, reserver txpool.Reser p.state = state // Create new slotter for pre-Osaka blob configuration. - slotter := newSlotter(eip4844.LatestMaxBlobsPerBlock(p.chain.Config())) + slotter := newSlotter(params.BlobTxMaxBlobs) // See if we need to migrate the queue blob store after fusaka slotter, err = tryMigrate(p.chain.Config(), slotter, queuedir) diff --git a/core/txpool/blobpool/limbo.go b/core/txpool/blobpool/limbo.go index 50c40c9d83..36284d6a03 100644 --- a/core/txpool/blobpool/limbo.go +++ b/core/txpool/blobpool/limbo.go @@ -20,7 +20,6 @@ import ( "errors" "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/consensus/misc/eip4844" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/params" @@ -57,7 +56,7 @@ func newLimbo(config *params.ChainConfig, datadir string) (*limbo, error) { } // Create new slotter for pre-Osaka blob configuration. - slotter := newSlotter(eip4844.LatestMaxBlobsPerBlock(config)) + slotter := newSlotter(params.BlobTxMaxBlobs) // See if we need to migrate the limbo after fusaka. 
slotter, err := tryMigrate(config, slotter, datadir) diff --git a/core/txpool/blobpool/slotter.go b/core/txpool/blobpool/slotter.go index 9b793e366c..3399361e55 100644 --- a/core/txpool/blobpool/slotter.go +++ b/core/txpool/blobpool/slotter.go @@ -17,7 +17,6 @@ package blobpool import ( - "github.com/ethereum/go-ethereum/consensus/misc/eip4844" "github.com/ethereum/go-ethereum/params" "github.com/holiman/billy" ) @@ -42,7 +41,7 @@ func tryMigrate(config *params.ChainConfig, slotter billy.SlotSizeFn, datadir st // If the version found is less than the currently configured store version, // perform a migration then write the updated version of the store. if version < storeVersion { - newSlotter := newSlotterEIP7594(eip4844.LatestMaxBlobsPerBlock(config)) + newSlotter := newSlotterEIP7594(params.BlobTxMaxBlobs) if err := billy.Migrate(billy.Options{Path: datadir, Repair: true}, slotter, newSlotter); err != nil { return nil, err } @@ -54,7 +53,7 @@ func tryMigrate(config *params.ChainConfig, slotter billy.SlotSizeFn, datadir st store.Close() } // Set the slotter to the format now that the Osaka is active. - slotter = newSlotterEIP7594(eip4844.LatestMaxBlobsPerBlock(config)) + slotter = newSlotterEIP7594(params.BlobTxMaxBlobs) } return slotter, nil } From 4531bfebecbb448ac742ee804e5063e6e8d3d1c9 Mon Sep 17 00:00:00 2001 From: rjl493456442 Date: Mon, 29 Dec 2025 16:13:30 +0800 Subject: [PATCH 201/277] eth/downloader: fix stale beacon header deletion (#33481) In this PR, two things have been fixed: --- (a) truncate the stale beacon headers with latest snap block Originally, b.filled is used as the indicator for deleting stale beacon headers. This field is set only after synchronization has been scheduled, under the assumption that the skeleton chain is already linked to the local chain. However, the local chain can be mutated via `debug_setHead`, which may cause `b.filled` outdated. 
For instance, `b.filled` refers to the last head snap block in the last sync cycle while after `debug_setHead`, the head snap block has been rewounded to 1. As a result, Geth can enter an unintended loop: it repeatedly downloads the missing beacon headers for the skeleton chain and attempts to schedule the actual synchronization, but in the final step, all recently fetched headers are removed by `cleanStales` due to the stale `b.filled` value. This issue is addressed by always using the latest snap block as the indicator, without relying on any cached value. However, note that before the skeleton chain is linked to the local chain, the latest snap block will always be below skeleton.tail, and this condition should not be treated as an error. --- (b) merge the subchains once the skeleton chain links to local chain Once the skeleton chain links with local one, it will try to schedule the synchronization by fetching the missing blocks and import them then. It's possible the last subchain already overwrites the previous subchain and results in having two subchains leftover. 
As a result, an error log will printed https://github.com/ethereum/go-ethereum/blob/master/eth/downloader/skeleton.go#L1074 --- eth/downloader/beaconsync.go | 15 ++-- eth/downloader/downloader.go | 2 +- eth/downloader/skeleton.go | 135 ++++++++++++++++++++------------ eth/downloader/skeleton_test.go | 13 ++- 4 files changed, 104 insertions(+), 61 deletions(-) diff --git a/eth/downloader/beaconsync.go b/eth/downloader/beaconsync.go index 750c224230..914e1dfada 100644 --- a/eth/downloader/beaconsync.go +++ b/eth/downloader/beaconsync.go @@ -36,7 +36,6 @@ type beaconBackfiller struct { downloader *Downloader // Downloader to direct via this callback implementation success func() // Callback to run on successful sync cycle completion filling bool // Flag whether the downloader is backfilling or not - filled *types.Header // Last header filled by the last terminated sync loop started chan struct{} // Notification channel whether the downloader inited lock sync.Mutex // Mutex protecting the sync lock } @@ -56,13 +55,15 @@ func (b *beaconBackfiller) suspend() *types.Header { // If no filling is running, don't waste cycles b.lock.Lock() filling := b.filling - filled := b.filled started := b.started b.lock.Unlock() if !filling { + // Sync cycle was inactive, retrieve and return the latest snap block + // as the filled header. log.Debug("Backfiller was inactive") - return filled // Return the filled header on the previous sync completion + + return b.downloader.blockchain.CurrentSnapBlock() } // A previous filling should be running, though it may happen that it hasn't // yet started (being done on a new goroutine). Many concurrent beacon head @@ -77,7 +78,6 @@ func (b *beaconBackfiller) suspend() *types.Header { log.Debug("Backfiller has been suspended") // Sync cycle was just terminated, retrieve and return the last filled header. - // Can't use `filled` as that contains a stale value from before cancellation. 
return b.downloader.blockchain.CurrentSnapBlock() } @@ -92,7 +92,6 @@ func (b *beaconBackfiller) resume() { return } b.filling = true - b.filled = nil b.started = make(chan struct{}) b.lock.Unlock() @@ -103,7 +102,6 @@ func (b *beaconBackfiller) resume() { defer func() { b.lock.Lock() b.filling = false - b.filled = b.downloader.blockchain.CurrentSnapBlock() b.lock.Unlock() }() // If the downloader fails, report an error as in beacon chain mode there @@ -113,7 +111,7 @@ func (b *beaconBackfiller) resume() { return } // Synchronization succeeded. Since this happens async, notify the outer - // context to disable snap syncing and enable transaction propagation. + // context to enable transaction propagation. if b.success != nil { b.success() } @@ -188,6 +186,8 @@ func (d *Downloader) findBeaconAncestor() (uint64, error) { log.Error("Failed to retrieve beacon bounds", "err", err) return 0, err } + log.Debug("Searching beacon ancestor", "local", number, "beaconhead", beaconHead.Number, "beacontail", beaconTail.Number) + var linked bool switch d.getMode() { case ethconfig.FullSync: @@ -241,6 +241,7 @@ func (d *Downloader) findBeaconAncestor() (uint64, error) { } start = check } + log.Debug("Found beacon ancestor", "number", start) return start, nil } diff --git a/eth/downloader/downloader.go b/eth/downloader/downloader.go index e16014be95..caeb3d64dd 100644 --- a/eth/downloader/downloader.go +++ b/eth/downloader/downloader.go @@ -248,7 +248,7 @@ func New(stateDb ethdb.Database, mode ethconfig.SyncMode, mux *event.TypeMux, ch syncStartBlock: chain.CurrentSnapBlock().Number.Uint64(), } // Create the post-merge skeleton syncer and start the process - dl.skeleton = newSkeleton(stateDb, dl.peers, dropPeer, newBeaconBackfiller(dl, success)) + dl.skeleton = newSkeleton(stateDb, dl.peers, dropPeer, newBeaconBackfiller(dl, success), chain) go dl.stateFetcher() return dl diff --git a/eth/downloader/skeleton.go b/eth/downloader/skeleton.go index c498ac84ec..e693bfc066 100644 --- 
a/eth/downloader/skeleton.go +++ b/eth/downloader/skeleton.go @@ -207,6 +207,7 @@ type backfiller interface { type skeleton struct { db ethdb.Database // Database backing the skeleton filler backfiller // Chain syncer suspended/resumed by head events + chain chainReader // Underlying block chain peers *peerSet // Set of peers we can sync from idles map[string]*peerConnection // Set of idle peers in the current sync cycle @@ -231,12 +232,19 @@ type skeleton struct { syncStarting func() // callback triggered after a sync cycle is inited but before started } +// chainReader wraps the method to retrieve the head of the local chain. +type chainReader interface { + // CurrentSnapBlock retrieves the head snap block from the local chain. + CurrentSnapBlock() *types.Header +} + // newSkeleton creates a new sync skeleton that tracks a potentially dangling // header chain until it's linked into an existing set of blocks. -func newSkeleton(db ethdb.Database, peers *peerSet, drop peerDropFn, filler backfiller) *skeleton { +func newSkeleton(db ethdb.Database, peers *peerSet, drop peerDropFn, filler backfiller, chain chainReader) *skeleton { sk := &skeleton{ db: db, filler: filler, + chain: chain, peers: peers, drop: drop, requests: make(map[uint64]*headerRequest), @@ -354,6 +362,29 @@ func (s *skeleton) Sync(head *types.Header, final *types.Header, force bool) err } } +// linked returns the flag indicating whether the skeleton has been linked with +// the local chain. +func (s *skeleton) linked(number uint64, hash common.Hash) bool { + linked := rawdb.HasHeader(s.db, hash, number) && + rawdb.HasBody(s.db, hash, number) && + rawdb.HasReceipts(s.db, hash, number) + + // Ensure the skeleton chain links to the local chain below the chain head. + // This accounts for edge cases where leftover chain segments above the head + // may still link to the skeleton chain. In such cases, synchronization is + // likely to fail due to potentially missing segments in the middle. 
+ // + // You can try to produce the edge case by these steps: + // - sync the chain + // - debug.setHead(`0x1`) + // - kill the geth process (the chain segment will be left with chain head rewound) + // - restart + if s.chain.CurrentSnapBlock() != nil { + linked = linked && s.chain.CurrentSnapBlock().Number.Uint64() >= number + } + return linked +} + // sync is the internal version of Sync that executes a single sync cycle, either // until some termination condition is reached, or until the current cycle merges // with a previously aborted run. @@ -378,10 +409,7 @@ func (s *skeleton) sync(head *types.Header) (*types.Header, error) { // If the sync is already done, resume the backfiller. When the loop stops, // terminate the backfiller too. - linked := len(s.progress.Subchains) == 1 && - rawdb.HasHeader(s.db, s.progress.Subchains[0].Next, s.scratchHead) && - rawdb.HasBody(s.db, s.progress.Subchains[0].Next, s.scratchHead) && - rawdb.HasReceipts(s.db, s.progress.Subchains[0].Next, s.scratchHead) + linked := len(s.progress.Subchains) == 1 && s.linked(s.scratchHead, s.progress.Subchains[0].Next) if linked { s.filler.resume() } @@ -497,12 +525,7 @@ func (s *skeleton) sync(head *types.Header) (*types.Header, error) { // is still running, it will pick it up. If it already terminated, // a new cycle needs to be spun up. if linked { - linked = len(s.progress.Subchains) == 1 && - rawdb.HasHeader(s.db, s.progress.Subchains[0].Next, s.scratchHead) && - rawdb.HasBody(s.db, s.progress.Subchains[0].Next, s.scratchHead) && - rawdb.HasReceipts(s.db, s.progress.Subchains[0].Next, s.scratchHead) - - if linked { + if len(s.progress.Subchains) == 1 && s.linked(s.scratchHead, s.progress.Subchains[0].Next) { // The skeleton chain has been extended and is still linked with the local // chain, try to re-schedule the backfiller if it's already terminated. 
s.filler.resume() @@ -946,6 +969,45 @@ func (s *skeleton) revertRequest(req *headerRequest) { s.scratchOwners[(s.scratchHead-req.head)/requestHeaders] = "" } +// mergeSubchains is invoked once certain beacon headers have been persisted locally +// and the subchains should be merged in case there are some overlaps between. An +// indicator will be returned if the last subchain is merged with previous subchain. +func (s *skeleton) mergeSubchains() bool { + // If the subchain extended into the next subchain, we need to handle + // the overlap. Since there could be many overlaps, do this in a loop. + var merged bool + for len(s.progress.Subchains) > 1 && s.progress.Subchains[1].Head >= s.progress.Subchains[0].Tail { + // Extract some stats from the second subchain + head := s.progress.Subchains[1].Head + tail := s.progress.Subchains[1].Tail + next := s.progress.Subchains[1].Next + + // Since we just overwrote part of the next subchain, we need to trim + // its head independent of matching or mismatching content + if s.progress.Subchains[1].Tail >= s.progress.Subchains[0].Tail { + // Fully overwritten, get rid of the subchain as a whole + log.Debug("Previous subchain fully overwritten", "head", head, "tail", tail, "next", next) + s.progress.Subchains = append(s.progress.Subchains[:1], s.progress.Subchains[2:]...) 
+ continue + } else { + // Partially overwritten, trim the head to the overwritten size + log.Debug("Previous subchain partially overwritten", "head", head, "tail", tail, "next", next) + s.progress.Subchains[1].Head = s.progress.Subchains[0].Tail - 1 + } + // If the old subchain is an extension of the new one, merge the two + // and let the skeleton syncer restart (to clean internal state) + if rawdb.ReadSkeletonHeader(s.db, s.progress.Subchains[1].Head).Hash() == s.progress.Subchains[0].Next { + log.Debug("Previous subchain merged", "head", head, "tail", tail, "next", next) + s.progress.Subchains[0].Tail = s.progress.Subchains[1].Tail + s.progress.Subchains[0].Next = s.progress.Subchains[1].Next + + s.progress.Subchains = append(s.progress.Subchains[:1], s.progress.Subchains[2:]...) + merged = true + } + } + return merged +} + func (s *skeleton) processResponse(res *headerResponse) (linked bool, merged bool) { res.peer.log.Trace("Processing header response", "head", res.headers[0].Number, "hash", res.headers[0].Hash(), "count", len(res.headers)) @@ -1019,10 +1081,9 @@ func (s *skeleton) processResponse(res *headerResponse) (linked bool, merged boo // processing is done, so it's just one more "needless" check. // // The weird cascading checks are done to minimize the database reads. - linked = rawdb.HasHeader(s.db, header.ParentHash, header.Number.Uint64()-1) && - rawdb.HasBody(s.db, header.ParentHash, header.Number.Uint64()-1) && - rawdb.HasReceipts(s.db, header.ParentHash, header.Number.Uint64()-1) + linked = s.linked(header.Number.Uint64()-1, header.ParentHash) if linked { + log.Debug("Primary subchain linked", "number", header.Number.Uint64()-1, "hash", header.ParentHash) break } } @@ -1036,6 +1097,9 @@ func (s *skeleton) processResponse(res *headerResponse) (linked bool, merged boo // If the beacon chain was linked to the local chain, completely swap out // all internal progress and abort header synchronization. 
if linked { + // Merge all overlapped subchains beforehand + s.mergeSubchains() + // Linking into the local chain should also mean that there are no // leftover subchains, but in the case of importing the blocks via // the engine API, we will not push the subchains forward. This will @@ -1093,41 +1157,10 @@ func (s *skeleton) processResponse(res *headerResponse) (linked bool, merged boo s.scratchHead -= uint64(consumed) - // If the subchain extended into the next subchain, we need to handle - // the overlap. Since there could be many overlaps (come on), do this - // in a loop. - for len(s.progress.Subchains) > 1 && s.progress.Subchains[1].Head >= s.progress.Subchains[0].Tail { - // Extract some stats from the second subchain - head := s.progress.Subchains[1].Head - tail := s.progress.Subchains[1].Tail - next := s.progress.Subchains[1].Next - - // Since we just overwrote part of the next subchain, we need to trim - // its head independent of matching or mismatching content - if s.progress.Subchains[1].Tail >= s.progress.Subchains[0].Tail { - // Fully overwritten, get rid of the subchain as a whole - log.Debug("Previous subchain fully overwritten", "head", head, "tail", tail, "next", next) - s.progress.Subchains = append(s.progress.Subchains[:1], s.progress.Subchains[2:]...) 
- continue - } else { - // Partially overwritten, trim the head to the overwritten size - log.Debug("Previous subchain partially overwritten", "head", head, "tail", tail, "next", next) - s.progress.Subchains[1].Head = s.progress.Subchains[0].Tail - 1 - } - // If the old subchain is an extension of the new one, merge the two - // and let the skeleton syncer restart (to clean internal state) - if rawdb.ReadSkeletonHeader(s.db, s.progress.Subchains[1].Head).Hash() == s.progress.Subchains[0].Next { - log.Debug("Previous subchain merged", "head", head, "tail", tail, "next", next) - s.progress.Subchains[0].Tail = s.progress.Subchains[1].Tail - s.progress.Subchains[0].Next = s.progress.Subchains[1].Next - - s.progress.Subchains = append(s.progress.Subchains[:1], s.progress.Subchains[2:]...) - merged = true - } - } // If subchains were merged, all further available headers in the scratch // space are invalid since we skipped ahead. Stop processing the scratch // space to avoid dropping peers thinking they delivered invalid data. + merged = s.mergeSubchains() if merged { break } @@ -1158,15 +1191,17 @@ func (s *skeleton) processResponse(res *headerResponse) (linked bool, merged boo // due to the downloader backfilling past the tracked tail. func (s *skeleton) cleanStales(filled *types.Header) error { number := filled.Number.Uint64() - log.Trace("Cleaning stale beacon headers", "filled", number, "hash", filled.Hash()) + log.Debug("Cleaning stale beacon headers", "filled", number, "hash", filled.Hash()) - // If the filled header is below the linked subchain, something's corrupted - // internally. Report and error and refuse to do anything. + // If the filled header is below the subchain, it means the skeleton is not + // linked with local chain yet, don't bother to do cleanup. 
if number+1 < s.progress.Subchains[0].Tail { - return fmt.Errorf("filled header below beacon header tail: %d < %d", number, s.progress.Subchains[0].Tail) + log.Debug("filled header below beacon header tail", "filled", number, "tail", s.progress.Subchains[0].Tail) + return nil } // If nothing in subchain is filled, don't bother to do cleanup. if number+1 == s.progress.Subchains[0].Tail { + log.Debug("Skeleton chain not yet consumed", "filled", number, "hash", filled.Hash(), "tail", s.progress.Subchains[0].Tail) return nil } // If the latest fill was on a different subchain, it means the backfiller diff --git a/eth/downloader/skeleton_test.go b/eth/downloader/skeleton_test.go index 4aa97cf1f7..5c54b4b5c2 100644 --- a/eth/downloader/skeleton_test.go +++ b/eth/downloader/skeleton_test.go @@ -20,6 +20,7 @@ import ( "encoding/json" "errors" "fmt" + "math" "math/big" "sync/atomic" "testing" @@ -71,6 +72,12 @@ func (hf *hookedBackfiller) resume() { } } +type fakeChainReader struct{} + +func (fc *fakeChainReader) CurrentSnapBlock() *types.Header { + return &types.Header{Number: big.NewInt(math.MaxInt64)} +} + // skeletonTestPeer is a mock peer that can only serve header requests from a // pre-perated header chain (which may be arbitrarily wrong for testing). 
// @@ -369,7 +376,7 @@ func TestSkeletonSyncInit(t *testing.T) { // Create a skeleton sync and run a cycle wait := make(chan struct{}) - skeleton := newSkeleton(db, newPeerSet(), nil, newHookedBackfiller()) + skeleton := newSkeleton(db, newPeerSet(), nil, newHookedBackfiller(), &fakeChainReader{}) skeleton.syncStarting = func() { close(wait) } skeleton.Sync(tt.head, nil, true) @@ -472,7 +479,7 @@ func TestSkeletonSyncExtend(t *testing.T) { // Create a skeleton sync and run a cycle wait := make(chan struct{}) - skeleton := newSkeleton(db, newPeerSet(), nil, newHookedBackfiller()) + skeleton := newSkeleton(db, newPeerSet(), nil, newHookedBackfiller(), &fakeChainReader{}) skeleton.syncStarting = func() { close(wait) } skeleton.Sync(tt.head, nil, true) @@ -885,7 +892,7 @@ func TestSkeletonSyncRetrievals(t *testing.T) { } } // Create a skeleton sync and run a cycle - skeleton := newSkeleton(db, peerset, drop, filler) + skeleton := newSkeleton(db, peerset, drop, filler, &fakeChainReader{}) skeleton.Sync(tt.head, nil, true) // Wait a bit (bleah) for the initial sync loop to go to idle. This might From b9702ed27b30b5c0388b9da5cdc816792b52515b Mon Sep 17 00:00:00 2001 From: oooLowNeoNooo Date: Mon, 29 Dec 2025 09:23:51 +0100 Subject: [PATCH 202/277] console/prompt: use PromptInput in PromptConfirm method (#33445) --- console/prompt/prompter.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/console/prompt/prompter.go b/console/prompt/prompter.go index 2a20b6906a..5a0a89e76a 100644 --- a/console/prompt/prompter.go +++ b/console/prompt/prompter.go @@ -142,7 +142,7 @@ func (p *terminalPrompter) PromptPassword(prompt string) (passwd string, err err // PromptConfirm displays the given prompt to the user and requests a boolean // choice to be made, returning that choice. 
func (p *terminalPrompter) PromptConfirm(prompt string) (bool, error) { - input, err := p.Prompt(prompt + " [y/n] ") + input, err := p.PromptInput(prompt + " [y/n] ") if len(input) > 0 && strings.EqualFold(input[:1], "y") { return true, nil } From 57f84866bc6e5a638eea604298c3813b88de1e29 Mon Sep 17 00:00:00 2001 From: Archkon <180910180+Archkon@users.noreply.github.com> Date: Mon, 29 Dec 2025 20:57:29 +0800 Subject: [PATCH 203/277] params: fix wrong comment (#33503) It seems that the comment for CopyGas was wrongly associated to SloadGas. --- params/protocol_params.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/params/protocol_params.go b/params/protocol_params.go index e8b044f450..bb506af015 100644 --- a/params/protocol_params.go +++ b/params/protocol_params.go @@ -32,7 +32,7 @@ const ( MaximumExtraDataSize uint64 = 32 // Maximum size extra data may be after Genesis. ExpByteGas uint64 = 10 // Times ceil(log256(exponent)) for the EXP instruction. - SloadGas uint64 = 50 // Multiplied by the number of 32-byte words that are copied (round up) for any *COPY operation and added. + SloadGas uint64 = 50 // CallValueTransferGas uint64 = 9000 // Paid for CALL when the value transfer is non-zero. CallNewAccountGas uint64 = 25000 // Paid for CALL when the destination address didn't exist prior. TxGas uint64 = 21000 // Per transaction not creating a contract. NOTE: Not payable on data of calls between transactions. @@ -82,7 +82,7 @@ const ( CallCreateDepth uint64 = 1024 // Maximum depth of call/create stack. ExpGas uint64 = 10 // Once per EXP instruction LogGas uint64 = 375 // Per LOG* operation. - CopyGas uint64 = 3 // + CopyGas uint64 = 3 // Multiplied by the number of 32-byte words that are copied (round up) for any *COPY operation and added. StackLimit uint64 = 1024 // Maximum size of VM stack allowed. TierStepGas uint64 = 0 // Once per operation, for a selection of them. 
LogTopicGas uint64 = 375 // Multiplied by the * of the LOG*, per LOG transaction. e.g. LOG0 incurs 0 * c_txLogTopicGas, LOG4 incurs 4 * c_txLogTopicGas. From 3f641dba872dd43c8232b9384b4c09f0b9e3bd96 Mon Sep 17 00:00:00 2001 From: Guillaume Ballet <3272758+gballet@users.noreply.github.com> Date: Tue, 30 Dec 2025 13:44:04 +0100 Subject: [PATCH 204/277] trie, go.mod: remove all references to go-verkle and go-ipa (#33461) In order to reduce the amount of code that is embedded into the keeper binary, I am removing all the verkle code that uses go-verkle and go-ipa. This will be followed by further PRs that are more like stubs to replace code when the keeper build is detected. I'm keeping the binary tree of course. This means that you will still see `isVerkle` variables all over the codebase, but they will be renamed when code is touched (i.e. this is not an invitation for 30+ AI slop PRs). --------- Co-authored-by: Gary Rong --- beacon/engine/gen_ed.go | 74 +++-- beacon/engine/types.go | 73 +++-- cmd/geth/main.go | 2 - cmd/geth/verkle.go | 214 --------------- cmd/keeper/go.mod | 2 - cmd/keeper/go.sum | 4 - consensus/beacon/consensus.go | 41 +-- core/state/access_events.go | 55 ++-- core/state/access_events_test.go | 7 +- core/state/database.go | 18 +- core/state/database_history.go | 8 - core/state/reader.go | 3 +- core/state/state_object.go | 5 +- core/state/statedb.go | 8 +- core/state/statedb_hooked.go | 5 - core/types/block.go | 28 -- core/vm/evm.go | 2 +- core/vm/interface.go | 4 - go.mod | 2 - go.sum | 4 - trie/bintrie/key_encoding.go | 48 +++- trie/bintrie/trie.go | 5 +- trie/utils/verkle.go | 413 ---------------------------- trie/utils/verkle_test.go | 130 --------- trie/verkle.go | 458 ------------------------------- trie/verkle_test.go | 173 ------------ 26 files changed, 154 insertions(+), 1632 deletions(-) delete mode 100644 cmd/geth/verkle.go delete mode 100644 trie/utils/verkle.go delete mode 100644 trie/utils/verkle_test.go delete mode 100644 
trie/verkle.go delete mode 100644 trie/verkle_test.go diff --git a/beacon/engine/gen_ed.go b/beacon/engine/gen_ed.go index 0ae5a3b8f1..6893d64a16 100644 --- a/beacon/engine/gen_ed.go +++ b/beacon/engine/gen_ed.go @@ -17,24 +17,23 @@ var _ = (*executableDataMarshaling)(nil) // MarshalJSON marshals as JSON. func (e ExecutableData) MarshalJSON() ([]byte, error) { type ExecutableData struct { - ParentHash common.Hash `json:"parentHash" gencodec:"required"` - FeeRecipient common.Address `json:"feeRecipient" gencodec:"required"` - StateRoot common.Hash `json:"stateRoot" gencodec:"required"` - ReceiptsRoot common.Hash `json:"receiptsRoot" gencodec:"required"` - LogsBloom hexutil.Bytes `json:"logsBloom" gencodec:"required"` - Random common.Hash `json:"prevRandao" gencodec:"required"` - Number hexutil.Uint64 `json:"blockNumber" gencodec:"required"` - GasLimit hexutil.Uint64 `json:"gasLimit" gencodec:"required"` - GasUsed hexutil.Uint64 `json:"gasUsed" gencodec:"required"` - Timestamp hexutil.Uint64 `json:"timestamp" gencodec:"required"` - ExtraData hexutil.Bytes `json:"extraData" gencodec:"required"` - BaseFeePerGas *hexutil.Big `json:"baseFeePerGas" gencodec:"required"` - BlockHash common.Hash `json:"blockHash" gencodec:"required"` - Transactions []hexutil.Bytes `json:"transactions" gencodec:"required"` - Withdrawals []*types.Withdrawal `json:"withdrawals"` - BlobGasUsed *hexutil.Uint64 `json:"blobGasUsed"` - ExcessBlobGas *hexutil.Uint64 `json:"excessBlobGas"` - ExecutionWitness *types.ExecutionWitness `json:"executionWitness,omitempty"` + ParentHash common.Hash `json:"parentHash" gencodec:"required"` + FeeRecipient common.Address `json:"feeRecipient" gencodec:"required"` + StateRoot common.Hash `json:"stateRoot" gencodec:"required"` + ReceiptsRoot common.Hash `json:"receiptsRoot" gencodec:"required"` + LogsBloom hexutil.Bytes `json:"logsBloom" gencodec:"required"` + Random common.Hash `json:"prevRandao" gencodec:"required"` + Number hexutil.Uint64 `json:"blockNumber" 
gencodec:"required"` + GasLimit hexutil.Uint64 `json:"gasLimit" gencodec:"required"` + GasUsed hexutil.Uint64 `json:"gasUsed" gencodec:"required"` + Timestamp hexutil.Uint64 `json:"timestamp" gencodec:"required"` + ExtraData hexutil.Bytes `json:"extraData" gencodec:"required"` + BaseFeePerGas *hexutil.Big `json:"baseFeePerGas" gencodec:"required"` + BlockHash common.Hash `json:"blockHash" gencodec:"required"` + Transactions []hexutil.Bytes `json:"transactions" gencodec:"required"` + Withdrawals []*types.Withdrawal `json:"withdrawals"` + BlobGasUsed *hexutil.Uint64 `json:"blobGasUsed"` + ExcessBlobGas *hexutil.Uint64 `json:"excessBlobGas"` } var enc ExecutableData enc.ParentHash = e.ParentHash @@ -59,31 +58,29 @@ func (e ExecutableData) MarshalJSON() ([]byte, error) { enc.Withdrawals = e.Withdrawals enc.BlobGasUsed = (*hexutil.Uint64)(e.BlobGasUsed) enc.ExcessBlobGas = (*hexutil.Uint64)(e.ExcessBlobGas) - enc.ExecutionWitness = e.ExecutionWitness return json.Marshal(&enc) } // UnmarshalJSON unmarshals from JSON. 
func (e *ExecutableData) UnmarshalJSON(input []byte) error { type ExecutableData struct { - ParentHash *common.Hash `json:"parentHash" gencodec:"required"` - FeeRecipient *common.Address `json:"feeRecipient" gencodec:"required"` - StateRoot *common.Hash `json:"stateRoot" gencodec:"required"` - ReceiptsRoot *common.Hash `json:"receiptsRoot" gencodec:"required"` - LogsBloom *hexutil.Bytes `json:"logsBloom" gencodec:"required"` - Random *common.Hash `json:"prevRandao" gencodec:"required"` - Number *hexutil.Uint64 `json:"blockNumber" gencodec:"required"` - GasLimit *hexutil.Uint64 `json:"gasLimit" gencodec:"required"` - GasUsed *hexutil.Uint64 `json:"gasUsed" gencodec:"required"` - Timestamp *hexutil.Uint64 `json:"timestamp" gencodec:"required"` - ExtraData *hexutil.Bytes `json:"extraData" gencodec:"required"` - BaseFeePerGas *hexutil.Big `json:"baseFeePerGas" gencodec:"required"` - BlockHash *common.Hash `json:"blockHash" gencodec:"required"` - Transactions []hexutil.Bytes `json:"transactions" gencodec:"required"` - Withdrawals []*types.Withdrawal `json:"withdrawals"` - BlobGasUsed *hexutil.Uint64 `json:"blobGasUsed"` - ExcessBlobGas *hexutil.Uint64 `json:"excessBlobGas"` - ExecutionWitness *types.ExecutionWitness `json:"executionWitness,omitempty"` + ParentHash *common.Hash `json:"parentHash" gencodec:"required"` + FeeRecipient *common.Address `json:"feeRecipient" gencodec:"required"` + StateRoot *common.Hash `json:"stateRoot" gencodec:"required"` + ReceiptsRoot *common.Hash `json:"receiptsRoot" gencodec:"required"` + LogsBloom *hexutil.Bytes `json:"logsBloom" gencodec:"required"` + Random *common.Hash `json:"prevRandao" gencodec:"required"` + Number *hexutil.Uint64 `json:"blockNumber" gencodec:"required"` + GasLimit *hexutil.Uint64 `json:"gasLimit" gencodec:"required"` + GasUsed *hexutil.Uint64 `json:"gasUsed" gencodec:"required"` + Timestamp *hexutil.Uint64 `json:"timestamp" gencodec:"required"` + ExtraData *hexutil.Bytes `json:"extraData" gencodec:"required"` + 
BaseFeePerGas *hexutil.Big `json:"baseFeePerGas" gencodec:"required"` + BlockHash *common.Hash `json:"blockHash" gencodec:"required"` + Transactions []hexutil.Bytes `json:"transactions" gencodec:"required"` + Withdrawals []*types.Withdrawal `json:"withdrawals"` + BlobGasUsed *hexutil.Uint64 `json:"blobGasUsed"` + ExcessBlobGas *hexutil.Uint64 `json:"excessBlobGas"` } var dec ExecutableData if err := json.Unmarshal(input, &dec); err != nil { @@ -157,8 +154,5 @@ func (e *ExecutableData) UnmarshalJSON(input []byte) error { if dec.ExcessBlobGas != nil { e.ExcessBlobGas = (*uint64)(dec.ExcessBlobGas) } - if dec.ExecutionWitness != nil { - e.ExecutionWitness = dec.ExecutionWitness - } return nil } diff --git a/beacon/engine/types.go b/beacon/engine/types.go index ddb276ab09..da9b6568f2 100644 --- a/beacon/engine/types.go +++ b/beacon/engine/types.go @@ -73,24 +73,23 @@ type payloadAttributesMarshaling struct { // ExecutableData is the data necessary to execute an EL payload. type ExecutableData struct { - ParentHash common.Hash `json:"parentHash" gencodec:"required"` - FeeRecipient common.Address `json:"feeRecipient" gencodec:"required"` - StateRoot common.Hash `json:"stateRoot" gencodec:"required"` - ReceiptsRoot common.Hash `json:"receiptsRoot" gencodec:"required"` - LogsBloom []byte `json:"logsBloom" gencodec:"required"` - Random common.Hash `json:"prevRandao" gencodec:"required"` - Number uint64 `json:"blockNumber" gencodec:"required"` - GasLimit uint64 `json:"gasLimit" gencodec:"required"` - GasUsed uint64 `json:"gasUsed" gencodec:"required"` - Timestamp uint64 `json:"timestamp" gencodec:"required"` - ExtraData []byte `json:"extraData" gencodec:"required"` - BaseFeePerGas *big.Int `json:"baseFeePerGas" gencodec:"required"` - BlockHash common.Hash `json:"blockHash" gencodec:"required"` - Transactions [][]byte `json:"transactions" gencodec:"required"` - Withdrawals []*types.Withdrawal `json:"withdrawals"` - BlobGasUsed *uint64 `json:"blobGasUsed"` - ExcessBlobGas 
*uint64 `json:"excessBlobGas"` - ExecutionWitness *types.ExecutionWitness `json:"executionWitness,omitempty"` + ParentHash common.Hash `json:"parentHash" gencodec:"required"` + FeeRecipient common.Address `json:"feeRecipient" gencodec:"required"` + StateRoot common.Hash `json:"stateRoot" gencodec:"required"` + ReceiptsRoot common.Hash `json:"receiptsRoot" gencodec:"required"` + LogsBloom []byte `json:"logsBloom" gencodec:"required"` + Random common.Hash `json:"prevRandao" gencodec:"required"` + Number uint64 `json:"blockNumber" gencodec:"required"` + GasLimit uint64 `json:"gasLimit" gencodec:"required"` + GasUsed uint64 `json:"gasUsed" gencodec:"required"` + Timestamp uint64 `json:"timestamp" gencodec:"required"` + ExtraData []byte `json:"extraData" gencodec:"required"` + BaseFeePerGas *big.Int `json:"baseFeePerGas" gencodec:"required"` + BlockHash common.Hash `json:"blockHash" gencodec:"required"` + Transactions [][]byte `json:"transactions" gencodec:"required"` + Withdrawals []*types.Withdrawal `json:"withdrawals"` + BlobGasUsed *uint64 `json:"blobGasUsed"` + ExcessBlobGas *uint64 `json:"excessBlobGas"` } // JSON type overrides for executableData. @@ -316,8 +315,7 @@ func ExecutableDataToBlockNoHash(data ExecutableData, versionedHashes []common.H RequestsHash: requestsHash, } return types.NewBlockWithHeader(header). - WithBody(types.Body{Transactions: txs, Uncles: nil, Withdrawals: data.Withdrawals}). - WithWitness(data.ExecutionWitness), + WithBody(types.Body{Transactions: txs, Uncles: nil, Withdrawals: data.Withdrawals}), nil } @@ -325,24 +323,23 @@ func ExecutableDataToBlockNoHash(data ExecutableData, versionedHashes []common.H // fields from the given block. It assumes the given block is post-merge block. 
func BlockToExecutableData(block *types.Block, fees *big.Int, sidecars []*types.BlobTxSidecar, requests [][]byte) *ExecutionPayloadEnvelope { data := &ExecutableData{ - BlockHash: block.Hash(), - ParentHash: block.ParentHash(), - FeeRecipient: block.Coinbase(), - StateRoot: block.Root(), - Number: block.NumberU64(), - GasLimit: block.GasLimit(), - GasUsed: block.GasUsed(), - BaseFeePerGas: block.BaseFee(), - Timestamp: block.Time(), - ReceiptsRoot: block.ReceiptHash(), - LogsBloom: block.Bloom().Bytes(), - Transactions: encodeTransactions(block.Transactions()), - Random: block.MixDigest(), - ExtraData: block.Extra(), - Withdrawals: block.Withdrawals(), - BlobGasUsed: block.BlobGasUsed(), - ExcessBlobGas: block.ExcessBlobGas(), - ExecutionWitness: block.ExecutionWitness(), + BlockHash: block.Hash(), + ParentHash: block.ParentHash(), + FeeRecipient: block.Coinbase(), + StateRoot: block.Root(), + Number: block.NumberU64(), + GasLimit: block.GasLimit(), + GasUsed: block.GasUsed(), + BaseFeePerGas: block.BaseFee(), + Timestamp: block.Time(), + ReceiptsRoot: block.ReceiptHash(), + LogsBloom: block.Bloom().Bytes(), + Transactions: encodeTransactions(block.Transactions()), + Random: block.MixDigest(), + ExtraData: block.Extra(), + Withdrawals: block.Withdrawals(), + BlobGasUsed: block.BlobGasUsed(), + ExcessBlobGas: block.ExcessBlobGas(), } // Add blobs. diff --git a/cmd/geth/main.go b/cmd/geth/main.go index b294ee593e..96f9f58dde 100644 --- a/cmd/geth/main.go +++ b/cmd/geth/main.go @@ -251,8 +251,6 @@ func init() { utils.ShowDeprecated, // See snapshot.go snapshotCommand, - // See verkle.go - verkleCommand, } if logTestCommand != nil { app.Commands = append(app.Commands, logTestCommand) diff --git a/cmd/geth/verkle.go b/cmd/geth/verkle.go deleted file mode 100644 index c064d70aba..0000000000 --- a/cmd/geth/verkle.go +++ /dev/null @@ -1,214 +0,0 @@ -// Copyright 2022 The go-ethereum Authors -// This file is part of go-ethereum. 
-// -// go-ethereum is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// go-ethereum is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. -// -// You should have received a copy of the GNU General Public License -// along with go-ethereum. If not, see . - -package main - -import ( - "bytes" - "encoding/hex" - "errors" - "fmt" - "os" - "slices" - - "github.com/ethereum/go-ethereum/cmd/utils" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core/rawdb" - "github.com/ethereum/go-ethereum/log" - "github.com/ethereum/go-verkle" - "github.com/urfave/cli/v2" -) - -var ( - zero [32]byte - - verkleCommand = &cli.Command{ - Name: "verkle", - Usage: "A set of experimental verkle tree management commands", - Description: "", - Subcommands: []*cli.Command{ - { - Name: "verify", - Usage: "verify the conversion of a MPT into a verkle tree", - ArgsUsage: "", - Action: verifyVerkle, - Flags: slices.Concat(utils.NetworkFlags, utils.DatabaseFlags), - Description: ` -geth verkle verify -This command takes a root commitment and attempts to rebuild the tree. - `, - }, - { - Name: "dump", - Usage: "Dump a verkle tree to a DOT file", - ArgsUsage: " [ ...]", - Action: expandVerkle, - Flags: slices.Concat(utils.NetworkFlags, utils.DatabaseFlags), - Description: ` -geth verkle dump [ ...] -This command will produce a dot file representing the tree, rooted at . -in which key1, key2, ... are expanded. - `, - }, - }, - } -) - -// recurse into each child to ensure they can be loaded from the db. 
The tree isn't rebuilt -// (only its nodes are loaded) so there is no need to flush them, the garbage collector should -// take care of that for us. -func checkChildren(root verkle.VerkleNode, resolver verkle.NodeResolverFn) error { - switch node := root.(type) { - case *verkle.InternalNode: - for i, child := range node.Children() { - childC := child.Commit().Bytes() - - if bytes.Equal(childC[:], zero[:]) { - continue - } - childS, err := resolver(childC[:]) - if err != nil { - return fmt.Errorf("could not find child %x in db: %w", childC, err) - } - // depth is set to 0, the tree isn't rebuilt so it's not a problem - childN, err := verkle.ParseNode(childS, 0) - if err != nil { - return fmt.Errorf("decode error child %x in db: %w", child.Commitment().Bytes(), err) - } - if err := checkChildren(childN, resolver); err != nil { - return fmt.Errorf("%x%w", i, err) // write the path to the erroring node - } - } - case *verkle.LeafNode: - // sanity check: ensure at least one value is non-zero - - for i := 0; i < verkle.NodeWidth; i++ { - if len(node.Value(i)) != 0 { - return nil - } - } - return errors.New("both balance and nonce are 0") - case verkle.Empty: - // nothing to do - default: - return fmt.Errorf("unsupported type encountered %v", root) - } - - return nil -} - -func verifyVerkle(ctx *cli.Context) error { - stack, _ := makeConfigNode(ctx) - defer stack.Close() - - chaindb := utils.MakeChainDatabase(ctx, stack, true) - defer chaindb.Close() - headBlock := rawdb.ReadHeadBlock(chaindb) - if headBlock == nil { - log.Error("Failed to load head block") - return errors.New("no head block") - } - if ctx.NArg() > 1 { - log.Error("Too many arguments given") - return errors.New("too many arguments") - } - var ( - rootC common.Hash - err error - ) - if ctx.NArg() == 1 { - rootC, err = parseRoot(ctx.Args().First()) - if err != nil { - log.Error("Failed to resolve state root", "error", err) - return err - } - log.Info("Rebuilding the tree", "root", rootC) - } else { - rootC 
= headBlock.Root() - log.Info("Rebuilding the tree", "root", rootC, "number", headBlock.NumberU64()) - } - - serializedRoot, err := chaindb.Get(rootC[:]) - if err != nil { - return err - } - root, err := verkle.ParseNode(serializedRoot, 0) - if err != nil { - return err - } - - if err := checkChildren(root, chaindb.Get); err != nil { - log.Error("Could not rebuild the tree from the database", "err", err) - return err - } - - log.Info("Tree was rebuilt from the database") - return nil -} - -func expandVerkle(ctx *cli.Context) error { - stack, _ := makeConfigNode(ctx) - defer stack.Close() - - chaindb := utils.MakeChainDatabase(ctx, stack, true) - defer chaindb.Close() - var ( - rootC common.Hash - keylist [][]byte - err error - ) - if ctx.NArg() >= 2 { - rootC, err = parseRoot(ctx.Args().First()) - if err != nil { - log.Error("Failed to resolve state root", "error", err) - return err - } - keylist = make([][]byte, 0, ctx.Args().Len()-1) - args := ctx.Args().Slice() - for i := range args[1:] { - key, err := hex.DecodeString(args[i+1]) - log.Info("decoded key", "arg", args[i+1], "key", key) - if err != nil { - return fmt.Errorf("error decoding key #%d: %w", i+1, err) - } - keylist = append(keylist, key) - } - log.Info("Rebuilding the tree", "root", rootC) - } else { - return fmt.Errorf("usage: %s root key1 [key 2...]", ctx.App.Name) - } - - serializedRoot, err := chaindb.Get(rootC[:]) - if err != nil { - return err - } - root, err := verkle.ParseNode(serializedRoot, 0) - if err != nil { - return err - } - - for i, key := range keylist { - log.Info("Reading key", "index", i, "key", key) - root.Get(key, chaindb.Get) - } - - if err := os.WriteFile("dump.dot", []byte(verkle.ToDot(root)), 0600); err != nil { - log.Error("Failed to dump file", "err", err) - } else { - log.Info("Tree was dumped to file", "file", "dump.dot") - } - return nil -} diff --git a/cmd/keeper/go.mod b/cmd/keeper/go.mod index 8402382a9b..a42be042aa 100644 --- a/cmd/keeper/go.mod +++ 
b/cmd/keeper/go.mod @@ -14,13 +14,11 @@ require ( github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/consensys/gnark-crypto v0.18.1 // indirect github.com/crate-crypto/go-eth-kzg v1.4.0 // indirect - github.com/crate-crypto/go-ipa v0.0.0-20240724233137-53bbb0ceb27a // indirect github.com/deckarep/golang-set/v2 v2.6.0 // indirect github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1 // indirect github.com/emicklei/dot v1.6.2 // indirect github.com/ethereum/c-kzg-4844/v2 v2.1.5 // indirect github.com/ethereum/go-bigmodexpfix v0.0.0-20250911101455-f9e208c548ab // indirect - github.com/ethereum/go-verkle v0.2.2 // indirect github.com/ferranbt/fastssz v0.1.4 // indirect github.com/go-ole/go-ole v1.3.0 // indirect github.com/gofrs/flock v0.12.1 // indirect diff --git a/cmd/keeper/go.sum b/cmd/keeper/go.sum index 4f4c0dbba0..133a3b10b1 100644 --- a/cmd/keeper/go.sum +++ b/cmd/keeper/go.sum @@ -30,8 +30,6 @@ github.com/consensys/gnark-crypto v0.18.1 h1:RyLV6UhPRoYYzaFnPQA4qK3DyuDgkTgskDd github.com/consensys/gnark-crypto v0.18.1/go.mod h1:L3mXGFTe1ZN+RSJ+CLjUt9x7PNdx8ubaYfDROyp2Z8c= github.com/crate-crypto/go-eth-kzg v1.4.0 h1:WzDGjHk4gFg6YzV0rJOAsTK4z3Qkz5jd4RE3DAvPFkg= github.com/crate-crypto/go-eth-kzg v1.4.0/go.mod h1:J9/u5sWfznSObptgfa92Jq8rTswn6ahQWEuiLHOjCUI= -github.com/crate-crypto/go-ipa v0.0.0-20240724233137-53bbb0ceb27a h1:W8mUrRp6NOVl3J+MYp5kPMoUZPp7aOYHtaua31lwRHg= -github.com/crate-crypto/go-ipa v0.0.0-20240724233137-53bbb0ceb27a/go.mod h1:sTwzHBvIzm2RfVCGNEBZgRyjwK40bVoun3ZnGOCafNM= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/deckarep/golang-set/v2 v2.6.0 h1:XfcQbWM1LlMB8BsJ8N9vW5ehnnPVIw0je80NsVHagjM= @@ -46,8 +44,6 @@ github.com/ethereum/c-kzg-4844/v2 v2.1.5 h1:aVtoLK5xwJ6c5RiqO8g8ptJ5KU+2Hdquf6G3 github.com/ethereum/c-kzg-4844/v2 v2.1.5/go.mod h1:u59hRTTah4Co6i9fDWtiCjTrblJv0UwsqZKCc0GfgUs= 
github.com/ethereum/go-bigmodexpfix v0.0.0-20250911101455-f9e208c548ab h1:rvv6MJhy07IMfEKuARQ9TKojGqLVNxQajaXEp/BoqSk= github.com/ethereum/go-bigmodexpfix v0.0.0-20250911101455-f9e208c548ab/go.mod h1:IuLm4IsPipXKF7CW5Lzf68PIbZ5yl7FFd74l/E0o9A8= -github.com/ethereum/go-verkle v0.2.2 h1:I2W0WjnrFUIzzVPwm8ykY+7pL2d4VhlsePn4j7cnFk8= -github.com/ethereum/go-verkle v0.2.2/go.mod h1:M3b90YRnzqKyyzBEWJGqj8Qff4IDeXnzFw0P9bFw3uk= github.com/ferranbt/fastssz v0.1.4 h1:OCDB+dYDEQDvAgtAGnTSidK1Pe2tW3nFV40XyMkTeDY= github.com/ferranbt/fastssz v0.1.4/go.mod h1:Ea3+oeoRGGLGm5shYAeDgu6PGUlcvQhE2fILyD9+tGg= github.com/getsentry/sentry-go v0.27.0 h1:Pv98CIbtB3LkMWmXi4Joa5OOcwbmnX88sF5qbK3r3Ps= diff --git a/consensus/beacon/consensus.go b/consensus/beacon/consensus.go index dbba73947f..eed27407a5 100644 --- a/consensus/beacon/consensus.go +++ b/consensus/beacon/consensus.go @@ -365,46 +365,7 @@ func (beacon *Beacon) FinalizeAndAssemble(chain consensus.ChainHeaderReader, hea header.Root = state.IntermediateRoot(true) // Assemble the final block. - block := types.NewBlock(header, body, receipts, trie.NewStackTrie(nil)) - - // Create the block witness and attach to block. - // This step needs to happen as late as possible to catch all access events. 
- if chain.Config().IsVerkle(header.Number, header.Time) { - keys := state.AccessEvents().Keys() - - // Open the pre-tree to prove the pre-state against - parent := chain.GetHeaderByNumber(header.Number.Uint64() - 1) - if parent == nil { - return nil, fmt.Errorf("nil parent header for block %d", header.Number) - } - preTrie, err := state.Database().OpenTrie(parent.Root) - if err != nil { - return nil, fmt.Errorf("error opening pre-state tree root: %w", err) - } - postTrie := state.GetTrie() - if postTrie == nil { - return nil, errors.New("post-state tree is not available") - } - vktPreTrie, okpre := preTrie.(*trie.VerkleTrie) - vktPostTrie, okpost := postTrie.(*trie.VerkleTrie) - - // The witness is only attached iff both parent and current block are - // using verkle tree. - if okpre && okpost { - if len(keys) > 0 { - verkleProof, stateDiff, err := vktPreTrie.Proof(vktPostTrie, keys) - if err != nil { - return nil, fmt.Errorf("error generating verkle proof for block %d: %w", header.Number, err) - } - block = block.WithWitness(&types.ExecutionWitness{ - StateDiff: stateDiff, - VerkleProof: verkleProof, - }) - } - } - } - - return block, nil + return types.NewBlock(header, body, receipts, trie.NewStackTrie(nil)), nil } // Seal generates a new sealing request for the given input block and pushes diff --git a/core/state/access_events.go b/core/state/access_events.go index 0575c9898a..86f44bd623 100644 --- a/core/state/access_events.go +++ b/core/state/access_events.go @@ -23,7 +23,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/math" "github.com/ethereum/go-ethereum/params" - "github.com/ethereum/go-ethereum/trie/utils" + "github.com/ethereum/go-ethereum/trie/bintrie" "github.com/holiman/uint256" ) @@ -45,15 +45,12 @@ var zeroTreeIndex uint256.Int type AccessEvents struct { branches map[branchAccessKey]mode chunks map[chunkAccessKey]mode - - pointCache *utils.PointCache } -func NewAccessEvents(pointCache 
*utils.PointCache) *AccessEvents { +func NewAccessEvents() *AccessEvents { return &AccessEvents{ - branches: make(map[branchAccessKey]mode), - chunks: make(map[chunkAccessKey]mode), - pointCache: pointCache, + branches: make(map[branchAccessKey]mode), + chunks: make(map[chunkAccessKey]mode), } } @@ -75,8 +72,11 @@ func (ae *AccessEvents) Keys() [][]byte { // TODO: consider if parallelizing this is worth it, probably depending on len(ae.chunks). keys := make([][]byte, 0, len(ae.chunks)) for chunk := range ae.chunks { - basePoint := ae.pointCache.Get(chunk.addr[:]) - key := utils.GetTreeKeyWithEvaluatedAddress(basePoint, &chunk.treeIndex, chunk.leafKey) + var offset [32]byte + treeIndexBytes := chunk.treeIndex.Bytes32() + copy(offset[:31], treeIndexBytes[1:]) + offset[31] = chunk.leafKey + key := bintrie.GetBinaryTreeKey(chunk.addr, offset[:]) keys = append(keys, key) } return keys @@ -84,9 +84,8 @@ func (ae *AccessEvents) Keys() [][]byte { func (ae *AccessEvents) Copy() *AccessEvents { cpy := &AccessEvents{ - branches: maps.Clone(ae.branches), - chunks: maps.Clone(ae.chunks), - pointCache: ae.pointCache, + branches: maps.Clone(ae.branches), + chunks: maps.Clone(ae.chunks), } return cpy } @@ -95,12 +94,12 @@ func (ae *AccessEvents) Copy() *AccessEvents { // member fields of an account. 
func (ae *AccessEvents) AddAccount(addr common.Address, isWrite bool, availableGas uint64) uint64 { var gas uint64 // accumulate the consumed gas - consumed, expected := ae.touchAddressAndChargeGas(addr, zeroTreeIndex, utils.BasicDataLeafKey, isWrite, availableGas) + consumed, expected := ae.touchAddressAndChargeGas(addr, zeroTreeIndex, bintrie.BasicDataLeafKey, isWrite, availableGas) if consumed < expected { return expected } gas += consumed - consumed, expected = ae.touchAddressAndChargeGas(addr, zeroTreeIndex, utils.CodeHashLeafKey, isWrite, availableGas-consumed) + consumed, expected = ae.touchAddressAndChargeGas(addr, zeroTreeIndex, bintrie.CodeHashLeafKey, isWrite, availableGas-consumed) if consumed < expected { return expected + gas } @@ -112,7 +111,7 @@ func (ae *AccessEvents) AddAccount(addr common.Address, isWrite bool, availableG // cold member fields of an account, that need to be touched when making a message // call to that account. func (ae *AccessEvents) MessageCallGas(destination common.Address, availableGas uint64) uint64 { - _, expected := ae.touchAddressAndChargeGas(destination, zeroTreeIndex, utils.BasicDataLeafKey, false, availableGas) + _, expected := ae.touchAddressAndChargeGas(destination, zeroTreeIndex, bintrie.BasicDataLeafKey, false, availableGas) if expected == 0 { expected = params.WarmStorageReadCostEIP2929 } @@ -122,11 +121,11 @@ func (ae *AccessEvents) MessageCallGas(destination common.Address, availableGas // ValueTransferGas returns the gas to be charged for each of the currently // cold balance member fields of the caller and the callee accounts. 
func (ae *AccessEvents) ValueTransferGas(callerAddr, targetAddr common.Address, availableGas uint64) uint64 { - _, expected1 := ae.touchAddressAndChargeGas(callerAddr, zeroTreeIndex, utils.BasicDataLeafKey, true, availableGas) + _, expected1 := ae.touchAddressAndChargeGas(callerAddr, zeroTreeIndex, bintrie.BasicDataLeafKey, true, availableGas) if expected1 > availableGas { return expected1 } - _, expected2 := ae.touchAddressAndChargeGas(targetAddr, zeroTreeIndex, utils.BasicDataLeafKey, true, availableGas-expected1) + _, expected2 := ae.touchAddressAndChargeGas(targetAddr, zeroTreeIndex, bintrie.BasicDataLeafKey, true, availableGas-expected1) if expected1+expected2 == 0 { return params.WarmStorageReadCostEIP2929 } @@ -138,8 +137,8 @@ func (ae *AccessEvents) ValueTransferGas(callerAddr, targetAddr common.Address, // address collision is done before the transfer, and so no write // are guaranteed to happen at this point. func (ae *AccessEvents) ContractCreatePreCheckGas(addr common.Address, availableGas uint64) uint64 { - consumed, expected1 := ae.touchAddressAndChargeGas(addr, zeroTreeIndex, utils.BasicDataLeafKey, false, availableGas) - _, expected2 := ae.touchAddressAndChargeGas(addr, zeroTreeIndex, utils.CodeHashLeafKey, false, availableGas-consumed) + consumed, expected1 := ae.touchAddressAndChargeGas(addr, zeroTreeIndex, bintrie.BasicDataLeafKey, false, availableGas) + _, expected2 := ae.touchAddressAndChargeGas(addr, zeroTreeIndex, bintrie.CodeHashLeafKey, false, availableGas-consumed) return expected1 + expected2 } @@ -147,9 +146,9 @@ func (ae *AccessEvents) ContractCreatePreCheckGas(addr common.Address, available // a contract creation. 
func (ae *AccessEvents) ContractCreateInitGas(addr common.Address, availableGas uint64) (uint64, uint64) { var gas uint64 - consumed, expected1 := ae.touchAddressAndChargeGas(addr, zeroTreeIndex, utils.BasicDataLeafKey, true, availableGas) + consumed, expected1 := ae.touchAddressAndChargeGas(addr, zeroTreeIndex, bintrie.BasicDataLeafKey, true, availableGas) gas += consumed - consumed, expected2 := ae.touchAddressAndChargeGas(addr, zeroTreeIndex, utils.CodeHashLeafKey, true, availableGas-consumed) + consumed, expected2 := ae.touchAddressAndChargeGas(addr, zeroTreeIndex, bintrie.CodeHashLeafKey, true, availableGas-consumed) gas += consumed return gas, expected1 + expected2 } @@ -157,20 +156,20 @@ func (ae *AccessEvents) ContractCreateInitGas(addr common.Address, availableGas // AddTxOrigin adds the member fields of the sender account to the access event list, // so that cold accesses are not charged, since they are covered by the 21000 gas. func (ae *AccessEvents) AddTxOrigin(originAddr common.Address) { - ae.touchAddressAndChargeGas(originAddr, zeroTreeIndex, utils.BasicDataLeafKey, true, gomath.MaxUint64) - ae.touchAddressAndChargeGas(originAddr, zeroTreeIndex, utils.CodeHashLeafKey, false, gomath.MaxUint64) + ae.touchAddressAndChargeGas(originAddr, zeroTreeIndex, bintrie.BasicDataLeafKey, true, gomath.MaxUint64) + ae.touchAddressAndChargeGas(originAddr, zeroTreeIndex, bintrie.CodeHashLeafKey, false, gomath.MaxUint64) } // AddTxDestination adds the member fields of the sender account to the access event list, // so that cold accesses are not charged, since they are covered by the 21000 gas. 
func (ae *AccessEvents) AddTxDestination(addr common.Address, sendsValue, doesntExist bool) { - ae.touchAddressAndChargeGas(addr, zeroTreeIndex, utils.BasicDataLeafKey, sendsValue, gomath.MaxUint64) - ae.touchAddressAndChargeGas(addr, zeroTreeIndex, utils.CodeHashLeafKey, doesntExist, gomath.MaxUint64) + ae.touchAddressAndChargeGas(addr, zeroTreeIndex, bintrie.BasicDataLeafKey, sendsValue, gomath.MaxUint64) + ae.touchAddressAndChargeGas(addr, zeroTreeIndex, bintrie.CodeHashLeafKey, doesntExist, gomath.MaxUint64) } // SlotGas returns the amount of gas to be charged for a cold storage access. func (ae *AccessEvents) SlotGas(addr common.Address, slot common.Hash, isWrite bool, availableGas uint64, chargeWarmCosts bool) uint64 { - treeIndex, subIndex := utils.StorageIndex(slot.Bytes()) + treeIndex, subIndex := bintrie.StorageIndex(slot.Bytes()) _, expected := ae.touchAddressAndChargeGas(addr, *treeIndex, subIndex, isWrite, availableGas) if expected == 0 && chargeWarmCosts { expected = params.WarmStorageReadCostEIP2929 @@ -313,7 +312,7 @@ func (ae *AccessEvents) CodeChunksRangeGas(contractAddr common.Address, startPC, // Note that an access in write mode implies an access in read mode, whereas an // access in read mode does not imply an access in write mode. func (ae *AccessEvents) BasicDataGas(addr common.Address, isWrite bool, availableGas uint64, chargeWarmCosts bool) uint64 { - _, expected := ae.touchAddressAndChargeGas(addr, zeroTreeIndex, utils.BasicDataLeafKey, isWrite, availableGas) + _, expected := ae.touchAddressAndChargeGas(addr, zeroTreeIndex, bintrie.BasicDataLeafKey, isWrite, availableGas) if expected == 0 && chargeWarmCosts { if availableGas < params.WarmStorageReadCostEIP2929 { return availableGas @@ -329,7 +328,7 @@ func (ae *AccessEvents) BasicDataGas(addr common.Address, isWrite bool, availabl // Note that an access in write mode implies an access in read mode, whereas an access in // read mode does not imply an access in write mode. 
func (ae *AccessEvents) CodeHashGas(addr common.Address, isWrite bool, availableGas uint64, chargeWarmCosts bool) uint64 { - _, expected := ae.touchAddressAndChargeGas(addr, zeroTreeIndex, utils.CodeHashLeafKey, isWrite, availableGas) + _, expected := ae.touchAddressAndChargeGas(addr, zeroTreeIndex, bintrie.CodeHashLeafKey, isWrite, availableGas) if expected == 0 && chargeWarmCosts { if availableGas < params.WarmStorageReadCostEIP2929 { return availableGas diff --git a/core/state/access_events_test.go b/core/state/access_events_test.go index e80859a0b4..0b39130e8d 100644 --- a/core/state/access_events_test.go +++ b/core/state/access_events_test.go @@ -22,7 +22,6 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/params" - "github.com/ethereum/go-ethereum/trie/utils" ) var ( @@ -38,7 +37,7 @@ func init() { } func TestAccountHeaderGas(t *testing.T) { - ae := NewAccessEvents(utils.NewPointCache(1024)) + ae := NewAccessEvents() // Check cold read cost gas := ae.BasicDataGas(testAddr, false, math.MaxUint64, false) @@ -93,7 +92,7 @@ func TestAccountHeaderGas(t *testing.T) { // TestContractCreateInitGas checks that the gas cost of contract creation is correctly // calculated. func TestContractCreateInitGas(t *testing.T) { - ae := NewAccessEvents(utils.NewPointCache(1024)) + ae := NewAccessEvents() var testAddr [20]byte for i := byte(0); i < 20; i++ { @@ -116,7 +115,7 @@ func TestContractCreateInitGas(t *testing.T) { // TestMessageCallGas checks that the gas cost of message calls is correctly // calculated. 
func TestMessageCallGas(t *testing.T) { - ae := NewAccessEvents(utils.NewPointCache(1024)) + ae := NewAccessEvents() // Check cold read cost, without a value gas := ae.MessageCallGas(testAddr, math.MaxUint64) diff --git a/core/state/database.go b/core/state/database.go index ae177d964f..1e8fc9d5c9 100644 --- a/core/state/database.go +++ b/core/state/database.go @@ -31,7 +31,6 @@ import ( "github.com/ethereum/go-ethereum/trie/bintrie" "github.com/ethereum/go-ethereum/trie/transitiontrie" "github.com/ethereum/go-ethereum/trie/trienode" - "github.com/ethereum/go-ethereum/trie/utils" "github.com/ethereum/go-ethereum/triedb" ) @@ -41,9 +40,6 @@ const ( // Cache size granted for caching clean code. codeCacheSize = 256 * 1024 * 1024 - - // Number of address->curve point associations to keep. - pointCacheSize = 4096 ) // Database wraps access to tries and contract code. @@ -57,9 +53,6 @@ type Database interface { // OpenStorageTrie opens the storage trie of an account. OpenStorageTrie(stateRoot common.Hash, address common.Address, root common.Hash, trie Trie) (Trie, error) - // PointCache returns the cache holding points used in verkle tree key computation - PointCache() *utils.PointCache - // TrieDB returns the underlying trie database for managing trie nodes. 
TrieDB() *triedb.Database @@ -161,7 +154,6 @@ type CachingDB struct { snap *snapshot.Tree codeCache *lru.SizeConstrainedCache[common.Hash, []byte] codeSizeCache *lru.Cache[common.Hash, int] - pointCache *utils.PointCache // Transition-specific fields TransitionStatePerRoot *lru.Cache[common.Hash, *overlay.TransitionState] @@ -175,7 +167,6 @@ func NewDatabase(triedb *triedb.Database, snap *snapshot.Tree) *CachingDB { snap: snap, codeCache: lru.NewSizeConstrainedCache[common.Hash, []byte](codeCacheSize), codeSizeCache: lru.NewCache[common.Hash, int](codeSizeCacheSize), - pointCache: utils.NewPointCache(pointCacheSize), TransitionStatePerRoot: lru.NewCache[common.Hash, *overlay.TransitionState](1000), } } @@ -211,7 +202,7 @@ func (db *CachingDB) Reader(stateRoot common.Hash) (Reader, error) { } // Configure the trie reader, which is expected to be available as the // gatekeeper unless the state is corrupted. - tr, err := newTrieReader(stateRoot, db.triedb, db.pointCache) + tr, err := newTrieReader(stateRoot, db.triedb) if err != nil { return nil, err } @@ -289,11 +280,6 @@ func (db *CachingDB) TrieDB() *triedb.Database { return db.triedb } -// PointCache returns the cache of evaluated curve points. -func (db *CachingDB) PointCache() *utils.PointCache { - return db.pointCache -} - // Snapshot returns the underlying state snapshot. 
func (db *CachingDB) Snapshot() *snapshot.Tree { return db.snap @@ -304,8 +290,6 @@ func mustCopyTrie(t Trie) Trie { switch t := t.(type) { case *trie.StateTrie: return t.Copy() - case *trie.VerkleTrie: - return t.Copy() case *transitiontrie.TransitionTrie: return t.Copy() default: diff --git a/core/state/database_history.go b/core/state/database_history.go index 314c56c470..f9c4a69f2f 100644 --- a/core/state/database_history.go +++ b/core/state/database_history.go @@ -25,7 +25,6 @@ import ( "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/rlp" - "github.com/ethereum/go-ethereum/trie/utils" "github.com/ethereum/go-ethereum/triedb" "github.com/ethereum/go-ethereum/triedb/pathdb" ) @@ -105,7 +104,6 @@ type HistoricDB struct { triedb *triedb.Database codeCache *lru.SizeConstrainedCache[common.Hash, []byte] codeSizeCache *lru.Cache[common.Hash, int] - pointCache *utils.PointCache } // NewHistoricDatabase creates a historic state database. @@ -115,7 +113,6 @@ func NewHistoricDatabase(disk ethdb.KeyValueStore, triedb *triedb.Database) *His triedb: triedb, codeCache: lru.NewSizeConstrainedCache[common.Hash, []byte](codeCacheSize), codeSizeCache: lru.NewCache[common.Hash, int](codeSizeCacheSize), - pointCache: utils.NewPointCache(pointCacheSize), } } @@ -139,11 +136,6 @@ func (db *HistoricDB) OpenStorageTrie(stateRoot common.Hash, address common.Addr return nil, errors.New("not implemented") } -// PointCache returns the cache holding points used in verkle tree key computation -func (db *HistoricDB) PointCache() *utils.PointCache { - return db.pointCache -} - // TrieDB returns the underlying trie database for managing trie nodes. 
func (db *HistoricDB) TrieDB() *triedb.Database { return db.triedb diff --git a/core/state/reader.go b/core/state/reader.go index c912ca28da..38228f8453 100644 --- a/core/state/reader.go +++ b/core/state/reader.go @@ -33,7 +33,6 @@ import ( "github.com/ethereum/go-ethereum/trie" "github.com/ethereum/go-ethereum/trie/bintrie" "github.com/ethereum/go-ethereum/trie/transitiontrie" - "github.com/ethereum/go-ethereum/trie/utils" "github.com/ethereum/go-ethereum/triedb" "github.com/ethereum/go-ethereum/triedb/database" ) @@ -267,7 +266,7 @@ type trieReader struct { // newTrieReader constructs a trie reader of the specific state. An error will be // returned if the associated trie specified by root is not existent. -func newTrieReader(root common.Hash, db *triedb.Database, cache *utils.PointCache) (*trieReader, error) { +func newTrieReader(root common.Hash, db *triedb.Database) (*trieReader, error) { var ( tr Trie err error diff --git a/core/state/state_object.go b/core/state/state_object.go index 91623a838b..411d5fb5b5 100644 --- a/core/state/state_object.go +++ b/core/state/state_object.go @@ -29,6 +29,7 @@ import ( "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/trie" + "github.com/ethereum/go-ethereum/trie/bintrie" "github.com/ethereum/go-ethereum/trie/transitiontrie" "github.com/ethereum/go-ethereum/trie/trienode" "github.com/holiman/uint256" @@ -498,8 +499,8 @@ func (s *stateObject) deepCopy(db *StateDB) *stateObject { } switch s.trie.(type) { - case *trie.VerkleTrie: - // Verkle uses only one tree, and the copy has already been + case *bintrie.BinaryTrie: + // UBT uses only one tree, and the copy has already been // made in mustCopyTrie. 
obj.trie = db.trie case *transitiontrie.TransitionTrie: diff --git a/core/state/statedb.go b/core/state/statedb.go index 7c6b8bbdfc..c239d66233 100644 --- a/core/state/statedb.go +++ b/core/state/statedb.go @@ -38,7 +38,6 @@ import ( "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/trie" "github.com/ethereum/go-ethereum/trie/trienode" - "github.com/ethereum/go-ethereum/trie/utils" "github.com/holiman/uint256" "golang.org/x/sync/errgroup" ) @@ -188,7 +187,7 @@ func NewWithReader(root common.Hash, db Database, reader Reader) (*StateDB, erro transientStorage: newTransientStorage(), } if db.TrieDB().IsVerkle() { - sdb.accessEvents = NewAccessEvents(db.PointCache()) + sdb.accessEvents = NewAccessEvents() } return sdb, nil } @@ -1495,11 +1494,6 @@ func (s *StateDB) markUpdate(addr common.Address) { s.mutations[addr].typ = update } -// PointCache returns the point cache used by verkle tree. -func (s *StateDB) PointCache() *utils.PointCache { - return s.db.PointCache() -} - // Witness retrieves the current state witness being collected. 
func (s *StateDB) Witness() *stateless.Witness { return s.witness diff --git a/core/state/statedb_hooked.go b/core/state/statedb_hooked.go index 50acc03aa8..33a2016784 100644 --- a/core/state/statedb_hooked.go +++ b/core/state/statedb_hooked.go @@ -25,7 +25,6 @@ import ( "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/params" - "github.com/ethereum/go-ethereum/trie/utils" "github.com/holiman/uint256" ) @@ -133,10 +132,6 @@ func (s *hookedStateDB) AddSlotToAccessList(addr common.Address, slot common.Has s.inner.AddSlotToAccessList(addr, slot) } -func (s *hookedStateDB) PointCache() *utils.PointCache { - return s.inner.PointCache() -} - func (s *hookedStateDB) Prepare(rules params.Rules, sender, coinbase common.Address, dest *common.Address, precompiles []common.Address, txAccesses types.AccessList) { s.inner.Prepare(rules, sender, coinbase, dest, precompiles, txAccesses) } diff --git a/core/types/block.go b/core/types/block.go index b5b6468a13..c52c05a4c7 100644 --- a/core/types/block.go +++ b/core/types/block.go @@ -31,7 +31,6 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/rlp" - "github.com/ethereum/go-verkle" ) // A BlockNonce is a 64-bit hash which proves (combined with the @@ -61,13 +60,6 @@ func (n *BlockNonce) UnmarshalText(input []byte) error { return hexutil.UnmarshalFixedText("BlockNonce", input, n[:]) } -// ExecutionWitness represents the witness + proof used in a verkle context, -// to provide the ability to execute a block statelessly. 
-type ExecutionWitness struct { - StateDiff verkle.StateDiff `json:"stateDiff"` - VerkleProof *verkle.VerkleProof `json:"verkleProof"` -} - //go:generate go run github.com/fjl/gencodec -type Header -field-override headerMarshaling -out gen_header_json.go //go:generate go run ../../rlp/rlpgen -type Header -out gen_header_rlp.go @@ -209,11 +201,6 @@ type Block struct { transactions Transactions withdrawals Withdrawals - // witness is not an encoded part of the block body. - // It is held in Block in order for easy relaying to the places - // that process it. - witness *ExecutionWitness - // caches hash atomic.Pointer[common.Hash] size atomic.Uint64 @@ -429,9 +416,6 @@ func (b *Block) BlobGasUsed() *uint64 { return blobGasUsed } -// ExecutionWitness returns the verkle execution witneess + proof for a block -func (b *Block) ExecutionWitness() *ExecutionWitness { return b.witness } - // Size returns the true RLP encoded storage size of the block, either by encoding // and returning it, or returning a previously cached value. func (b *Block) Size() uint64 { @@ -494,7 +478,6 @@ func (b *Block) WithSeal(header *Header) *Block { transactions: b.transactions, uncles: b.uncles, withdrawals: b.withdrawals, - witness: b.witness, } } @@ -506,7 +489,6 @@ func (b *Block) WithBody(body Body) *Block { transactions: slices.Clone(body.Transactions), uncles: make([]*Header, len(body.Uncles)), withdrawals: slices.Clone(body.Withdrawals), - witness: b.witness, } for i := range body.Uncles { block.uncles[i] = CopyHeader(body.Uncles[i]) @@ -514,16 +496,6 @@ func (b *Block) WithBody(body Body) *Block { return block } -func (b *Block) WithWitness(witness *ExecutionWitness) *Block { - return &Block{ - header: b.header, - transactions: b.transactions, - uncles: b.uncles, - withdrawals: b.withdrawals, - witness: witness, - } -} - // Hash returns the keccak256 hash of b's header. // The hash is computed on the first call and cached thereafter. 
func (b *Block) Hash() common.Hash { diff --git a/core/vm/evm.go b/core/vm/evm.go index 8975c791c8..25a3318c02 100644 --- a/core/vm/evm.go +++ b/core/vm/evm.go @@ -214,7 +214,7 @@ func (evm *EVM) SetJumpDestCache(jumpDests JumpDestCache) { // This is not threadsafe and should only be done very cautiously. func (evm *EVM) SetTxContext(txCtx TxContext) { if evm.chainRules.IsEIP4762 { - txCtx.AccessEvents = state.NewAccessEvents(evm.StateDB.PointCache()) + txCtx.AccessEvents = state.NewAccessEvents() } evm.TxContext = txCtx } diff --git a/core/vm/interface.go b/core/vm/interface.go index d7f4c10e1f..e2f6a65189 100644 --- a/core/vm/interface.go +++ b/core/vm/interface.go @@ -23,7 +23,6 @@ import ( "github.com/ethereum/go-ethereum/core/tracing" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/params" - "github.com/ethereum/go-ethereum/trie/utils" "github.com/holiman/uint256" ) @@ -84,9 +83,6 @@ type StateDB interface { // even if the feature/fork is not active yet AddSlotToAccessList(addr common.Address, slot common.Hash) - // PointCache returns the point cache used in computations - PointCache() *utils.PointCache - Prepare(rules params.Rules, sender, coinbase common.Address, dest *common.Address, precompiles []common.Address, txAccesses types.AccessList) RevertToSnapshot(int) diff --git a/go.mod b/go.mod index aff1d53923..66f3a3ffa5 100644 --- a/go.mod +++ b/go.mod @@ -15,7 +15,6 @@ require ( github.com/cockroachdb/pebble v1.1.5 github.com/consensys/gnark-crypto v0.18.1 github.com/crate-crypto/go-eth-kzg v1.4.0 - github.com/crate-crypto/go-ipa v0.0.0-20240724233137-53bbb0ceb27a github.com/davecgh/go-spew v1.1.1 github.com/dchest/siphash v1.2.3 github.com/deckarep/golang-set/v2 v2.6.0 @@ -24,7 +23,6 @@ require ( github.com/dop251/goja v0.0.0-20230605162241-28ee0ee714f3 github.com/ethereum/c-kzg-4844/v2 v2.1.5 github.com/ethereum/go-bigmodexpfix v0.0.0-20250911101455-f9e208c548ab - github.com/ethereum/go-verkle v0.2.2 github.com/fatih/color 
v1.16.0 github.com/ferranbt/fastssz v0.1.4 github.com/fsnotify/fsnotify v1.6.0 diff --git a/go.sum b/go.sum index 503e0975d6..ad066abc03 100644 --- a/go.sum +++ b/go.sum @@ -81,8 +81,6 @@ github.com/cpuguy83/go-md2man/v2 v2.0.5 h1:ZtcqGrnekaHpVLArFSe4HK5DoKx1T0rq2DwVB github.com/cpuguy83/go-md2man/v2 v2.0.5/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/crate-crypto/go-eth-kzg v1.4.0 h1:WzDGjHk4gFg6YzV0rJOAsTK4z3Qkz5jd4RE3DAvPFkg= github.com/crate-crypto/go-eth-kzg v1.4.0/go.mod h1:J9/u5sWfznSObptgfa92Jq8rTswn6ahQWEuiLHOjCUI= -github.com/crate-crypto/go-ipa v0.0.0-20240724233137-53bbb0ceb27a h1:W8mUrRp6NOVl3J+MYp5kPMoUZPp7aOYHtaua31lwRHg= -github.com/crate-crypto/go-ipa v0.0.0-20240724233137-53bbb0ceb27a/go.mod h1:sTwzHBvIzm2RfVCGNEBZgRyjwK40bVoun3ZnGOCafNM= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/cyberdelia/templates v0.0.0-20141128023046-ca7fffd4298c/go.mod h1:GyV+0YP4qX0UQ7r2MoYZ+AvYDp12OF5yg4q8rGnyNh4= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -117,8 +115,6 @@ github.com/ethereum/c-kzg-4844/v2 v2.1.5 h1:aVtoLK5xwJ6c5RiqO8g8ptJ5KU+2Hdquf6G3 github.com/ethereum/c-kzg-4844/v2 v2.1.5/go.mod h1:u59hRTTah4Co6i9fDWtiCjTrblJv0UwsqZKCc0GfgUs= github.com/ethereum/go-bigmodexpfix v0.0.0-20250911101455-f9e208c548ab h1:rvv6MJhy07IMfEKuARQ9TKojGqLVNxQajaXEp/BoqSk= github.com/ethereum/go-bigmodexpfix v0.0.0-20250911101455-f9e208c548ab/go.mod h1:IuLm4IsPipXKF7CW5Lzf68PIbZ5yl7FFd74l/E0o9A8= -github.com/ethereum/go-verkle v0.2.2 h1:I2W0WjnrFUIzzVPwm8ykY+7pL2d4VhlsePn4j7cnFk8= -github.com/ethereum/go-verkle v0.2.2/go.mod h1:M3b90YRnzqKyyzBEWJGqj8Qff4IDeXnzFw0P9bFw3uk= github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM= github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE= github.com/ferranbt/fastssz v0.1.4 h1:OCDB+dYDEQDvAgtAGnTSidK1Pe2tW3nFV40XyMkTeDY= diff --git a/trie/bintrie/key_encoding.go 
b/trie/bintrie/key_encoding.go index cda797521a..5a93fcde9a 100644 --- a/trie/bintrie/key_encoding.go +++ b/trie/bintrie/key_encoding.go @@ -33,8 +33,17 @@ const ( ) var ( - zeroHash = common.Hash{} - codeOffset = uint256.NewInt(128) + zeroInt = uint256.NewInt(0) + zeroHash = common.Hash{} + verkleNodeWidthLog2 = 8 + headerStorageOffset = uint256.NewInt(64) + codeOffset = uint256.NewInt(128) + codeStorageDelta = uint256.NewInt(0).Sub(codeOffset, headerStorageOffset) + mainStorageOffsetLshVerkleNodeWidth = new(uint256.Int).Lsh(uint256.NewInt(1), 248-uint(verkleNodeWidthLog2)) + CodeOffset = uint256.NewInt(128) + VerkleNodeWidth = uint256.NewInt(256) + HeaderStorageOffset = uint256.NewInt(64) + VerkleNodeWidthLog2 = 8 ) func GetBinaryTreeKey(addr common.Address, key []byte) []byte { @@ -83,3 +92,38 @@ func GetBinaryTreeKeyCodeChunk(address common.Address, chunknr *uint256.Int) []b chunkOffset := new(uint256.Int).Add(codeOffset, chunknr).Bytes() return GetBinaryTreeKey(address, chunkOffset) } + +func StorageIndex(storageKey []byte) (*uint256.Int, byte) { + // If the storage slot is in the header, we need to add the header offset. + var key uint256.Int + key.SetBytes(storageKey) + if key.Cmp(codeStorageDelta) < 0 { + // This addition is always safe; it can't ever overflow since pos. 
- -package utils - -import ( - "encoding/binary" - "sync" - - "github.com/crate-crypto/go-ipa/bandersnatch/fr" - "github.com/ethereum/go-ethereum/common/lru" - "github.com/ethereum/go-ethereum/metrics" - "github.com/ethereum/go-verkle" - "github.com/holiman/uint256" -) - -const ( - BasicDataLeafKey = 0 - CodeHashLeafKey = 1 - - BasicDataVersionOffset = 0 - BasicDataCodeSizeOffset = 5 - BasicDataNonceOffset = 8 - BasicDataBalanceOffset = 16 -) - -var ( - zero = uint256.NewInt(0) - verkleNodeWidthLog2 = 8 - headerStorageOffset = uint256.NewInt(64) - codeOffset = uint256.NewInt(128) - verkleNodeWidth = uint256.NewInt(256) - codeStorageDelta = uint256.NewInt(0).Sub(codeOffset, headerStorageOffset) - mainStorageOffsetLshVerkleNodeWidth = new(uint256.Int).Lsh(uint256.NewInt(1), 248-uint(verkleNodeWidthLog2)) - CodeOffset = uint256.NewInt(128) - VerkleNodeWidth = uint256.NewInt(256) - HeaderStorageOffset = uint256.NewInt(64) - VerkleNodeWidthLog2 = 8 - - index0Point *verkle.Point // pre-computed commitment of polynomial [2+256*64] - - // cacheHitGauge is the metric to track how many cache hit occurred. - cacheHitGauge = metrics.NewRegisteredGauge("trie/verkle/cache/hit", nil) - - // cacheMissGauge is the metric to track how many cache miss occurred. - cacheMissGauge = metrics.NewRegisteredGauge("trie/verkle/cache/miss", nil) -) - -func init() { - // The byte array is the Marshalled output of the point computed as such: - // - // var ( - // config = verkle.GetConfig() - // fr verkle.Fr - // ) - // verkle.FromLEBytes(&fr, []byte{2, 64}) - // point := config.CommitToPoly([]verkle.Fr{fr}, 1) - index0Point = new(verkle.Point) - err := index0Point.SetBytes([]byte{34, 25, 109, 242, 193, 5, 144, 224, 76, 52, 189, 92, 197, 126, 9, 145, 27, 152, 199, 130, 165, 3, 210, 27, 193, 131, 142, 28, 110, 26, 16, 191}) - if err != nil { - panic(err) - } -} - -// PointCache is the LRU cache for storing evaluated address commitment. 
-type PointCache struct { - lru lru.BasicLRU[string, *verkle.Point] - lock sync.RWMutex -} - -// NewPointCache returns the cache with specified size. -func NewPointCache(maxItems int) *PointCache { - return &PointCache{ - lru: lru.NewBasicLRU[string, *verkle.Point](maxItems), - } -} - -// Get returns the cached commitment for the specified address, or computing -// it on the flight. -func (c *PointCache) Get(addr []byte) *verkle.Point { - c.lock.Lock() - defer c.lock.Unlock() - - p, ok := c.lru.Get(string(addr)) - if ok { - cacheHitGauge.Inc(1) - return p - } - cacheMissGauge.Inc(1) - p = evaluateAddressPoint(addr) - c.lru.Add(string(addr), p) - return p -} - -// GetStem returns the first 31 bytes of the tree key as the tree stem. It only -// works for the account metadata whose treeIndex is 0. -func (c *PointCache) GetStem(addr []byte) []byte { - p := c.Get(addr) - return pointToHash(p, 0)[:31] -} - -// GetTreeKey performs both the work of the spec's get_tree_key function, and that -// of pedersen_hash: it builds the polynomial in pedersen_hash without having to -// create a mostly zero-filled buffer and "type cast" it to a 128-long 16-byte -// array. Since at most the first 5 coefficients of the polynomial will be non-zero, -// these 5 coefficients are created directly. -func GetTreeKey(address []byte, treeIndex *uint256.Int, subIndex byte) []byte { - if len(address) < 32 { - var aligned [32]byte - address = append(aligned[:32-len(address)], address...) - } - // poly = [2+256*64, address_le_low, address_le_high, tree_index_le_low, tree_index_le_high] - var poly [5]fr.Element - - // 32-byte address, interpreted as two little endian - // 16-byte numbers. - verkle.FromLEBytes(&poly[1], address[:16]) - verkle.FromLEBytes(&poly[2], address[16:]) - - // treeIndex must be interpreted as a 32-byte aligned little-endian integer. - // e.g: if treeIndex is 0xAABBCC, we need the byte representation to be 0xCCBBAA00...00. 
- // poly[3] = LE({CC,BB,AA,00...0}) (16 bytes), poly[4]=LE({00,00,...}) (16 bytes). - // - // To avoid unnecessary endianness conversions for go-ipa, we do some trick: - // - poly[3]'s byte representation is the same as the *top* 16 bytes (trieIndexBytes[16:]) of - // 32-byte aligned big-endian representation (BE({00,...,AA,BB,CC})). - // - poly[4]'s byte representation is the same as the *low* 16 bytes (trieIndexBytes[:16]) of - // the 32-byte aligned big-endian representation (BE({00,00,...}). - trieIndexBytes := treeIndex.Bytes32() - verkle.FromBytes(&poly[3], trieIndexBytes[16:]) - verkle.FromBytes(&poly[4], trieIndexBytes[:16]) - - cfg := verkle.GetConfig() - ret := cfg.CommitToPoly(poly[:], 0) - - // add a constant point corresponding to poly[0]=[2+256*64]. - ret.Add(ret, index0Point) - - return pointToHash(ret, subIndex) -} - -// GetTreeKeyWithEvaluatedAddress is basically identical to GetTreeKey, the only -// difference is a part of polynomial is already evaluated. -// -// Specifically, poly = [2+256*64, address_le_low, address_le_high] is already -// evaluated. -func GetTreeKeyWithEvaluatedAddress(evaluated *verkle.Point, treeIndex *uint256.Int, subIndex byte) []byte { - var poly [5]fr.Element - - // little-endian, 32-byte aligned treeIndex - var index [32]byte - for i := 0; i < len(treeIndex); i++ { - binary.LittleEndian.PutUint64(index[i*8:(i+1)*8], treeIndex[i]) - } - verkle.FromLEBytes(&poly[3], index[:16]) - verkle.FromLEBytes(&poly[4], index[16:]) - - cfg := verkle.GetConfig() - ret := cfg.CommitToPoly(poly[:], 0) - - // add the pre-evaluated address - ret.Add(ret, evaluated) - - return pointToHash(ret, subIndex) -} - -// BasicDataKey returns the verkle tree key of the basic data field for -// the specified account. -func BasicDataKey(address []byte) []byte { - return GetTreeKey(address, zero, BasicDataLeafKey) -} - -// CodeHashKey returns the verkle tree key of the code hash field for -// the specified account. 
-func CodeHashKey(address []byte) []byte { - return GetTreeKey(address, zero, CodeHashLeafKey) -} - -func codeChunkIndex(chunk *uint256.Int) (*uint256.Int, byte) { - var ( - chunkOffset = new(uint256.Int).Add(codeOffset, chunk) - treeIndex, subIndexMod = new(uint256.Int).DivMod(chunkOffset, verkleNodeWidth, new(uint256.Int)) - ) - return treeIndex, byte(subIndexMod.Uint64()) -} - -// CodeChunkKey returns the verkle tree key of the code chunk for the -// specified account. -func CodeChunkKey(address []byte, chunk *uint256.Int) []byte { - treeIndex, subIndex := codeChunkIndex(chunk) - return GetTreeKey(address, treeIndex, subIndex) -} - -func GetTreeKeyCodeChunkIndices(chunk *uint256.Int) (*uint256.Int, byte) { - chunkOffset := new(uint256.Int).Add(CodeOffset, chunk) - treeIndex := new(uint256.Int).Div(chunkOffset, VerkleNodeWidth) - subIndexMod := new(uint256.Int).Mod(chunkOffset, VerkleNodeWidth) - var subIndex byte - if len(subIndexMod) != 0 { - subIndex = byte(subIndexMod[0]) - } - return treeIndex, subIndex -} - -func GetTreeKeyCodeChunk(address []byte, chunk *uint256.Int) []byte { - treeIndex, subIndex := GetTreeKeyCodeChunkIndices(chunk) - return GetTreeKey(address, treeIndex, subIndex) -} - -func StorageIndex(storageKey []byte) (*uint256.Int, byte) { - // If the storage slot is in the header, we need to add the header offset. - var key uint256.Int - key.SetBytes(storageKey) - if key.Cmp(codeStorageDelta) < 0 { - // This addition is always safe; it can't ever overflow since pos. 
- -package utils - -import ( - "bytes" - "testing" - - "github.com/ethereum/go-verkle" - "github.com/holiman/uint256" -) - -func TestTreeKey(t *testing.T) { - var ( - address = []byte{0x01} - addressEval = evaluateAddressPoint(address) - smallIndex = uint256.NewInt(1) - largeIndex = uint256.NewInt(10000) - smallStorage = []byte{0x1} - largeStorage = bytes.Repeat([]byte{0xff}, 16) - ) - if !bytes.Equal(BasicDataKey(address), BasicDataKeyWithEvaluatedAddress(addressEval)) { - t.Fatal("Unmatched basic data key") - } - if !bytes.Equal(CodeHashKey(address), CodeHashKeyWithEvaluatedAddress(addressEval)) { - t.Fatal("Unmatched code hash key") - } - if !bytes.Equal(CodeChunkKey(address, smallIndex), CodeChunkKeyWithEvaluatedAddress(addressEval, smallIndex)) { - t.Fatal("Unmatched code chunk key") - } - if !bytes.Equal(CodeChunkKey(address, largeIndex), CodeChunkKeyWithEvaluatedAddress(addressEval, largeIndex)) { - t.Fatal("Unmatched code chunk key") - } - if !bytes.Equal(StorageSlotKey(address, smallStorage), StorageSlotKeyWithEvaluatedAddress(addressEval, smallStorage)) { - t.Fatal("Unmatched storage slot key") - } - if !bytes.Equal(StorageSlotKey(address, largeStorage), StorageSlotKeyWithEvaluatedAddress(addressEval, largeStorage)) { - t.Fatal("Unmatched storage slot key") - } -} - -// goos: darwin -// goarch: amd64 -// pkg: github.com/ethereum/go-ethereum/trie/utils -// cpu: VirtualApple @ 2.50GHz -// BenchmarkTreeKey -// BenchmarkTreeKey-8 398731 2961 ns/op 32 B/op 1 allocs/op -func BenchmarkTreeKey(b *testing.B) { - // Initialize the IPA settings which can be pretty expensive. 
- verkle.GetConfig() - - b.ReportAllocs() - b.ResetTimer() - - for i := 0; i < b.N; i++ { - BasicDataKey([]byte{0x01}) - } -} - -// goos: darwin -// goarch: amd64 -// pkg: github.com/ethereum/go-ethereum/trie/utils -// cpu: VirtualApple @ 2.50GHz -// BenchmarkTreeKeyWithEvaluation -// BenchmarkTreeKeyWithEvaluation-8 513855 2324 ns/op 32 B/op 1 allocs/op -func BenchmarkTreeKeyWithEvaluation(b *testing.B) { - // Initialize the IPA settings which can be pretty expensive. - verkle.GetConfig() - - addr := []byte{0x01} - eval := evaluateAddressPoint(addr) - - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - BasicDataKeyWithEvaluatedAddress(eval) - } -} - -// goos: darwin -// goarch: amd64 -// pkg: github.com/ethereum/go-ethereum/trie/utils -// cpu: VirtualApple @ 2.50GHz -// BenchmarkStorageKey -// BenchmarkStorageKey-8 230516 4584 ns/op 96 B/op 3 allocs/op -func BenchmarkStorageKey(b *testing.B) { - // Initialize the IPA settings which can be pretty expensive. - verkle.GetConfig() - - b.ReportAllocs() - b.ResetTimer() - - for i := 0; i < b.N; i++ { - StorageSlotKey([]byte{0x01}, bytes.Repeat([]byte{0xff}, 32)) - } -} - -// goos: darwin -// goarch: amd64 -// pkg: github.com/ethereum/go-ethereum/trie/utils -// cpu: VirtualApple @ 2.50GHz -// BenchmarkStorageKeyWithEvaluation -// BenchmarkStorageKeyWithEvaluation-8 320125 3753 ns/op 96 B/op 3 allocs/op -func BenchmarkStorageKeyWithEvaluation(b *testing.B) { - // Initialize the IPA settings which can be pretty expensive. - verkle.GetConfig() - - addr := []byte{0x01} - eval := evaluateAddressPoint(addr) - - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - StorageSlotKeyWithEvaluatedAddress(eval, bytes.Repeat([]byte{0xff}, 32)) - } -} diff --git a/trie/verkle.go b/trie/verkle.go deleted file mode 100644 index 70793330c5..0000000000 --- a/trie/verkle.go +++ /dev/null @@ -1,458 +0,0 @@ -// Copyright 2023 The go-ethereum Authors -// This file is part of the go-ethereum library. 
-// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package trie - -import ( - "bytes" - "encoding/binary" - "errors" - "fmt" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/ethdb" - "github.com/ethereum/go-ethereum/trie/trienode" - "github.com/ethereum/go-ethereum/trie/utils" - "github.com/ethereum/go-ethereum/triedb/database" - "github.com/ethereum/go-verkle" - "github.com/holiman/uint256" -) - -var ( - errInvalidRootType = errors.New("invalid node type for root") -) - -// VerkleTrie is a wrapper around VerkleNode that implements the trie.Trie -// interface so that Verkle trees can be reused verbatim. -type VerkleTrie struct { - root verkle.VerkleNode - cache *utils.PointCache - reader *Reader - tracer *PrevalueTracer -} - -// NewVerkleTrie constructs a verkle tree based on the specified root hash. -func NewVerkleTrie(root common.Hash, db database.NodeDatabase, cache *utils.PointCache) (*VerkleTrie, error) { - reader, err := NewReader(root, common.Hash{}, db) - if err != nil { - return nil, err - } - t := &VerkleTrie{ - root: verkle.New(), - cache: cache, - reader: reader, - tracer: NewPrevalueTracer(), - } - // Parse the root verkle node if it's not empty. 
- if root != types.EmptyVerkleHash && root != types.EmptyRootHash { - blob, err := t.nodeResolver(nil) - if err != nil { - return nil, err - } - node, err := verkle.ParseNode(blob, 0) - if err != nil { - return nil, err - } - t.root = node - } - return t, nil -} - -// GetKey returns the sha3 preimage of a hashed key that was previously used -// to store a value. -func (t *VerkleTrie) GetKey(key []byte) []byte { - return key -} - -// GetAccount implements state.Trie, retrieving the account with the specified -// account address. If the specified account is not in the verkle tree, nil will -// be returned. If the tree is corrupted, an error will be returned. -func (t *VerkleTrie) GetAccount(addr common.Address) (*types.StateAccount, error) { - var ( - acc = &types.StateAccount{} - values [][]byte - err error - ) - switch n := t.root.(type) { - case *verkle.InternalNode: - values, err = n.GetValuesAtStem(t.cache.GetStem(addr[:]), t.nodeResolver) - if err != nil { - return nil, fmt.Errorf("GetAccount (%x) error: %v", addr, err) - } - default: - return nil, errInvalidRootType - } - if values == nil { - return nil, nil - } - basicData := values[utils.BasicDataLeafKey] - acc.Nonce = binary.BigEndian.Uint64(basicData[utils.BasicDataNonceOffset:]) - acc.Balance = new(uint256.Int).SetBytes(basicData[utils.BasicDataBalanceOffset : utils.BasicDataBalanceOffset+16]) - acc.CodeHash = values[utils.CodeHashLeafKey] - - // TODO account.Root is leave as empty. How should we handle the legacy account? - return acc, nil -} - -// PrefetchAccount attempts to resolve specific accounts from the database -// to accelerate subsequent trie operations. -func (t *VerkleTrie) PrefetchAccount(addresses []common.Address) error { - for _, addr := range addresses { - if _, err := t.GetAccount(addr); err != nil { - return err - } - } - return nil -} - -// GetStorage implements state.Trie, retrieving the storage slot with the specified -// account address and storage key. 
If the specified slot is not in the verkle tree, -// nil will be returned. If the tree is corrupted, an error will be returned. -func (t *VerkleTrie) GetStorage(addr common.Address, key []byte) ([]byte, error) { - k := utils.StorageSlotKeyWithEvaluatedAddress(t.cache.Get(addr.Bytes()), key) - val, err := t.root.Get(k, t.nodeResolver) - if err != nil { - return nil, err - } - return common.TrimLeftZeroes(val), nil -} - -// PrefetchStorage attempts to resolve specific storage slots from the database -// to accelerate subsequent trie operations. -func (t *VerkleTrie) PrefetchStorage(addr common.Address, keys [][]byte) error { - for _, key := range keys { - if _, err := t.GetStorage(addr, key); err != nil { - return err - } - } - return nil -} - -// UpdateAccount implements state.Trie, writing the provided account into the tree. -// If the tree is corrupted, an error will be returned. -func (t *VerkleTrie) UpdateAccount(addr common.Address, acc *types.StateAccount, codeLen int) error { - var ( - err error - basicData [32]byte - values = make([][]byte, verkle.NodeWidth) - stem = t.cache.GetStem(addr[:]) - ) - - // Code size is encoded in BasicData as a 3-byte big-endian integer. Spare bytes are present - // before the code size to support bigger integers in the future. PutUint32(...) requires - // 4 bytes, so we need to shift the offset 1 byte to the left. 
- binary.BigEndian.PutUint32(basicData[utils.BasicDataCodeSizeOffset-1:], uint32(codeLen)) - binary.BigEndian.PutUint64(basicData[utils.BasicDataNonceOffset:], acc.Nonce) - if acc.Balance.ByteLen() > 16 { - panic("balance too large") - } - acc.Balance.WriteToSlice(basicData[utils.BasicDataBalanceOffset : utils.BasicDataBalanceOffset+16]) - values[utils.BasicDataLeafKey] = basicData[:] - values[utils.CodeHashLeafKey] = acc.CodeHash[:] - - switch root := t.root.(type) { - case *verkle.InternalNode: - err = root.InsertValuesAtStem(stem, values, t.nodeResolver) - default: - return errInvalidRootType - } - if err != nil { - return fmt.Errorf("UpdateAccount (%x) error: %v", addr, err) - } - - return nil -} - -// UpdateStorage implements state.Trie, writing the provided storage slot into -// the tree. If the tree is corrupted, an error will be returned. -func (t *VerkleTrie) UpdateStorage(address common.Address, key, value []byte) error { - // Left padding the slot value to 32 bytes. - var v [32]byte - if len(value) >= 32 { - copy(v[:], value[:32]) - } else { - copy(v[32-len(value):], value[:]) - } - k := utils.StorageSlotKeyWithEvaluatedAddress(t.cache.Get(address.Bytes()), key) - return t.root.Insert(k, v[:], t.nodeResolver) -} - -// DeleteAccount leaves the account untouched, as no account deletion can happen -// in verkle. -// There is a special corner case, in which an account that is prefunded, CREATE2-d -// and then SELFDESTRUCT-d should see its funds drained. EIP161 says that account -// should be removed, but this is verboten by the verkle spec. This contains a -// workaround in which the method checks for this corner case, and if so, overwrites -// the balance with 0. This will be removed once the spec has been clarified. 
-func (t *VerkleTrie) DeleteAccount(addr common.Address) error { - k := utils.BasicDataKeyWithEvaluatedAddress(t.cache.Get(addr.Bytes())) - values, err := t.root.(*verkle.InternalNode).GetValuesAtStem(k, t.nodeResolver) - if err != nil { - return fmt.Errorf("Error getting data at %x in delete: %w", k, err) - } - var prefunded bool - for i, v := range values { - switch i { - case 0: - prefunded = len(v) == 32 - case 1: - prefunded = len(v) == 32 && bytes.Equal(v, types.EmptyCodeHash[:]) - default: - prefunded = v == nil - } - if !prefunded { - break - } - } - if prefunded { - t.root.Insert(k, common.Hash{}.Bytes(), t.nodeResolver) - } - return nil -} - -// RollBackAccount removes the account info + code from the tree, unlike DeleteAccount -// that will overwrite it with 0s. The first 64 storage slots are also removed. -func (t *VerkleTrie) RollBackAccount(addr common.Address) error { - var ( - evaluatedAddr = t.cache.Get(addr.Bytes()) - basicDataKey = utils.BasicDataKeyWithEvaluatedAddress(evaluatedAddr) - ) - basicDataBytes, err := t.root.Get(basicDataKey, t.nodeResolver) - if err != nil { - return fmt.Errorf("rollback: error finding code size: %w", err) - } - if len(basicDataBytes) == 0 { - return errors.New("rollback: basic data is not existent") - } - // The code size is encoded in BasicData as a 3-byte big-endian integer. Spare bytes are present - // before the code size to support bigger integers in the future. - // LittleEndian.Uint32(...) expects 4-bytes, so we need to shift the offset 1-byte to the left. 
- codeSize := binary.BigEndian.Uint32(basicDataBytes[utils.BasicDataCodeSizeOffset-1:]) - - // Delete the account header + first 64 slots + first 128 code chunks - _, err = t.root.(*verkle.InternalNode).DeleteAtStem(basicDataKey[:31], t.nodeResolver) - if err != nil { - return fmt.Errorf("error rolling back account header: %w", err) - } - - // Delete all further code - for i, chunknr := uint64(31*128), uint64(128); i < uint64(codeSize); i, chunknr = i+31*256, chunknr+256 { - // evaluate group key at the start of a new group - offset := uint256.NewInt(chunknr) - key := utils.CodeChunkKeyWithEvaluatedAddress(evaluatedAddr, offset) - - if _, err = t.root.(*verkle.InternalNode).DeleteAtStem(key[:], t.nodeResolver); err != nil { - return fmt.Errorf("error deleting code chunk stem (addr=%x, offset=%d) error: %w", addr[:], offset, err) - } - } - return nil -} - -// DeleteStorage implements state.Trie, deleting the specified storage slot from -// the trie. If the storage slot was not existent in the trie, no error will be -// returned. If the trie is corrupted, an error will be returned. -func (t *VerkleTrie) DeleteStorage(addr common.Address, key []byte) error { - var zero [32]byte - k := utils.StorageSlotKeyWithEvaluatedAddress(t.cache.Get(addr.Bytes()), key) - return t.root.Insert(k, zero[:], t.nodeResolver) -} - -// Hash returns the root hash of the tree. It does not write to the database and -// can be used even if the tree doesn't have one. -func (t *VerkleTrie) Hash() common.Hash { - return t.root.Commit().Bytes() -} - -// Commit writes all nodes to the tree's memory database. -func (t *VerkleTrie) Commit(_ bool) (common.Hash, *trienode.NodeSet) { - root := t.root.(*verkle.InternalNode) - nodes, err := root.BatchSerialize() - if err != nil { - // Error return from this function indicates error in the code logic - // of BatchSerialize, and we fail catastrophically if this is the case. 
- panic(fmt.Errorf("BatchSerialize failed: %v", err)) - } - nodeset := trienode.NewNodeSet(common.Hash{}) - for _, node := range nodes { - // Hash parameter is not used in pathdb - nodeset.AddNode(node.Path, trienode.NewNodeWithPrev(common.Hash{}, node.SerializedBytes, t.tracer.Get(node.Path))) - } - // Serialize root commitment form - return t.Hash(), nodeset -} - -// NodeIterator implements state.Trie, returning an iterator that returns -// nodes of the trie. Iteration starts at the key after the given start key. -// -// TODO(gballet, rjl493456442) implement it. -func (t *VerkleTrie) NodeIterator(startKey []byte) (NodeIterator, error) { - // TODO(@CPerezz): remove. - return nil, errors.New("not implemented") -} - -// Prove implements state.Trie, constructing a Merkle proof for key. The result -// contains all encoded nodes on the path to the value at key. The value itself -// is also included in the last node and can be retrieved by verifying the proof. -// -// If the trie does not contain a value for key, the returned proof contains all -// nodes of the longest existing prefix of the key (at least the root), ending -// with the node that proves the absence of the key. -// -// TODO(gballet, rjl493456442) implement it. -func (t *VerkleTrie) Prove(key []byte, proofDb ethdb.KeyValueWriter) error { - panic("not implemented") -} - -// Copy returns a deep-copied verkle tree. -func (t *VerkleTrie) Copy() *VerkleTrie { - return &VerkleTrie{ - root: t.root.Copy(), - cache: t.cache, - reader: t.reader, - tracer: t.tracer.Copy(), - } -} - -// IsVerkle indicates if the trie is a Verkle trie. -func (t *VerkleTrie) IsVerkle() bool { - return true -} - -// Proof builds and returns the verkle multiproof for keys, built against -// the pre tree. The post tree is passed in order to add the post values -// to that proof. 
-func (t *VerkleTrie) Proof(posttrie *VerkleTrie, keys [][]byte) (*verkle.VerkleProof, verkle.StateDiff, error) { - var postroot verkle.VerkleNode - if posttrie != nil { - postroot = posttrie.root - } - proof, _, _, _, err := verkle.MakeVerkleMultiProof(t.root, postroot, keys, t.nodeResolver) - if err != nil { - return nil, nil, err - } - p, kvps, err := verkle.SerializeProof(proof) - if err != nil { - return nil, nil, err - } - return p, kvps, nil -} - -// ChunkedCode represents a sequence of 32-bytes chunks of code (31 bytes of which -// are actual code, and 1 byte is the pushdata offset). -type ChunkedCode []byte - -// Copy the values here so as to avoid an import cycle -const ( - PUSH1 = byte(0x60) - PUSH32 = byte(0x7f) -) - -// ChunkifyCode generates the chunked version of an array representing EVM bytecode -func ChunkifyCode(code []byte) ChunkedCode { - var ( - chunkOffset = 0 // offset in the chunk - chunkCount = len(code) / 31 - codeOffset = 0 // offset in the code - ) - if len(code)%31 != 0 { - chunkCount++ - } - chunks := make([]byte, chunkCount*32) - for i := 0; i < chunkCount; i++ { - // number of bytes to copy, 31 unless the end of the code has been reached. - end := 31 * (i + 1) - if len(code) < end { - end = len(code) - } - copy(chunks[i*32+1:], code[31*i:end]) // copy the code itself - - // chunk offset = taken from the last chunk. - if chunkOffset > 31 { - // skip offset calculation if push data covers the whole chunk - chunks[i*32] = 31 - chunkOffset = 1 - continue - } - chunks[32*i] = byte(chunkOffset) - chunkOffset = 0 - - // Check each instruction and update the offset it should be 0 unless - // a PUSH-N overflows. 
- for ; codeOffset < end; codeOffset++ { - if code[codeOffset] >= PUSH1 && code[codeOffset] <= PUSH32 { - codeOffset += int(code[codeOffset] - PUSH1 + 1) - if codeOffset+1 >= 31*(i+1) { - codeOffset++ - chunkOffset = codeOffset - 31*(i+1) - break - } - } - } - } - return chunks -} - -// UpdateContractCode implements state.Trie, writing the provided contract code -// into the trie. -// Note that the code-size *must* be already saved by a previous UpdateAccount call. -func (t *VerkleTrie) UpdateContractCode(addr common.Address, codeHash common.Hash, code []byte) error { - var ( - chunks = ChunkifyCode(code) - values [][]byte - key []byte - err error - ) - for i, chunknr := 0, uint64(0); i < len(chunks); i, chunknr = i+32, chunknr+1 { - groupOffset := (chunknr + 128) % 256 - if groupOffset == 0 /* start of new group */ || chunknr == 0 /* first chunk in header group */ { - values = make([][]byte, verkle.NodeWidth) - key = utils.CodeChunkKeyWithEvaluatedAddress(t.cache.Get(addr.Bytes()), uint256.NewInt(chunknr)) - } - values[groupOffset] = chunks[i : i+32] - - if groupOffset == 255 || len(chunks)-i <= 32 { - switch root := t.root.(type) { - case *verkle.InternalNode: - err = root.InsertValuesAtStem(key[:31], values, t.nodeResolver) - if err != nil { - return fmt.Errorf("UpdateContractCode (addr=%x) error: %w", addr[:], err) - } - default: - return errInvalidRootType - } - } - } - return nil -} - -func (t *VerkleTrie) ToDot() string { - return verkle.ToDot(t.root) -} - -func (t *VerkleTrie) nodeResolver(path []byte) ([]byte, error) { - blob, err := t.reader.Node(path, common.Hash{}) - if err != nil { - return nil, err - } - t.tracer.Put(path, blob) - return blob, nil -} - -// Witness returns a set containing all trie nodes that have been accessed. 
-func (t *VerkleTrie) Witness() map[string][]byte { - panic("not implemented") -} diff --git a/trie/verkle_test.go b/trie/verkle_test.go deleted file mode 100644 index 1832e3db13..0000000000 --- a/trie/verkle_test.go +++ /dev/null @@ -1,173 +0,0 @@ -// Copyright 2023 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . 
- -package trie - -import ( - "bytes" - "reflect" - "testing" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core/rawdb" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/trie/utils" - "github.com/holiman/uint256" -) - -var ( - accounts = map[common.Address]*types.StateAccount{ - {1}: { - Nonce: 100, - Balance: uint256.NewInt(100), - CodeHash: common.Hash{0x1}.Bytes(), - }, - {2}: { - Nonce: 200, - Balance: uint256.NewInt(200), - CodeHash: common.Hash{0x2}.Bytes(), - }, - } - storages = map[common.Address]map[common.Hash][]byte{ - {1}: { - common.Hash{10}: []byte{10}, - common.Hash{11}: []byte{11}, - common.MaxHash: []byte{0xff}, - }, - {2}: { - common.Hash{20}: []byte{20}, - common.Hash{21}: []byte{21}, - common.MaxHash: []byte{0xff}, - }, - } -) - -func TestVerkleTreeReadWrite(t *testing.T) { - db := newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.PathScheme) - tr, _ := NewVerkleTrie(types.EmptyVerkleHash, db, utils.NewPointCache(100)) - - for addr, acct := range accounts { - if err := tr.UpdateAccount(addr, acct, 0); err != nil { - t.Fatalf("Failed to update account, %v", err) - } - for key, val := range storages[addr] { - if err := tr.UpdateStorage(addr, key.Bytes(), val); err != nil { - t.Fatalf("Failed to update storage, %v", err) - } - } - } - - for addr, acct := range accounts { - stored, err := tr.GetAccount(addr) - if err != nil { - t.Fatalf("Failed to get account, %v", err) - } - if !reflect.DeepEqual(stored, acct) { - t.Fatal("account is not matched") - } - for key, val := range storages[addr] { - stored, err := tr.GetStorage(addr, key.Bytes()) - if err != nil { - t.Fatalf("Failed to get storage, %v", err) - } - if !bytes.Equal(stored, val) { - t.Fatal("storage is not matched") - } - } - } -} - -func TestVerkleRollBack(t *testing.T) { - db := newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.PathScheme) - tr, _ := 
NewVerkleTrie(types.EmptyVerkleHash, db, utils.NewPointCache(100)) - - for addr, acct := range accounts { - // create more than 128 chunks of code - code := make([]byte, 129*32) - for i := 0; i < len(code); i += 2 { - code[i] = 0x60 - code[i+1] = byte(i % 256) - } - if err := tr.UpdateAccount(addr, acct, len(code)); err != nil { - t.Fatalf("Failed to update account, %v", err) - } - for key, val := range storages[addr] { - if err := tr.UpdateStorage(addr, key.Bytes(), val); err != nil { - t.Fatalf("Failed to update storage, %v", err) - } - } - hash := crypto.Keccak256Hash(code) - if err := tr.UpdateContractCode(addr, hash, code); err != nil { - t.Fatalf("Failed to update contract, %v", err) - } - } - - // Check that things were created - for addr, acct := range accounts { - stored, err := tr.GetAccount(addr) - if err != nil { - t.Fatalf("Failed to get account, %v", err) - } - if !reflect.DeepEqual(stored, acct) { - t.Fatal("account is not matched") - } - for key, val := range storages[addr] { - stored, err := tr.GetStorage(addr, key.Bytes()) - if err != nil { - t.Fatalf("Failed to get storage, %v", err) - } - if !bytes.Equal(stored, val) { - t.Fatal("storage is not matched") - } - } - } - - // ensure there is some code in the 2nd group of the 1st account - keyOf2ndGroup := utils.CodeChunkKeyWithEvaluatedAddress(tr.cache.Get(common.Address{1}.Bytes()), uint256.NewInt(128)) - chunk, err := tr.root.Get(keyOf2ndGroup, nil) - if err != nil { - t.Fatalf("Failed to get account, %v", err) - } - if len(chunk) == 0 { - t.Fatal("account was not created ") - } - - // Rollback first account and check that it is gone - addr1 := common.Address{1} - err = tr.RollBackAccount(addr1) - if err != nil { - t.Fatalf("error rolling back address 1: %v", err) - } - - // ensure the account is gone - stored, err := tr.GetAccount(addr1) - if err != nil { - t.Fatalf("Failed to get account, %v", err) - } - if stored != nil { - t.Fatal("account was not deleted") - } - - // ensure that the last 
code chunk is also gone from the tree - chunk, err = tr.root.Get(keyOf2ndGroup, nil) - if err != nil { - t.Fatalf("Failed to get account, %v", err) - } - if len(chunk) != 0 { - t.Fatal("account was not deleted") - } -} From b84097d22eeeb90ac1b02901e5777ce7face6d5f Mon Sep 17 00:00:00 2001 From: Guillaume Ballet <3272758+gballet@users.noreply.github.com> Date: Tue, 30 Dec 2025 14:43:45 +0100 Subject: [PATCH 205/277] .github/workflows: preventively close PRs that seem AI-generated (#33414) This is a new step in my crusade against the braindead fad of starting PR titles with a word that is completely redundant with github labels, thus wasting prime first-line real-estate for something that isn't necessary. I noticed that every single one of these PRs are low-quality AI-slop, so I think there is a strong case to be made for these PRs to be auto-closed. A message is added before closing the PR, redirecting to our contribution guidelines, so I expect quality first-time contributors to read them and reopen the PR. In the case of spam PRs, the author is unlikely to revisit a given PR, and so auto-closing might have a positive impact. That's an experiment worth trying, imo. 
--- .github/workflows/validate_pr.yml | 39 +++++++++++++++++++++++++++++++ 1 file changed, 39 insertions(+) diff --git a/.github/workflows/validate_pr.yml b/.github/workflows/validate_pr.yml index 57e8c12b5e..aa3c74cc67 100644 --- a/.github/workflows/validate_pr.yml +++ b/.github/workflows/validate_pr.yml @@ -8,6 +8,45 @@ jobs: validate-pr: runs-on: ubuntu-latest steps: + - name: Check for Spam PR + uses: actions/github-script@v7 + with: + script: | + const prTitle = context.payload.pull_request.title; + const spamRegex = /^(feat|chore|fix)(\(.*\))?\s*:/i; + + if (spamRegex.test(prTitle)) { + // Leave a comment explaining why + await github.rest.issues.createComment({ + owner: context.repo.owner, + repo: context.repo.repo, + issue_number: context.payload.pull_request.number, + body: `## PR Closed as Spam + + This PR was automatically closed because the title format \`feat:\`, \`fix:\`, or \`chore:\` is commonly associated with spam contributions. + + If this is a legitimate contribution, please: + 1. Review our contribution guidelines + 2. Use the correct PR title format: \`directory, ...: description\` + 3. Open a new PR with the proper title format + + Thank you for your understanding.` + }); + + // Close the PR + await github.rest.pulls.update({ + owner: context.repo.owner, + repo: context.repo.repo, + pull_number: context.payload.pull_request.number, + state: 'closed' + }); + + core.setFailed('PR closed as spam due to suspicious title format'); + return; + } + + console.log('✅ PR passed spam check'); + - name: Checkout repository uses: actions/checkout@v4 From b3e7d9ee445a7eb2121e7af3918fe974b10b977c Mon Sep 17 00:00:00 2001 From: rjl493456442 Date: Tue, 30 Dec 2025 23:05:13 +0800 Subject: [PATCH 206/277] triedb/pathdb: optimize history indexing efficiency (#33303) This pull request optimizes history indexing by splitting a single large database batch into multiple smaller chunks. 
Originally, the indexer will resolve a batch of state histories and commit all corresponding index entries atomically together with the indexing marker. While indexing more state histories in a single batch improves efficiency, excessively large batches can cause significant memory issues. To mitigate this, the pull request splits the mega-batch into several smaller batches and flushes them independently during indexing. However, this introduces a potential inconsistency that some index entries may be flushed while the indexing marker is not, and an unclean shutdown may leave the database in a partially updated state. This can corrupt index data. To address this, head truncation is introduced. After a restart, any excessive index entries beyond the expected indexing marker are removed, ensuring the index remains consistent after an unclean shutdown. --- triedb/pathdb/history_index.go | 51 +++++-- triedb/pathdb/history_index_block.go | 42 +++++- triedb/pathdb/history_index_block_test.go | 98 ++++++++++-- triedb/pathdb/history_index_iterator_test.go | 4 +- triedb/pathdb/history_index_test.go | 149 +++++++++++++++++-- triedb/pathdb/history_indexer.go | 71 ++++----- 6 files changed, 333 insertions(+), 82 deletions(-) diff --git a/triedb/pathdb/history_index.go b/triedb/pathdb/history_index.go index 87b6e377af..cc5cd204b4 100644 --- a/triedb/pathdb/history_index.go +++ b/triedb/pathdb/history_index.go @@ -163,12 +163,15 @@ type indexWriter struct { db ethdb.KeyValueReader } -// newIndexWriter constructs the index writer for the specified state. -func newIndexWriter(db ethdb.KeyValueReader, state stateIdent) (*indexWriter, error) { +// newIndexWriter constructs the index writer for the specified state. Additionally, +// it takes an integer as the limit and prunes all existing elements above that ID. +// It's essential as the recovery mechanism after unclean shutdown during the history +// indexing. 
+func newIndexWriter(db ethdb.KeyValueReader, state stateIdent, limit uint64) (*indexWriter, error) { blob := readStateIndex(state, db) if len(blob) == 0 { desc := newIndexBlockDesc(0) - bw, _ := newBlockWriter(nil, desc) + bw, _ := newBlockWriter(nil, desc, 0 /* useless if the block is empty */) return &indexWriter{ descList: []*indexBlockDesc{desc}, bw: bw, @@ -180,15 +183,27 @@ func newIndexWriter(db ethdb.KeyValueReader, state stateIdent) (*indexWriter, er if err != nil { return nil, err } + // Trim trailing blocks whose elements all exceed the limit. + for i := len(descList) - 1; i > 0 && descList[i].max > limit; i-- { + // The previous block has the elements that exceed the limit, + // therefore the current block can be entirely dropped. + if descList[i-1].max >= limit { + descList = descList[:i] + } + } + // Take the last block for appending new elements lastDesc := descList[len(descList)-1] indexBlock := readStateIndexBlock(state, db, lastDesc.id) - bw, err := newBlockWriter(indexBlock, lastDesc) + + // Construct the writer for the last block. All elements in this block + // that exceed the limit will be truncated. + bw, err := newBlockWriter(indexBlock, lastDesc, limit) if err != nil { return nil, err } return &indexWriter{ descList: descList, - lastID: lastDesc.max, + lastID: bw.last(), bw: bw, state: state, db: db, @@ -221,7 +236,7 @@ func (w *indexWriter) rotate() error { desc = newIndexBlockDesc(w.bw.desc.id + 1) ) w.frozen = append(w.frozen, w.bw) - w.bw, err = newBlockWriter(nil, desc) + w.bw, err = newBlockWriter(nil, desc, 0 /* useless if the block is empty */) if err != nil { return err } @@ -271,13 +286,13 @@ type indexDeleter struct { } // newIndexDeleter constructs the index deleter for the specified state. 
-func newIndexDeleter(db ethdb.KeyValueReader, state stateIdent) (*indexDeleter, error) { +func newIndexDeleter(db ethdb.KeyValueReader, state stateIdent, limit uint64) (*indexDeleter, error) { blob := readStateIndex(state, db) if len(blob) == 0 { // TODO(rjl493456442) we can probably return an error here, // deleter with no data is meaningless. desc := newIndexBlockDesc(0) - bw, _ := newBlockWriter(nil, desc) + bw, _ := newBlockWriter(nil, desc, 0 /* useless if the block is empty */) return &indexDeleter{ descList: []*indexBlockDesc{desc}, bw: bw, @@ -289,22 +304,34 @@ func newIndexDeleter(db ethdb.KeyValueReader, state stateIdent) (*indexDeleter, if err != nil { return nil, err } + // Trim trailing blocks whose elements all exceed the limit. + for i := len(descList) - 1; i > 0 && descList[i].max > limit; i-- { + // The previous block has the elements that exceed the limit, + // therefore the current block can be entirely dropped. + if descList[i-1].max >= limit { + descList = descList[:i] + } + } + // Take the block for deleting element from lastDesc := descList[len(descList)-1] indexBlock := readStateIndexBlock(state, db, lastDesc.id) - bw, err := newBlockWriter(indexBlock, lastDesc) + + // Construct the writer for the last block. All elements in this block + // that exceed the limit will be truncated. + bw, err := newBlockWriter(indexBlock, lastDesc, limit) if err != nil { return nil, err } return &indexDeleter{ descList: descList, - lastID: lastDesc.max, + lastID: bw.last(), bw: bw, state: state, db: db, }, nil } -// empty returns an flag indicating whether the state index is empty. +// empty returns whether the state index is empty. 
func (d *indexDeleter) empty() bool { return d.bw.empty() && len(d.descList) == 1 } @@ -337,7 +364,7 @@ func (d *indexDeleter) pop(id uint64) error { // Open the previous block writer for deleting lastDesc := d.descList[len(d.descList)-1] indexBlock := readStateIndexBlock(d.state, d.db, lastDesc.id) - bw, err := newBlockWriter(indexBlock, lastDesc) + bw, err := newBlockWriter(indexBlock, lastDesc, lastDesc.max) if err != nil { return err } diff --git a/triedb/pathdb/history_index_block.go b/triedb/pathdb/history_index_block.go index 7b59c8e882..13f16b4cf3 100644 --- a/triedb/pathdb/history_index_block.go +++ b/triedb/pathdb/history_index_block.go @@ -21,13 +21,15 @@ import ( "errors" "fmt" "math" + + "github.com/ethereum/go-ethereum/log" ) const ( - indexBlockDescSize = 14 // The size of index block descriptor - indexBlockEntriesCap = 4096 // The maximum number of entries can be grouped in a block - indexBlockRestartLen = 256 // The restart interval length of index block - historyIndexBatch = 512 * 1024 // The number of state history indexes for constructing or deleting as batch + indexBlockDescSize = 14 // The size of index block descriptor + indexBlockEntriesCap = 4096 // The maximum number of entries can be grouped in a block + indexBlockRestartLen = 256 // The restart interval length of index block + historyIndexBatch = 8 * 1024 * 1024 // The number of state history indexes for constructing or deleting as batch ) // indexBlockDesc represents a descriptor for an index block, which contains a @@ -180,7 +182,11 @@ type blockWriter struct { data []byte // Aggregated encoded data slice } -func newBlockWriter(blob []byte, desc *indexBlockDesc) (*blockWriter, error) { +// newBlockWriter constructs a block writer. In addition to the existing data +// and block description, it takes an element ID and prunes all existing elements +// above that ID. It's essential as the recovery mechanism after unclean shutdown +// during the history indexing. 
+func newBlockWriter(blob []byte, desc *indexBlockDesc, limit uint64) (*blockWriter, error) { if len(blob) == 0 { return &blockWriter{ desc: desc, @@ -191,11 +197,22 @@ func newBlockWriter(blob []byte, desc *indexBlockDesc) (*blockWriter, error) { if err != nil { return nil, err } - return &blockWriter{ + writer := &blockWriter{ desc: desc, restarts: restarts, data: data, // safe to own the slice - }, nil + } + var trimmed int + for !writer.empty() && writer.last() > limit { + if err := writer.pop(writer.last()); err != nil { + return nil, err + } + trimmed += 1 + } + if trimmed > 0 { + log.Debug("Truncated extraneous elements", "count", trimmed, "limit", limit) + } + return writer, nil } // append adds a new element to the block. The new element must be greater than @@ -271,6 +288,7 @@ func (b *blockWriter) sectionLast(section int) uint64 { // sectionSearch looks up the specified value in the given section, // the position and the preceding value will be returned if found. +// It assumes that the preceding element exists in the section. func (b *blockWriter) sectionSearch(section int, n uint64) (found bool, prev uint64, pos int) { b.scanSection(section, func(v uint64, p int) bool { if n == v { @@ -295,7 +313,6 @@ func (b *blockWriter) pop(id uint64) error { } // If there is only one entry left, the entire block should be reset if b.desc.entries == 1 { - //b.desc.min = 0 b.desc.max = 0 b.desc.entries = 0 b.restarts = nil @@ -331,6 +348,15 @@ func (b *blockWriter) full() bool { return b.desc.full() } +// last returns the last element in the block. It should only be called when +// writer is not empty, otherwise the returned data is meaningless. +func (b *blockWriter) last() uint64 { + if b.empty() { + return 0 + } + return b.desc.max +} + // finish finalizes the index block encoding by appending the encoded restart points // and the restart counter to the end of the block. 
// diff --git a/triedb/pathdb/history_index_block_test.go b/triedb/pathdb/history_index_block_test.go index c251cea2ec..f8c6d3ab87 100644 --- a/triedb/pathdb/history_index_block_test.go +++ b/triedb/pathdb/history_index_block_test.go @@ -28,7 +28,7 @@ func TestBlockReaderBasic(t *testing.T) { elements := []uint64{ 1, 5, 10, 11, 20, } - bw, _ := newBlockWriter(nil, newIndexBlockDesc(0)) + bw, _ := newBlockWriter(nil, newIndexBlockDesc(0), 0) for i := 0; i < len(elements); i++ { bw.append(elements[i]) } @@ -66,7 +66,7 @@ func TestBlockReaderLarge(t *testing.T) { } slices.Sort(elements) - bw, _ := newBlockWriter(nil, newIndexBlockDesc(0)) + bw, _ := newBlockWriter(nil, newIndexBlockDesc(0), 0) for i := 0; i < len(elements); i++ { bw.append(elements[i]) } @@ -95,7 +95,7 @@ func TestBlockReaderLarge(t *testing.T) { } func TestBlockWriterBasic(t *testing.T) { - bw, _ := newBlockWriter(nil, newIndexBlockDesc(0)) + bw, _ := newBlockWriter(nil, newIndexBlockDesc(0), 0) if !bw.empty() { t.Fatal("expected empty block") } @@ -103,11 +103,13 @@ func TestBlockWriterBasic(t *testing.T) { if err := bw.append(1); err == nil { t.Fatal("out-of-order insertion is not expected") } + var maxElem uint64 for i := 0; i < 10; i++ { bw.append(uint64(i + 3)) + maxElem = uint64(i + 3) } - bw, err := newBlockWriter(bw.finish(), newIndexBlockDesc(0)) + bw, err := newBlockWriter(bw.finish(), newIndexBlockDesc(0), maxElem) if err != nil { t.Fatalf("Failed to construct the block writer, %v", err) } @@ -119,8 +121,71 @@ func TestBlockWriterBasic(t *testing.T) { bw.finish() } +func TestBlockWriterWithLimit(t *testing.T) { + bw, _ := newBlockWriter(nil, newIndexBlockDesc(0), 0) + + var maxElem uint64 + for i := 0; i < indexBlockRestartLen*2; i++ { + bw.append(uint64(i + 1)) + maxElem = uint64(i + 1) + } + + suites := []struct { + limit uint64 + expMax uint64 + }{ + // nothing to truncate + { + maxElem, maxElem, + }, + // truncate the last element + { + maxElem - 1, maxElem - 1, + }, + // truncation 
around the restart boundary + { + uint64(indexBlockRestartLen + 1), + uint64(indexBlockRestartLen + 1), + }, + // truncation around the restart boundary + { + uint64(indexBlockRestartLen), + uint64(indexBlockRestartLen), + }, + { + uint64(1), uint64(1), + }, + // truncate the entire block, it's in theory invalid + { + uint64(0), uint64(0), + }, + } + for i, suite := range suites { + desc := *bw.desc + block, err := newBlockWriter(bw.finish(), &desc, suite.limit) + if err != nil { + t.Fatalf("Failed to construct the block writer, %v", err) + } + if block.desc.max != suite.expMax { + t.Fatalf("Test %d, unexpected max value, got %d, want %d", i, block.desc.max, suite.expMax) + } + + // Re-fill the elements + var maxElem uint64 + for elem := suite.limit + 1; elem < indexBlockRestartLen*4; elem++ { + if err := block.append(elem); err != nil { + t.Fatalf("Failed to append value %d: %v", elem, err) + } + maxElem = elem + } + if block.desc.max != maxElem { + t.Fatalf("Test %d, unexpected max value, got %d, want %d", i, block.desc.max, maxElem) + } + } +} + func TestBlockWriterDelete(t *testing.T) { - bw, _ := newBlockWriter(nil, newIndexBlockDesc(0)) + bw, _ := newBlockWriter(nil, newIndexBlockDesc(0), 0) for i := 0; i < 10; i++ { bw.append(uint64(i + 1)) } @@ -147,7 +212,7 @@ func TestBlcokWriterDeleteWithData(t *testing.T) { elements := []uint64{ 1, 5, 10, 11, 20, } - bw, _ := newBlockWriter(nil, newIndexBlockDesc(0)) + bw, _ := newBlockWriter(nil, newIndexBlockDesc(0), 0) for i := 0; i < len(elements); i++ { bw.append(elements[i]) } @@ -158,7 +223,7 @@ func TestBlcokWriterDeleteWithData(t *testing.T) { max: 20, entries: 5, } - bw, err := newBlockWriter(bw.finish(), desc) + bw, err := newBlockWriter(bw.finish(), desc, elements[len(elements)-1]) if err != nil { t.Fatalf("Failed to construct block writer %v", err) } @@ -201,15 +266,18 @@ func TestBlcokWriterDeleteWithData(t *testing.T) { } func TestCorruptedIndexBlock(t *testing.T) { - bw, _ := newBlockWriter(nil, 
newIndexBlockDesc(0)) + bw, _ := newBlockWriter(nil, newIndexBlockDesc(0), 0) + + var maxElem uint64 for i := 0; i < 10; i++ { bw.append(uint64(i + 1)) + maxElem = uint64(i + 1) } buf := bw.finish() // Mutate the buffer manually buf[len(buf)-1]++ - _, err := newBlockWriter(buf, newIndexBlockDesc(0)) + _, err := newBlockWriter(buf, newIndexBlockDesc(0), maxElem) if err == nil { t.Fatal("Corrupted index block data is not detected") } @@ -218,7 +286,7 @@ func TestCorruptedIndexBlock(t *testing.T) { // BenchmarkParseIndexBlock benchmarks the performance of parseIndexBlock. func BenchmarkParseIndexBlock(b *testing.B) { // Generate a realistic index block blob - bw, _ := newBlockWriter(nil, newIndexBlockDesc(0)) + bw, _ := newBlockWriter(nil, newIndexBlockDesc(0), 0) for i := 0; i < 4096; i++ { bw.append(uint64(i * 2)) } @@ -238,13 +306,15 @@ func BenchmarkBlockWriterAppend(b *testing.B) { b.ReportAllocs() b.ResetTimer() - desc := newIndexBlockDesc(0) - writer, _ := newBlockWriter(nil, desc) + var blockID uint32 + desc := newIndexBlockDesc(blockID) + writer, _ := newBlockWriter(nil, desc, 0) for i := 0; i < b.N; i++ { if writer.full() { - desc = newIndexBlockDesc(0) - writer, _ = newBlockWriter(nil, desc) + blockID += 1 + desc = newIndexBlockDesc(blockID) + writer, _ = newBlockWriter(nil, desc, 0) } if err := writer.append(writer.desc.max + 1); err != nil { b.Error(err) diff --git a/triedb/pathdb/history_index_iterator_test.go b/triedb/pathdb/history_index_iterator_test.go index da60dc6e8f..f0dd3fee4a 100644 --- a/triedb/pathdb/history_index_iterator_test.go +++ b/triedb/pathdb/history_index_iterator_test.go @@ -33,7 +33,7 @@ func makeTestIndexBlock(count int) ([]byte, []uint64) { marks = make(map[uint64]bool) elements []uint64 ) - bw, _ := newBlockWriter(nil, newIndexBlockDesc(0)) + bw, _ := newBlockWriter(nil, newIndexBlockDesc(0), 0) for i := 0; i < count; i++ { n := uint64(rand.Uint32()) if marks[n] { @@ -67,7 +67,7 @@ func makeTestIndexBlocks(db ethdb.KeyValueStore, 
stateIdent stateIdent, count in } sort.Slice(elements, func(i, j int) bool { return elements[i] < elements[j] }) - iw, _ := newIndexWriter(db, stateIdent) + iw, _ := newIndexWriter(db, stateIdent, 0) for i := 0; i < len(elements); i++ { iw.append(elements[i]) } diff --git a/triedb/pathdb/history_index_test.go b/triedb/pathdb/history_index_test.go index be9b7c4049..42cb04b001 100644 --- a/triedb/pathdb/history_index_test.go +++ b/triedb/pathdb/history_index_test.go @@ -33,7 +33,7 @@ func TestIndexReaderBasic(t *testing.T) { 1, 5, 10, 11, 20, } db := rawdb.NewMemoryDatabase() - bw, _ := newIndexWriter(db, newAccountIdent(common.Hash{0xa})) + bw, _ := newIndexWriter(db, newAccountIdent(common.Hash{0xa}), 0) for i := 0; i < len(elements); i++ { bw.append(elements[i]) } @@ -75,7 +75,7 @@ func TestIndexReaderLarge(t *testing.T) { slices.Sort(elements) db := rawdb.NewMemoryDatabase() - bw, _ := newIndexWriter(db, newAccountIdent(common.Hash{0xa})) + bw, _ := newIndexWriter(db, newAccountIdent(common.Hash{0xa}), 0) for i := 0; i < len(elements); i++ { bw.append(elements[i]) } @@ -122,19 +122,21 @@ func TestEmptyIndexReader(t *testing.T) { func TestIndexWriterBasic(t *testing.T) { db := rawdb.NewMemoryDatabase() - iw, _ := newIndexWriter(db, newAccountIdent(common.Hash{0xa})) + iw, _ := newIndexWriter(db, newAccountIdent(common.Hash{0xa}), 0) iw.append(2) if err := iw.append(1); err == nil { t.Fatal("out-of-order insertion is not expected") } + var maxElem uint64 for i := 0; i < 10; i++ { iw.append(uint64(i + 3)) + maxElem = uint64(i + 3) } batch := db.NewBatch() iw.finish(batch) batch.Write() - iw, err := newIndexWriter(db, newAccountIdent(common.Hash{0xa})) + iw, err := newIndexWriter(db, newAccountIdent(common.Hash{0xa}), maxElem) if err != nil { t.Fatalf("Failed to construct the block writer, %v", err) } @@ -146,18 +148,87 @@ func TestIndexWriterBasic(t *testing.T) { iw.finish(db.NewBatch()) } -func TestIndexWriterDelete(t *testing.T) { +func TestIndexWriterWithLimit(t 
*testing.T) { db := rawdb.NewMemoryDatabase() - iw, _ := newIndexWriter(db, newAccountIdent(common.Hash{0xa})) + iw, _ := newIndexWriter(db, newAccountIdent(common.Hash{0xa}), 0) + + var maxElem uint64 + for i := 0; i < indexBlockEntriesCap*2; i++ { + iw.append(uint64(i + 1)) + maxElem = uint64(i + 1) + } + batch := db.NewBatch() + iw.finish(batch) + batch.Write() + + suites := []struct { + limit uint64 + expMax uint64 + }{ + // nothing to truncate + { + maxElem, maxElem, + }, + // truncate the last element + { + maxElem - 1, maxElem - 1, + }, + // truncation around the block boundary + { + uint64(indexBlockEntriesCap + 1), + uint64(indexBlockEntriesCap + 1), + }, + // truncation around the block boundary + { + uint64(indexBlockEntriesCap), + uint64(indexBlockEntriesCap), + }, + { + uint64(1), uint64(1), + }, + // truncate the entire index, it's in theory invalid + { + uint64(0), uint64(0), + }, + } + for i, suite := range suites { + iw, err := newIndexWriter(db, newAccountIdent(common.Hash{0xa}), suite.limit) + if err != nil { + t.Fatalf("Failed to construct the index writer, %v", err) + } + if iw.lastID != suite.expMax { + t.Fatalf("Test %d, unexpected max value, got %d, want %d", i, iw.lastID, suite.expMax) + } + + // Re-fill the elements + var maxElem uint64 + for elem := suite.limit + 1; elem < indexBlockEntriesCap*4; elem++ { + if err := iw.append(elem); err != nil { + t.Fatalf("Failed to append value %d: %v", elem, err) + } + maxElem = elem + } + if iw.lastID != maxElem { + t.Fatalf("Test %d, unexpected max value, got %d, want %d", i, iw.lastID, maxElem) + } + } +} + +func TestIndexDeleterBasic(t *testing.T) { + db := rawdb.NewMemoryDatabase() + iw, _ := newIndexWriter(db, newAccountIdent(common.Hash{0xa}), 0) + + var maxElem uint64 for i := 0; i < indexBlockEntriesCap*4; i++ { iw.append(uint64(i + 1)) + maxElem = uint64(i + 1) } batch := db.NewBatch() iw.finish(batch) batch.Write() // Delete unknown id, the request should be rejected - id, _ := 
newIndexDeleter(db, newAccountIdent(common.Hash{0xa})) + id, _ := newIndexDeleter(db, newAccountIdent(common.Hash{0xa}), maxElem) if err := id.pop(indexBlockEntriesCap * 5); err == nil { t.Fatal("Expect error to occur for unknown id") } @@ -168,10 +239,66 @@ func TestIndexWriterDelete(t *testing.T) { if id.lastID != uint64(i-1) { t.Fatalf("Unexpected lastID, want: %d, got: %d", uint64(i-1), iw.lastID) } - if rand.Intn(10) == 0 { - batch := db.NewBatch() - id.finish(batch) - batch.Write() + } +} + +func TestIndexDeleterWithLimit(t *testing.T) { + db := rawdb.NewMemoryDatabase() + iw, _ := newIndexWriter(db, newAccountIdent(common.Hash{0xa}), 0) + + var maxElem uint64 + for i := 0; i < indexBlockEntriesCap*2; i++ { + iw.append(uint64(i + 1)) + maxElem = uint64(i + 1) + } + batch := db.NewBatch() + iw.finish(batch) + batch.Write() + + suites := []struct { + limit uint64 + expMax uint64 + }{ + // nothing to truncate + { + maxElem, maxElem, + }, + // truncate the last element + { + maxElem - 1, maxElem - 1, + }, + // truncation around the block boundary + { + uint64(indexBlockEntriesCap + 1), + uint64(indexBlockEntriesCap + 1), + }, + // truncation around the block boundary + { + uint64(indexBlockEntriesCap), + uint64(indexBlockEntriesCap), + }, + { + uint64(1), uint64(1), + }, + // truncate the entire index, it's in theory invalid + { + uint64(0), uint64(0), + }, + } + for i, suite := range suites { + id, err := newIndexDeleter(db, newAccountIdent(common.Hash{0xa}), suite.limit) + if err != nil { + t.Fatalf("Failed to construct the index writer, %v", err) + } + if id.lastID != suite.expMax { + t.Fatalf("Test %d, unexpected max value, got %d, want %d", i, id.lastID, suite.expMax) + } + + // Keep removing elements + for elem := id.lastID; elem > 0; elem-- { + if err := id.pop(elem); err != nil { + t.Fatalf("Failed to pop value %d: %v", elem, err) + } } } } diff --git a/triedb/pathdb/history_indexer.go b/triedb/pathdb/history_indexer.go index 893ccd6523..9af7a96dc6 100644 
--- a/triedb/pathdb/history_indexer.go +++ b/triedb/pathdb/history_indexer.go @@ -40,11 +40,6 @@ const ( stateHistoryIndexVersion = stateHistoryIndexV0 // the current state index version trienodeHistoryIndexV0 = uint8(0) // initial version of trienode index structure trienodeHistoryIndexVersion = trienodeHistoryIndexV0 // the current trienode index version - - // estimations for calculating the batch size for atomic database commit - estimatedStateHistoryIndexSize = 3 // The average size of each state history index entry is approximately 2–3 bytes - estimatedTrienodeHistoryIndexSize = 3 // The average size of each trienode history index entry is approximately 2-3 bytes - estimatedIndexBatchSizeFactor = 32 // The factor counts for the write amplification for each entry ) // indexVersion returns the latest index version for the given history type. @@ -155,22 +150,6 @@ func (b *batchIndexer) process(h history, id uint64) error { return b.finish(false) } -// makeBatch constructs a database batch based on the number of pending entries. -// The batch size is roughly estimated to minimize repeated resizing rounds, -// as accurately predicting the exact size is technically challenging. -func (b *batchIndexer) makeBatch() ethdb.Batch { - var size int - switch b.typ { - case typeStateHistory: - size = estimatedStateHistoryIndexSize - case typeTrienodeHistory: - size = estimatedTrienodeHistoryIndexSize - default: - panic(fmt.Sprintf("unknown history type %d", b.typ)) - } - return b.db.NewBatchWithSize(size * estimatedIndexBatchSizeFactor * b.pending) -} - // finish writes the accumulated state indexes into the disk if either the // memory limitation is reached or it's requested forcibly. 
func (b *batchIndexer) finish(force bool) error { @@ -181,17 +160,38 @@ func (b *batchIndexer) finish(force bool) error { return nil } var ( - batch = b.makeBatch() - batchMu sync.RWMutex - start = time.Now() - eg errgroup.Group + start = time.Now() + eg errgroup.Group + + batch = b.db.NewBatchWithSize(ethdb.IdealBatchSize) + batchSize int + batchMu sync.RWMutex + + writeBatch = func(fn func(batch ethdb.Batch)) error { + batchMu.Lock() + defer batchMu.Unlock() + + fn(batch) + if batch.ValueSize() >= ethdb.IdealBatchSize { + batchSize += batch.ValueSize() + if err := batch.Write(); err != nil { + return err + } + batch.Reset() + } + return nil + } ) eg.SetLimit(runtime.NumCPU()) + var indexed uint64 + if metadata := loadIndexMetadata(b.db, b.typ); metadata != nil { + indexed = metadata.Last + } for ident, list := range b.index { eg.Go(func() error { if !b.delete { - iw, err := newIndexWriter(b.db, ident) + iw, err := newIndexWriter(b.db, ident, indexed) if err != nil { return err } @@ -200,11 +200,11 @@ func (b *batchIndexer) finish(force bool) error { return err } } - batchMu.Lock() - iw.finish(batch) - batchMu.Unlock() + return writeBatch(func(batch ethdb.Batch) { + iw.finish(batch) + }) } else { - id, err := newIndexDeleter(b.db, ident) + id, err := newIndexDeleter(b.db, ident, indexed) if err != nil { return err } @@ -213,11 +213,10 @@ func (b *batchIndexer) finish(force bool) error { return err } } - batchMu.Lock() - id.finish(batch) - batchMu.Unlock() + return writeBatch(func(batch ethdb.Batch) { + id.finish(batch) + }) } - return nil }) } if err := eg.Wait(); err != nil { @@ -233,10 +232,12 @@ func (b *batchIndexer) finish(force bool) error { storeIndexMetadata(batch, b.typ, b.lastID-1) } } + batchSize += batch.ValueSize() + if err := batch.Write(); err != nil { return err } - log.Debug("Committed batch indexer", "type", b.typ, "entries", len(b.index), "records", b.pending, "elapsed", common.PrettyDuration(time.Since(start))) + log.Debug("Committed batch 
indexer", "type", b.typ, "entries", len(b.index), "records", b.pending, "size", common.StorageSize(batchSize), "elapsed", common.PrettyDuration(time.Since(start))) b.pending = 0 b.index = make(map[stateIdent][]uint64) return nil From d9aaab13d37af39556e0e2be930baf9f10374a93 Mon Sep 17 00:00:00 2001 From: Fibonacci747 Date: Tue, 30 Dec 2025 18:27:11 +0100 Subject: [PATCH 207/277] beacon/light/sync: clear reqFinalityEpoch on server unregistration (#33483) HeadSync kept reqFinalityEpoch entries for servers after receiving EvUnregistered, while other per-server maps were cleared. This left stale request.Server keys reachable from HeadSync, which can lead to a slow memory leak in setups that dynamically register and unregister servers. The fix adds deletion of the reqFinalityEpoch entry in the EvUnregistered handler. This aligns HeadSync with the cleanup pattern used by other sync modules and keeps the finality request bookkeeping strictly limited to currently registered servers. --- beacon/light/sync/head_sync.go | 1 + 1 file changed, 1 insertion(+) diff --git a/beacon/light/sync/head_sync.go b/beacon/light/sync/head_sync.go index 5e41258053..7189767d9c 100644 --- a/beacon/light/sync/head_sync.go +++ b/beacon/light/sync/head_sync.go @@ -105,6 +105,7 @@ func (s *HeadSync) Process(requester request.Requester, events []request.Event) delete(s.serverHeads, event.Server) delete(s.unvalidatedOptimistic, event.Server) delete(s.unvalidatedFinality, event.Server) + delete(s.reqFinalityEpoch, event.Server) } } } From 52ae75afcda074d46a07fb017c334c2862f162be Mon Sep 17 00:00:00 2001 From: Rim Dinov Date: Wed, 31 Dec 2025 01:04:38 +0500 Subject: [PATCH 208/277] cmd/geth: remove deprecated vulnerability check command (#33498) This PR removes the version-check command and its associated logic as discussed in issue #31222. Removed versionCheckCommand from misccmd.go and main.go. Deleted version_check.go and its corresponding tests. 
Cleaned up testdata/vcheck directory (~800 lines of JSON/signatures removed). Verified build with make geth --- SECURITY.md | 1 - cmd/geth/main.go | 1 - cmd/geth/misccmd.go | 24 --- cmd/geth/testdata/vcheck/data.json | 202 ------------------ .../vcheck/minisig-sigs-new/data.json.minisig | 4 - .../vulnerabilities.json.minisig.1 | 4 - .../vulnerabilities.json.minisig.2 | 4 - .../vulnerabilities.json.minisig.3 | 4 - cmd/geth/testdata/vcheck/minisign.pub | 2 - cmd/geth/testdata/vcheck/minisign.sec | 2 - .../vcheck/signify-sigs/data.json.sig | 2 - cmd/geth/testdata/vcheck/signifykey.pub | 2 - cmd/geth/testdata/vcheck/signifykey.sec | 2 - cmd/geth/testdata/vcheck/vulnerabilities.json | 202 ------------------ cmd/geth/version_check.go | 170 --------------- cmd/geth/version_check_test.go | 189 ---------------- 16 files changed, 815 deletions(-) delete mode 100644 cmd/geth/testdata/vcheck/data.json delete mode 100644 cmd/geth/testdata/vcheck/minisig-sigs-new/data.json.minisig delete mode 100644 cmd/geth/testdata/vcheck/minisig-sigs/vulnerabilities.json.minisig.1 delete mode 100644 cmd/geth/testdata/vcheck/minisig-sigs/vulnerabilities.json.minisig.2 delete mode 100644 cmd/geth/testdata/vcheck/minisig-sigs/vulnerabilities.json.minisig.3 delete mode 100644 cmd/geth/testdata/vcheck/minisign.pub delete mode 100644 cmd/geth/testdata/vcheck/minisign.sec delete mode 100644 cmd/geth/testdata/vcheck/signify-sigs/data.json.sig delete mode 100644 cmd/geth/testdata/vcheck/signifykey.pub delete mode 100644 cmd/geth/testdata/vcheck/signifykey.sec delete mode 100644 cmd/geth/testdata/vcheck/vulnerabilities.json delete mode 100644 cmd/geth/version_check.go delete mode 100644 cmd/geth/version_check_test.go diff --git a/SECURITY.md b/SECURITY.md index 0b497b44ae..d497248de5 100644 --- a/SECURITY.md +++ b/SECURITY.md @@ -21,7 +21,6 @@ Audit reports are published in the `docs` folder: https://github.com/ethereum/go To find out how to disclose a vulnerability in Ethereum visit 
[https://bounty.ethereum.org](https://bounty.ethereum.org) or email bounty@ethereum.org. Please read the [disclosure page](https://github.com/ethereum/go-ethereum/security/advisories?state=published) for more information about publicly disclosed security vulnerabilities. -Use the built-in `geth version-check` feature to check whether the software is affected by any known vulnerability. This command will fetch the latest [`vulnerabilities.json`](https://geth.ethereum.org/docs/vulnerabilities/vulnerabilities.json) file which contains known security vulnerabilities concerning `geth`, and cross-check the data against its own version number. The following key may be used to communicate sensitive information to developers. diff --git a/cmd/geth/main.go b/cmd/geth/main.go index 96f9f58dde..db4b569c89 100644 --- a/cmd/geth/main.go +++ b/cmd/geth/main.go @@ -241,7 +241,6 @@ func init() { javascriptCommand, // See misccmd.go: versionCommand, - versionCheckCommand, licenseCommand, // See config.go dumpConfigCommand, diff --git a/cmd/geth/misccmd.go b/cmd/geth/misccmd.go index 2d31f3abe7..f5c0d55ebb 100644 --- a/cmd/geth/misccmd.go +++ b/cmd/geth/misccmd.go @@ -27,16 +27,6 @@ import ( ) var ( - VersionCheckUrlFlag = &cli.StringFlag{ - Name: "check.url", - Usage: "URL to use when checking vulnerabilities", - Value: "https://geth.ethereum.org/docs/vulnerabilities/vulnerabilities.json", - } - VersionCheckVersionFlag = &cli.StringFlag{ - Name: "check.version", - Usage: "Version to check", - Value: version.ClientName(clientIdentifier), - } versionCommand = &cli.Command{ Action: printVersion, Name: "version", @@ -44,20 +34,6 @@ var ( ArgsUsage: " ", Description: ` The output of this command is supposed to be machine-readable. 
-`, - } - versionCheckCommand = &cli.Command{ - Action: versionCheck, - Flags: []cli.Flag{ - VersionCheckUrlFlag, - VersionCheckVersionFlag, - }, - Name: "version-check", - Usage: "Checks (online) for known Geth security vulnerabilities", - ArgsUsage: "", - Description: ` -The version-check command fetches vulnerability-information from https://geth.ethereum.org/docs/vulnerabilities/vulnerabilities.json, -and displays information about any security vulnerabilities that affect the currently executing version. `, } licenseCommand = &cli.Command{ diff --git a/cmd/geth/testdata/vcheck/data.json b/cmd/geth/testdata/vcheck/data.json deleted file mode 100644 index e52fd84e67..0000000000 --- a/cmd/geth/testdata/vcheck/data.json +++ /dev/null @@ -1,202 +0,0 @@ -[ - { - "name": "CorruptedDAG", - "uid": "GETH-2020-01", - "summary": "Mining nodes will generate erroneous PoW on epochs > `385`.", - "description": "A mining flaw could cause miners to erroneously calculate PoW, due to an index overflow, if DAG size is exceeding the maximum 32 bit unsigned value.\n\nThis occurred on the ETC chain on 2020-11-06. 
This is likely to trigger for ETH mainnet around block `11550000`/epoch `385`, slated to occur early January 2021.\n\nThis issue is relevant only for miners, non-mining nodes are unaffected, since non-mining nodes use a smaller verification cache instead of a full DAG.", - "links": [ - "https://github.com/ethereum/go-ethereum/pull/21793", - "https://blog.ethereum.org/2020/11/12/geth-security-release", - "https://github.com/ethereum/go-ethereum/commit/567d41d9363706b4b13ce0903804e8acf214af49", - "https://github.com/ethereum/go-ethereum/security/advisories/GHSA-v592-xf75-856p" - ], - "introduced": "v1.6.0", - "fixed": "v1.9.24", - "published": "2020-11-12", - "severity": "Medium", - "CVE": "CVE-2020-26240", - "check": "Geth\\/v1\\.(6|7|8)\\..*|Geth\\/v1\\.9\\.\\d-.*|Geth\\/v1\\.9\\.1.*|Geth\\/v1\\.9\\.2(0|1|2|3)-.*" - }, - { - "name": "Denial of service due to Go CVE-2020-28362", - "uid": "GETH-2020-02", - "summary": "A denial-of-service issue can be used to crash Geth nodes during block processing, due to an underlying bug in Go (CVE-2020-28362) versions < `1.15.5`, or `<1.14.12`", - "description": "The DoS issue can be used to crash all Geth nodes during block processing, the effects of which would be that a major part of the Ethereum network went offline.\n\nOutside of Go-Ethereum, the issue is most likely relevant for all forks of Geth (such as TurboGeth or ETC’s core-geth) which is built with versions of Go which contains the vulnerability.", - "links": [ - "https://blog.ethereum.org/2020/11/12/geth-security-release", - "https://groups.google.com/g/golang-announce/c/NpBGTTmKzpM", - "https://github.com/golang/go/issues/42552", - "https://github.com/ethereum/go-ethereum/security/advisories/GHSA-m6gx-rhvj-fh52" - ], - "introduced": "v0.0.0", - "fixed": "v1.9.24", - "published": "2020-11-12", - "severity": "Critical", - "CVE": "CVE-2020-28362", - "check": "Geth.*\\/go1\\.(11(.*)|12(.*)|13(.*)|14|14\\.(\\d|10|11|)|15|15\\.[0-4])$" - }, - { - "name": "ShallowCopy", - 
"uid": "GETH-2020-03", - "summary": "A consensus flaw in Geth, related to `datacopy` precompile", - "description": "Geth erroneously performed a 'shallow' copy when the precompiled `datacopy` (at `0x00...04`) was invoked. An attacker could deploy a contract that uses the shallow copy to corrupt the contents of the `RETURNDATA`, thus causing a consensus failure.", - "links": [ - "https://blog.ethereum.org/2020/11/12/geth-security-release", - "https://github.com/ethereum/go-ethereum/security/advisories/GHSA-69v6-xc2j-r2jf" - ], - "introduced": "v1.9.7", - "fixed": "v1.9.17", - "published": "2020-11-12", - "severity": "Critical", - "CVE": "CVE-2020-26241", - "check": "Geth\\/v1\\.9\\.(7|8|9|10|11|12|13|14|15|16).*$" - }, - { - "name": "Geth DoS via MULMOD", - "uid": "GETH-2020-04", - "summary": "A denial-of-service issue can be used to crash Geth nodes during block processing", - "description": "Affected versions suffer from a vulnerability which can be exploited through the `MULMOD` operation, by specifying a modulo of `0`: `mulmod(a,b,0)`, causing a `panic` in the underlying library. \nThe crash was in the `uint256` library, where a buffer [underflowed](https://github.com/holiman/uint256/blob/4ce82e695c10ddad57215bdbeafb68b8c5df2c30/uint256.go#L442).\n\n\tif `d == 0`, `dLen` remains `0`\n\nand https://github.com/holiman/uint256/blob/4ce82e695c10ddad57215bdbeafb68b8c5df2c30/uint256.go#L451 will try to access index `[-1]`.\n\nThe `uint256` library was first merged in this [commit](https://github.com/ethereum/go-ethereum/commit/cf6674539c589f80031f3371a71c6a80addbe454), on 2020-06-08. \nExploiting this vulnerabilty would cause all vulnerable nodes to drop off the network. 
\n\nThe issue was brought to our attention through a [bug report](https://github.com/ethereum/go-ethereum/issues/21367), showing a `panic` occurring on sync from genesis on the Ropsten network.\n \nIt was estimated that the least obvious way to fix this would be to merge the fix into `uint256`, make a new release of that library and then update the geth-dependency.\n", - "links": [ - "https://blog.ethereum.org/2020/11/12/geth-security-release", - "https://github.com/ethereum/go-ethereum/security/advisories/GHSA-jm5c-rv3w-w83m", - "https://github.com/holiman/uint256/releases/tag/v1.1.1", - "https://github.com/holiman/uint256/pull/80", - "https://github.com/ethereum/go-ethereum/pull/21368" - ], - "introduced": "v1.9.16", - "fixed": "v1.9.18", - "published": "2020-11-12", - "severity": "Critical", - "CVE": "CVE-2020-26242", - "check": "Geth\\/v1\\.9.(16|17).*$" - }, - { - "name": "LES Server DoS via GetProofsV2", - "uid": "GETH-2020-05", - "summary": "A DoS vulnerability can make a LES server crash.", - "description": "A DoS vulnerability can make a LES server crash via malicious GetProofsV2 request from a connected LES client.\n\nThe vulnerability was patched in #21896.\n\nThis vulnerability only concern users explicitly running geth as a light server", - "links": [ - "https://github.com/ethereum/go-ethereum/security/advisories/GHSA-r33q-22hv-j29q", - "https://github.com/ethereum/go-ethereum/pull/21896" - ], - "introduced": "v1.8.0", - "fixed": "v1.9.25", - "published": "2020-12-10", - "severity": "Medium", - "CVE": "CVE-2020-26264", - "check": "(Geth\\/v1\\.8\\.*)|(Geth\\/v1\\.9\\.\\d-.*)|(Geth\\/v1\\.9\\.1\\d-.*)|(Geth\\/v1\\.9\\.(20|21|22|23|24)-.*)$" - }, - { - "name": "SELFDESTRUCT-recreate consensus flaw", - "uid": "GETH-2020-06", - "introduced": "v1.9.4", - "fixed": "v1.9.20", - "summary": "A consensus-vulnerability in Geth could cause a chain split, where vulnerable versions refuse to accept the canonical chain.", - "description": "A flaw was repoted at 
2020-08-11 by John Youngseok Yang (Software Platform Lab), where a particular sequence of transactions could cause a consensus failure.\n\n- Tx 1:\n - `sender` invokes `caller`.\n - `caller` invokes `0xaa`. `0xaa` has 3 wei, does a self-destruct-to-self\n - `caller` does a `1 wei` -call to `0xaa`, who thereby has 1 wei (the code in `0xaa` still executed, since the tx is still ongoing, but doesn't redo the selfdestruct, it takes a different path if callvalue is non-zero)\n\n-Tx 2:\n - `sender` does a 5-wei call to 0xaa. No exec (since no code). \n\nIn geth, the result would be that `0xaa` had `6 wei`, whereas OE reported (correctly) `5` wei. Furthermore, in geth, if the second tx was not executed, the `0xaa` would be destructed, resulting in `0 wei`. Thus obviously wrong. \n\nIt was determined that the root cause was this [commit](https://github.com/ethereum/go-ethereum/commit/223b950944f494a5b4e0957fd9f92c48b09037ad) from [this PR](https://github.com/ethereum/go-ethereum/pull/19953). The semantics of `createObject` was subtly changd, into returning a non-nil object (with `deleted=true`) where it previously did not if the account had been destructed. This return value caused the new object to inherit the old `balance`.\n", - "links": [ - "https://github.com/ethereum/go-ethereum/security/advisories/GHSA-xw37-57qp-9mm4" - ], - "published": "2020-12-10", - "severity": "High", - "CVE": "CVE-2020-26265", - "check": "(Geth\\/v1\\.9\\.(4|5|6|7|8|9)-.*)|(Geth\\/v1\\.9\\.1\\d-.*)$" - }, - { - "name": "Not ready for London upgrade", - "uid": "GETH-2021-01", - "summary": "The client is not ready for the 'London' technical upgrade, and will deviate from the canonical chain when the London upgrade occurs (at block '12965000' around August 4, 2021.", - "description": "At (or around) August 4, Ethereum will undergo a technical upgrade called 'London'. 
Clients not upgraded will fail to progress on the canonical chain.", - "links": [ - "https://github.com/ethereum/eth1.0-specs/blob/master/network-upgrades/mainnet-upgrades/london.md", - "https://notes.ethereum.org/@timbeiko/ropsten-postmortem" - ], - "introduced": "v1.10.1", - "fixed": "v1.10.6", - "published": "2021-07-22", - "severity": "High", - "check": "(Geth\\/v1\\.10\\.(1|2|3|4|5)-.*)$" - }, - { - "name": "RETURNDATA corruption via datacopy", - "uid": "GETH-2021-02", - "summary": "A consensus-flaw in the Geth EVM could cause a node to deviate from the canonical chain.", - "description": "A memory-corruption bug within the EVM can cause a consensus error, where vulnerable nodes obtain a different `stateRoot` when processing a maliciously crafted transaction. This, in turn, would lead to the chain being split: mainnet splitting in two forks.\n\nAll Geth versions supporting the London hard fork are vulnerable (the bug is older than London), so all users should update.\n\nThis bug was exploited on Mainnet at block 13107518.\n\nCredits for the discovery go to @guidovranken (working for Sentnl during an audit of the Telos EVM) and reported via bounty@ethereum.org.", - "links": [ - "https://github.com/ethereum/go-ethereum/blob/master/docs/postmortems/2021-08-22-split-postmortem.md", - "https://github.com/ethereum/go-ethereum/security/advisories/GHSA-9856-9gg9-qcmq", - "https://github.com/ethereum/go-ethereum/releases/tag/v1.10.8" - ], - "introduced": "v1.10.0", - "fixed": "v1.10.8", - "published": "2021-08-24", - "severity": "High", - "CVE": "CVE-2021-39137", - "check": "(Geth\\/v1\\.10\\.(0|1|2|3|4|5|6|7)-.*)$" - }, - { - "name": "DoS via malicious `snap/1` request", - "uid": "GETH-2021-03", - "summary": "A vulnerable node is susceptible to crash when processing a maliciously crafted message from a peer, via the snap/1 protocol. 
The crash can be triggered by sending a malicious snap/1 GetTrieNodes package.", - "description": "The `snap/1` protocol handler contains two vulnerabilities related to the `GetTrieNodes` packet, which can be exploited to crash the node. Full details are available at the Github security [advisory](https://github.com/ethereum/go-ethereum/security/advisories/GHSA-59hh-656j-3p7v)", - "links": [ - "https://github.com/ethereum/go-ethereum/security/advisories/GHSA-59hh-656j-3p7v", - "https://geth.ethereum.org/docs/vulnerabilities/vulnerabilities", - "https://github.com/ethereum/go-ethereum/pull/23657" - ], - "introduced": "v1.10.0", - "fixed": "v1.10.9", - "published": "2021-10-24", - "severity": "Medium", - "CVE": "CVE-2021-41173", - "check": "(Geth\\/v1\\.10\\.(0|1|2|3|4|5|6|7|8)-.*)$" - }, - { - "name": "DoS via malicious p2p message", - "uid": "GETH-2022-01", - "summary": "A vulnerable node can crash via p2p messages sent from an attacker node, if running with non-default log options.", - "description": "A vulnerable node, if configured to use high verbosity logging, can be made to crash when handling specially crafted p2p messages sent from an attacker node. 
Full details are available at the Github security [advisory](https://github.com/ethereum/go-ethereum/security/advisories/GHSA-wjxw-gh3m-7pm5)", - "links": [ - "https://github.com/ethereum/go-ethereum/security/advisories/GHSA-wjxw-gh3m-7pm5", - "https://geth.ethereum.org/docs/vulnerabilities/vulnerabilities", - "https://github.com/ethereum/go-ethereum/pull/24507" - ], - "introduced": "v1.10.0", - "fixed": "v1.10.17", - "published": "2022-05-11", - "severity": "Low", - "CVE": "CVE-2022-29177", - "check": "(Geth\\/v1\\.10\\.(0|1|2|3|4|5|6|7|8|9|10|11|12|13|14|15|16)-.*)$" - }, - { - "name": "DoS via malicious p2p message", - "uid": "GETH-2023-01", - "summary": "A vulnerable node can be made to consume unbounded amounts of memory when handling specially crafted p2p messages sent from an attacker node.", - "description": "The p2p handler spawned a new goroutine to respond to ping requests. By flooding a node with ping requests, an unbounded number of goroutines can be created, leading to resource exhaustion and potentially crash due to OOM.", - "links": [ - "https://github.com/ethereum/go-ethereum/security/advisories/GHSA-ppjg-v974-84cm", - "https://geth.ethereum.org/docs/vulnerabilities/vulnerabilities" - ], - "introduced": "v1.10.0", - "fixed": "v1.12.1", - "published": "2023-09-06", - "severity": "High", - "CVE": "CVE-2023-40591", - "check": "(Geth\\/v1\\.(10|11)\\..*)|(Geth\\/v1\\.12\\.0-.*)$" - }, - { - "name": "DoS via malicious p2p message", - "uid": "GETH-2024-01", - "summary": "A vulnerable node can be made to consume very large amounts of memory when handling specially crafted p2p messages sent from an attacker node.", - "description": "A vulnerable node can be made to consume very large amounts of memory when handling specially crafted p2p messages sent from an attacker node. 
Full details will be available at the Github security [advisory](https://github.com/ethereum/go-ethereum/security/advisories/GHSA-4xc9-8hmq-j652)", - "links": [ - "https://github.com/ethereum/go-ethereum/security/advisories/GHSA-4xc9-8hmq-j652", - "https://geth.ethereum.org/docs/vulnerabilities/vulnerabilities" - ], - "introduced": "v1.10.0", - "fixed": "v1.13.15", - "published": "2024-05-06", - "severity": "High", - "CVE": "CVE-2024-32972", - "check": "(Geth\\/v1\\.(10|11|12)\\..*)|(Geth\\/v1\\.13\\.\\d-.*)|(Geth\\/v1\\.13\\.1(0|1|2|3|4)-.*)$" - } -] diff --git a/cmd/geth/testdata/vcheck/minisig-sigs-new/data.json.minisig b/cmd/geth/testdata/vcheck/minisig-sigs-new/data.json.minisig deleted file mode 100644 index 987ffe92bb..0000000000 --- a/cmd/geth/testdata/vcheck/minisig-sigs-new/data.json.minisig +++ /dev/null @@ -1,4 +0,0 @@ -untrusted comment: signature from minisign secret key -RUQkliYstQBOKHklFEYCUjepz81dyUuDmIAxjAvXa+icjGuKcjtVfV06G7qfOMSpplS5EcntU12n+AnGNyuOM8zIctaIWcfG2w0= -trusted comment: timestamp:1752094689 file:data.json hashed -u2e4wo4HBTU6viQTSY/NVBHoWoPFJnnTvLZS0FYl3JdvSOYi6+qpbEsDhAIFqq/n8VmlS/fPqqf7vKCNiAgjAA== diff --git a/cmd/geth/testdata/vcheck/minisig-sigs/vulnerabilities.json.minisig.1 b/cmd/geth/testdata/vcheck/minisig-sigs/vulnerabilities.json.minisig.1 deleted file mode 100644 index 6b6aa900e3..0000000000 --- a/cmd/geth/testdata/vcheck/minisig-sigs/vulnerabilities.json.minisig.1 +++ /dev/null @@ -1,4 +0,0 @@ -untrusted comment: signature from minisign secret key -RWQkliYstQBOKNoyq2O98hPmeVJQ6ShQLM58+4n0gkY0y0trFMDAsHuN/l4IyHfh8dDQ1ry0+IuZVrf/i8M/P3YFzFfAymDYCQ0= -trusted comment: timestamp:1752094703 file:data.json -cNyq3ZGlqo785HtWODb9ejWqF0HhSeXuLGXzC7z1IhnDrBObWBJngYd3qBG1dQcYlHQ+bgB/On5mSyMFn4UoCQ== diff --git a/cmd/geth/testdata/vcheck/minisig-sigs/vulnerabilities.json.minisig.2 b/cmd/geth/testdata/vcheck/minisig-sigs/vulnerabilities.json.minisig.2 deleted file mode 100644 index 704437de39..0000000000 --- 
a/cmd/geth/testdata/vcheck/minisig-sigs/vulnerabilities.json.minisig.2 +++ /dev/null @@ -1,4 +0,0 @@ -untrusted comment: Here's a comment -RWQkliYstQBOKNoyq2O98hPmeVJQ6ShQLM58+4n0gkY0y0trFMDAsHuN/l4IyHfh8dDQ1ry0+IuZVrf/i8M/P3YFzFfAymDYCQ0= -trusted comment: Here's a trusted comment -dL7lO8sqFFCOXJO/u8SgoDk2nlXGWPRDbOTJkChMbmtUp9PB7sG831basXkZ/0CQ/l/vG7AbPyMNEVZyJn5NCg== diff --git a/cmd/geth/testdata/vcheck/minisig-sigs/vulnerabilities.json.minisig.3 b/cmd/geth/testdata/vcheck/minisig-sigs/vulnerabilities.json.minisig.3 deleted file mode 100644 index 806cd07316..0000000000 --- a/cmd/geth/testdata/vcheck/minisig-sigs/vulnerabilities.json.minisig.3 +++ /dev/null @@ -1,4 +0,0 @@ -untrusted comment: One more (untrusted™) comment -RWQkliYstQBOKNoyq2O98hPmeVJQ6ShQLM58+4n0gkY0y0trFMDAsHuN/l4IyHfh8dDQ1ry0+IuZVrf/i8M/P3YFzFfAymDYCQ0= -trusted comment: Here's a trusted comment -dL7lO8sqFFCOXJO/u8SgoDk2nlXGWPRDbOTJkChMbmtUp9PB7sG831basXkZ/0CQ/l/vG7AbPyMNEVZyJn5NCg== diff --git a/cmd/geth/testdata/vcheck/minisign.pub b/cmd/geth/testdata/vcheck/minisign.pub deleted file mode 100644 index 183dce5f6b..0000000000 --- a/cmd/geth/testdata/vcheck/minisign.pub +++ /dev/null @@ -1,2 +0,0 @@ -untrusted comment: minisign public key 284E00B52C269624 -RWQkliYstQBOKOdtClfgC3IypIPX6TAmoEi7beZ4gyR3wsaezvqOMWsp diff --git a/cmd/geth/testdata/vcheck/minisign.sec b/cmd/geth/testdata/vcheck/minisign.sec deleted file mode 100644 index 5c50715b20..0000000000 --- a/cmd/geth/testdata/vcheck/minisign.sec +++ /dev/null @@ -1,2 +0,0 @@ -untrusted comment: minisign encrypted secret key -RWRTY0Iyz8kmPMKrqk6DCtlO9a33akKiaOQG1aLolqDxs52qvPoAAAACAAAAAAAAAEAAAAAArEiggdvyn6+WzTprirLtgiYQoU+ihz/HyGgjhuF+Pz2ddMduyCO+xjCHeq+vgVVW039fbsI8hW6LRGJZLBKV5/jdxCXAVVQE7qTQ6xpEdO0z8Z731/pV1hlspQXG2PNd16NMtwd9dWw= diff --git a/cmd/geth/testdata/vcheck/signify-sigs/data.json.sig b/cmd/geth/testdata/vcheck/signify-sigs/data.json.sig deleted file mode 100644 index d704af7709..0000000000 --- 
a/cmd/geth/testdata/vcheck/signify-sigs/data.json.sig +++ /dev/null @@ -1,2 +0,0 @@ -untrusted comment: verify with signifykey.pub -RWSKLNhZb0KdARbMcGN40hbHzKQYZDgDOFhEUT1YpzMnqre/mbKJ8td/HVlG03Am1YCszATiI0DbnljjTy4iNHYwqBfzrFUqUg0= diff --git a/cmd/geth/testdata/vcheck/signifykey.pub b/cmd/geth/testdata/vcheck/signifykey.pub deleted file mode 100644 index 328f973ab4..0000000000 --- a/cmd/geth/testdata/vcheck/signifykey.pub +++ /dev/null @@ -1,2 +0,0 @@ -untrusted comment: signify public key -RWSKLNhZb0KdATtRT7mZC/bybI3t3+Hv/O2i3ye04Dq9fnT9slpZ1a2/ diff --git a/cmd/geth/testdata/vcheck/signifykey.sec b/cmd/geth/testdata/vcheck/signifykey.sec deleted file mode 100644 index 3279a2e58b..0000000000 --- a/cmd/geth/testdata/vcheck/signifykey.sec +++ /dev/null @@ -1,2 +0,0 @@ -untrusted comment: signify secret key -RWRCSwAAACpLQDLawSQCtI7eAVIvaiHzjTsTyJsfV5aKLNhZb0KdAWeICXJGa93/bHAcsY6jUh9I8RdEcDWEoGxmaXZC+IdVBPxDpkix9fBRGEUdKWHi3dOfqME0YRzErWI5AVg3cRw= diff --git a/cmd/geth/testdata/vcheck/vulnerabilities.json b/cmd/geth/testdata/vcheck/vulnerabilities.json deleted file mode 100644 index e52fd84e67..0000000000 --- a/cmd/geth/testdata/vcheck/vulnerabilities.json +++ /dev/null @@ -1,202 +0,0 @@ -[ - { - "name": "CorruptedDAG", - "uid": "GETH-2020-01", - "summary": "Mining nodes will generate erroneous PoW on epochs > `385`.", - "description": "A mining flaw could cause miners to erroneously calculate PoW, due to an index overflow, if DAG size is exceeding the maximum 32 bit unsigned value.\n\nThis occurred on the ETC chain on 2020-11-06. 
This is likely to trigger for ETH mainnet around block `11550000`/epoch `385`, slated to occur early January 2021.\n\nThis issue is relevant only for miners, non-mining nodes are unaffected, since non-mining nodes use a smaller verification cache instead of a full DAG.", - "links": [ - "https://github.com/ethereum/go-ethereum/pull/21793", - "https://blog.ethereum.org/2020/11/12/geth-security-release", - "https://github.com/ethereum/go-ethereum/commit/567d41d9363706b4b13ce0903804e8acf214af49", - "https://github.com/ethereum/go-ethereum/security/advisories/GHSA-v592-xf75-856p" - ], - "introduced": "v1.6.0", - "fixed": "v1.9.24", - "published": "2020-11-12", - "severity": "Medium", - "CVE": "CVE-2020-26240", - "check": "Geth\\/v1\\.(6|7|8)\\..*|Geth\\/v1\\.9\\.\\d-.*|Geth\\/v1\\.9\\.1.*|Geth\\/v1\\.9\\.2(0|1|2|3)-.*" - }, - { - "name": "Denial of service due to Go CVE-2020-28362", - "uid": "GETH-2020-02", - "summary": "A denial-of-service issue can be used to crash Geth nodes during block processing, due to an underlying bug in Go (CVE-2020-28362) versions < `1.15.5`, or `<1.14.12`", - "description": "The DoS issue can be used to crash all Geth nodes during block processing, the effects of which would be that a major part of the Ethereum network went offline.\n\nOutside of Go-Ethereum, the issue is most likely relevant for all forks of Geth (such as TurboGeth or ETC’s core-geth) which is built with versions of Go which contains the vulnerability.", - "links": [ - "https://blog.ethereum.org/2020/11/12/geth-security-release", - "https://groups.google.com/g/golang-announce/c/NpBGTTmKzpM", - "https://github.com/golang/go/issues/42552", - "https://github.com/ethereum/go-ethereum/security/advisories/GHSA-m6gx-rhvj-fh52" - ], - "introduced": "v0.0.0", - "fixed": "v1.9.24", - "published": "2020-11-12", - "severity": "Critical", - "CVE": "CVE-2020-28362", - "check": "Geth.*\\/go1\\.(11(.*)|12(.*)|13(.*)|14|14\\.(\\d|10|11|)|15|15\\.[0-4])$" - }, - { - "name": "ShallowCopy", - 
"uid": "GETH-2020-03", - "summary": "A consensus flaw in Geth, related to `datacopy` precompile", - "description": "Geth erroneously performed a 'shallow' copy when the precompiled `datacopy` (at `0x00...04`) was invoked. An attacker could deploy a contract that uses the shallow copy to corrupt the contents of the `RETURNDATA`, thus causing a consensus failure.", - "links": [ - "https://blog.ethereum.org/2020/11/12/geth-security-release", - "https://github.com/ethereum/go-ethereum/security/advisories/GHSA-69v6-xc2j-r2jf" - ], - "introduced": "v1.9.7", - "fixed": "v1.9.17", - "published": "2020-11-12", - "severity": "Critical", - "CVE": "CVE-2020-26241", - "check": "Geth\\/v1\\.9\\.(7|8|9|10|11|12|13|14|15|16).*$" - }, - { - "name": "Geth DoS via MULMOD", - "uid": "GETH-2020-04", - "summary": "A denial-of-service issue can be used to crash Geth nodes during block processing", - "description": "Affected versions suffer from a vulnerability which can be exploited through the `MULMOD` operation, by specifying a modulo of `0`: `mulmod(a,b,0)`, causing a `panic` in the underlying library. \nThe crash was in the `uint256` library, where a buffer [underflowed](https://github.com/holiman/uint256/blob/4ce82e695c10ddad57215bdbeafb68b8c5df2c30/uint256.go#L442).\n\n\tif `d == 0`, `dLen` remains `0`\n\nand https://github.com/holiman/uint256/blob/4ce82e695c10ddad57215bdbeafb68b8c5df2c30/uint256.go#L451 will try to access index `[-1]`.\n\nThe `uint256` library was first merged in this [commit](https://github.com/ethereum/go-ethereum/commit/cf6674539c589f80031f3371a71c6a80addbe454), on 2020-06-08. \nExploiting this vulnerabilty would cause all vulnerable nodes to drop off the network. 
\n\nThe issue was brought to our attention through a [bug report](https://github.com/ethereum/go-ethereum/issues/21367), showing a `panic` occurring on sync from genesis on the Ropsten network.\n \nIt was estimated that the least obvious way to fix this would be to merge the fix into `uint256`, make a new release of that library and then update the geth-dependency.\n", - "links": [ - "https://blog.ethereum.org/2020/11/12/geth-security-release", - "https://github.com/ethereum/go-ethereum/security/advisories/GHSA-jm5c-rv3w-w83m", - "https://github.com/holiman/uint256/releases/tag/v1.1.1", - "https://github.com/holiman/uint256/pull/80", - "https://github.com/ethereum/go-ethereum/pull/21368" - ], - "introduced": "v1.9.16", - "fixed": "v1.9.18", - "published": "2020-11-12", - "severity": "Critical", - "CVE": "CVE-2020-26242", - "check": "Geth\\/v1\\.9.(16|17).*$" - }, - { - "name": "LES Server DoS via GetProofsV2", - "uid": "GETH-2020-05", - "summary": "A DoS vulnerability can make a LES server crash.", - "description": "A DoS vulnerability can make a LES server crash via malicious GetProofsV2 request from a connected LES client.\n\nThe vulnerability was patched in #21896.\n\nThis vulnerability only concern users explicitly running geth as a light server", - "links": [ - "https://github.com/ethereum/go-ethereum/security/advisories/GHSA-r33q-22hv-j29q", - "https://github.com/ethereum/go-ethereum/pull/21896" - ], - "introduced": "v1.8.0", - "fixed": "v1.9.25", - "published": "2020-12-10", - "severity": "Medium", - "CVE": "CVE-2020-26264", - "check": "(Geth\\/v1\\.8\\.*)|(Geth\\/v1\\.9\\.\\d-.*)|(Geth\\/v1\\.9\\.1\\d-.*)|(Geth\\/v1\\.9\\.(20|21|22|23|24)-.*)$" - }, - { - "name": "SELFDESTRUCT-recreate consensus flaw", - "uid": "GETH-2020-06", - "introduced": "v1.9.4", - "fixed": "v1.9.20", - "summary": "A consensus-vulnerability in Geth could cause a chain split, where vulnerable versions refuse to accept the canonical chain.", - "description": "A flaw was repoted at 
2020-08-11 by John Youngseok Yang (Software Platform Lab), where a particular sequence of transactions could cause a consensus failure.\n\n- Tx 1:\n - `sender` invokes `caller`.\n - `caller` invokes `0xaa`. `0xaa` has 3 wei, does a self-destruct-to-self\n - `caller` does a `1 wei` -call to `0xaa`, who thereby has 1 wei (the code in `0xaa` still executed, since the tx is still ongoing, but doesn't redo the selfdestruct, it takes a different path if callvalue is non-zero)\n\n-Tx 2:\n - `sender` does a 5-wei call to 0xaa. No exec (since no code). \n\nIn geth, the result would be that `0xaa` had `6 wei`, whereas OE reported (correctly) `5` wei. Furthermore, in geth, if the second tx was not executed, the `0xaa` would be destructed, resulting in `0 wei`. Thus obviously wrong. \n\nIt was determined that the root cause was this [commit](https://github.com/ethereum/go-ethereum/commit/223b950944f494a5b4e0957fd9f92c48b09037ad) from [this PR](https://github.com/ethereum/go-ethereum/pull/19953). The semantics of `createObject` was subtly changd, into returning a non-nil object (with `deleted=true`) where it previously did not if the account had been destructed. This return value caused the new object to inherit the old `balance`.\n", - "links": [ - "https://github.com/ethereum/go-ethereum/security/advisories/GHSA-xw37-57qp-9mm4" - ], - "published": "2020-12-10", - "severity": "High", - "CVE": "CVE-2020-26265", - "check": "(Geth\\/v1\\.9\\.(4|5|6|7|8|9)-.*)|(Geth\\/v1\\.9\\.1\\d-.*)$" - }, - { - "name": "Not ready for London upgrade", - "uid": "GETH-2021-01", - "summary": "The client is not ready for the 'London' technical upgrade, and will deviate from the canonical chain when the London upgrade occurs (at block '12965000' around August 4, 2021.", - "description": "At (or around) August 4, Ethereum will undergo a technical upgrade called 'London'. 
Clients not upgraded will fail to progress on the canonical chain.", - "links": [ - "https://github.com/ethereum/eth1.0-specs/blob/master/network-upgrades/mainnet-upgrades/london.md", - "https://notes.ethereum.org/@timbeiko/ropsten-postmortem" - ], - "introduced": "v1.10.1", - "fixed": "v1.10.6", - "published": "2021-07-22", - "severity": "High", - "check": "(Geth\\/v1\\.10\\.(1|2|3|4|5)-.*)$" - }, - { - "name": "RETURNDATA corruption via datacopy", - "uid": "GETH-2021-02", - "summary": "A consensus-flaw in the Geth EVM could cause a node to deviate from the canonical chain.", - "description": "A memory-corruption bug within the EVM can cause a consensus error, where vulnerable nodes obtain a different `stateRoot` when processing a maliciously crafted transaction. This, in turn, would lead to the chain being split: mainnet splitting in two forks.\n\nAll Geth versions supporting the London hard fork are vulnerable (the bug is older than London), so all users should update.\n\nThis bug was exploited on Mainnet at block 13107518.\n\nCredits for the discovery go to @guidovranken (working for Sentnl during an audit of the Telos EVM) and reported via bounty@ethereum.org.", - "links": [ - "https://github.com/ethereum/go-ethereum/blob/master/docs/postmortems/2021-08-22-split-postmortem.md", - "https://github.com/ethereum/go-ethereum/security/advisories/GHSA-9856-9gg9-qcmq", - "https://github.com/ethereum/go-ethereum/releases/tag/v1.10.8" - ], - "introduced": "v1.10.0", - "fixed": "v1.10.8", - "published": "2021-08-24", - "severity": "High", - "CVE": "CVE-2021-39137", - "check": "(Geth\\/v1\\.10\\.(0|1|2|3|4|5|6|7)-.*)$" - }, - { - "name": "DoS via malicious `snap/1` request", - "uid": "GETH-2021-03", - "summary": "A vulnerable node is susceptible to crash when processing a maliciously crafted message from a peer, via the snap/1 protocol. 
The crash can be triggered by sending a malicious snap/1 GetTrieNodes package.", - "description": "The `snap/1` protocol handler contains two vulnerabilities related to the `GetTrieNodes` packet, which can be exploited to crash the node. Full details are available at the Github security [advisory](https://github.com/ethereum/go-ethereum/security/advisories/GHSA-59hh-656j-3p7v)", - "links": [ - "https://github.com/ethereum/go-ethereum/security/advisories/GHSA-59hh-656j-3p7v", - "https://geth.ethereum.org/docs/vulnerabilities/vulnerabilities", - "https://github.com/ethereum/go-ethereum/pull/23657" - ], - "introduced": "v1.10.0", - "fixed": "v1.10.9", - "published": "2021-10-24", - "severity": "Medium", - "CVE": "CVE-2021-41173", - "check": "(Geth\\/v1\\.10\\.(0|1|2|3|4|5|6|7|8)-.*)$" - }, - { - "name": "DoS via malicious p2p message", - "uid": "GETH-2022-01", - "summary": "A vulnerable node can crash via p2p messages sent from an attacker node, if running with non-default log options.", - "description": "A vulnerable node, if configured to use high verbosity logging, can be made to crash when handling specially crafted p2p messages sent from an attacker node. 
Full details are available at the Github security [advisory](https://github.com/ethereum/go-ethereum/security/advisories/GHSA-wjxw-gh3m-7pm5)", - "links": [ - "https://github.com/ethereum/go-ethereum/security/advisories/GHSA-wjxw-gh3m-7pm5", - "https://geth.ethereum.org/docs/vulnerabilities/vulnerabilities", - "https://github.com/ethereum/go-ethereum/pull/24507" - ], - "introduced": "v1.10.0", - "fixed": "v1.10.17", - "published": "2022-05-11", - "severity": "Low", - "CVE": "CVE-2022-29177", - "check": "(Geth\\/v1\\.10\\.(0|1|2|3|4|5|6|7|8|9|10|11|12|13|14|15|16)-.*)$" - }, - { - "name": "DoS via malicious p2p message", - "uid": "GETH-2023-01", - "summary": "A vulnerable node can be made to consume unbounded amounts of memory when handling specially crafted p2p messages sent from an attacker node.", - "description": "The p2p handler spawned a new goroutine to respond to ping requests. By flooding a node with ping requests, an unbounded number of goroutines can be created, leading to resource exhaustion and potentially crash due to OOM.", - "links": [ - "https://github.com/ethereum/go-ethereum/security/advisories/GHSA-ppjg-v974-84cm", - "https://geth.ethereum.org/docs/vulnerabilities/vulnerabilities" - ], - "introduced": "v1.10.0", - "fixed": "v1.12.1", - "published": "2023-09-06", - "severity": "High", - "CVE": "CVE-2023-40591", - "check": "(Geth\\/v1\\.(10|11)\\..*)|(Geth\\/v1\\.12\\.0-.*)$" - }, - { - "name": "DoS via malicious p2p message", - "uid": "GETH-2024-01", - "summary": "A vulnerable node can be made to consume very large amounts of memory when handling specially crafted p2p messages sent from an attacker node.", - "description": "A vulnerable node can be made to consume very large amounts of memory when handling specially crafted p2p messages sent from an attacker node. 
Full details will be available at the Github security [advisory](https://github.com/ethereum/go-ethereum/security/advisories/GHSA-4xc9-8hmq-j652)", - "links": [ - "https://github.com/ethereum/go-ethereum/security/advisories/GHSA-4xc9-8hmq-j652", - "https://geth.ethereum.org/docs/vulnerabilities/vulnerabilities" - ], - "introduced": "v1.10.0", - "fixed": "v1.13.15", - "published": "2024-05-06", - "severity": "High", - "CVE": "CVE-2024-32972", - "check": "(Geth\\/v1\\.(10|11|12)\\..*)|(Geth\\/v1\\.13\\.\\d-.*)|(Geth\\/v1\\.13\\.1(0|1|2|3|4)-.*)$" - } -] diff --git a/cmd/geth/version_check.go b/cmd/geth/version_check.go deleted file mode 100644 index 237556788e..0000000000 --- a/cmd/geth/version_check.go +++ /dev/null @@ -1,170 +0,0 @@ -// Copyright 2020 The go-ethereum Authors -// This file is part of go-ethereum. -// -// go-ethereum is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// go-ethereum is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. -// -// You should have received a copy of the GNU General Public License -// along with go-ethereum. If not, see . 
- -package main - -import ( - "encoding/json" - "errors" - "fmt" - "io" - "net/http" - "os" - "regexp" - "strings" - - "github.com/ethereum/go-ethereum/log" - "github.com/jedisct1/go-minisign" - "github.com/urfave/cli/v2" -) - -var gethPubKeys []string = []string{ - //@holiman, minisign public key FB1D084D39BAEC24 - "RWQk7Lo5TQgd+wxBNZM+Zoy+7UhhMHaWKzqoes9tvSbFLJYZhNTbrIjx", - //minisign public key 138B1CA303E51687 - "RWSHFuUDoxyLEzjszuWZI1xStS66QTyXFFZG18uDfO26CuCsbckX1e9J", - //minisign public key FD9813B2D2098484 - "RWSEhAnSshOY/b+GmaiDkObbCWefsAoavjoLcPjBo1xn71yuOH5I+Lts", -} - -type vulnJson struct { - Name string - Uid string - Summary string - Description string - Links []string - Introduced string - Fixed string - Published string - Severity string - Check string - CVE string -} - -func versionCheck(ctx *cli.Context) error { - url := ctx.String(VersionCheckUrlFlag.Name) - version := ctx.String(VersionCheckVersionFlag.Name) - log.Info("Checking vulnerabilities", "version", version, "url", url) - return checkCurrent(url, version) -} - -func checkCurrent(url, current string) error { - var ( - data []byte - sig []byte - err error - ) - if data, err = fetch(url); err != nil { - return fmt.Errorf("could not retrieve data: %w", err) - } - if sig, err = fetch(fmt.Sprintf("%v.minisig", url)); err != nil { - return fmt.Errorf("could not retrieve signature: %w", err) - } - if err = verifySignature(gethPubKeys, data, sig); err != nil { - return err - } - var vulns []vulnJson - if err = json.Unmarshal(data, &vulns); err != nil { - return err - } - allOk := true - for _, vuln := range vulns { - r, err := regexp.Compile(vuln.Check) - if err != nil { - return err - } - if r.MatchString(current) { - allOk = false - fmt.Printf("## Vulnerable to %v (%v)\n\n", vuln.Uid, vuln.Name) - fmt.Printf("Severity: %v\n", vuln.Severity) - fmt.Printf("Summary : %v\n", vuln.Summary) - fmt.Printf("Fixed in: %v\n", vuln.Fixed) - if len(vuln.CVE) > 0 { - fmt.Printf("CVE: %v\n", vuln.CVE) - } 
- if len(vuln.Links) > 0 { - fmt.Printf("References:\n") - for _, ref := range vuln.Links { - fmt.Printf("\t- %v\n", ref) - } - } - fmt.Println() - } - } - if allOk { - fmt.Println("No vulnerabilities found") - } - return nil -} - -// fetch makes an HTTP request to the given url and returns the response body -func fetch(url string) ([]byte, error) { - if filep := strings.TrimPrefix(url, "file://"); filep != url { - return os.ReadFile(filep) - } - res, err := http.Get(url) - if err != nil { - return nil, err - } - defer res.Body.Close() - body, err := io.ReadAll(res.Body) - if err != nil { - return nil, err - } - return body, nil -} - -// verifySignature checks that the sigData is a valid signature of the given -// data, for pubkey GethPubkey -func verifySignature(pubkeys []string, data, sigdata []byte) error { - sig, err := minisign.DecodeSignature(string(sigdata)) - if err != nil { - return err - } - // find the used key - var key *minisign.PublicKey - for _, pubkey := range pubkeys { - pub, err := minisign.NewPublicKey(pubkey) - if err != nil { - // our pubkeys should be parseable - return err - } - if pub.KeyId != sig.KeyId { - continue - } - key = &pub - break - } - if key == nil { - log.Info("Signing key not trusted", "keyid", keyID(sig.KeyId), "error", err) - return errors.New("signature could not be verified") - } - if ok, err := key.Verify(data, sig); !ok || err != nil { - log.Info("Verification failed error", "keyid", keyID(key.KeyId), "error", err) - return errors.New("signature could not be verified") - } - return nil -} - -// keyID turns a binary minisign key ID into a hex string. -// Note: key IDs are printed in reverse byte order. 
-func keyID(id [8]byte) string { - var rev [8]byte - for i := range id { - rev[len(rev)-1-i] = id[i] - } - return fmt.Sprintf("%X", rev) -} diff --git a/cmd/geth/version_check_test.go b/cmd/geth/version_check_test.go deleted file mode 100644 index fb5d1b2d69..0000000000 --- a/cmd/geth/version_check_test.go +++ /dev/null @@ -1,189 +0,0 @@ -// Copyright 2020 The go-ethereum Authors -// This file is part of go-ethereum. -// -// go-ethereum is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// go-ethereum is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. -// -// You should have received a copy of the GNU General Public License -// along with go-ethereum. If not, see . - -package main - -import ( - "encoding/json" - "fmt" - "os" - "path/filepath" - "regexp" - "strconv" - "strings" - "testing" - - "github.com/jedisct1/go-minisign" -) - -func TestVerification(t *testing.T) { - t.Parallel() - // Signatures generated with `minisign`. Legacy format, not pre-hashed file. - t.Run("minisig-legacy", func(t *testing.T) { - t.Parallel() - // For this test, the pubkey is in testdata/vcheck/minisign.pub - // (the privkey is `minisign.sec`, if we want to expand this test. Password 'test' ) - // 1. `minisign -S -l -s ./minisign.sec -m data.json -x ./minisig-sigs/vulnerabilities.json.minisig.1 -c "signature from minisign secret key"` - // 2. `minisign -S -l -s ./minisign.sec -m vulnerabilities.json -x ./minisig-sigs/vulnerabilities.json.minisig.2 -c "Here's a comment" -t "Here's a trusted comment"` - // 3. 
minisign -S -l -s ./minisign.sec -m vulnerabilities.json -x ./minisig-sigs/vulnerabilities.json.minisig.3 -c "One more (untrusted™) comment" -t "Here's a trusted comment" - pub := "RWQkliYstQBOKOdtClfgC3IypIPX6TAmoEi7beZ4gyR3wsaezvqOMWsp" - testVerification(t, pub, "./testdata/vcheck/minisig-sigs/") - }) - t.Run("minisig-new", func(t *testing.T) { - t.Parallel() - // For this test, the pubkey is in testdata/vcheck/minisign.pub - // (the privkey is `minisign.sec`, if we want to expand this test. Password 'test' ) - // `minisign -S -s ./minisign.sec -m data.json -x ./minisig-sigs-new/data.json.minisig` - pub := "RWQkliYstQBOKOdtClfgC3IypIPX6TAmoEi7beZ4gyR3wsaezvqOMWsp" - testVerification(t, pub, "./testdata/vcheck/minisig-sigs-new/") - }) - // Signatures generated with `signify-openbsd` - t.Run("signify-openbsd", func(t *testing.T) { - t.Parallel() - t.Skip("This currently fails, minisign expects 4 lines of data, signify provides only 2") - // For this test, the pubkey is in testdata/vcheck/signifykey.pub - // (the privkey is `signifykey.sec`, if we want to expand this test. 
Password 'test' ) - // `signify -S -s signifykey.sec -m data.json -x ./signify-sigs/data.json.sig` - pub := "RWSKLNhZb0KdATtRT7mZC/bybI3t3+Hv/O2i3ye04Dq9fnT9slpZ1a2/" - testVerification(t, pub, "./testdata/vcheck/signify-sigs/") - }) -} - -func testVerification(t *testing.T, pubkey, sigdir string) { - // Data to verify - data, err := os.ReadFile("./testdata/vcheck/data.json") - if err != nil { - t.Fatal(err) - } - // Signatures, with and without comments, both trusted and untrusted - files, err := os.ReadDir(sigdir) - if err != nil { - t.Fatal(err) - } - if len(files) == 0 { - t.Fatal("Missing tests") - } - for _, f := range files { - sig, err := os.ReadFile(filepath.Join(sigdir, f.Name())) - if err != nil { - t.Fatal(err) - } - err = verifySignature([]string{pubkey}, data, sig) - if err != nil { - t.Fatal(err) - } - } -} - -func versionUint(v string) int { - mustInt := func(s string) int { - a, err := strconv.Atoi(s) - if err != nil { - panic(v) - } - return a - } - components := strings.Split(strings.TrimPrefix(v, "v"), ".") - a := mustInt(components[0]) - b := mustInt(components[1]) - c := mustInt(components[2]) - return a*100*100 + b*100 + c -} - -// TestMatching can be used to check that the regexps are correct -func TestMatching(t *testing.T) { - t.Parallel() - data, _ := os.ReadFile("./testdata/vcheck/vulnerabilities.json") - var vulns []vulnJson - if err := json.Unmarshal(data, &vulns); err != nil { - t.Fatal(err) - } - check := func(version string) { - vFull := fmt.Sprintf("Geth/%v-unstable-15339cf1-20201204/linux-amd64/go1.15.4", version) - for _, vuln := range vulns { - r, err := regexp.Compile(vuln.Check) - vulnIntro := versionUint(vuln.Introduced) - vulnFixed := versionUint(vuln.Fixed) - current := versionUint(version) - if err != nil { - t.Fatal(err) - } - if vuln.Name == "Denial of service due to Go CVE-2020-28362" { - // this one is not tied to geth-versions - continue - } - if vulnIntro <= current && vulnFixed > current { - // Should be vulnerable 
- if !r.MatchString(vFull) { - t.Errorf("Should be vulnerable, version %v, intro: %v, fixed: %v %v %v", - version, vuln.Introduced, vuln.Fixed, vuln.Name, vuln.Check) - } - } else { - if r.MatchString(vFull) { - t.Errorf("Should not be flagged vulnerable, version %v, intro: %v, fixed: %v %v %d %d %d", - version, vuln.Introduced, vuln.Fixed, vuln.Name, vulnIntro, current, vulnFixed) - } - } - } - } - for major := 1; major < 2; major++ { - for minor := 0; minor < 30; minor++ { - for patch := 0; patch < 30; patch++ { - vShort := fmt.Sprintf("v%d.%d.%d", major, minor, patch) - check(vShort) - } - } - } -} - -func TestGethPubKeysParseable(t *testing.T) { - t.Parallel() - for _, pubkey := range gethPubKeys { - _, err := minisign.NewPublicKey(pubkey) - if err != nil { - t.Errorf("Should be parseable") - } - } -} - -func TestKeyID(t *testing.T) { - t.Parallel() - type args struct { - id [8]byte - } - tests := []struct { - name string - args args - want string - }{ - {"@holiman key", args{id: extractKeyId(gethPubKeys[0])}, "FB1D084D39BAEC24"}, - {"second key", args{id: extractKeyId(gethPubKeys[1])}, "138B1CA303E51687"}, - {"third key", args{id: extractKeyId(gethPubKeys[2])}, "FD9813B2D2098484"}, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - t.Parallel() - if got := keyID(tt.args.id); got != tt.want { - t.Errorf("keyID() = %v, want %v", got, tt.want) - } - }) - } -} - -func extractKeyId(pubkey string) [8]byte { - p, _ := minisign.NewPublicKey(pubkey) - return p.KeyId -} From 25439aac048e777f3f6e117948160069528d423d Mon Sep 17 00:00:00 2001 From: Bashmunta Date: Wed, 31 Dec 2025 03:40:43 +0200 Subject: [PATCH 209/277] core/state/snapshot: fix storageList memory accounting (#33505) --- core/state/snapshot/difflayer.go | 2 +- core/state/snapshot/difflayer_test.go | 33 +++++++++++++++++++++++++++ 2 files changed, 34 insertions(+), 1 deletion(-) diff --git a/core/state/snapshot/difflayer.go b/core/state/snapshot/difflayer.go index 28957051d4..1286ded7e1 
100644 --- a/core/state/snapshot/difflayer.go +++ b/core/state/snapshot/difflayer.go @@ -465,6 +465,6 @@ func (dl *diffLayer) StorageList(accountHash common.Hash) []common.Hash { storageList := slices.SortedFunc(maps.Keys(dl.storageData[accountHash]), common.Hash.Cmp) dl.storageList[accountHash] = storageList - dl.memory += uint64(len(dl.storageList)*common.HashLength + common.HashLength) + dl.memory += uint64(len(storageList)*common.HashLength + common.HashLength) return storageList } diff --git a/core/state/snapshot/difflayer_test.go b/core/state/snapshot/difflayer_test.go index 2c868b3010..90a265645d 100644 --- a/core/state/snapshot/difflayer_test.go +++ b/core/state/snapshot/difflayer_test.go @@ -198,6 +198,39 @@ func TestInsertAndMerge(t *testing.T) { } } +// TestStorageListMemoryAccounting ensures that StorageList increases dl.memory +// proportionally to the number of storage slots in the requested account and +// does not change memory usage on repeated calls for the same account. 
+func TestStorageListMemoryAccounting(t *testing.T) { + parent := newDiffLayer(emptyLayer(), common.Hash{}, nil, nil) + account := common.HexToHash("0x01") + + slots := make(map[common.Hash][]byte) + for i := 0; i < 3; i++ { + slots[randomHash()] = []byte{0x01} + } + storage := map[common.Hash]map[common.Hash][]byte{ + account: slots, + } + dl := newDiffLayer(parent, common.Hash{}, nil, storage) + + before := dl.memory + list := dl.StorageList(account) + if have, want := len(list), len(slots); have != want { + t.Fatalf("StorageList length mismatch: have %d, want %d", have, want) + } + expectedDelta := uint64(len(list)*common.HashLength + common.HashLength) + if have, want := dl.memory-before, expectedDelta; have != want { + t.Fatalf("StorageList memory delta mismatch: have %d, want %d", have, want) + } + + before = dl.memory + _ = dl.StorageList(account) + if dl.memory != before { + t.Fatalf("StorageList changed memory on cached call: have %d, want %d", dl.memory, before) + } +} + func emptyLayer() *diskLayer { return &diskLayer{ diskdb: memorydb.New(), From b2843a11d680ddef536a19025e6b9b3d556cbd1f Mon Sep 17 00:00:00 2001 From: Marco Munizaga Date: Tue, 30 Dec 2025 17:48:50 -0800 Subject: [PATCH 210/277] eth/catalyst: implement getBlobsV3 (#33404) This is used by cell-level dissemination (aka partial messages) to give the CL all blobs the EL knows about and let CL communicate efficiently about any other missing blobs. In other words, partial responses from the EL is useful now. 
See the related (closed) PR: https://github.com/ethereum/execution-apis/pull/674 and the new PR: https://github.com/ethereum/execution-apis/pull/719 --- eth/catalyst/api.go | 56 +++++++++++++++++++++++++++++----------- eth/catalyst/api_test.go | 29 +++++++++++++-------- eth/catalyst/metrics.go | 12 ++++++--- 3 files changed, 67 insertions(+), 30 deletions(-) diff --git a/eth/catalyst/api.go b/eth/catalyst/api.go index cc9086b091..0ab785bab7 100644 --- a/eth/catalyst/api.go +++ b/eth/catalyst/api.go @@ -553,6 +553,23 @@ func (api *ConsensusAPI) GetBlobsV2(hashes []common.Hash) ([]*engine.BlobAndProo if api.config().LatestFork(head.Time) < forks.Osaka { return nil, nil } + return api.getBlobs(hashes, true) +} + +// GetBlobsV3 returns a set of blobs from the transaction pool. Same as +// GetBlobsV2, except will return partial responses in case there is a missing +// blob. +func (api *ConsensusAPI) GetBlobsV3(hashes []common.Hash) ([]*engine.BlobAndProofV2, error) { + head := api.eth.BlockChain().CurrentHeader() + if api.config().LatestFork(head.Time) < forks.Osaka { + return nil, nil + } + return api.getBlobs(hashes, false) +} + +// getBlobs returns all available blobs. In v2, partial responses are not allowed, +// while v3 supports partial responses. 
+func (api *ConsensusAPI) getBlobs(hashes []common.Hash, v2 bool) ([]*engine.BlobAndProofV2, error) { if len(hashes) > 128 { return nil, engine.TooLargeRequest.With(fmt.Errorf("requested blob count too large: %v", len(hashes))) } @@ -560,28 +577,30 @@ func (api *ConsensusAPI) GetBlobsV2(hashes []common.Hash) ([]*engine.BlobAndProo getBlobsRequestedCounter.Inc(int64(len(hashes))) getBlobsAvailableCounter.Inc(int64(available)) - // Optimization: check first if all blobs are available, if not, return empty response - if available != len(hashes) { - getBlobsV2RequestMiss.Inc(1) + // Short circuit if partial response is not allowed + if v2 && available != len(hashes) { + getBlobsRequestMiss.Inc(1) return nil, nil } - + // Retrieve blobs from the pool. This operation is expensive and may involve + // heavy disk I/O. blobs, _, proofs, err := api.eth.BlobTxPool().GetBlobs(hashes, types.BlobSidecarVersion1) if err != nil { return nil, engine.InvalidParams.With(err) } - - // To comply with API spec, check again that we really got all data needed - for _, blob := range blobs { - if blob == nil { - getBlobsV2RequestMiss.Inc(1) - return nil, nil - } - } - getBlobsV2RequestHit.Inc(1) - + // Validate the blobs from the pool and assemble the response res := make([]*engine.BlobAndProofV2, len(hashes)) - for i := 0; i < len(blobs); i++ { + for i := range blobs { + // The blob has been evicted since the last AvailableBlobs call. + // Return null if partial response is not allowed. 
+ if blobs[i] == nil { + if !v2 { + continue + } else { + getBlobsRequestMiss.Inc(1) + return nil, nil + } + } var cellProofs []hexutil.Bytes for _, proof := range proofs[i] { cellProofs = append(cellProofs, proof[:]) @@ -591,6 +610,13 @@ func (api *ConsensusAPI) GetBlobsV2(hashes []common.Hash) ([]*engine.BlobAndProo CellProofs: cellProofs, } } + if len(res) == len(hashes) { + getBlobsRequestCompleteHit.Inc(1) + } else if len(res) > 0 { + getBlobsRequestPartialHit.Inc(1) + } else { + getBlobsRequestMiss.Inc(1) + } return res, nil } diff --git a/eth/catalyst/api_test.go b/eth/catalyst/api_test.go index a023962b81..4d7246d4ed 100644 --- a/eth/catalyst/api_test.go +++ b/eth/catalyst/api_test.go @@ -2016,7 +2016,7 @@ func TestGetBlobsV1AfterOsakaFork(t *testing.T) { } } -func TestGetBlobsV2(t *testing.T) { +func TestGetBlobsV2And3(t *testing.T) { n, api := newGetBlobEnv(t, 1) defer n.Close() @@ -2045,7 +2045,8 @@ func TestGetBlobsV2(t *testing.T) { }, } for i, suite := range suites { - runGetBlobsV2(t, api, suite.start, suite.limit, suite.fillRandom, fmt.Sprintf("suite=%d", i)) + runGetBlobs(t, api.GetBlobsV2, suite.start, suite.limit, suite.fillRandom, false, fmt.Sprintf("GetBlobsV2 suite=%d", i)) + runGetBlobs(t, api.GetBlobsV3, suite.start, suite.limit, suite.fillRandom, true, fmt.Sprintf("GetBlobsV3 suite=%d %v", i, suite)) } } @@ -2060,22 +2061,20 @@ func BenchmarkGetBlobsV2(b *testing.B) { name := fmt.Sprintf("blobs=%d", blobs) b.Run(name, func(b *testing.B) { for b.Loop() { - runGetBlobsV2(b, api, 0, blobs, false, name) + runGetBlobs(b, api.GetBlobsV2, 0, blobs, false, false, name) } }) } } -func runGetBlobsV2(t testing.TB, api *ConsensusAPI, start, limit int, fillRandom bool, name string) { +type getBlobsFn func(hashes []common.Hash) ([]*engine.BlobAndProofV2, error) + +func runGetBlobs(t testing.TB, getBlobs getBlobsFn, start, limit int, fillRandom bool, expectPartialResponse bool, name string) { // Fill the request for retrieving blobs var ( vhashes 
[]common.Hash expect []*engine.BlobAndProofV2 ) - // fill missing blob - if fillRandom { - vhashes = append(vhashes, testrand.Hash()) - } for j := start; j < limit; j++ { vhashes = append(vhashes, testBlobVHashes[j]) var cellProofs []hexutil.Bytes @@ -2087,13 +2086,21 @@ func runGetBlobsV2(t testing.TB, api *ConsensusAPI, start, limit int, fillRandom CellProofs: cellProofs, }) } - result, err := api.GetBlobsV2(vhashes) + // fill missing blob + if fillRandom { + vhashes = append(vhashes, testrand.Hash()) + } + result, err := getBlobs(vhashes) if err != nil { t.Errorf("Unexpected error for case %s, %v", name, err) } - // null is responded if any blob is missing if fillRandom { - expect = nil + if expectPartialResponse { + expect = append(expect, nil) + } else { + // Nil is expected if getBlobs can not return a partial response + expect = nil + } } if !reflect.DeepEqual(result, expect) { t.Fatalf("Unexpected result for case %s", name) diff --git a/eth/catalyst/metrics.go b/eth/catalyst/metrics.go index d0a733a22b..01a24191b0 100644 --- a/eth/catalyst/metrics.go +++ b/eth/catalyst/metrics.go @@ -25,9 +25,13 @@ var ( // Number of blobs requested via getBlobsV2 that are present in the blobpool getBlobsAvailableCounter = metrics.NewRegisteredCounter("engine/getblobs/available", nil) - // Number of times getBlobsV2 responded with “hit” - getBlobsV2RequestHit = metrics.NewRegisteredCounter("engine/getblobs/hit", nil) + // Number of times getBlobsV2/V3 responded with all blobs + getBlobsRequestCompleteHit = metrics.NewRegisteredCounter("engine/getblobs/hit", nil) - // Number of times getBlobsV2 responded with “miss” - getBlobsV2RequestMiss = metrics.NewRegisteredCounter("engine/getblobs/miss", nil) + // Number of times getBlobsV2/V3 responded with no blobs. V2 will return no + // blobs if it doesn't have all the blobs (all or nothing). 
+ getBlobsRequestMiss = metrics.NewRegisteredCounter("engine/getblobs/miss", nil) + + // Number of times getBlobsV3 responded with some, but not all, blobs + getBlobsRequestPartialHit = metrics.NewRegisteredCounter("engine/getblobs/partial", nil) ) From 32fea008d876dbd443b8265cb4fe3f3d07d2d620 Mon Sep 17 00:00:00 2001 From: shhhh Date: Wed, 31 Dec 2025 11:32:44 +0530 Subject: [PATCH 211/277] core/blockchain.go: cleanup finalized block on rewind in setHeadBeyondRoot (#33486) Fix #33390 `setHeadBeyondRoot` was failing to invalidate finalized blocks because it compared against the original head instead of the rewound root. This fix updates the comparison to use the post-rewind block number, preventing the node from reporting a finalized block that no longer exists. Also added relevant test cases for it. --- core/blockchain.go | 5 +++-- core/blockchain_test.go | 42 +++++++++++++++++++++++++++++++++++++++++ 2 files changed, 45 insertions(+), 2 deletions(-) diff --git a/core/blockchain.go b/core/blockchain.go index 858eceb630..c7647ee7b4 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -1105,11 +1105,12 @@ func (bc *BlockChain) setHeadBeyondRoot(head uint64, time uint64, root common.Ha bc.txLookupCache.Purge() // Clear safe block, finalized block if needed - if safe := bc.CurrentSafeBlock(); safe != nil && head < safe.Number.Uint64() { + headBlock := bc.CurrentBlock() + if safe := bc.CurrentSafeBlock(); safe != nil && headBlock.Number.Uint64() < safe.Number.Uint64() { log.Warn("SetHead invalidated safe block") bc.SetSafe(nil) } - if finalized := bc.CurrentFinalBlock(); finalized != nil && head < finalized.Number.Uint64() { + if finalized := bc.CurrentFinalBlock(); finalized != nil && headBlock.Number.Uint64() < finalized.Number.Uint64() { log.Error("SetHead invalidated finalized block") bc.SetFinalized(nil) } diff --git a/core/blockchain_test.go b/core/blockchain_test.go index 3e3053d9bf..73ffce93fb 100644 --- a/core/blockchain_test.go +++ 
b/core/blockchain_test.go @@ -4515,3 +4515,45 @@ func TestGetCanonicalReceipt(t *testing.T) { } } } + +// TestSetHeadBeyondRootFinalizedBug tests the issue where the finalized block +// is not cleared when rewinding past it using setHeadBeyondRoot. +func TestSetHeadBeyondRootFinalizedBug(t *testing.T) { + // Create a clean blockchain with 100 blocks using PathScheme (PBSS) + _, _, blockchain, err := newCanonical(ethash.NewFaker(), 100, true, rawdb.PathScheme) + if err != nil { + t.Fatalf("failed to create pristine chain: %v", err) + } + defer blockchain.Stop() + + // Set the "Finalized" marker to the current Head (Block 100) + headBlock := blockchain.CurrentBlock() + if headBlock.Number.Uint64() != 100 { + t.Fatalf("Setup failed: expected head 100, got %d", headBlock.Number.Uint64()) + } + blockchain.SetFinalized(headBlock) + + // Verify setup + if blockchain.CurrentFinalBlock().Number.Uint64() != 100 { + t.Fatalf("Setup failed: Finalized block should be 100") + } + targetBlock := blockchain.GetBlockByNumber(50) + + // Call setHeadBeyondRoot with: + // head = 100 + // repair = true + if _, err := blockchain.setHeadBeyondRoot(100, 0, targetBlock.Root(), true); err != nil { + t.Fatalf("Failed to rewind: %v", err) + } + + currentFinal := blockchain.CurrentFinalBlock() + currentHead := blockchain.CurrentBlock().Number.Uint64() + + // The previous finalized block (100) is now invalid because we rewound to 50. + // The function should have cleared the finalized marker (set to nil). 
+ if currentFinal != nil && currentFinal.Number.Uint64() > currentHead { + t.Errorf("Chain Head: %d , Finalized Block: %d , Finalized block was >= head block.", + currentHead, + currentFinal.Number.Uint64()) + } +} From b635e0632ce675be3d7cc0b498e08df8dc6346d6 Mon Sep 17 00:00:00 2001 From: rjl493456442 Date: Thu, 1 Jan 2026 02:52:25 +0800 Subject: [PATCH 212/277] eth/fetcher: improve the condition to stall peer in tx fetcher (#32725) Signed-off-by: Csaba Kiraly Co-authored-by: Csaba Kiraly --- eth/fetcher/tx_fetcher.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/eth/fetcher/tx_fetcher.go b/eth/fetcher/tx_fetcher.go index f024f3aeba..78e791f32b 100644 --- a/eth/fetcher/tx_fetcher.go +++ b/eth/fetcher/tx_fetcher.go @@ -352,9 +352,9 @@ func (f *TxFetcher) Enqueue(peer string, txs []*types.Transaction, direct bool) otherRejectMeter.Mark(otherreject) // If 'other reject' is >25% of the deliveries in any batch, sleep a bit. - if otherreject > addTxsBatchSize/4 { + if otherreject > int64((len(batch)+3)/4) { + log.Debug("Peer delivering stale or invalid transactions", "peer", peer, "rejected", otherreject) time.Sleep(200 * time.Millisecond) - log.Debug("Peer delivering stale transactions", "peer", peer, "rejected", otherreject) } } select { From de5ea2ffd891c603b029d0080ab4626ce81dd91c Mon Sep 17 00:00:00 2001 From: Mask Weller Date: Sun, 4 Jan 2026 13:47:28 +0700 Subject: [PATCH 213/277] core/rawdb: add trienode freezer support to InspectFreezerTable (#33515) Adds missing trienode freezer case to InspectFreezerTable, making it consistent with InspectFreezer which already supports it. 
Co-authored-by: m6xwzzz --- core/rawdb/ancient_utils.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/core/rawdb/ancient_utils.go b/core/rawdb/ancient_utils.go index 7af3d2e197..0ed974b745 100644 --- a/core/rawdb/ancient_utils.go +++ b/core/rawdb/ancient_utils.go @@ -149,6 +149,8 @@ func InspectFreezerTable(ancient string, freezerName string, tableName string, s path, tables = resolveChainFreezerDir(ancient), chainFreezerTableConfigs case MerkleStateFreezerName, VerkleStateFreezerName: path, tables = filepath.Join(ancient, freezerName), stateFreezerTableConfigs + case MerkleTrienodeFreezerName, VerkleTrienodeFreezerName: + path, tables = filepath.Join(ancient, freezerName), trienodeFreezerTableConfigs default: return fmt.Errorf("unknown freezer, supported ones: %v", freezers) } From a8a4804895229d005c75a77ec902b730588b4014 Mon Sep 17 00:00:00 2001 From: Andrew Davis <1709934+Savid@users.noreply.github.com> Date: Tue, 6 Jan 2026 03:49:30 +1100 Subject: [PATCH 214/277] ethstats: report newPayload processing time to stats server (#33395) Add NewPayloadEvent to track engine API newPayload block processing times and report them to ethstats. This enables monitoring of block processing performance. 
https://notes.ethereum.org/@savid/block-observability related: https://github.com/ethereum/go-ethereum/pull/33231 --------- Co-authored-by: MariusVanDerWijden --- core/blockchain.go | 1 + core/blockchain_reader.go | 10 +++++++ core/events.go | 10 +++++++ eth/api_backend.go | 5 ++++ eth/catalyst/api.go | 10 +++++++ ethstats/ethstats.go | 60 ++++++++++++++++++++++++++++++++++----- 6 files changed, 89 insertions(+), 7 deletions(-) diff --git a/core/blockchain.go b/core/blockchain.go index c7647ee7b4..39969d96a6 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -311,6 +311,7 @@ type BlockChain struct { chainHeadFeed event.Feed logsFeed event.Feed blockProcFeed event.Feed + newPayloadFeed event.Feed // Feed for engine API newPayload events blockProcCounter int32 scope event.SubscriptionScope genesisBlock *types.Block diff --git a/core/blockchain_reader.go b/core/blockchain_reader.go index 4894523b0e..ee15c152c4 100644 --- a/core/blockchain_reader.go +++ b/core/blockchain_reader.go @@ -522,3 +522,13 @@ func (bc *BlockChain) SubscribeLogsEvent(ch chan<- []*types.Log) event.Subscript func (bc *BlockChain) SubscribeBlockProcessingEvent(ch chan<- bool) event.Subscription { return bc.scope.Track(bc.blockProcFeed.Subscribe(ch)) } + +// SubscribeNewPayloadEvent registers a subscription for NewPayloadEvent. +func (bc *BlockChain) SubscribeNewPayloadEvent(ch chan<- NewPayloadEvent) event.Subscription { + return bc.scope.Track(bc.newPayloadFeed.Subscribe(ch)) +} + +// SendNewPayloadEvent sends a NewPayloadEvent to subscribers. 
+func (bc *BlockChain) SendNewPayloadEvent(ev NewPayloadEvent) { + bc.newPayloadFeed.Send(ev) +} diff --git a/core/events.go b/core/events.go index ef0de32426..ed853f1790 100644 --- a/core/events.go +++ b/core/events.go @@ -17,6 +17,9 @@ package core import ( + "time" + + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" ) @@ -35,3 +38,10 @@ type ChainEvent struct { type ChainHeadEvent struct { Header *types.Header } + +// NewPayloadEvent is posted when engine_newPayloadVX processes a block. +type NewPayloadEvent struct { + Hash common.Hash + Number uint64 + ProcessingTime time.Duration +} diff --git a/eth/api_backend.go b/eth/api_backend.go index 766a99fc1e..3f826b7861 100644 --- a/eth/api_backend.go +++ b/eth/api_backend.go @@ -315,6 +315,11 @@ func (b *EthAPIBackend) SubscribeChainHeadEvent(ch chan<- core.ChainHeadEvent) e return b.eth.BlockChain().SubscribeChainHeadEvent(ch) } +// SubscribeNewPayloadEvent registers a subscription for NewPayloadEvent. 
+func (b *EthAPIBackend) SubscribeNewPayloadEvent(ch chan<- core.NewPayloadEvent) event.Subscription { + return b.eth.BlockChain().SubscribeNewPayloadEvent(ch) +} + func (b *EthAPIBackend) SubscribeLogsEvent(ch chan<- []*types.Log) event.Subscription { return b.eth.BlockChain().SubscribeLogsEvent(ch) } diff --git a/eth/catalyst/api.go b/eth/catalyst/api.go index 0ab785bab7..e6ecf4ff6a 100644 --- a/eth/catalyst/api.go +++ b/eth/catalyst/api.go @@ -30,6 +30,7 @@ import ( "github.com/ethereum/go-ethereum/beacon/engine" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/eth" @@ -787,7 +788,9 @@ func (api *ConsensusAPI) newPayload(params engine.ExecutableData, versionedHashe return engine.PayloadStatusV1{Status: engine.ACCEPTED}, nil } log.Trace("Inserting block without sethead", "hash", block.Hash(), "number", block.Number()) + start := time.Now() proofs, err := api.eth.BlockChain().InsertBlockWithoutSetHead(block, witness) + processingTime := time.Since(start) if err != nil { log.Warn("NewPayload: inserting block failed", "error", err) @@ -800,6 +803,13 @@ func (api *ConsensusAPI) newPayload(params engine.ExecutableData, versionedHashe } hash := block.Hash() + // Emit NewPayloadEvent for ethstats reporting + api.eth.BlockChain().SendNewPayloadEvent(core.NewPayloadEvent{ + Hash: hash, + Number: block.NumberU64(), + ProcessingTime: processingTime, + }) + // If witness collection was requested, inject that into the result too var ow *hexutil.Bytes if proofs != nil { diff --git a/ethstats/ethstats.go b/ethstats/ethstats.go index b6191baa12..c17e225165 100644 --- a/ethstats/ethstats.go +++ b/ethstats/ethstats.go @@ -63,6 +63,7 @@ const ( type backend interface { SubscribeChainHeadEvent(ch chan<- core.ChainHeadEvent) event.Subscription SubscribeNewTxsEvent(ch chan<- 
core.NewTxsEvent) event.Subscription + SubscribeNewPayloadEvent(ch chan<- core.NewPayloadEvent) event.Subscription CurrentHeader() *types.Header HeaderByNumber(ctx context.Context, number rpc.BlockNumber) (*types.Header, error) Stats() (pending int, queued int) @@ -92,8 +93,9 @@ type Service struct { pongCh chan struct{} // Pong notifications are fed into this channel histCh chan []uint64 // History request block numbers are fed into this channel - headSub event.Subscription - txSub event.Subscription + headSub event.Subscription + txSub event.Subscription + newPayloadSub event.Subscription } // connWrapper is a wrapper to prevent concurrent-write or concurrent-read on the @@ -198,7 +200,9 @@ func (s *Service) Start() error { s.headSub = s.backend.SubscribeChainHeadEvent(chainHeadCh) txEventCh := make(chan core.NewTxsEvent, txChanSize) s.txSub = s.backend.SubscribeNewTxsEvent(txEventCh) - go s.loop(chainHeadCh, txEventCh) + newPayloadCh := make(chan core.NewPayloadEvent, chainHeadChanSize) + s.newPayloadSub = s.backend.SubscribeNewPayloadEvent(newPayloadCh) + go s.loop(chainHeadCh, txEventCh, newPayloadCh) log.Info("Stats daemon started") return nil @@ -208,18 +212,20 @@ func (s *Service) Start() error { func (s *Service) Stop() error { s.headSub.Unsubscribe() s.txSub.Unsubscribe() + s.newPayloadSub.Unsubscribe() log.Info("Stats daemon stopped") return nil } // loop keeps trying to connect to the netstats server, reporting chain events // until termination. 
-func (s *Service) loop(chainHeadCh chan core.ChainHeadEvent, txEventCh chan core.NewTxsEvent) { +func (s *Service) loop(chainHeadCh chan core.ChainHeadEvent, txEventCh chan core.NewTxsEvent, newPayloadCh chan core.NewPayloadEvent) { // Start a goroutine that exhausts the subscriptions to avoid events piling up var ( - quitCh = make(chan struct{}) - headCh = make(chan *types.Header, 1) - txCh = make(chan struct{}, 1) + quitCh = make(chan struct{}) + headCh = make(chan *types.Header, 1) + txCh = make(chan struct{}, 1) + newPayloadEvCh = make(chan core.NewPayloadEvent, 1) ) go func() { var lastTx mclock.AbsTime @@ -246,11 +252,20 @@ func (s *Service) loop(chainHeadCh chan core.ChainHeadEvent, txEventCh chan core default: } + // Notify of new payload events, but drop if too frequent + case ev := <-newPayloadCh: + select { + case newPayloadEvCh <- ev: + default: + } + // node stopped case <-s.txSub.Err(): break HandleLoop case <-s.headSub.Err(): break HandleLoop + case <-s.newPayloadSub.Err(): + break HandleLoop } } close(quitCh) @@ -336,6 +351,10 @@ func (s *Service) loop(chainHeadCh chan core.ChainHeadEvent, txEventCh chan core if err = s.reportPending(conn); err != nil { log.Warn("Post-block transaction stats report failed", "err", err) } + case ev := <-newPayloadEvCh: + if err = s.reportNewPayload(conn, ev); err != nil { + log.Warn("New payload stats report failed", "err", err) + } case <-txCh: if err = s.reportPending(conn); err != nil { log.Warn("Transaction stats report failed", "err", err) @@ -600,6 +619,33 @@ func (s uncleStats) MarshalJSON() ([]byte, error) { return []byte("[]"), nil } +// newPayloadStats is the information to report about new payload events. +type newPayloadStats struct { + Number uint64 `json:"number"` + Hash common.Hash `json:"hash"` + ProcessingTime uint64 `json:"processingTime"` // nanoseconds +} + +// reportNewPayload reports a new payload event to the stats server. 
+func (s *Service) reportNewPayload(conn *connWrapper, ev core.NewPayloadEvent) error { + details := &newPayloadStats{ + Number: ev.Number, + Hash: ev.Hash, + ProcessingTime: uint64(ev.ProcessingTime.Nanoseconds()), + } + + log.Trace("Sending new payload to ethstats", "number", details.Number, "hash", details.Hash) + + stats := map[string]interface{}{ + "id": s.node, + "block": details, + } + report := map[string][]interface{}{ + "emit": {"block_new_payload", stats}, + } + return conn.WriteJSON(report) +} + // reportBlock retrieves the current chain head and reports it to the stats server. func (s *Service) reportBlock(conn *connWrapper, header *types.Header) error { // Gather the block details from the header or block chain From eaaa5b716dcf97e94eb17a1469a7385a7101ffab Mon Sep 17 00:00:00 2001 From: rjl493456442 Date: Tue, 6 Jan 2026 15:09:15 +0800 Subject: [PATCH 215/277] core: re-organize the stats category (#33525) Check out https://hackmd.io/dg7rizTyTXuCf2LSa2LsyQ for more details --- core/blockchain_stats.go | 29 +++++++++++++++-------------- 1 file changed, 15 insertions(+), 14 deletions(-) diff --git a/core/blockchain_stats.go b/core/blockchain_stats.go index d52426d574..b6e9c614c5 100644 --- a/core/blockchain_stats.go +++ b/core/blockchain_stats.go @@ -115,26 +115,31 @@ func (s *ExecuteStats) logSlow(block *types.Block, slowBlockThreshold time.Durat Block: %v (%#x) txs: %d, mgasps: %.2f, elapsed: %v EVM execution: %v + Validation: %v + Account hash: %v + Storage hash: %v + State read: %v Account read: %v(%d) Storage read: %v(%d) Code read: %v(%d) -State hash: %v - Account hash: %v - Storage hash: %v +State write: %v Trie commit: %v - -DB write: %v State write: %v Block write: %v %s ############################## `, block.Number(), block.Hash(), len(block.Transactions()), s.MgasPerSecond, common.PrettyDuration(s.TotalTime), + // EVM execution common.PrettyDuration(s.Execution), - common.PrettyDuration(s.Validation+s.CrossValidation), + + // Block validation 
+ common.PrettyDuration(s.Validation+s.CrossValidation+s.AccountHashes+s.AccountUpdates+s.StorageUpdates), + common.PrettyDuration(s.AccountHashes+s.AccountUpdates), + common.PrettyDuration(s.StorageUpdates), // State read common.PrettyDuration(s.AccountReads+s.StorageReads+s.CodeReads), @@ -142,19 +147,15 @@ DB write: %v common.PrettyDuration(s.StorageReads), s.StorageLoaded, common.PrettyDuration(s.CodeReads), s.CodeLoaded, - // State hash - common.PrettyDuration(s.AccountHashes+s.AccountUpdates+s.StorageUpdates+max(s.AccountCommits, s.StorageCommits)), - common.PrettyDuration(s.AccountHashes+s.AccountUpdates), - common.PrettyDuration(s.StorageUpdates), + // State write + common.PrettyDuration(max(s.AccountCommits, s.StorageCommits)+s.TrieDBCommit+s.SnapshotCommit+s.BlockWrite), common.PrettyDuration(max(s.AccountCommits, s.StorageCommits)), - - // Database commit - common.PrettyDuration(s.TrieDBCommit+s.SnapshotCommit+s.BlockWrite), common.PrettyDuration(s.TrieDBCommit+s.SnapshotCommit), common.PrettyDuration(s.BlockWrite), // cache statistics - s.StateReadCacheStats) + s.StateReadCacheStats, + ) for _, line := range strings.Split(msg, "\n") { if line == "" { continue From 710008450f5cbb292ea2ee07b9f82f222e5a24d2 Mon Sep 17 00:00:00 2001 From: Csaba Kiraly Date: Wed, 7 Jan 2026 02:52:50 +0100 Subject: [PATCH 216/277] eth: txs fetch/send log at trace level only (#33541) This logging was too intensive at debug level, it is better to have it at trace level only. 
Signed-off-by: Csaba Kiraly --- eth/handler.go | 2 +- eth/protocols/eth/peer.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/eth/handler.go b/eth/handler.go index 0d07e88c7a..46634cae88 100644 --- a/eth/handler.go +++ b/eth/handler.go @@ -509,7 +509,7 @@ func (h *handler) BroadcastTransactions(txs types.Transactions) { annCount += len(hashes) peer.AsyncSendPooledTransactionHashes(hashes) } - log.Debug("Distributed transactions", "plaintxs", len(txs)-blobTxs-largeTxs, "blobtxs", blobTxs, "largetxs", largeTxs, + log.Trace("Distributed transactions", "plaintxs", len(txs)-blobTxs-largeTxs, "blobtxs", blobTxs, "largetxs", largeTxs, "bcastpeers", len(txset), "bcastcount", directCount, "annpeers", len(annos), "anncount", annCount) } diff --git a/eth/protocols/eth/peer.go b/eth/protocols/eth/peer.go index 40c54a3570..df20c672c0 100644 --- a/eth/protocols/eth/peer.go +++ b/eth/protocols/eth/peer.go @@ -341,7 +341,7 @@ func (p *Peer) RequestReceipts(hashes []common.Hash, sink chan *Response) (*Requ // RequestTxs fetches a batch of transactions from a remote node. 
func (p *Peer) RequestTxs(hashes []common.Hash) error { - p.Log().Debug("Fetching batch of transactions", "count", len(hashes)) + p.Log().Trace("Fetching batch of transactions", "count", len(hashes)) id := rand.Uint64() requestTracker.Track(p.id, p.version, GetPooledTransactionsMsg, PooledTransactionsMsg, id) From 957a3602d98ab84649c67aec38945e136a59b7f3 Mon Sep 17 00:00:00 2001 From: cui Date: Wed, 7 Jan 2026 10:02:27 +0800 Subject: [PATCH 217/277] core/vm: avoid escape to heap (#33537) --- core/vm/contracts.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/core/vm/contracts.go b/core/vm/contracts.go index 00ddbebd6b..867746acc8 100644 --- a/core/vm/contracts.go +++ b/core/vm/contracts.go @@ -299,11 +299,11 @@ func (c *ecrecover) Run(input []byte) ([]byte, error) { } // We must make sure not to modify the 'input', so placing the 'v' along with // the signature needs to be done on a new allocation - sig := make([]byte, 65) - copy(sig, input[64:128]) + var sig [65]byte + copy(sig[:], input[64:128]) sig[64] = v // v needs to be at the end for libsecp256k1 - pubKey, err := crypto.Ecrecover(input[:32], sig) + pubKey, err := crypto.Ecrecover(input[:32], sig[:]) // make sure the public key is a valid one if err != nil { return nil, nil From 01b39c96bfc38e4bd1e4d04bb0ecda831e847b7e Mon Sep 17 00:00:00 2001 From: Ng Wei Han <47109095+weiihann@users.noreply.github.com> Date: Thu, 8 Jan 2026 11:07:19 +0800 Subject: [PATCH 218/277] core/state, core/tracing: new state update hook (#33490) ### Description Add a new `OnStateUpdate` hook which gets invoked after state is committed. ### Rationale For our particular use case, we need to obtain the state size metrics at every single block when fully syncing from genesis. With the current state sizer, whenever the node is stopped, the background process must be freshly initialized. 
During this re-initialization, it can skip some blocks while the node continues executing blocks, causing gaps in the recorded metrics. Using this state update hook allows us to customize our own data persistence logic, and we would never skip blocks upon node restart. --------- Co-authored-by: Gary Rong --- cmd/geth/chaincmd.go | 2 +- core/blockchain.go | 39 +++++-- core/chain_makers.go | 2 +- core/genesis.go | 38 +++++-- core/genesis_test.go | 10 +- core/headerchain_test.go | 2 +- core/state/state_object.go | 6 + core/state/state_sizer.go | 2 +- core/state/statedb.go | 20 ++-- core/state/stateupdate.go | 181 ++++++++++++++++++++++++++++-- core/tracing/hooks.go | 57 ++++++++++ eth/filters/filter_system_test.go | 2 +- eth/filters/filter_test.go | 4 +- tests/block_test_util.go | 2 +- 14 files changed, 309 insertions(+), 58 deletions(-) diff --git a/cmd/geth/chaincmd.go b/cmd/geth/chaincmd.go index e535d7d892..55316c14ab 100644 --- a/cmd/geth/chaincmd.go +++ b/cmd/geth/chaincmd.go @@ -296,7 +296,7 @@ func initGenesis(ctx *cli.Context) error { triedb := utils.MakeTrieDatabase(ctx, stack, chaindb, ctx.Bool(utils.CachePreimagesFlag.Name), false, genesis.IsVerkle()) defer triedb.Close() - _, hash, compatErr, err := core.SetupGenesisBlockWithOverride(chaindb, triedb, genesis, &overrides) + _, hash, compatErr, err := core.SetupGenesisBlockWithOverride(chaindb, triedb, genesis, &overrides, nil) if err != nil { utils.Fatalf("Failed to write genesis block: %v", err) } diff --git a/core/blockchain.go b/core/blockchain.go index 39969d96a6..ba96dc1760 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -367,7 +367,7 @@ func NewBlockChain(db ethdb.Database, genesis *Genesis, engine consensus.Engine, // yet. The corresponding chain config will be returned, either from the // provided genesis or from the locally stored configuration if the genesis // has already been initialized. 
- chainConfig, genesisHash, compatErr, err := SetupGenesisBlockWithOverride(db, triedb, genesis, cfg.Overrides) + chainConfig, genesisHash, compatErr, err := SetupGenesisBlockWithOverride(db, triedb, genesis, cfg.Overrides, cfg.VmConfig.Tracer) if err != nil { return nil, err } @@ -1651,20 +1651,35 @@ func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types. log.Debug("Committed block data", "size", common.StorageSize(batch.ValueSize()), "elapsed", common.PrettyDuration(time.Since(start))) var ( - err error - root common.Hash - isEIP158 = bc.chainConfig.IsEIP158(block.Number()) - isCancun = bc.chainConfig.IsCancun(block.Number(), block.Time()) + err error + root common.Hash + isEIP158 = bc.chainConfig.IsEIP158(block.Number()) + isCancun = bc.chainConfig.IsCancun(block.Number(), block.Time()) + hasStateHook = bc.logger != nil && bc.logger.OnStateUpdate != nil + hasStateSizer = bc.stateSizer != nil ) - if bc.stateSizer == nil { - root, err = statedb.Commit(block.NumberU64(), isEIP158, isCancun) + if hasStateHook || hasStateSizer { + r, update, err := statedb.CommitWithUpdate(block.NumberU64(), isEIP158, isCancun) + if err != nil { + return err + } + if hasStateHook { + trUpdate, err := update.ToTracingUpdate() + if err != nil { + return err + } + bc.logger.OnStateUpdate(trUpdate) + } + if hasStateSizer { + bc.stateSizer.Notify(update) + } + root = r } else { - root, err = statedb.CommitAndTrack(block.NumberU64(), isEIP158, isCancun, bc.stateSizer) + root, err = statedb.Commit(block.NumberU64(), isEIP158, isCancun) + if err != nil { + return err + } } - if err != nil { - return err - } - // If node is running in path mode, skip explicit gc operation // which is unnecessary in this mode. 
if bc.triedb.Scheme() == rawdb.PathScheme { diff --git a/core/chain_makers.go b/core/chain_makers.go index a1e07becba..7ce86b14e9 100644 --- a/core/chain_makers.go +++ b/core/chain_makers.go @@ -481,7 +481,7 @@ func GenerateChainWithGenesis(genesis *Genesis, engine consensus.Engine, n int, } triedb := triedb.NewDatabase(db, triedbConfig) defer triedb.Close() - _, err := genesis.Commit(db, triedb) + _, err := genesis.Commit(db, triedb, nil) if err != nil { panic(err) } diff --git a/core/genesis.go b/core/genesis.go index 7d640c8cae..983ad4c3cb 100644 --- a/core/genesis.go +++ b/core/genesis.go @@ -164,7 +164,7 @@ func hashAlloc(ga *types.GenesisAlloc, isVerkle bool) (common.Hash, error) { // flushAlloc is very similar with hash, but the main difference is all the // generated states will be persisted into the given database. -func flushAlloc(ga *types.GenesisAlloc, triedb *triedb.Database) (common.Hash, error) { +func flushAlloc(ga *types.GenesisAlloc, triedb *triedb.Database, tracer *tracing.Hooks) (common.Hash, error) { emptyRoot := types.EmptyRootHash if triedb.IsVerkle() { emptyRoot = types.EmptyVerkleHash @@ -185,10 +185,26 @@ func flushAlloc(ga *types.GenesisAlloc, triedb *triedb.Database) (common.Hash, e statedb.SetState(addr, key, value) } } - root, err := statedb.Commit(0, false, false) - if err != nil { - return common.Hash{}, err + + var root common.Hash + if tracer != nil && tracer.OnStateUpdate != nil { + r, update, err := statedb.CommitWithUpdate(0, false, false) + if err != nil { + return common.Hash{}, err + } + trUpdate, err := update.ToTracingUpdate() + if err != nil { + return common.Hash{}, err + } + tracer.OnStateUpdate(trUpdate) + root = r + } else { + root, err = statedb.Commit(0, false, false) + if err != nil { + return common.Hash{}, err + } } + // Commit newly generated states into disk if it's not empty. 
if root != emptyRoot { if err := triedb.Commit(root, true); err != nil { @@ -296,10 +312,10 @@ func (o *ChainOverrides) apply(cfg *params.ChainConfig) error { // specify a fork block below the local head block). In case of a conflict, the // error is a *params.ConfigCompatError and the new, unwritten config is returned. func SetupGenesisBlock(db ethdb.Database, triedb *triedb.Database, genesis *Genesis) (*params.ChainConfig, common.Hash, *params.ConfigCompatError, error) { - return SetupGenesisBlockWithOverride(db, triedb, genesis, nil) + return SetupGenesisBlockWithOverride(db, triedb, genesis, nil, nil) } -func SetupGenesisBlockWithOverride(db ethdb.Database, triedb *triedb.Database, genesis *Genesis, overrides *ChainOverrides) (*params.ChainConfig, common.Hash, *params.ConfigCompatError, error) { +func SetupGenesisBlockWithOverride(db ethdb.Database, triedb *triedb.Database, genesis *Genesis, overrides *ChainOverrides, tracer *tracing.Hooks) (*params.ChainConfig, common.Hash, *params.ConfigCompatError, error) { // Copy the genesis, so we can operate on a copy. 
genesis = genesis.copy() // Sanitize the supplied genesis, ensuring it has the associated chain @@ -320,7 +336,7 @@ func SetupGenesisBlockWithOverride(db ethdb.Database, triedb *triedb.Database, g return nil, common.Hash{}, nil, err } - block, err := genesis.Commit(db, triedb) + block, err := genesis.Commit(db, triedb, tracer) if err != nil { return nil, common.Hash{}, nil, err } @@ -348,7 +364,7 @@ func SetupGenesisBlockWithOverride(db ethdb.Database, triedb *triedb.Database, g if hash := genesis.ToBlock().Hash(); hash != ghash { return nil, common.Hash{}, nil, &GenesisMismatchError{ghash, hash} } - block, err := genesis.Commit(db, triedb) + block, err := genesis.Commit(db, triedb, tracer) if err != nil { return nil, common.Hash{}, nil, err } @@ -537,7 +553,7 @@ func (g *Genesis) toBlockWithRoot(root common.Hash) *types.Block { // Commit writes the block and state of a genesis specification to the database. // The block is committed as the canonical head block. -func (g *Genesis) Commit(db ethdb.Database, triedb *triedb.Database) (*types.Block, error) { +func (g *Genesis) Commit(db ethdb.Database, triedb *triedb.Database, tracer *tracing.Hooks) (*types.Block, error) { if g.Number != 0 { return nil, errors.New("can't commit genesis block with number > 0") } @@ -552,7 +568,7 @@ func (g *Genesis) Commit(db ethdb.Database, triedb *triedb.Database) (*types.Blo return nil, errors.New("can't start clique chain without signers") } // flush the data to disk and compute the state root - root, err := flushAlloc(&g.Alloc, triedb) + root, err := flushAlloc(&g.Alloc, triedb, tracer) if err != nil { return nil, err } @@ -578,7 +594,7 @@ func (g *Genesis) Commit(db ethdb.Database, triedb *triedb.Database) (*types.Blo // MustCommit writes the genesis block and state to db, panicking on error. // The block is committed as the canonical head block. 
func (g *Genesis) MustCommit(db ethdb.Database, triedb *triedb.Database) *types.Block { - block, err := g.Commit(db, triedb) + block, err := g.Commit(db, triedb, nil) if err != nil { panic(err) } diff --git a/core/genesis_test.go b/core/genesis_test.go index 1ed475695d..821c71feb9 100644 --- a/core/genesis_test.go +++ b/core/genesis_test.go @@ -88,7 +88,7 @@ func testSetupGenesis(t *testing.T, scheme string) { name: "custom block in DB, genesis == nil", fn: func(db ethdb.Database) (*params.ChainConfig, common.Hash, *params.ConfigCompatError, error) { tdb := triedb.NewDatabase(db, newDbConfig(scheme)) - customg.Commit(db, tdb) + customg.Commit(db, tdb, nil) return SetupGenesisBlock(db, tdb, nil) }, wantHash: customghash, @@ -98,7 +98,7 @@ func testSetupGenesis(t *testing.T, scheme string) { name: "custom block in DB, genesis == sepolia", fn: func(db ethdb.Database) (*params.ChainConfig, common.Hash, *params.ConfigCompatError, error) { tdb := triedb.NewDatabase(db, newDbConfig(scheme)) - customg.Commit(db, tdb) + customg.Commit(db, tdb, nil) return SetupGenesisBlock(db, tdb, DefaultSepoliaGenesisBlock()) }, wantErr: &GenesisMismatchError{Stored: customghash, New: params.SepoliaGenesisHash}, @@ -107,7 +107,7 @@ func testSetupGenesis(t *testing.T, scheme string) { name: "custom block in DB, genesis == hoodi", fn: func(db ethdb.Database) (*params.ChainConfig, common.Hash, *params.ConfigCompatError, error) { tdb := triedb.NewDatabase(db, newDbConfig(scheme)) - customg.Commit(db, tdb) + customg.Commit(db, tdb, nil) return SetupGenesisBlock(db, tdb, DefaultHoodiGenesisBlock()) }, wantErr: &GenesisMismatchError{Stored: customghash, New: params.HoodiGenesisHash}, @@ -116,7 +116,7 @@ func testSetupGenesis(t *testing.T, scheme string) { name: "compatible config in DB", fn: func(db ethdb.Database) (*params.ChainConfig, common.Hash, *params.ConfigCompatError, error) { tdb := triedb.NewDatabase(db, newDbConfig(scheme)) - oldcustomg.Commit(db, tdb) + oldcustomg.Commit(db, tdb, 
nil) return SetupGenesisBlock(db, tdb, &customg) }, wantHash: customghash, @@ -128,7 +128,7 @@ func testSetupGenesis(t *testing.T, scheme string) { // Commit the 'old' genesis block with Homestead transition at #2. // Advance to block #4, past the homestead transition block of customg. tdb := triedb.NewDatabase(db, newDbConfig(scheme)) - oldcustomg.Commit(db, tdb) + oldcustomg.Commit(db, tdb, nil) bc, _ := NewBlockChain(db, &oldcustomg, ethash.NewFullFaker(), DefaultConfig().WithStateScheme(scheme)) defer bc.Stop() diff --git a/core/headerchain_test.go b/core/headerchain_test.go index b51fb8f226..dba04e2cf2 100644 --- a/core/headerchain_test.go +++ b/core/headerchain_test.go @@ -69,7 +69,7 @@ func TestHeaderInsertion(t *testing.T) { db = rawdb.NewMemoryDatabase() gspec = &Genesis{BaseFee: big.NewInt(params.InitialBaseFee), Config: params.AllEthashProtocolChanges} ) - gspec.Commit(db, triedb.NewDatabase(db, nil)) + gspec.Commit(db, triedb.NewDatabase(db, nil), nil) hc, err := NewHeaderChain(db, gspec.Config, ethash.NewFaker(), func() bool { return false }) if err != nil { t.Fatal(err) diff --git a/core/state/state_object.go b/core/state/state_object.go index 411d5fb5b5..3b11553f04 100644 --- a/core/state/state_object.go +++ b/core/state/state_object.go @@ -440,6 +440,12 @@ func (s *stateObject) commit() (*accountUpdate, *trienode.NodeSet, error) { blob: s.code, } s.dirtyCode = false // reset the dirty flag + + if s.origin == nil { + op.code.originHash = types.EmptyCodeHash + } else { + op.code.originHash = common.BytesToHash(s.origin.CodeHash) + } } // Commit storage changes and the associated storage trie s.commitStorage(op) diff --git a/core/state/state_sizer.go b/core/state/state_sizer.go index 3faa750906..fc6781ad93 100644 --- a/core/state/state_sizer.go +++ b/core/state/state_sizer.go @@ -245,7 +245,7 @@ func calSizeStats(update *stateUpdate) (SizeStats, error) { codeExists := make(map[common.Hash]struct{}) for _, code := range update.codes { - if _, ok := 
codeExists[code.hash]; ok || code.exists { + if _, ok := codeExists[code.hash]; ok || code.duplicate { continue } stats.ContractCodes += 1 diff --git a/core/state/statedb.go b/core/state/statedb.go index c239d66233..fbfb02e8e4 100644 --- a/core/state/statedb.go +++ b/core/state/statedb.go @@ -1318,16 +1318,16 @@ func (s *StateDB) commit(deleteEmptyObjects bool, noStorageWiping bool, blockNum // commitAndFlush is a wrapper of commit which also commits the state mutations // to the configured data stores. -func (s *StateDB) commitAndFlush(block uint64, deleteEmptyObjects bool, noStorageWiping bool, dedupCode bool) (*stateUpdate, error) { +func (s *StateDB) commitAndFlush(block uint64, deleteEmptyObjects bool, noStorageWiping bool, deriveCodeFields bool) (*stateUpdate, error) { ret, err := s.commit(deleteEmptyObjects, noStorageWiping, block) if err != nil { return nil, err } - - if dedupCode { - ret.markCodeExistence(s.reader) + if deriveCodeFields { + if err := ret.deriveCodeFields(s.reader); err != nil { + return nil, err + } } - // Commit dirty contract code if any exists if db := s.db.TrieDB().Disk(); db != nil && len(ret.codes) > 0 { batch := db.NewBatch() @@ -1389,14 +1389,14 @@ func (s *StateDB) Commit(block uint64, deleteEmptyObjects bool, noStorageWiping return ret.root, nil } -// CommitAndTrack writes the state mutations and notifies the size tracker of the state changes. -func (s *StateDB) CommitAndTrack(block uint64, deleteEmptyObjects bool, noStorageWiping bool, sizer *SizeTracker) (common.Hash, error) { +// CommitWithUpdate writes the state mutations and returns the state update for +// external processing (e.g., live tracing hooks or size tracker). 
+func (s *StateDB) CommitWithUpdate(block uint64, deleteEmptyObjects bool, noStorageWiping bool) (common.Hash, *stateUpdate, error) { ret, err := s.commitAndFlush(block, deleteEmptyObjects, noStorageWiping, true) if err != nil { - return common.Hash{}, err + return common.Hash{}, nil, err } - sizer.Notify(ret) - return ret.root, nil + return ret.root, ret, nil } // Prepare handles the preparatory steps for executing a state transition with. diff --git a/core/state/stateupdate.go b/core/state/stateupdate.go index c043166cf2..0c1b76b4f8 100644 --- a/core/state/stateupdate.go +++ b/core/state/stateupdate.go @@ -17,18 +17,27 @@ package state import ( + "fmt" "maps" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/tracing" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/trie/trienode" "github.com/ethereum/go-ethereum/triedb" ) -// contractCode represents a contract code with associated metadata. +// contractCode represents contract bytecode along with its associated metadata. type contractCode struct { - hash common.Hash // hash is the cryptographic hash of the contract code. - blob []byte // blob is the binary representation of the contract code. - exists bool // flag whether the code has been existent + hash common.Hash // hash is the cryptographic hash of the current contract code. + blob []byte // blob is the binary representation of the current contract code. + originHash common.Hash // originHash is the cryptographic hash of the code before mutation. + + // Derived fields, populated only when state tracking is enabled. + duplicate bool // duplicate indicates whether the updated code already exists. + originBlob []byte // originBlob is the original binary representation of the contract code. } // accountDelete represents an operation for deleting an Ethereum account. 
@@ -192,21 +201,169 @@ func (sc *stateUpdate) stateSet() *triedb.StateSet { } } -// markCodeExistence determines whether each piece of contract code referenced -// in this state update actually exists. +// deriveCodeFields derives the missing fields of contract code changes +// such as original code value. // -// Note: This operation is expensive and not needed during normal state transitions. -// It is only required when SizeTracker is enabled to produce accurate state -// statistics. -func (sc *stateUpdate) markCodeExistence(reader ContractCodeReader) { +// Note: This operation is expensive and not needed during normal state +// transitions. It is only required when SizeTracker or StateUpdate hook +// is enabled to produce accurate state statistics. +func (sc *stateUpdate) deriveCodeFields(reader ContractCodeReader) error { cache := make(map[common.Hash]bool) for addr, code := range sc.codes { + if code.originHash != types.EmptyCodeHash { + blob, err := reader.Code(addr, code.originHash) + if err != nil { + return err + } + code.originBlob = blob + } if exists, ok := cache[code.hash]; ok { - code.exists = exists + code.duplicate = exists continue } res := reader.Has(addr, code.hash) cache[code.hash] = res - code.exists = res + code.duplicate = res } + return nil +} + +// ToTracingUpdate converts the internal stateUpdate to an exported tracing.StateUpdate. 
+func (sc *stateUpdate) ToTracingUpdate() (*tracing.StateUpdate, error) { + update := &tracing.StateUpdate{ + OriginRoot: sc.originRoot, + Root: sc.root, + BlockNumber: sc.blockNumber, + AccountChanges: make(map[common.Address]*tracing.AccountChange, len(sc.accountsOrigin)), + StorageChanges: make(map[common.Address]map[common.Hash]*tracing.StorageChange), + CodeChanges: make(map[common.Address]*tracing.CodeChange, len(sc.codes)), + TrieChanges: make(map[common.Hash]map[string]*tracing.TrieNodeChange), + } + // Gather all account changes + for addr, oldData := range sc.accountsOrigin { + addrHash := crypto.Keccak256Hash(addr.Bytes()) + newData, exists := sc.accounts[addrHash] + if !exists { + return nil, fmt.Errorf("account %x not found", addr) + } + change := &tracing.AccountChange{} + + if len(oldData) > 0 { + acct, err := types.FullAccount(oldData) + if err != nil { + return nil, err + } + change.Prev = &types.StateAccount{ + Nonce: acct.Nonce, + Balance: acct.Balance, + Root: acct.Root, + CodeHash: acct.CodeHash, + } + } + if len(newData) > 0 { + acct, err := types.FullAccount(newData) + if err != nil { + return nil, err + } + change.New = &types.StateAccount{ + Nonce: acct.Nonce, + Balance: acct.Balance, + Root: acct.Root, + CodeHash: acct.CodeHash, + } + } + update.AccountChanges[addr] = change + } + + // Gather all storage slot changes + for addr, slots := range sc.storagesOrigin { + addrHash := crypto.Keccak256Hash(addr.Bytes()) + subset, exists := sc.storages[addrHash] + if !exists { + return nil, fmt.Errorf("storage %x not found", addr) + } + storageChanges := make(map[common.Hash]*tracing.StorageChange, len(slots)) + + for key, encPrev := range slots { + // Get new value - handle both raw and hashed key formats + var ( + exists bool + encNew []byte + decPrev []byte + decNew []byte + err error + ) + if sc.rawStorageKey { + encNew, exists = subset[crypto.Keccak256Hash(key.Bytes())] + } else { + encNew, exists = subset[key] + } + if !exists { + return nil, 
fmt.Errorf("storage slot %x-%x not found", addr, key) + } + + // Decode the prev and new values + if len(encPrev) > 0 { + _, decPrev, _, err = rlp.Split(encPrev) + if err != nil { + return nil, fmt.Errorf("failed to decode prevValue: %v", err) + } + } + if len(encNew) > 0 { + _, decNew, _, err = rlp.Split(encNew) + if err != nil { + return nil, fmt.Errorf("failed to decode newValue: %v", err) + } + } + storageChanges[key] = &tracing.StorageChange{ + Prev: common.BytesToHash(decPrev), + New: common.BytesToHash(decNew), + } + } + update.StorageChanges[addr] = storageChanges + } + + // Gather all contract code changes + for addr, code := range sc.codes { + change := &tracing.CodeChange{ + New: &tracing.ContractCode{ + Hash: code.hash, + Code: code.blob, + Exists: code.duplicate, + }, + } + if code.originHash != types.EmptyCodeHash { + change.Prev = &tracing.ContractCode{ + Hash: code.originHash, + Code: code.originBlob, + Exists: true, + } + } + update.CodeChanges[addr] = change + } + + // Gather all trie node changes + if sc.nodes != nil { + for owner, subset := range sc.nodes.Sets { + nodeChanges := make(map[string]*tracing.TrieNodeChange, len(subset.Origins)) + for path, oldNode := range subset.Origins { + newNode, exists := subset.Nodes[path] + if !exists { + return nil, fmt.Errorf("node %x-%v not found", owner, path) + } + nodeChanges[path] = &tracing.TrieNodeChange{ + Prev: &trienode.Node{ + Hash: crypto.Keccak256Hash(oldNode), + Blob: oldNode, + }, + New: &trienode.Node{ + Hash: newNode.Hash, + Blob: newNode.Blob, + }, + } + } + update.TrieChanges[owner] = nodeChanges + } + } + return update, nil } diff --git a/core/tracing/hooks.go b/core/tracing/hooks.go index 8e50dc3d8f..d17b94cf9c 100644 --- a/core/tracing/hooks.go +++ b/core/tracing/hooks.go @@ -30,6 +30,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/params" + "github.com/ethereum/go-ethereum/trie/trienode" 
"github.com/holiman/uint256" ) @@ -75,6 +76,56 @@ type BlockEvent struct { Safe *types.Header } +// StateUpdate represents the state mutations resulting from block execution. +// It provides access to account changes, storage changes, and contract code +// deployments with both previous and new values. +type StateUpdate struct { + OriginRoot common.Hash // State root before the update + Root common.Hash // State root after the update + BlockNumber uint64 + + // AccountChanges contains all account state changes keyed by address. + AccountChanges map[common.Address]*AccountChange + + // StorageChanges contains all storage slot changes keyed by address and storage slot key. + StorageChanges map[common.Address]map[common.Hash]*StorageChange + + // CodeChanges contains all contract code changes keyed by address. + CodeChanges map[common.Address]*CodeChange + + // TrieChanges contains trie node mutations keyed by address hash and trie node path. + TrieChanges map[common.Hash]map[string]*TrieNodeChange +} + +// AccountChange represents a change to an account's state. +type AccountChange struct { + Prev *types.StateAccount // nil if account was created + New *types.StateAccount // nil if account was deleted +} + +// StorageChange represents a change to a storage slot. +type StorageChange struct { + Prev common.Hash // previous value (zero if slot was created) + New common.Hash // new value (zero if slot was deleted) +} + +type ContractCode struct { + Hash common.Hash + Code []byte + Exists bool // true if the code was existent +} + +// CodeChange represents a change in contract code of an account. +type CodeChange struct { + Prev *ContractCode // nil if no code existed before + New *ContractCode +} + +type TrieNodeChange struct { + Prev *trienode.Node + New *trienode.Node +} + type ( /* - VM events - @@ -161,6 +212,11 @@ type ( // beacon block root. OnSystemCallEndHook = func() + // StateUpdateHook is called after state is committed for a block. 
+ // It provides access to the complete state mutations including account changes, + // storage changes, trie node mutations, and contract code deployments. + StateUpdateHook = func(update *StateUpdate) + /* - State events - */ @@ -209,6 +265,7 @@ type Hooks struct { OnSystemCallStart OnSystemCallStartHook OnSystemCallStartV2 OnSystemCallStartHookV2 OnSystemCallEnd OnSystemCallEndHook + OnStateUpdate StateUpdateHook // State events OnBalanceChange BalanceChangeHook OnNonceChange NonceChangeHook diff --git a/eth/filters/filter_system_test.go b/eth/filters/filter_system_test.go index e5a1a2b25f..6f97d5b664 100644 --- a/eth/filters/filter_system_test.go +++ b/eth/filters/filter_system_test.go @@ -546,7 +546,7 @@ func TestExceedLogQueryLimit(t *testing.T) { } ) - _, err := gspec.Commit(db, triedb.NewDatabase(db, nil)) + _, err := gspec.Commit(db, triedb.NewDatabase(db, nil), nil) if err != nil { t.Fatal(err) } diff --git a/eth/filters/filter_test.go b/eth/filters/filter_test.go index edec3e027f..f44ada20b1 100644 --- a/eth/filters/filter_test.go +++ b/eth/filters/filter_test.go @@ -205,7 +205,7 @@ func testFilters(t *testing.T, history uint64, noHistory bool) { // Hack: GenerateChainWithGenesis creates a new db. // Commit the genesis manually and use GenerateChain. 
- _, err = gspec.Commit(db, triedb.NewDatabase(db, nil)) + _, err = gspec.Commit(db, triedb.NewDatabase(db, nil), nil) if err != nil { t.Fatal(err) } @@ -426,7 +426,7 @@ func TestRangeLogs(t *testing.T) { BaseFee: big.NewInt(params.InitialBaseFee), } ) - _, err := gspec.Commit(db, triedb.NewDatabase(db, nil)) + _, err := gspec.Commit(db, triedb.NewDatabase(db, nil), nil) if err != nil { t.Fatal(err) } diff --git a/tests/block_test_util.go b/tests/block_test_util.go index 72fd955c8f..4f6ab65c1a 100644 --- a/tests/block_test_util.go +++ b/tests/block_test_util.go @@ -138,7 +138,7 @@ func (t *BlockTest) Run(snapshotter bool, scheme string, witness bool, tracer *t gspec.Config.TerminalTotalDifficulty = big.NewInt(stdmath.MaxInt64) } triedb := triedb.NewDatabase(db, tconf) - gblock, err := gspec.Commit(db, triedb) + gblock, err := gspec.Commit(db, triedb, nil) if err != nil { return err } From 9623dcbca2ddca88253eef5695914a61bbaa2e07 Mon Sep 17 00:00:00 2001 From: rjl493456442 Date: Thu, 8 Jan 2026 11:48:45 +0800 Subject: [PATCH 219/277] core/state: add cache statistics of contract code reader (#33532) --- core/state/database.go | 29 ++++++++++----- core/state/reader.go | 84 +++++++++++++++++++++++++++++------------- 2 files changed, 77 insertions(+), 36 deletions(-) diff --git a/core/state/database.go b/core/state/database.go index 1e8fc9d5c9..4a5547d075 100644 --- a/core/state/database.go +++ b/core/state/database.go @@ -177,8 +177,8 @@ func NewDatabaseForTesting() *CachingDB { return NewDatabase(triedb.NewDatabase(rawdb.NewMemoryDatabase(), nil), nil) } -// Reader returns a state reader associated with the specified state root. -func (db *CachingDB) Reader(stateRoot common.Hash) (Reader, error) { +// StateReader returns a state reader associated with the specified state root. +func (db *CachingDB) StateReader(stateRoot common.Hash) (StateReader, error) { var readers []StateReader // Configure the state reader using the standalone snapshot in hash mode. 
@@ -208,23 +208,32 @@ func (db *CachingDB) Reader(stateRoot common.Hash) (Reader, error) { } readers = append(readers, tr) - combined, err := newMultiStateReader(readers...) + return newMultiStateReader(readers...) +} + +// Reader implements Database, returning a reader associated with the specified +// state root. +func (db *CachingDB) Reader(stateRoot common.Hash) (Reader, error) { + sr, err := db.StateReader(stateRoot) if err != nil { return nil, err } - return newReader(newCachingCodeReader(db.disk, db.codeCache, db.codeSizeCache), combined), nil + return newReader(newCachingCodeReader(db.disk, db.codeCache, db.codeSizeCache), sr), nil } -// ReadersWithCacheStats creates a pair of state readers sharing the same internal cache and -// same backing Reader, but exposing separate statistics. -// and statistics. +// ReadersWithCacheStats creates a pair of state readers that share the same +// underlying state reader and internal state cache, while maintaining separate +// statistics respectively. func (db *CachingDB) ReadersWithCacheStats(stateRoot common.Hash) (ReaderWithStats, ReaderWithStats, error) { - reader, err := db.Reader(stateRoot) + r, err := db.StateReader(stateRoot) if err != nil { return nil, nil, err } - shared := newReaderWithCache(reader) - return newReaderWithCacheStats(shared), newReaderWithCacheStats(shared), nil + sr := newStateReaderWithCache(r) + + ra := newReaderWithStats(sr, newCachingCodeReader(db.disk, db.codeCache, db.codeSizeCache)) + rb := newReaderWithStats(sr, newCachingCodeReader(db.disk, db.codeCache, db.codeSizeCache)) + return ra, rb, nil } // OpenTrie opens the main account trie at a specific root hash. 
diff --git a/core/state/reader.go b/core/state/reader.go index 38228f8453..2db9d1f9b4 100644 --- a/core/state/reader.go +++ b/core/state/reader.go @@ -58,6 +58,13 @@ type ContractCodeReader interface { CodeSize(addr common.Address, codeHash common.Hash) (int, error) } +// ContractCodeReaderWithStats extends ContractCodeReader by adding GetStats to +// expose statistics of code reader. +type ContractCodeReaderWithStats interface { + ContractCodeReader + GetStats() (int64, int64) +} + // StateReader defines the interface for accessing accounts and storage slots // associated with a specific state. // @@ -97,6 +104,8 @@ type ReaderStats struct { AccountCacheMiss int64 StorageCacheHit int64 StorageCacheMiss int64 + ContractCodeHit int64 + ContractCodeMiss int64 } // String implements fmt.Stringer, returning string format statistics. @@ -104,6 +113,7 @@ func (s ReaderStats) String() string { var ( accountCacheHitRate float64 storageCacheHitRate float64 + contractCodeHitRate float64 ) if s.AccountCacheHit > 0 { accountCacheHitRate = float64(s.AccountCacheHit) / float64(s.AccountCacheHit+s.AccountCacheMiss) * 100 @@ -111,9 +121,13 @@ func (s ReaderStats) String() string { if s.StorageCacheHit > 0 { storageCacheHitRate = float64(s.StorageCacheHit) / float64(s.StorageCacheHit+s.StorageCacheMiss) * 100 } + if s.ContractCodeHit > 0 { + contractCodeHitRate = float64(s.ContractCodeHit) / float64(s.ContractCodeHit+s.ContractCodeMiss) * 100 + } msg := fmt.Sprintf("Reader statistics\n") msg += fmt.Sprintf("account: hit: %d, miss: %d, rate: %.2f\n", s.AccountCacheHit, s.AccountCacheMiss, accountCacheHitRate) msg += fmt.Sprintf("storage: hit: %d, miss: %d, rate: %.2f\n", s.StorageCacheHit, s.StorageCacheMiss, storageCacheHitRate) + msg += fmt.Sprintf("code: hit: %d, miss: %d, rate: %.2f\n", s.ContractCodeHit, s.ContractCodeMiss, contractCodeHitRate) return msg } @@ -134,6 +148,10 @@ type cachingCodeReader struct { // they are natively thread-safe. 
codeCache *lru.SizeConstrainedCache[common.Hash, []byte] codeSizeCache *lru.Cache[common.Hash, int] + + // Cache statistics + hit atomic.Int64 // Number of code lookups found in the cache. + miss atomic.Int64 // Number of code lookups not found in the cache. } // newCachingCodeReader constructs the code reader. @@ -150,8 +168,11 @@ func newCachingCodeReader(db ethdb.KeyValueReader, codeCache *lru.SizeConstraine func (r *cachingCodeReader) Code(addr common.Address, codeHash common.Hash) ([]byte, error) { code, _ := r.codeCache.Get(codeHash) if len(code) > 0 { + r.hit.Add(1) return code, nil } + r.miss.Add(1) + code = rawdb.ReadCode(r.db, codeHash) if len(code) > 0 { r.codeCache.Add(codeHash, code) @@ -164,6 +185,7 @@ func (r *cachingCodeReader) Code(addr common.Address, codeHash common.Hash) ([]b // If the contract code doesn't exist, no error will be returned. func (r *cachingCodeReader) CodeSize(addr common.Address, codeHash common.Hash) (int, error) { if cached, ok := r.codeSizeCache.Get(codeHash); ok { + r.hit.Add(1) return cached, nil } code, err := r.Code(addr, codeHash) @@ -180,6 +202,11 @@ func (r *cachingCodeReader) Has(addr common.Address, codeHash common.Hash) bool return len(code) > 0 } +// GetStats returns the cache statistics of the code reader. +func (r *cachingCodeReader) GetStats() (int64, int64) { + return r.hit.Load(), r.miss.Load() +} + // flatReader wraps a database state reader and is safe for concurrent access. type flatReader struct { reader database.StateReader @@ -462,10 +489,10 @@ func newReader(codeReader ContractCodeReader, stateReader StateReader) *reader { } } -// readerWithCache is a wrapper around Reader that maintains additional state caches -// to support concurrent state access. 
+type stateReaderWithCache struct { + StateReader // Previously resolved state entries. accounts map[common.Address]*types.StateAccount @@ -481,11 +508,11 @@ type readerWithCache struct { } } -// newReaderWithCache constructs the reader with local cache. -func newReaderWithCache(reader Reader) *readerWithCache { - r := &readerWithCache{ - Reader: reader, - accounts: make(map[common.Address]*types.StateAccount), +// newStateReaderWithCache constructs the state reader with local cache. +func newStateReaderWithCache(sr StateReader) *stateReaderWithCache { + r := &stateReaderWithCache{ + StateReader: sr, + accounts: make(map[common.Address]*types.StateAccount), } for i := range r.storageBuckets { r.storageBuckets[i].storages = make(map[common.Address]map[common.Hash]common.Hash) @@ -498,7 +525,7 @@ func newReaderWithCache(reader Reader) *readerWithCache { // might be nil if it's not existent. // // An error will be returned if the state is corrupted in the underlying reader. -func (r *readerWithCache) account(addr common.Address) (*types.StateAccount, bool, error) { +func (r *stateReaderWithCache) account(addr common.Address) (*types.StateAccount, bool, error) { // Try to resolve the requested account in the local cache r.accountLock.RLock() acct, ok := r.accounts[addr] @@ -507,7 +534,7 @@ func (r *readerWithCache) account(addr common.Address) (*types.StateAccount, boo return acct, true, nil } // Try to resolve the requested account from the underlying reader - acct, err := r.Reader.Account(addr) + acct, err := r.StateReader.Account(addr) if err != nil { return nil, false, err } @@ -521,7 +548,7 @@ func (r *readerWithCache) account(addr common.Address) (*types.StateAccount, boo // The returned account might be nil if it's not existent. // // An error will be returned if the state is corrupted in the underlying reader. 
-func (r *readerWithCache) Account(addr common.Address) (*types.StateAccount, error) { +func (r *stateReaderWithCache) Account(addr common.Address) (*types.StateAccount, error) { account, _, err := r.account(addr) return account, err } @@ -529,7 +556,7 @@ func (r *readerWithCache) Account(addr common.Address) (*types.StateAccount, err // storage retrieves the storage slot specified by the address and slot key, along // with a flag indicating whether it's found in the cache or not. The returned // storage slot might be empty if it's not existent. -func (r *readerWithCache) storage(addr common.Address, slot common.Hash) (common.Hash, bool, error) { +func (r *stateReaderWithCache) storage(addr common.Address, slot common.Hash) (common.Hash, bool, error) { var ( value common.Hash ok bool @@ -546,7 +573,7 @@ func (r *readerWithCache) storage(addr common.Address, slot common.Hash) (common return value, true, nil } // Try to resolve the requested storage slot from the underlying reader - value, err := r.Reader.Storage(addr, slot) + value, err := r.StateReader.Storage(addr, slot) if err != nil { return common.Hash{}, false, err } @@ -567,13 +594,14 @@ func (r *readerWithCache) storage(addr common.Address, slot common.Hash) (common // existent. // // An error will be returned if the state is corrupted in the underlying reader. -func (r *readerWithCache) Storage(addr common.Address, slot common.Hash) (common.Hash, error) { +func (r *stateReaderWithCache) Storage(addr common.Address, slot common.Hash) (common.Hash, error) { value, _, err := r.storage(addr, slot) return value, err } -type readerWithCacheStats struct { - *readerWithCache +type readerWithStats struct { + *stateReaderWithCache + ContractCodeReaderWithStats accountCacheHit atomic.Int64 accountCacheMiss atomic.Int64 @@ -581,10 +609,11 @@ type readerWithCacheStats struct { storageCacheMiss atomic.Int64 } -// newReaderWithCacheStats constructs the reader with additional statistics tracked. 
-func newReaderWithCacheStats(reader *readerWithCache) *readerWithCacheStats { - return &readerWithCacheStats{ - readerWithCache: reader, +// newReaderWithStats constructs the reader with additional statistics tracked. +func newReaderWithStats(sr *stateReaderWithCache, cr ContractCodeReaderWithStats) *readerWithStats { + return &readerWithStats{ + stateReaderWithCache: sr, + ContractCodeReaderWithStats: cr, } } @@ -592,8 +621,8 @@ func newReaderWithCacheStats(reader *readerWithCache) *readerWithCacheStats { // The returned account might be nil if it's not existent. // // An error will be returned if the state is corrupted in the underlying reader. -func (r *readerWithCacheStats) Account(addr common.Address) (*types.StateAccount, error) { - account, incache, err := r.readerWithCache.account(addr) +func (r *readerWithStats) Account(addr common.Address) (*types.StateAccount, error) { + account, incache, err := r.stateReaderWithCache.account(addr) if err != nil { return nil, err } @@ -610,8 +639,8 @@ func (r *readerWithCacheStats) Account(addr common.Address) (*types.StateAccount // existent. // // An error will be returned if the state is corrupted in the underlying reader. -func (r *readerWithCacheStats) Storage(addr common.Address, slot common.Hash) (common.Hash, error) { - value, incache, err := r.readerWithCache.storage(addr, slot) +func (r *readerWithStats) Storage(addr common.Address, slot common.Hash) (common.Hash, error) { + value, incache, err := r.stateReaderWithCache.storage(addr, slot) if err != nil { return common.Hash{}, err } @@ -624,11 +653,14 @@ func (r *readerWithCacheStats) Storage(addr common.Address, slot common.Hash) (c } // GetStats implements ReaderWithStats, returning the statistics of state reader. 
-func (r *readerWithCacheStats) GetStats() ReaderStats { +func (r *readerWithStats) GetStats() ReaderStats { + codeHit, codeMiss := r.ContractCodeReaderWithStats.GetStats() return ReaderStats{ AccountCacheHit: r.accountCacheHit.Load(), AccountCacheMiss: r.accountCacheMiss.Load(), StorageCacheHit: r.storageCacheHit.Load(), StorageCacheMiss: r.storageCacheMiss.Load(), + ContractCodeHit: codeHit, + ContractCodeMiss: codeMiss, } } From 64d22fd7f7b1c12815ca7a8d457ea13483702dd5 Mon Sep 17 00:00:00 2001 From: LittleBingoo Date: Thu, 8 Jan 2026 11:49:13 +0800 Subject: [PATCH 220/277] internal/flags: update copyright year to 2026 (#33550) --- internal/flags/helpers.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/flags/helpers.go b/internal/flags/helpers.go index fc84ae85da..e6a6966d9f 100644 --- a/internal/flags/helpers.go +++ b/internal/flags/helpers.go @@ -40,7 +40,7 @@ func NewApp(usage string) *cli.App { app.EnableBashCompletion = true app.Version = version.WithCommit(git.Commit, git.Date) app.Usage = usage - app.Copyright = "Copyright 2013-2025 The go-ethereum Authors" + app.Copyright = "Copyright 2013-2026 The go-ethereum Authors" app.Before = func(ctx *cli.Context) error { MigrateGlobalFlags(ctx) return nil From a32851fac9e97ed2af6eec2238a07f03e1205b16 Mon Sep 17 00:00:00 2001 From: Mask Weller Date: Thu, 8 Jan 2026 13:23:48 +0700 Subject: [PATCH 221/277] graphql: fix GasPrice for blob and setcode transactions (#33542) Adds BlobTxType and SetCodeTxType to GasPrice switch case, aligning with `MaxFeePerGas` and `MaxPriorityFeePerGas` handling. 
Co-authored-by: m6xwzzz --- graphql/graphql.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/graphql/graphql.go b/graphql/graphql.go index 0013abf26f..244d6926a2 100644 --- a/graphql/graphql.go +++ b/graphql/graphql.go @@ -272,7 +272,7 @@ func (t *Transaction) GasPrice(ctx context.Context) hexutil.Big { return hexutil.Big{} } switch tx.Type() { - case types.DynamicFeeTxType: + case types.DynamicFeeTxType, types.BlobTxType, types.SetCodeTxType: if block != nil { if baseFee, _ := block.BaseFeePerGas(ctx); baseFee != nil { // price = min(gasTipCap + baseFee, gasFeeCap) From d5efd34010874c47795c90e036c23c46fd3fb2eb Mon Sep 17 00:00:00 2001 From: rjl493456442 Date: Thu, 8 Jan 2026 16:57:35 +0800 Subject: [PATCH 222/277] triedb/pathdb: introduce extension to history index structure (#33399) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit It's a PR based on #33303 and introduces an approach for trienode history indexing. --- In the current archive node design, resolving a historical trie node at a specific block involves the following steps: - Look up the corresponding trie node index and locate the first entry whose state ID is greater than the target state ID. - Resolve the trie node from the associated trienode history object. A naive approach would be to store mutation records for every trie node, similar to how flat state mutations are recorded. However, the total number of trie nodes is extremely large (approximately 2.4 billion), and the vast majority of them are rarely modified. Creating an index entry for each individual trie node would be very wasteful in both storage and indexing overhead. To address this, we aggregate multiple trie nodes into chunks and index mutations at the chunk level instead. --- For a storage trie, the trie is vertically partitioned into multiple sub tries, each spanning three consecutive levels. 
The top three levels (1 + 16 + 256 nodes) form the first chunk, and every subsequent three-level segment forms another chunk. ``` Original trie structure Level 0 [ ROOT ] 1 node Level 1 [0] [1] [2] ... [f] 16 nodes Level 2 [00] [01] ... [0f] [10] ... [ff] 256 nodes Level 3 [000] [001] ... [00f] [010] ... [fff] 4096 nodes Level 4 [0000] ... [000f] [0010] ... [001f] ... [ffff] 65536 nodes Vertical split into chunks (3 levels per chunk) Level0 [ ROOT ] 1 chunk Level3 [000] ... [fff] 4096 chunks Level6 [000000] ... [ffffff] 16777216 chunks ``` Within each chunk, there are 273 nodes in total, regardless of the chunk's depth in the trie. ``` Level 0 [ 0 ] 1 node Level 1 [ 1 ] … [ 16 ] 16 nodes Level 2 [ 17 ] … … [ 272 ] 256 nodes ``` Each chunk is uniquely identified by the path prefix of the root node of its corresponding sub-trie. Within a chunk, nodes are identified by a numeric index ranging from 0 to 272. For example, suppose that at block 100, the nodes with paths `[]`, `[0]`, `[f]`, `[00]`, and `[ff]` are modified. The mutation record for chunk 0 is then appended with the following entry: `[100 → [0, 1, 16, 17, 272]]`, where `272` is the numeric ID of path `[ff]`. Furthermore, due to the structural properties of the Merkle Patricia Trie, if a child node is modified, all of its ancestors along the same path must also be updated. As a result, in the above example, recording mutations for nodes `00` and `ff` alone is sufficient, as this implicitly indicates that their ancestor nodes `[]`, `[0]` and `[f]` were also modified at block 100. --- Query processing is slightly more complicated. Since trie nodes are indexed at the chunk level, each individual trie node lookup requires an additional filtering step to ensure that a given mutation record actually corresponds to the target trie node. As mentioned earlier, mutation records store only the numeric identifiers of leaf nodes, while ancestor nodes are omitted for storage efficiency. 
Consequently, when querying an ancestor node, additional checks are required to determine whether the mutation record implicitly represents a modification to that ancestor. Moreover, since trie nodes are indexed at the chunk level, some trie nodes may be updated frequently, causing their mutation records to dominate the index. Queries targeting rarely modified trie nodes would then scan a large amount of irrelevant index data, significantly degrading performance. To address this issue, a bitmap is introduced for each index block and stored in the chunk's metadata. Before loading a specific index block, the bitmap is checked to determine whether the block contains mutation records relevant to the target trie node. If the bitmap indicates that the block does not contain such records, the block is skipped entirely. --- triedb/pathdb/history_index.go | 174 +++++----- triedb/pathdb/history_index_block.go | 220 +++++++++---- triedb/pathdb/history_index_block_test.go | 238 ++++++++++---- triedb/pathdb/history_index_iterator.go | 322 +++++++++++++++++-- triedb/pathdb/history_index_iterator_test.go | 280 ++++++++-------- triedb/pathdb/history_index_test.go | 192 +++++------ triedb/pathdb/history_indexer.go | 9 +- triedb/pathdb/history_reader.go | 6 +- triedb/pathdb/history_trienode.go | 13 +- triedb/pathdb/history_trienode_test.go | 8 +- triedb/pathdb/history_trienode_utils.go | 83 +++++ triedb/pathdb/history_trienode_utils_test.go | 81 +++++ 12 files changed, 1117 insertions(+), 509 deletions(-) create mode 100644 triedb/pathdb/history_trienode_utils.go create mode 100644 triedb/pathdb/history_trienode_utils_test.go diff --git a/triedb/pathdb/history_index.go b/triedb/pathdb/history_index.go index cc5cd204b4..0c5eb8db21 100644 --- a/triedb/pathdb/history_index.go +++ b/triedb/pathdb/history_index.go @@ -25,22 +25,28 @@ import ( "github.com/ethereum/go-ethereum/ethdb" ) -// parseIndex parses the index data with the supplied byte stream. 
The index data -// is a list of fixed-sized metadata. Empty metadata is regarded as invalid. -func parseIndex(blob []byte) ([]*indexBlockDesc, error) { +// parseIndex parses the index data from the provided byte stream. The index data +// is a sequence of fixed-size metadata entries, and any empty metadata entry is +// considered invalid. +// +// Each metadata entry consists of two components: the indexBlockDesc and an +// optional extension bitmap. The bitmap length may vary across different categories, +// but must remain consistent within the same category. +func parseIndex(blob []byte, bitmapSize int) ([]*indexBlockDesc, error) { if len(blob) == 0 { return nil, errors.New("empty state history index") } - if len(blob)%indexBlockDescSize != 0 { - return nil, fmt.Errorf("corrupted state index, len: %d", len(blob)) + size := indexBlockDescSize + bitmapSize + if len(blob)%size != 0 { + return nil, fmt.Errorf("corrupted state index, len: %d, bitmap size: %d", len(blob), bitmapSize) } var ( lastID uint32 descList []*indexBlockDesc ) - for i := 0; i < len(blob)/indexBlockDescSize; i++ { + for i := 0; i < len(blob)/size; i++ { var desc indexBlockDesc - desc.decode(blob[i*indexBlockDescSize : (i+1)*indexBlockDescSize]) + desc.decode(blob[i*size : (i+1)*size]) if desc.empty() { return nil, errors.New("empty state history index block") } @@ -69,33 +75,35 @@ func parseIndex(blob []byte) ([]*indexBlockDesc, error) { // indexReader is the structure to look up the state history index records // associated with the specific state element. type indexReader struct { - db ethdb.KeyValueReader - descList []*indexBlockDesc - readers map[uint32]*blockReader - state stateIdent + db ethdb.KeyValueReader + descList []*indexBlockDesc + readers map[uint32]*blockReader + state stateIdent + bitmapSize int } // loadIndexData loads the index data associated with the specified state. 
-func loadIndexData(db ethdb.KeyValueReader, state stateIdent) ([]*indexBlockDesc, error) { +func loadIndexData(db ethdb.KeyValueReader, state stateIdent, bitmapSize int) ([]*indexBlockDesc, error) { blob := readStateIndex(state, db) if len(blob) == 0 { return nil, nil } - return parseIndex(blob) + return parseIndex(blob, bitmapSize) } // newIndexReader constructs a index reader for the specified state. Reader with // empty data is allowed. -func newIndexReader(db ethdb.KeyValueReader, state stateIdent) (*indexReader, error) { - descList, err := loadIndexData(db, state) +func newIndexReader(db ethdb.KeyValueReader, state stateIdent, bitmapSize int) (*indexReader, error) { + descList, err := loadIndexData(db, state, bitmapSize) if err != nil { return nil, err } return &indexReader{ - descList: descList, - readers: make(map[uint32]*blockReader), - db: db, - state: state, + descList: descList, + readers: make(map[uint32]*blockReader), + db: db, + state: state, + bitmapSize: bitmapSize, }, nil } @@ -106,11 +114,9 @@ func (r *indexReader) refresh() error { // may have been modified by additional elements written to the disk. if len(r.descList) != 0 { last := r.descList[len(r.descList)-1] - if !last.full() { - delete(r.readers, last.id) - } + delete(r.readers, last.id) } - descList, err := loadIndexData(r.db, r.state) + descList, err := loadIndexData(r.db, r.state, r.bitmapSize) if err != nil { return err } @@ -118,26 +124,10 @@ func (r *indexReader) refresh() error { return nil } -// newIterator creates an iterator for traversing the index entries. 
-func (r *indexReader) newIterator() *indexIterator { - return newIndexIterator(r.descList, func(id uint32) (*blockReader, error) { - br, ok := r.readers[id] - if !ok { - var err error - br, err = newBlockReader(readStateIndexBlock(r.state, r.db, id)) - if err != nil { - return nil, err - } - r.readers[id] = br - } - return br, nil - }) -} - // readGreaterThan locates the first element that is greater than the specified // id. If no such element is found, MaxUint64 is returned. func (r *indexReader) readGreaterThan(id uint64) (uint64, error) { - it := r.newIterator() + it := r.newIterator(nil) found := it.SeekGT(id) if err := it.Error(); err != nil { return 0, err @@ -155,31 +145,33 @@ func (r *indexReader) readGreaterThan(id uint64) (uint64, error) { // history ids) is stored in these second-layer index blocks, which are size // limited. type indexWriter struct { - descList []*indexBlockDesc // The list of index block descriptions - bw *blockWriter // The live index block writer - frozen []*blockWriter // The finalized index block writers, waiting for flush - lastID uint64 // The ID of the latest tracked history - state stateIdent - db ethdb.KeyValueReader + descList []*indexBlockDesc // The list of index block descriptions + bw *blockWriter // The live index block writer + frozen []*blockWriter // The finalized index block writers, waiting for flush + lastID uint64 // The ID of the latest tracked history + state stateIdent // The identifier of the state being indexed + bitmapSize int // The size of optional extension bitmap + db ethdb.KeyValueReader } // newIndexWriter constructs the index writer for the specified state. Additionally, // it takes an integer as the limit and prunes all existing elements above that ID. // It's essential as the recovery mechanism after unclean shutdown during the history // indexing. 
-func newIndexWriter(db ethdb.KeyValueReader, state stateIdent, limit uint64) (*indexWriter, error) { +func newIndexWriter(db ethdb.KeyValueReader, state stateIdent, limit uint64, bitmapSize int) (*indexWriter, error) { blob := readStateIndex(state, db) if len(blob) == 0 { - desc := newIndexBlockDesc(0) - bw, _ := newBlockWriter(nil, desc, 0 /* useless if the block is empty */) + desc := newIndexBlockDesc(0, bitmapSize) + bw, _ := newBlockWriter(nil, desc, 0 /* useless if the block is empty */, bitmapSize != 0) return &indexWriter{ - descList: []*indexBlockDesc{desc}, - bw: bw, - state: state, - db: db, + descList: []*indexBlockDesc{desc}, + bw: bw, + state: state, + db: db, + bitmapSize: bitmapSize, }, nil } - descList, err := parseIndex(blob) + descList, err := parseIndex(blob, bitmapSize) if err != nil { return nil, err } @@ -197,30 +189,31 @@ func newIndexWriter(db ethdb.KeyValueReader, state stateIdent, limit uint64) (*i // Construct the writer for the last block. All elements in this block // that exceed the limit will be truncated. - bw, err := newBlockWriter(indexBlock, lastDesc, limit) + bw, err := newBlockWriter(indexBlock, lastDesc, limit, bitmapSize != 0) if err != nil { return nil, err } return &indexWriter{ - descList: descList, - lastID: bw.last(), - bw: bw, - state: state, - db: db, + descList: descList, + lastID: bw.last(), + bw: bw, + state: state, + db: db, + bitmapSize: bitmapSize, }, nil } // append adds the new element into the index writer. 
-func (w *indexWriter) append(id uint64) error { +func (w *indexWriter) append(id uint64, ext []uint16) error { if id <= w.lastID { return fmt.Errorf("append element out of order, last: %d, this: %d", w.lastID, id) } - if w.bw.full() { + if w.bw.estimateFull(ext) { if err := w.rotate(); err != nil { return err } } - if err := w.bw.append(id); err != nil { + if err := w.bw.append(id, ext); err != nil { return err } w.lastID = id @@ -233,10 +226,10 @@ func (w *indexWriter) append(id uint64) error { func (w *indexWriter) rotate() error { var ( err error - desc = newIndexBlockDesc(w.bw.desc.id + 1) + desc = newIndexBlockDesc(w.bw.desc.id+1, w.bitmapSize) ) w.frozen = append(w.frozen, w.bw) - w.bw, err = newBlockWriter(nil, desc, 0 /* useless if the block is empty */) + w.bw, err = newBlockWriter(nil, desc, 0 /* useless if the block is empty */, w.bitmapSize != 0) if err != nil { return err } @@ -268,7 +261,8 @@ func (w *indexWriter) finish(batch ethdb.Batch) { } w.frozen = nil // release all the frozen writers - buf := make([]byte, 0, indexBlockDescSize*len(descList)) + size := indexBlockDescSize + w.bitmapSize + buf := make([]byte, 0, size*len(descList)) for _, desc := range descList { buf = append(buf, desc.encode()...) } @@ -277,30 +271,32 @@ func (w *indexWriter) finish(batch ethdb.Batch) { // indexDeleter is responsible for deleting index data for a specific state. 
type indexDeleter struct { - descList []*indexBlockDesc // The list of index block descriptions - bw *blockWriter // The live index block writer - dropped []uint32 // The list of index block id waiting for deleting - lastID uint64 // The ID of the latest tracked history - state stateIdent - db ethdb.KeyValueReader + descList []*indexBlockDesc // The list of index block descriptions + bw *blockWriter // The live index block writer + dropped []uint32 // The list of index block id waiting for deleting + lastID uint64 // The ID of the latest tracked history + state stateIdent // The identifier of the state being indexed + bitmapSize int // The size of optional extension bitmap + db ethdb.KeyValueReader } // newIndexDeleter constructs the index deleter for the specified state. -func newIndexDeleter(db ethdb.KeyValueReader, state stateIdent, limit uint64) (*indexDeleter, error) { +func newIndexDeleter(db ethdb.KeyValueReader, state stateIdent, limit uint64, bitmapSize int) (*indexDeleter, error) { blob := readStateIndex(state, db) if len(blob) == 0 { // TODO(rjl493456442) we can probably return an error here, // deleter with no data is meaningless. - desc := newIndexBlockDesc(0) - bw, _ := newBlockWriter(nil, desc, 0 /* useless if the block is empty */) + desc := newIndexBlockDesc(0, bitmapSize) + bw, _ := newBlockWriter(nil, desc, 0 /* useless if the block is empty */, bitmapSize != 0) return &indexDeleter{ - descList: []*indexBlockDesc{desc}, - bw: bw, - state: state, - db: db, + descList: []*indexBlockDesc{desc}, + bw: bw, + state: state, + bitmapSize: bitmapSize, + db: db, }, nil } - descList, err := parseIndex(blob) + descList, err := parseIndex(blob, bitmapSize) if err != nil { return nil, err } @@ -318,16 +314,17 @@ func newIndexDeleter(db ethdb.KeyValueReader, state stateIdent, limit uint64) (* // Construct the writer for the last block. All elements in this block // that exceed the limit will be truncated. 
- bw, err := newBlockWriter(indexBlock, lastDesc, limit) + bw, err := newBlockWriter(indexBlock, lastDesc, limit, bitmapSize != 0) if err != nil { return nil, err } return &indexDeleter{ - descList: descList, - lastID: bw.last(), - bw: bw, - state: state, - db: db, + descList: descList, + lastID: bw.last(), + bw: bw, + state: state, + bitmapSize: bitmapSize, + db: db, }, nil } @@ -364,7 +361,7 @@ func (d *indexDeleter) pop(id uint64) error { // Open the previous block writer for deleting lastDesc := d.descList[len(d.descList)-1] indexBlock := readStateIndexBlock(d.state, d.db, lastDesc.id) - bw, err := newBlockWriter(indexBlock, lastDesc, lastDesc.max) + bw, err := newBlockWriter(indexBlock, lastDesc, lastDesc.max, d.bitmapSize != 0) if err != nil { return err } @@ -390,7 +387,8 @@ func (d *indexDeleter) finish(batch ethdb.Batch) { if d.empty() { deleteStateIndex(d.state, batch) } else { - buf := make([]byte, 0, indexBlockDescSize*len(d.descList)) + size := indexBlockDescSize + d.bitmapSize + buf := make([]byte, 0, size*len(d.descList)) for _, desc := range d.descList { buf = append(buf, desc.encode()...) 
} diff --git a/triedb/pathdb/history_index_block.go b/triedb/pathdb/history_index_block.go index 13f16b4cf3..fd43d81b78 100644 --- a/triedb/pathdb/history_index_block.go +++ b/triedb/pathdb/history_index_block.go @@ -17,6 +17,7 @@ package pathdb import ( + "bytes" "encoding/binary" "errors" "fmt" @@ -26,23 +27,27 @@ import ( ) const ( - indexBlockDescSize = 14 // The size of index block descriptor - indexBlockEntriesCap = 4096 // The maximum number of entries can be grouped in a block - indexBlockRestartLen = 256 // The restart interval length of index block - historyIndexBatch = 8 * 1024 * 1024 // The number of state history indexes for constructing or deleting as batch + indexBlockDescSize = 14 // The size of index block descriptor + indexBlockMaxSize = 4096 // The maximum size of a single index block + indexBlockRestartLen = 256 // The restart interval length of index block ) // indexBlockDesc represents a descriptor for an index block, which contains a // list of state mutation records associated with a specific state (either an // account or a storage slot). type indexBlockDesc struct { - max uint64 // The maximum state ID retained within the block - entries uint16 // The number of state mutation records retained within the block - id uint32 // The id of the index block + max uint64 // The maximum state ID retained within the block + entries uint16 // The number of state mutation records retained within the block + id uint32 // The id of the index block + extBitmap []byte // Optional fixed-size bitmap for the included extension elements } -func newIndexBlockDesc(id uint32) *indexBlockDesc { - return &indexBlockDesc{id: id} +func newIndexBlockDesc(id uint32, bitmapSize int) *indexBlockDesc { + var bitmap []byte + if bitmapSize > 0 { + bitmap = make([]byte, bitmapSize) + } + return &indexBlockDesc{id: id, extBitmap: bitmap} } // empty indicates whether the block is empty with no element retained. 
@@ -50,26 +55,33 @@ func (d *indexBlockDesc) empty() bool { return d.entries == 0 } -// full indicates whether the number of elements in the block exceeds the -// preconfigured limit. -func (d *indexBlockDesc) full() bool { - return d.entries >= indexBlockEntriesCap -} - // encode packs index block descriptor into byte stream. func (d *indexBlockDesc) encode() []byte { - var buf [indexBlockDescSize]byte + buf := make([]byte, indexBlockDescSize+len(d.extBitmap)) binary.BigEndian.PutUint64(buf[0:8], d.max) binary.BigEndian.PutUint16(buf[8:10], d.entries) binary.BigEndian.PutUint32(buf[10:14], d.id) + copy(buf[indexBlockDescSize:], d.extBitmap) return buf[:] } -// decode unpacks index block descriptor from byte stream. +// decode unpacks index block descriptor from byte stream. It's safe to mutate +// the provided byte stream after the function call. func (d *indexBlockDesc) decode(blob []byte) { d.max = binary.BigEndian.Uint64(blob[:8]) d.entries = binary.BigEndian.Uint16(blob[8:10]) d.id = binary.BigEndian.Uint32(blob[10:14]) + d.extBitmap = bytes.Clone(blob[indexBlockDescSize:]) +} + +// copy returns a deep-copied object. +func (d *indexBlockDesc) copy() *indexBlockDesc { + return &indexBlockDesc{ + max: d.max, + entries: d.entries, + id: d.id, + extBitmap: bytes.Clone(d.extBitmap), + } } // parseIndexBlock parses the index block with the supplied byte stream. @@ -97,20 +109,38 @@ func (d *indexBlockDesc) decode(blob []byte) { // A uint16 can cover offsets in the range [0, 65536), which is more than enough // to store 4096 integers. // -// Each chunk begins with the full value of the first integer, followed by -// subsequent integers representing the differences between the current value -// and the preceding one. Integers are encoded with variable-size for best -// storage efficiency. Each chunk can be illustrated as below. 
+// Each chunk begins with a full integer value for the first element, followed +// by subsequent integers encoded as differences (deltas) from their preceding +// values. All integers use variable-length encoding for optimal space efficiency. // -// Restart ---> +----------------+ -// | Full integer | -// +----------------+ -// | Diff with prev | -// +----------------+ -// | ... | -// +----------------+ -// | Diff with prev | -// +----------------+ +// In the updated format, each element in the chunk may optionally include an +// "extension" section. If an extension is present, it starts with a var-size +// integer indicating the length of the remaining extension payload, followed by +// that many bytes. If no extension is present, the element format is identical +// to the original version (i.e., only the integer or delta value is encoded). +// +// In the trienode history index, the extension field contains the list of +// trie node IDs that fall within this range. For the given state transition, +// these IDs represent the specific nodes in this range that were mutated. +// +// Whether an element includes an extension is determined by the block reader +// based on the specification. Conceptually, a chunk is structured as: +// +// Restart ---> +----------------+ +// | Full integer | +// +----------------+ +// | (Extension?) | +// +----------------+ +// | Diff with prev | +// +----------------+ +// | (Extension?) | +// +----------------+ +// | ... | +// +----------------+ +// | Diff with prev | +// +----------------+ +// | (Extension?) | +// +----------------+ // // Empty index block is regarded as invalid. func parseIndexBlock(blob []byte) ([]uint16, []byte, error) { @@ -148,24 +178,26 @@ func parseIndexBlock(blob []byte) ([]uint16, []byte, error) { type blockReader struct { restarts []uint16 data []byte + hasExt bool } // newBlockReader constructs the block reader with the supplied block data. 
-func newBlockReader(blob []byte) (*blockReader, error) { +func newBlockReader(blob []byte, hasExt bool) (*blockReader, error) { restarts, data, err := parseIndexBlock(blob) if err != nil { return nil, err } return &blockReader{ restarts: restarts, - data: data, // safe to own the slice + data: data, // safe to own the slice + hasExt: hasExt, // flag whether extension should be resolved }, nil } // readGreaterThan locates the first element in the block that is greater than // the specified value. If no such element is found, MaxUint64 is returned. func (br *blockReader) readGreaterThan(id uint64) (uint64, error) { - it := newBlockIterator(br.data, br.restarts) + it := br.newIterator(nil) found := it.SeekGT(id) if err := it.Error(); err != nil { return 0, err @@ -180,17 +212,19 @@ type blockWriter struct { desc *indexBlockDesc // Descriptor of the block restarts []uint16 // Offsets into the data slice, marking the start of each section data []byte // Aggregated encoded data slice + hasExt bool // Flag whether the extension field for each element exists } // newBlockWriter constructs a block writer. In addition to the existing data // and block description, it takes an element ID and prunes all existing elements // above that ID. It's essential as the recovery mechanism after unclean shutdown // during the history indexing. 
-func newBlockWriter(blob []byte, desc *indexBlockDesc, limit uint64) (*blockWriter, error) { +func newBlockWriter(blob []byte, desc *indexBlockDesc, limit uint64, hasExt bool) (*blockWriter, error) { if len(blob) == 0 { return &blockWriter{ - desc: desc, - data: make([]byte, 0, 1024), + desc: desc, + data: make([]byte, 0, 1024), + hasExt: hasExt, }, nil } restarts, data, err := parseIndexBlock(blob) @@ -201,6 +235,7 @@ func newBlockWriter(blob []byte, desc *indexBlockDesc, limit uint64) (*blockWrit desc: desc, restarts: restarts, data: data, // safe to own the slice + hasExt: hasExt, } var trimmed int for !writer.empty() && writer.last() > limit { @@ -215,9 +250,26 @@ func newBlockWriter(blob []byte, desc *indexBlockDesc, limit uint64) (*blockWrit return writer, nil } +// setBitmap applies the given extension elements into the bitmap. +func (b *blockWriter) setBitmap(ext []uint16) { + for _, n := range ext { + // Node ID zero is intentionally filtered out. Any element in this range + // can indicate that the sub-tree's root node was mutated, so storing zero + // is redundant and saves one byte for bitmap. + if n != 0 { + setBit(b.desc.extBitmap, int(n-1)) + } + } +} + // append adds a new element to the block. The new element must be greater than // the previous one. The provided ID is assumed to always be greater than 0. -func (b *blockWriter) append(id uint64) error { +// +// ext refers to the optional extension field attached to the appended element. +// This extension mechanism is used by trie-node history and represents a list of +// trie node IDs that fall within the range covered by the index element +// (typically corresponding to a sub-trie in trie-node history). +func (b *blockWriter) append(id uint64, ext []uint16) error { if id == 0 { return errors.New("invalid zero id") } @@ -244,13 +296,29 @@ func (b *blockWriter) append(id uint64) error { // element. 
b.data = binary.AppendUvarint(b.data, id-b.desc.max) } + // Extension validation + if (len(ext) == 0) != !b.hasExt { + if len(ext) == 0 { + return errors.New("missing extension") + } + return errors.New("unexpected extension") + } + // Append the extension if it is not nil. The extension is prefixed with a + // length indicator, and the block reader MUST understand this scheme and + // decode the extension accordingly. + if len(ext) > 0 { + b.setBitmap(ext) + enc := encodeIDs(ext) + b.data = binary.AppendUvarint(b.data, uint64(len(enc))) + b.data = append(b.data, enc...) + } b.desc.entries++ b.desc.max = id return nil } // scanSection traverses the specified section and terminates if fn returns true. -func (b *blockWriter) scanSection(section int, fn func(uint64, int) bool) { +func (b *blockWriter) scanSection(section int, fn func(uint64, int, []uint16) bool) error { var ( value uint64 start = int(b.restarts[section]) @@ -269,28 +337,47 @@ func (b *blockWriter) scanSection(section int, fn func(uint64, int) bool) { } else { value += x } - if fn(value, pos) { - return + // Resolve the extension if exists + var ( + err error + ext []uint16 + extLen int + ) + if b.hasExt { + l, ln := binary.Uvarint(b.data[pos+n:]) + extLen = ln + int(l) + ext, err = decodeIDs(b.data[pos+n+ln : pos+n+extLen]) } + if err != nil { + return err + } + if fn(value, pos, ext) { + return nil + } + // Shift to next position pos += n + pos += extLen } + return nil } // sectionLast returns the last element in the specified section. 
-func (b *blockWriter) sectionLast(section int) uint64 { +func (b *blockWriter) sectionLast(section int) (uint64, error) { var n uint64 - b.scanSection(section, func(v uint64, _ int) bool { + if err := b.scanSection(section, func(v uint64, _ int, _ []uint16) bool { n = v return false - }) - return n + }); err != nil { + return 0, err + } + return n, nil } // sectionSearch looks up the specified value in the given section, // the position and the preceding value will be returned if found. // It assumes that the preceding element exists in the section. -func (b *blockWriter) sectionSearch(section int, n uint64) (found bool, prev uint64, pos int) { - b.scanSection(section, func(v uint64, p int) bool { +func (b *blockWriter) sectionSearch(section int, n uint64) (found bool, prev uint64, pos int, err error) { + if err := b.scanSection(section, func(v uint64, p int, _ []uint16) bool { if n == v { pos = p found = true @@ -298,8 +385,24 @@ func (b *blockWriter) sectionSearch(section int, n uint64) (found bool, prev uin } prev = v return false // continue iteration - }) - return found, prev, pos + }); err != nil { + return false, 0, 0, err + } + return found, prev, pos, nil +} + +// rebuildBitmap scans the entire block and rebuilds the bitmap. +func (b *blockWriter) rebuildBitmap() error { + clear(b.desc.extBitmap) + for i := 0; i < len(b.restarts); i++ { + if err := b.scanSection(i, func(v uint64, p int, ext []uint16) bool { + b.setBitmap(ext) + return false // continue iteration + }); err != nil { + return err + } + } + return nil } // pop removes the last element from the block. 
The assumption is held that block @@ -315,6 +418,7 @@ func (b *blockWriter) pop(id uint64) error { if b.desc.entries == 1 { b.desc.max = 0 b.desc.entries = 0 + clear(b.desc.extBitmap) b.restarts = nil b.data = b.data[:0] return nil @@ -324,28 +428,36 @@ func (b *blockWriter) pop(id uint64) error { if b.desc.entries%indexBlockRestartLen == 1 { b.data = b.data[:b.restarts[len(b.restarts)-1]] b.restarts = b.restarts[:len(b.restarts)-1] - b.desc.max = b.sectionLast(len(b.restarts) - 1) + last, err := b.sectionLast(len(b.restarts) - 1) + if err != nil { + return err + } + b.desc.max = last b.desc.entries -= 1 - return nil + return b.rebuildBitmap() } // Look up the element preceding the one to be popped, in order to update // the maximum element in the block. - found, prev, pos := b.sectionSearch(len(b.restarts)-1, id) + found, prev, pos, err := b.sectionSearch(len(b.restarts)-1, id) + if err != nil { + return err + } if !found { return fmt.Errorf("pop element is not found, last: %d, this: %d", b.desc.max, id) } b.desc.max = prev b.data = b.data[:pos] b.desc.entries -= 1 - return nil + return b.rebuildBitmap() } func (b *blockWriter) empty() bool { return b.desc.empty() } -func (b *blockWriter) full() bool { - return b.desc.full() +func (b *blockWriter) estimateFull(ext []uint16) bool { + size := 8 + 2*len(ext) + return len(b.data)+size > indexBlockMaxSize } // last returns the last element in the block. 
It should only be called when diff --git a/triedb/pathdb/history_index_block_test.go b/triedb/pathdb/history_index_block_test.go index f8c6d3ab87..923ae29348 100644 --- a/triedb/pathdb/history_index_block_test.go +++ b/triedb/pathdb/history_index_block_test.go @@ -17,6 +17,7 @@ package pathdb import ( + "bytes" "math" "math/rand" "slices" @@ -24,16 +25,36 @@ import ( "testing" ) +func randomExt(bitmapSize int, n int) []uint16 { + if bitmapSize == 0 { + return nil + } + var ( + limit = bitmapSize * 8 + extList []uint16 + ) + for i := 0; i < n; i++ { + extList = append(extList, uint16(rand.Intn(limit+1))) + } + return extList +} + func TestBlockReaderBasic(t *testing.T) { + testBlockReaderBasic(t, 0) + testBlockReaderBasic(t, 2) + testBlockReaderBasic(t, 34) +} + +func testBlockReaderBasic(t *testing.T, bitmapSize int) { elements := []uint64{ 1, 5, 10, 11, 20, } - bw, _ := newBlockWriter(nil, newIndexBlockDesc(0), 0) + bw, _ := newBlockWriter(nil, newIndexBlockDesc(0, bitmapSize), 0, bitmapSize != 0) for i := 0; i < len(elements); i++ { - bw.append(elements[i]) + bw.append(elements[i], randomExt(bitmapSize, 5)) } - br, err := newBlockReader(bw.finish()) + br, err := newBlockReader(bw.finish(), bitmapSize != 0) if err != nil { t.Fatalf("Failed to construct the block reader, %v", err) } @@ -60,18 +81,24 @@ func TestBlockReaderBasic(t *testing.T) { } func TestBlockReaderLarge(t *testing.T) { + testBlockReaderLarge(t, 0) + testBlockReaderLarge(t, 2) + testBlockReaderLarge(t, 34) +} + +func testBlockReaderLarge(t *testing.T, bitmapSize int) { var elements []uint64 for i := 0; i < 1000; i++ { elements = append(elements, rand.Uint64()) } slices.Sort(elements) - bw, _ := newBlockWriter(nil, newIndexBlockDesc(0), 0) + bw, _ := newBlockWriter(nil, newIndexBlockDesc(0, bitmapSize), 0, bitmapSize != 0) for i := 0; i < len(elements); i++ { - bw.append(elements[i]) + bw.append(elements[i], randomExt(bitmapSize, 5)) } - br, err := newBlockReader(bw.finish()) + br, err := 
newBlockReader(bw.finish(), bitmapSize != 0) if err != nil { t.Fatalf("Failed to construct the block reader, %v", err) } @@ -95,26 +122,32 @@ func TestBlockReaderLarge(t *testing.T) { } func TestBlockWriterBasic(t *testing.T) { - bw, _ := newBlockWriter(nil, newIndexBlockDesc(0), 0) + testBlockWriteBasic(t, 0) + testBlockWriteBasic(t, 2) + testBlockWriteBasic(t, 34) +} + +func testBlockWriteBasic(t *testing.T, bitmapSize int) { + bw, _ := newBlockWriter(nil, newIndexBlockDesc(0, bitmapSize), 0, bitmapSize != 0) if !bw.empty() { t.Fatal("expected empty block") } - bw.append(2) - if err := bw.append(1); err == nil { + bw.append(2, randomExt(bitmapSize, 5)) + if err := bw.append(1, randomExt(bitmapSize, 5)); err == nil { t.Fatal("out-of-order insertion is not expected") } var maxElem uint64 for i := 0; i < 10; i++ { - bw.append(uint64(i + 3)) + bw.append(uint64(i+3), randomExt(bitmapSize, 5)) maxElem = uint64(i + 3) } - bw, err := newBlockWriter(bw.finish(), newIndexBlockDesc(0), maxElem) + bw, err := newBlockWriter(bw.finish(), newIndexBlockDesc(0, bitmapSize), maxElem, bitmapSize != 0) if err != nil { t.Fatalf("Failed to construct the block writer, %v", err) } for i := 0; i < 10; i++ { - if err := bw.append(uint64(i + 100)); err != nil { + if err := bw.append(uint64(i+100), randomExt(bitmapSize, 5)); err != nil { t.Fatalf("Failed to append value %d: %v", i, err) } } @@ -122,58 +155,38 @@ func TestBlockWriterBasic(t *testing.T) { } func TestBlockWriterWithLimit(t *testing.T) { - bw, _ := newBlockWriter(nil, newIndexBlockDesc(0), 0) + testBlockWriterWithLimit(t, 0) + testBlockWriterWithLimit(t, 2) + testBlockWriterWithLimit(t, 34) +} - var maxElem uint64 - for i := 0; i < indexBlockRestartLen*2; i++ { - bw.append(uint64(i + 1)) - maxElem = uint64(i + 1) - } +func testBlockWriterWithLimit(t *testing.T, bitmapSize int) { + bw, _ := newBlockWriter(nil, newIndexBlockDesc(0, bitmapSize), 0, bitmapSize != 0) - suites := []struct { - limit uint64 - expMax uint64 - }{ - // 
nothing to truncate - { - maxElem, maxElem, - }, - // truncate the last element - { - maxElem - 1, maxElem - 1, - }, - // truncation around the restart boundary - { - uint64(indexBlockRestartLen + 1), - uint64(indexBlockRestartLen + 1), - }, - // truncation around the restart boundary - { - uint64(indexBlockRestartLen), - uint64(indexBlockRestartLen), - }, - { - uint64(1), uint64(1), - }, - // truncate the entire block, it's in theory invalid - { - uint64(0), uint64(0), - }, + var bitmaps [][]byte + for i := 0; i < indexBlockRestartLen+2; i++ { + bw.append(uint64(i+1), randomExt(bitmapSize, 5)) + bitmaps = append(bitmaps, bytes.Clone(bw.desc.extBitmap)) } - for i, suite := range suites { - desc := *bw.desc - block, err := newBlockWriter(bw.finish(), &desc, suite.limit) + for i := 0; i < indexBlockRestartLen+2; i++ { + limit := uint64(i + 1) + + desc := bw.desc.copy() + block, err := newBlockWriter(bytes.Clone(bw.finish()), desc, limit, bitmapSize != 0) if err != nil { t.Fatalf("Failed to construct the block writer, %v", err) } - if block.desc.max != suite.expMax { - t.Fatalf("Test %d, unexpected max value, got %d, want %d", i, block.desc.max, suite.expMax) + if block.desc.max != limit { + t.Fatalf("Test %d, unexpected max value, got %d, want %d", i, block.desc.max, limit) + } + if !bytes.Equal(desc.extBitmap, bitmaps[i]) { + t.Fatalf("Test %d, unexpected bitmap, got: %v, want: %v", i, block.desc.extBitmap, bitmaps[i]) } // Re-fill the elements var maxElem uint64 - for elem := suite.limit + 1; elem < indexBlockRestartLen*4; elem++ { - if err := block.append(elem); err != nil { + for elem := limit + 1; elem < indexBlockRestartLen+4; elem++ { + if err := block.append(elem, randomExt(bitmapSize, 5)); err != nil { t.Fatalf("Failed to append value %d: %v", elem, err) } maxElem = elem @@ -185,9 +198,15 @@ func TestBlockWriterWithLimit(t *testing.T) { } func TestBlockWriterDelete(t *testing.T) { - bw, _ := newBlockWriter(nil, newIndexBlockDesc(0), 0) + 
testBlockWriterDelete(t, 0) + testBlockWriterDelete(t, 2) + testBlockWriterDelete(t, 34) +} + +func testBlockWriterDelete(t *testing.T, bitmapSize int) { + bw, _ := newBlockWriter(nil, newIndexBlockDesc(0, bitmapSize), 0, bitmapSize != 0) for i := 0; i < 10; i++ { - bw.append(uint64(i + 1)) + bw.append(uint64(i+1), randomExt(bitmapSize, 5)) } // Pop unknown id, the request should be rejected if err := bw.pop(100); err == nil { @@ -209,12 +228,18 @@ func TestBlockWriterDelete(t *testing.T) { } func TestBlcokWriterDeleteWithData(t *testing.T) { + testBlcokWriterDeleteWithData(t, 0) + testBlcokWriterDeleteWithData(t, 2) + testBlcokWriterDeleteWithData(t, 34) +} + +func testBlcokWriterDeleteWithData(t *testing.T, bitmapSize int) { elements := []uint64{ 1, 5, 10, 11, 20, } - bw, _ := newBlockWriter(nil, newIndexBlockDesc(0), 0) + bw, _ := newBlockWriter(nil, newIndexBlockDesc(0, bitmapSize), 0, bitmapSize != 0) for i := 0; i < len(elements); i++ { - bw.append(elements[i]) + bw.append(elements[i], randomExt(bitmapSize, 5)) } // Re-construct the block writer with data @@ -223,7 +248,10 @@ func TestBlcokWriterDeleteWithData(t *testing.T) { max: 20, entries: 5, } - bw, err := newBlockWriter(bw.finish(), desc, elements[len(elements)-1]) + if bitmapSize > 0 { + desc.extBitmap = make([]byte, bitmapSize) + } + bw, err := newBlockWriter(bw.finish(), desc, elements[len(elements)-1], bitmapSize != 0) if err != nil { t.Fatalf("Failed to construct block writer %v", err) } @@ -234,7 +262,7 @@ func TestBlcokWriterDeleteWithData(t *testing.T) { newTail := elements[i-1] // Ensure the element can still be queried with no issue - br, err := newBlockReader(bw.finish()) + br, err := newBlockReader(bw.finish(), bitmapSize != 0) if err != nil { t.Fatalf("Failed to construct the block reader, %v", err) } @@ -266,29 +294,60 @@ func TestBlcokWriterDeleteWithData(t *testing.T) { } func TestCorruptedIndexBlock(t *testing.T) { - bw, _ := newBlockWriter(nil, newIndexBlockDesc(0), 0) + bw, _ := 
newBlockWriter(nil, newIndexBlockDesc(0, 0), 0, false) var maxElem uint64 for i := 0; i < 10; i++ { - bw.append(uint64(i + 1)) + bw.append(uint64(i+1), nil) maxElem = uint64(i + 1) } buf := bw.finish() // Mutate the buffer manually buf[len(buf)-1]++ - _, err := newBlockWriter(buf, newIndexBlockDesc(0), maxElem) + _, err := newBlockWriter(buf, newIndexBlockDesc(0, 0), maxElem, false) if err == nil { t.Fatal("Corrupted index block data is not detected") } } // BenchmarkParseIndexBlock benchmarks the performance of parseIndexBlock. +// +// goos: darwin +// goarch: arm64 +// pkg: github.com/ethereum/go-ethereum/triedb/pathdb +// cpu: Apple M1 Pro +// BenchmarkParseIndexBlock +// BenchmarkParseIndexBlock-8 35829495 34.16 ns/op func BenchmarkParseIndexBlock(b *testing.B) { // Generate a realistic index block blob - bw, _ := newBlockWriter(nil, newIndexBlockDesc(0), 0) + bw, _ := newBlockWriter(nil, newIndexBlockDesc(0, 0), 0, false) for i := 0; i < 4096; i++ { - bw.append(uint64(i * 2)) + bw.append(uint64(i*2), nil) + } + blob := bw.finish() + + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, _, err := parseIndexBlock(blob) + if err != nil { + b.Fatalf("parseIndexBlock failed: %v", err) + } + } +} + +// goos: darwin +// goarch: arm64 +// pkg: github.com/ethereum/go-ethereum/triedb/pathdb +// cpu: Apple M1 Pro +// BenchmarkParseIndexBlockWithExt +// BenchmarkParseIndexBlockWithExt-8 35773242 33.72 ns/op +func BenchmarkParseIndexBlockWithExt(b *testing.B) { + // Generate a realistic index block blob + bw, _ := newBlockWriter(nil, newIndexBlockDesc(0, 34), 0, true) + for i := 0; i < 4096; i++ { + id, ext := uint64(i*2), randomExt(34, 3) + bw.append(id, ext) } blob := bw.finish() @@ -302,21 +361,58 @@ func BenchmarkParseIndexBlock(b *testing.B) { } // BenchmarkBlockWriterAppend benchmarks the performance of indexblock.writer +// +// goos: darwin +// goarch: arm64 +// pkg: github.com/ethereum/go-ethereum/triedb/pathdb +// cpu: Apple M1 Pro +// BenchmarkBlockWriterAppend +// 
BenchmarkBlockWriterAppend-8 293611083 4.113 ns/op 3 B/op 0 allocs/op func BenchmarkBlockWriterAppend(b *testing.B) { b.ReportAllocs() b.ResetTimer() var blockID uint32 - desc := newIndexBlockDesc(blockID) - writer, _ := newBlockWriter(nil, desc, 0) + desc := newIndexBlockDesc(blockID, 0) + writer, _ := newBlockWriter(nil, desc, 0, false) for i := 0; i < b.N; i++ { - if writer.full() { + if writer.estimateFull(nil) { blockID += 1 - desc = newIndexBlockDesc(blockID) - writer, _ = newBlockWriter(nil, desc, 0) + desc = newIndexBlockDesc(blockID, 0) + writer, _ = newBlockWriter(nil, desc, 0, false) } - if err := writer.append(writer.desc.max + 1); err != nil { + if err := writer.append(writer.desc.max+1, nil); err != nil { + b.Error(err) + } + } +} + +// goos: darwin +// goarch: arm64 +// pkg: github.com/ethereum/go-ethereum/triedb/pathdb +// cpu: Apple M1 Pro +// BenchmarkBlockWriterAppendWithExt +// BenchmarkBlockWriterAppendWithExt-8 11123844 103.6 ns/op 42 B/op 2 allocs/op +func BenchmarkBlockWriterAppendWithExt(b *testing.B) { + b.ReportAllocs() + b.ResetTimer() + + var ( + bitmapSize = 34 + blockID uint32 + ) + desc := newIndexBlockDesc(blockID, bitmapSize) + writer, _ := newBlockWriter(nil, desc, 0, true) + + for i := 0; i < b.N; i++ { + ext := randomExt(bitmapSize, 3) + if writer.estimateFull(ext) { + blockID += 1 + desc = newIndexBlockDesc(blockID, bitmapSize) + writer, _ = newBlockWriter(nil, desc, 0, true) + } + if err := writer.append(writer.desc.max+1, ext); err != nil { b.Error(err) } } diff --git a/triedb/pathdb/history_index_iterator.go b/triedb/pathdb/history_index_iterator.go index 1ccb39ad09..076baaa9e5 100644 --- a/triedb/pathdb/history_index_iterator.go +++ b/triedb/pathdb/history_index_iterator.go @@ -40,31 +40,133 @@ type HistoryIndexIterator interface { Error() error } +// extFilter provides utilities for filtering index entries based on their +// extension field. 
+//
+// It supports two primary operations:
+//
+// - determine whether a given target node ID or any of its descendants
+// appears explicitly in the extension list.
+//
+// - determine whether a given target node ID or any of its descendants
+// is marked in the extension bitmap.
+//
+// Together, these checks allow callers to efficiently filter out the irrelevant
+// index entries during the lookup.
+type extFilter uint16
+
+// exists takes the entire extension field in the index block and determines
+// whether the target ID or its descendants appears. Note, any of descendant
+// can implicitly mean the presence of ancestor.
+func (f extFilter) exists(ext []byte) (bool, error) {
+ fn := uint16(f)
+ list, err := decodeIDs(ext)
+ if err != nil {
+ return false, err
+ }
+ for _, elem := range list {
+ if elem == fn {
+ return true, nil
+ }
+ if isAncestor(fn, elem) {
+ return true, nil
+ }
+ }
+ return false, nil
+}
+
+const (
+ // bitmapBytesTwoLevels is the size of the bitmap for two levels of the
+ // 16-ary tree (16 nodes total, excluding the root).
+ bitmapBytesTwoLevels = 2
+
+ // bitmapBytesThreeLevels is the size of the bitmap for three levels of
+ // the 16-ary tree (272 nodes total, excluding the root).
+ bitmapBytesThreeLevels = 34
+
+ // bitmapElementThresholdTwoLevels is the total number of elements in the
+ // two levels of a 16-ary tree (16 nodes total, excluding the root).
+ bitmapElementThresholdTwoLevels = 16
+
+ // bitmapElementThresholdThreeLevels is the total number of elements in the
+ // first three levels of a 16-ary tree (272 nodes total, excluding the root).
+ bitmapElementThresholdThreeLevels = bitmapElementThresholdTwoLevels + 16*16
+)
+
+// contains takes the bitmap from the block metadata and determines whether the
+// target ID or its descendants is marked in the bitmap. Note, any of descendant
+// can implicitly mean the presence of ancestor. 
+func (f extFilter) contains(bitmap []byte) (bool, error) { + id := int(f) + if id == 0 { + return true, nil + } + n := id - 1 // apply the position shift for excluding root node + + switch len(bitmap) { + case 0: + // Bitmap is not available, return "false positive" + return true, nil + case bitmapBytesTwoLevels: + // Bitmap for 2-level trie with at most 16 elements inside + if n >= bitmapElementThresholdTwoLevels { + return false, fmt.Errorf("invalid extension filter %d for 2 bytes bitmap", id) + } + return isBitSet(bitmap, n), nil + case bitmapBytesThreeLevels: + // Bitmap for 3-level trie with at most 16+16*16 elements inside + if n >= bitmapElementThresholdThreeLevels { + return false, fmt.Errorf("invalid extension filter %d for 34 bytes bitmap", id) + } else if n >= bitmapElementThresholdTwoLevels { + return isBitSet(bitmap, n), nil + } else { + // Check the element itself first + if isBitSet(bitmap, n) { + return true, nil + } + // Check descendants: the presence of any descendant implicitly + // represents a mutation of its ancestor. + return bitmap[2+2*n] != 0 || bitmap[3+2*n] != 0, nil + } + default: + return false, fmt.Errorf("unsupported bitmap size %d", len(bitmap)) + } +} + // blockIterator is the iterator to traverse the indices within a single block. type blockIterator struct { // immutable fields data []byte // Reference to the data segment within the block reader restarts []uint16 // Offsets pointing to the restart sections within the data + hasExt bool // Flag whether the extension is included in the data + + // Optional extension filter + filter *extFilter // Filters index entries based on the extension field. 
// mutable fields id uint64 // ID of the element at the iterators current position + ext []byte // Extension field of the element at the iterators current position dataPtr int // Current read position within the data slice restartPtr int // Index of the restart section where the iterator is currently positioned exhausted bool // Flag whether the iterator has been exhausted err error // Accumulated error during the traversal } -func newBlockIterator(data []byte, restarts []uint16) *blockIterator { +func (br *blockReader) newIterator(filter *extFilter) *blockIterator { it := &blockIterator{ - data: data, // hold the slice directly with no deep copy - restarts: restarts, // hold the slice directly with no deep copy + data: br.data, // hold the slice directly with no deep copy + restarts: br.restarts, // hold the slice directly with no deep copy + hasExt: br.hasExt, // flag whether the extension should be resolved + filter: filter, // optional extension filter } it.reset() return it } -func (it *blockIterator) set(dataPtr int, restartPtr int, id uint64) { +func (it *blockIterator) set(dataPtr int, restartPtr int, id uint64, ext []byte) { it.id = id + it.ext = ext + it.dataPtr = dataPtr it.restartPtr = restartPtr it.exhausted = dataPtr == len(it.data) @@ -79,6 +181,8 @@ func (it *blockIterator) setErr(err error) { func (it *blockIterator) reset() { it.id = 0 + it.ext = nil + it.dataPtr = -1 it.restartPtr = -1 it.exhausted = false @@ -90,12 +194,26 @@ func (it *blockIterator) reset() { } } -// SeekGT moves the iterator to the first element whose id is greater than the +func (it *blockIterator) resolveExt(pos int) ([]byte, int, error) { + if !it.hasExt { + return nil, 0, nil + } + length, n := binary.Uvarint(it.data[pos:]) + if n <= 0 { + return nil, 0, fmt.Errorf("too short for extension, pos: %d, datalen: %d", pos, len(it.data)) + } + if len(it.data[pos+n:]) < int(length) { + return nil, 0, fmt.Errorf("too short for extension, pos: %d, length: %d, datalen: %d", pos, 
length, len(it.data)) + } + return it.data[pos+n : pos+n+int(length)], n + int(length), nil +} + +// seekGT moves the iterator to the first element whose id is greater than the // given number. It returns whether such element exists. // // Note, this operation will unset the exhausted status and subsequent traversal // is allowed. -func (it *blockIterator) SeekGT(id uint64) bool { +func (it *blockIterator) seekGT(id uint64) bool { if it.err != nil { return false } @@ -112,11 +230,20 @@ func (it *blockIterator) SeekGT(id uint64) bool { return false } if index == 0 { - item, n := binary.Uvarint(it.data[it.restarts[0]:]) + pos := int(it.restarts[0]) + item, n := binary.Uvarint(it.data[pos:]) + if n <= 0 { + it.setErr(fmt.Errorf("failed to decode item at pos %d", it.restarts[0])) + return false + } + pos = pos + n - // If the restart size is 1, then the restart pointer shouldn't be 0. - // It's not practical and should be denied in the first place. - it.set(int(it.restarts[0])+n, 0, item) + ext, shift, err := it.resolveExt(pos) + if err != nil { + it.setErr(err) + return false + } + it.set(pos+shift, 0, item, ext) return true } var ( @@ -154,11 +281,18 @@ func (it *blockIterator) SeekGT(id uint64) bool { } pos += n + ext, shift, err := it.resolveExt(pos) + if err != nil { + it.setErr(err) + return false + } + pos += shift + if result > id { if pos == limit { - it.set(pos, restartIndex+1, result) + it.set(pos, restartIndex+1, result, ext) } else { - it.set(pos, restartIndex, result) + it.set(pos, restartIndex, result, ext) } return true } @@ -170,8 +304,45 @@ func (it *blockIterator) SeekGT(id uint64) bool { } // The element which is the first one greater than the specified id // is exactly the one located at the restart point. 
- item, n := binary.Uvarint(it.data[it.restarts[index]:]) - it.set(int(it.restarts[index])+n, index, item) + pos = int(it.restarts[index]) + item, n := binary.Uvarint(it.data[pos:]) + if n <= 0 { + it.setErr(fmt.Errorf("failed to decode item at pos %d", it.restarts[index])) + return false + } + pos = pos + n + + ext, shift, err := it.resolveExt(pos) + if err != nil { + it.setErr(err) + return false + } + it.set(pos+shift, index, item, ext) + return true +} + +// SeekGT implements HistoryIndexIterator, is the wrapper of the seekGT with +// optional extension filter logic applied. +func (it *blockIterator) SeekGT(id uint64) bool { + if !it.seekGT(id) { + return false + } + if it.filter == nil { + return true + } + for { + found, err := it.filter.exists(it.ext) + if err != nil { + it.setErr(err) + return false + } + if found { + break + } + if !it.next() { + return false + } + } return true } @@ -183,10 +354,9 @@ func (it *blockIterator) init() { it.restartPtr = 0 } -// Next implements the HistoryIndexIterator, moving the iterator to the next -// element. If the iterator has been exhausted, and boolean with false should -// be returned. -func (it *blockIterator) Next() bool { +// next moves the iterator to the next element. If the iterator has been exhausted, +// and boolean with false should be returned. 
+func (it *blockIterator) next() bool { if it.exhausted || it.err != nil { return false } @@ -198,7 +368,6 @@ func (it *blockIterator) Next() bool { it.setErr(fmt.Errorf("failed to decode item at pos %d", it.dataPtr)) return false } - var val uint64 if it.dataPtr == int(it.restarts[it.restartPtr]) { val = v @@ -206,16 +375,48 @@ func (it *blockIterator) Next() bool { val = it.id + v } + // Decode the extension field + ext, shift, err := it.resolveExt(it.dataPtr + n) + if err != nil { + it.setErr(err) + return false + } + // Move to the next restart section if the data pointer crosses the boundary nextRestartPtr := it.restartPtr - if it.restartPtr < len(it.restarts)-1 && it.dataPtr+n == int(it.restarts[it.restartPtr+1]) { + if it.restartPtr < len(it.restarts)-1 && it.dataPtr+n+shift == int(it.restarts[it.restartPtr+1]) { nextRestartPtr = it.restartPtr + 1 } - it.set(it.dataPtr+n, nextRestartPtr, val) + it.set(it.dataPtr+n+shift, nextRestartPtr, val, ext) return true } +// Next implements the HistoryIndexIterator, moving the iterator to the next +// element. It's a wrapper of next with optional extension filter logic applied. +func (it *blockIterator) Next() bool { + if !it.next() { + return false + } + if it.filter == nil { + return true + } + for { + found, err := it.filter.exists(it.ext) + if err != nil { + it.setErr(err) + return false + } + if found { + break + } + if !it.next() { + return false + } + } + return true +} + // ID implements HistoryIndexIterator, returning the id of the element where the // iterator is positioned at. func (it *blockIterator) ID() uint64 { @@ -226,15 +427,15 @@ func (it *blockIterator) ID() uint64 { // Exhausting all the elements is not considered to be an error. func (it *blockIterator) Error() error { return it.err } -// blockLoader defines the method to retrieve the specific block for reading. 
-type blockLoader func(id uint32) (*blockReader, error) - // indexIterator is an iterator to traverse the history indices belonging to the // specific state entry. type indexIterator struct { // immutable fields descList []*indexBlockDesc - loader blockLoader + reader *indexReader + + // Optional extension filter + filter *extFilter // mutable fields blockIt *blockIterator @@ -243,10 +444,26 @@ type indexIterator struct { err error } -func newIndexIterator(descList []*indexBlockDesc, loader blockLoader) *indexIterator { +// newBlockIter initializes the block iterator with the specified block ID. +func (r *indexReader) newBlockIter(id uint32, filter *extFilter) (*blockIterator, error) { + br, ok := r.readers[id] + if !ok { + var err error + br, err = newBlockReader(readStateIndexBlock(r.state, r.db, id), r.bitmapSize != 0) + if err != nil { + return nil, err + } + r.readers[id] = br + } + return br.newIterator(filter), nil +} + +// newIterator initializes the index iterator with the specified extension filter. 
+func (r *indexReader) newIterator(filter *extFilter) *indexIterator { it := &indexIterator{ - descList: descList, - loader: loader, + descList: r.descList, + reader: r, + filter: filter, } it.reset() return it @@ -271,16 +488,32 @@ func (it *indexIterator) reset() { } func (it *indexIterator) open(blockPtr int) error { - id := it.descList[blockPtr].id - br, err := it.loader(id) + blockIt, err := it.reader.newBlockIter(it.descList[blockPtr].id, it.filter) if err != nil { return err } - it.blockIt = newBlockIterator(br.data, br.restarts) + it.blockIt = blockIt it.blockPtr = blockPtr return nil } +func (it *indexIterator) applyFilter(index int) (int, error) { + if it.filter == nil { + return index, nil + } + for index < len(it.descList) { + found, err := it.filter.contains(it.descList[index].extBitmap) + if err != nil { + return 0, err + } + if found { + break + } + index++ + } + return index, nil +} + // SeekGT moves the iterator to the first element whose id is greater than the // given number. It returns whether such element exists. // @@ -293,6 +526,11 @@ func (it *indexIterator) SeekGT(id uint64) bool { index := sort.Search(len(it.descList), func(i int) bool { return id < it.descList[i].max }) + index, err := it.applyFilter(index) + if err != nil { + it.setErr(err) + return false + } if index == len(it.descList) { return false } @@ -304,7 +542,13 @@ func (it *indexIterator) SeekGT(id uint64) bool { return false } } - return it.blockIt.SeekGT(id) + // Terminate if the element which is greater than the id can be found in the + // last block; otherwise move to the next block. It may happen that all the + // target elements in this block are all less than id. 
+ if it.blockIt.SeekGT(id) { + return true + } + return it.Next() } func (it *indexIterator) init() error { @@ -325,15 +569,23 @@ func (it *indexIterator) Next() bool { it.setErr(err) return false } - if it.blockIt.Next() { return true } - if it.blockPtr == len(it.descList)-1 { + it.blockPtr++ + + index, err := it.applyFilter(it.blockPtr) + if err != nil { + it.setErr(err) + return false + } + it.blockPtr = index + + if it.blockPtr == len(it.descList) { it.exhausted = true return false } - if err := it.open(it.blockPtr + 1); err != nil { + if err := it.open(it.blockPtr); err != nil { it.setErr(err) return false } diff --git a/triedb/pathdb/history_index_iterator_test.go b/triedb/pathdb/history_index_iterator_test.go index f0dd3fee4a..8b7591ce26 100644 --- a/triedb/pathdb/history_index_iterator_test.go +++ b/triedb/pathdb/history_index_iterator_test.go @@ -19,7 +19,9 @@ package pathdb import ( "errors" "fmt" + "maps" "math/rand" + "slices" "sort" "testing" @@ -28,12 +30,30 @@ import ( "github.com/ethereum/go-ethereum/ethdb" ) -func makeTestIndexBlock(count int) ([]byte, []uint64) { +func checkExt(f *extFilter, ext []uint16) bool { + if f == nil { + return true + } + fn := uint16(*f) + + for _, n := range ext { + if n == fn { + return true + } + if isAncestor(fn, n) { + return true + } + } + return false +} + +func makeTestIndexBlock(count int, bitmapSize int) ([]byte, []uint64, [][]uint16) { var ( marks = make(map[uint64]bool) - elements []uint64 + elements = make([]uint64, 0, count) + extList = make([][]uint16, 0, count) ) - bw, _ := newBlockWriter(nil, newIndexBlockDesc(0), 0) + bw, _ := newBlockWriter(nil, newIndexBlockDesc(0, bitmapSize), 0, bitmapSize != 0) for i := 0; i < count; i++ { n := uint64(rand.Uint32()) if marks[n] { @@ -45,17 +65,20 @@ func makeTestIndexBlock(count int) ([]byte, []uint64) { sort.Slice(elements, func(i, j int) bool { return elements[i] < elements[j] }) for i := 0; i < len(elements); i++ { - bw.append(elements[i]) + ext := 
randomExt(bitmapSize, 5) + extList = append(extList, ext) + bw.append(elements[i], ext) } data := bw.finish() - return data, elements + return data, elements, extList } -func makeTestIndexBlocks(db ethdb.KeyValueStore, stateIdent stateIdent, count int) []uint64 { +func makeTestIndexBlocks(db ethdb.KeyValueStore, stateIdent stateIdent, count int, bitmapSize int) ([]uint64, [][]uint16) { var ( marks = make(map[uint64]bool) elements []uint64 + extList [][]uint16 ) for i := 0; i < count; i++ { n := uint64(rand.Uint32()) @@ -67,15 +90,17 @@ func makeTestIndexBlocks(db ethdb.KeyValueStore, stateIdent stateIdent, count in } sort.Slice(elements, func(i, j int) bool { return elements[i] < elements[j] }) - iw, _ := newIndexWriter(db, stateIdent, 0) + iw, _ := newIndexWriter(db, stateIdent, 0, bitmapSize) for i := 0; i < len(elements); i++ { - iw.append(elements[i]) + ext := randomExt(bitmapSize, 5) + extList = append(extList, ext) + iw.append(elements[i], ext) } batch := db.NewBatch() iw.finish(batch) batch.Write() - return elements + return elements, extList } func checkSeekGT(it HistoryIndexIterator, input uint64, exp bool, expVal uint64) error { @@ -113,43 +138,40 @@ func checkNext(it HistoryIndexIterator, values []uint64) error { return it.Error() } -func TestBlockIteratorSeekGT(t *testing.T) { - /* 0-size index block is not allowed - - data, elements := makeTestIndexBlock(0) - testBlockIterator(t, data, elements) - */ - - data, elements := makeTestIndexBlock(1) - testBlockIterator(t, data, elements) - - data, elements = makeTestIndexBlock(indexBlockRestartLen) - testBlockIterator(t, data, elements) - - data, elements = makeTestIndexBlock(3 * indexBlockRestartLen) - testBlockIterator(t, data, elements) - - data, elements = makeTestIndexBlock(indexBlockEntriesCap) - testBlockIterator(t, data, elements) -} - -func testBlockIterator(t *testing.T, data []byte, elements []uint64) { - br, err := newBlockReader(data) - if err != nil { - t.Fatalf("Failed to open the block for 
reading, %v", err) +func verifySeekGT(t *testing.T, elements []uint64, ext [][]uint16, newIter func(filter *extFilter) HistoryIndexIterator) { + set := make(map[extFilter]bool) + for _, extList := range ext { + for _, f := range extList { + set[extFilter(f)] = true + } } - it := newBlockIterator(br.data, br.restarts) + filters := slices.Collect(maps.Keys(set)) for i := 0; i < 128; i++ { + var filter *extFilter + if rand.Intn(2) == 0 && len(filters) > 0 { + filter = &filters[rand.Intn(len(filters))] + } else { + filter = nil + } + var input uint64 if rand.Intn(2) == 0 { input = elements[rand.Intn(len(elements))] } else { input = uint64(rand.Uint32()) } + index := sort.Search(len(elements), func(i int) bool { return elements[i] > input }) + for index < len(elements) { + if checkExt(filter, ext[index]) { + break + } + index++ + } + var ( exp bool expVal uint64 @@ -160,10 +182,17 @@ func testBlockIterator(t *testing.T, data []byte, elements []uint64) { } else { exp = true expVal = elements[index] - if index < len(elements) { - remains = elements[index+1:] + + index++ + for index < len(elements) { + if checkExt(filter, ext[index]) { + remains = append(remains, elements[index]) + } + index++ } } + + it := newIter(filter) if err := checkSeekGT(it, input, exp, expVal); err != nil { t.Fatal(err) } @@ -175,62 +204,71 @@ func testBlockIterator(t *testing.T, data []byte, elements []uint64) { } } +func verifyTraversal(t *testing.T, elements []uint64, ext [][]uint16, newIter func(filter *extFilter) HistoryIndexIterator) { + set := make(map[extFilter]bool) + for _, extList := range ext { + for _, f := range extList { + set[extFilter(f)] = true + } + } + filters := slices.Collect(maps.Keys(set)) + + for i := 0; i < 16; i++ { + var filter *extFilter + if len(filters) > 0 { + filter = &filters[rand.Intn(len(filters))] + } else { + filter = nil + } + it := newIter(filter) + + var ( + pos int + exp []uint64 + ) + for pos < len(elements) { + if checkExt(filter, ext[pos]) { + exp = 
append(exp, elements[pos]) + } + pos++ + } + if err := checkNext(it, exp); err != nil { + t.Fatal(err) + } + } +} + +func TestBlockIteratorSeekGT(t *testing.T) { + for _, size := range []int{0, 2, 34} { + for _, n := range []int{1, indexBlockRestartLen, 3 * indexBlockRestartLen} { + data, elements, ext := makeTestIndexBlock(n, size) + + verifySeekGT(t, elements, ext, func(filter *extFilter) HistoryIndexIterator { + br, err := newBlockReader(data, size != 0) + if err != nil { + t.Fatalf("Failed to open the block for reading, %v", err) + } + return br.newIterator(filter) + }) + } + } +} + func TestIndexIteratorSeekGT(t *testing.T) { ident := newAccountIdent(common.Hash{0x1}) - dbA := rawdb.NewMemoryDatabase() - testIndexIterator(t, ident, dbA, makeTestIndexBlocks(dbA, ident, 1)) + for _, size := range []int{0, 2, 34} { + for _, n := range []int{1, 4096, 3 * 4096} { + db := rawdb.NewMemoryDatabase() + elements, ext := makeTestIndexBlocks(db, ident, n, size) - dbB := rawdb.NewMemoryDatabase() - testIndexIterator(t, ident, dbB, makeTestIndexBlocks(dbB, ident, 3*indexBlockEntriesCap)) - - dbC := rawdb.NewMemoryDatabase() - testIndexIterator(t, ident, dbC, makeTestIndexBlocks(dbC, ident, indexBlockEntriesCap-1)) - - dbD := rawdb.NewMemoryDatabase() - testIndexIterator(t, ident, dbD, makeTestIndexBlocks(dbD, ident, indexBlockEntriesCap+1)) -} - -func testIndexIterator(t *testing.T, stateIdent stateIdent, db ethdb.Database, elements []uint64) { - ir, err := newIndexReader(db, stateIdent) - if err != nil { - t.Fatalf("Failed to open the index reader, %v", err) - } - it := newIndexIterator(ir.descList, func(id uint32) (*blockReader, error) { - return newBlockReader(readStateIndexBlock(stateIdent, db, id)) - }) - - for i := 0; i < 128; i++ { - var input uint64 - if rand.Intn(2) == 0 { - input = elements[rand.Intn(len(elements))] - } else { - input = uint64(rand.Uint32()) - } - index := sort.Search(len(elements), func(i int) bool { - return elements[i] > input - }) - var ( - 
exp bool - expVal uint64 - remains []uint64 - ) - if index == len(elements) { - exp = false - } else { - exp = true - expVal = elements[index] - if index < len(elements) { - remains = elements[index+1:] - } - } - if err := checkSeekGT(it, input, exp, expVal); err != nil { - t.Fatal(err) - } - if exp { - if err := checkNext(it, remains); err != nil { - t.Fatal(err) - } + verifySeekGT(t, elements, ext, func(filter *extFilter) HistoryIndexIterator { + ir, err := newIndexReader(db, ident, size) + if err != nil { + t.Fatalf("Failed to open the index reader, %v", err) + } + return ir.newIterator(filter) + }) } } } @@ -242,56 +280,36 @@ func TestBlockIteratorTraversal(t *testing.T) { testBlockIterator(t, data, elements) */ - data, elements := makeTestIndexBlock(1) - testBlockIteratorTraversal(t, data, elements) + for _, size := range []int{0, 2, 34} { + for _, n := range []int{1, indexBlockRestartLen, 3 * indexBlockRestartLen} { + data, elements, ext := makeTestIndexBlock(n, size) - data, elements = makeTestIndexBlock(indexBlockRestartLen) - testBlockIteratorTraversal(t, data, elements) - - data, elements = makeTestIndexBlock(3 * indexBlockRestartLen) - testBlockIteratorTraversal(t, data, elements) - - data, elements = makeTestIndexBlock(indexBlockEntriesCap) - testBlockIteratorTraversal(t, data, elements) -} - -func testBlockIteratorTraversal(t *testing.T, data []byte, elements []uint64) { - br, err := newBlockReader(data) - if err != nil { - t.Fatalf("Failed to open the block for reading, %v", err) - } - it := newBlockIterator(br.data, br.restarts) - - if err := checkNext(it, elements); err != nil { - t.Fatal(err) + verifyTraversal(t, elements, ext, func(filter *extFilter) HistoryIndexIterator { + br, err := newBlockReader(data, size != 0) + if err != nil { + t.Fatalf("Failed to open the block for reading, %v", err) + } + return br.newIterator(filter) + }) + } } } func TestIndexIteratorTraversal(t *testing.T) { ident := newAccountIdent(common.Hash{0x1}) - dbA := 
rawdb.NewMemoryDatabase() - testIndexIteratorTraversal(t, ident, dbA, makeTestIndexBlocks(dbA, ident, 1)) + for _, size := range []int{0, 2, 34} { + for _, n := range []int{1, 4096, 3 * 4096} { + db := rawdb.NewMemoryDatabase() + elements, ext := makeTestIndexBlocks(db, ident, n, size) - dbB := rawdb.NewMemoryDatabase() - testIndexIteratorTraversal(t, ident, dbB, makeTestIndexBlocks(dbB, ident, 3*indexBlockEntriesCap)) - - dbC := rawdb.NewMemoryDatabase() - testIndexIteratorTraversal(t, ident, dbC, makeTestIndexBlocks(dbC, ident, indexBlockEntriesCap-1)) - - dbD := rawdb.NewMemoryDatabase() - testIndexIteratorTraversal(t, ident, dbD, makeTestIndexBlocks(dbD, ident, indexBlockEntriesCap+1)) -} - -func testIndexIteratorTraversal(t *testing.T, stateIdent stateIdent, db ethdb.KeyValueReader, elements []uint64) { - ir, err := newIndexReader(db, stateIdent) - if err != nil { - t.Fatalf("Failed to open the index reader, %v", err) - } - it := newIndexIterator(ir.descList, func(id uint32) (*blockReader, error) { - return newBlockReader(readStateIndexBlock(stateIdent, db, id)) - }) - if err := checkNext(it, elements); err != nil { - t.Fatal(err) + verifyTraversal(t, elements, ext, func(filter *extFilter) HistoryIndexIterator { + ir, err := newIndexReader(db, ident, size) + if err != nil { + t.Fatalf("Failed to open the index reader, %v", err) + } + return ir.newIterator(filter) + }) + } } } diff --git a/triedb/pathdb/history_index_test.go b/triedb/pathdb/history_index_test.go index 42cb04b001..2644db46b5 100644 --- a/triedb/pathdb/history_index_test.go +++ b/triedb/pathdb/history_index_test.go @@ -29,19 +29,25 @@ import ( ) func TestIndexReaderBasic(t *testing.T) { + testIndexReaderBasic(t, 0) + testIndexReaderBasic(t, 2) + testIndexReaderBasic(t, 34) +} + +func testIndexReaderBasic(t *testing.T, bitmapSize int) { elements := []uint64{ 1, 5, 10, 11, 20, } db := rawdb.NewMemoryDatabase() - bw, _ := newIndexWriter(db, newAccountIdent(common.Hash{0xa}), 0) + bw, _ := 
newIndexWriter(db, newAccountIdent(common.Hash{0xa}), 0, bitmapSize) for i := 0; i < len(elements); i++ { - bw.append(elements[i]) + bw.append(elements[i], randomExt(bitmapSize, 5)) } batch := db.NewBatch() bw.finish(batch) batch.Write() - br, err := newIndexReader(db, newAccountIdent(common.Hash{0xa})) + br, err := newIndexReader(db, newAccountIdent(common.Hash{0xa}), bitmapSize) if err != nil { t.Fatalf("Failed to construct the index reader, %v", err) } @@ -68,22 +74,28 @@ func TestIndexReaderBasic(t *testing.T) { } func TestIndexReaderLarge(t *testing.T) { + testIndexReaderLarge(t, 0) + testIndexReaderLarge(t, 2) + testIndexReaderLarge(t, 34) +} + +func testIndexReaderLarge(t *testing.T, bitmapSize int) { var elements []uint64 - for i := 0; i < 10*indexBlockEntriesCap; i++ { + for i := 0; i < 10*4096; i++ { elements = append(elements, rand.Uint64()) } slices.Sort(elements) db := rawdb.NewMemoryDatabase() - bw, _ := newIndexWriter(db, newAccountIdent(common.Hash{0xa}), 0) + bw, _ := newIndexWriter(db, newAccountIdent(common.Hash{0xa}), 0, bitmapSize) for i := 0; i < len(elements); i++ { - bw.append(elements[i]) + bw.append(elements[i], randomExt(bitmapSize, 5)) } batch := db.NewBatch() bw.finish(batch) batch.Write() - br, err := newIndexReader(db, newAccountIdent(common.Hash{0xa})) + br, err := newIndexReader(db, newAccountIdent(common.Hash{0xa}), bitmapSize) if err != nil { t.Fatalf("Failed to construct the index reader, %v", err) } @@ -107,7 +119,7 @@ func TestIndexReaderLarge(t *testing.T) { } func TestEmptyIndexReader(t *testing.T) { - br, err := newIndexReader(rawdb.NewMemoryDatabase(), newAccountIdent(common.Hash{0xa})) + br, err := newIndexReader(rawdb.NewMemoryDatabase(), newAccountIdent(common.Hash{0xa}), 0) if err != nil { t.Fatalf("Failed to construct the index reader, %v", err) } @@ -121,27 +133,33 @@ func TestEmptyIndexReader(t *testing.T) { } func TestIndexWriterBasic(t *testing.T) { + testIndexWriterBasic(t, 0) + testIndexWriterBasic(t, 2) + 
testIndexWriterBasic(t, 34) +} + +func testIndexWriterBasic(t *testing.T, bitmapSize int) { db := rawdb.NewMemoryDatabase() - iw, _ := newIndexWriter(db, newAccountIdent(common.Hash{0xa}), 0) - iw.append(2) - if err := iw.append(1); err == nil { + iw, _ := newIndexWriter(db, newAccountIdent(common.Hash{0xa}), 0, bitmapSize) + iw.append(2, randomExt(bitmapSize, 5)) + if err := iw.append(1, randomExt(bitmapSize, 5)); err == nil { t.Fatal("out-of-order insertion is not expected") } var maxElem uint64 for i := 0; i < 10; i++ { - iw.append(uint64(i + 3)) + iw.append(uint64(i+3), randomExt(bitmapSize, 5)) maxElem = uint64(i + 3) } batch := db.NewBatch() iw.finish(batch) batch.Write() - iw, err := newIndexWriter(db, newAccountIdent(common.Hash{0xa}), maxElem) + iw, err := newIndexWriter(db, newAccountIdent(common.Hash{0xa}), maxElem, bitmapSize) if err != nil { t.Fatalf("Failed to construct the block writer, %v", err) } for i := 0; i < 10; i++ { - if err := iw.append(uint64(i + 100)); err != nil { + if err := iw.append(uint64(i+100), randomExt(bitmapSize, 5)); err != nil { t.Fatalf("Failed to append item, %v", err) } } @@ -149,61 +167,37 @@ func TestIndexWriterBasic(t *testing.T) { } func TestIndexWriterWithLimit(t *testing.T) { - db := rawdb.NewMemoryDatabase() - iw, _ := newIndexWriter(db, newAccountIdent(common.Hash{0xa}), 0) + testIndexWriterWithLimit(t, 0) + testIndexWriterWithLimit(t, 2) + testIndexWriterWithLimit(t, 34) +} - var maxElem uint64 - for i := 0; i < indexBlockEntriesCap*2; i++ { - iw.append(uint64(i + 1)) - maxElem = uint64(i + 1) +func testIndexWriterWithLimit(t *testing.T, bitmapSize int) { + db := rawdb.NewMemoryDatabase() + iw, _ := newIndexWriter(db, newAccountIdent(common.Hash{0xa}), 0, bitmapSize) + + // 200 iterations (with around 50 bytes extension) is enough to cross + // the block boundary (4096 bytes) + for i := 0; i < 200; i++ { + iw.append(uint64(i+1), randomExt(bitmapSize, 50)) } batch := db.NewBatch() iw.finish(batch) batch.Write() - 
suites := []struct { - limit uint64 - expMax uint64 - }{ - // nothing to truncate - { - maxElem, maxElem, - }, - // truncate the last element - { - maxElem - 1, maxElem - 1, - }, - // truncation around the block boundary - { - uint64(indexBlockEntriesCap + 1), - uint64(indexBlockEntriesCap + 1), - }, - // truncation around the block boundary - { - uint64(indexBlockEntriesCap), - uint64(indexBlockEntriesCap), - }, - { - uint64(1), uint64(1), - }, - // truncate the entire index, it's in theory invalid - { - uint64(0), uint64(0), - }, - } - for i, suite := range suites { - iw, err := newIndexWriter(db, newAccountIdent(common.Hash{0xa}), suite.limit) + for i := 0; i < 200; i++ { + limit := uint64(i + 1) + iw, err := newIndexWriter(db, newAccountIdent(common.Hash{0xa}), limit, bitmapSize) if err != nil { t.Fatalf("Failed to construct the index writer, %v", err) } - if iw.lastID != suite.expMax { - t.Fatalf("Test %d, unexpected max value, got %d, want %d", i, iw.lastID, suite.expMax) + if iw.lastID != limit { + t.Fatalf("Test %d, unexpected max value, got %d, want %d", i, iw.lastID, limit) } - // Re-fill the elements var maxElem uint64 - for elem := suite.limit + 1; elem < indexBlockEntriesCap*4; elem++ { - if err := iw.append(elem); err != nil { + for elem := limit + 1; elem < 500; elem++ { + if err := iw.append(elem, randomExt(bitmapSize, 5)); err != nil { t.Fatalf("Failed to append value %d: %v", elem, err) } maxElem = elem @@ -215,12 +209,20 @@ func TestIndexWriterWithLimit(t *testing.T) { } func TestIndexDeleterBasic(t *testing.T) { - db := rawdb.NewMemoryDatabase() - iw, _ := newIndexWriter(db, newAccountIdent(common.Hash{0xa}), 0) + testIndexDeleterBasic(t, 0) + testIndexDeleterBasic(t, 2) + testIndexDeleterBasic(t, 34) +} +func testIndexDeleterBasic(t *testing.T, bitmapSize int) { + db := rawdb.NewMemoryDatabase() + iw, _ := newIndexWriter(db, newAccountIdent(common.Hash{0xa}), 0, bitmapSize) + + // 200 iterations (with around 50 bytes extension) is enough to 
cross + // the block boundary (4096 bytes) var maxElem uint64 - for i := 0; i < indexBlockEntriesCap*4; i++ { - iw.append(uint64(i + 1)) + for i := 0; i < 200; i++ { + iw.append(uint64(i+1), randomExt(bitmapSize, 50)) maxElem = uint64(i + 1) } batch := db.NewBatch() @@ -228,11 +230,11 @@ func TestIndexDeleterBasic(t *testing.T) { batch.Write() // Delete unknown id, the request should be rejected - id, _ := newIndexDeleter(db, newAccountIdent(common.Hash{0xa}), maxElem) - if err := id.pop(indexBlockEntriesCap * 5); err == nil { + id, _ := newIndexDeleter(db, newAccountIdent(common.Hash{0xa}), maxElem, bitmapSize) + if err := id.pop(500); err == nil { t.Fatal("Expect error to occur for unknown id") } - for i := indexBlockEntriesCap * 4; i >= 1; i-- { + for i := 200; i >= 1; i-- { if err := id.pop(uint64(i)); err != nil { t.Fatalf("Unexpected error for element popping, %v", err) } @@ -243,57 +245,33 @@ func TestIndexDeleterBasic(t *testing.T) { } func TestIndexDeleterWithLimit(t *testing.T) { - db := rawdb.NewMemoryDatabase() - iw, _ := newIndexWriter(db, newAccountIdent(common.Hash{0xa}), 0) + testIndexDeleterWithLimit(t, 0) + testIndexDeleterWithLimit(t, 2) + testIndexDeleterWithLimit(t, 34) +} - var maxElem uint64 - for i := 0; i < indexBlockEntriesCap*2; i++ { - iw.append(uint64(i + 1)) - maxElem = uint64(i + 1) +func testIndexDeleterWithLimit(t *testing.T, bitmapSize int) { + db := rawdb.NewMemoryDatabase() + iw, _ := newIndexWriter(db, newAccountIdent(common.Hash{0xa}), 0, bitmapSize) + + // 200 iterations (with around 50 bytes extension) is enough to cross + // the block boundary (4096 bytes) + for i := 0; i < 200; i++ { + iw.append(uint64(i+1), randomExt(bitmapSize, 50)) } batch := db.NewBatch() iw.finish(batch) batch.Write() - suites := []struct { - limit uint64 - expMax uint64 - }{ - // nothing to truncate - { - maxElem, maxElem, - }, - // truncate the last element - { - maxElem - 1, maxElem - 1, - }, - // truncation around the block boundary - { - 
uint64(indexBlockEntriesCap + 1), - uint64(indexBlockEntriesCap + 1), - }, - // truncation around the block boundary - { - uint64(indexBlockEntriesCap), - uint64(indexBlockEntriesCap), - }, - { - uint64(1), uint64(1), - }, - // truncate the entire index, it's in theory invalid - { - uint64(0), uint64(0), - }, - } - for i, suite := range suites { - id, err := newIndexDeleter(db, newAccountIdent(common.Hash{0xa}), suite.limit) + for i := 0; i < 200; i++ { + limit := uint64(i + 1) + id, err := newIndexDeleter(db, newAccountIdent(common.Hash{0xa}), limit, bitmapSize) if err != nil { t.Fatalf("Failed to construct the index writer, %v", err) } - if id.lastID != suite.expMax { - t.Fatalf("Test %d, unexpected max value, got %d, want %d", i, id.lastID, suite.expMax) + if id.lastID != limit { + t.Fatalf("Test %d, unexpected max value, got %d, want %d", i, iw.lastID, limit) } - // Keep removing elements for elem := id.lastID; elem > 0; elem-- { if err := id.pop(elem); err != nil { @@ -339,7 +317,7 @@ func TestBatchIndexerWrite(t *testing.T) { } } for addrHash, indexes := range accounts { - ir, _ := newIndexReader(db, newAccountIdent(addrHash)) + ir, _ := newIndexReader(db, newAccountIdent(addrHash), 0) for i := 0; i < len(indexes)-1; i++ { n, err := ir.readGreaterThan(indexes[i]) if err != nil { @@ -359,7 +337,7 @@ func TestBatchIndexerWrite(t *testing.T) { } for addrHash, slots := range storages { for slotHash, indexes := range slots { - ir, _ := newIndexReader(db, newStorageIdent(addrHash, slotHash)) + ir, _ := newIndexReader(db, newStorageIdent(addrHash, slotHash), 0) for i := 0; i < len(indexes)-1; i++ { n, err := ir.readGreaterThan(indexes[i]) if err != nil { diff --git a/triedb/pathdb/history_indexer.go b/triedb/pathdb/history_indexer.go index 9af7a96dc6..ddb4a293cc 100644 --- a/triedb/pathdb/history_indexer.go +++ b/triedb/pathdb/history_indexer.go @@ -34,7 +34,8 @@ import ( const ( // The batch size for reading state histories - historyReadBatch = 1000 + 
historyReadBatch = 1000 + historyIndexBatch = 8 * 1024 * 1024 // The number of state history indexes for constructing or deleting as batch stateHistoryIndexV0 = uint8(0) // initial version of state index structure stateHistoryIndexVersion = stateHistoryIndexV0 // the current state index version @@ -191,12 +192,12 @@ func (b *batchIndexer) finish(force bool) error { for ident, list := range b.index { eg.Go(func() error { if !b.delete { - iw, err := newIndexWriter(b.db, ident, indexed) + iw, err := newIndexWriter(b.db, ident, indexed, 0) if err != nil { return err } for _, n := range list { - if err := iw.append(n); err != nil { + if err := iw.append(n, nil); err != nil { return err } } @@ -204,7 +205,7 @@ func (b *batchIndexer) finish(force bool) error { iw.finish(batch) }) } else { - id, err := newIndexDeleter(b.db, ident, indexed) + id, err := newIndexDeleter(b.db, ident, indexed, 0) if err != nil { return err } diff --git a/triedb/pathdb/history_reader.go b/triedb/pathdb/history_reader.go index 1bf4cf648d..69e7d5bd22 100644 --- a/triedb/pathdb/history_reader.go +++ b/triedb/pathdb/history_reader.go @@ -40,8 +40,8 @@ type indexReaderWithLimitTag struct { } // newIndexReaderWithLimitTag constructs a index reader with indexing position. 
-func newIndexReaderWithLimitTag(db ethdb.KeyValueReader, state stateIdent, limit uint64) (*indexReaderWithLimitTag, error) { - r, err := newIndexReader(db, state) +func newIndexReaderWithLimitTag(db ethdb.KeyValueReader, state stateIdent, limit uint64, bitmapSize int) (*indexReaderWithLimitTag, error) { + r, err := newIndexReader(db, state, bitmapSize) if err != nil { return nil, err } @@ -252,7 +252,7 @@ func (r *historyReader) read(state stateIdentQuery, stateID uint64, lastID uint6 // state retrieval ir, ok := r.readers[state.String()] if !ok { - ir, err = newIndexReaderWithLimitTag(r.disk, state.stateIdent, metadata.Last) + ir, err = newIndexReaderWithLimitTag(r.disk, state.stateIdent, metadata.Last, 0) if err != nil { return nil, err } diff --git a/triedb/pathdb/history_trienode.go b/triedb/pathdb/history_trienode.go index 1004106af9..6c0c0fe8cc 100644 --- a/triedb/pathdb/history_trienode.go +++ b/triedb/pathdb/history_trienode.go @@ -159,17 +159,6 @@ func newTrienodeHistory(root common.Hash, parent common.Hash, block uint64, node } } -// sharedLen returns the length of the common prefix shared by a and b. -func sharedLen(a, b []byte) int { - n := min(len(a), len(b)) - for i := range n { - if a[i] != b[i] { - return i - } - } - return n -} - // typ implements the history interface, returning the historical data type held. 
func (h *trienodeHistory) typ() historyType { return typeTrienodeHistory @@ -219,7 +208,7 @@ func (h *trienodeHistory) encode() ([]byte, []byte, []byte, error) { restarts = append(restarts, internalValOffset) prefixLen = 0 } else { - prefixLen = sharedLen(prevKey, key) + prefixLen = commonPrefixLen(prevKey, key) } value := h.nodes[owner][path] diff --git a/triedb/pathdb/history_trienode_test.go b/triedb/pathdb/history_trienode_test.go index be4740a904..0c0422f00f 100644 --- a/triedb/pathdb/history_trienode_test.go +++ b/triedb/pathdb/history_trienode_test.go @@ -580,8 +580,8 @@ func TestTrienodeHistoryReaderIterator(t *testing.T) { } } -// TestSharedLen tests the sharedLen helper function -func TestSharedLen(t *testing.T) { +// TestCommonPrefixLen tests the commonPrefixLen helper function +func TestCommonPrefixLen(t *testing.T) { tests := []struct { a, b []byte expected int @@ -610,13 +610,13 @@ func TestSharedLen(t *testing.T) { } for i, test := range tests { - result := sharedLen(test.a, test.b) + result := commonPrefixLen(test.a, test.b) if result != test.expected { t.Errorf("Test %d: sharedLen(%q, %q) = %d, expected %d", i, test.a, test.b, result, test.expected) } // Test commutativity - resultReverse := sharedLen(test.b, test.a) + resultReverse := commonPrefixLen(test.b, test.a) if result != resultReverse { t.Errorf("Test %d: sharedLen is not commutative: sharedLen(a,b)=%d, sharedLen(b,a)=%d", i, result, resultReverse) diff --git a/triedb/pathdb/history_trienode_utils.go b/triedb/pathdb/history_trienode_utils.go new file mode 100644 index 0000000000..0513343404 --- /dev/null +++ b/triedb/pathdb/history_trienode_utils.go @@ -0,0 +1,83 @@ +// Copyright 2025 The go-ethereum Authors +// This file is part of the go-ethereum library. 
+// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package pathdb + +import ( + "encoding/binary" + "fmt" + "slices" +) + +// commonPrefixLen returns the length of the common prefix shared by a and b. +func commonPrefixLen(a, b []byte) int { + n := min(len(a), len(b)) + for i := range n { + if a[i] != b[i] { + return i + } + } + return n +} + +// encodeIDs sorts the given list of uint16 IDs and encodes them into a +// compact byte slice using variable-length unsigned integer encoding. +func encodeIDs(ids []uint16) []byte { + slices.Sort(ids) + buf := make([]byte, 0, len(ids)) + for _, id := range ids { + buf = binary.AppendUvarint(buf, uint64(id)) + } + return buf +} + +// decodeIDs decodes a sequence of variable-length encoded uint16 IDs from the +// given byte slice and returns them as a set. +// +// Returns an error if the input buffer does not contain a complete Uvarint value. +func decodeIDs(buf []byte) ([]uint16, error) { + var res []uint16 + for len(buf) > 0 { + id, n := binary.Uvarint(buf) + if n <= 0 { + return nil, fmt.Errorf("too short for decoding node id, %v", buf) + } + buf = buf[n:] + res = append(res, uint16(id)) + } + return res, nil +} + +// isAncestor reports whether node x is the ancestor of node y. 
+func isAncestor(x, y uint16) bool { + for y > x { + y = (y - 1) / 16 // parentID(y) = (y - 1) / 16 + if y == x { + return true + } + } + return false +} + +// isBitSet reports whether the bit at `index` in the byte slice `b` is set. +func isBitSet(b []byte, index int) bool { + return b[index/8]&(1<<(7-index%8)) != 0 +} + +// setBit sets the bit at `index` in the byte slice `b` to 1. +func setBit(b []byte, index int) { + b[index/8] |= 1 << (7 - index%8) +} diff --git a/triedb/pathdb/history_trienode_utils_test.go b/triedb/pathdb/history_trienode_utils_test.go new file mode 100644 index 0000000000..17eabb2a98 --- /dev/null +++ b/triedb/pathdb/history_trienode_utils_test.go @@ -0,0 +1,81 @@ +// Copyright 2025 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . 
+ +package pathdb + +import ( + "bytes" + "testing" +) + +func TestIsAncestor(t *testing.T) { + suites := []struct { + x, y uint16 + want bool + }{ + {0, 1, true}, + {0, 16, true}, + {0, 17, true}, + {0, 272, true}, + + {1, 0, false}, + {1, 2, false}, + {1, 17, true}, + {1, 18, true}, + {17, 273, true}, + {1, 1, false}, + } + for _, tc := range suites { + result := isAncestor(tc.x, tc.y) + if result != tc.want { + t.Fatalf("isAncestor(%d, %d) = %v, want %v", tc.x, tc.y, result, tc.want) + } + } +} + +func TestBitmapSet(t *testing.T) { + suites := []struct { + index int + expect []byte + }{ + { + 0, []byte{0b10000000, 0x0}, + }, + { + 1, []byte{0b01000000, 0x0}, + }, + { + 7, []byte{0b00000001, 0x0}, + }, + { + 8, []byte{0b00000000, 0b10000000}, + }, + { + 15, []byte{0b00000000, 0b00000001}, + }, + } + for _, tc := range suites { + var buf [2]byte + setBit(buf[:], tc.index) + + if !bytes.Equal(buf[:], tc.expect) { + t.Fatalf("bitmap = %v, want %v", buf, tc.expect) + } + if !isBitSet(buf[:], tc.index) { + t.Fatal("bit is not set") + } + } +} From 52f998d5ec9b41115eaffd84fecb0353dc1ebb44 Mon Sep 17 00:00:00 2001 From: Madison carter Date: Thu, 8 Jan 2026 05:09:29 -0500 Subject: [PATCH 223/277] ethclient: omit nil address/topics from filter args (#33464) Fixes #33369 This omits "topics" and "addresses" from the filter when they are unspecified. It is required for interoperability with some server implementations that cannot handle `null` for these fields. 
--- ethclient/ethclient.go | 9 ++++++--- ethclient/types_test.go | 12 ++++++++++++ 2 files changed, 18 insertions(+), 3 deletions(-) diff --git a/ethclient/ethclient.go b/ethclient/ethclient.go index 5008378da6..426194b59f 100644 --- a/ethclient/ethclient.go +++ b/ethclient/ethclient.go @@ -497,9 +497,12 @@ func (ec *Client) SubscribeFilterLogs(ctx context.Context, q ethereum.FilterQuer } func toFilterArg(q ethereum.FilterQuery) (interface{}, error) { - arg := map[string]interface{}{ - "address": q.Addresses, - "topics": q.Topics, + arg := map[string]interface{}{} + if q.Addresses != nil { + arg["address"] = q.Addresses + } + if q.Topics != nil { + arg["topics"] = q.Topics } if q.BlockHash != nil { arg["blockHash"] = *q.BlockHash diff --git a/ethclient/types_test.go b/ethclient/types_test.go index 02f9f21758..dcb9a579b7 100644 --- a/ethclient/types_test.go +++ b/ethclient/types_test.go @@ -41,6 +41,18 @@ func TestToFilterArg(t *testing.T) { output interface{} err error }{ + { + "without addresses", + ethereum.FilterQuery{ + FromBlock: big.NewInt(1), + ToBlock: big.NewInt(2), + }, + map[string]interface{}{ + "fromBlock": "0x1", + "toBlock": "0x2", + }, + nil, + }, { "without BlockHash", ethereum.FilterQuery{ From f51870e40e3888d4f0471f90f7bbb493287f3c5b Mon Sep 17 00:00:00 2001 From: rjl493456442 Date: Thu, 8 Jan 2026 21:58:02 +0800 Subject: [PATCH 224/277] rlp, trie, triedb/pathdb: compress trienode history (#32913) This pull request introduces a mechanism to compress trienode history by storing only the node diffs between consecutive versions. - For full nodes, only the modified children are recorded in the history; - For short nodes, only the modified value is stored; If the node type has changed, or if the node is newly created or deleted, the entire node value is stored instead. To mitigate the overhead of reassembling nodes from diffs during history reads, checkpoints are introduced by periodically storing full node values. 
The current checkpoint interval is set to every 16 mutations, though this parameter may be made configurable in the future. --- rlp/raw.go | 29 ++++ rlp/raw_test.go | 266 +++++++++++++++++++++++++++++++++ trie/node.go | 69 +++++++++ trie/node_test.go | 283 ++++++++++++++++++++++++++++++++++++ triedb/pathdb/nodes.go | 272 ++++++++++++++++++++++++++++++++++ triedb/pathdb/nodes_test.go | 48 ++++++ 6 files changed, 967 insertions(+) diff --git a/rlp/raw.go b/rlp/raw.go index cec90346a1..114037df78 100644 --- a/rlp/raw.go +++ b/rlp/raw.go @@ -152,6 +152,35 @@ func CountValues(b []byte) (int, error) { return i, nil } +// SplitListValues extracts the raw elements from the list RLP-encoding blob. +func SplitListValues(b []byte) ([][]byte, error) { + b, _, err := SplitList(b) + if err != nil { + return nil, err + } + var elements [][]byte + for len(b) > 0 { + _, tagsize, size, err := readKind(b) + if err != nil { + return nil, err + } + elements = append(elements, b[:tagsize+size]) + b = b[tagsize+size:] + } + return elements, nil +} + +// MergeListValues takes a list of raw elements and rlp-encodes them as list. 
+func MergeListValues(elems [][]byte) ([]byte, error) { + w := NewEncoderBuffer(nil) + offset := w.List() + for _, elem := range elems { + w.Write(elem) + } + w.ListEnd(offset) + return w.ToBytes(), nil +} + func readKind(buf []byte) (k Kind, tagsize, contentsize uint64, err error) { if len(buf) == 0 { return 0, 0, 0, io.ErrUnexpectedEOF diff --git a/rlp/raw_test.go b/rlp/raw_test.go index 7b3255eca3..2ed77b384c 100644 --- a/rlp/raw_test.go +++ b/rlp/raw_test.go @@ -336,3 +336,269 @@ func TestBytesSize(t *testing.T) { } } } + +func TestSplitListValues(t *testing.T) { + tests := []struct { + name string + input string // hex-encoded RLP list + want []string // hex-encoded expected elements + wantErr error + }{ + { + name: "empty list", + input: "C0", + want: []string{}, + }, + { + name: "single byte element", + input: "C101", + want: []string{"01"}, + }, + { + name: "single empty string", + input: "C180", + want: []string{"80"}, + }, + { + name: "two byte elements", + input: "C20102", + want: []string{"01", "02"}, + }, + { + name: "three elements", + input: "C3010203", + want: []string{"01", "02", "03"}, + }, + { + name: "mixed size elements", + input: "C80182020283030303", + want: []string{"01", "820202", "83030303"}, + }, + { + name: "string elements", + input: "C88363617483646F67", + want: []string{"83636174", "83646F67"}, // cat,dog + }, + { + name: "nested list element", + input: "C4C3010203", // [[1,2,3]] + want: []string{"C3010203"}, // [1,2,3] + }, + { + name: "multiple nested lists", + input: "C6C20102C20304", // [[1,2],[3,4]] + want: []string{"C20102", "C20304"}, // [1,2], [3,4] + }, + { + name: "large list", + input: "C6010203040506", + want: []string{"01", "02", "03", "04", "05", "06"}, + }, + { + name: "list with empty strings", + input: "C3808080", + want: []string{"80", "80", "80"}, + }, + // Error cases + { + name: "single byte", + input: "01", + wantErr: ErrExpectedList, + }, + { + name: "string", + input: "83636174", + wantErr: ErrExpectedList, + 
}, + { + name: "empty input", + input: "", + wantErr: io.ErrUnexpectedEOF, + }, + { + name: "invalid list - value too large", + input: "C60102030405", + wantErr: ErrValueTooLarge, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := SplitListValues(unhex(tt.input)) + if !errors.Is(err, tt.wantErr) { + t.Errorf("SplitListValues() error = %v, wantErr %v", err, tt.wantErr) + return + } + if err != nil { + return + } + if len(got) != len(tt.want) { + t.Errorf("SplitListValues() got %d elements, want %d", len(got), len(tt.want)) + return + } + for i, elem := range got { + want := unhex(tt.want[i]) + if !bytes.Equal(elem, want) { + t.Errorf("SplitListValues() element[%d] = %x, want %x", i, elem, want) + } + } + }) + } +} + +func TestMergeListValues(t *testing.T) { + tests := []struct { + name string + elems []string // hex-encoded RLP elements + want string // hex-encoded expected result + wantErr error + }{ + { + name: "empty list", + elems: []string{}, + want: "C0", + }, + { + name: "single byte element", + elems: []string{"01"}, + want: "C101", + }, + { + name: "single empty string", + elems: []string{"80"}, + want: "C180", + }, + { + name: "two byte elements", + elems: []string{"01", "02"}, + want: "C20102", + }, + { + name: "three elements", + elems: []string{"01", "02", "03"}, + want: "C3010203", + }, + { + name: "mixed size elements", + elems: []string{"01", "820202", "83030303"}, + want: "C80182020283030303", + }, + { + name: "string elements", + elems: []string{"83636174", "83646F67"}, // cat, dog + want: "C88363617483646F67", + }, + { + name: "nested list element", + elems: []string{"C20102", "03"}, // [[1, 2], 3] + want: "C4C2010203", + }, + { + name: "multiple nested lists", + elems: []string{"C20102", "C3030405"}, // [[1,2],[3,4,5]], + want: "C7C20102C3030405", + }, + { + name: "large list", + elems: []string{"01", "02", "03", "04", "05", "06"}, + want: "C6010203040506", + }, + { + name: "list with empty strings", + 
elems: []string{"80", "80", "80"}, + want: "C3808080", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + elems := make([][]byte, len(tt.elems)) + for i, s := range tt.elems { + elems[i] = unhex(s) + } + got, err := MergeListValues(elems) + if !errors.Is(err, tt.wantErr) { + t.Errorf("MergeListValues() error = %v, wantErr %v", err, tt.wantErr) + return + } + if err != nil { + return + } + want := unhex(tt.want) + if !bytes.Equal(got, want) { + t.Errorf("MergeListValues() = %x, want %x", got, want) + } + }) + } +} + +func TestSplitMergeList(t *testing.T) { + tests := []struct { + name string + input string // hex-encoded RLP list + }{ + { + name: "empty list", + input: "C0", + }, + { + name: "single byte element", + input: "C101", + }, + { + name: "two byte elements", + input: "C20102", + }, + { + name: "three elements", + input: "C3010203", + }, + { + name: "mixed size elements", + input: "C80182020283030303", + }, + { + name: "string elements", + input: "C88363617483646F67", // [cat, dog] + }, + { + name: "nested list element", + input: "C4C2010203", // [[1,2],3] + }, + { + name: "multiple nested lists", + input: "C6C20102C20304", // [[1,2],[3,4]] + }, + { + name: "large list", + input: "C6010203040506", // [1,2,3,4,5,6] + }, + { + name: "list with empty strings", + input: "C3808080", // ["", "", ""] + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + original := unhex(tt.input) + + // Split the list + elements, err := SplitListValues(original) + if err != nil { + t.Fatalf("SplitListValues() error = %v", err) + } + + // Merge back + merged, err := MergeListValues(elements) + if err != nil { + t.Fatalf("MergeListValues() error = %v", err) + } + + // The merged result should match the original + if !bytes.Equal(merged, original) { + t.Errorf("Round trip failed: original = %x, merged = %x", original, merged) + } + }) + } +} diff --git a/trie/node.go b/trie/node.go index 74fac4fd4e..3f14f07d63 100644 --- 
a/trie/node.go +++ b/trie/node.go @@ -17,6 +17,7 @@ package trie import ( + "bytes" "fmt" "io" "strings" @@ -242,6 +243,74 @@ func decodeRef(buf []byte) (node, []byte, error) { } } +// decodeNodeElements parses the RLP encoding of a trie node and returns all the +// elements in raw byte format. +// +// For full node, it returns a slice of 17 elements; +// For short node, it returns a slice of 2 elements; +func decodeNodeElements(buf []byte) ([][]byte, error) { + if len(buf) == 0 { + return nil, io.ErrUnexpectedEOF + } + return rlp.SplitListValues(buf) +} + +// encodeNodeElements encodes the provided node elements into a rlp list. +func encodeNodeElements(elements [][]byte) ([]byte, error) { + if len(elements) != 2 && len(elements) != 17 { + return nil, fmt.Errorf("invalid number of elements: %d", len(elements)) + } + return rlp.MergeListValues(elements) +} + +// NodeDifference accepts two RLP-encoding nodes and figures out the difference +// between them. +// +// An error is returned if any of the provided blob is nil, or the type of nodes +// are different. +func NodeDifference(oldvalue []byte, newvalue []byte) (int, []int, [][]byte, error) { + oldElems, err := decodeNodeElements(oldvalue) + if err != nil { + return 0, nil, nil, err + } + newElems, err := decodeNodeElements(newvalue) + if err != nil { + return 0, nil, nil, err + } + if len(oldElems) != len(newElems) { + return 0, nil, nil, fmt.Errorf("different node type, old elements: %d, new elements: %d", len(oldElems), len(newElems)) + } + var ( + indices = make([]int, 0, len(oldElems)) + diff = make([][]byte, 0, len(oldElems)) + ) + for i := 0; i < len(oldElems); i++ { + if !bytes.Equal(oldElems[i], newElems[i]) { + indices = append(indices, i) + diff = append(diff, oldElems[i]) + } + } + return len(oldElems), indices, diff, nil +} + +// ReassembleNode accepts a RLP-encoding node along with a set of mutations, +// applying the modification diffs according to the indices and re-assemble. 
+func ReassembleNode(blob []byte, mutations [][][]byte, indices [][]int) ([]byte, error) { + if len(mutations) == 0 && len(indices) == 0 { + return blob, nil + } + elements, err := decodeNodeElements(blob) + if err != nil { + return nil, err + } + for i := 0; i < len(mutations); i++ { + for j, pos := range indices[i] { + elements[pos] = mutations[i][j] + } + } + return encodeNodeElements(elements) +} + // wraps a decoding error with information about the path to the // invalid child node (for debugging encoding issues). type decodeError struct { diff --git a/trie/node_test.go b/trie/node_test.go index 9b8b33748f..875f6e38dc 100644 --- a/trie/node_test.go +++ b/trie/node_test.go @@ -18,9 +18,12 @@ package trie import ( "bytes" + "math/rand" + "reflect" "testing" "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/internal/testrand" "github.com/ethereum/go-ethereum/rlp" ) @@ -94,6 +97,286 @@ func TestDecodeFullNode(t *testing.T) { } } +func makeTestLeafNode(small bool) []byte { + l := leafNodeEncoder{} + l.Key = hexToCompact(keybytesToHex(testrand.Bytes(10))) + if small { + l.Val = testrand.Bytes(10) + } else { + l.Val = testrand.Bytes(32) + } + buf := rlp.NewEncoderBuffer(nil) + l.encode(buf) + return buf.ToBytes() +} + +func makeTestFullNode(small bool) []byte { + n := fullnodeEncoder{} + for i := 0; i < 16; i++ { + switch rand.Intn(3) { + case 0: + // write nil + case 1: + // write hash + n.Children[i] = testrand.Bytes(32) + case 2: + // write embedded node + n.Children[i] = makeTestLeafNode(small) + } + } + n.Children[16] = testrand.Bytes(32) // value + buf := rlp.NewEncoderBuffer(nil) + n.encode(buf) + return buf.ToBytes() +} + +func TestEncodeDecodeNodeElements(t *testing.T) { + var nodes [][]byte + nodes = append(nodes, makeTestFullNode(true)) + nodes = append(nodes, makeTestFullNode(false)) + nodes = append(nodes, makeTestLeafNode(true)) + nodes = append(nodes, makeTestLeafNode(false)) + + for _, blob := range nodes { + elements, err 
:= decodeNodeElements(blob) + if err != nil { + t.Fatalf("Failed to decode node elements: %v", err) + } + enc, err := encodeNodeElements(elements) + if err != nil { + t.Fatalf("Failed to encode node elements: %v", err) + } + if !bytes.Equal(enc, blob) { + t.Fatalf("Unexpected encoded node element, want: %v, got: %v", blob, enc) + } + } +} + +func makeTestLeafNodePair() ([]byte, []byte, [][]byte, []int) { + var ( + na = leafNodeEncoder{} + nb = leafNodeEncoder{} + ) + key := keybytesToHex(testrand.Bytes(10)) + na.Key = hexToCompact(key) + nb.Key = hexToCompact(key) + + valA := testrand.Bytes(32) + valB := testrand.Bytes(32) + na.Val = valA + nb.Val = valB + + bufa, bufb := rlp.NewEncoderBuffer(nil), rlp.NewEncoderBuffer(nil) + na.encode(bufa) + nb.encode(bufb) + diff, _ := rlp.EncodeToBytes(valA) + return bufa.ToBytes(), bufb.ToBytes(), [][]byte{diff}, []int{1} +} + +func makeTestFullNodePair() ([]byte, []byte, [][]byte, []int) { + var ( + na = fullnodeEncoder{} + nb = fullnodeEncoder{} + indices []int + values [][]byte + ) + for i := 0; i < 16; i++ { + switch rand.Intn(3) { + case 0: + // write nil + case 1: + // write same + var child []byte + if rand.Intn(2) == 0 { + child = testrand.Bytes(32) // hashnode + } else { + child = makeTestLeafNode(true) // embedded node + } + na.Children[i] = child + nb.Children[i] = child + case 2: + // write different + var ( + va []byte + diff []byte + ) + rnd := rand.Intn(3) + if rnd == 0 { + va = testrand.Bytes(32) // hashnode + diff, _ = rlp.EncodeToBytes(va) + } else if rnd == 1 { + va = makeTestLeafNode(true) // embedded node + diff = va + } else { + va = nil + diff = rlp.EmptyString + } + vb := testrand.Bytes(32) // hashnode + na.Children[i] = va + nb.Children[i] = vb + + indices = append(indices, i) + values = append(values, diff) + } + } + na.Children[16] = nil + nb.Children[16] = nil + + bufa, bufb := rlp.NewEncoderBuffer(nil), rlp.NewEncoderBuffer(nil) + na.encode(bufa) + nb.encode(bufb) + return bufa.ToBytes(), 
bufb.ToBytes(), values, indices +} + +func TestNodeDifference(t *testing.T) { + type testsuite struct { + old []byte + new []byte + expErr bool + expIndices []int + expValues [][]byte + } + var tests = []testsuite{ + // Invalid node data + { + old: nil, new: nil, expErr: true, + }, + { + old: testrand.Bytes(32), new: nil, expErr: true, + }, + { + old: nil, new: testrand.Bytes(32), expErr: true, + }, + { + old: testrand.Bytes(32), new: testrand.Bytes(32), expErr: true, + }, + // Different node type + { + old: makeTestLeafNode(true), new: makeTestFullNode(true), expErr: true, + }, + } + for range 10 { + va, vb, elements, indices := makeTestLeafNodePair() + tests = append(tests, testsuite{ + old: va, + new: vb, + expErr: false, + expIndices: indices, + expValues: elements, + }) + } + for range 10 { + va, vb, elements, indices := makeTestFullNodePair() + tests = append(tests, testsuite{ + old: va, + new: vb, + expErr: false, + expIndices: indices, + expValues: elements, + }) + } + + for _, test := range tests { + _, indices, values, err := NodeDifference(test.old, test.new) + if test.expErr && err == nil { + t.Fatal("Expect error, got nil") + } + if !test.expErr && err != nil { + t.Fatalf("Unexpect error, %v", err) + } + if err == nil { + if !reflect.DeepEqual(indices, test.expIndices) { + t.Fatalf("Unexpected indices, want: %v, got: %v", test.expIndices, indices) + } + if !reflect.DeepEqual(values, test.expValues) { + t.Fatalf("Unexpected values, want: %v, got: %v", test.expValues, values) + } + } + } +} + +func TestReassembleFullNode(t *testing.T) { + var fn fullnodeEncoder + for i := 0; i < 16; i++ { + if rand.Intn(2) == 0 { + fn.Children[i] = testrand.Bytes(32) + } + } + buf := rlp.NewEncoderBuffer(nil) + fn.encode(buf) + enc := buf.ToBytes() + + // Generate a list of diffs + var ( + values [][][]byte + indices [][]int + ) + for i := 0; i < 10; i++ { + var ( + pos = make(map[int]struct{}) + poslist []int + valuelist [][]byte + ) + for j := 0; j < 3; j++ { + p := 
rand.Intn(16) + if _, ok := pos[p]; ok { + continue + } + pos[p] = struct{}{} + + nh := testrand.Bytes(32) + diff, _ := rlp.EncodeToBytes(nh) + poslist = append(poslist, p) + valuelist = append(valuelist, diff) + fn.Children[p] = nh + } + values = append(values, valuelist) + indices = append(indices, poslist) + } + reassembled, err := ReassembleNode(enc, values, indices) + if err != nil { + t.Fatalf("Failed to re-assemble full node %v", err) + } + buf2 := rlp.NewEncoderBuffer(nil) + fn.encode(buf2) + enc2 := buf2.ToBytes() + if !reflect.DeepEqual(enc2, reassembled) { + t.Fatalf("Unexpeted reassembled node") + } +} + +func TestReassembleShortNode(t *testing.T) { + var ln leafNodeEncoder + ln.Key = hexToCompact(keybytesToHex(testrand.Bytes(10))) + ln.Val = testrand.Bytes(10) + buf := rlp.NewEncoderBuffer(nil) + ln.encode(buf) + enc := buf.ToBytes() + + // Generate a list of diffs + var ( + values [][][]byte + indices [][]int + ) + for i := 0; i < 10; i++ { + val := testrand.Bytes(10) + ln.Val = val + diff, _ := rlp.EncodeToBytes(val) + values = append(values, [][]byte{diff}) + indices = append(indices, []int{1}) + } + reassembled, err := ReassembleNode(enc, values, indices) + if err != nil { + t.Fatalf("Failed to re-assemble full node %v", err) + } + buf2 := rlp.NewEncoderBuffer(nil) + ln.encode(buf2) + enc2 := buf2.ToBytes() + if !reflect.DeepEqual(enc2, reassembled) { + t.Fatalf("Unexpeted reassembled node") + } +} + // goos: darwin // goarch: arm64 // pkg: github.com/ethereum/go-ethereum/trie diff --git a/triedb/pathdb/nodes.go b/triedb/pathdb/nodes.go index c6f9e7aece..4eede439e4 100644 --- a/triedb/pathdb/nodes.go +++ b/triedb/pathdb/nodes.go @@ -14,12 +14,14 @@ // You should have received a copy of the GNU Lesser General Public License // along with the go-ethereum library. If not, see . 
+// nolint:unused package pathdb import ( "bytes" "errors" "fmt" + "hash/fnv" "io" "maps" @@ -30,6 +32,7 @@ import ( "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/rlp" + "github.com/ethereum/go-ethereum/trie" "github.com/ethereum/go-ethereum/trie/trienode" ) @@ -424,3 +427,272 @@ func (s *nodeSetWithOrigin) decode(r *rlp.Stream) error { s.computeSize() return nil } + +// encodeNodeCompressed encodes the trie node differences between two consecutive +// versions into byte stream. The format is as below: +// +// - metadata byte layout (1 byte): +// +// ┌──── Bits (from MSB to LSB) ───┐ +// │ 7 │ 6 │ 5 │ 4 │ 3 │ 2 │ 1 │ 0 │ +// └───────────────────────────────┘ +// │ │ │ │ │ │ │ └─ FlagA: set if value is encoded in compressed format +// │ │ │ │ │ │ └───── FlagB: set if no extended bitmap is present after the metadata byte +// │ │ │ │ │ └───────── FlagC: bitmap for node (only used when flagB == 1) +// │ │ │ │ └───────────── FlagD: bitmap for node (only used when flagB == 1) +// │ │ │ └───────────────── FlagE: reserved (marks the presence of the 16th child in a full node) +// │ │ └───────────────────── FlagF: reserved +// │ └───────────────────────── FlagG: reserved +// └───────────────────────────── FlagH: reserved +// +// Note: +// - If flagB is 1, the node refers to a shortNode; +// - flagC indicates whether the key of the shortNode is recorded. +// - flagD indicates whether the value of the shortNode is recorded. +// +// - If flagB is 0, the node refers to a fullNode; +// - each bit in extended bitmap indicates whether the corresponding +// child have been modified. +// +// Example: +// +// 0b_0000_1011 +// +// Bit0=1, Bit1=1 -> node in compressed format, no extended bitmap +// Bit2=0, Bit3=1 -> the key of a short node is not stored; its value is stored. 
+// +// - 2 bytes extended bitmap (only if the flagB in metadata is 0), each bit +// represents a corresponding child; +// +// - concatenation of original value of modified children along with its size; +func encodeNodeCompressed(addExtension bool, elements [][]byte, indices []int) []byte { + var ( + enc []byte + flag = byte(1) // The compression format indicator + ) + // Pre-allocate the byte slice for the node encoder + size := 1 + if addExtension { + size += 2 + } + for _, element := range elements { + size += len(element) + 1 + } + enc = make([]byte, 0, size) + + if !addExtension { + flag |= 2 // The embedded bitmap indicator + + // Embedded bitmap + for _, pos := range indices { + flag |= 1 << (pos + 2) + } + enc = append(enc, flag) + } else { + // Extended bitmap + bitmap := make([]byte, 2) // bitmaps for at most 16 children + for _, pos := range indices { + // Children[16] is only theoretically possible in the Merkle-Patricia-trie, + // in practice this field is never used in the Ethereum case. If it occurs, + // use the FlagE for marking the presence. + if pos >= 16 { + log.Warn("Unexpected 16th child encountered in a full node") + flag |= 1 << 4 // Use the reserved flagE + continue + } + bitIndex := uint(pos % 8) + bitmap[pos/8] |= 1 << bitIndex + } + enc = append(enc, flag) + enc = append(enc, bitmap...) + } + for _, element := range elements { + enc = append(enc, byte(len(element))) // 1 byte is sufficient for element size + enc = append(enc, element...) + } + return enc +} + +// encodeNodeFull encodes the full trie node value into byte stream. The format is +// as below: +// +// - metadata byte layout (1 byte): 0b0 +// - node value +func encodeNodeFull(value []byte) []byte { + enc := make([]byte, len(value)+1) + copy(enc[1:], value) + return enc +} + +// decodeNodeCompressed decodes the byte stream of compressed trie node +// back to the original elements and their indices. +// +// It assumes the byte stream contains a compressed format node. 
+func decodeNodeCompressed(data []byte) ([][]byte, []int, error) { + if len(data) < 1 { + return nil, nil, errors.New("invalid data: too short") + } + flag := data[0] + if flag&byte(1) == 0 { + return nil, nil, errors.New("invalid data: full node value") + } + noExtend := flag&byte(2) != 0 + + // Reconstruct indices from bitmap + var indices []int + if noExtend { + if flag&byte(4) != 0 { // flagC + indices = append(indices, 0) + } + if flag&byte(8) != 0 { // flagD + indices = append(indices, 1) + } + data = data[1:] + } else { + if len(data) < 3 { + return nil, nil, errors.New("invalid data: too short") + } + bitmap := data[1:3] + for index, b := range bitmap { + for bitIdx := 0; bitIdx < 8; bitIdx++ { + if b&(1< Date: Fri, 9 Jan 2026 10:43:15 +0100 Subject: [PATCH 225/277] core/txpool/blobpool: allow gaps in blobpool (#32717) Allow the blobpool to accept blobs out of nonce order Previously, we were dropping blobs that arrived out-of-order. However, since fetch decisions are done on receiver side, out-of-order delivery can happen, leading to inefficiencies. This PR: - adds an in-memory blob tx storage, similar to the queue in the legacypool - a limited number of received txs can be added to this per account - txs waiting in the gapped queue are not processed further and not propagated further until they are unblocked by adding the previos nonce to the blobpool The size of the in-memory storage is currently limited per account, following a slow-start logic. An overall size limit, and a TTL is also enforced for DoS protection. 
--------- Signed-off-by: Csaba Kiraly Co-authored-by: MariusVanDerWijden --- core/txpool/blobpool/blobpool.go | 156 ++++++++++++++++++++++++-- core/txpool/blobpool/blobpool_test.go | 41 +++++-- 2 files changed, 181 insertions(+), 16 deletions(-) diff --git a/core/txpool/blobpool/blobpool.go b/core/txpool/blobpool/blobpool.go index 28326ae605..27441ac2e2 100644 --- a/core/txpool/blobpool/blobpool.go +++ b/core/txpool/blobpool/blobpool.go @@ -94,6 +94,16 @@ const ( // storeVersion is the current slotter layout used for the billy.Database // store. storeVersion = 1 + + // gappedLifetime is the approximate duration for which nonce-gapped transactions + // are kept before being dropped. Since gapped is only a reorder buffer and it + // is expected that the original transactions were inserted in the mempool in + // nonce order, the duration is kept short to avoid DoS vectors. + gappedLifetime = 1 * time.Minute + + // maxGappedTxs is the maximum number of gapped transactions kept overall. + // This is a safety limit to avoid DoS vectors. 
+ maxGapped = 128 ) // blobTxMeta is the minimal subset of types.BlobTx necessary to validate and @@ -330,6 +340,9 @@ type BlobPool struct { stored uint64 // Useful data size of all transactions on disk limbo *limbo // Persistent data store for the non-finalized blobs + gapped map[common.Address][]*types.Transaction // Transactions that are currently gapped (nonce too high) + gappedSource map[common.Hash]common.Address // Source of gapped transactions to allow rechecking on inclusion + signer types.Signer // Transaction signer to use for sender recovery chain BlockChain // Chain object to access the state through @@ -363,6 +376,8 @@ func New(config Config, chain BlockChain, hasPendingAuth func(common.Address) bo lookup: newLookup(), index: make(map[common.Address][]*blobTxMeta), spent: make(map[common.Address]*uint256.Int), + gapped: make(map[common.Address][]*types.Transaction), + gappedSource: make(map[common.Hash]common.Address), } } @@ -834,6 +849,9 @@ func (p *BlobPool) Reset(oldHead, newHead *types.Header) { resettimeHist.Update(time.Since(start).Nanoseconds()) }(time.Now()) + // Handle reorg buffer timeouts evicting old gapped transactions + p.evictGapped() + statedb, err := p.chain.StateAt(newHead.Root) if err != nil { log.Error("Failed to reset blobpool state", "err", err) @@ -1196,7 +1214,9 @@ func (p *BlobPool) validateTx(tx *types.Transaction) error { State: p.state, FirstNonceGap: func(addr common.Address) uint64 { - // Nonce gaps are not permitted in the blob pool, the first gap will + // Nonce gaps are permitted in the blob pool, but only as part of the + // in-memory 'gapped' buffer. We expose the gap here to validateTx, + // then handle the error by adding to the buffer. The first gap will // be the next nonce shifted by however many transactions we already // have pooled. 
return p.state.GetNonce(addr) + uint64(len(p.index[addr])) @@ -1275,7 +1295,9 @@ func (p *BlobPool) Has(hash common.Hash) bool { p.lock.RLock() defer p.lock.RUnlock() - return p.lookup.exists(hash) + poolHas := p.lookup.exists(hash) + _, gapped := p.gappedSource[hash] + return poolHas || gapped } func (p *BlobPool) getRLP(hash common.Hash) []byte { @@ -1466,10 +1488,6 @@ func (p *BlobPool) Add(txs []*types.Transaction, sync bool) []error { adds = append(adds, tx.WithoutBlobTxSidecar()) } } - if len(adds) > 0 { - p.discoverFeed.Send(core.NewTxsEvent{Txs: adds}) - p.insertFeed.Send(core.NewTxsEvent{Txs: adds}) - } return errs } @@ -1488,6 +1506,13 @@ func (p *BlobPool) add(tx *types.Transaction) (err error) { addtimeHist.Update(time.Since(start).Nanoseconds()) }(time.Now()) + return p.addLocked(tx, true) +} + +// addLocked inserts a new blob transaction into the pool if it passes validation (both +// consensus validity and pool restrictions). It must be called with the pool lock held. +// Only for internal use. 
+func (p *BlobPool) addLocked(tx *types.Transaction, checkGapped bool) (err error) { // Ensure the transaction is valid from all perspectives if err := p.validateTx(tx); err != nil { log.Trace("Transaction validation failed", "hash", tx.Hash(), "err", err) @@ -1500,6 +1525,21 @@ func (p *BlobPool) add(tx *types.Transaction) (err error) { addStaleMeter.Mark(1) case errors.Is(err, core.ErrNonceTooHigh): addGappedMeter.Mark(1) + // Store the tx in memory, and revalidate later + from, _ := types.Sender(p.signer, tx) + allowance := p.gappedAllowance(from) + if allowance >= 1 && len(p.gapped) < maxGapped { + p.gapped[from] = append(p.gapped[from], tx) + p.gappedSource[tx.Hash()] = from + log.Trace("added tx to gapped blob queue", "allowance", allowance, "hash", tx.Hash(), "from", from, "nonce", tx.Nonce(), "qlen", len(p.gapped[from])) + return nil + } else { + // if maxGapped is reached, it is better to give time to gapped + // transactions by keeping the old and dropping this one. + // Thus replacing a gapped transaction with another gapped transaction + // is discouraged. 
+ log.Trace("no gapped blob queue allowance", "allowance", allowance, "hash", tx.Hash(), "from", from, "nonce", tx.Nonce(), "qlen", len(p.gapped[from])) + } case errors.Is(err, core.ErrInsufficientFunds): addOverdraftedMeter.Mark(1) case errors.Is(err, txpool.ErrAccountLimitExceeded): @@ -1637,6 +1677,58 @@ func (p *BlobPool) add(tx *types.Transaction) (err error) { p.updateStorageMetrics() addValidMeter.Mark(1) + + // Notify all listeners of the new arrival + p.discoverFeed.Send(core.NewTxsEvent{Txs: []*types.Transaction{tx.WithoutBlobTxSidecar()}}) + p.insertFeed.Send(core.NewTxsEvent{Txs: []*types.Transaction{tx.WithoutBlobTxSidecar()}}) + + // Check the gapped queue for this account and try to promote + if gtxs, ok := p.gapped[from]; checkGapped && ok && len(gtxs) > 0 { + // We have to add in nonce order, but we want to stable sort to cater for situations + // where transactions are replaced, keeping the original receive order for same nonce + sort.SliceStable(gtxs, func(i, j int) bool { + return gtxs[i].Nonce() < gtxs[j].Nonce() + }) + for len(gtxs) > 0 { + stateNonce := p.state.GetNonce(from) + firstgap := stateNonce + uint64(len(p.index[from])) + + if gtxs[0].Nonce() > firstgap { + // Anything beyond the first gap is not addable yet + break + } + + // Drop any buffered transactions that became stale in the meantime (included in chain or replaced) + // If we arrive at a transaction in the pending range (between the state nonce and the first gap), we + // try to add them now while removing from here. + tx := gtxs[0] + gtxs[0] = nil + gtxs = gtxs[1:] + delete(p.gappedSource, tx.Hash()) + + if tx.Nonce() < stateNonce { + // Stale, drop it. Eventually we could add to limbo here if hash matches. 
+ log.Trace("Gapped blob transaction became stale", "hash", tx.Hash(), "from", from, "nonce", tx.Nonce(), "state", stateNonce, "qlen", len(p.gapped[from])) + continue + } + + if tx.Nonce() <= firstgap { + // If we hit the pending range, including the first gap, add it and continue to try to add more. + // We do not recurse here, but continue to loop instead. + // We are under lock, so we can add the transaction directly. + if err := p.addLocked(tx, false); err == nil { + log.Trace("Gapped blob transaction added to pool", "hash", tx.Hash(), "from", from, "nonce", tx.Nonce(), "qlen", len(p.gapped[from])) + } else { + log.Trace("Gapped blob transaction not accepted", "hash", tx.Hash(), "from", from, "nonce", tx.Nonce(), "err", err) + } + } + } + if len(gtxs) == 0 { + delete(p.gapped, from) + } else { + p.gapped[from] = gtxs + } + } return nil } @@ -1868,6 +1960,50 @@ func (p *BlobPool) Nonce(addr common.Address) uint64 { return p.state.GetNonce(addr) } +// gappedAllowance returns the number of gapped transactions still +// allowed for the given account. Allowance is based on a slow-start +// logic, allowing more gaps (resource usage) to accounts with a +// higher nonce. Can also return negative values. +func (p *BlobPool) gappedAllowance(addr common.Address) int { + // Gaps happen, but we don't want to allow too many. + // Use log10(nonce+1) as the allowance, with a minimum of 0. + nonce := p.state.GetNonce(addr) + allowance := int(math.Log10(float64(nonce + 1))) + // Cap the allowance to the remaining pool space + return min(allowance, maxTxsPerAccount-len(p.index[addr])) - len(p.gapped[addr]) +} + +// evictGapped removes the old transactions from the gapped reorder buffer. +// Concurrency: The caller must hold the pool lock before calling this function. +func (p *BlobPool) evictGapped() { + cutoff := time.Now().Add(-gappedLifetime) + for from, txs := range p.gapped { + nonce := p.state.GetNonce(from) + // Reuse the original slice to avoid extra allocations. 
+ // This is safe because we only keep references to the original gappedTx objects, + // and we overwrite the slice for this account after filtering. + keep := txs[:0] + for i, gtx := range txs { + if gtx.Time().Before(cutoff) || gtx.Nonce() < nonce { + // Evict old or stale transactions + // Should we add stale to limbo here if it would belong? + delete(p.gappedSource, gtx.Hash()) + txs[i] = nil // Explicitly nil out evicted element + } else { + keep = append(keep, gtx) + } + } + if len(keep) < len(txs) { + log.Trace("Evicting old gapped blob transactions", "count", len(txs)-len(keep), "from", from) + } + if len(keep) == 0 { + delete(p.gapped, from) + } else { + p.gapped[from] = keep + } + } +} + // Stats retrieves the current pool stats, namely the number of pending and the // number of queued (non-executable) transactions. func (p *BlobPool) Stats() (int, int) { @@ -1902,9 +2038,15 @@ func (p *BlobPool) ContentFrom(addr common.Address) ([]*types.Transaction, []*ty // Status returns the known status (unknown/pending/queued) of a transaction // identified by their hashes. 
func (p *BlobPool) Status(hash common.Hash) txpool.TxStatus { - if p.Has(hash) { + p.lock.RLock() + defer p.lock.RUnlock() + + if p.lookup.exists(hash) { return txpool.TxStatusPending } + if _, gapped := p.gappedSource[hash]; gapped { + return txpool.TxStatusQueued + } return txpool.TxStatusUnknown } diff --git a/core/txpool/blobpool/blobpool_test.go b/core/txpool/blobpool/blobpool_test.go index eda87008c3..4bb3567b69 100644 --- a/core/txpool/blobpool/blobpool_test.go +++ b/core/txpool/blobpool/blobpool_test.go @@ -1352,9 +1352,10 @@ func TestAdd(t *testing.T) { } // addtx is a helper sender/tx tuple to represent a new tx addition type addtx struct { - from string - tx *types.BlobTx - err error + from string + tx *types.BlobTx + err error + check func(*BlobPool, *types.Transaction) bool } tests := []struct { @@ -1371,6 +1372,7 @@ func TestAdd(t *testing.T) { "bob": {balance: 21100 + blobSize, nonce: 1}, "claire": {balance: 21100 + blobSize}, "dave": {balance: 21100 + blobSize, nonce: 1}, + "eve": {balance: 21100 + blobSize, nonce: 10}, // High nonce to test gapped acceptance }, adds: []addtx{ { // New account, no previous txs: accept nonce 0 @@ -1398,6 +1400,14 @@ func TestAdd(t *testing.T) { tx: makeUnsignedTx(2, 1, 1, 1), err: core.ErrNonceTooHigh, }, + { // Old account, 10 txs in chain: 0 pending: accept nonce 11 as gapped + from: "eve", + tx: makeUnsignedTx(11, 1, 1, 1), + err: nil, + check: func(pool *BlobPool, tx *types.Transaction) bool { + return pool.Status(tx.Hash()) == txpool.TxStatusQueued + }, + }, }, }, // Transactions from already pooled accounts should only be accepted if @@ -1758,15 +1768,28 @@ func TestAdd(t *testing.T) { t.Errorf("test %d, tx %d: adding transaction error mismatch: have %v, want %v", i, j, errs[0], add.err) } if add.err == nil { - size, exist := pool.lookup.sizeOfTx(signed.Hash()) - if !exist { - t.Errorf("test %d, tx %d: failed to lookup transaction's size", i, j) + // first check if tx is in the pool (reorder queue or pending) + 
if !pool.Has(signed.Hash()) { + t.Errorf("test %d, tx %d: added transaction not found in pool", i, j) } - if size != signed.Size() { - t.Errorf("test %d, tx %d: transaction's size mismatches: have %v, want %v", - i, j, size, signed.Size()) + // if it is pending, check if size matches + if pool.Status(signed.Hash()) == txpool.TxStatusPending { + size, exist := pool.lookup.sizeOfTx(signed.Hash()) + if !exist { + t.Errorf("test %d, tx %d: failed to lookup transaction's size", i, j) + } + if size != signed.Size() { + t.Errorf("test %d, tx %d: transaction's size mismatches: have %v, want %v", + i, j, size, signed.Size()) + } } } + if add.check != nil { + if !add.check(pool, signed) { + t.Errorf("test %d, tx %d: custom check failed", i, j) + } + } + // Verify the pool internals after each addition verifyPoolInternals(t, pool) } verifyPoolInternals(t, pool) From 4eb5b66d9ec0c4c56c223535e3df24e3a76b63b4 Mon Sep 17 00:00:00 2001 From: maradini77 <140460067+maradini77@users.noreply.github.com> Date: Fri, 9 Jan 2026 11:25:04 +0100 Subject: [PATCH 226/277] ethclient: restore BlockReceipts support for `BlockNumberOrHash` objects (#33242) - pass `rpc.BlockNumberOrHash` directly to `eth_getBlockReceipts` so `requireCanonical` and other fields survive - aligns `BlockReceipts` with other `ethclient` methods and re-enables canonical-only receipt queries --- ethclient/ethclient.go | 2 +- ethclient/ethclient_test.go | 43 +++++++++++++++++++++++++++++++++++++ 2 files changed, 44 insertions(+), 1 deletion(-) diff --git a/ethclient/ethclient.go b/ethclient/ethclient.go index 426194b59f..6f2fb5ebc8 100644 --- a/ethclient/ethclient.go +++ b/ethclient/ethclient.go @@ -124,7 +124,7 @@ func (ec *Client) PeerCount(ctx context.Context) (uint64, error) { // BlockReceipts returns the receipts of a given block number or hash. 
func (ec *Client) BlockReceipts(ctx context.Context, blockNrOrHash rpc.BlockNumberOrHash) ([]*types.Receipt, error) { var r []*types.Receipt - err := ec.c.CallContext(ctx, &r, "eth_getBlockReceipts", blockNrOrHash.String()) + err := ec.c.CallContext(ctx, &r, "eth_getBlockReceipts", blockNrOrHash) if err == nil && r == nil { return nil, ethereum.NotFound } diff --git a/ethclient/ethclient_test.go b/ethclient/ethclient_test.go index 302ccf2e16..f9e761e412 100644 --- a/ethclient/ethclient_test.go +++ b/ethclient/ethclient_test.go @@ -687,6 +687,49 @@ func testTransactionSender(t *testing.T, client *rpc.Client) { } } +func TestBlockReceiptsPreservesCanonicalFlag(t *testing.T) { + srv := rpc.NewServer() + service := &blockReceiptsTestService{calls: make(chan rpc.BlockNumberOrHash, 1)} + if err := srv.RegisterName("eth", service); err != nil { + t.Fatalf("failed to register service: %v", err) + } + defer srv.Stop() + + client := rpc.DialInProc(srv) + defer client.Close() + + ec := ethclient.NewClient(client) + defer ec.Close() + + hash := common.HexToHash("0x01") + ref := rpc.BlockNumberOrHashWithHash(hash, true) + + if _, err := ec.BlockReceipts(context.Background(), ref); err != nil { + t.Fatalf("BlockReceipts returned error: %v", err) + } + + select { + case call := <-service.calls: + if call.BlockHash == nil || *call.BlockHash != hash { + t.Fatalf("unexpected block hash: got %v, want %v", call.BlockHash, hash) + } + if !call.RequireCanonical { + t.Fatalf("requireCanonical flag was lost: %+v", call) + } + default: + t.Fatal("service was not called") + } +} + +type blockReceiptsTestService struct { + calls chan rpc.BlockNumberOrHash +} + +func (s *blockReceiptsTestService) GetBlockReceipts(ctx context.Context, block rpc.BlockNumberOrHash) ([]*types.Receipt, error) { + s.calls <- block + return []*types.Receipt{}, nil +} + func newCanceledContext() context.Context { ctx, cancel := context.WithCancel(context.Background()) cancel() From 
7cd400612f2462e0827e7d7cea914c6ff8c9f63e Mon Sep 17 00:00:00 2001 From: Xuanyu Hu Date: Fri, 9 Jan 2026 19:53:55 +0800 Subject: [PATCH 227/277] tests: check correct revert on invalid tests (#33543) This PR fixes an issue where `evm statetest` would not verify the post-state root hash if the test case expected an exception (e.g. invalid transaction). The fix involves: 1. Modifying `tests/state_test_util.go` in the `Run` method. 2. When an expected error occurs (`err != nil`), we now check if `post.Root` is defined. 3. If defined, we recalculate the intermediate root from the current state (which is reverted to the pre-transaction snapshot upon error). 4. We use `GetChainConfig` and `IsEIP158` to ensure the correct state clearing rules are applied when calculating the root, avoiding regressions on forks that require EIP-158 state clearing. 5. If the calculated root mismatches the expected root, the test now fails. This ensures that state tests are strictly verified against their expected post-state, even for failure scenarios. Fixes issue #33527 --------- Co-authored-by: MariusVanDerWijden --- tests/state_test_util.go | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/tests/state_test_util.go b/tests/state_test_util.go index 1d6cc8db70..3c1df35157 100644 --- a/tests/state_test_util.go +++ b/tests/state_test_util.go @@ -234,6 +234,20 @@ func (t *StateTest) Run(subtest StateSubtest, vmconfig vm.Config, snapshotter bo if err != nil { // Here, an error exists but it was expected. // We do not check the post state or logs. + // However, if the test defines a post state root, we should check it. + // In case of an error, the state is reverted to the snapshot, so we need to + // recalculate the root. 
+ post := t.json.Post[subtest.Fork][subtest.Index] + if post.Root != (common.UnprefixedHash{}) { + config, _, err := GetChainConfig(subtest.Fork) + if err != nil { + return fmt.Errorf("failed to get chain config: %w", err) + } + root = st.StateDB.IntermediateRoot(config.IsEIP158(new(big.Int).SetUint64(t.json.Env.Number))) + if root != common.Hash(post.Root) { + return fmt.Errorf("post-state root does not match the pre-state root, indicates an error in the test: got %x, want %x", root, post.Root) + } + } return nil } post := t.json.Post[subtest.Fork][subtest.Index] From 127d1f42bb22df090644df774f479afe7a08bafd Mon Sep 17 00:00:00 2001 From: Csaba Kiraly Date: Fri, 9 Jan 2026 14:40:40 +0100 Subject: [PATCH 228/277] core: remove duplicate chainHeadFeed.Send code (#33563) The code was simply duplicate, so we can remove some code lines here. Signed-off-by: Csaba Kiraly --- core/blockchain.go | 23 +++++++---------------- 1 file changed, 7 insertions(+), 16 deletions(-) diff --git a/core/blockchain.go b/core/blockchain.go index ba96dc1760..e71f97b7b9 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -746,21 +746,7 @@ func (bc *BlockChain) SetHead(head uint64) error { if _, err := bc.setHeadBeyondRoot(head, 0, common.Hash{}, false); err != nil { return err } - // Send chain head event to update the transaction pool - header := bc.CurrentBlock() - if block := bc.GetBlock(header.Hash(), header.Number.Uint64()); block == nil { - // In a pruned node the genesis block will not exist in the freezer. - // It should not happen that we set head to any other pruned block. - if header.Number.Uint64() > 0 { - // This should never happen. In practice, previously currentBlock - // contained the entire block whereas now only a "marker", so there - // is an ever so slight chance for a race we should handle. 
- log.Error("Current block not found in database", "block", header.Number, "hash", header.Hash()) - return fmt.Errorf("current block missing: #%d [%x..]", header.Number, header.Hash().Bytes()[:4]) - } - } - bc.chainHeadFeed.Send(ChainHeadEvent{Header: header}) - return nil + return bc.sendChainHeadEvent() } // SetHeadWithTimestamp rewinds the local chain to a new head that has at max @@ -771,7 +757,12 @@ func (bc *BlockChain) SetHeadWithTimestamp(timestamp uint64) error { if _, err := bc.setHeadBeyondRoot(0, timestamp, common.Hash{}, false); err != nil { return err } - // Send chain head event to update the transaction pool + return bc.sendChainHeadEvent() +} + +// sendChainHeadEvent notifies all subscribers about the new chain head, +// checking first that the current block is actually available. +func (bc *BlockChain) sendChainHeadEvent() error { header := bc.CurrentBlock() if block := bc.GetBlock(header.Hash(), header.Number.Uint64()); block == nil { // In a pruned node the genesis block will not exist in the freezer. From c890637af9d90d3559be37fa5f3d4cd28d55cb4d Mon Sep 17 00:00:00 2001 From: Jonny Rhea <5555162+jrhea@users.noreply.github.com> Date: Mon, 12 Jan 2026 00:25:22 -0600 Subject: [PATCH 229/277] core/rawdb: skip missing block bodies during tx unindexing (#33573) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This PR fixes an issue where the tx indexer would repeatedly try to “unindex” a block with a missing body, causing a spike in CPU usage. This change skips these blocks and advances the index tail. The fix was verified both manually on a local development chain and with a new test. 
resolves #33371 --- core/rawdb/chain_iterator.go | 32 ++++++++++++++++++++++--------- core/rawdb/chain_iterator_test.go | 30 +++++++++++++++++++++++++++++ 2 files changed, 53 insertions(+), 9 deletions(-) diff --git a/core/rawdb/chain_iterator.go b/core/rawdb/chain_iterator.go index e7c89ca8d9..713c3d8ae2 100644 --- a/core/rawdb/chain_iterator.go +++ b/core/rawdb/chain_iterator.go @@ -87,6 +87,7 @@ func InitDatabaseFromFreezer(db ethdb.Database) { type blockTxHashes struct { number uint64 hashes []common.Hash + err error } // iterateTransactions iterates over all transactions in the (canon) block @@ -144,17 +145,22 @@ func iterateTransactions(db ethdb.Database, from uint64, to uint64, reverse bool }() for data := range rlpCh { var body types.Body + var result *blockTxHashes if err := rlp.DecodeBytes(data.rlp, &body); err != nil { log.Warn("Failed to decode block body", "block", data.number, "error", err) - return - } - var hashes []common.Hash - for _, tx := range body.Transactions { - hashes = append(hashes, tx.Hash()) - } - result := &blockTxHashes{ - hashes: hashes, - number: data.number, + result = &blockTxHashes{ + number: data.number, + err: err, + } + } else { + var hashes []common.Hash + for _, tx := range body.Transactions { + hashes = append(hashes, tx.Hash()) + } + result = &blockTxHashes{ + hashes: hashes, + number: data.number, + } } // Feed the block to the aggregator, or abort on interrupt select { @@ -214,6 +220,10 @@ func indexTransactions(db ethdb.Database, from uint64, to uint64, interrupt chan // Next block available, pop it off and index it delivery := queue.PopItem() lastNum = delivery.number + if delivery.err != nil { + log.Warn("Skipping tx indexing for block with missing/corrupt body", "block", delivery.number, "error", delivery.err) + continue + } WriteTxLookupEntries(batch, delivery.number, delivery.hashes) blocks++ txs += len(delivery.hashes) @@ -307,6 +317,10 @@ func unindexTransactions(db ethdb.Database, from uint64, to uint64, 
interrupt ch } delivery := queue.PopItem() nextNum = delivery.number + 1 + if delivery.err != nil { + log.Warn("Skipping tx unindexing for block with missing/corrupt body", "block", delivery.number, "error", delivery.err) + continue + } DeleteTxLookupEntries(batch, delivery.hashes) txs += len(delivery.hashes) blocks++ diff --git a/core/rawdb/chain_iterator_test.go b/core/rawdb/chain_iterator_test.go index 75bd5a9a94..089ebfe828 100644 --- a/core/rawdb/chain_iterator_test.go +++ b/core/rawdb/chain_iterator_test.go @@ -218,6 +218,36 @@ func TestIndexTransactions(t *testing.T) { verify(0, 8, false, 8) } +func TestUnindexTransactionsMissingBody(t *testing.T) { + // Construct test chain db + chainDB := NewMemoryDatabase() + blocks, _ := initDatabaseWithTransactions(chainDB) + + // Index the entire chain. + lastBlock := blocks[len(blocks)-1].NumberU64() + IndexTransactions(chainDB, 0, lastBlock+1, nil, false) + + // Prove that block 2 body exists in the database. + if raw := ReadCanonicalBodyRLP(chainDB, 2, nil); len(raw) == 0 { + t.Fatalf("Block 2 body does not exist in the database.") + } + + // Delete body for block 2. This simulates a corrupted database. + key := blockBodyKey(2, blocks[2].Hash()) + if err := chainDB.Delete(key); err != nil { + t.Fatalf("Failed to delete block body %v", err) + } + + // Unindex blocks [0, 3) + UnindexTransactions(chainDB, 0, 3, nil, false) + + // Verify that tx index tail is updated to 3. 
+ tail := ReadTxIndexTail(chainDB) + if tail == nil || *tail != 3 { + t.Fatalf("The tx index tail is wrong: got %v want %d", *tail, 3) + } +} + func TestPruneTransactionIndex(t *testing.T) { chainDB := NewMemoryDatabase() blocks, _ := initDatabaseWithTransactions(chainDB) From 31d5d82ce595f8bdb519d0062c92ddc361a471ec Mon Sep 17 00:00:00 2001 From: 0xcharry Date: Mon, 12 Jan 2026 07:31:45 +0100 Subject: [PATCH 230/277] internal/ethapi: refactor RPC tx formatter (#33582) --- internal/ethapi/api.go | 33 +++++++++++++-------------------- 1 file changed, 13 insertions(+), 20 deletions(-) diff --git a/internal/ethapi/api.go b/internal/ethapi/api.go index eb437201d5..d48bffd818 100644 --- a/internal/ethapi/api.go +++ b/internal/ethapi/api.go @@ -186,6 +186,15 @@ func NewTxPoolAPI(b Backend) *TxPoolAPI { return &TxPoolAPI{b} } +// flattenTxs builds the RPC transaction map keyed by nonce for a set of pool txs. +func flattenTxs(txs types.Transactions, header *types.Header, cfg *params.ChainConfig) map[string]*RPCTransaction { + dump := make(map[string]*RPCTransaction, len(txs)) + for _, tx := range txs { + dump[fmt.Sprintf("%d", tx.Nonce())] = NewRPCPendingTransaction(tx, header, cfg) + } + return dump +} + // Content returns the transactions contained within the transaction pool. 
func (api *TxPoolAPI) Content() map[string]map[string]map[string]*RPCTransaction { pending, queue := api.b.TxPoolContent() @@ -196,19 +205,11 @@ func (api *TxPoolAPI) Content() map[string]map[string]map[string]*RPCTransaction curHeader := api.b.CurrentHeader() // Flatten the pending transactions for account, txs := range pending { - dump := make(map[string]*RPCTransaction, len(txs)) - for _, tx := range txs { - dump[fmt.Sprintf("%d", tx.Nonce())] = NewRPCPendingTransaction(tx, curHeader, api.b.ChainConfig()) - } - content["pending"][account.Hex()] = dump + content["pending"][account.Hex()] = flattenTxs(txs, curHeader, api.b.ChainConfig()) } // Flatten the queued transactions for account, txs := range queue { - dump := make(map[string]*RPCTransaction, len(txs)) - for _, tx := range txs { - dump[fmt.Sprintf("%d", tx.Nonce())] = NewRPCPendingTransaction(tx, curHeader, api.b.ChainConfig()) - } - content["queued"][account.Hex()] = dump + content["queued"][account.Hex()] = flattenTxs(txs, curHeader, api.b.ChainConfig()) } return content } @@ -220,18 +221,10 @@ func (api *TxPoolAPI) ContentFrom(addr common.Address) map[string]map[string]*RP curHeader := api.b.CurrentHeader() // Build the pending transactions - dump := make(map[string]*RPCTransaction, len(pending)) - for _, tx := range pending { - dump[fmt.Sprintf("%d", tx.Nonce())] = NewRPCPendingTransaction(tx, curHeader, api.b.ChainConfig()) - } - content["pending"] = dump + content["pending"] = flattenTxs(pending, curHeader, api.b.ChainConfig()) // Build the queued transactions - dump = make(map[string]*RPCTransaction, len(queue)) - for _, tx := range queue { - dump[fmt.Sprintf("%d", tx.Nonce())] = NewRPCPendingTransaction(tx, curHeader, api.b.ChainConfig()) - } - content["queued"] = dump + content["queued"] = flattenTxs(queue, curHeader, api.b.ChainConfig()) return content } From 1278b4891d084c587ceb209a0b1a140984b55437 Mon Sep 17 00:00:00 2001 From: bigbear <155267841+aso20455@users.noreply.github.com> Date: Tue, 13 
Jan 2026 10:24:45 +0200 Subject: [PATCH 231/277] tests: repair oss-fuzz coverage command (#33304) The coverage build path was generating go test commands with a bogus -tags flag that held the coverpkg value, so the run kept failing. I switched coverbuild to treat the optional argument as an override for -coverpkg and stopped passing coverpkg from the caller. Now the script emits a clean go test invocation that should actually succeed. --- oss-fuzz.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/oss-fuzz.sh b/oss-fuzz.sh index 020b6fee27..bd87665125 100644 --- a/oss-fuzz.sh +++ b/oss-fuzz.sh @@ -64,7 +64,7 @@ function compile_fuzzer() { go get github.com/holiman/gofuzz-shim/testing if [[ $SANITIZER == *coverage* ]]; then - coverbuild $path $function $fuzzer $coverpkg + coverbuild $path $function $fuzzer else gofuzz-shim --func $function --package $package -f $file -o $fuzzer.a $CXX $CXXFLAGS $LIB_FUZZING_ENGINE $fuzzer.a -o $OUT/$fuzzer From 5a1990d1d8b30168c427fc76468dd482a90957eb Mon Sep 17 00:00:00 2001 From: Maxim Evtush <154841002+maximevtush@users.noreply.github.com> Date: Tue, 13 Jan 2026 14:25:53 +0200 Subject: [PATCH 232/277] rpc: fix limitedBuffer.Write to properly enforce size limit (#33545) Updated the `avail` calculation to correctly compute remaining capacity: `buf.limit - len(buf.output)`, ensuring the buffer never exceeds its configured limit regardless of how many times `Write()` is called. --- rpc/handler.go | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/rpc/handler.go b/rpc/handler.go index 45558d5821..462519d872 100644 --- a/rpc/handler.go +++ b/rpc/handler.go @@ -612,8 +612,11 @@ type limitedBuffer struct { } func (buf *limitedBuffer) Write(data []byte) (int, error) { - avail := max(buf.limit, len(buf.output)) - if len(data) < avail { + avail := buf.limit - len(buf.output) + if avail <= 0 { + return 0, errTruncatedOutput + } + if len(data) <= avail { buf.output = append(buf.output, data...) 
return len(data), nil } From ea4935430b3a8fffa39742a0c53e23a4620bde1e Mon Sep 17 00:00:00 2001 From: Felix Lange Date: Tue, 13 Jan 2026 17:07:44 +0100 Subject: [PATCH 233/277] version: begin v1.17.0 release cycle --- version/version.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/version/version.go b/version/version.go index a3aad5d398..bcb61f27b1 100644 --- a/version/version.go +++ b/version/version.go @@ -18,7 +18,7 @@ package version const ( Major = 1 // Major version component of the current release - Minor = 16 // Minor version component of the current release - Patch = 8 // Patch version component of the current release + Minor = 17 // Minor version component of the current release + Patch = 0 // Patch version component of the current release Meta = "unstable" // Version metadata to append to the version string ) From 5b99d2bba47702f7435b07561926c71a5f29a117 Mon Sep 17 00:00:00 2001 From: MariusVanDerWijden Date: Fri, 19 Dec 2025 13:09:40 +0100 Subject: [PATCH 234/277] core/txpool: drop peers on invalid KZG proofs Co-authored-by: Gary Rong Co-authored-by: MariusVanDerWijden : --- core/txpool/errors.go | 3 + core/txpool/validation.go | 7 ++- eth/fetcher/tx_fetcher.go | 32 ++++++++-- eth/fetcher/tx_fetcher_test.go | 112 +++++++++++++++++++++++++++++++++ 4 files changed, 147 insertions(+), 7 deletions(-) diff --git a/core/txpool/errors.go b/core/txpool/errors.go index 9bc435d67e..8285cbf10e 100644 --- a/core/txpool/errors.go +++ b/core/txpool/errors.go @@ -71,4 +71,7 @@ var ( // ErrInflightTxLimitReached is returned when the maximum number of in-flight // transactions is reached for specific accounts. ErrInflightTxLimitReached = errors.New("in-flight transaction limit reached for delegated accounts") + + // ErrKZGVerificationError is returned when a KZG proof was not verified correctly. 
+ ErrKZGVerificationError = errors.New("KZG verification error") ) diff --git a/core/txpool/validation.go b/core/txpool/validation.go index 4f985a8bd0..e0a333dfa5 100644 --- a/core/txpool/validation.go +++ b/core/txpool/validation.go @@ -202,7 +202,7 @@ func validateBlobSidecarLegacy(sidecar *types.BlobTxSidecar, hashes []common.Has } for i := range sidecar.Blobs { if err := kzg4844.VerifyBlobProof(&sidecar.Blobs[i], sidecar.Commitments[i], sidecar.Proofs[i]); err != nil { - return fmt.Errorf("invalid blob %d: %v", i, err) + return fmt.Errorf("%w: invalid blob proof: %v", ErrKZGVerificationError, err) } } return nil @@ -212,7 +212,10 @@ func validateBlobSidecarOsaka(sidecar *types.BlobTxSidecar, hashes []common.Hash if len(sidecar.Proofs) != len(hashes)*kzg4844.CellProofsPerBlob { return fmt.Errorf("invalid number of %d blob proofs expected %d", len(sidecar.Proofs), len(hashes)*kzg4844.CellProofsPerBlob) } - return kzg4844.VerifyCellProofs(sidecar.Blobs, sidecar.Commitments, sidecar.Proofs) + if err := kzg4844.VerifyCellProofs(sidecar.Blobs, sidecar.Commitments, sidecar.Proofs); err != nil { + return fmt.Errorf("%w: %v", ErrKZGVerificationError, err) + } + return nil } // ValidationOptionsWithState define certain differences between stateful transaction diff --git a/eth/fetcher/tx_fetcher.go b/eth/fetcher/tx_fetcher.go index 78e791f32b..50d6f2f7ad 100644 --- a/eth/fetcher/tx_fetcher.go +++ b/eth/fetcher/tx_fetcher.go @@ -114,10 +114,11 @@ type txRequest struct { // txDelivery is the notification that a batch of transactions have been added // to the pool and should be untracked. 
type txDelivery struct { - origin string // Identifier of the peer originating the notification - hashes []common.Hash // Batch of transaction hashes having been delivered - metas []txMetadata // Batch of metadata associated with the delivered hashes - direct bool // Whether this is a direct reply or a broadcast + origin string // Identifier of the peer originating the notification + hashes []common.Hash // Batch of transaction hashes having been delivered + metas []txMetadata // Batch of metadata associated with the delivered hashes + direct bool // Whether this is a direct reply or a broadcast + violation error // Whether we encountered a protocol violation } // txDrop is the notification that a peer has disconnected. @@ -292,6 +293,7 @@ func (f *TxFetcher) Enqueue(peer string, txs []*types.Transaction, direct bool) knownMeter = txReplyKnownMeter underpricedMeter = txReplyUnderpricedMeter otherRejectMeter = txReplyOtherRejectMeter + violation error ) if !direct { inMeter = txBroadcastInMeter @@ -338,6 +340,12 @@ func (f *TxFetcher) Enqueue(peer string, txs []*types.Transaction, direct bool) case errors.Is(err, txpool.ErrUnderpriced) || errors.Is(err, txpool.ErrReplaceUnderpriced) || errors.Is(err, txpool.ErrTxGasPriceTooLow): underpriced++ + case errors.Is(err, txpool.ErrKZGVerificationError): + // KZG verification failed, terminate transaction processing immediately. + // Since KZG verification is computationally expensive, this acts as a + // defensive measure against potential DoS attacks. + violation = err + default: otherreject++ } @@ -346,6 +354,11 @@ func (f *TxFetcher) Enqueue(peer string, txs []*types.Transaction, direct bool) kind: batch[j].Type(), size: uint32(batch[j].Size()), }) + // Terminate the transaction processing if violation is encountered. All + // the remaining transactions in response will be silently discarded. 
+ if violation != nil { + break + } } knownMeter.Mark(duplicate) underpricedMeter.Mark(underpriced) @@ -356,9 +369,13 @@ func (f *TxFetcher) Enqueue(peer string, txs []*types.Transaction, direct bool) log.Debug("Peer delivering stale or invalid transactions", "peer", peer, "rejected", otherreject) time.Sleep(200 * time.Millisecond) } + // If we encountered a protocol violation, disconnect this peer. + if violation != nil { + break + } } select { - case f.cleanup <- &txDelivery{origin: peer, hashes: added, metas: metas, direct: direct}: + case f.cleanup <- &txDelivery{origin: peer, hashes: added, metas: metas, direct: direct, violation: violation}: return nil case <-f.quit: return errTerminated @@ -753,6 +770,11 @@ func (f *TxFetcher) loop() { // Something was delivered, try to reschedule requests f.scheduleFetches(timeoutTimer, timeoutTrigger, nil) // Partial delivery may enable others to deliver too } + // If we encountered a protocol violation, disconnect the peer + if delivery.violation != nil { + log.Warn("Disconnect peer for protocol violation", "peer", delivery.origin, "error", delivery.violation) + f.dropPeer(delivery.origin) + } case drop := <-f.drop: // A peer was dropped, remove all traces of it diff --git a/eth/fetcher/tx_fetcher_test.go b/eth/fetcher/tx_fetcher_test.go index d6d5a8692e..58f5fd3e3d 100644 --- a/eth/fetcher/tx_fetcher_test.go +++ b/eth/fetcher/tx_fetcher_test.go @@ -17,6 +17,7 @@ package fetcher import ( + "crypto/sha256" "errors" "math/big" "math/rand" @@ -28,7 +29,10 @@ import ( "github.com/ethereum/go-ethereum/common/mclock" "github.com/ethereum/go-ethereum/core/txpool" "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/crypto/kzg4844" "github.com/ethereum/go-ethereum/params" + "github.com/holiman/uint256" ) var ( @@ -1937,6 +1941,114 @@ func TestTransactionFetcherWrongMetadata(t *testing.T) { }) } +func makeInvalidBlobTx() *types.Transaction { + key, _ := 
crypto.GenerateKey() + blob := &kzg4844.Blob{byte(0xa)} + commitment, _ := kzg4844.BlobToCommitment(blob) + blobHash := kzg4844.CalcBlobHashV1(sha256.New(), &commitment) + cellProof, _ := kzg4844.ComputeCellProofs(blob) + + // Mutate the cell proof + cellProof[0][0] = 0x0 + + blobtx := &types.BlobTx{ + ChainID: uint256.MustFromBig(params.MainnetChainConfig.ChainID), + Nonce: 0, + GasTipCap: uint256.NewInt(100), + GasFeeCap: uint256.NewInt(200), + Gas: 21000, + BlobFeeCap: uint256.NewInt(200), + BlobHashes: []common.Hash{blobHash}, + Value: uint256.NewInt(100), + Sidecar: types.NewBlobTxSidecar(types.BlobSidecarVersion1, []kzg4844.Blob{*blob}, []kzg4844.Commitment{commitment}, cellProof), + } + return types.MustSignNewTx(key, types.LatestSigner(params.MainnetChainConfig), blobtx) +} + +// This test ensures that the peer will be disconnected for protocol violation +// and all its internal traces should be removed properly. +func TestTransactionProtocolViolation(t *testing.T) { + //log.SetDefault(log.NewLogger(log.NewTerminalHandlerWithLevel(os.Stderr, log.LevelDebug, true))) + + var ( + badTx = makeInvalidBlobTx() + drop = make(chan struct{}, 1) + ) + testTransactionFetcherParallel(t, txFetcherTest{ + init: func() *TxFetcher { + return NewTxFetcher( + func(common.Hash, byte) error { return nil }, + func(txs []*types.Transaction) []error { + var errs []error + for range txs { + errs = append(errs, txpool.ErrKZGVerificationError) + } + return errs + }, + func(a string, b []common.Hash) error { + return nil + }, + func(peer string) { drop <- struct{}{} }, + ) + }, + steps: []interface{}{ + // Initial announcement to get something into the waitlist + doTxNotify{ + peer: "A", + hashes: []common.Hash{testTxs[0].Hash(), badTx.Hash(), testTxs[1].Hash()}, + types: []byte{types.LegacyTxType, types.BlobTxType, types.LegacyTxType}, + sizes: []uint32{uint32(testTxs[0].Size()), uint32(badTx.Size()), uint32(testTxs[1].Size())}, + }, + isWaiting(map[string][]announce{ + "A": { + 
{testTxs[0].Hash(), types.LegacyTxType, uint32(testTxs[0].Size())}, + {badTx.Hash(), types.BlobTxType, uint32(badTx.Size())}, + {testTxs[1].Hash(), types.LegacyTxType, uint32(testTxs[1].Size())}, + }, + }), + doWait{time: 0, step: true}, // zero time, but the blob fetching should be scheduled + + isWaiting(map[string][]announce{ + "A": { + {testTxs[0].Hash(), types.LegacyTxType, uint32(testTxs[0].Size())}, + {testTxs[1].Hash(), types.LegacyTxType, uint32(testTxs[1].Size())}, + }, + }), + isScheduled{ + tracking: map[string][]announce{ + "A": { + {badTx.Hash(), types.BlobTxType, uint32(badTx.Size())}, + }, + }, + fetching: map[string][]common.Hash{ + "A": {badTx.Hash()}, + }, + }, + + doTxEnqueue{ + peer: "A", + txs: []*types.Transaction{badTx}, + direct: true, + }, + // Some internal traces are left and will be cleaned by a following drop + // operation. + isWaiting(map[string][]announce{ + "A": { + {testTxs[0].Hash(), types.LegacyTxType, uint32(testTxs[0].Size())}, + {testTxs[1].Hash(), types.LegacyTxType, uint32(testTxs[1].Size())}, + }, + }), + isScheduled{}, + doFunc(func() { <-drop }), + + // Simulate the drop operation emitted by the server + doDrop("A"), + isWaiting(nil), + isScheduled{nil, nil, nil}, + }, + }) +} + func testTransactionFetcherParallel(t *testing.T, tt txFetcherTest) { t.Parallel() testTransactionFetcher(t, tt) From 3b17e782747fcd2cf06622324f3d48ad91f64ab3 Mon Sep 17 00:00:00 2001 From: lightclient Date: Fri, 9 Jan 2026 10:38:18 -0700 Subject: [PATCH 235/277] crypto/ecies: use aes blocksize Co-authored-by: Gary Rong --- crypto/ecies/ecies.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crypto/ecies/ecies.go b/crypto/ecies/ecies.go index 76f934c72d..9a892781f4 100644 --- a/crypto/ecies/ecies.go +++ b/crypto/ecies/ecies.go @@ -290,7 +290,7 @@ func (prv *PrivateKey) Decrypt(c, s1, s2 []byte) (m []byte, err error) { switch c[0] { case 2, 3, 4: rLen = (prv.PublicKey.Curve.Params().BitSize + 7) / 4 - if len(c) < (rLen + hLen + 
1) { + if len(c) < (rLen + hLen + params.BlockSize) { return nil, ErrInvalidMessage } default: From 94710f79a21fb64299555a545113545677e5dfbe Mon Sep 17 00:00:00 2001 From: DeFi Junkie Date: Wed, 14 Jan 2026 13:51:48 +0300 Subject: [PATCH 236/277] accounts/keystore: fix panic in decryptPreSaleKey (#33602) Validate ciphertext length in decryptPreSaleKey, preventing runtime panics on invalid input. --- accounts/keystore/presale.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/accounts/keystore/presale.go b/accounts/keystore/presale.go index 0664dc2cdd..6311e8d90a 100644 --- a/accounts/keystore/presale.go +++ b/accounts/keystore/presale.go @@ -81,6 +81,9 @@ func decryptPreSaleKey(fileContent []byte, password string) (key *Key, err error */ passBytes := []byte(password) derivedKey := pbkdf2.Key(passBytes, passBytes, 2000, 16, sha256.New) + if len(cipherText)%aes.BlockSize != 0 { + return nil, errors.New("ciphertext must be a multiple of block size") + } plainText, err := aesCBCDecrypt(derivedKey, cipherText, iv) if err != nil { return nil, err From a9acb3ff93bb1319aa1a822f1c1f3b54c8c27b77 Mon Sep 17 00:00:00 2001 From: Jonny Rhea <5555162+jrhea@users.noreply.github.com> Date: Wed, 14 Jan 2026 11:58:30 -0600 Subject: [PATCH 237/277] rpc, internal/telemetry: add OpenTelemetry tracing for JSON-RPC calls (#33452) Add Open Telemetry tracing inside the RPC server to help attribute runtime costs within `handler.handleCall()`. In particular, it allows us to distinguish time spent decoding arguments, invoking methods via reflection, and actually executing the method and constructing/encoding JSON responses. 
--------- Co-authored-by: lightclient --- cmd/keeper/go.mod | 2 +- cmd/keeper/go.sum | 12 +- go.mod | 18 ++- go.sum | 37 ++++-- internal/telemetry/telemetry.go | 104 +++++++++++++++++ rpc/client.go | 2 +- rpc/handler.go | 94 ++++++++++++---- rpc/server.go | 22 +++- rpc/service.go | 6 +- rpc/tracing_test.go | 192 ++++++++++++++++++++++++++++++++ 10 files changed, 436 insertions(+), 53 deletions(-) create mode 100644 internal/telemetry/telemetry.go create mode 100644 rpc/tracing_test.go diff --git a/cmd/keeper/go.mod b/cmd/keeper/go.mod index a42be042aa..cee1ce05a7 100644 --- a/cmd/keeper/go.mod +++ b/cmd/keeper/go.mod @@ -34,7 +34,7 @@ require ( github.com/tklauser/numcpus v0.6.1 // indirect golang.org/x/crypto v0.36.0 // indirect golang.org/x/sync v0.12.0 // indirect - golang.org/x/sys v0.36.0 // indirect + golang.org/x/sys v0.39.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect ) diff --git a/cmd/keeper/go.sum b/cmd/keeper/go.sum index 133a3b10b1..b93969cc60 100644 --- a/cmd/keeper/go.sum +++ b/cmd/keeper/go.sum @@ -96,12 +96,12 @@ github.com/prometheus/procfs v0.9.0 h1:wzCHvIvM5SxWqYvwgVL7yJY8Lz3PKn49KQtpgMYJf github.com/prometheus/procfs v0.9.0/go.mod h1:+pB4zwohETzFnmlpe6yd2lSc+0/46IYZRB/chUwxUZY= github.com/prysmaticlabs/gohashtree v0.0.4-beta h1:H/EbCuXPeTV3lpKeXGPpEV9gsUpkqOOVnWapUyeWro4= github.com/prysmaticlabs/gohashtree v0.0.4-beta/go.mod h1:BFdtALS+Ffhg3lGQIHv9HDWuHS8cTvHZzrHWxwOtGOs= -github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= -github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= +github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ= +github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc= github.com/shirou/gopsutil v3.21.4-0.20210419000835-c7a38de76ee5+incompatible h1:Bn1aCHHRnjv4Bl16T8rcaFjYSrGrIZvpiGO6P3Q4GpU= github.com/shirou/gopsutil 
v3.21.4-0.20210419000835-c7a38de76ee5+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= -github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= -github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= +github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= github.com/supranational/blst v0.3.16-0.20250831170142-f48500c1fdbe h1:nbdqkIGOGfUAD54q1s2YBcBz/WcsxCO9HUQ4aGV5hUw= github.com/supranational/blst v0.3.16-0.20250831170142-f48500c1fdbe/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw= github.com/tklauser/go-sysconf v0.3.12 h1:0QaGUFOdQaIVdPgfITYzaTegZvdCjmYO52cSFAEVmqU= @@ -118,8 +118,8 @@ golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.36.0 h1:KVRy2GtZBrk1cBYA7MKu5bEZFxQk4NIDV6RLVcC8o0k= -golang.org/x/sys v0.36.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +golang.org/x/sys v0.39.0 h1:CvCKL8MeisomCi6qNZ+wbb0DN9E5AATixKsvNtMoMFk= +golang.org/x/sys v0.39.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY= golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4= google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= diff --git a/go.mod b/go.mod index 66f3a3ffa5..7bfb6d25d7 100644 --- a/go.mod +++ b/go.mod @@ -31,7 +31,7 @@ require ( github.com/golang-jwt/jwt/v4 v4.5.2 github.com/golang/snappy v1.0.0 github.com/google/gofuzz v1.2.0 - github.com/google/uuid v1.3.0 + github.com/google/uuid v1.6.0 github.com/gorilla/websocket v1.4.2 
github.com/graph-gophers/graphql-go v1.3.0 github.com/hashicorp/go-bexpr v0.1.10 @@ -56,16 +56,19 @@ require ( github.com/rs/cors v1.7.0 github.com/shirou/gopsutil v3.21.4-0.20210419000835-c7a38de76ee5+incompatible github.com/status-im/keycard-go v0.2.0 - github.com/stretchr/testify v1.10.0 + github.com/stretchr/testify v1.11.1 github.com/supranational/blst v0.3.16-0.20250831170142-f48500c1fdbe github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 github.com/urfave/cli/v2 v2.27.5 + go.opentelemetry.io/otel v1.39.0 + go.opentelemetry.io/otel/sdk v1.39.0 + go.opentelemetry.io/otel/trace v1.39.0 go.uber.org/automaxprocs v1.5.2 go.uber.org/goleak v1.3.0 golang.org/x/crypto v0.36.0 golang.org/x/exp v0.0.0-20230626212559-97b1e661b5df golang.org/x/sync v0.12.0 - golang.org/x/sys v0.36.0 + golang.org/x/sys v0.39.0 golang.org/x/text v0.23.0 golang.org/x/time v0.9.0 golang.org/x/tools v0.29.0 @@ -74,6 +77,13 @@ require ( gopkg.in/yaml.v3 v3.0.1 ) +require ( + github.com/go-logr/logr v1.4.3 // indirect + github.com/go-logr/stdr v1.2.2 // indirect + go.opentelemetry.io/auto/sdk v1.2.1 // indirect + go.opentelemetry.io/otel/metric v1.39.0 // indirect +) + require ( github.com/Azure/azure-sdk-for-go/sdk/azcore v1.7.0 // indirect github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0 // indirect @@ -136,7 +146,7 @@ require ( github.com/prometheus/common v0.42.0 // indirect github.com/prometheus/procfs v0.9.0 // indirect github.com/rivo/uniseg v0.2.0 // indirect - github.com/rogpeppe/go-internal v1.12.0 // indirect + github.com/rogpeppe/go-internal v1.14.1 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect github.com/tklauser/go-sysconf v0.3.12 // indirect github.com/tklauser/numcpus v0.6.1 // indirect diff --git a/go.sum b/go.sum index ad066abc03..c9978a3d9e 100644 --- a/go.sum +++ b/go.sum @@ -136,6 +136,11 @@ github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeME github.com/go-chi/chi/v5 v5.0.0/go.mod 
h1:BBug9lr0cqtdAhsu6R4AAdvufI0/XBzAQSsUqJpoZOs= github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA= github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= +github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-ole/go-ole v1.2.5/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= github.com/go-ole/go-ole v1.3.0 h1:Dt6ye7+vXGIKZ7Xtk4s6/xVdGDQynvom7xCFEdWr6uE= github.com/go-ole/go-ole v1.3.0/go.mod h1:5LS6F96DhAwUc7C+1HLexzMXY1xGRSryjyPPKW6zv78= @@ -170,16 +175,16 @@ github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMyw github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= -github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= +github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8= github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/pprof 
v0.0.0-20230207041349-798e818bf904 h1:4/hN5RUoecvl+RmJRE2YxKWtnnQls6rQjjW5oV7qg2U= github.com/google/pprof v0.0.0-20230207041349-798e818bf904/go.mod h1:uglQLonpP8qtYCYyzA+8c/9qtqgA3qsXGYqCPKARAFg= -github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= -github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= @@ -322,8 +327,8 @@ github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY= github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= -github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= -github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= +github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ= +github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc= github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik= github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= @@ -343,8 +348,8 @@ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify 
v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= -github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= -github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= +github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= github.com/supranational/blst v0.3.16-0.20250831170142-f48500c1fdbe h1:nbdqkIGOGfUAD54q1s2YBcBz/WcsxCO9HUQ4aGV5hUw= github.com/supranational/blst v0.3.16-0.20250831170142-f48500c1fdbe/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw= github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY= @@ -363,6 +368,18 @@ github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1/go.mod h1:Ohn+xnUBi github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64= +go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y= +go.opentelemetry.io/otel v1.39.0 h1:8yPrr/S0ND9QEfTfdP9V+SiwT4E0G7Y5MO7p85nis48= +go.opentelemetry.io/otel v1.39.0/go.mod h1:kLlFTywNWrFyEdH0oj2xK0bFYZtHRYUdv1NklR/tgc8= +go.opentelemetry.io/otel/metric v1.39.0 h1:d1UzonvEZriVfpNKEVmHXbdf909uGTOQjA0HF0Ls5Q0= +go.opentelemetry.io/otel/metric v1.39.0/go.mod h1:jrZSWL33sD7bBxg1xjrqyDjnuzTUB0x1nBERXd7Ftcs= +go.opentelemetry.io/otel/sdk v1.39.0 h1:nMLYcjVsvdui1B/4FRkwjzoRVsMK8uL/cj0OyhKzt18= +go.opentelemetry.io/otel/sdk v1.39.0/go.mod h1:vDojkC4/jsTJsE+kh+LXYQlbL8CgrEcwmt1ENZszdJE= +go.opentelemetry.io/otel/sdk/metric v1.39.0 
h1:cXMVVFVgsIf2YL6QkRF4Urbr/aMInf+2WKg+sEJTtB8= +go.opentelemetry.io/otel/sdk/metric v1.39.0/go.mod h1:xq9HEVH7qeX69/JnwEfp6fVq5wosJsY1mt4lLfYdVew= +go.opentelemetry.io/otel/trace v1.39.0 h1:2d2vfpEDmCJ5zVYz7ijaJdOF59xLomrvj7bjt6/qCJI= +go.opentelemetry.io/otel/trace v1.39.0/go.mod h1:88w4/PnZSazkGzz/w84VHpQafiU4EtqqlVdxWy+rNOA= go.uber.org/automaxprocs v1.5.2 h1:2LxUOGiR3O6tw8ui5sZa2LAaHnsviZdVOUZw4fvbnME= go.uber.org/automaxprocs v1.5.2/go.mod h1:eRbA25aqJrxAbsLO0xy5jVwPt7FQnRgjW+efnwa1WM0= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= @@ -444,8 +461,8 @@ golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.36.0 h1:KVRy2GtZBrk1cBYA7MKu5bEZFxQk4NIDV6RLVcC8o0k= -golang.org/x/sys v0.36.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +golang.org/x/sys v0.39.0 h1:CvCKL8MeisomCi6qNZ+wbb0DN9E5AATixKsvNtMoMFk= +golang.org/x/sys v0.39.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= diff --git a/internal/telemetry/telemetry.go b/internal/telemetry/telemetry.go new file mode 100644 index 0000000000..6bd16da66c --- /dev/null +++ b/internal/telemetry/telemetry.go @@ -0,0 +1,104 @@ +// Copyright 2026 The go-ethereum Authors +// This file is part of the go-ethereum library. 
+// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package telemetry + +import ( + "context" + "fmt" + + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/codes" + semconv "go.opentelemetry.io/otel/semconv/v1.38.0" + "go.opentelemetry.io/otel/trace" +) + +// Attribute is an alias for attribute.KeyValue. +type Attribute = attribute.KeyValue + +// StringAttribute creates an attribute with a string value. +func StringAttribute(key, val string) Attribute { + return attribute.String(key, val) +} + +// Int64Attribute creates an attribute with an int64 value. +func Int64Attribute(key string, val int64) Attribute { + return attribute.Int64(key, val) +} + +// BoolAttribute creates an attribute with a bool value. +func BoolAttribute(key string, val bool) Attribute { + return attribute.Bool(key, val) +} + +// StartSpan creates a SpanKind=INTERNAL span. +func StartSpan(ctx context.Context, spanName string, attributes ...Attribute) (context.Context, trace.Span, func(error)) { + return StartSpanWithTracer(ctx, otel.Tracer(""), spanName, attributes...) +} + +// StartSpanWithTracer requires a tracer to be passed in and creates a SpanKind=INTERNAL span. 
+func StartSpanWithTracer(ctx context.Context, tracer trace.Tracer, name string, attributes ...Attribute) (context.Context, trace.Span, func(error)) { + return startSpan(ctx, tracer, trace.SpanKindInternal, name, attributes...) +} + +// RPCInfo contains information about the RPC request. +type RPCInfo struct { + System string + Service string + Method string + RequestID string +} + +// StartServerSpan creates a SpanKind=SERVER span at the JSON-RPC boundary. +// The span name is formatted as $rpcSystem.$rpcService/$rpcMethod +// (e.g. "jsonrpc.engine/newPayloadV4") which follows the Open Telemetry +// semantic convensions: https://opentelemetry.io/docs/specs/semconv/rpc/rpc-spans/#span-name. +func StartServerSpan(ctx context.Context, tracer trace.Tracer, rpc RPCInfo, others ...Attribute) (context.Context, func(error)) { + var ( + name = fmt.Sprintf("%s.%s/%s", rpc.System, rpc.Service, rpc.Method) + attributes = append([]Attribute{ + semconv.RPCSystemKey.String(rpc.System), + semconv.RPCServiceKey.String(rpc.Service), + semconv.RPCMethodKey.String(rpc.Method), + semconv.RPCJSONRPCRequestID(rpc.RequestID), + }, + others..., + ) + ) + ctx, _, end := startSpan(ctx, tracer, trace.SpanKindServer, name, attributes...) + return ctx, end +} + +// startSpan creates a span with the given kind. +func startSpan(ctx context.Context, tracer trace.Tracer, kind trace.SpanKind, spanName string, attributes ...Attribute) (context.Context, trace.Span, func(error)) { + ctx, span := tracer.Start(ctx, spanName, trace.WithSpanKind(kind)) + if len(attributes) > 0 { + span.SetAttributes(attributes...) + } + return ctx, span, endSpan(span) +} + +// endSpan ends the span and handles error recording. 
+func endSpan(span trace.Span) func(error) { + return func(err error) { + if err != nil { + span.RecordError(err) + span.SetStatus(codes.Error, err.Error()) + } + span.End() + } +} diff --git a/rpc/client.go b/rpc/client.go index 9dc36a6105..8d81503d59 100644 --- a/rpc/client.go +++ b/rpc/client.go @@ -119,7 +119,7 @@ func (c *Client) newClientConn(conn ServerCodec) *clientConn { ctx := context.Background() ctx = context.WithValue(ctx, clientContextKey{}, c) ctx = context.WithValue(ctx, peerInfoContextKey{}, conn.peerInfo()) - handler := newHandler(ctx, conn, c.idgen, c.services, c.batchItemLimit, c.batchResponseMaxSize) + handler := newHandler(ctx, conn, c.idgen, c.services, c.batchItemLimit, c.batchResponseMaxSize, nil) return &clientConn{conn, handler} } diff --git a/rpc/handler.go b/rpc/handler.go index 462519d872..4ac3a26df1 100644 --- a/rpc/handler.go +++ b/rpc/handler.go @@ -28,7 +28,10 @@ import ( "sync" "time" + "github.com/ethereum/go-ethereum/internal/telemetry" "github.com/ethereum/go-ethereum/log" + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/trace" ) // handler handles JSON-RPC messages. There is one handler per connection. 
Note that @@ -65,6 +68,7 @@ type handler struct { allowSubscribe bool batchRequestLimit int batchResponseMaxSize int + tracerProvider trace.TracerProvider subLock sync.Mutex serverSubs map[ID]*Subscription @@ -73,9 +77,10 @@ type handler struct { type callProc struct { ctx context.Context notifiers []*Notifier + isBatch bool } -func newHandler(connCtx context.Context, conn jsonWriter, idgen func() ID, reg *serviceRegistry, batchRequestLimit, batchResponseMaxSize int) *handler { +func newHandler(connCtx context.Context, conn jsonWriter, idgen func() ID, reg *serviceRegistry, batchRequestLimit, batchResponseMaxSize int, tracerProvider trace.TracerProvider) *handler { rootCtx, cancelRoot := context.WithCancel(connCtx) h := &handler{ reg: reg, @@ -90,6 +95,7 @@ func newHandler(connCtx context.Context, conn jsonWriter, idgen func() ID, reg * log: log.Root(), batchRequestLimit: batchRequestLimit, batchResponseMaxSize: batchResponseMaxSize, + tracerProvider: tracerProvider, } if conn.remoteAddr() != "" { h.log = h.log.New("conn", conn.remoteAddr()) @@ -197,6 +203,7 @@ func (h *handler) handleBatch(msgs []*jsonrpcMessage) { // Process calls on a goroutine because they may block indefinitely: h.startCallProc(func(cp *callProc) { + cp.isBatch = true var ( timer *time.Timer cancel context.CancelFunc @@ -497,40 +504,65 @@ func (h *handler) handleCall(cp *callProc, msg *jsonrpcMessage) *jsonrpcMessage if msg.isSubscribe() { return h.handleSubscribe(cp, msg) } - var callb *callback if msg.isUnsubscribe() { - callb = h.unsubscribeCb - } else { - // Check method name length - if len(msg.Method) > maxMethodNameLength { - return msg.errorResponse(&invalidRequestError{fmt.Sprintf("method name too long: %d > %d", len(msg.Method), maxMethodNameLength)}) + args, err := parsePositionalArguments(msg.Params, h.unsubscribeCb.argTypes) + if err != nil { + return msg.errorResponse(&invalidParamsError{err.Error()}) } - callb = h.reg.callback(msg.Method) + return h.runMethod(cp.ctx, msg, 
h.unsubscribeCb, args) } + + // Check method name length + if len(msg.Method) > maxMethodNameLength { + return msg.errorResponse(&invalidRequestError{fmt.Sprintf("method name too long: %d > %d", len(msg.Method), maxMethodNameLength)}) + } + callb, service, method := h.reg.callback(msg.Method) + + // If the method is not found, return an error. if callb == nil { return msg.errorResponse(&methodNotFoundError{method: msg.Method}) } + // Start root span for the request. + var err error + rpcInfo := telemetry.RPCInfo{ + System: "jsonrpc", + Service: service, + Method: method, + RequestID: string(msg.ID), + } + attrib := []telemetry.Attribute{ + telemetry.BoolAttribute("rpc.batch", cp.isBatch), + } + ctx, spanEnd := telemetry.StartServerSpan(cp.ctx, h.tracer(), rpcInfo, attrib...) + defer spanEnd(err) + + // Start tracing span before parsing arguments. + _, _, pSpanEnd := telemetry.StartSpanWithTracer(ctx, h.tracer(), "rpc.parsePositionalArguments") args, err := parsePositionalArguments(msg.Params, callb.argTypes) + pSpanEnd(err) if err != nil { return msg.errorResponse(&invalidParamsError{err.Error()}) } start := time.Now() - answer := h.runMethod(cp.ctx, msg, callb, args) + + // Start tracing span before running the method. + rctx, _, rSpanEnd := telemetry.StartSpanWithTracer(ctx, h.tracer(), "rpc.runMethod") + answer := h.runMethod(rctx, msg, callb, args) + if answer.Error != nil { + err = errors.New(answer.Error.Message) + } + rSpanEnd(err) // Collect the statistics for RPC calls if metrics is enabled. - // We only care about pure rpc call. Filter out subscription. 
- if callb != h.unsubscribeCb { - rpcRequestGauge.Inc(1) - if answer.Error != nil { - failedRequestGauge.Inc(1) - } else { - successfulRequestGauge.Inc(1) - } - rpcServingTimer.UpdateSince(start) - updateServeTimeHistogram(msg.Method, answer.Error == nil, time.Since(start)) + rpcRequestGauge.Inc(1) + if answer.Error != nil { + failedRequestGauge.Inc(1) + } else { + successfulRequestGauge.Inc(1) } - + rpcServingTimer.UpdateSince(start) + updateServeTimeHistogram(msg.Method, answer.Error == nil, time.Since(start)) return answer } @@ -568,17 +600,33 @@ func (h *handler) handleSubscribe(cp *callProc, msg *jsonrpcMessage) *jsonrpcMes n := &Notifier{h: h, namespace: namespace} cp.notifiers = append(cp.notifiers, n) ctx := context.WithValue(cp.ctx, notifierKey{}, n) - return h.runMethod(ctx, msg, callb, args) } +// tracer returns the OpenTelemetry Tracer for RPC call tracing. +func (h *handler) tracer() trace.Tracer { + if h.tracerProvider == nil { + // Default to global TracerProvider if none is set. + // Note: If no TracerProvider is set, the default is a no-op TracerProvider. + // See https://pkg.go.dev/go.opentelemetry.io/otel#GetTracerProvider + return otel.Tracer("") + } + return h.tracerProvider.Tracer("") +} + // runMethod runs the Go callback for an RPC method. -func (h *handler) runMethod(ctx context.Context, msg *jsonrpcMessage, callb *callback, args []reflect.Value) *jsonrpcMessage { +func (h *handler) runMethod(ctx context.Context, msg *jsonrpcMessage, callb *callback, args []reflect.Value, attributes ...telemetry.Attribute) *jsonrpcMessage { result, err := callb.call(ctx, msg.Method, args) if err != nil { return msg.errorResponse(err) } - return msg.response(result) + _, _, spanEnd := telemetry.StartSpanWithTracer(ctx, h.tracer(), "rpc.encodeJSONResponse", attributes...) 
+ response := msg.response(result) + if response.Error != nil { + err = errors.New(response.Error.Message) + } + spanEnd(err) + return response } // unsubscribe is the callback function for all *_unsubscribe calls. diff --git a/rpc/server.go b/rpc/server.go index 599e31fb41..94d4a3e13e 100644 --- a/rpc/server.go +++ b/rpc/server.go @@ -25,6 +25,7 @@ import ( "sync/atomic" "github.com/ethereum/go-ethereum/log" + "go.opentelemetry.io/otel/trace" ) const MetadataApi = "rpc" @@ -55,15 +56,17 @@ type Server struct { batchResponseLimit int httpBodyLimit int wsReadLimit int64 + tracerProvider trace.TracerProvider } // NewServer creates a new server instance with no registered handlers. func NewServer() *Server { server := &Server{ - idgen: randomIDGenerator(), - codecs: make(map[ServerCodec]struct{}), - httpBodyLimit: defaultBodyLimit, - wsReadLimit: wsDefaultReadLimit, + idgen: randomIDGenerator(), + codecs: make(map[ServerCodec]struct{}), + httpBodyLimit: defaultBodyLimit, + wsReadLimit: wsDefaultReadLimit, + tracerProvider: nil, } server.run.Store(true) // Register the default service providing meta information about the RPC service such @@ -129,6 +132,15 @@ func (s *Server) ServeCodec(codec ServerCodec, options CodecOption) { c.Close() } +// setTracerProvider configures the OpenTelemetry TracerProvider for RPC call tracing. +// Note: This method (and the TracerProvider field in the Server/Handler struct) is +// primarily intended for testing. In particular, it allows tests to configure an +// isolated TracerProvider without changing the global provider, avoiding +// interference between tests running in parallel. 
+func (s *Server) setTracerProvider(tp trace.TracerProvider) { + s.tracerProvider = tp +} + func (s *Server) trackCodec(codec ServerCodec) bool { s.mutex.Lock() defer s.mutex.Unlock() @@ -156,7 +168,7 @@ func (s *Server) serveSingleRequest(ctx context.Context, codec ServerCodec) { return } - h := newHandler(ctx, codec, s.idgen, &s.services, s.batchItemLimit, s.batchResponseLimit) + h := newHandler(ctx, codec, s.idgen, &s.services, s.batchItemLimit, s.batchResponseLimit, s.tracerProvider) h.allowSubscribe = false defer h.close(io.EOF, nil) diff --git a/rpc/service.go b/rpc/service.go index 0f62d7eb7c..8462a5a59a 100644 --- a/rpc/service.go +++ b/rpc/service.go @@ -92,14 +92,14 @@ func (r *serviceRegistry) registerName(name string, rcvr interface{}) error { } // callback returns the callback corresponding to the given RPC method name. -func (r *serviceRegistry) callback(method string) *callback { +func (r *serviceRegistry) callback(method string) (cb *callback, service, methodName string) { before, after, found := strings.Cut(method, serviceMethodSeparator) if !found { - return nil + return nil, "", "" } r.mu.Lock() defer r.mu.Unlock() - return r.services[before].callbacks[after] + return r.services[before].callbacks[after], before, after } // subscription returns a subscription callback in the given service. diff --git a/rpc/tracing_test.go b/rpc/tracing_test.go new file mode 100644 index 0000000000..89cd31a075 --- /dev/null +++ b/rpc/tracing_test.go @@ -0,0 +1,192 @@ +// Copyright 2025 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package rpc + +import ( + "context" + "net/http/httptest" + "testing" + + "go.opentelemetry.io/otel/attribute" + sdktrace "go.opentelemetry.io/otel/sdk/trace" + "go.opentelemetry.io/otel/sdk/trace/tracetest" +) + +// attributeMap converts a slice of attributes to a map. +func attributeMap(attrs []attribute.KeyValue) map[string]string { + m := make(map[string]string) + for _, a := range attrs { + switch a.Value.Type() { + case attribute.STRING: + m[string(a.Key)] = a.Value.AsString() + case attribute.BOOL: + if a.Value.AsBool() { + m[string(a.Key)] = "true" + } else { + m[string(a.Key)] = "false" + } + default: + m[string(a.Key)] = a.Value.Emit() + } + } + return m +} + +// newTracingServer creates a new server with tracing enabled. +func newTracingServer(t *testing.T) (*Server, *sdktrace.TracerProvider, *tracetest.InMemoryExporter) { + t.Helper() + exporter := tracetest.NewInMemoryExporter() + tp := sdktrace.NewTracerProvider(sdktrace.WithSyncer(exporter)) + t.Cleanup(func() { _ = tp.Shutdown(context.Background()) }) + server := newTestServer() + server.setTracerProvider(tp) + t.Cleanup(server.Stop) + return server, tp, exporter +} + +// TestTracingHTTP verifies that RPC spans are emitted when processing HTTP requests. +func TestTracingHTTP(t *testing.T) { + t.Parallel() + server, tracer, exporter := newTracingServer(t) + httpsrv := httptest.NewServer(server) + t.Cleanup(httpsrv.Close) + client, err := DialHTTP(httpsrv.URL) + if err != nil { + t.Fatalf("failed to dial: %v", err) + } + t.Cleanup(client.Close) + + // Make a successful RPC call. 
+ var result echoResult + if err := client.Call(&result, "test_echo", "hello", 42, &echoArgs{S: "world"}); err != nil { + t.Fatalf("RPC call failed: %v", err) + } + + // Flush and verify that we emitted the expected span. + if err := tracer.ForceFlush(context.Background()); err != nil { + t.Fatalf("failed to flush: %v", err) + } + spans := exporter.GetSpans() + if len(spans) == 0 { + t.Fatal("no spans were emitted") + } + var rpcSpan *tracetest.SpanStub + for i := range spans { + if spans[i].Name == "jsonrpc.test/echo" { + rpcSpan = &spans[i] + break + } + } + if rpcSpan == nil { + t.Fatalf("jsonrpc.test/echo span not found.") + } + attrs := attributeMap(rpcSpan.Attributes) + if attrs["rpc.system"] != "jsonrpc" { + t.Errorf("expected rpc.system=jsonrpc, got %v", attrs["rpc.system"]) + } + if attrs["rpc.service"] != "test" { + t.Errorf("expected rpc.service=test, got %v", attrs["rpc.service"]) + } + if attrs["rpc.method"] != "echo" { + t.Errorf("expected rpc.method=echo, got %v", attrs["rpc.method"]) + } + if _, ok := attrs["rpc.jsonrpc.request_id"]; !ok { + t.Errorf("expected rpc.jsonrpc.request_id attribute to be set") + } +} + +// TestTracingBatchHTTP verifies that RPC spans are emitted for batched JSON-RPC calls over HTTP. +func TestTracingBatchHTTP(t *testing.T) { + t.Parallel() + server, tracer, exporter := newTracingServer(t) + httpsrv := httptest.NewServer(server) + t.Cleanup(httpsrv.Close) + client, err := DialHTTP(httpsrv.URL) + if err != nil { + t.Fatalf("failed to dial: %v", err) + } + t.Cleanup(client.Close) + + // Make a successful batch RPC call. + batch := []BatchElem{ + { + Method: "test_echo", + Args: []any{"hello", 42, &echoArgs{S: "world"}}, + Result: new(echoResult), + }, + { + Method: "test_echo", + Args: []any{"your", 7, &echoArgs{S: "mom"}}, + Result: new(echoResult), + }, + } + if err := client.BatchCall(batch); err != nil { + t.Fatalf("batch RPC call failed: %v", err) + } + + // Flush and verify we emitted spans for each batch element. 
+ if err := tracer.ForceFlush(context.Background()); err != nil { + t.Fatalf("failed to flush: %v", err) + } + spans := exporter.GetSpans() + if len(spans) == 0 { + t.Fatal("no spans were emitted") + } + var found int + for i := range spans { + if spans[i].Name == "jsonrpc.test/echo" { + attrs := attributeMap(spans[i].Attributes) + if attrs["rpc.system"] == "jsonrpc" && + attrs["rpc.service"] == "test" && + attrs["rpc.method"] == "echo" && + attrs["rpc.batch"] == "true" { + found++ + } + } + } + if found != len(batch) { + t.Fatalf("expected %d matching batch spans, got %d", len(batch), found) + } +} + +// TestTracingSubscribeUnsubscribe verifies that subscribe and unsubscribe calls +// do not emit any spans. +// Note: This works because client.newClientConn() passes nil as the tracer provider. +func TestTracingSubscribeUnsubscribe(t *testing.T) { + t.Parallel() + server, tracer, exporter := newTracingServer(t) + client := DialInProc(server) + t.Cleanup(client.Close) + + // Subscribe to notifications. + sub, err := client.Subscribe(context.Background(), "nftest", make(chan int), "someSubscription", 1, 1) + if err != nil { + t.Fatalf("subscribe failed: %v", err) + } + + // Unsubscribe. + sub.Unsubscribe() + + // Flush and check that no spans were emitted. + if err := tracer.ForceFlush(context.Background()); err != nil { + t.Fatalf("failed to flush: %v", err) + } + spans := exporter.GetSpans() + if len(spans) != 0 { + t.Errorf("expected no spans for subscribe/unsubscribe, got %d", len(spans)) + } +} From e3e556b266ce0c645002f80195ac786dd5d9f2f8 Mon Sep 17 00:00:00 2001 From: Jonny Rhea <5555162+jrhea@users.noreply.github.com> Date: Wed, 14 Jan 2026 15:03:48 -0600 Subject: [PATCH 238/277] rpc: extract OpenTelemetry trace context from request headers (#33599) This PR adds support for the extraction of OpenTelemetry trace context from incoming JSON-RPC request headers, allowing geth spans to be linked to upstream traces when present. 
--------- Co-authored-by: lightclient --- rpc/http.go | 6 ++++++ rpc/tracing_test.go | 36 ++++++++++++++++++++++++++++++++++-- 2 files changed, 40 insertions(+), 2 deletions(-) diff --git a/rpc/http.go b/rpc/http.go index a74f36a1b0..55f0abfa72 100644 --- a/rpc/http.go +++ b/rpc/http.go @@ -30,6 +30,9 @@ import ( "strconv" "sync" "time" + + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/propagation" ) const ( @@ -334,6 +337,9 @@ func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) { ctx := r.Context() ctx = context.WithValue(ctx, peerInfoContextKey{}, connInfo) + // Extract trace context from incoming headers. + ctx = otel.GetTextMapPropagator().Extract(ctx, propagation.HeaderCarrier(r.Header)) + // All checks passed, create a codec that reads directly from the request body // until EOF, writes the response to w, and orders the server to process a // single request. diff --git a/rpc/tracing_test.go b/rpc/tracing_test.go index 89cd31a075..f32a647e6f 100644 --- a/rpc/tracing_test.go +++ b/rpc/tracing_test.go @@ -21,7 +21,9 @@ import ( "net/http/httptest" "testing" + "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/propagation" sdktrace "go.opentelemetry.io/otel/sdk/trace" "go.opentelemetry.io/otel/sdk/trace/tracetest" ) @@ -60,16 +62,33 @@ func newTracingServer(t *testing.T) (*Server, *sdktrace.TracerProvider, *tracete // TestTracingHTTP verifies that RPC spans are emitted when processing HTTP requests. func TestTracingHTTP(t *testing.T) { - t.Parallel() + // Not parallel: this test modifies the global otel TextMapPropagator. + + // Set up a propagator to extract W3C Trace Context headers. 
+ originalPropagator := otel.GetTextMapPropagator() + otel.SetTextMapPropagator(propagation.TraceContext{}) + t.Cleanup(func() { otel.SetTextMapPropagator(originalPropagator) }) + server, tracer, exporter := newTracingServer(t) httpsrv := httptest.NewServer(server) t.Cleanup(httpsrv.Close) + + // Define the expected trace and span IDs for context propagation. + const ( + traceID = "4bf92f3577b34da6a3ce929d0e0e4736" + parentSpanID = "00f067aa0ba902b7" + traceparent = "00-" + traceID + "-" + parentSpanID + "-01" + ) + client, err := DialHTTP(httpsrv.URL) if err != nil { t.Fatalf("failed to dial: %v", err) } t.Cleanup(client.Close) + // Set trace context headers. + client.SetHeader("traceparent", traceparent) + // Make a successful RPC call. var result echoResult if err := client.Call(&result, "test_echo", "hello", 42, &echoArgs{S: "world"}); err != nil { @@ -92,8 +111,10 @@ func TestTracingHTTP(t *testing.T) { } } if rpcSpan == nil { - t.Fatalf("jsonrpc.test/echo span not found.") + t.Fatalf("jsonrpc.test/echo span not found") } + + // Verify span attributes. attrs := attributeMap(rpcSpan.Attributes) if attrs["rpc.system"] != "jsonrpc" { t.Errorf("expected rpc.system=jsonrpc, got %v", attrs["rpc.system"]) @@ -107,6 +128,17 @@ func TestTracingHTTP(t *testing.T) { if _, ok := attrs["rpc.jsonrpc.request_id"]; !ok { t.Errorf("expected rpc.jsonrpc.request_id attribute to be set") } + + // Verify the span's parent matches the traceparent header values. + if got := rpcSpan.Parent.TraceID().String(); got != traceID { + t.Errorf("parent trace ID mismatch: got %s, want %s", got, traceID) + } + if got := rpcSpan.Parent.SpanID().String(); got != parentSpanID { + t.Errorf("parent span ID mismatch: got %s, want %s", got, parentSpanID) + } + if !rpcSpan.Parent.IsRemote() { + t.Error("expected parent span context to be marked as remote") + } } // TestTracingBatchHTTP verifies that RPC spans are emitted for batched JSON-RPC calls over HTTP. 
From 494908a8523af0e67d22d7930df15787ca5776b2 Mon Sep 17 00:00:00 2001 From: rjl493456442 Date: Thu, 15 Jan 2026 17:28:57 +0800 Subject: [PATCH 239/277] triedb/pathdb: change the bitmap to big endian (#33584) The bitmap is used in compact-encoded trie nodes to indicate which elements have been modified. The bitmap format has been updated to use big-endian encoding. Bit positions are numbered from 0 to 15, where position 0 corresponds to the most significant bit of b[0], and position 15 corresponds to the least significant bit of b[1]. --- triedb/pathdb/history_trienode_utils.go | 23 ++++++++++ triedb/pathdb/history_trienode_utils_test.go | 45 ++++++++++++++++++++ triedb/pathdb/nodes.go | 12 +----- 3 files changed, 70 insertions(+), 10 deletions(-) diff --git a/triedb/pathdb/history_trienode_utils.go b/triedb/pathdb/history_trienode_utils.go index 0513343404..241b8a7d3c 100644 --- a/triedb/pathdb/history_trienode_utils.go +++ b/triedb/pathdb/history_trienode_utils.go @@ -19,6 +19,7 @@ package pathdb import ( "encoding/binary" "fmt" + "math/bits" "slices" ) @@ -81,3 +82,25 @@ func isBitSet(b []byte, index int) bool { func setBit(b []byte, index int) { b[index/8] |= 1 << (7 - index%8) } + +// bitPosTwoBytes returns the positions of set bits in a 2-byte bitmap. +// +// The bitmap is interpreted as a big-endian uint16. Bit positions are +// numbered from 0 to 15, where position 0 corresponds to the most +// significant bit of b[0], and position 15 corresponds to the least +// significant bit of b[1]. 
+func bitPosTwoBytes(b []byte) []int { + if len(b) != 2 { + panic("expect 2 bytes") + } + var ( + pos []int + mask = binary.BigEndian.Uint16(b) + ) + for mask != 0 { + p := bits.LeadingZeros16(mask) + pos = append(pos, p) + mask &^= 1 << (15 - p) + } + return pos +} diff --git a/triedb/pathdb/history_trienode_utils_test.go b/triedb/pathdb/history_trienode_utils_test.go index 17eabb2a98..c3bd0d5b1f 100644 --- a/triedb/pathdb/history_trienode_utils_test.go +++ b/triedb/pathdb/history_trienode_utils_test.go @@ -18,6 +18,7 @@ package pathdb import ( "bytes" + "reflect" "testing" ) @@ -79,3 +80,47 @@ func TestBitmapSet(t *testing.T) { } } } + +func TestBitPositions(t *testing.T) { + suites := []struct { + input []byte + expect []int + }{ + { + []byte{0b10000000, 0x0}, []int{0}, + }, + { + []byte{0b01000000, 0x0}, []int{1}, + }, + { + []byte{0b00000001, 0x0}, []int{7}, + }, + { + []byte{0b00000000, 0b10000000}, []int{8}, + }, + { + []byte{0b00000000, 0b00000001}, []int{15}, + }, + { + []byte{0b10000000, 0b00000001}, []int{0, 15}, + }, + { + []byte{0b10000001, 0b00000001}, []int{0, 7, 15}, + }, + { + []byte{0b10000001, 0b10000001}, []int{0, 7, 8, 15}, + }, + { + []byte{0b11111111, 0b11111111}, []int{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}, + }, + { + []byte{0x0, 0x0}, nil, + }, + } + for _, tc := range suites { + got := bitPosTwoBytes(tc.input) + if !reflect.DeepEqual(got, tc.expect) { + t.Fatalf("Unexpected position set, want: %v, got: %v", tc.expect, got) + } + } +} diff --git a/triedb/pathdb/nodes.go b/triedb/pathdb/nodes.go index 4eede439e4..b7290ed235 100644 --- a/triedb/pathdb/nodes.go +++ b/triedb/pathdb/nodes.go @@ -500,8 +500,7 @@ func encodeNodeCompressed(addExtension bool, elements [][]byte, indices []int) [ flag |= 1 << 4 // Use the reserved flagE continue } - bitIndex := uint(pos % 8) - bitmap[pos/8] |= 1 << bitIndex + setBit(bitmap, pos) } enc = append(enc, flag) enc = append(enc, bitmap...) 
@@ -553,14 +552,7 @@ func decodeNodeCompressed(data []byte) ([][]byte, []int, error) { return nil, nil, errors.New("invalid data: too short") } bitmap := data[1:3] - for index, b := range bitmap { - for bitIdx := 0; bitIdx < 8; bitIdx++ { - if b&(1< Date: Thu, 15 Jan 2026 19:37:34 +0100 Subject: [PATCH 240/277] eth/fetcher: refactor test code (#33610) Remove a large amount of duplicate code from the tx_fetcher tests. --------- Signed-off-by: Csaba Kiraly Co-authored-by: lightclient --- eth/fetcher/tx_fetcher_test.go | 379 ++++++++------------------------- 1 file changed, 92 insertions(+), 287 deletions(-) diff --git a/eth/fetcher/tx_fetcher_test.go b/eth/fetcher/tx_fetcher_test.go index 58f5fd3e3d..87fbe9f38c 100644 --- a/eth/fetcher/tx_fetcher_test.go +++ b/eth/fetcher/tx_fetcher_test.go @@ -87,6 +87,19 @@ type txFetcherTest struct { steps []interface{} } +// newTestTxFetcher creates a tx fetcher with noop callbacks, simulated clock, +// and deterministic randomness. +func newTestTxFetcher() *TxFetcher { + return NewTxFetcher( + func(common.Hash, byte) error { return nil }, + func(txs []*types.Transaction) []error { + return make([]error, len(txs)) + }, + func(string, []common.Hash) error { return nil }, + nil, + ) +} + // Tests that transaction announcements with associated metadata are added to a // waitlist, and none of them are scheduled for retrieval until the wait expires. // @@ -95,14 +108,7 @@ type txFetcherTest struct { // with all the useless extra fields. 
func TestTransactionFetcherWaiting(t *testing.T) { testTransactionFetcherParallel(t, txFetcherTest{ - init: func() *TxFetcher { - return NewTxFetcher( - func(common.Hash, byte) error { return nil }, - nil, - func(string, []common.Hash) error { return nil }, - nil, - ) - }, + init: newTestTxFetcher, steps: []interface{}{ // Initial announcement to get something into the waitlist doTxNotify{peer: "A", hashes: []common.Hash{{0x01}, {0x02}}, types: []byte{types.LegacyTxType, types.LegacyTxType}, sizes: []uint32{111, 222}}, @@ -297,14 +303,7 @@ func TestTransactionFetcherWaiting(t *testing.T) { // already scheduled. func TestTransactionFetcherSkipWaiting(t *testing.T) { testTransactionFetcherParallel(t, txFetcherTest{ - init: func() *TxFetcher { - return NewTxFetcher( - func(common.Hash, byte) error { return nil }, - nil, - func(string, []common.Hash) error { return nil }, - nil, - ) - }, + init: newTestTxFetcher, steps: []interface{}{ // Push an initial announcement through to the scheduled stage doTxNotify{ @@ -387,14 +386,7 @@ func TestTransactionFetcherSkipWaiting(t *testing.T) { // and subsequent announces block or get allotted to someone else. 
func TestTransactionFetcherSingletonRequesting(t *testing.T) { testTransactionFetcherParallel(t, txFetcherTest{ - init: func() *TxFetcher { - return NewTxFetcher( - func(common.Hash, byte) error { return nil }, - nil, - func(string, []common.Hash) error { return nil }, - nil, - ) - }, + init: newTestTxFetcher, steps: []interface{}{ // Push an initial announcement through to the scheduled stage doTxNotify{peer: "A", hashes: []common.Hash{{0x01}, {0x02}}, types: []byte{types.LegacyTxType, types.LegacyTxType}, sizes: []uint32{111, 222}}, @@ -493,15 +485,12 @@ func TestTransactionFetcherFailedRescheduling(t *testing.T) { proceed := make(chan struct{}) testTransactionFetcherParallel(t, txFetcherTest{ init: func() *TxFetcher { - return NewTxFetcher( - func(common.Hash, byte) error { return nil }, - nil, - func(origin string, hashes []common.Hash) error { - <-proceed - return errors.New("peer disconnected") - }, - nil, - ) + f := newTestTxFetcher() + f.fetchTxs = func(origin string, hashes []common.Hash) error { + <-proceed + return errors.New("peer disconnected") + } + return f }, steps: []interface{}{ // Push an initial announcement through to the scheduled stage @@ -576,16 +565,7 @@ func TestTransactionFetcherFailedRescheduling(t *testing.T) { // are cleaned up. func TestTransactionFetcherCleanup(t *testing.T) { testTransactionFetcherParallel(t, txFetcherTest{ - init: func() *TxFetcher { - return NewTxFetcher( - func(common.Hash, byte) error { return nil }, - func(txs []*types.Transaction) []error { - return make([]error, len(txs)) - }, - func(string, []common.Hash) error { return nil }, - nil, - ) - }, + init: newTestTxFetcher, steps: []interface{}{ // Push an initial announcement through to the scheduled stage doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[0]}, types: []byte{testTxs[0].Type()}, sizes: []uint32{uint32(testTxs[0].Size())}}, @@ -620,16 +600,7 @@ func TestTransactionFetcherCleanup(t *testing.T) { // this was a bug)). 
func TestTransactionFetcherCleanupEmpty(t *testing.T) { testTransactionFetcherParallel(t, txFetcherTest{ - init: func() *TxFetcher { - return NewTxFetcher( - func(common.Hash, byte) error { return nil }, - func(txs []*types.Transaction) []error { - return make([]error, len(txs)) - }, - func(string, []common.Hash) error { return nil }, - nil, - ) - }, + init: newTestTxFetcher, steps: []interface{}{ // Push an initial announcement through to the scheduled stage doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[0]}, types: []byte{testTxs[0].Type()}, sizes: []uint32{uint32(testTxs[0].Size())}}, @@ -663,16 +634,7 @@ func TestTransactionFetcherCleanupEmpty(t *testing.T) { // different peer, or self if they are after the cutoff point. func TestTransactionFetcherMissingRescheduling(t *testing.T) { testTransactionFetcherParallel(t, txFetcherTest{ - init: func() *TxFetcher { - return NewTxFetcher( - func(common.Hash, byte) error { return nil }, - func(txs []*types.Transaction) []error { - return make([]error, len(txs)) - }, - func(string, []common.Hash) error { return nil }, - nil, - ) - }, + init: newTestTxFetcher, steps: []interface{}{ // Push an initial announcement through to the scheduled stage doTxNotify{peer: "A", @@ -724,16 +686,7 @@ func TestTransactionFetcherMissingRescheduling(t *testing.T) { // delivered, the peer gets properly cleaned out from the internal state. 
func TestTransactionFetcherMissingCleanup(t *testing.T) { testTransactionFetcherParallel(t, txFetcherTest{ - init: func() *TxFetcher { - return NewTxFetcher( - func(common.Hash, byte) error { return nil }, - func(txs []*types.Transaction) []error { - return make([]error, len(txs)) - }, - func(string, []common.Hash) error { return nil }, - nil, - ) - }, + init: newTestTxFetcher, steps: []interface{}{ // Push an initial announcement through to the scheduled stage doTxNotify{peer: "A", @@ -773,16 +726,7 @@ func TestTransactionFetcherMissingCleanup(t *testing.T) { // Tests that transaction broadcasts properly clean up announcements. func TestTransactionFetcherBroadcasts(t *testing.T) { testTransactionFetcherParallel(t, txFetcherTest{ - init: func() *TxFetcher { - return NewTxFetcher( - func(common.Hash, byte) error { return nil }, - func(txs []*types.Transaction) []error { - return make([]error, len(txs)) - }, - func(string, []common.Hash) error { return nil }, - nil, - ) - }, + init: newTestTxFetcher, steps: []interface{}{ // Set up three transactions to be in different stats, waiting, queued and fetching doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[0]}, types: []byte{testTxs[0].Type()}, sizes: []uint32{uint32(testTxs[0].Size())}}, @@ -829,14 +773,7 @@ func TestTransactionFetcherBroadcasts(t *testing.T) { // Tests that the waiting list timers properly reset and reschedule. 
func TestTransactionFetcherWaitTimerResets(t *testing.T) { testTransactionFetcherParallel(t, txFetcherTest{ - init: func() *TxFetcher { - return NewTxFetcher( - func(common.Hash, byte) error { return nil }, - nil, - func(string, []common.Hash) error { return nil }, - nil, - ) - }, + init: newTestTxFetcher, steps: []interface{}{ doTxNotify{peer: "A", hashes: []common.Hash{{0x01}}, types: []byte{types.LegacyTxType}, sizes: []uint32{111}}, isWaiting(map[string][]announce{ @@ -899,16 +836,7 @@ func TestTransactionFetcherWaitTimerResets(t *testing.T) { // out and be re-scheduled for someone else. func TestTransactionFetcherTimeoutRescheduling(t *testing.T) { testTransactionFetcherParallel(t, txFetcherTest{ - init: func() *TxFetcher { - return NewTxFetcher( - func(common.Hash, byte) error { return nil }, - func(txs []*types.Transaction) []error { - return make([]error, len(txs)) - }, - func(string, []common.Hash) error { return nil }, - nil, - ) - }, + init: newTestTxFetcher, steps: []interface{}{ // Push an initial announcement through to the scheduled stage doTxNotify{ @@ -977,14 +905,7 @@ func TestTransactionFetcherTimeoutRescheduling(t *testing.T) { // Tests that the fetching timeout timers properly reset and reschedule. 
func TestTransactionFetcherTimeoutTimerResets(t *testing.T) { testTransactionFetcherParallel(t, txFetcherTest{ - init: func() *TxFetcher { - return NewTxFetcher( - func(common.Hash, byte) error { return nil }, - nil, - func(string, []common.Hash) error { return nil }, - nil, - ) - }, + init: newTestTxFetcher, steps: []interface{}{ doTxNotify{peer: "A", hashes: []common.Hash{{0x01}}, types: []byte{types.LegacyTxType}, sizes: []uint32{111}}, doWait{time: txArriveTimeout, step: true}, @@ -1055,14 +976,7 @@ func TestTransactionFetcherRateLimiting(t *testing.T) { }) } testTransactionFetcherParallel(t, txFetcherTest{ - init: func() *TxFetcher { - return NewTxFetcher( - func(common.Hash, byte) error { return nil }, - nil, - func(string, []common.Hash) error { return nil }, - nil, - ) - }, + init: newTestTxFetcher, steps: []interface{}{ // Announce all the transactions, wait a bit and ensure only a small // percentage gets requested @@ -1085,14 +999,7 @@ func TestTransactionFetcherRateLimiting(t *testing.T) { // be requested at a time, to keep the responses below a reasonable level. func TestTransactionFetcherBandwidthLimiting(t *testing.T) { testTransactionFetcherParallel(t, txFetcherTest{ - init: func() *TxFetcher { - return NewTxFetcher( - func(common.Hash, byte) error { return nil }, - nil, - func(string, []common.Hash) error { return nil }, - nil, - ) - }, + init: newTestTxFetcher, steps: []interface{}{ // Announce mid size transactions from A to verify that multiple // ones can be piled into a single request. 
@@ -1202,14 +1109,7 @@ func TestTransactionFetcherDoSProtection(t *testing.T) { }) } testTransactionFetcherParallel(t, txFetcherTest{ - init: func() *TxFetcher { - return NewTxFetcher( - func(common.Hash, byte) error { return nil }, - nil, - func(string, []common.Hash) error { return nil }, - nil, - ) - }, + init: newTestTxFetcher, steps: []interface{}{ // Announce half of the transaction and wait for them to be scheduled doTxNotify{peer: "A", hashes: hashesA[:maxTxAnnounces/2], types: typesA[:maxTxAnnounces/2], sizes: sizesA[:maxTxAnnounces/2]}, @@ -1270,24 +1170,21 @@ func TestTransactionFetcherDoSProtection(t *testing.T) { func TestTransactionFetcherUnderpricedDedup(t *testing.T) { testTransactionFetcherParallel(t, txFetcherTest{ init: func() *TxFetcher { - return NewTxFetcher( - func(common.Hash, byte) error { return nil }, - func(txs []*types.Transaction) []error { - errs := make([]error, len(txs)) - for i := 0; i < len(errs); i++ { - if i%3 == 0 { - errs[i] = txpool.ErrUnderpriced - } else if i%3 == 1 { - errs[i] = txpool.ErrReplaceUnderpriced - } else { - errs[i] = txpool.ErrTxGasPriceTooLow - } + f := newTestTxFetcher() + f.addTxs = func(txs []*types.Transaction) []error { + errs := make([]error, len(txs)) + for i := 0; i < len(errs); i++ { + if i%3 == 0 { + errs[i] = txpool.ErrUnderpriced + } else if i%3 == 1 { + errs[i] = txpool.ErrReplaceUnderpriced + } else { + errs[i] = txpool.ErrTxGasPriceTooLow } - return errs - }, - func(string, []common.Hash) error { return nil }, - nil, - ) + } + return errs + } + return f }, steps: []interface{}{ // Deliver a transaction through the fetcher, but reject as underpriced @@ -1371,18 +1268,15 @@ func TestTransactionFetcherUnderpricedDoSProtection(t *testing.T) { } testTransactionFetcher(t, txFetcherTest{ init: func() *TxFetcher { - return NewTxFetcher( - func(common.Hash, byte) error { return nil }, - func(txs []*types.Transaction) []error { - errs := make([]error, len(txs)) - for i := 0; i < len(errs); i++ { - 
errs[i] = txpool.ErrUnderpriced - } - return errs - }, - func(string, []common.Hash) error { return nil }, - nil, - ) + f := newTestTxFetcher() + f.addTxs = func(txs []*types.Transaction) []error { + errs := make([]error, len(txs)) + for i := 0; i < len(errs); i++ { + errs[i] = txpool.ErrUnderpriced + } + return errs + } + return f }, steps: append(steps, []interface{}{ // The preparation of the test has already been done in `steps`, add the last check @@ -1402,16 +1296,7 @@ func TestTransactionFetcherUnderpricedDoSProtection(t *testing.T) { // Tests that unexpected deliveries don't corrupt the internal state. func TestTransactionFetcherOutOfBoundDeliveries(t *testing.T) { testTransactionFetcherParallel(t, txFetcherTest{ - init: func() *TxFetcher { - return NewTxFetcher( - func(common.Hash, byte) error { return nil }, - func(txs []*types.Transaction) []error { - return make([]error, len(txs)) - }, - func(string, []common.Hash) error { return nil }, - nil, - ) - }, + init: newTestTxFetcher, steps: []interface{}{ // Deliver something out of the blue isWaiting(nil), @@ -1461,16 +1346,7 @@ func TestTransactionFetcherOutOfBoundDeliveries(t *testing.T) { // live or dangling stages. func TestTransactionFetcherDrop(t *testing.T) { testTransactionFetcherParallel(t, txFetcherTest{ - init: func() *TxFetcher { - return NewTxFetcher( - func(common.Hash, byte) error { return nil }, - func(txs []*types.Transaction) []error { - return make([]error, len(txs)) - }, - func(string, []common.Hash) error { return nil }, - nil, - ) - }, + init: newTestTxFetcher, steps: []interface{}{ // Set up a few hashes into various stages doTxNotify{peer: "A", hashes: []common.Hash{{0x01}}, types: []byte{types.LegacyTxType}, sizes: []uint32{111}}, @@ -1535,16 +1411,7 @@ func TestTransactionFetcherDrop(t *testing.T) { // available peer. 
func TestTransactionFetcherDropRescheduling(t *testing.T) { testTransactionFetcherParallel(t, txFetcherTest{ - init: func() *TxFetcher { - return NewTxFetcher( - func(common.Hash, byte) error { return nil }, - func(txs []*types.Transaction) []error { - return make([]error, len(txs)) - }, - func(string, []common.Hash) error { return nil }, - nil, - ) - }, + init: newTestTxFetcher, steps: []interface{}{ // Set up a few hashes into various stages doTxNotify{peer: "A", hashes: []common.Hash{{0x01}}, types: []byte{types.LegacyTxType}, sizes: []uint32{111}}, @@ -1582,14 +1449,9 @@ func TestInvalidAnnounceMetadata(t *testing.T) { drop := make(chan string, 2) testTransactionFetcherParallel(t, txFetcherTest{ init: func() *TxFetcher { - return NewTxFetcher( - func(common.Hash, byte) error { return nil }, - func(txs []*types.Transaction) []error { - return make([]error, len(txs)) - }, - func(string, []common.Hash) error { return nil }, - func(peer string) { drop <- peer }, - ) + f := newTestTxFetcher() + f.dropPeer = func(peer string) { drop <- peer } + return f }, steps: []interface{}{ // Initial announcement to get something into the waitlist @@ -1664,16 +1526,7 @@ func TestInvalidAnnounceMetadata(t *testing.T) { // announced one. func TestTransactionFetcherFuzzCrash01(t *testing.T) { testTransactionFetcherParallel(t, txFetcherTest{ - init: func() *TxFetcher { - return NewTxFetcher( - func(common.Hash, byte) error { return nil }, - func(txs []*types.Transaction) []error { - return make([]error, len(txs)) - }, - func(string, []common.Hash) error { return nil }, - nil, - ) - }, + init: newTestTxFetcher, steps: []interface{}{ // Get a transaction into fetching mode and make it dangling with a broadcast doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[0]}, types: []byte{testTxs[0].Type()}, sizes: []uint32{uint32(testTxs[0].Size())}}, @@ -1692,16 +1545,7 @@ func TestTransactionFetcherFuzzCrash01(t *testing.T) { // concurrently announced one. 
func TestTransactionFetcherFuzzCrash02(t *testing.T) { testTransactionFetcherParallel(t, txFetcherTest{ - init: func() *TxFetcher { - return NewTxFetcher( - func(common.Hash, byte) error { return nil }, - func(txs []*types.Transaction) []error { - return make([]error, len(txs)) - }, - func(string, []common.Hash) error { return nil }, - nil, - ) - }, + init: newTestTxFetcher, steps: []interface{}{ // Get a transaction into fetching mode and make it dangling with a broadcast doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[0]}, types: []byte{testTxs[0].Type()}, sizes: []uint32{uint32(testTxs[0].Size())}}, @@ -1722,16 +1566,7 @@ func TestTransactionFetcherFuzzCrash02(t *testing.T) { // with a concurrent notify. func TestTransactionFetcherFuzzCrash03(t *testing.T) { testTransactionFetcherParallel(t, txFetcherTest{ - init: func() *TxFetcher { - return NewTxFetcher( - func(common.Hash, byte) error { return nil }, - func(txs []*types.Transaction) []error { - return make([]error, len(txs)) - }, - func(string, []common.Hash) error { return nil }, - nil, - ) - }, + init: newTestTxFetcher, steps: []interface{}{ // Get a transaction into fetching mode and make it dangling with a broadcast doTxNotify{ @@ -1762,17 +1597,12 @@ func TestTransactionFetcherFuzzCrash04(t *testing.T) { testTransactionFetcherParallel(t, txFetcherTest{ init: func() *TxFetcher { - return NewTxFetcher( - func(common.Hash, byte) error { return nil }, - func(txs []*types.Transaction) []error { - return make([]error, len(txs)) - }, - func(string, []common.Hash) error { - <-proceed - return errors.New("peer disconnected") - }, - nil, - ) + f := newTestTxFetcher() + f.fetchTxs = func(string, []common.Hash) error { + <-proceed + return errors.New("peer disconnected") + } + return f }, steps: []interface{}{ // Get a transaction into fetching mode and make it dangling with a broadcast @@ -1796,14 +1626,7 @@ func TestTransactionFetcherFuzzCrash04(t *testing.T) { // once they are announced in the network. 
func TestBlobTransactionAnnounce(t *testing.T) { testTransactionFetcherParallel(t, txFetcherTest{ - init: func() *TxFetcher { - return NewTxFetcher( - func(common.Hash, byte) error { return nil }, - nil, - func(string, []common.Hash) error { return nil }, - nil, - ) - }, + init: newTestTxFetcher, steps: []interface{}{ // Initial announcement to get something into the waitlist doTxNotify{peer: "A", hashes: []common.Hash{{0x01}, {0x02}}, types: []byte{types.LegacyTxType, types.LegacyTxType}, sizes: []uint32{111, 222}}, @@ -1864,16 +1687,7 @@ func TestBlobTransactionAnnounce(t *testing.T) { func TestTransactionFetcherDropAlternates(t *testing.T) { testTransactionFetcherParallel(t, txFetcherTest{ - init: func() *TxFetcher { - return NewTxFetcher( - func(common.Hash, byte) error { return nil }, - func(txs []*types.Transaction) []error { - return make([]error, len(txs)) - }, - func(string, []common.Hash) error { return nil }, - nil, - ) - }, + init: newTestTxFetcher, steps: []interface{}{ doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[0]}, types: []byte{testTxs[0].Type()}, sizes: []uint32{uint32(testTxs[0].Size())}}, doWait{time: txArriveTimeout, step: true}, @@ -1915,20 +1729,15 @@ func TestTransactionFetcherDropAlternates(t *testing.T) { func TestTransactionFetcherWrongMetadata(t *testing.T) { testTransactionFetcherParallel(t, txFetcherTest{ init: func() *TxFetcher { - return NewTxFetcher( - func(_ common.Hash, kind byte) error { - switch kind { - case types.LegacyTxType, types.AccessListTxType, types.DynamicFeeTxType, types.BlobTxType, types.SetCodeTxType: - return nil - } - return types.ErrTxTypeNotSupported - }, - func(txs []*types.Transaction) []error { - return make([]error, len(txs)) - }, - func(string, []common.Hash) error { return nil }, - nil, - ) + f := newTestTxFetcher() + f.validateMeta = func(name common.Hash, kind byte) error { + switch kind { + case types.LegacyTxType, types.AccessListTxType, types.DynamicFeeTxType, types.BlobTxType, 
types.SetCodeTxType: + return nil + } + return types.ErrTxTypeNotSupported + } + return f }, steps: []interface{}{ doTxNotify{peer: "A", hashes: []common.Hash{{0x01}, {0x02}}, types: []byte{0xff, types.LegacyTxType}, sizes: []uint32{111, 222}}, @@ -1976,20 +1785,16 @@ func TestTransactionProtocolViolation(t *testing.T) { ) testTransactionFetcherParallel(t, txFetcherTest{ init: func() *TxFetcher { - return NewTxFetcher( - func(common.Hash, byte) error { return nil }, - func(txs []*types.Transaction) []error { - var errs []error - for range txs { - errs = append(errs, txpool.ErrKZGVerificationError) - } - return errs - }, - func(a string, b []common.Hash) error { - return nil - }, - func(peer string) { drop <- struct{}{} }, - ) + f := newTestTxFetcher() + f.addTxs = func(txs []*types.Transaction) []error { + var errs []error + for range txs { + errs = append(errs, txpool.ErrKZGVerificationError) + } + return errs + } + f.dropPeer = func(string) { drop <- struct{}{} } + return f }, steps: []interface{}{ // Initial announcement to get something into the waitlist From 23c3498836f66f2ec4f33efa3917be1968d6518a Mon Sep 17 00:00:00 2001 From: jwasinger Date: Fri, 16 Jan 2026 07:55:43 +0900 Subject: [PATCH 241/277] core/vm: check if read-only in gas handlers (#33281) This PR causes execution to terminate at the gas handler in the case of sstore/call if they are invoked in a static execution context. This aligns the behavior with EIP 7928 by ensuring that we don't record any state reads in the access list from an SSTORE/CALL in this circumstance. 
--------- Co-authored-by: lightclient --- core/vm/gas_table.go | 14 ++++++++++++++ core/vm/instructions.go | 12 ------------ core/vm/operations_acl.go | 20 +++++++++++++++++++- 3 files changed, 33 insertions(+), 13 deletions(-) diff --git a/core/vm/gas_table.go b/core/vm/gas_table.go index c7c1274bf2..23a2cbbf4d 100644 --- a/core/vm/gas_table.go +++ b/core/vm/gas_table.go @@ -97,6 +97,9 @@ var ( ) func gasSStore(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize uint64) (uint64, error) { + if evm.readOnly { + return 0, ErrWriteProtection + } var ( y, x = stack.Back(1), stack.Back(0) current, original = evm.StateDB.GetStateAndCommittedState(contract.Address(), x.Bytes32()) @@ -181,6 +184,9 @@ func gasSStore(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySi // (2.2.2.1.) If original value is 0, add SSTORE_SET_GAS - SLOAD_GAS to refund counter. // (2.2.2.2.) Otherwise, add SSTORE_RESET_GAS - SLOAD_GAS gas to refund counter. func gasSStoreEIP2200(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize uint64) (uint64, error) { + if evm.readOnly { + return 0, ErrWriteProtection + } // If we fail the minimum gas availability invariant, fail (0) if contract.Gas <= params.SstoreSentryGasEIP2200 { return 0, errors.New("not enough gas for reentrancy sentry") @@ -374,6 +380,10 @@ func gasCall(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize transfersValue = !stack.Back(2).IsZero() address = common.Address(stack.Back(1).Bytes20()) ) + if evm.readOnly && transfersValue { + return 0, ErrWriteProtection + } + if evm.chainRules.IsEIP158 { if transfersValue && evm.StateDB.Empty(address) { gas += params.CallNewAccountGas @@ -462,6 +472,10 @@ func gasStaticCall(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memo } func gasSelfdestruct(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize uint64) (uint64, error) { + if evm.readOnly { + return 0, ErrWriteProtection + } + var gas uint64 // EIP150 
homestead gas reprice fork: if evm.chainRules.IsEIP150 { diff --git a/core/vm/instructions.go b/core/vm/instructions.go index 6b04a2daff..91886d939d 100644 --- a/core/vm/instructions.go +++ b/core/vm/instructions.go @@ -518,9 +518,6 @@ func opSload(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) { } func opSstore(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) { - if evm.readOnly { - return nil, ErrWriteProtection - } loc := scope.Stack.pop() val := scope.Stack.pop() evm.StateDB.SetState(scope.Contract.Address(), loc.Bytes32(), val.Bytes32()) @@ -743,9 +740,6 @@ func opCall(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) { // Get the arguments from the memory. args := scope.Memory.GetPtr(inOffset.Uint64(), inSize.Uint64()) - if evm.readOnly && !value.IsZero() { - return nil, ErrWriteProtection - } if !value.IsZero() { gas += params.CallStipend } @@ -882,9 +876,6 @@ func opStop(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) { } func opSelfdestruct(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) { - if evm.readOnly { - return nil, ErrWriteProtection - } beneficiary := scope.Stack.pop() balance := evm.StateDB.GetBalance(scope.Contract.Address()) evm.StateDB.AddBalance(beneficiary.Bytes20(), balance, tracing.BalanceIncreaseSelfdestruct) @@ -901,9 +892,6 @@ func opSelfdestruct(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) { } func opSelfdestruct6780(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) { - if evm.readOnly { - return nil, ErrWriteProtection - } beneficiary := scope.Stack.pop() balance := evm.StateDB.GetBalance(scope.Contract.Address()) evm.StateDB.SubBalance(scope.Contract.Address(), balance, tracing.BalanceDecreaseSelfdestruct) diff --git a/core/vm/operations_acl.go b/core/vm/operations_acl.go index 085b018e4c..26ff411bd2 100644 --- a/core/vm/operations_acl.go +++ b/core/vm/operations_acl.go @@ -28,6 +28,9 @@ import ( func makeGasSStoreFunc(clearingRefund uint64) gasFunc { return 
func(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize uint64) (uint64, error) { + if evm.readOnly { + return 0, ErrWriteProtection + } // If we fail the minimum gas availability invariant, fail (0) if contract.Gas <= params.SstoreSentryGasEIP2200 { return 0, errors.New("not enough gas for reentrancy sentry") @@ -226,6 +229,9 @@ func makeSelfdestructGasFn(refundsEnabled bool) gasFunc { gas uint64 address = common.Address(stack.peek().Bytes20()) ) + if evm.readOnly { + return 0, ErrWriteProtection + } if !evm.StateDB.AddressInAccessList(address) { // If the caller cannot afford the cost, this change will be rolled back evm.StateDB.AddAddressToAccessList(address) @@ -244,12 +250,24 @@ func makeSelfdestructGasFn(refundsEnabled bool) gasFunc { } var ( - gasCallEIP7702 = makeCallVariantGasCallEIP7702(gasCall) + innerGasCallEIP7702 = makeCallVariantGasCallEIP7702(gasCall) gasDelegateCallEIP7702 = makeCallVariantGasCallEIP7702(gasDelegateCall) gasStaticCallEIP7702 = makeCallVariantGasCallEIP7702(gasStaticCall) gasCallCodeEIP7702 = makeCallVariantGasCallEIP7702(gasCallCode) ) +func gasCallEIP7702(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize uint64) (uint64, error) { + // Return early if this call attempts to transfer value in a static context. + // Although it's checked in `gasCall`, EIP-7702 loads the target's code before + // to determine if it is resolving a delegation. This could incorrectly record + // the target in the block access list (BAL) if the call later fails. 
+ transfersValue := !stack.Back(2).IsZero() + if evm.readOnly && transfersValue { + return 0, ErrWriteProtection + } + return innerGasCallEIP7702(evm, contract, stack, mem, memorySize) +} + func makeCallVariantGasCallEIP7702(oldCalculator gasFunc) gasFunc { return func(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize uint64) (uint64, error) { var ( From b6fb79cdf977e0652c531c0019b0dac1ae01a375 Mon Sep 17 00:00:00 2001 From: jwasinger Date: Fri, 16 Jan 2026 23:37:12 +0900 Subject: [PATCH 242/277] core/vm: in selfdestruct gas calculation, return early if there isn't enough gas to cover cold account access costs (#33450) There's no need to perform the subsequent state access on the target if we already know that we are out of gas. This aligns the state access behavior of selfdestruct with EIP-7928 --- core/vm/operations_acl.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/core/vm/operations_acl.go b/core/vm/operations_acl.go index 26ff411bd2..4b7b87503d 100644 --- a/core/vm/operations_acl.go +++ b/core/vm/operations_acl.go @@ -236,6 +236,10 @@ func makeSelfdestructGasFn(refundsEnabled bool) gasFunc { // If the caller cannot afford the cost, this change will be rolled back evm.StateDB.AddAddressToAccessList(address) gas = params.ColdAccountAccessCostEIP2929 + + if contract.Gas < gas { + return gas, nil + } } // if empty and transfers value if evm.StateDB.Empty(address) && evm.StateDB.GetBalance(contract.Address()).Sign() != 0 { From 715bf8e81e143255686a9e488a0fb87193cb2d17 Mon Sep 17 00:00:00 2001 From: jwasinger Date: Sat, 17 Jan 2026 07:10:08 +0900 Subject: [PATCH 243/277] core: invoke selfdestruct tracer hooks during finalisation (#32919) The core part of this PR that we need to adopt is to move the code and nonce change hook invocations to occur at tx finalization, instead of when the selfdestruct opcode is called. 
Additionally: * remove `SelfDestruct6780` now that it is essentially the same as `SelfDestruct` just gated by `is new contract` * don't duplicate `BalanceIncreaseSelfdestruct` (transfer to recipient of selfdestruct) in the hooked statedb and in the opcode handler for the selfdestruct opcode. * balance is burned immediately when the beneficiary of the selfdestruct is the sender, and the contract was created in the same transaction. Previously we emit two balance increases to the recipient (see above point), and a balance decrease from the sender. --------- Co-authored-by: Sina Mahmoodi Co-authored-by: Gary Rong Co-authored-by: lightclient --- core/state/statedb.go | 34 +- core/state/statedb_hooked.go | 94 ++- core/state/statedb_hooked_test.go | 7 +- .../gen_nonce_change_reason_stringer.go | 5 +- core/tracing/hooks.go | 3 + core/vm/instructions.go | 53 +- core/vm/interface.go | 14 +- .../tracetest/selfdestruct_state_test.go | 653 ++++++++++++++++++ .../selfdestruct_test_contracts/contractA.yul | 18 + .../selfdestruct_test_contracts/contractB.yul | 14 + .../contractSelfDestruct.yul | 12 + .../coordinator.yul | 20 + .../coordinatorSendAfter.yul | 27 + .../factoryRefund.yul | 28 + .../factorySelfDestructBalanceCheck.yul | 35 + 15 files changed, 918 insertions(+), 99 deletions(-) create mode 100644 eth/tracers/internal/tracetest/selfdestruct_state_test.go create mode 100644 eth/tracers/internal/tracetest/selfdestruct_test_contracts/contractA.yul create mode 100644 eth/tracers/internal/tracetest/selfdestruct_test_contracts/contractB.yul create mode 100644 eth/tracers/internal/tracetest/selfdestruct_test_contracts/contractSelfDestruct.yul create mode 100644 eth/tracers/internal/tracetest/selfdestruct_test_contracts/coordinator.yul create mode 100644 eth/tracers/internal/tracetest/selfdestruct_test_contracts/coordinatorSendAfter.yul create mode 100644 eth/tracers/internal/tracetest/selfdestruct_test_contracts/factoryRefund.yul create mode 100644 
eth/tracers/internal/tracetest/selfdestruct_test_contracts/factorySelfDestructBalanceCheck.yul diff --git a/core/state/statedb.go b/core/state/statedb.go index fbfb02e8e4..39160aa1c7 100644 --- a/core/state/statedb.go +++ b/core/state/statedb.go @@ -509,21 +509,13 @@ func (s *StateDB) SetStorage(addr common.Address, storage map[common.Hash]common } // SelfDestruct marks the given account as selfdestructed. -// This clears the account balance. // // The account's state object is still available until the state is committed, // getStateObject will return a non-nil account after SelfDestruct. -func (s *StateDB) SelfDestruct(addr common.Address) uint256.Int { +func (s *StateDB) SelfDestruct(addr common.Address) { stateObject := s.getStateObject(addr) - var prevBalance uint256.Int if stateObject == nil { - return prevBalance - } - prevBalance = *(stateObject.Balance()) - // Regardless of whether it is already destructed or not, we do have to - // journal the balance-change, if we set it to zero here. - if !stateObject.Balance().IsZero() { - stateObject.SetBalance(new(uint256.Int)) + return } // If it is already marked as self-destructed, we do not need to add it // for journalling a second time. @@ -531,18 +523,6 @@ func (s *StateDB) SelfDestruct(addr common.Address) uint256.Int { s.journal.destruct(addr) stateObject.markSelfdestructed() } - return prevBalance -} - -func (s *StateDB) SelfDestruct6780(addr common.Address) (uint256.Int, bool) { - stateObject := s.getStateObject(addr) - if stateObject == nil { - return uint256.Int{}, false - } - if stateObject.newContract { - return s.SelfDestruct(addr), true - } - return *(stateObject.Balance()), false } // SetTransientState sets transient storage for a given account. It @@ -670,6 +650,16 @@ func (s *StateDB) CreateContract(addr common.Address) { } } +// IsNewContract reports whether the contract at the given address was deployed +// during the current transaction. 
+func (s *StateDB) IsNewContract(addr common.Address) bool { + obj := s.getStateObject(addr) + if obj == nil { + return false + } + return obj.newContract +} + // Copy creates a deep, independent copy of the state. // Snapshots of the copied state cannot be applied to the copy. func (s *StateDB) Copy() *StateDB { diff --git a/core/state/statedb_hooked.go b/core/state/statedb_hooked.go index 33a2016784..4ffa69b419 100644 --- a/core/state/statedb_hooked.go +++ b/core/state/statedb_hooked.go @@ -52,6 +52,10 @@ func (s *hookedStateDB) CreateContract(addr common.Address) { s.inner.CreateContract(addr) } +func (s *hookedStateDB) IsNewContract(addr common.Address) bool { + return s.inner.IsNewContract(addr) +} + func (s *hookedStateDB) GetBalance(addr common.Address) *uint256.Int { return s.inner.GetBalance(addr) } @@ -211,56 +215,8 @@ func (s *hookedStateDB) SetState(address common.Address, key common.Hash, value return prev } -func (s *hookedStateDB) SelfDestruct(address common.Address) uint256.Int { - var prevCode []byte - var prevCodeHash common.Hash - - if s.hooks.OnCodeChange != nil || s.hooks.OnCodeChangeV2 != nil { - prevCode = s.inner.GetCode(address) - prevCodeHash = s.inner.GetCodeHash(address) - } - - prev := s.inner.SelfDestruct(address) - - if s.hooks.OnBalanceChange != nil && !prev.IsZero() { - s.hooks.OnBalanceChange(address, prev.ToBig(), new(big.Int), tracing.BalanceDecreaseSelfdestruct) - } - - if len(prevCode) > 0 { - if s.hooks.OnCodeChangeV2 != nil { - s.hooks.OnCodeChangeV2(address, prevCodeHash, prevCode, types.EmptyCodeHash, nil, tracing.CodeChangeSelfDestruct) - } else if s.hooks.OnCodeChange != nil { - s.hooks.OnCodeChange(address, prevCodeHash, prevCode, types.EmptyCodeHash, nil) - } - } - - return prev -} - -func (s *hookedStateDB) SelfDestruct6780(address common.Address) (uint256.Int, bool) { - var prevCode []byte - var prevCodeHash common.Hash - - if s.hooks.OnCodeChange != nil || s.hooks.OnCodeChangeV2 != nil { - prevCodeHash = 
s.inner.GetCodeHash(address) - prevCode = s.inner.GetCode(address) - } - - prev, changed := s.inner.SelfDestruct6780(address) - - if s.hooks.OnBalanceChange != nil && !prev.IsZero() { - s.hooks.OnBalanceChange(address, prev.ToBig(), new(big.Int), tracing.BalanceDecreaseSelfdestruct) - } - - if changed && len(prevCode) > 0 { - if s.hooks.OnCodeChangeV2 != nil { - s.hooks.OnCodeChangeV2(address, prevCodeHash, prevCode, types.EmptyCodeHash, nil, tracing.CodeChangeSelfDestruct) - } else if s.hooks.OnCodeChange != nil { - s.hooks.OnCodeChange(address, prevCodeHash, prevCode, types.EmptyCodeHash, nil) - } - } - - return prev, changed +func (s *hookedStateDB) SelfDestruct(address common.Address) { + s.inner.SelfDestruct(address) } func (s *hookedStateDB) AddLog(log *types.Log) { @@ -272,17 +228,47 @@ func (s *hookedStateDB) AddLog(log *types.Log) { } func (s *hookedStateDB) Finalise(deleteEmptyObjects bool) { - defer s.inner.Finalise(deleteEmptyObjects) - if s.hooks.OnBalanceChange == nil { + if s.hooks.OnBalanceChange == nil && s.hooks.OnNonceChangeV2 == nil && s.hooks.OnNonceChange == nil && s.hooks.OnCodeChangeV2 == nil && s.hooks.OnCodeChange == nil { + // Short circuit if no relevant hooks are set. + s.inner.Finalise(deleteEmptyObjects) return } + + // Iterate all dirty addresses and record self-destructs. for addr := range s.inner.journal.dirties { obj := s.inner.stateObjects[addr] - if obj != nil && obj.selfDestructed { - // If ether was sent to account post-selfdestruct it is burnt. + if obj == nil || !obj.selfDestructed { + // Not self-destructed, keep searching. + continue + } + // Bingo: state object was self-destructed, call relevant hooks. + + // If ether was sent to account post-selfdestruct, record as burnt. + if s.hooks.OnBalanceChange != nil { if bal := obj.Balance(); bal.Sign() != 0 { s.hooks.OnBalanceChange(addr, bal.ToBig(), new(big.Int), tracing.BalanceDecreaseSelfdestructBurn) } } + + // Nonce is set to reset on self-destruct. 
+ if s.hooks.OnNonceChangeV2 != nil { + s.hooks.OnNonceChangeV2(addr, obj.Nonce(), 0, tracing.NonceChangeSelfdestruct) + } else if s.hooks.OnNonceChange != nil { + s.hooks.OnNonceChange(addr, obj.Nonce(), 0) + } + + // If an initcode invokes selfdestruct, do not emit a code change. + prevCodeHash := s.inner.GetCodeHash(addr) + if prevCodeHash == types.EmptyCodeHash { + continue + } + // Otherwise, trace the change. + if s.hooks.OnCodeChangeV2 != nil { + s.hooks.OnCodeChangeV2(addr, prevCodeHash, s.inner.GetCode(addr), types.EmptyCodeHash, nil, tracing.CodeChangeSelfDestruct) + } else if s.hooks.OnCodeChange != nil { + s.hooks.OnCodeChange(addr, prevCodeHash, s.inner.GetCode(addr), types.EmptyCodeHash, nil) + } } + + s.inner.Finalise(deleteEmptyObjects) } diff --git a/core/state/statedb_hooked_test.go b/core/state/statedb_hooked_test.go index 4d85e61679..6fe17ec1b4 100644 --- a/core/state/statedb_hooked_test.go +++ b/core/state/statedb_hooked_test.go @@ -49,6 +49,8 @@ func TestBurn(t *testing.T) { createAndDestroy := func(addr common.Address) { hooked.AddBalance(addr, uint256.NewInt(100), tracing.BalanceChangeUnspecified) hooked.CreateContract(addr) + // Simulate what the opcode handler does: clear balance before selfdestruct + hooked.SubBalance(addr, hooked.GetBalance(addr), tracing.BalanceDecreaseSelfdestruct) hooked.SelfDestruct(addr) // sanity-check that balance is now 0 if have, want := hooked.GetBalance(addr), new(uint256.Int); !have.Eq(want) { @@ -140,8 +142,8 @@ func TestHooks_OnCodeChangeV2(t *testing.T) { var result []string var wants = []string{ "0xaa00000000000000000000000000000000000000.code: (0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470) ->0x1325 (0xa12ae05590de0c93a00bc7ac773c2fdb621e44f814985e72194f921c0050f728) ContractCreation", - "0xaa00000000000000000000000000000000000000.code: 0x1325 (0xa12ae05590de0c93a00bc7ac773c2fdb621e44f814985e72194f921c0050f728) -> (0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470) 
SelfDestruct", "0xbb00000000000000000000000000000000000000.code: (0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470) ->0x1326 (0x3c54516221d604e623f358bc95996ca3242aaa109bddabcebda13db9b3f90dcb) ContractCreation", + "0xaa00000000000000000000000000000000000000.code: 0x1325 (0xa12ae05590de0c93a00bc7ac773c2fdb621e44f814985e72194f921c0050f728) -> (0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470) SelfDestruct", "0xbb00000000000000000000000000000000000000.code: 0x1326 (0x3c54516221d604e623f358bc95996ca3242aaa109bddabcebda13db9b3f90dcb) -> (0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470) SelfDestruct", } emitF := func(format string, a ...any) { @@ -157,7 +159,8 @@ func TestHooks_OnCodeChangeV2(t *testing.T) { sdb.SetCode(common.Address{0xbb}, []byte{0x13, 38}, tracing.CodeChangeContractCreation) sdb.CreateContract(common.Address{0xbb}) - sdb.SelfDestruct6780(common.Address{0xbb}) + sdb.SelfDestruct(common.Address{0xbb}) + sdb.Finalise(true) if len(result) != len(wants) { t.Fatalf("number of tracing events wrong, have %d want %d", len(result), len(wants)) diff --git a/core/tracing/gen_nonce_change_reason_stringer.go b/core/tracing/gen_nonce_change_reason_stringer.go index f775c1f3a6..cd19200db8 100644 --- a/core/tracing/gen_nonce_change_reason_stringer.go +++ b/core/tracing/gen_nonce_change_reason_stringer.go @@ -15,11 +15,12 @@ func _() { _ = x[NonceChangeNewContract-4] _ = x[NonceChangeAuthorization-5] _ = x[NonceChangeRevert-6] + _ = x[NonceChangeSelfdestruct-7] } -const _NonceChangeReason_name = "UnspecifiedGenesisEoACallContractCreatorNewContractAuthorizationRevert" +const _NonceChangeReason_name = "UnspecifiedGenesisEoACallContractCreatorNewContractAuthorizationRevertSelfdestruct" -var _NonceChangeReason_index = [...]uint8{0, 11, 18, 25, 40, 51, 64, 70} +var _NonceChangeReason_index = [...]uint8{0, 11, 18, 25, 40, 51, 64, 70, 82} func (i NonceChangeReason) String() string { if i >= 
NonceChangeReason(len(_NonceChangeReason_index)-1) { diff --git a/core/tracing/hooks.go b/core/tracing/hooks.go index d17b94cf9c..c85abe6482 100644 --- a/core/tracing/hooks.go +++ b/core/tracing/hooks.go @@ -432,6 +432,9 @@ const ( // NonceChangeRevert is emitted when the nonce is reverted back to a previous value due to call failure. // It is only emitted when the tracer has opted in to use the journaling wrapper (WrapWithJournal). NonceChangeRevert NonceChangeReason = 6 + + // NonceChangeSelfdestruct is emitted when the nonce is reset to zero due to a self-destruct + NonceChangeSelfdestruct NonceChangeReason = 7 ) // CodeChangeReason is used to indicate the reason for a code change. diff --git a/core/vm/instructions.go b/core/vm/instructions.go index 91886d939d..958cf9dedc 100644 --- a/core/vm/instructions.go +++ b/core/vm/instructions.go @@ -876,13 +876,25 @@ func opStop(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) { } func opSelfdestruct(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) { - beneficiary := scope.Stack.pop() - balance := evm.StateDB.GetBalance(scope.Contract.Address()) - evm.StateDB.AddBalance(beneficiary.Bytes20(), balance, tracing.BalanceIncreaseSelfdestruct) - evm.StateDB.SelfDestruct(scope.Contract.Address()) + var ( + this = scope.Contract.Address() + balance = evm.StateDB.GetBalance(this) + top = scope.Stack.pop() + beneficiary = common.Address(top.Bytes20()) + ) + + // The funds are burned immediately if the beneficiary is the caller itself, + // in this case, the beneficiary's balance is not increased. + if this != beneficiary { + evm.StateDB.AddBalance(beneficiary, balance, tracing.BalanceIncreaseSelfdestruct) + } + // Clear any leftover funds for the account being destructed. 
+ evm.StateDB.SubBalance(this, balance, tracing.BalanceDecreaseSelfdestruct) + evm.StateDB.SelfDestruct(this) + if tracer := evm.Config.Tracer; tracer != nil { if tracer.OnEnter != nil { - tracer.OnEnter(evm.depth, byte(SELFDESTRUCT), scope.Contract.Address(), beneficiary.Bytes20(), []byte{}, 0, balance.ToBig()) + tracer.OnEnter(evm.depth, byte(SELFDESTRUCT), this, beneficiary, []byte{}, 0, balance.ToBig()) } if tracer.OnExit != nil { tracer.OnExit(evm.depth, []byte{}, 0, nil, false) @@ -892,14 +904,33 @@ func opSelfdestruct(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) { } func opSelfdestruct6780(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) { - beneficiary := scope.Stack.pop() - balance := evm.StateDB.GetBalance(scope.Contract.Address()) - evm.StateDB.SubBalance(scope.Contract.Address(), balance, tracing.BalanceDecreaseSelfdestruct) - evm.StateDB.AddBalance(beneficiary.Bytes20(), balance, tracing.BalanceIncreaseSelfdestruct) - evm.StateDB.SelfDestruct6780(scope.Contract.Address()) + var ( + this = scope.Contract.Address() + balance = evm.StateDB.GetBalance(this) + top = scope.Stack.pop() + beneficiary = common.Address(top.Bytes20()) + + newContract = evm.StateDB.IsNewContract(this) + ) + + // Contract is new and will actually be deleted. + if newContract { + if this != beneficiary { // Skip no-op transfer when self-destructing to self. + evm.StateDB.AddBalance(beneficiary, balance, tracing.BalanceIncreaseSelfdestruct) + } + evm.StateDB.SubBalance(this, balance, tracing.BalanceDecreaseSelfdestruct) + evm.StateDB.SelfDestruct(this) + } + + // Contract already exists, only do transfer if beneficiary is not self. 
+ if !newContract && this != beneficiary { + evm.StateDB.SubBalance(this, balance, tracing.BalanceDecreaseSelfdestruct) + evm.StateDB.AddBalance(beneficiary, balance, tracing.BalanceIncreaseSelfdestruct) + } + if tracer := evm.Config.Tracer; tracer != nil { if tracer.OnEnter != nil { - tracer.OnEnter(evm.depth, byte(SELFDESTRUCT), scope.Contract.Address(), beneficiary.Bytes20(), []byte{}, 0, balance.ToBig()) + tracer.OnEnter(evm.depth, byte(SELFDESTRUCT), this, beneficiary, []byte{}, 0, balance.ToBig()) } if tracer.OnExit != nil { tracer.OnExit(evm.depth, []byte{}, 0, nil, false) diff --git a/core/vm/interface.go b/core/vm/interface.go index e2f6a65189..e285b18b0f 100644 --- a/core/vm/interface.go +++ b/core/vm/interface.go @@ -57,19 +57,17 @@ type StateDB interface { GetTransientState(addr common.Address, key common.Hash) common.Hash SetTransientState(addr common.Address, key, value common.Hash) - SelfDestruct(common.Address) uint256.Int + SelfDestruct(common.Address) HasSelfDestructed(common.Address) bool - // SelfDestruct6780 is post-EIP6780 selfdestruct, which means that it's a - // send-all-to-beneficiary, unless the contract was created in this same - // transaction, in which case it will be destructed. - // This method returns the prior balance, along with a boolean which is - // true iff the object was indeed destructed. - SelfDestruct6780(common.Address) (uint256.Int, bool) - // Exist reports whether the given account exists in state. // Notably this also returns true for self-destructed accounts within the current transaction. Exist(common.Address) bool + + // IsNewContract reports whether the contract at the given address was deployed + // during the current transaction. + IsNewContract(addr common.Address) bool + // Empty returns whether the given account is empty. Empty // is defined according to EIP161 (balance = nonce = code = 0). 
Empty(common.Address) bool diff --git a/eth/tracers/internal/tracetest/selfdestruct_state_test.go b/eth/tracers/internal/tracetest/selfdestruct_state_test.go new file mode 100644 index 0000000000..2c714b6dce --- /dev/null +++ b/eth/tracers/internal/tracetest/selfdestruct_state_test.go @@ -0,0 +1,653 @@ +// Copyright 2025 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . 
+ +package tracetest + +import ( + "math/big" + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/consensus" + "github.com/ethereum/go-ethereum/consensus/beacon" + "github.com/ethereum/go-ethereum/consensus/ethash" + "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/core/rawdb" + "github.com/ethereum/go-ethereum/core/state" + "github.com/ethereum/go-ethereum/core/tracing" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/core/vm" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/params" +) + +// accountState represents the expected final state of an account +type accountState struct { + Balance *big.Int + Nonce uint64 + Code []byte + Exists bool +} + +// selfdestructStateTracer tracks state changes during selfdestruct operations +type selfdestructStateTracer struct { + env *tracing.VMContext + accounts map[common.Address]*accountState +} + +func newSelfdestructStateTracer() *selfdestructStateTracer { + return &selfdestructStateTracer{ + accounts: make(map[common.Address]*accountState), + } +} + +func (t *selfdestructStateTracer) OnTxStart(env *tracing.VMContext, tx *types.Transaction, from common.Address) { + t.env = env +} + +func (t *selfdestructStateTracer) OnTxEnd(receipt *types.Receipt, err error) { + // Nothing to do +} + +func (t *selfdestructStateTracer) getOrCreateAccount(addr common.Address) *accountState { + if acc, ok := t.accounts[addr]; ok { + return acc + } + + // Initialize with current state from statedb + acc := &accountState{ + Balance: t.env.StateDB.GetBalance(addr).ToBig(), + Nonce: t.env.StateDB.GetNonce(addr), + Code: t.env.StateDB.GetCode(addr), + Exists: t.env.StateDB.Exist(addr), + } + t.accounts[addr] = acc + return acc +} + +func (t *selfdestructStateTracer) OnBalanceChange(addr common.Address, prev, new *big.Int, reason tracing.BalanceChangeReason) { + acc := t.getOrCreateAccount(addr) + acc.Balance = new +} + 
+func (t *selfdestructStateTracer) OnNonceChangeV2(addr common.Address, prev, new uint64, reason tracing.NonceChangeReason) { + acc := t.getOrCreateAccount(addr) + acc.Nonce = new + + // If this is a selfdestruct nonce change, mark account as not existing + if reason == tracing.NonceChangeSelfdestruct { + acc.Exists = false + } +} + +func (t *selfdestructStateTracer) OnCodeChangeV2(addr common.Address, prevCodeHash common.Hash, prevCode []byte, codeHash common.Hash, code []byte, reason tracing.CodeChangeReason) { + acc := t.getOrCreateAccount(addr) + acc.Code = code + + // If this is a selfdestruct code change, mark account as not existing + if reason == tracing.CodeChangeSelfDestruct { + acc.Exists = false + } +} + +func (t *selfdestructStateTracer) Hooks() *tracing.Hooks { + return &tracing.Hooks{ + OnTxStart: t.OnTxStart, + OnTxEnd: t.OnTxEnd, + OnBalanceChange: t.OnBalanceChange, + OnNonceChangeV2: t.OnNonceChangeV2, + OnCodeChangeV2: t.OnCodeChangeV2, + } +} + +func (t *selfdestructStateTracer) Accounts() map[common.Address]*accountState { + return t.accounts +} + +// verifyAccountState compares actual and expected account state and reports any mismatches +func verifyAccountState(t *testing.T, addr common.Address, actual, expected *accountState) { + if actual.Balance.Cmp(expected.Balance) != 0 { + t.Errorf("address %s: balance mismatch: have %s, want %s", + addr.Hex(), actual.Balance, expected.Balance) + } + if actual.Nonce != expected.Nonce { + t.Errorf("address %s: nonce mismatch: have %d, want %d", + addr.Hex(), actual.Nonce, expected.Nonce) + } + if len(actual.Code) != len(expected.Code) { + t.Errorf("address %s: code length mismatch: have %d, want %d", + addr.Hex(), len(actual.Code), len(expected.Code)) + } + if actual.Exists != expected.Exists { + t.Errorf("address %s: exists mismatch: have %v, want %v", + addr.Hex(), actual.Exists, expected.Exists) + } +} + +// setupTestBlockchain creates a blockchain with the given genesis and transaction, +// returns 
the blockchain, the first block, and a statedb at genesis for testing +func setupTestBlockchain(t *testing.T, genesis *core.Genesis, tx *types.Transaction, useBeacon bool) (*core.BlockChain, *types.Block, *state.StateDB) { + var engine consensus.Engine + if useBeacon { + engine = beacon.New(ethash.NewFaker()) + } else { + engine = ethash.NewFaker() + } + + _, blocks, _ := core.GenerateChainWithGenesis(genesis, engine, 1, func(i int, b *core.BlockGen) { + b.AddTx(tx) + }) + db := rawdb.NewMemoryDatabase() + blockchain, err := core.NewBlockChain(db, genesis, engine, nil) + if err != nil { + t.Fatalf("failed to create blockchain: %v", err) + } + if _, err := blockchain.InsertChain(blocks); err != nil { + t.Fatalf("failed to insert chain: %v", err) + } + genesisBlock := blockchain.GetBlockByNumber(0) + if genesisBlock == nil { + t.Fatalf("failed to get genesis block") + } + statedb, err := blockchain.StateAt(genesisBlock.Root()) + if err != nil { + t.Fatalf("failed to get state: %v", err) + } + + return blockchain, blocks[0], statedb +} + +func TestSelfdestructStateTracer(t *testing.T) { + t.Parallel() + + const ( + // Gas limit high enough for all test scenarios (factory creation + multiple calls) + testGasLimit = 500000 + + // Common balance amounts used across tests + testBalanceInitial = 100 // Initial balance for contracts being tested + testBalanceSent = 50 // Amount sent back in sendback tests + testBalanceFactory = 200 // Factory needs extra balance for contract creation + ) + + // Helper to create *big.Int for wei amounts + wei := func(amount int64) *big.Int { + return big.NewInt(amount) + } + + // Test account (transaction sender) + var ( + key, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") + caller = crypto.PubkeyToAddress(key.PublicKey) + ) + + // Simple selfdestruct test contracts + var ( + contract = common.HexToAddress("0x00000000000000000000000000000000000000bb") + recipient = 
common.HexToAddress("0x00000000000000000000000000000000000000cc") + ) + // Build selfdestruct code: PUSH20 SELFDESTRUCT + selfdestructCode := []byte{byte(vm.PUSH20)} + selfdestructCode = append(selfdestructCode, recipient.Bytes()...) + selfdestructCode = append(selfdestructCode, byte(vm.SELFDESTRUCT)) + + // Factory test contracts (create-and-destroy pattern) + var ( + factory = common.HexToAddress("0x00000000000000000000000000000000000000ff") + ) + // Factory code: creates a contract with 100 wei and calls it to trigger selfdestruct back to factory + // See selfdestruct_test_contracts/factory.yul for source + // Runtime bytecode compiled with: solc --strict-assembly --evm-version paris factory.yul --bin + // (Using paris to avoid PUSH0 opcode which is not available pre-Shanghai) + var ( + factoryCode = common.Hex2Bytes("6a6133ff6000526002601ef360a81b600052600080808080600b816064f05af100") + createdContractAddr = crypto.CreateAddress(factory, 0) // Address where factory creates the contract + ) + + // Sendback test contracts (A→B→A pattern) + // For the refund test: Coordinator calls A, then B + // A selfdestructs to B, B sends funds back to A + var ( + contractA = common.HexToAddress("0x00000000000000000000000000000000000000aa") + contractB = common.HexToAddress("0x00000000000000000000000000000000000000bb") + coordinator = common.HexToAddress("0x00000000000000000000000000000000000000cc") + ) + // Contract A: if msg.value > 0, accept funds; else selfdestruct to B + // See selfdestruct_test_contracts/contractA.yul for source + // Runtime bytecode compiled with: solc --strict-assembly --evm-version paris contractA.yul --bin + contractACode := common.Hex2Bytes("60003411600a5760bbff5b00") + + // Contract B: sends 50 wei back to contract A + // See selfdestruct_test_contracts/contractB.yul for source + // Runtime bytecode compiled with: solc --strict-assembly --evm-version paris contractB.yul --bin + contractBCode := common.Hex2Bytes("6000808080603260aa5af100") + + // 
Coordinator: calls A (A selfdestructs to B), then calls B (B sends funds to A) + // See selfdestruct_test_contracts/coordinator.yul for source + // Runtime bytecode compiled with: solc --strict-assembly --evm-version paris coordinator.yul --bin + coordinatorCode := common.Hex2Bytes("60008080808060aa818080808060bb955af1505af100") + + // Factory for create-and-refund test: creates A with 100 wei, calls A, calls B + // See selfdestruct_test_contracts/factoryRefund.yul for source + // Runtime bytecode compiled with: solc --strict-assembly --evm-version paris factoryRefund.yul --bin + var ( + factoryRefund = common.HexToAddress("0x00000000000000000000000000000000000000dd") + factoryRefundCode = common.Hex2Bytes("60008080808060bb78600c600d600039600c6000f3fe60003411600a5760bbff5b0082528180808080601960076064f05af1505af100") + createdContractAddrA = crypto.CreateAddress(factoryRefund, 0) // Address where factory creates contract A + ) + + // Self-destruct-to-self test contracts + var ( + contractSelfDestruct = common.HexToAddress("0x00000000000000000000000000000000000000aa") + coordinatorSendAfter = common.HexToAddress("0x00000000000000000000000000000000000000ee") + ) + // Contract that selfdestructs to self + // See selfdestruct_test_contracts/contractSelfDestruct.yul + contractSelfDestructCode := common.Hex2Bytes("30ff") + + // Coordinator: calls contract (triggers selfdestruct to self), stores balance, sends 50 wei, stores balance again + // See selfdestruct_test_contracts/coordinatorSendAfter.yul + coordinatorSendAfterCode := common.Hex2Bytes("60aa600080808080855af150803160005560008080806032855af1503160015500") + + // Factory with balance checking: creates contract, calls it, checks balances + // See selfdestruct_test_contracts/factorySelfDestructBalanceCheck.yul + var ( + factorySelfDestructBalanceCheck = common.HexToAddress("0x00000000000000000000000000000000000000fd") + factorySelfDestructBalanceCheckCode = 
common.Hex2Bytes("6e6002600d60003960026000f3fe30ff600052600f60116064f0600080808080855af150803160005560008080806032855af1503160015500") + createdContractAddrSelfBalanceCheck = crypto.CreateAddress(factorySelfDestructBalanceCheck, 0) + ) + + tests := []struct { + name string + description string + targetContract common.Address + genesis *core.Genesis + useBeacon bool + expectedResults map[common.Address]accountState + expectedStorage map[common.Address]map[uint64]*big.Int + }{ + { + name: "pre_6780_existing", + description: "Pre-EIP-6780: Existing contract selfdestructs to recipient. Contract should be destroyed and balance transferred.", + targetContract: contract, + genesis: &core.Genesis{ + Config: params.AllEthashProtocolChanges, + Alloc: types.GenesisAlloc{ + caller: {Balance: big.NewInt(params.Ether)}, + contract: { + Balance: wei(testBalanceInitial), + Code: selfdestructCode, + }, + }, + }, + useBeacon: false, + expectedResults: map[common.Address]accountState{ + contract: { + Balance: wei(0), + Nonce: 0, + Code: []byte{}, + Exists: false, + }, + recipient: { + Balance: wei(testBalanceInitial), // Received contract's balance + Nonce: 0, + Code: []byte{}, + Exists: true, + }, + }, + }, + { + name: "post_6780_existing", + description: "Post-EIP-6780: Existing contract selfdestructs to recipient. 
Balance transferred but contract NOT destroyed (code/storage remain).", + targetContract: contract, + genesis: &core.Genesis{ + Config: params.AllDevChainProtocolChanges, + Alloc: types.GenesisAlloc{ + caller: {Balance: big.NewInt(params.Ether)}, + contract: { + Balance: wei(testBalanceInitial), + Code: selfdestructCode, + }, + }, + }, + useBeacon: true, + expectedResults: map[common.Address]accountState{ + contract: { + Balance: wei(0), + Nonce: 0, + Code: selfdestructCode, + Exists: true, + }, + recipient: { + Balance: wei(testBalanceInitial), + Nonce: 0, + Code: []byte{}, + Exists: true, + }, + }, + }, + { + name: "pre_6780_create_destroy", + description: "Pre-EIP-6780: Factory creates contract with 100 wei, contract selfdestructs back to factory. Contract destroyed, factory gets refund.", + targetContract: factory, + genesis: &core.Genesis{ + Config: params.AllEthashProtocolChanges, + Alloc: types.GenesisAlloc{ + caller: {Balance: big.NewInt(params.Ether)}, + factory: { + Balance: wei(testBalanceFactory), + Code: factoryCode, + }, + }, + }, + useBeacon: false, + expectedResults: map[common.Address]accountState{ + factory: { + Balance: wei(testBalanceFactory), + Nonce: 1, + Code: factoryCode, + Exists: true, + }, + createdContractAddr: { + Balance: wei(0), + Nonce: 0, + Code: []byte{}, + Exists: false, + }, + }, + }, + { + name: "post_6780_create_destroy", + description: "Post-EIP-6780: Factory creates contract with 100 wei, contract selfdestructs back to factory. 
Contract destroyed (EIP-6780 exception for same-tx creation).", + targetContract: factory, + genesis: &core.Genesis{ + Config: params.AllDevChainProtocolChanges, + Alloc: types.GenesisAlloc{ + caller: {Balance: big.NewInt(params.Ether)}, + factory: { + Balance: wei(testBalanceFactory), + Code: factoryCode, + }, + }, + }, + useBeacon: true, + expectedResults: map[common.Address]accountState{ + factory: { + Balance: wei(testBalanceFactory), + Nonce: 1, + Code: factoryCode, + Exists: true, + }, + createdContractAddr: { + Balance: wei(0), + Nonce: 0, + Code: []byte{}, + Exists: false, + }, + }, + }, + { + name: "pre_6780_sendback", + description: "Pre-EIP-6780: Contract A selfdestructs sending funds to B, then B sends funds back to A's address. Funds sent to destroyed address are burnt.", + targetContract: coordinator, + genesis: &core.Genesis{ + Config: params.AllEthashProtocolChanges, + Alloc: types.GenesisAlloc{ + caller: {Balance: big.NewInt(params.Ether)}, + contractA: { + Balance: wei(testBalanceInitial), + Code: contractACode, + }, + contractB: { + Balance: wei(0), + Code: contractBCode, + }, + coordinator: { + Code: coordinatorCode, + }, + }, + }, + useBeacon: false, + expectedResults: map[common.Address]accountState{ + contractA: { + Balance: wei(0), + Nonce: 0, + Code: []byte{}, + Exists: false, + }, + contractB: { + // 100 received - 50 sent back + Balance: wei(testBalanceSent), + Nonce: 0, + Code: contractBCode, + Exists: true, + }, + }, + }, + { + name: "post_6780_existing_sendback", + description: "Post-EIP-6780: Existing contract A selfdestructs to B, then B sends funds back to A. 
Funds are NOT burnt (A still exists post-6780).", + targetContract: coordinator, + genesis: &core.Genesis{ + Config: params.AllDevChainProtocolChanges, + Alloc: types.GenesisAlloc{ + caller: {Balance: big.NewInt(params.Ether)}, + contractA: { + Balance: wei(testBalanceInitial), + Code: contractACode, + }, + contractB: { + Balance: wei(0), + Code: contractBCode, + }, + coordinator: { + Code: coordinatorCode, + }, + }, + }, + useBeacon: true, + expectedResults: map[common.Address]accountState{ + contractA: { + Balance: wei(testBalanceSent), + Nonce: 0, + Code: contractACode, + Exists: true, + }, + contractB: { + Balance: wei(testBalanceSent), + Nonce: 0, + Code: contractBCode, + Exists: true, + }, + }, + }, + { + name: "post_6780_create_destroy_sendback", + description: "Post-EIP-6780: Factory creates A, A selfdestructs to B, B sends funds back to A. Funds are burnt (A was destroyed via EIP-6780 exception).", + targetContract: factoryRefund, + genesis: &core.Genesis{ + Config: params.AllDevChainProtocolChanges, + Alloc: types.GenesisAlloc{ + caller: {Balance: big.NewInt(params.Ether)}, + contractB: { + Balance: wei(0), + Code: contractBCode, + }, + factoryRefund: { + Balance: wei(testBalanceFactory), + Code: factoryRefundCode, + }, + }, + }, + useBeacon: true, + expectedResults: map[common.Address]accountState{ + createdContractAddrA: { + // Funds sent back are burnt! + Balance: wei(0), + Nonce: 0, + Code: []byte{}, + Exists: false, + }, + contractB: { + Balance: wei(testBalanceSent), + Nonce: 0, + Code: contractBCode, + Exists: true, + }, + }, + }, + { + name: "post_6780_existing_to_self", + description: "Post-EIP-6780: Pre-existing contract selfdestructs to itself. 
Balance NOT burnt (selfdestruct-to-self is no-op for existing contracts).", + targetContract: coordinatorSendAfter, + genesis: &core.Genesis{ + Config: params.AllDevChainProtocolChanges, + Alloc: types.GenesisAlloc{ + caller: {Balance: big.NewInt(params.Ether)}, + contractSelfDestruct: { + Balance: wei(testBalanceInitial), + Code: contractSelfDestructCode, + }, + coordinatorSendAfter: { + Balance: wei(testBalanceInitial), + Code: coordinatorSendAfterCode, + }, + }, + }, + useBeacon: true, + expectedResults: map[common.Address]accountState{ + contractSelfDestruct: { + Balance: wei(150), + Nonce: 0, + Code: contractSelfDestructCode, + Exists: true, + }, + coordinatorSendAfter: { + Balance: wei(testBalanceSent), + Nonce: 0, + Code: coordinatorSendAfterCode, + Exists: true, + }, + }, + expectedStorage: map[common.Address]map[uint64]*big.Int{ + coordinatorSendAfter: { + 0: wei(testBalanceInitial), + 1: wei(150), + }, + }, + }, + { + name: "post_6780_create_destroy_to_self", + description: "Post-EIP-6780: Factory creates contract, contract selfdestructs to itself. 
Balance IS burnt and contract destroyed (EIP-6780 exception for same-tx creation).", + targetContract: factorySelfDestructBalanceCheck, + genesis: &core.Genesis{ + Config: params.AllDevChainProtocolChanges, + Alloc: types.GenesisAlloc{ + caller: {Balance: big.NewInt(params.Ether)}, + factorySelfDestructBalanceCheck: { + Balance: wei(testBalanceFactory), + Code: factorySelfDestructBalanceCheckCode, + }, + }, + }, + useBeacon: true, + expectedResults: map[common.Address]accountState{ + createdContractAddrSelfBalanceCheck: { + Balance: wei(0), + Nonce: 0, + Code: []byte{}, + Exists: false, + }, + factorySelfDestructBalanceCheck: { + Balance: wei(testBalanceSent), + Nonce: 1, + Code: factorySelfDestructBalanceCheckCode, + Exists: true, + }, + }, + expectedStorage: map[common.Address]map[uint64]*big.Int{ + factorySelfDestructBalanceCheck: { + 0: wei(0), + 1: wei(0), + }, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + var ( + signer = types.HomesteadSigner{} + tx *types.Transaction + err error + ) + + tx, err = types.SignTx(types.NewTx(&types.LegacyTx{ + Nonce: 0, + To: &tt.targetContract, + Value: big.NewInt(0), + Gas: testGasLimit, + GasPrice: big.NewInt(params.InitialBaseFee * 2), + Data: nil, + }), signer, key) + if err != nil { + t.Fatalf("failed to sign transaction: %v", err) + } + + blockchain, block, statedb := setupTestBlockchain(t, tt.genesis, tx, tt.useBeacon) + defer blockchain.Stop() + + tracer := newSelfdestructStateTracer() + hookedState := state.NewHookedState(statedb, tracer.Hooks()) + msg, err := core.TransactionToMessage(tx, signer, nil) + if err != nil { + t.Fatalf("failed to prepare transaction for tracing: %v", err) + } + context := core.NewEVMBlockContext(block.Header(), blockchain, nil) + evm := vm.NewEVM(context, hookedState, tt.genesis.Config, vm.Config{Tracer: tracer.Hooks()}) + usedGas := uint64(0) + _, err = core.ApplyTransactionWithEVM(msg, new(core.GasPool).AddGas(tx.Gas()), statedb, 
block.Number(), block.Hash(), block.Time(), tx, &usedGas, evm) + if err != nil { + t.Fatalf("failed to execute transaction: %v", err) + } + + results := tracer.Accounts() + + // Verify storage + for addr, expectedSlots := range tt.expectedStorage { + for slot, expectedValue := range expectedSlots { + actualValue := statedb.GetState(addr, common.BigToHash(big.NewInt(int64(slot)))) + if actualValue.Big().Cmp(expectedValue) != 0 { + t.Errorf("address %s slot %d: storage mismatch: have %s, want %s", + addr.Hex(), slot, actualValue.Big(), expectedValue) + } + } + } + + // Verify results + for addr, expected := range tt.expectedResults { + actual, ok := results[addr] + if !ok { + t.Errorf("address %s missing from results", addr.Hex()) + continue + } + verifyAccountState(t, addr, actual, &expected) + } + }) + } +} diff --git a/eth/tracers/internal/tracetest/selfdestruct_test_contracts/contractA.yul b/eth/tracers/internal/tracetest/selfdestruct_test_contracts/contractA.yul new file mode 100644 index 0000000000..109551f26e --- /dev/null +++ b/eth/tracers/internal/tracetest/selfdestruct_test_contracts/contractA.yul @@ -0,0 +1,18 @@ +object "ContractA" { + code { + datacopy(0, dataoffset("Runtime"), datasize("Runtime")) + return(0, datasize("Runtime")) + } + object "Runtime" { + code { + // If receiving funds (msg.value > 0), just accept them and return + if gt(callvalue(), 0) { + stop() + } + + // Otherwise, selfdestruct to B (transfers balance immediately, then stops execution) + let contractB := 0x00000000000000000000000000000000000000bb + selfdestruct(contractB) + } + } +} diff --git a/eth/tracers/internal/tracetest/selfdestruct_test_contracts/contractB.yul b/eth/tracers/internal/tracetest/selfdestruct_test_contracts/contractB.yul new file mode 100644 index 0000000000..c737355fb6 --- /dev/null +++ b/eth/tracers/internal/tracetest/selfdestruct_test_contracts/contractB.yul @@ -0,0 +1,14 @@ +object "ContractB" { + code { + datacopy(0, dataoffset("Runtime"), 
datasize("Runtime")) + return(0, datasize("Runtime")) + } + object "Runtime" { + code { + // Send 50 wei back to contract A + let contractA := 0x00000000000000000000000000000000000000aa + let success := call(gas(), contractA, 50, 0, 0, 0, 0) + stop() + } + } +} diff --git a/eth/tracers/internal/tracetest/selfdestruct_test_contracts/contractSelfDestruct.yul b/eth/tracers/internal/tracetest/selfdestruct_test_contracts/contractSelfDestruct.yul new file mode 100644 index 0000000000..73884c5dd4 --- /dev/null +++ b/eth/tracers/internal/tracetest/selfdestruct_test_contracts/contractSelfDestruct.yul @@ -0,0 +1,12 @@ +object "ContractSelfDestruct" { + code { + datacopy(0, dataoffset("Runtime"), datasize("Runtime")) + return(0, datasize("Runtime")) + } + object "Runtime" { + code { + // Simply selfdestruct to self + selfdestruct(address()) + } + } +} diff --git a/eth/tracers/internal/tracetest/selfdestruct_test_contracts/coordinator.yul b/eth/tracers/internal/tracetest/selfdestruct_test_contracts/coordinator.yul new file mode 100644 index 0000000000..54bd5c08f3 --- /dev/null +++ b/eth/tracers/internal/tracetest/selfdestruct_test_contracts/coordinator.yul @@ -0,0 +1,20 @@ +object "Coordinator" { + code { + datacopy(0, dataoffset("Runtime"), datasize("Runtime")) + return(0, datasize("Runtime")) + } + object "Runtime" { + code { + let contractA := 0x00000000000000000000000000000000000000aa + let contractB := 0x00000000000000000000000000000000000000bb + + // First, call A (A will selfdestruct to B) + pop(call(gas(), contractA, 0, 0, 0, 0, 0)) + + // Then, call B (B will send funds back to A) + pop(call(gas(), contractB, 0, 0, 0, 0, 0)) + + stop() + } + } +} diff --git a/eth/tracers/internal/tracetest/selfdestruct_test_contracts/coordinatorSendAfter.yul b/eth/tracers/internal/tracetest/selfdestruct_test_contracts/coordinatorSendAfter.yul new file mode 100644 index 0000000000..9473d1f3ef --- /dev/null +++ 
b/eth/tracers/internal/tracetest/selfdestruct_test_contracts/coordinatorSendAfter.yul @@ -0,0 +1,27 @@ +object "CoordinatorSendAfter" { + code { + datacopy(0, dataoffset("Runtime"), datasize("Runtime")) + return(0, datasize("Runtime")) + } + object "Runtime" { + code { + let contractAddr := 0x00000000000000000000000000000000000000aa + + // Call contract (triggers selfdestruct to self, burning its balance) + pop(call(gas(), contractAddr, 0, 0, 0, 0, 0)) + + // Check contract's balance immediately after selfdestruct + // Store in slot 0 to verify it's 0 (proving immediate burn) + sstore(0, balance(contractAddr)) + + // Send 50 wei to the contract (after it selfdestructed) + pop(call(gas(), contractAddr, 50, 0, 0, 0, 0)) + + // Check balance again after sending funds + // Store in slot 1 to verify it's 50 (new funds not burnt) + sstore(1, balance(contractAddr)) + + stop() + } + } +} diff --git a/eth/tracers/internal/tracetest/selfdestruct_test_contracts/factoryRefund.yul b/eth/tracers/internal/tracetest/selfdestruct_test_contracts/factoryRefund.yul new file mode 100644 index 0000000000..f52a46fcc3 --- /dev/null +++ b/eth/tracers/internal/tracetest/selfdestruct_test_contracts/factoryRefund.yul @@ -0,0 +1,28 @@ +object "FactoryRefund" { + code { + datacopy(0, dataoffset("Runtime"), datasize("Runtime")) + return(0, datasize("Runtime")) + } + object "Runtime" { + code { + let contractB := 0x00000000000000000000000000000000000000bb + + // Store the deploy bytecode for contract A in memory + // Full deploy bytecode from: solc --strict-assembly --evm-version paris contractA.yul --bin + // Including the 0xfe separator: 600c600d600039600c6000f3fe60003411600a5760bbff5b00 + // That's 25 bytes, padded to 32 bytes with 7 zero bytes at the front + mstore(0, 0x0000000000000000000000000000600c600d600039600c6000f3fe60003411600a5760bbff5b00) + + // CREATE contract A with 100 wei, using 25 bytes starting at position 7 + let contractA := create(100, 7, 25) + + // Call contract A 
(triggers selfdestruct to B) + pop(call(gas(), contractA, 0, 0, 0, 0, 0)) + + // Call contract B (B sends 50 wei back to A) + pop(call(gas(), contractB, 0, 0, 0, 0, 0)) + + stop() + } + } +} diff --git a/eth/tracers/internal/tracetest/selfdestruct_test_contracts/factorySelfDestructBalanceCheck.yul b/eth/tracers/internal/tracetest/selfdestruct_test_contracts/factorySelfDestructBalanceCheck.yul new file mode 100644 index 0000000000..46f4628419 --- /dev/null +++ b/eth/tracers/internal/tracetest/selfdestruct_test_contracts/factorySelfDestructBalanceCheck.yul @@ -0,0 +1,35 @@ +object "FactorySelfDestructBalanceCheck" { + code { + datacopy(0, dataoffset("Runtime"), datasize("Runtime")) + return(0, datasize("Runtime")) + } + object "Runtime" { + code { + // Get the full deploy bytecode for ContractSelfDestruct + // Compiled with: solc --strict-assembly --evm-version paris contractSelfDestruct.yul --bin + // Full bytecode: 6002600d60003960026000f3fe30ff + // That's 15 bytes total, padded to 32 bytes with 17 zero bytes at front + mstore(0, 0x0000000000000000000000000000000000000000006002600d60003960026000f3fe30ff) + + // CREATE contract with 100 wei, using deploy bytecode + // The bytecode is 15 bytes, starts at position 17 in the 32-byte word + let contractAddr := create(100, 17, 15) + + // Call the created contract (triggers selfdestruct to self) + pop(call(gas(), contractAddr, 0, 0, 0, 0, 0)) + + // Check contract's balance immediately after selfdestruct + // Store in slot 0 to verify it's 0 (proving immediate burn) + sstore(0, balance(contractAddr)) + + // Send 50 wei to the contract (after it selfdestructed) + pop(call(gas(), contractAddr, 50, 0, 0, 0, 0)) + + // Check balance again after sending funds + // Store in slot 1 to verify it's 0 (funds sent to destroyed contract are burnt) + sstore(1, balance(contractAddr)) + + stop() + } + } +} From 588dd94aadca36d8a55a44457ff31dd480073a97 Mon Sep 17 00:00:00 2001 From: rjl493456442 Date: Sat, 17 Jan 2026 20:28:37 +0800 
Subject: [PATCH 244/277] triedb/pathdb: implement trienode history indexing scheme (#33551) This PR implements the indexing scheme for trie node history. Check https://github.com/ethereum/go-ethereum/pull/33399 for more details --- cmd/keeper/go.mod | 1 + triedb/pathdb/database_test.go | 4 +- triedb/pathdb/history.go | 79 +++- triedb/pathdb/history_indexer.go | 34 +- triedb/pathdb/history_reader.go | 75 +-- triedb/pathdb/history_reader_test.go | 4 +- triedb/pathdb/history_state.go | 8 +- triedb/pathdb/history_trienode.go | 32 +- triedb/pathdb/history_trienode_test.go | 46 -- triedb/pathdb/history_trienode_utils.go | 238 ++++++++++ triedb/pathdb/history_trienode_utils_test.go | 458 +++++++++++++++++++ triedb/pathdb/reader.go | 4 +- 12 files changed, 870 insertions(+), 113 deletions(-) diff --git a/cmd/keeper/go.mod b/cmd/keeper/go.mod index cee1ce05a7..21cdfe8c33 100644 --- a/cmd/keeper/go.mod +++ b/cmd/keeper/go.mod @@ -33,6 +33,7 @@ require ( github.com/tklauser/go-sysconf v0.3.12 // indirect github.com/tklauser/numcpus v0.6.1 // indirect golang.org/x/crypto v0.36.0 // indirect + golang.org/x/exp v0.0.0-20230626212559-97b1e661b5df // indirect golang.org/x/sync v0.12.0 // indirect golang.org/x/sys v0.39.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect diff --git a/triedb/pathdb/database_test.go b/triedb/pathdb/database_test.go index 8cca7b1b3c..2d1819d08f 100644 --- a/triedb/pathdb/database_test.go +++ b/triedb/pathdb/database_test.go @@ -950,7 +950,7 @@ func TestDatabaseIndexRecovery(t *testing.T) { var ( dIndex int roots = env.roots - hr = newHistoryReader(env.db.diskdb, env.db.stateFreezer) + hr = newStateHistoryReader(env.db.diskdb, env.db.stateFreezer) ) for i, root := range roots { if root == dRoot { @@ -1011,7 +1011,7 @@ func TestDatabaseIndexRecovery(t *testing.T) { // Ensure the truncated state histories become accessible bRoot = env.db.tree.bottom().rootHash() - hr = newHistoryReader(env.db.diskdb, env.db.stateFreezer) + hr = 
newStateHistoryReader(env.db.diskdb, env.db.stateFreezer) for i, root := range roots { if root == bRoot { break diff --git a/triedb/pathdb/history.go b/triedb/pathdb/history.go index d78999f218..86224ea5b2 100644 --- a/triedb/pathdb/history.go +++ b/triedb/pathdb/history.go @@ -121,6 +121,20 @@ func (ident stateIdent) String() string { return ident.addressHash.Hex() + ident.path } +func (ident stateIdent) bloomSize() int { + if ident.typ == typeAccount { + return 0 + } + if ident.typ == typeStorage { + return 0 + } + scheme := accountIndexScheme + if ident.addressHash != (common.Hash{}) { + scheme = storageIndexScheme + } + return scheme.getBitmapSize(len(ident.path)) +} + // newAccountIdent constructs a state identifier for an account. func newAccountIdent(addressHash common.Hash) stateIdent { return stateIdent{ @@ -143,6 +157,8 @@ func newStorageIdent(addressHash common.Hash, storageHash common.Hash) stateIden // newTrienodeIdent constructs a state identifier for a trie node. // The address denotes the address hash of the associated account; // the path denotes the path of the node within the trie; +// +// nolint:unused func newTrienodeIdent(addressHash common.Hash, path string) stateIdent { return stateIdent{ typ: typeTrienode, @@ -180,17 +196,62 @@ func newStorageIdentQuery(address common.Address, addressHash common.Hash, stora } } -// newTrienodeIdentQuery constructs a state identifier for a trie node. -// the addressHash denotes the address hash of the associated account; -// the path denotes the path of the node within the trie; -// -// nolint:unused -func newTrienodeIdentQuery(addrHash common.Hash, path []byte) stateIdentQuery { - return stateIdentQuery{ - stateIdent: newTrienodeIdent(addrHash, string(path)), +// indexElem defines the element for indexing. 
+type indexElem interface { + key() stateIdent + ext() []uint16 +} + +type accountIndexElem struct { + addressHash common.Hash +} + +func (a accountIndexElem) key() stateIdent { + return stateIdent{ + typ: typeAccount, + addressHash: a.addressHash, } } +func (a accountIndexElem) ext() []uint16 { + return nil +} + +type storageIndexElem struct { + addressHash common.Hash + storageHash common.Hash +} + +func (a storageIndexElem) key() stateIdent { + return stateIdent{ + typ: typeStorage, + addressHash: a.addressHash, + storageHash: a.storageHash, + } +} + +func (a storageIndexElem) ext() []uint16 { + return nil +} + +type trienodeIndexElem struct { + owner common.Hash + path string + data []uint16 +} + +func (a trienodeIndexElem) key() stateIdent { + return stateIdent{ + typ: typeTrienode, + addressHash: a.owner, + path: a.path, + } +} + +func (a trienodeIndexElem) ext() []uint16 { + return a.data +} + // history defines the interface of historical data, shared by stateHistory // and trienodeHistory. type history interface { @@ -198,7 +259,7 @@ type history interface { typ() historyType // forEach returns an iterator to traverse the state entries in the history. - forEach() iter.Seq[stateIdent] + forEach() iter.Seq[indexElem] } var ( diff --git a/triedb/pathdb/history_indexer.go b/triedb/pathdb/history_indexer.go index ddb4a293cc..18d71f6dae 100644 --- a/triedb/pathdb/history_indexer.go +++ b/triedb/pathdb/history_indexer.go @@ -29,6 +29,7 @@ import ( "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/rlp" + "golang.org/x/exp/maps" "golang.org/x/sync/errgroup" ) @@ -121,18 +122,20 @@ func deleteIndexMetadata(db ethdb.KeyValueWriter, typ historyType) { // batchIndexer is responsible for performing batch indexing or unindexing // of historical data (e.g., state or trie node changes) atomically. 
type batchIndexer struct { - index map[stateIdent][]uint64 // List of history IDs for tracked state entry - pending int // Number of entries processed in the current batch. - delete bool // Operation mode: true for unindex, false for index. - lastID uint64 // ID of the most recently processed history. - typ historyType // Type of history being processed (e.g., state or trienode). - db ethdb.KeyValueStore // Key-value database used to store or delete index data. + index map[stateIdent][]uint64 // List of history IDs for tracked state entry + ext map[stateIdent][][]uint16 // List of extension for each state element + pending int // Number of entries processed in the current batch. + delete bool // Operation mode: true for unindex, false for index. + lastID uint64 // ID of the most recently processed history. + typ historyType // Type of history being processed (e.g., state or trienode). + db ethdb.KeyValueStore // Key-value database used to store or delete index data. } // newBatchIndexer constructs the batch indexer with the supplied mode. func newBatchIndexer(db ethdb.KeyValueStore, delete bool, typ historyType) *batchIndexer { return &batchIndexer{ index: make(map[stateIdent][]uint64), + ext: make(map[stateIdent][][]uint16), delete: delete, typ: typ, db: db, @@ -142,8 +145,10 @@ func newBatchIndexer(db ethdb.KeyValueStore, delete bool, typ historyType) *batc // process traverses the state entries within the provided history and tracks the mutation // records for them. 
func (b *batchIndexer) process(h history, id uint64) error { - for ident := range h.forEach() { - b.index[ident] = append(b.index[ident], id) + for elem := range h.forEach() { + key := elem.key() + b.index[key] = append(b.index[key], id) + b.ext[key] = append(b.ext[key], elem.ext()) b.pending++ } b.lastID = id @@ -190,14 +195,15 @@ func (b *batchIndexer) finish(force bool) error { indexed = metadata.Last } for ident, list := range b.index { + ext := b.ext[ident] eg.Go(func() error { if !b.delete { - iw, err := newIndexWriter(b.db, ident, indexed, 0) + iw, err := newIndexWriter(b.db, ident, indexed, ident.bloomSize()) if err != nil { return err } - for _, n := range list { - if err := iw.append(n, nil); err != nil { + for i, n := range list { + if err := iw.append(n, ext[i]); err != nil { return err } } @@ -205,7 +211,7 @@ func (b *batchIndexer) finish(force bool) error { iw.finish(batch) }) } else { - id, err := newIndexDeleter(b.db, ident, indexed, 0) + id, err := newIndexDeleter(b.db, ident, indexed, ident.bloomSize()) if err != nil { return err } @@ -239,8 +245,10 @@ func (b *batchIndexer) finish(force bool) error { return err } log.Debug("Committed batch indexer", "type", b.typ, "entries", len(b.index), "records", b.pending, "size", common.StorageSize(batchSize), "elapsed", common.PrettyDuration(time.Since(start))) + b.pending = 0 - b.index = make(map[stateIdent][]uint64) + maps.Clear(b.index) + maps.Clear(b.ext) return nil } diff --git a/triedb/pathdb/history_reader.go b/triedb/pathdb/history_reader.go index 69e7d5bd22..04cd869d2b 100644 --- a/triedb/pathdb/history_reader.go +++ b/triedb/pathdb/history_reader.go @@ -99,16 +99,17 @@ func (r *indexReaderWithLimitTag) readGreaterThan(id uint64, lastID uint64) (uin return r.reader.readGreaterThan(id) } -// historyReader is the structure to access historic state data. -type historyReader struct { +// stateHistoryReader is the structure to access historic state data. 
+type stateHistoryReader struct { disk ethdb.KeyValueReader freezer ethdb.AncientReader readers map[string]*indexReaderWithLimitTag } -// newHistoryReader constructs the history reader with the supplied db. -func newHistoryReader(disk ethdb.KeyValueReader, freezer ethdb.AncientReader) *historyReader { - return &historyReader{ +// newStateHistoryReader constructs the history reader with the supplied db +// for accessing historical states. +func newStateHistoryReader(disk ethdb.KeyValueReader, freezer ethdb.AncientReader) *stateHistoryReader { + return &stateHistoryReader{ disk: disk, freezer: freezer, readers: make(map[string]*indexReaderWithLimitTag), @@ -117,7 +118,7 @@ func newHistoryReader(disk ethdb.KeyValueReader, freezer ethdb.AncientReader) *h // readAccountMetadata resolves the account metadata within the specified // state history. -func (r *historyReader) readAccountMetadata(address common.Address, historyID uint64) ([]byte, error) { +func (r *stateHistoryReader) readAccountMetadata(address common.Address, historyID uint64) ([]byte, error) { blob := rawdb.ReadStateAccountIndex(r.freezer, historyID) if len(blob) == 0 { return nil, fmt.Errorf("account index is truncated, historyID: %d", historyID) @@ -143,7 +144,7 @@ func (r *historyReader) readAccountMetadata(address common.Address, historyID ui // readStorageMetadata resolves the storage slot metadata within the specified // state history. 
-func (r *historyReader) readStorageMetadata(storageKey common.Hash, storageHash common.Hash, historyID uint64, slotOffset, slotNumber int) ([]byte, error) { +func (r *stateHistoryReader) readStorageMetadata(storageKey common.Hash, storageHash common.Hash, historyID uint64, slotOffset, slotNumber int) ([]byte, error) { data, err := rawdb.ReadStateStorageIndex(r.freezer, historyID, slotIndexSize*slotOffset, slotIndexSize*slotNumber) if err != nil { msg := fmt.Sprintf("id: %d, slot-offset: %d, slot-length: %d", historyID, slotOffset, slotNumber) @@ -178,7 +179,7 @@ func (r *historyReader) readStorageMetadata(storageKey common.Hash, storageHash } // readAccount retrieves the account data from the specified state history. -func (r *historyReader) readAccount(address common.Address, historyID uint64) ([]byte, error) { +func (r *stateHistoryReader) readAccount(address common.Address, historyID uint64) ([]byte, error) { metadata, err := r.readAccountMetadata(address, historyID) if err != nil { return nil, err @@ -194,7 +195,7 @@ func (r *historyReader) readAccount(address common.Address, historyID uint64) ([ } // readStorage retrieves the storage slot data from the specified state history. 
-func (r *historyReader) readStorage(address common.Address, storageKey common.Hash, storageHash common.Hash, historyID uint64) ([]byte, error) { +func (r *stateHistoryReader) readStorage(address common.Address, storageKey common.Hash, storageHash common.Hash, historyID uint64) ([]byte, error) { metadata, err := r.readAccountMetadata(address, historyID) if err != nil { return nil, err @@ -224,35 +225,16 @@ func (r *historyReader) readStorage(address common.Address, storageKey common.Ha // stateID: represents the ID of the state of the specified version; // lastID: represents the ID of the latest/newest state history; // latestValue: represents the state value at the current disk layer with ID == lastID; -func (r *historyReader) read(state stateIdentQuery, stateID uint64, lastID uint64, latestValue []byte) ([]byte, error) { - tail, err := r.freezer.Tail() +func (r *stateHistoryReader) read(state stateIdentQuery, stateID uint64, lastID uint64, latestValue []byte) ([]byte, error) { + lastIndexed, err := checkStateAvail(state.stateIdent, typeStateHistory, r.freezer, stateID, lastID, r.disk) if err != nil { return nil, err - } // firstID = tail+1 - - // stateID+1 == firstID is allowed, as all the subsequent state histories - // are present with no gap inside. - if stateID < tail { - return nil, fmt.Errorf("historical state has been pruned, first: %d, state: %d", tail+1, stateID) } - - // To serve the request, all state histories from stateID+1 to lastID - // must be indexed. It's not supposed to happen unless system is very - // wrong. 
- metadata := loadIndexMetadata(r.disk, toHistoryType(state.typ)) - if metadata == nil || metadata.Last < lastID { - indexed := "null" - if metadata != nil { - indexed = fmt.Sprintf("%d", metadata.Last) - } - return nil, fmt.Errorf("state history is not fully indexed, requested: %d, indexed: %s", stateID, indexed) - } - // Construct the index reader to locate the corresponding history for // state retrieval ir, ok := r.readers[state.String()] if !ok { - ir, err = newIndexReaderWithLimitTag(r.disk, state.stateIdent, metadata.Last, 0) + ir, err = newIndexReaderWithLimitTag(r.disk, state.stateIdent, lastIndexed, 0) if err != nil { return nil, err } @@ -277,3 +259,34 @@ func (r *historyReader) read(state stateIdentQuery, stateID uint64, lastID uint6 } return r.readStorage(state.address, state.storageKey, state.storageHash, historyID) } + +// checkStateAvail determines whether the requested historical state is available +// for accessing. What's more, it also returns the ID of the latest indexed history +// entry for subsequent usage. +func checkStateAvail(state stateIdent, exptyp historyType, freezer ethdb.AncientReader, stateID uint64, lastID uint64, db ethdb.KeyValueReader) (uint64, error) { + if toHistoryType(state.typ) != exptyp { + return 0, fmt.Errorf("unsupported history type: %d, want: %v", toHistoryType(state.typ), exptyp) + } + // firstID = tail+1 + tail, err := freezer.Tail() + if err != nil { + return 0, err + } + // stateID+1 == firstID is allowed, as all the subsequent history entries + // are present with no gap inside. + if stateID < tail { + return 0, fmt.Errorf("historical state has been pruned, first: %d, state: %d", tail+1, stateID) + } + // To serve the request, all history entries from stateID+1 to lastID + // must be indexed. It's not supposed to happen unless system is very + // wrong. 
+ metadata := loadIndexMetadata(db, exptyp) + if metadata == nil || metadata.Last < lastID { + indexed := "null" + if metadata != nil { + indexed = fmt.Sprintf("%d", metadata.Last) + } + return 0, fmt.Errorf("history is not fully indexed, requested: %d, indexed: %s", stateID, indexed) + } + return metadata.Last, nil +} diff --git a/triedb/pathdb/history_reader_test.go b/triedb/pathdb/history_reader_test.go index 3e1a545ff3..b69fba68cb 100644 --- a/triedb/pathdb/history_reader_test.go +++ b/triedb/pathdb/history_reader_test.go @@ -50,7 +50,7 @@ func stateAvail(id uint64, env *tester) bool { return id+1 >= firstID } -func checkHistoricalState(env *tester, root common.Hash, id uint64, hr *historyReader) error { +func checkHistoricalState(env *tester, root common.Hash, id uint64, hr *stateHistoryReader) error { if !stateAvail(id, env) { return nil } @@ -157,7 +157,7 @@ func testHistoryReader(t *testing.T, historyLimit uint64) { var ( roots = env.roots dl = env.db.tree.bottom() - hr = newHistoryReader(env.db.diskdb, env.db.stateFreezer) + hr = newStateHistoryReader(env.db.diskdb, env.db.stateFreezer) ) for i, root := range roots { if root == dl.rootHash() { diff --git a/triedb/pathdb/history_state.go b/triedb/pathdb/history_state.go index bc21915dba..23428b1a54 100644 --- a/triedb/pathdb/history_state.go +++ b/triedb/pathdb/history_state.go @@ -283,11 +283,11 @@ func (h *stateHistory) typ() historyType { // forEach implements the history interface, returning an iterator to traverse the // state entries in the history. 
-func (h *stateHistory) forEach() iter.Seq[stateIdent] { - return func(yield func(stateIdent) bool) { +func (h *stateHistory) forEach() iter.Seq[indexElem] { + return func(yield func(indexElem) bool) { for _, addr := range h.accountList { addrHash := crypto.Keccak256Hash(addr.Bytes()) - if !yield(newAccountIdent(addrHash)) { + if !yield(accountIndexElem{addrHash}) { return } for _, slotKey := range h.storageList[addr] { @@ -298,7 +298,7 @@ func (h *stateHistory) forEach() iter.Seq[stateIdent] { if h.meta.version != stateHistoryV0 { slotHash = crypto.Keccak256Hash(slotKey.Bytes()) } - if !yield(newStorageIdent(addrHash, slotHash)) { + if !yield(storageIndexElem{addrHash, slotHash}) { return } } diff --git a/triedb/pathdb/history_trienode.go b/triedb/pathdb/history_trienode.go index 6c0c0fe8cc..67be9de491 100644 --- a/triedb/pathdb/history_trienode.go +++ b/triedb/pathdb/history_trienode.go @@ -166,11 +166,35 @@ func (h *trienodeHistory) typ() historyType { // forEach implements the history interface, returning an iterator to traverse the // state entries in the history. 
-func (h *trienodeHistory) forEach() iter.Seq[stateIdent] { - return func(yield func(stateIdent) bool) { +func (h *trienodeHistory) forEach() iter.Seq[indexElem] { + return func(yield func(indexElem) bool) { for _, owner := range h.owners { - for _, path := range h.nodeList[owner] { - if !yield(newTrienodeIdent(owner, path)) { + var ( + scheme *indexScheme + paths = h.nodeList[owner] + indexes = make(map[string]map[uint16]struct{}) + ) + if owner == (common.Hash{}) { + scheme = accountIndexScheme + } else { + scheme = storageIndexScheme + } + for _, leaf := range findLeafPaths(paths) { + chunks, ids := scheme.splitPath(leaf) + for i := 0; i < len(chunks); i++ { + if _, exists := indexes[chunks[i]]; !exists { + indexes[chunks[i]] = make(map[uint16]struct{}) + } + indexes[chunks[i]][ids[i]] = struct{}{} + } + } + for chunk, ids := range indexes { + elem := trienodeIndexElem{ + owner: owner, + path: chunk, + data: slices.Collect(maps.Keys(ids)), + } + if !yield(elem) { return } } diff --git a/triedb/pathdb/history_trienode_test.go b/triedb/pathdb/history_trienode_test.go index 0c0422f00f..8f9b9c2600 100644 --- a/triedb/pathdb/history_trienode_test.go +++ b/triedb/pathdb/history_trienode_test.go @@ -534,52 +534,6 @@ func TestTrienodeHistoryReaderNilKey(t *testing.T) { } } -// TestTrienodeHistoryReaderIterator tests the iterator functionality -func TestTrienodeHistoryReaderIterator(t *testing.T) { - h := makeTrienodeHistory() - - // Count expected entries - expectedCount := 0 - expectedNodes := make(map[stateIdent]bool) - for owner, nodeList := range h.nodeList { - expectedCount += len(nodeList) - for _, node := range nodeList { - expectedNodes[stateIdent{ - typ: typeTrienode, - addressHash: owner, - path: node, - }] = true - } - } - - // Test the iterator - actualCount := 0 - for x := range h.forEach() { - _ = x - actualCount++ - } - if actualCount != expectedCount { - t.Fatalf("Iterator count mismatch: expected %d, got %d", expectedCount, actualCount) - } - - // Test 
that iterator yields expected state identifiers - seen := make(map[stateIdent]bool) - for ident := range h.forEach() { - if ident.typ != typeTrienode { - t.Fatal("Iterator should only yield trienode history identifiers") - } - key := stateIdent{typ: ident.typ, addressHash: ident.addressHash, path: ident.path} - if seen[key] { - t.Fatal("Iterator yielded duplicate identifier") - } - seen[key] = true - - if !expectedNodes[key] { - t.Fatalf("Unexpected yielded identifier %v", key) - } - } -} - // TestCommonPrefixLen tests the commonPrefixLen helper function func TestCommonPrefixLen(t *testing.T) { tests := []struct { diff --git a/triedb/pathdb/history_trienode_utils.go b/triedb/pathdb/history_trienode_utils.go index 241b8a7d3c..11107494bb 100644 --- a/triedb/pathdb/history_trienode_utils.go +++ b/triedb/pathdb/history_trienode_utils.go @@ -21,6 +21,7 @@ import ( "fmt" "math/bits" "slices" + "strings" ) // commonPrefixLen returns the length of the common prefix shared by a and b. @@ -34,6 +35,243 @@ func commonPrefixLen(a, b []byte) int { return n } +// findLeafPaths scans a lexicographically sorted list of paths and returns +// the subset of paths that represent leaves. +// +// A path is considered a leaf if: +// - it is the last element in the list, or +// - the next path does not have the current path as its prefix. +// +// In other words, a leaf is a path that has no children extending it. +// +// Example: +// +// Input: ["a", "ab", "abc", "b", "ba"] +// Output: ["abc", "ba"] +// +// The input must be sorted; otherwise the result is undefined. +func findLeafPaths(paths []string) []string { + var leaves []string + for i := 0; i < len(paths); i++ { + if i == len(paths)-1 || !strings.HasPrefix(paths[i+1], paths[i]) { + leaves = append(leaves, paths[i]) + } + } + return leaves +} + +// hexPathNodeID computes a numeric node ID from the given path. 
The path is +// interpreted as a sequence of base-16 digits, where each byte of the input +// is treated as one hexadecimal digit in a big-endian number. +// +// The resulting node ID is constructed as: +// +// ID = 1 + 16 + 16^2 + ... + 16^(n-1) + value +// +// where n is the number of bytes in the path, and `value` is the base-16 +// interpretation of the byte sequence. +// +// The offset (1 + 16 + 16^2 + ... + 16^(n-1)) ensures that all IDs of shorter +// paths occupy a lower numeric range, preserving lexicographic ordering between +// differently-length paths. +// +// The numeric node ID is represented by the uint16 with the assumption the length +// of path won't be greater than 3. +func hexPathNodeID(path string) uint16 { + var ( + offset = uint16(0) + pow = uint16(1) + value = uint16(0) + bytes = []byte(path) + ) + for i := 0; i < len(bytes); i++ { + offset += pow + pow *= 16 + } + for i := 0; i < len(bytes); i++ { + value = value*16 + uint16(bytes[i]) + } + return offset + value +} + +// bitmapSize computes the number of bytes required for the marker bitmap +// corresponding to the remaining portion of a path after a cut point. +// The marker is a bitmap where each bit represents the presence of a +// possible element in the remaining path segment. +func bitmapSize(levels int) int { + // Compute: total = 1 + 16 + 16^2 + ... + 16^(segLen-1) + var ( + bits = 0 + pow = 1 + ) + for i := 0; i < levels; i++ { + bits += pow + pow *= 16 + } + // A small adjustment is applied to exclude the root element of this path + // segment, since any existing element would already imply the mutation of + // the root element. This trick can save us 1 byte for each bitmap which is + // non-trivial. + bits -= 1 + return bits / 8 +} + +// indexScheme defines how trie nodes are split into chunks and index them +// at chunk level. +// +// skipRoot indicates whether the root node should be excluded from indexing. 
+// cutPoints specifies the key length of chunks (in nibbles) extracted from +// each path. +type indexScheme struct { + // skipRoot indicates whether the root node should be excluded from indexing. + // In the account trie, the root is mutated on every state transition, so + // indexing it provides no value. + skipRoot bool + + // cutPoints defines the key lengths of chunks at different positions. + // A single trie node path may span multiple chunks vertically. + cutPoints []int + + // bitmaps specifies the required bitmap size for each chunk. The key is the + // chunk key length, and the value is the corresponding bitmap size. + bitmaps map[int]int +} + +var ( + // Account trie is split into chunks like this: + // + // - root node is excluded from indexing + // - nodes at level1 to level2 are grouped as 16 chunks + // - all other nodes are grouped 3 levels per chunk + // + // Level1 [0] ... [f] 16 chunks + // Level3 [000] ... [fff] 4096 chunks + // Level6 [000000] ... [fffffff] 16777216 chunks + // + // For the chunks at level1, there are 17 nodes per chunk. + // + // chunk-level 0 [ 0 ] 1 node + // chunk-level 1 [ 1 ] … [ 16 ] 16 nodes + // + // For the non-level1 chunks, there are 273 nodes per chunk, + // regardless of the chunk's depth in the trie. + // + // chunk-level 0 [ 0 ] 1 node + // chunk-level 1 [ 1 ] … [ 16 ] 16 nodes + // chunk-level 2 [ 17 ] … … [ 272 ] 256 nodes + accountIndexScheme = newIndexScheme(true) + + // Storage trie is split into chunks like this: (3 levels per chunk) + // + // Level0 [ ROOT ] 1 chunk + // Level3 [000] ... [fff] 4096 chunks + // Level6 [000000] ... [fffffff] 16777216 chunks + // + // Within each chunk, there are 273 nodes in total, regardless of + // the chunk's depth in the trie. + // + // chunk-level 0 [ 0 ] 1 node + // chunk-level 1 [ 1 ] … [ 16 ] 16 nodes + // chunk-level 2 [ 17 ] … … [ 272 ] 256 nodes + storageIndexScheme = newIndexScheme(false) +) + +// newIndexScheme initializes the index scheme. 
+func newIndexScheme(skipRoot bool) *indexScheme { + var ( + cuts []int + bitmaps = make(map[int]int) + ) + for v := 0; v <= 64; v += 3 { + var ( + levels int + length int + ) + if v == 0 && skipRoot { + length = 1 + levels = 2 + } else { + length = v + levels = 3 + } + cuts = append(cuts, length) + bitmaps[length] = bitmapSize(levels) + } + return &indexScheme{ + skipRoot: skipRoot, + cutPoints: cuts, + bitmaps: bitmaps, + } +} + +// getBitmapSize returns the required bytes for bitmap with chunk's position. +func (s *indexScheme) getBitmapSize(pathLen int) int { + return s.bitmaps[pathLen] +} + +// chunkSpan returns how many chunks should be spanned with the given path. +func (s *indexScheme) chunkSpan(length int) int { + var n int + for _, cut := range s.cutPoints { + if length >= cut { + n++ + continue + } + } + return n +} + +// splitPath applies the indexScheme to the given path and returns two lists: +// +// - chunkIDs: the progressive chunk IDs cuts defined by the scheme +// - innerIDs: the computed node ID for the path segment following each cut +// +// The scheme defines a set of cut points that partition the path. For each cut: +// +// - chunkIDs[i] is path[:cutPoints[i]] +// - innerIDs[i] is the node ID of the segment path[cutPoints[i] : nextCut-1] +func (s *indexScheme) splitPath(path string) ([]string, []uint16) { + // Special case: the root node of the account trie is mutated in every + // state transition, so its mutation records can be ignored. 
+ n := len(path) + if n == 0 && s.skipRoot { + return nil, nil + } + var ( + // Determine how many chunks are spanned by the path + chunks = s.chunkSpan(n) + chunkIDs = make([]string, 0, chunks) + nodeIDs = make([]uint16, 0, chunks) + ) + for i := 0; i < chunks; i++ { + position := s.cutPoints[i] + chunkIDs = append(chunkIDs, path[:position]) + + var limit int + if i != chunks-1 { + limit = s.cutPoints[i+1] - 1 + } else { + limit = len(path) + } + nodeIDs = append(nodeIDs, hexPathNodeID(path[position:limit])) + } + return chunkIDs, nodeIDs +} + +// splitPathLast returns the path prefix of the deepest chunk spanned by the +// given path, along with its corresponding internal node ID. If the path +// spans no chunks, it returns an empty prefix and 0. +// +// nolint:unused +func (s *indexScheme) splitPathLast(path string) (string, uint16) { + chunkIDs, nodeIDs := s.splitPath(path) + if len(chunkIDs) == 0 { + return "", 0 + } + n := len(chunkIDs) + return chunkIDs[n-1], nodeIDs[n-1] +} + // encodeIDs sorts the given list of uint16 IDs and encodes them into a // compact byte slice using variable-length unsigned integer encoding. 
func encodeIDs(ids []uint16) []byte { diff --git a/triedb/pathdb/history_trienode_utils_test.go b/triedb/pathdb/history_trienode_utils_test.go index c3bd0d5b1f..32bd91166d 100644 --- a/triedb/pathdb/history_trienode_utils_test.go +++ b/triedb/pathdb/history_trienode_utils_test.go @@ -22,6 +22,464 @@ import ( "testing" ) +func TestHexPathNodeID(t *testing.T) { + t.Parallel() + + var suites = []struct { + input string + exp uint16 + }{ + { + input: "", + exp: 0, + }, + { + input: string([]byte{0x0}), + exp: 1, + }, + { + input: string([]byte{0xf}), + exp: 16, + }, + { + input: string([]byte{0x0, 0x0}), + exp: 17, + }, + { + input: string([]byte{0x0, 0xf}), + exp: 32, + }, + { + input: string([]byte{0x1, 0x0}), + exp: 33, + }, + { + input: string([]byte{0x1, 0xf}), + exp: 48, + }, + { + input: string([]byte{0xf, 0xf}), + exp: 272, + }, + { + input: string([]byte{0xf, 0xf, 0xf}), + exp: 4368, + }, + } + for _, suite := range suites { + got := hexPathNodeID(suite.input) + if got != suite.exp { + t.Fatalf("Unexpected node ID for %v: got %d, want %d", suite.input, got, suite.exp) + } + } +} + +func TestFindLeafPaths(t *testing.T) { + t.Parallel() + + tests := []struct { + input []string + expect []string + }{ + { + input: nil, + expect: nil, + }, + { + input: []string{"a"}, + expect: []string{"a"}, + }, + { + input: []string{"", "0", "00", "01", "1"}, + expect: []string{ + "00", + "01", + "1", + }, + }, + { + input: []string{"10", "100", "11", "2"}, + expect: []string{ + "100", + "11", + "2", + }, + }, + { + input: []string{"10", "100000000", "11", "111111111", "2"}, + expect: []string{ + "100000000", + "111111111", + "2", + }, + }, + } + for _, test := range tests { + res := findLeafPaths(test.input) + if !reflect.DeepEqual(res, test.expect) { + t.Fatalf("Unexpected result: %v, expected %v", res, test.expect) + } + } +} + +func TestSplitAccountPath(t *testing.T) { + t.Parallel() + + var suites = []struct { + input string + expPrefix []string + expID []uint16 + }{ + // 
Length = 0 + { + "", nil, nil, + }, + // Length = 1 + { + string([]byte{0x0}), + []string{ + string([]byte{0x0}), + }, + []uint16{ + 0, + }, + }, + { + string([]byte{0x1}), + []string{ + string([]byte{0x1}), + }, + []uint16{ + 0, + }, + }, + { + string([]byte{0xf}), + []string{ + string([]byte{0xf}), + }, + []uint16{ + 0, + }, + }, + // Length = 2 + { + string([]byte{0x0, 0x0}), + []string{ + string([]byte{0x0}), + }, + []uint16{ + 1, + }, + }, + { + string([]byte{0x0, 0x1}), + []string{ + string([]byte{0x0}), + }, + []uint16{ + 2, + }, + }, + { + string([]byte{0x0, 0xf}), + []string{ + string([]byte{0x0}), + }, + []uint16{ + 16, + }, + }, + { + string([]byte{0xf, 0xf}), + []string{ + string([]byte{0xf}), + }, + []uint16{ + 16, + }, + }, + // Length = 3 + { + string([]byte{0x0, 0x0, 0x0}), + []string{ + string([]byte{0x0}), + string([]byte{0x0, 0x0, 0x0}), + }, + []uint16{ + 1, 0, + }, + }, + // Length = 3 + { + string([]byte{0xf, 0xf, 0xf}), + []string{ + string([]byte{0xf}), + string([]byte{0xf, 0xf, 0xf}), + }, + []uint16{ + 16, 0, + }, + }, + // Length = 4 + { + string([]byte{0x0, 0x0, 0x0, 0x0}), + []string{ + string([]byte{0x0}), + string([]byte{0x0, 0x0, 0x0}), + }, + []uint16{ + 1, 1, + }, + }, + { + string([]byte{0xf, 0xf, 0xf, 0xf}), + []string{ + string([]byte{0xf}), + string([]byte{0xf, 0xf, 0xf}), + }, + []uint16{ + 16, 16, + }, + }, + // Length = 5 + { + string([]byte{0x0, 0x0, 0x0, 0x0, 0x0}), + []string{ + string([]byte{0x0}), + string([]byte{0x0, 0x0, 0x0}), + }, + []uint16{ + 1, 17, + }, + }, + { + string([]byte{0xf, 0xf, 0xf, 0xf, 0xf}), + []string{ + string([]byte{0xf}), + string([]byte{0xf, 0xf, 0xf}), + }, + []uint16{ + 16, 272, + }, + }, + // Length = 6 + { + string([]byte{0x0, 0x0, 0x0, 0x0, 0x0, 0x0}), + []string{ + string([]byte{0x0}), + string([]byte{0x0, 0x0, 0x0}), + string([]byte{0x0, 0x0, 0x0, 0x0, 0x0, 0x0}), + }, + []uint16{ + 1, 17, 0, + }, + }, + { + string([]byte{0xf, 0xf, 0xf, 0xf, 0xf, 0xf}), + []string{ + string([]byte{0xf}), 
+ string([]byte{0xf, 0xf, 0xf}), + string([]byte{0xf, 0xf, 0xf, 0xf, 0xf, 0xf}), + }, + []uint16{ + 16, 272, 0, + }, + }, + } + for _, suite := range suites { + prefix, id := accountIndexScheme.splitPath(suite.input) + if !reflect.DeepEqual(prefix, suite.expPrefix) { + t.Fatalf("Unexpected prefix for %v: got %v, want %v", suite.input, prefix, suite.expPrefix) + } + if !reflect.DeepEqual(id, suite.expID) { + t.Fatalf("Unexpected ID for %v: got %v, want %v", suite.input, id, suite.expID) + } + } +} + +func TestSplitStoragePath(t *testing.T) { + t.Parallel() + + var suites = []struct { + input string + expPrefix []string + expID []uint16 + }{ + // Length = 0 + { + "", + []string{ + string([]byte{}), + }, + []uint16{ + 0, + }, + }, + // Length = 1 + { + string([]byte{0x0}), + []string{ + string([]byte{}), + }, + []uint16{ + 1, + }, + }, + { + string([]byte{0x1}), + []string{ + string([]byte{}), + }, + []uint16{ + 2, + }, + }, + { + string([]byte{0xf}), + []string{ + string([]byte{}), + }, + []uint16{ + 16, + }, + }, + // Length = 2 + { + string([]byte{0x0, 0x0}), + []string{ + string([]byte{}), + }, + []uint16{ + 17, + }, + }, + { + string([]byte{0x0, 0x1}), + []string{ + string([]byte{}), + }, + []uint16{ + 18, + }, + }, + { + string([]byte{0x0, 0xf}), + []string{ + string([]byte{}), + }, + []uint16{ + 32, + }, + }, + { + string([]byte{0xf, 0xf}), + []string{ + string([]byte{}), + }, + []uint16{ + 272, + }, + }, + // Length = 3 + { + string([]byte{0x0, 0x0, 0x0}), + []string{ + string([]byte{}), + string([]byte{0x0, 0x0, 0x0}), + }, + []uint16{ + 17, 0, + }, + }, + // Length = 3 + { + string([]byte{0xf, 0xf, 0xf}), + []string{ + string([]byte{}), + string([]byte{0xf, 0xf, 0xf}), + }, + []uint16{ + 272, 0, + }, + }, + // Length = 4 + { + string([]byte{0x0, 0x0, 0x0, 0x0}), + []string{ + string([]byte{}), + string([]byte{0x0, 0x0, 0x0}), + }, + []uint16{ + 17, 1, + }, + }, + { + string([]byte{0xf, 0xf, 0xf, 0xf}), + []string{ + string([]byte{}), + string([]byte{0xf, 
0xf, 0xf}), + }, + []uint16{ + 272, 16, + }, + }, + // Length = 5 + { + string([]byte{0x0, 0x0, 0x0, 0x0, 0x0}), + []string{ + string([]byte{}), + string([]byte{0x0, 0x0, 0x0}), + }, + []uint16{ + 17, 17, + }, + }, + { + string([]byte{0xf, 0xf, 0xf, 0xf, 0xf}), + []string{ + string([]byte{}), + string([]byte{0xf, 0xf, 0xf}), + }, + []uint16{ + 272, 272, + }, + }, + // Length = 6 + { + string([]byte{0x0, 0x0, 0x0, 0x0, 0x0, 0x0}), + []string{ + string([]byte{}), + string([]byte{0x0, 0x0, 0x0}), + string([]byte{0x0, 0x0, 0x0, 0x0, 0x0, 0x0}), + }, + []uint16{ + 17, 17, 0, + }, + }, + { + string([]byte{0xf, 0xf, 0xf, 0xf, 0xf, 0xf}), + []string{ + string([]byte{}), + string([]byte{0xf, 0xf, 0xf}), + string([]byte{0xf, 0xf, 0xf, 0xf, 0xf, 0xf}), + }, + []uint16{ + 272, 272, 0, + }, + }, + } + for i, suite := range suites { + prefix, id := storageIndexScheme.splitPath(suite.input) + if !reflect.DeepEqual(prefix, suite.expPrefix) { + t.Fatalf("Test %d, unexpected prefix for %v: got %v, want %v", i, suite.input, prefix, suite.expPrefix) + } + if !reflect.DeepEqual(id, suite.expID) { + t.Fatalf("Test %d, unexpected ID for %v: got %v, want %v", i, suite.input, id, suite.expID) + } + } +} + func TestIsAncestor(t *testing.T) { suites := []struct { x, y uint16 diff --git a/triedb/pathdb/reader.go b/triedb/pathdb/reader.go index 842ac0972e..c76d88b594 100644 --- a/triedb/pathdb/reader.go +++ b/triedb/pathdb/reader.go @@ -200,7 +200,7 @@ func (db *Database) StateReader(root common.Hash) (database.StateReader, error) // historical state. 
type HistoricalStateReader struct { db *Database - reader *historyReader + reader *stateHistoryReader id uint64 } @@ -234,7 +234,7 @@ func (db *Database) HistoricReader(root common.Hash) (*HistoricalStateReader, er return &HistoricalStateReader{ id: *id, db: db, - reader: newHistoryReader(db.diskdb, db.stateFreezer), + reader: newStateHistoryReader(db.diskdb, db.stateFreezer), }, nil } From add1890a572a01ab7c9424082d794f1ba9475a44 Mon Sep 17 00:00:00 2001 From: rjl493456442 Date: Sat, 17 Jan 2026 21:23:48 +0800 Subject: [PATCH 245/277] triedb/pathdb: enable trienode history (#32621) It's the part-4 for trienode history. The trienode history persistence has been enabled with this PR by flag `history.trienode ` --- cmd/geth/chaincmd.go | 1 + cmd/geth/main.go | 1 + cmd/utils/flags.go | 28 ++++-- core/blockchain.go | 6 ++ eth/backend.go | 1 + eth/ethconfig/config.go | 2 + eth/ethconfig/gen_config.go | 6 ++ triedb/pathdb/buffer.go | 10 +-- triedb/pathdb/config.go | 16 +++- triedb/pathdb/database.go | 143 +++++++++++++----------------- triedb/pathdb/disklayer.go | 74 ++++++++++++---- triedb/pathdb/history.go | 131 +++++++++++++++++++++++++++ triedb/pathdb/history_trienode.go | 1 - triedb/pathdb/journal.go | 6 +- triedb/pathdb/metrics.go | 7 +- 15 files changed, 308 insertions(+), 125 deletions(-) diff --git a/cmd/geth/chaincmd.go b/cmd/geth/chaincmd.go index 55316c14ab..0af0a61602 100644 --- a/cmd/geth/chaincmd.go +++ b/cmd/geth/chaincmd.go @@ -120,6 +120,7 @@ if one is set. 
Otherwise it prints the genesis from the datadir.`, utils.LogNoHistoryFlag, utils.LogExportCheckpointsFlag, utils.StateHistoryFlag, + utils.TrienodeHistoryFlag, }, utils.DatabaseFlags, debug.Flags), Before: func(ctx *cli.Context) error { flags.MigrateGlobalFlags(ctx) diff --git a/cmd/geth/main.go b/cmd/geth/main.go index db4b569c89..9440759289 100644 --- a/cmd/geth/main.go +++ b/cmd/geth/main.go @@ -94,6 +94,7 @@ var ( utils.LogNoHistoryFlag, utils.LogExportCheckpointsFlag, utils.StateHistoryFlag, + utils.TrienodeHistoryFlag, utils.LightKDFFlag, utils.EthRequiredBlocksFlag, utils.LegacyWhitelistFlag, // deprecated diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index 2b64761e00..660c986ac9 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -295,6 +295,12 @@ var ( Value: ethconfig.Defaults.StateHistory, Category: flags.StateCategory, } + TrienodeHistoryFlag = &cli.Int64Flag{ + Name: "history.trienode", + Usage: "Number of recent blocks to retain trienode history for, only relevant in state.scheme=path (default/negative = disabled, 0 = entire chain)", + Value: ethconfig.Defaults.TrienodeHistory, + Category: flags.StateCategory, + } TransactionHistoryFlag = &cli.Uint64Flag{ Name: "history.transactions", Usage: "Number of recent blocks to maintain transactions index for (default = about one year, 0 = entire chain)", @@ -1699,6 +1705,9 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) { if ctx.IsSet(StateHistoryFlag.Name) { cfg.StateHistory = ctx.Uint64(StateHistoryFlag.Name) } + if ctx.IsSet(TrienodeHistoryFlag.Name) { + cfg.TrienodeHistory = ctx.Int64(TrienodeHistoryFlag.Name) + } if ctx.IsSet(StateSchemeFlag.Name) { cfg.StateScheme = ctx.String(StateSchemeFlag.Name) } @@ -2299,15 +2308,16 @@ func MakeChain(ctx *cli.Context, stack *node.Node, readonly bool) (*core.BlockCh Fatalf("%v", err) } options := &core.BlockChainConfig{ - TrieCleanLimit: ethconfig.Defaults.TrieCleanCache, - NoPrefetch: 
ctx.Bool(CacheNoPrefetchFlag.Name), - TrieDirtyLimit: ethconfig.Defaults.TrieDirtyCache, - ArchiveMode: ctx.String(GCModeFlag.Name) == "archive", - TrieTimeLimit: ethconfig.Defaults.TrieTimeout, - SnapshotLimit: ethconfig.Defaults.SnapshotCache, - Preimages: ctx.Bool(CachePreimagesFlag.Name), - StateScheme: scheme, - StateHistory: ctx.Uint64(StateHistoryFlag.Name), + TrieCleanLimit: ethconfig.Defaults.TrieCleanCache, + NoPrefetch: ctx.Bool(CacheNoPrefetchFlag.Name), + TrieDirtyLimit: ethconfig.Defaults.TrieDirtyCache, + ArchiveMode: ctx.String(GCModeFlag.Name) == "archive", + TrieTimeLimit: ethconfig.Defaults.TrieTimeout, + SnapshotLimit: ethconfig.Defaults.SnapshotCache, + Preimages: ctx.Bool(CachePreimagesFlag.Name), + StateScheme: scheme, + StateHistory: ctx.Uint64(StateHistoryFlag.Name), + TrienodeHistory: ctx.Int64(TrienodeHistoryFlag.Name), // Disable transaction indexing/unindexing. TxLookupLimit: -1, diff --git a/core/blockchain.go b/core/blockchain.go index e71f97b7b9..fc0e70c271 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -177,6 +177,11 @@ type BlockChainConfig struct { // If set to 0, all state histories across the entire chain will be retained; StateHistory uint64 + // Number of blocks from the chain head for which trienode histories are retained. 
+ // If set to 0, all trienode histories across the entire chain will be retained; + // If set to -1, no trienode history will be retained; + TrienodeHistory int64 + // State snapshot related options SnapshotLimit int // Memory allowance (MB) to use for caching snapshot entries in memory SnapshotNoBuild bool // Whether the background generation is allowed @@ -255,6 +260,7 @@ func (cfg *BlockChainConfig) triedbConfig(isVerkle bool) *triedb.Config { if cfg.StateScheme == rawdb.PathScheme { config.PathDB = &pathdb.Config{ StateHistory: cfg.StateHistory, + TrienodeHistory: cfg.TrienodeHistory, EnableStateIndexing: cfg.ArchiveMode, TrieCleanSize: cfg.TrieCleanLimit * 1024 * 1024, StateCleanSize: cfg.SnapshotLimit * 1024 * 1024, diff --git a/eth/backend.go b/eth/backend.go index cae2aabe30..932d1a2515 100644 --- a/eth/backend.go +++ b/eth/backend.go @@ -230,6 +230,7 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) { SnapshotLimit: config.SnapshotCache, Preimages: config.Preimages, StateHistory: config.StateHistory, + TrienodeHistory: config.TrienodeHistory, StateScheme: scheme, ChainHistoryMode: config.HistoryMode, TxLookupLimit: int64(min(config.TransactionHistory, math.MaxInt64)), diff --git a/eth/ethconfig/config.go b/eth/ethconfig/config.go index d6ed2c2576..72123c41b3 100644 --- a/eth/ethconfig/config.go +++ b/eth/ethconfig/config.go @@ -56,6 +56,7 @@ var Defaults = Config{ TransactionHistory: 2350000, LogHistory: 2350000, StateHistory: params.FullImmutabilityThreshold, + TrienodeHistory: -1, DatabaseCache: 512, TrieCleanCache: 154, TrieDirtyCache: 256, @@ -108,6 +109,7 @@ type Config struct { LogNoHistory bool `toml:",omitempty"` // No log search index is maintained. LogExportCheckpoints string // export log index checkpoints to file StateHistory uint64 `toml:",omitempty"` // The maximum number of blocks from head whose state histories are reserved. 
+ TrienodeHistory int64 `toml:",omitempty"` // Number of blocks from the chain head for which trienode histories are retained // State scheme represents the scheme used to store ethereum states and trie // nodes on top. It can be 'hash', 'path', or none which means use the scheme diff --git a/eth/ethconfig/gen_config.go b/eth/ethconfig/gen_config.go index 97c5db3ecd..ed6c9b0197 100644 --- a/eth/ethconfig/gen_config.go +++ b/eth/ethconfig/gen_config.go @@ -31,6 +31,7 @@ func (c Config) MarshalTOML() (interface{}, error) { LogNoHistory bool `toml:",omitempty"` LogExportCheckpoints string StateHistory uint64 `toml:",omitempty"` + TrienodeHistory int64 `toml:",omitempty"` StateScheme string `toml:",omitempty"` RequiredBlocks map[uint64]common.Hash `toml:"-"` SlowBlockThreshold time.Duration `toml:",omitempty"` @@ -81,6 +82,7 @@ func (c Config) MarshalTOML() (interface{}, error) { enc.LogNoHistory = c.LogNoHistory enc.LogExportCheckpoints = c.LogExportCheckpoints enc.StateHistory = c.StateHistory + enc.TrienodeHistory = c.TrienodeHistory enc.StateScheme = c.StateScheme enc.RequiredBlocks = c.RequiredBlocks enc.SlowBlockThreshold = c.SlowBlockThreshold @@ -135,6 +137,7 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error { LogNoHistory *bool `toml:",omitempty"` LogExportCheckpoints *string StateHistory *uint64 `toml:",omitempty"` + TrienodeHistory *int64 `toml:",omitempty"` StateScheme *string `toml:",omitempty"` RequiredBlocks map[uint64]common.Hash `toml:"-"` SlowBlockThreshold *time.Duration `toml:",omitempty"` @@ -216,6 +219,9 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error { if dec.StateHistory != nil { c.StateHistory = *dec.StateHistory } + if dec.TrienodeHistory != nil { + c.TrienodeHistory = *dec.TrienodeHistory + } if dec.StateScheme != nil { c.StateScheme = *dec.StateScheme } diff --git a/triedb/pathdb/buffer.go b/triedb/pathdb/buffer.go index 138962110f..853e1090b3 100644 --- a/triedb/pathdb/buffer.go +++ 
b/triedb/pathdb/buffer.go @@ -132,7 +132,7 @@ func (b *buffer) size() uint64 { // flush persists the in-memory dirty trie node into the disk if the configured // memory threshold is reached. Note, all data must be written atomically. -func (b *buffer) flush(root common.Hash, db ethdb.KeyValueStore, freezer ethdb.AncientWriter, progress []byte, nodesCache, statesCache *fastcache.Cache, id uint64, postFlush func()) { +func (b *buffer) flush(root common.Hash, db ethdb.KeyValueStore, freezers []ethdb.AncientWriter, progress []byte, nodesCache, statesCache *fastcache.Cache, id uint64, postFlush func()) { if b.done != nil { panic("duplicated flush operation") } @@ -165,11 +165,9 @@ func (b *buffer) flush(root common.Hash, db ethdb.KeyValueStore, freezer ethdb.A // // This step is crucial to guarantee that the corresponding state history remains // available for state rollback. - if freezer != nil { - if err := freezer.SyncAncient(); err != nil { - b.flushErr = err - return - } + if err := syncHistory(freezers...); err != nil { + b.flushErr = err + return } nodes := b.nodes.write(batch, nodesCache) accounts, slots := b.states.write(batch, progress, statesCache) diff --git a/triedb/pathdb/config.go b/triedb/pathdb/config.go index 3745a63edd..0da8604b6c 100644 --- a/triedb/pathdb/config.go +++ b/triedb/pathdb/config.go @@ -53,6 +53,7 @@ var ( // Defaults contains default settings for Ethereum mainnet. var Defaults = &Config{ StateHistory: params.FullImmutabilityThreshold, + TrienodeHistory: -1, EnableStateIndexing: false, TrieCleanSize: defaultTrieCleanSize, StateCleanSize: defaultStateCleanSize, @@ -61,14 +62,16 @@ var Defaults = &Config{ // ReadOnly is the config in order to open database in read only mode. 
var ReadOnly = &Config{ - ReadOnly: true, - TrieCleanSize: defaultTrieCleanSize, - StateCleanSize: defaultStateCleanSize, + ReadOnly: true, + TrienodeHistory: -1, + TrieCleanSize: defaultTrieCleanSize, + StateCleanSize: defaultStateCleanSize, } // Config contains the settings for database. type Config struct { StateHistory uint64 // Number of recent blocks to maintain state history for, 0: full chain + TrienodeHistory int64 // Number of recent blocks to maintain trienode history for, 0: full chain, negative: disable EnableStateIndexing bool // Whether to enable state history indexing for external state access TrieCleanSize int // Maximum memory allowance (in bytes) for caching clean trie data StateCleanSize int // Maximum memory allowance (in bytes) for caching clean state data @@ -108,6 +111,13 @@ func (c *Config) fields() []interface{} { } else { list = append(list, "state-history", fmt.Sprintf("last %d blocks", c.StateHistory)) } + if c.TrienodeHistory >= 0 { + if c.TrienodeHistory == 0 { + list = append(list, "trie-history", "entire chain") + } else { + list = append(list, "trie-history", fmt.Sprintf("last %d blocks", c.TrienodeHistory)) + } + } if c.EnableStateIndexing { list = append(list, "index-history", true) } diff --git a/triedb/pathdb/database.go b/triedb/pathdb/database.go index 131747978c..f7c0ba1398 100644 --- a/triedb/pathdb/database.go +++ b/triedb/pathdb/database.go @@ -137,6 +137,9 @@ type Database struct { stateFreezer ethdb.ResettableAncientStore // Freezer for storing state histories, nil possible in tests stateIndexer *historyIndexer // History indexer historical state data, nil possible + trienodeFreezer ethdb.ResettableAncientStore // Freezer for storing trienode histories, nil possible in tests + trienodeIndexer *historyIndexer // History indexer for historical trienode data + lock sync.RWMutex // Lock to prevent mutations from happening at the same time } @@ -169,11 +172,14 @@ func New(diskdb ethdb.Database, config *Config, isVerkle bool) 
*Database { // and in-memory layer journal. db.tree = newLayerTree(db.loadLayers()) - // Repair the state history, which might not be aligned with the state - // in the key-value store due to an unclean shutdown. - if err := db.repairHistory(); err != nil { - log.Crit("Failed to repair state history", "err", err) + // Repair the history, which might not be aligned with the persistent + // state in the key-value store due to an unclean shutdown. + states, trienodes, err := repairHistory(db.diskdb, isVerkle, db.config.ReadOnly, db.tree.bottom().stateID(), db.config.TrienodeHistory >= 0) + if err != nil { + log.Crit("Failed to repair history", "err", err) } + db.stateFreezer, db.trienodeFreezer = states, trienodes + // Disable database in case node is still in the initial state sync stage. if rawdb.ReadSnapSyncStatusFlag(diskdb) == rawdb.StateSyncRunning && !db.readOnly { if err := db.Disable(); err != nil { @@ -187,11 +193,8 @@ func New(diskdb ethdb.Database, config *Config, isVerkle bool) *Database { if err := db.setStateGenerator(); err != nil { log.Crit("Failed to setup the generator", "err", err) } - // TODO (rjl493456442) disable the background indexing in read-only mode - if db.stateFreezer != nil && db.config.EnableStateIndexing { - db.stateIndexer = newHistoryIndexer(db.diskdb, db.stateFreezer, db.tree.bottom().stateID(), typeStateHistory) - log.Info("Enabled state history indexing") - } + db.setHistoryIndexer() + fields := config.fields() if db.isVerkle { fields = append(fields, "verkle", true) @@ -200,59 +203,28 @@ func New(diskdb ethdb.Database, config *Config, isVerkle bool) *Database { return db } -// repairHistory truncates leftover state history objects, which may occur due -// to an unclean shutdown or other unexpected reasons. -func (db *Database) repairHistory() error { - // Open the freezer for state history. This mechanism ensures that - // only one database instance can be opened at a time to prevent - // accidental mutation. 
- ancient, err := db.diskdb.AncientDatadir() - if err != nil { - // TODO error out if ancient store is disabled. A tons of unit tests - // disable the ancient store thus the error here will immediately fail - // all of them. Fix the tests first. - return nil +// setHistoryIndexer initializes the indexers for both state history and +// trienode history if available. Note that this function may be called while +// existing indexers are still running, so they must be closed beforehand. +func (db *Database) setHistoryIndexer() { + // TODO (rjl493456442) disable the background indexing in read-only mode + if !db.config.EnableStateIndexing { + return } - freezer, err := rawdb.NewStateFreezer(ancient, db.isVerkle, db.readOnly) - if err != nil { - log.Crit("Failed to open state history freezer", "err", err) - } - db.stateFreezer = freezer - - // Reset the entire state histories if the trie database is not initialized - // yet. This action is necessary because these state histories are not - // expected to exist without an initialized trie database. 
- id := db.tree.bottom().stateID() - if id == 0 { - frozen, err := db.stateFreezer.Ancients() - if err != nil { - log.Crit("Failed to retrieve head of state history", "err", err) + if db.stateFreezer != nil { + if db.stateIndexer != nil { + db.stateIndexer.close() } - if frozen != 0 { - // Purge all state history indexing data first - batch := db.diskdb.NewBatch() - rawdb.DeleteStateHistoryIndexMetadata(batch) - rawdb.DeleteStateHistoryIndexes(batch) - if err := batch.Write(); err != nil { - log.Crit("Failed to purge state history index", "err", err) - } - if err := db.stateFreezer.Reset(); err != nil { - log.Crit("Failed to reset state histories", "err", err) - } - log.Info("Truncated extraneous state history") + db.stateIndexer = newHistoryIndexer(db.diskdb, db.stateFreezer, db.tree.bottom().stateID(), typeStateHistory) + log.Info("Enabled state history indexing") + } + if db.trienodeFreezer != nil { + if db.trienodeIndexer != nil { + db.trienodeIndexer.close() } - return nil + db.trienodeIndexer = newHistoryIndexer(db.diskdb, db.trienodeFreezer, db.tree.bottom().stateID(), typeTrienodeHistory) + log.Info("Enabled trienode history indexing") } - // Truncate the extra state histories above in freezer in case it's not - // aligned with the disk layer. It might happen after a unclean shutdown. - pruned, err := truncateFromHead(db.stateFreezer, typeStateHistory, id) - if err != nil { - log.Crit("Failed to truncate extra state histories", "err", err) - } - if pruned != 0 { - log.Warn("Truncated extra state histories", "number", pruned) - } - return nil } // setStateGenerator loads the state generation progress marker and potentially @@ -333,8 +305,13 @@ func (db *Database) Update(root common.Hash, parentRoot common.Hash, block uint6 if err := db.modifyAllowed(); err != nil { return err } - // TODO(rjl493456442) tracking the origins in the following PRs. 
- if err := db.tree.add(root, parentRoot, block, NewNodeSetWithOrigin(nodes.Nodes(), nil), states); err != nil { + var nodesWithOrigins *nodeSetWithOrigin + if db.config.TrienodeHistory >= 0 { + nodesWithOrigins = NewNodeSetWithOrigin(nodes.NodeAndOrigins()) + } else { + nodesWithOrigins = NewNodeSetWithOrigin(nodes.Nodes(), nil) + } + if err := db.tree.add(root, parentRoot, block, nodesWithOrigins, states); err != nil { return err } // Keep 128 diff layers in the memory, persistent layer is 129th. @@ -422,18 +399,9 @@ func (db *Database) Enable(root common.Hash) error { // all root->id mappings should be removed as well. Since // mappings can be huge and might take a while to clear // them, just leave them in disk and wait for overwriting. - if db.stateFreezer != nil { - // Purge all state history indexing data first - batch.Reset() - rawdb.DeleteStateHistoryIndexMetadata(batch) - rawdb.DeleteStateHistoryIndexes(batch) - if err := batch.Write(); err != nil { - return err - } - if err := db.stateFreezer.Reset(); err != nil { - return err - } - } + purgeHistory(db.stateFreezer, db.diskdb, typeStateHistory) + purgeHistory(db.trienodeFreezer, db.diskdb, typeTrienodeHistory) + // Re-enable the database as the final step. db.waitSync = false rawdb.WriteSnapSyncStatusFlag(db.diskdb, rawdb.StateSyncFinished) @@ -446,11 +414,8 @@ func (db *Database) Enable(root common.Hash) error { // To ensure the history indexer always matches the current state, we must: // 1. Close any existing indexer // 2. Re-initialize the indexer so it starts indexing from the new state root. 
- if db.stateIndexer != nil && db.stateFreezer != nil && db.config.EnableStateIndexing { - db.stateIndexer.close() - db.stateIndexer = newHistoryIndexer(db.diskdb, db.stateFreezer, db.tree.bottom().stateID(), typeStateHistory) - log.Info("Re-enabled state history indexing") - } + db.setHistoryIndexer() + log.Info("Rebuilt trie database", "root", root) return nil } @@ -506,6 +471,12 @@ func (db *Database) Recover(root common.Hash) error { if err != nil { return err } + if db.trienodeFreezer != nil { + _, err = truncateFromHead(db.trienodeFreezer, typeTrienodeHistory, dl.stateID()) + if err != nil { + return err + } + } log.Debug("Recovered state", "root", root, "elapsed", common.PrettyDuration(time.Since(start))) return nil } @@ -566,11 +537,21 @@ func (db *Database) Close() error { if db.stateIndexer != nil { db.stateIndexer.close() } - // Close the attached state history freezer. - if db.stateFreezer == nil { - return nil + if db.trienodeIndexer != nil { + db.trienodeIndexer.close() } - return db.stateFreezer.Close() + // Close the attached state history freezer. 
+ if db.stateFreezer != nil { + if err := db.stateFreezer.Close(); err != nil { + return err + } + } + if db.trienodeFreezer != nil { + if err := db.trienodeFreezer.Close(); err != nil { + return err + } + } + return nil } // Size returns the current storage size of the memory cache in front of the diff --git a/triedb/pathdb/disklayer.go b/triedb/pathdb/disklayer.go index b9c308c5b6..d6e997e044 100644 --- a/triedb/pathdb/disklayer.go +++ b/triedb/pathdb/disklayer.go @@ -26,6 +26,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/log" ) @@ -323,36 +324,60 @@ func (dl *diskLayer) update(root common.Hash, id uint64, block uint64, nodes *no return newDiffLayer(dl, root, id, block, nodes, states) } -// writeStateHistory stores the state history and indexes if indexing is +// writeHistory stores the specified history and indexes if indexing is // permitted. // // What's more, this function also returns a flag indicating whether the // buffer flushing is required, ensuring the persistent state ID is always // greater than or equal to the first history ID. 
-func (dl *diskLayer) writeStateHistory(diff *diffLayer) (bool, error) { - // Short circuit if state history is not permitted - if dl.db.stateFreezer == nil { +func (dl *diskLayer) writeHistory(typ historyType, diff *diffLayer) (bool, error) { + var ( + limit uint64 + freezer ethdb.AncientStore + indexer *historyIndexer + writeFunc func(writer ethdb.AncientWriter, dl *diffLayer) error + ) + switch typ { + case typeStateHistory: + freezer = dl.db.stateFreezer + indexer = dl.db.stateIndexer + writeFunc = writeStateHistory + limit = dl.db.config.StateHistory + case typeTrienodeHistory: + freezer = dl.db.trienodeFreezer + indexer = dl.db.trienodeIndexer + writeFunc = writeTrienodeHistory + + // Skip the history commit if the trienode history is not permitted + if dl.db.config.TrienodeHistory < 0 { + return false, nil + } + limit = uint64(dl.db.config.TrienodeHistory) + default: + panic(fmt.Sprintf("unknown history type: %v", typ)) + } + // Short circuit if the history freezer is nil + if freezer == nil { return false, nil } // Bail out with an error if writing the state history fails. // This can happen, for example, if the device is full. - err := writeStateHistory(dl.db.stateFreezer, diff) + err := writeFunc(freezer, diff) if err != nil { return false, err } - // Notify the state history indexer for newly created history - if dl.db.stateIndexer != nil { - if err := dl.db.stateIndexer.extend(diff.stateID()); err != nil { + // Notify the history indexer for newly created history + if indexer != nil { + if err := indexer.extend(diff.stateID()); err != nil { return false, err } } // Determine if the persisted history object has exceeded the // configured limitation. 
- limit := dl.db.config.StateHistory if limit == 0 { return false, nil } - tail, err := dl.db.stateFreezer.Tail() + tail, err := freezer.Tail() if err != nil { return false, err } // firstID = tail+1 @@ -375,14 +400,14 @@ func (dl *diskLayer) writeStateHistory(diff *diffLayer) (bool, error) { // These measures ensure the persisted state ID always remains greater // than or equal to the first history ID. if persistentID := rawdb.ReadPersistentStateID(dl.db.diskdb); persistentID < newFirst { - log.Debug("Skip tail truncation", "persistentID", persistentID, "tailID", tail+1, "headID", diff.stateID(), "limit", limit) + log.Debug("Skip tail truncation", "type", typ, "persistentID", persistentID, "tailID", tail+1, "headID", diff.stateID(), "limit", limit) return true, nil } - pruned, err := truncateFromTail(dl.db.stateFreezer, typeStateHistory, newFirst-1) + pruned, err := truncateFromTail(freezer, typ, newFirst-1) if err != nil { return false, err } - log.Debug("Pruned state history", "items", pruned, "tailid", newFirst) + log.Debug("Pruned history", "type", typ, "items", pruned, "tailid", newFirst) return false, nil } @@ -396,10 +421,22 @@ func (dl *diskLayer) commit(bottom *diffLayer, force bool) (*diskLayer, error) { // Construct and store the state history first. If crash happens after storing // the state history but without flushing the corresponding states(journal), // the stored state history will be truncated from head in the next restart. - flush, err := dl.writeStateHistory(bottom) + flushA, err := dl.writeHistory(typeStateHistory, bottom) if err != nil { return nil, err } + // Construct and store the trienode history first. If crash happens after + // storing the trienode history but without flushing the corresponding + // states(journal), the stored trienode history will be truncated from head + // in the next restart. 
+ flushB, err := dl.writeHistory(typeTrienodeHistory, bottom) + if err != nil { + return nil, err + } + // Since the state history and trienode history may be configured with different + // lengths, the buffer will be flushed once either of them meets its threshold. + flush := flushA || flushB + // Mark the diskLayer as stale before applying any mutations on top. dl.stale = true @@ -448,7 +485,7 @@ func (dl *diskLayer) commit(bottom *diffLayer, force bool) (*diskLayer, error) { // Freeze the live buffer and schedule background flushing dl.frozen = combined - dl.frozen.flush(bottom.root, dl.db.diskdb, dl.db.stateFreezer, progress, dl.nodes, dl.states, bottom.stateID(), func() { + dl.frozen.flush(bottom.root, dl.db.diskdb, []ethdb.AncientWriter{dl.db.stateFreezer, dl.db.trienodeFreezer}, progress, dl.nodes, dl.states, bottom.stateID(), func() { // Resume the background generation if it's not completed yet. // The generator is assumed to be available if the progress is // not nil. @@ -504,12 +541,17 @@ func (dl *diskLayer) revert(h *stateHistory) (*diskLayer, error) { dl.stale = true - // Unindex the corresponding state history + // Unindex the corresponding history if dl.db.stateIndexer != nil { if err := dl.db.stateIndexer.shorten(dl.id); err != nil { return nil, err } } + if dl.db.trienodeIndexer != nil { + if err := dl.db.trienodeIndexer.shorten(dl.id); err != nil { + return nil, err + } + } // State change may be applied to node buffer, or the persistent // state, depends on if node buffer is empty or not. 
If the node // buffer is not empty, it means that the state transition that diff --git a/triedb/pathdb/history.go b/triedb/pathdb/history.go index 86224ea5b2..9efaa3ab24 100644 --- a/triedb/pathdb/history.go +++ b/triedb/pathdb/history.go @@ -22,6 +22,7 @@ import ( "iter" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/log" ) @@ -323,3 +324,133 @@ func truncateFromTail(store ethdb.AncientStore, typ historyType, ntail uint64) ( // Associated root->id mappings are left in the database. return int(ntail - otail), nil } + +// purgeHistory resets the history and also purges the associated index data. +func purgeHistory(store ethdb.ResettableAncientStore, disk ethdb.KeyValueStore, typ historyType) { + if store == nil { + return + } + frozen, err := store.Ancients() + if err != nil { + log.Crit("Failed to retrieve head of history", "type", typ, "err", err) + } + if frozen == 0 { + return + } + // Purge all state history indexing data first + batch := disk.NewBatch() + if typ == typeStateHistory { + rawdb.DeleteStateHistoryIndexMetadata(batch) + rawdb.DeleteStateHistoryIndexes(batch) + } else { + rawdb.DeleteTrienodeHistoryIndexMetadata(batch) + rawdb.DeleteTrienodeHistoryIndexes(batch) + } + if err := batch.Write(); err != nil { + log.Crit("Failed to purge history index", "type", typ, "err", err) + } + if err := store.Reset(); err != nil { + log.Crit("Failed to reset history", "type", typ, "err", err) + } + log.Info("Truncated extraneous history", "type", typ) +} + +// syncHistory explicitly sync the provided history stores. 
+func syncHistory(stores ...ethdb.AncientWriter) error { + for _, store := range stores { + if store == nil { + continue + } + if err := store.SyncAncient(); err != nil { + return err + } + } + return nil +} + +// repairHistory truncates any leftover history objects in either the state +// history or the trienode history, which may occur due to an unclean shutdown +// or other unexpected events. +// +// Additionally, this mechanism ensures that the state history and trienode +// history remain aligned. Since the trienode history is optional and not +// required by regular users, a gap between the trienode history and the +// persistent state may appear if the trienode history was disabled during the +// previous run. This process detects and resolves such gaps, preventing +// unexpected panics. +func repairHistory(db ethdb.Database, isVerkle bool, readOnly bool, stateID uint64, enableTrienode bool) (ethdb.ResettableAncientStore, ethdb.ResettableAncientStore, error) { + ancient, err := db.AncientDatadir() + if err != nil { + // TODO error out if ancient store is disabled. A tons of unit tests + // disable the ancient store thus the error here will immediately fail + // all of them. Fix the tests first. + return nil, nil, nil + } + // State history is mandatory as it is the key component that ensures + // resilience to deep reorgs. + states, err := rawdb.NewStateFreezer(ancient, isVerkle, readOnly) + if err != nil { + log.Crit("Failed to open state history freezer", "err", err) + } + + // Trienode history is optional and only required for building archive + // node with state proofs. + var trienodes ethdb.ResettableAncientStore + if enableTrienode { + trienodes, err = rawdb.NewTrienodeFreezer(ancient, isVerkle, readOnly) + if err != nil { + log.Crit("Failed to open trienode history freezer", "err", err) + } + } + + // Reset the both histories if the trie database is not initialized yet. 
+ // This action is necessary because these histories are not expected + // to exist without an initialized trie database. + if stateID == 0 { + purgeHistory(states, db, typeStateHistory) + purgeHistory(trienodes, db, typeTrienodeHistory) + return states, trienodes, nil + } + // Truncate excessive history entries in either the state history or + // the trienode history, ensuring both histories remain aligned with + // the state. + head, err := states.Ancients() + if err != nil { + return nil, nil, err + } + if stateID > head { + return nil, nil, fmt.Errorf("gap between state [#%d] and state history [#%d]", stateID, head) + } + if trienodes != nil { + th, err := trienodes.Ancients() + if err != nil { + return nil, nil, err + } + if stateID > th { + return nil, nil, fmt.Errorf("gap between state [#%d] and trienode history [#%d]", stateID, th) + } + if th != head { + log.Info("Histories are not aligned with each other", "state", head, "trienode", th) + head = min(head, th) + } + } + head = min(head, stateID) + + // Truncate the extra history elements above in freezer in case it's not + // aligned with the state. It might happen after an unclean shutdown. 
+ truncate := func(store ethdb.AncientStore, typ historyType, nhead uint64) { + if store == nil { + return + } + pruned, err := truncateFromHead(store, typ, nhead) + if err != nil { + log.Crit("Failed to truncate extra histories", "typ", typ, "err", err) + } + if pruned != 0 { + log.Warn("Truncated extra histories", "typ", typ, "number", pruned) + } + } + truncate(states, typeStateHistory, head) + truncate(trienodes, typeTrienodeHistory, head) + return states, trienodes, nil +} diff --git a/triedb/pathdb/history_trienode.go b/triedb/pathdb/history_trienode.go index 67be9de491..c584ac696c 100644 --- a/triedb/pathdb/history_trienode.go +++ b/triedb/pathdb/history_trienode.go @@ -672,7 +672,6 @@ func (r *trienodeHistoryReader) read(owner common.Hash, path string) ([]byte, er } // writeTrienodeHistory persists the trienode history associated with the given diff layer. -// nolint:unused func writeTrienodeHistory(writer ethdb.AncientWriter, dl *diffLayer) error { start := time.Now() h := newTrienodeHistory(dl.rootHash(), dl.parent.rootHash(), dl.block, dl.nodes.nodeOrigin) diff --git a/triedb/pathdb/journal.go b/triedb/pathdb/journal.go index 02bdef5d34..efcc3f2549 100644 --- a/triedb/pathdb/journal.go +++ b/triedb/pathdb/journal.go @@ -338,10 +338,8 @@ func (db *Database) Journal(root common.Hash) error { // but the ancient store is not properly closed, resulting in recent writes // being lost. After a restart, the ancient store would then be misaligned // with the disk layer, causing data corruption. 
- if db.stateFreezer != nil { - if err := db.stateFreezer.SyncAncient(); err != nil { - return err - } + if err := syncHistory(db.stateFreezer, db.trienodeFreezer); err != nil { + return err } // Store the journal into the database and return var ( diff --git a/triedb/pathdb/metrics.go b/triedb/pathdb/metrics.go index 31c40053fc..c4d6be28f7 100644 --- a/triedb/pathdb/metrics.go +++ b/triedb/pathdb/metrics.go @@ -73,11 +73,8 @@ var ( stateHistoryDataBytesMeter = metrics.NewRegisteredMeter("pathdb/history/state/bytes/data", nil) stateHistoryIndexBytesMeter = metrics.NewRegisteredMeter("pathdb/history/state/bytes/index", nil) - //nolint:unused - trienodeHistoryBuildTimeMeter = metrics.NewRegisteredResettingTimer("pathdb/history/trienode/time", nil) - //nolint:unused - trienodeHistoryDataBytesMeter = metrics.NewRegisteredMeter("pathdb/history/trienode/bytes/data", nil) - //nolint:unused + trienodeHistoryBuildTimeMeter = metrics.NewRegisteredResettingTimer("pathdb/history/trienode/time", nil) + trienodeHistoryDataBytesMeter = metrics.NewRegisteredMeter("pathdb/history/trienode/bytes/data", nil) trienodeHistoryIndexBytesMeter = metrics.NewRegisteredMeter("pathdb/history/trienode/bytes/index", nil) stateIndexHistoryTimer = metrics.NewRegisteredResettingTimer("pathdb/history/state/index/time", nil) From 3d78da9171cc4e6ecc013ce1c9a4df911cdaa88d Mon Sep 17 00:00:00 2001 From: Mael Regnery Date: Sat, 17 Jan 2026 14:34:08 +0100 Subject: [PATCH 246/277] rpc: add a rpc.rangelimit flag (#33163) Adding an RPC flag to limit the block range size for eth_getLogs and eth_newFilter requests. 
closing https://github.com/ethereum/go-ethereum/issues/24508 --------- Co-authored-by: MariusVanDerWijden --- cmd/geth/main.go | 1 + cmd/utils/flags.go | 10 +++++ eth/ethconfig/config.go | 4 ++ eth/ethconfig/gen_config.go | 6 +++ eth/filters/api.go | 6 ++- eth/filters/filter.go | 10 ++++- eth/filters/filter_system.go | 1 + eth/filters/filter_test.go | 78 ++++++++++++++++++++++++++---------- graphql/graphql.go | 2 +- 9 files changed, 92 insertions(+), 26 deletions(-) diff --git a/cmd/geth/main.go b/cmd/geth/main.go index 9440759289..e838a846a1 100644 --- a/cmd/geth/main.go +++ b/cmd/geth/main.go @@ -194,6 +194,7 @@ var ( utils.BatchResponseMaxSize, utils.RPCTxSyncDefaultTimeoutFlag, utils.RPCTxSyncMaxTimeoutFlag, + utils.RPCGlobalRangeLimitFlag, } metricsFlags = []cli.Flag{ diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index 660c986ac9..fe8375454f 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -642,6 +642,12 @@ var ( Value: ethconfig.Defaults.TxSyncMaxTimeout, Category: flags.APICategory, } + RPCGlobalRangeLimitFlag = &cli.Uint64Flag{ + Name: "rpc.rangelimit", + Usage: "Maximum block range (end - begin) allowed for range queries (0 = unlimited)", + Value: ethconfig.Defaults.RangeLimit, + Category: flags.APICategory, + } // Authenticated RPC HTTP settings AuthListenFlag = &cli.StringFlag{ Name: "authrpc.addr", @@ -1762,6 +1768,9 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) { if ctx.IsSet(RPCTxSyncMaxTimeoutFlag.Name) { cfg.TxSyncMaxTimeout = ctx.Duration(RPCTxSyncMaxTimeoutFlag.Name) } + if ctx.IsSet(RPCGlobalRangeLimitFlag.Name) { + cfg.RangeLimit = ctx.Uint64(RPCGlobalRangeLimitFlag.Name) + } if !ctx.Bool(SnapshotFlag.Name) || cfg.SnapshotCache == 0 { // If snap-sync is requested, this flag is also required if cfg.SyncMode == ethconfig.SnapSync { @@ -2106,6 +2115,7 @@ func RegisterFilterAPI(stack *node.Node, backend ethapi.Backend, ethcfg *ethconf filterSystem := filters.NewFilterSystem(backend, filters.Config{ 
LogCacheSize: ethcfg.FilterLogCacheSize, LogQueryLimit: ethcfg.LogQueryLimit, + RangeLimit: ethcfg.RangeLimit, }) stack.RegisterAPIs([]rpc.API{{ Namespace: "eth", diff --git a/eth/ethconfig/config.go b/eth/ethconfig/config.go index 72123c41b3..9e967e45cc 100644 --- a/eth/ethconfig/config.go +++ b/eth/ethconfig/config.go @@ -74,6 +74,7 @@ var Defaults = Config{ TxSyncDefaultTimeout: 20 * time.Second, TxSyncMaxTimeout: 1 * time.Minute, SlowBlockThreshold: time.Second * 2, + RangeLimit: 0, } //go:generate go run github.com/fjl/gencodec -type Config -formats toml -out gen_config.go @@ -196,6 +197,9 @@ type Config struct { // EIP-7966: eth_sendRawTransactionSync timeouts TxSyncDefaultTimeout time.Duration `toml:",omitempty"` TxSyncMaxTimeout time.Duration `toml:",omitempty"` + + // RangeLimit restricts the maximum range (end - start) for range queries. + RangeLimit uint64 `toml:",omitempty"` } // CreateConsensusEngine creates a consensus engine for the given chain config. diff --git a/eth/ethconfig/gen_config.go b/eth/ethconfig/gen_config.go index ed6c9b0197..44b8c6306c 100644 --- a/eth/ethconfig/gen_config.go +++ b/eth/ethconfig/gen_config.go @@ -66,6 +66,7 @@ func (c Config) MarshalTOML() (interface{}, error) { OverrideVerkle *uint64 `toml:",omitempty"` TxSyncDefaultTimeout time.Duration `toml:",omitempty"` TxSyncMaxTimeout time.Duration `toml:",omitempty"` + RangeLimit uint64 `toml:",omitempty"` } var enc Config enc.Genesis = c.Genesis @@ -117,6 +118,7 @@ func (c Config) MarshalTOML() (interface{}, error) { enc.OverrideVerkle = c.OverrideVerkle enc.TxSyncDefaultTimeout = c.TxSyncDefaultTimeout enc.TxSyncMaxTimeout = c.TxSyncMaxTimeout + enc.RangeLimit = c.RangeLimit return &enc, nil } @@ -172,6 +174,7 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error { OverrideVerkle *uint64 `toml:",omitempty"` TxSyncDefaultTimeout *time.Duration `toml:",omitempty"` TxSyncMaxTimeout *time.Duration `toml:",omitempty"` + RangeLimit *uint64 `toml:",omitempty"` } 
var dec Config if err := unmarshal(&dec); err != nil { @@ -324,5 +327,8 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error { if dec.TxSyncMaxTimeout != nil { c.TxSyncMaxTimeout = *dec.TxSyncMaxTimeout } + if dec.RangeLimit != nil { + c.RangeLimit = *dec.RangeLimit + } return nil } diff --git a/eth/filters/api.go b/eth/filters/api.go index 4ed7e5be0a..f4bed35b26 100644 --- a/eth/filters/api.go +++ b/eth/filters/api.go @@ -89,6 +89,7 @@ type FilterAPI struct { filters map[rpc.ID]*filter timeout time.Duration logQueryLimit int + rangeLimit uint64 } // NewFilterAPI returns a new FilterAPI instance. @@ -99,6 +100,7 @@ func NewFilterAPI(system *FilterSystem) *FilterAPI { filters: make(map[rpc.ID]*filter), timeout: system.cfg.Timeout, logQueryLimit: system.cfg.LogQueryLimit, + rangeLimit: system.cfg.RangeLimit, } go api.timeoutLoop(system.cfg.Timeout) @@ -479,7 +481,7 @@ func (api *FilterAPI) GetLogs(ctx context.Context, crit FilterCriteria) ([]*type return nil, &history.PrunedHistoryError{} } // Construct the range filter - filter = api.sys.NewRangeFilter(begin, end, crit.Addresses, crit.Topics) + filter = api.sys.NewRangeFilter(begin, end, crit.Addresses, crit.Topics, api.rangeLimit) } // Run the filter and return all the logs @@ -531,7 +533,7 @@ func (api *FilterAPI) GetFilterLogs(ctx context.Context, id rpc.ID) ([]*types.Lo end = f.crit.ToBlock.Int64() } // Construct the range filter - filter = api.sys.NewRangeFilter(begin, end, f.crit.Addresses, f.crit.Topics) + filter = api.sys.NewRangeFilter(begin, end, f.crit.Addresses, f.crit.Topics, api.rangeLimit) } // Run the filter and return all the logs logs, err := filter.Logs(ctx) diff --git a/eth/filters/filter.go b/eth/filters/filter.go index 10afc84fe9..9915f28128 100644 --- a/eth/filters/filter.go +++ b/eth/filters/filter.go @@ -19,6 +19,7 @@ package filters import ( "context" "errors" + "fmt" "math" "math/big" "slices" @@ -44,15 +45,17 @@ type Filter struct { begin, end int64 // Range interval 
if filtering multiple blocks rangeLogsTestHook chan rangeLogsTestEvent + rangeLimit uint64 } // NewRangeFilter creates a new filter which uses a bloom filter on blocks to // figure out whether a particular block is interesting or not. -func (sys *FilterSystem) NewRangeFilter(begin, end int64, addresses []common.Address, topics [][]common.Hash) *Filter { +func (sys *FilterSystem) NewRangeFilter(begin, end int64, addresses []common.Address, topics [][]common.Hash, rangeLimit uint64) *Filter { // Create a generic filter and convert it into a range filter filter := newFilter(sys, addresses, topics) filter.begin = begin filter.end = end + filter.rangeLimit = rangeLimit return filter } @@ -143,6 +146,9 @@ func (f *Filter) Logs(ctx context.Context) ([]*types.Log, error) { if err != nil { return nil, err } + if f.rangeLimit != 0 && (end-begin) > f.rangeLimit { + return nil, fmt.Errorf("exceed maximum block range: %d", f.rangeLimit) + } return f.rangeLogs(ctx, begin, end) } @@ -494,7 +500,7 @@ func (f *Filter) checkMatches(ctx context.Context, header *types.Header) ([]*typ // filterLogs creates a slice of logs matching the given criteria. 
func filterLogs(logs []*types.Log, fromBlock, toBlock *big.Int, addresses []common.Address, topics [][]common.Hash) []*types.Log { - var check = func(log *types.Log) bool { + check := func(log *types.Log) bool { if fromBlock != nil && fromBlock.Int64() >= 0 && fromBlock.Uint64() > log.BlockNumber { return false } diff --git a/eth/filters/filter_system.go b/eth/filters/filter_system.go index 8b9bce47b9..1f92c4e36f 100644 --- a/eth/filters/filter_system.go +++ b/eth/filters/filter_system.go @@ -44,6 +44,7 @@ type Config struct { LogCacheSize int // maximum number of cached blocks (default: 32) Timeout time.Duration // how long filters stay active (default: 5min) LogQueryLimit int // maximum number of addresses allowed in filter criteria (default: 1000) + RangeLimit uint64 // maximum block range allowed in filter criteria (default: 0) } func (cfg Config) withDefaults() Config { diff --git a/eth/filters/filter_test.go b/eth/filters/filter_test.go index f44ada20b1..63727200f7 100644 --- a/eth/filters/filter_test.go +++ b/eth/filters/filter_test.go @@ -109,7 +109,7 @@ func benchmarkFilters(b *testing.B, history uint64, noHistory bool) { backend.startFilterMaps(history, noHistory, filtermaps.DefaultParams) defer backend.stopFilterMaps() - filter := sys.NewRangeFilter(0, int64(rpc.LatestBlockNumber), []common.Address{addr1, addr2, addr3, addr4}, nil) + filter := sys.NewRangeFilter(0, int64(rpc.LatestBlockNumber), []common.Address{addr1, addr2, addr3, addr4}, nil, 0) for b.Loop() { filter.begin = 0 logs, _ := filter.Logs(context.Background()) @@ -317,70 +317,70 @@ func testFilters(t *testing.T, history uint64, noHistory bool) { want: 
`[{"address":"0xfe00000000000000000000000000000000000000","topics":["0x0000000000000000000000000000000000000000000000000000746f70696332","0x0000000000000000000000000000000000000000000000000000746f70696331"],"data":"0x","blockNumber":"0x3","transactionHash":"0xdefe471992a07a02acdfbe33edaae22fbb86d7d3cec3f1b8e4e77702fb3acc1d","transactionIndex":"0x0","blockHash":"0x7a7556792ca7d37882882e2b001fe14833eaf81c2c7f865c9c771ec37a024f6b","blockTimestamp":"0x1e","logIndex":"0x0","removed":false}]`, }, { - f: sys.NewRangeFilter(0, int64(rpc.LatestBlockNumber), []common.Address{contract}, [][]common.Hash{{hash1, hash2, hash3, hash4}}), + f: sys.NewRangeFilter(0, int64(rpc.LatestBlockNumber), []common.Address{contract}, [][]common.Hash{{hash1, hash2, hash3, hash4}}, 0), want: `[{"address":"0xfe00000000000000000000000000000000000000","topics":["0x0000000000000000000000000000000000000000000000000000746f70696331"],"data":"0x","blockNumber":"0x2","transactionHash":"0xa8028c655b6423204c8edfbc339f57b042d6bec2b6a61145d76b7c08b4cccd42","transactionIndex":"0x0","blockHash":"0x24417bb49ce44cfad65da68f33b510bf2a129c0d89ccf06acb6958b8585ccf34","blockTimestamp":"0x14","logIndex":"0x0","removed":false},{"address":"0xfe00000000000000000000000000000000000000","topics":["0x0000000000000000000000000000000000000000000000000000746f70696332","0x0000000000000000000000000000000000000000000000000000746f70696331"],"data":"0x","blockNumber":"0x3","transactionHash":"0xdefe471992a07a02acdfbe33edaae22fbb86d7d3cec3f1b8e4e77702fb3acc1d","transactionIndex":"0x0","blockHash":"0x7a7556792ca7d37882882e2b001fe14833eaf81c2c7f865c9c771ec37a024f6b","blockTimestamp":"0x1e","logIndex":"0x0","removed":false},{"address":"0xfe00000000000000000000000000000000000000","topics":["0x0000000000000000000000000000000000000000000000000000746f70696334"],"data":"0x","blockNumber":"0x3e8","transactionHash":"0x9a87842100a638dfa5da8842b4beda691d2fd77b0c84b57f24ecfa9fb208f747","transactionIndex":"0x0","blockHash":"0xb360bad5265261c075ece
02d3bf0e39498a6a76310482cdfd90588748e6c5ee0","blockTimestamp":"0x2710","logIndex":"0x0","removed":false}]`, }, { - f: sys.NewRangeFilter(900, 999, []common.Address{contract}, [][]common.Hash{{hash3}}), + f: sys.NewRangeFilter(900, 999, []common.Address{contract}, [][]common.Hash{{hash3}}, 0), }, { - f: sys.NewRangeFilter(990, int64(rpc.LatestBlockNumber), []common.Address{contract2}, [][]common.Hash{{hash3}}), + f: sys.NewRangeFilter(990, int64(rpc.LatestBlockNumber), []common.Address{contract2}, [][]common.Hash{{hash3}}, 0), want: `[{"address":"0xff00000000000000000000000000000000000000","topics":["0x0000000000000000000000000000000000000000000000000000746f70696333"],"data":"0x","blockNumber":"0x3e7","transactionHash":"0x53e3675800c6908424b61b35a44e51ca4c73ca603e58a65b32c67968b4f42200","transactionIndex":"0x0","blockHash":"0x2e4620a2b426b0612ec6cad9603f466723edaed87f98c9137405dd4f7a2409ff","blockTimestamp":"0x2706","logIndex":"0x0","removed":false}]`, }, { - f: sys.NewRangeFilter(1, 10, []common.Address{contract}, [][]common.Hash{{hash2}, {hash1}}), + f: sys.NewRangeFilter(1, 10, []common.Address{contract}, [][]common.Hash{{hash2}, {hash1}}, 0), want: `[{"address":"0xfe00000000000000000000000000000000000000","topics":["0x0000000000000000000000000000000000000000000000000000746f70696332","0x0000000000000000000000000000000000000000000000000000746f70696331"],"data":"0x","blockNumber":"0x3","transactionHash":"0xdefe471992a07a02acdfbe33edaae22fbb86d7d3cec3f1b8e4e77702fb3acc1d","transactionIndex":"0x0","blockHash":"0x7a7556792ca7d37882882e2b001fe14833eaf81c2c7f865c9c771ec37a024f6b","blockTimestamp":"0x1e","logIndex":"0x0","removed":false}]`, }, { - f: sys.NewRangeFilter(1, 10, nil, [][]common.Hash{{hash1, hash2}}), + f: sys.NewRangeFilter(1, 10, nil, [][]common.Hash{{hash1, hash2}}, 0), want: 
`[{"address":"0xfe00000000000000000000000000000000000000","topics":["0x0000000000000000000000000000000000000000000000000000746f70696331"],"data":"0x","blockNumber":"0x2","transactionHash":"0xa8028c655b6423204c8edfbc339f57b042d6bec2b6a61145d76b7c08b4cccd42","transactionIndex":"0x0","blockHash":"0x24417bb49ce44cfad65da68f33b510bf2a129c0d89ccf06acb6958b8585ccf34","blockTimestamp":"0x14","logIndex":"0x0","removed":false},{"address":"0xff00000000000000000000000000000000000000","topics":["0x0000000000000000000000000000000000000000000000000000746f70696331"],"data":"0x","blockNumber":"0x2","transactionHash":"0xdba3e2ea9a7d690b722d70ee605fd67ba4c00d1d3aecd5cf187a7b92ad8eb3df","transactionIndex":"0x1","blockHash":"0x24417bb49ce44cfad65da68f33b510bf2a129c0d89ccf06acb6958b8585ccf34","blockTimestamp":"0x14","logIndex":"0x1","removed":false},{"address":"0xfe00000000000000000000000000000000000000","topics":["0x0000000000000000000000000000000000000000000000000000746f70696332","0x0000000000000000000000000000000000000000000000000000746f70696331"],"data":"0x","blockNumber":"0x3","transactionHash":"0xdefe471992a07a02acdfbe33edaae22fbb86d7d3cec3f1b8e4e77702fb3acc1d","transactionIndex":"0x0","blockHash":"0x7a7556792ca7d37882882e2b001fe14833eaf81c2c7f865c9c771ec37a024f6b","blockTimestamp":"0x1e","logIndex":"0x0","removed":false}]`, }, { - f: sys.NewRangeFilter(0, int64(rpc.LatestBlockNumber), nil, [][]common.Hash{{common.BytesToHash([]byte("fail"))}}), + f: sys.NewRangeFilter(0, int64(rpc.LatestBlockNumber), nil, [][]common.Hash{{common.BytesToHash([]byte("fail"))}}, 0), }, { - f: sys.NewRangeFilter(0, int64(rpc.LatestBlockNumber), []common.Address{common.BytesToAddress([]byte("failmenow"))}, nil), + f: sys.NewRangeFilter(0, int64(rpc.LatestBlockNumber), []common.Address{common.BytesToAddress([]byte("failmenow"))}, nil, 0), }, { - f: sys.NewRangeFilter(0, int64(rpc.LatestBlockNumber), nil, [][]common.Hash{{common.BytesToHash([]byte("fail"))}, {hash1}}), + f: sys.NewRangeFilter(0, 
int64(rpc.LatestBlockNumber), nil, [][]common.Hash{{common.BytesToHash([]byte("fail"))}, {hash1}}, 0), }, { - f: sys.NewRangeFilter(int64(rpc.LatestBlockNumber), int64(rpc.LatestBlockNumber), nil, nil), + f: sys.NewRangeFilter(int64(rpc.LatestBlockNumber), int64(rpc.LatestBlockNumber), nil, nil, 0), want: `[{"address":"0xfe00000000000000000000000000000000000000","topics":["0x0000000000000000000000000000000000000000000000000000746f70696334"],"data":"0x","blockNumber":"0x3e8","transactionHash":"0x9a87842100a638dfa5da8842b4beda691d2fd77b0c84b57f24ecfa9fb208f747","transactionIndex":"0x0","blockHash":"0xb360bad5265261c075ece02d3bf0e39498a6a76310482cdfd90588748e6c5ee0","blockTimestamp":"0x2710","logIndex":"0x0","removed":false}]`, }, { - f: sys.NewRangeFilter(int64(rpc.FinalizedBlockNumber), int64(rpc.LatestBlockNumber), nil, nil), + f: sys.NewRangeFilter(int64(rpc.FinalizedBlockNumber), int64(rpc.LatestBlockNumber), nil, nil, 0), want: `[{"address":"0xff00000000000000000000000000000000000000","topics":["0x0000000000000000000000000000000000000000000000000000746f70696333"],"data":"0x","blockNumber":"0x3e7","transactionHash":"0x53e3675800c6908424b61b35a44e51ca4c73ca603e58a65b32c67968b4f42200","transactionIndex":"0x0","blockHash":"0x2e4620a2b426b0612ec6cad9603f466723edaed87f98c9137405dd4f7a2409ff","blockTimestamp":"0x2706","logIndex":"0x0","removed":false},{"address":"0xfe00000000000000000000000000000000000000","topics":["0x0000000000000000000000000000000000000000000000000000746f70696334"],"data":"0x","blockNumber":"0x3e8","transactionHash":"0x9a87842100a638dfa5da8842b4beda691d2fd77b0c84b57f24ecfa9fb208f747","transactionIndex":"0x0","blockHash":"0xb360bad5265261c075ece02d3bf0e39498a6a76310482cdfd90588748e6c5ee0","blockTimestamp":"0x2710","logIndex":"0x0","removed":false}]`, }, { - f: sys.NewRangeFilter(int64(rpc.FinalizedBlockNumber), int64(rpc.FinalizedBlockNumber), nil, nil), + f: sys.NewRangeFilter(int64(rpc.FinalizedBlockNumber), int64(rpc.FinalizedBlockNumber), nil, 
nil, 0), want: `[{"address":"0xff00000000000000000000000000000000000000","topics":["0x0000000000000000000000000000000000000000000000000000746f70696333"],"data":"0x","blockNumber":"0x3e7","transactionHash":"0x53e3675800c6908424b61b35a44e51ca4c73ca603e58a65b32c67968b4f42200","transactionIndex":"0x0","blockHash":"0x2e4620a2b426b0612ec6cad9603f466723edaed87f98c9137405dd4f7a2409ff","blockTimestamp":"0x2706","logIndex":"0x0","removed":false}]`, }, { - f: sys.NewRangeFilter(int64(rpc.LatestBlockNumber), int64(rpc.FinalizedBlockNumber), nil, nil), + f: sys.NewRangeFilter(int64(rpc.LatestBlockNumber), int64(rpc.FinalizedBlockNumber), nil, nil, 0), }, { - f: sys.NewRangeFilter(int64(rpc.SafeBlockNumber), int64(rpc.LatestBlockNumber), nil, nil), + f: sys.NewRangeFilter(int64(rpc.SafeBlockNumber), int64(rpc.LatestBlockNumber), nil, nil, 0), err: "safe header not found", }, { - f: sys.NewRangeFilter(int64(rpc.SafeBlockNumber), int64(rpc.SafeBlockNumber), nil, nil), + f: sys.NewRangeFilter(int64(rpc.SafeBlockNumber), int64(rpc.SafeBlockNumber), nil, nil, 0), err: "safe header not found", }, { - f: sys.NewRangeFilter(int64(rpc.LatestBlockNumber), int64(rpc.SafeBlockNumber), nil, nil), + f: sys.NewRangeFilter(int64(rpc.LatestBlockNumber), int64(rpc.SafeBlockNumber), nil, nil, 0), err: "safe header not found", }, { - f: sys.NewRangeFilter(int64(rpc.PendingBlockNumber), int64(rpc.PendingBlockNumber), nil, nil), + f: sys.NewRangeFilter(int64(rpc.PendingBlockNumber), int64(rpc.PendingBlockNumber), nil, nil, 0), err: errPendingLogsUnsupported.Error(), }, { - f: sys.NewRangeFilter(int64(rpc.LatestBlockNumber), int64(rpc.PendingBlockNumber), nil, nil), + f: sys.NewRangeFilter(int64(rpc.LatestBlockNumber), int64(rpc.PendingBlockNumber), nil, nil, 0), err: errPendingLogsUnsupported.Error(), }, { - f: sys.NewRangeFilter(int64(rpc.PendingBlockNumber), int64(rpc.LatestBlockNumber), nil, nil), + f: sys.NewRangeFilter(int64(rpc.PendingBlockNumber), int64(rpc.LatestBlockNumber), nil, nil, 0), 
err: errPendingLogsUnsupported.Error(), }, } { @@ -403,7 +403,7 @@ func testFilters(t *testing.T, history uint64, noHistory bool) { } t.Run("timeout", func(t *testing.T) { - f := sys.NewRangeFilter(0, rpc.LatestBlockNumber.Int64(), nil, nil) + f := sys.NewRangeFilter(0, rpc.LatestBlockNumber.Int64(), nil, nil, 0) ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(-time.Hour)) defer cancel() _, err := f.Logs(ctx) @@ -464,7 +464,7 @@ func TestRangeLogs(t *testing.T) { newFilter := func(begin, end int64) { testCase++ event = 0 - filter = sys.NewRangeFilter(begin, end, addresses, nil) + filter = sys.NewRangeFilter(begin, end, addresses, nil, 0) filter.rangeLogsTestHook = make(chan rangeLogsTestEvent) go func(filter *Filter) { filter.Logs(context.Background()) @@ -601,3 +601,39 @@ func TestRangeLogs(t *testing.T) { expEvent(rangeLogsTestReorg, 400, 901) expEvent(rangeLogsTestDone, 0, 0) } + +func TestRangeLimit(t *testing.T) { + db := rawdb.NewMemoryDatabase() + backend, sys := newTestFilterSystem(db, Config{}) + defer db.Close() + + gspec := &core.Genesis{ + Config: params.TestChainConfig, + Alloc: types.GenesisAlloc{}, + BaseFee: big.NewInt(params.InitialBaseFee), + } + _, err := gspec.Commit(db, triedb.NewDatabase(db, nil), nil) + if err != nil { + t.Fatal(err) + } + chain, _ := core.GenerateChain(gspec.Config, gspec.ToBlock(), ethash.NewFaker(), db, 10, func(i int, gen *core.BlockGen) {}) + options := core.DefaultConfig().WithStateScheme(rawdb.HashScheme) + options.TxLookupLimit = 0 + bc, err := core.NewBlockChain(db, gspec, ethash.NewFaker(), options) + if err != nil { + t.Fatal(err) + } + _, err = bc.InsertChain(chain) + if err != nil { + t.Fatal(err) + } + backend.startFilterMaps(0, false, filtermaps.DefaultParams) + defer backend.stopFilterMaps() + + // Set rangeLimit to 5, but request a range of 9 (end - begin = 9, from 0 to 9) + filter := sys.NewRangeFilter(0, 9, nil, nil, 5) + _, err = filter.Logs(context.Background()) + if err == nil || 
!strings.Contains(err.Error(), "exceed maximum block range") { + t.Fatalf("expected range limit error, got %v", err) + } +} diff --git a/graphql/graphql.go b/graphql/graphql.go index 244d6926a2..f5eec210a5 100644 --- a/graphql/graphql.go +++ b/graphql/graphql.go @@ -1433,7 +1433,7 @@ func (r *Resolver) Logs(ctx context.Context, args struct{ Filter FilterCriteria topics = *args.Filter.Topics } // Construct the range filter - filter := r.filterSystem.NewRangeFilter(begin, end, addresses, topics) + filter := r.filterSystem.NewRangeFilter(begin, end, addresses, topics, 0) return runFilter(ctx, r, filter) } From 0495350388354e36b149ee232d14c9d8e8c44409 Mon Sep 17 00:00:00 2001 From: Lessa <230214854+adblesss@users.noreply.github.com> Date: Sat, 17 Jan 2026 10:20:19 -0500 Subject: [PATCH 247/277] accounts/abi/bind/v2: replace rng in test (#33612) Replace deprecated rand.Seed() with rand.New(rand.NewSource()) in dep_tree_test.go. --- accounts/abi/bind/v2/dep_tree_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/accounts/abi/bind/v2/dep_tree_test.go b/accounts/abi/bind/v2/dep_tree_test.go index e686e3fec4..b2470d8a16 100644 --- a/accounts/abi/bind/v2/dep_tree_test.go +++ b/accounts/abi/bind/v2/dep_tree_test.go @@ -158,10 +158,10 @@ func testLinkCase(tcInput linkTestCaseInput) error { overrideAddrs = make(map[rune]common.Address) ) // generate deterministic addresses for the override set. - rand.Seed(42) + rng := rand.New(rand.NewSource(42)) for contract := range tcInput.overrides { var addr common.Address - rand.Read(addr[:]) + rng.Read(addr[:]) overrideAddrs[contract] = addr overridesAddrs[addr] = struct{}{} } From e78be59dc96bc0246f7ada6fa0982ac41e5e8332 Mon Sep 17 00:00:00 2001 From: lightclient <14004106+lightclient@users.noreply.github.com> Date: Sat, 17 Jan 2026 08:22:00 -0700 Subject: [PATCH 248/277] build: remove circleci config (#33616) This doesn't seem to be used anymore. 
--- circle.yml | 32 -------------------------------- 1 file changed, 32 deletions(-) delete mode 100644 circle.yml diff --git a/circle.yml b/circle.yml deleted file mode 100644 index 39ff5d83c6..0000000000 --- a/circle.yml +++ /dev/null @@ -1,32 +0,0 @@ -machine: - services: - - docker - -dependencies: - cache_directories: - - "~/.ethash" # Cache the ethash DAG generated by hive for consecutive builds - - "~/.docker" # Cache all docker images manually to avoid lengthy rebuilds - override: - # Restore all previously cached docker images - - mkdir -p ~/.docker - - for img in `ls ~/.docker`; do docker load -i ~/.docker/$img; done - - # Pull in and hive, restore cached ethash DAGs and do a dry run - - go get -u github.com/karalabe/hive - - (cd ~/.go_workspace/src/github.com/karalabe/hive && mkdir -p workspace/ethash/ ~/.ethash) - - (cd ~/.go_workspace/src/github.com/karalabe/hive && cp -r ~/.ethash/. workspace/ethash/) - - (cd ~/.go_workspace/src/github.com/karalabe/hive && hive --docker-noshell --client=NONE --test=. --sim=. --loglevel=6) - - # Cache all the docker images and the ethash DAGs - - for img in `docker images | grep -v "^" | tail -n +2 | awk '{print $1}'`; do docker save $img > ~/.docker/`echo $img | tr '/' ':'`.tar; done - - cp -r ~/.go_workspace/src/github.com/karalabe/hive/workspace/ethash/. ~/.ethash - -test: - override: - # Build Geth and move into a known folder - - make geth - - cp ./build/bin/geth $HOME/geth - - # Run hive and move all generated logs into the public artifacts folder - - (cd ~/.go_workspace/src/github.com/karalabe/hive && hive --docker-noshell --client=go-ethereum:local --override=$HOME/geth --test=. --sim=.) 
- - cp -r ~/.go_workspace/src/github.com/karalabe/hive/workspace/logs/* $CIRCLE_ARTIFACTS From ef815c59a207d50668afb343811ed7ff02cc640b Mon Sep 17 00:00:00 2001 From: maskpp Date: Sun, 18 Jan 2026 16:07:28 +0800 Subject: [PATCH 249/277] rlp: improve SplitListValues allocation efficiency (#33554) --- rlp/raw.go | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/rlp/raw.go b/rlp/raw.go index 114037df78..a696cb18c9 100644 --- a/rlp/raw.go +++ b/rlp/raw.go @@ -158,7 +158,12 @@ func SplitListValues(b []byte) ([][]byte, error) { if err != nil { return nil, err } - var elements [][]byte + n, err := CountValues(b) + if err != nil { + return nil, err + } + var elements = make([][]byte, 0, n) + for len(b) > 0 { _, tagsize, size, err := readKind(b) if err != nil { From 500931bc82808b63c1377d5e5713e23721bf4060 Mon Sep 17 00:00:00 2001 From: rjl493456442 Date: Mon, 19 Jan 2026 20:43:14 +0800 Subject: [PATCH 250/277] core/vm: add read only protection for opcodes (#33637) This PR reverts a part of changes brought by https://github.com/ethereum/go-ethereum/pull/33281/changes Specifically, read-only protection should always be enforced at the opcode level, regardless of whether the check has already been performed during gas metering. It should act as a gatekeeper, otherwise, it is easy to introduce errors by adding new gas measurement logic without consistently applying the read-only protection. 
--- core/vm/instructions.go | 15 ++++++++++++--- core/vm/operations_acl.go | 4 +++- 2 files changed, 15 insertions(+), 4 deletions(-) diff --git a/core/vm/instructions.go b/core/vm/instructions.go index 958cf9dedc..baf6df8117 100644 --- a/core/vm/instructions.go +++ b/core/vm/instructions.go @@ -518,6 +518,9 @@ func opSload(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) { } func opSstore(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) { + if evm.readOnly { + return nil, ErrWriteProtection + } loc := scope.Stack.pop() val := scope.Stack.pop() evm.StateDB.SetState(scope.Contract.Address(), loc.Bytes32(), val.Bytes32()) @@ -740,6 +743,9 @@ func opCall(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) { // Get the arguments from the memory. args := scope.Memory.GetPtr(inOffset.Uint64(), inSize.Uint64()) + if evm.readOnly && !value.IsZero() { + return nil, ErrWriteProtection + } if !value.IsZero() { gas += params.CallStipend } @@ -876,13 +882,15 @@ func opStop(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) { } func opSelfdestruct(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) { + if evm.readOnly { + return nil, ErrWriteProtection + } var ( this = scope.Contract.Address() balance = evm.StateDB.GetBalance(this) top = scope.Stack.pop() beneficiary = common.Address(top.Bytes20()) ) - // The funds are burned immediately if the beneficiary is the caller itself, // in this case, the beneficiary's balance is not increased. 
if this != beneficiary { @@ -904,15 +912,16 @@ func opSelfdestruct(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) { } func opSelfdestruct6780(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) { + if evm.readOnly { + return nil, ErrWriteProtection + } var ( this = scope.Contract.Address() balance = evm.StateDB.GetBalance(this) top = scope.Stack.pop() beneficiary = common.Address(top.Bytes20()) - newContract = evm.StateDB.IsNewContract(this) ) - // Contract is new and will actually be deleted. if newContract { if this != beneficiary { // Skip no-op transfer when self-destructing to self. diff --git a/core/vm/operations_acl.go b/core/vm/operations_acl.go index 4b7b87503d..ce394d9384 100644 --- a/core/vm/operations_acl.go +++ b/core/vm/operations_acl.go @@ -237,8 +237,10 @@ func makeSelfdestructGasFn(refundsEnabled bool) gasFunc { evm.StateDB.AddAddressToAccessList(address) gas = params.ColdAccountAccessCostEIP2929 + // Terminate the gas measurement if the leftover gas is not sufficient, + // it can effectively prevent accessing the states in the following steps if contract.Gas < gas { - return gas, nil + return 0, ErrOutOfGas } } // if empty and transfers value From d0af257aa20fe9d3e244570ee4abb9a78ff3b9c4 Mon Sep 17 00:00:00 2001 From: cui Date: Mon, 19 Jan 2026 20:45:31 +0800 Subject: [PATCH 251/277] triedb/pathdb: double check the list availability before regeneration (#33622) Co-authored-by: rjl493456442 --- triedb/pathdb/states.go | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/triedb/pathdb/states.go b/triedb/pathdb/states.go index dc737c3b53..c84e2dc60c 100644 --- a/triedb/pathdb/states.go +++ b/triedb/pathdb/states.go @@ -170,12 +170,13 @@ func (s *stateSet) accountList() []common.Hash { if list != nil { return list } - // No old sorted account list exists, generate a new one. 
It's possible that - // multiple threads waiting for the write lock may regenerate the list - // multiple times, which is acceptable. s.listLock.Lock() defer s.listLock.Unlock() + // Double check after acquiring the write lock + if list = s.accountListSorted; list != nil { + return list + } list = slices.SortedFunc(maps.Keys(s.accountData), common.Hash.Cmp) s.accountListSorted = list return list @@ -200,12 +201,13 @@ func (s *stateSet) storageList(accountHash common.Hash) []common.Hash { } s.listLock.RUnlock() - // No old sorted account list exists, generate a new one. It's possible that - // multiple threads waiting for the write lock may regenerate the list - // multiple times, which is acceptable. s.listLock.Lock() defer s.listLock.Unlock() + // Double check after acquiring the write lock + if list := s.storageListSorted[accountHash]; list != nil { + return list + } list := slices.SortedFunc(maps.Keys(s.storageData[accountHash]), common.Hash.Cmp) s.storageListSorted[accountHash] = list return list From d58f6291a2b16350799b6bb31103fc74bf36113a Mon Sep 17 00:00:00 2001 From: Felix Lange Date: Tue, 20 Jan 2026 10:33:09 +0100 Subject: [PATCH 252/277] internal/debug: add integration with Grafana Pyroscope (#33623) This adds support for Grafana Pyroscope, a continuous profiling solution. The client is configured similarly to metrics, i.e. run geth --pyroscope --pyroscope.server=https://... This commit is a resubmit of #33261 with some changes. 
--------- Co-authored-by: Carlos Bermudez Porto --- cmd/keeper/go.sum | 4 +- go.mod | 4 +- go.sum | 10 ++- internal/debug/flags.go | 14 ++++ internal/debug/pyroscope.go | 134 ++++++++++++++++++++++++++++++++++++ 5 files changed, 161 insertions(+), 5 deletions(-) create mode 100644 internal/debug/pyroscope.go diff --git a/cmd/keeper/go.sum b/cmd/keeper/go.sum index b93969cc60..62f10968e2 100644 --- a/cmd/keeper/go.sum +++ b/cmd/keeper/go.sum @@ -63,8 +63,8 @@ github.com/holiman/bloomfilter/v2 v2.0.3 h1:73e0e/V0tCydx14a0SCYS/EWCxgwLZ18CZcZ github.com/holiman/bloomfilter/v2 v2.0.3/go.mod h1:zpoh+gs7qcpqrHr3dB55AMiJwo0iURXE7ZOP9L9hSkA= github.com/holiman/uint256 v1.3.2 h1:a9EgMPSC1AAaj1SZL5zIQD3WbwTuHrMGOerLjGmM/TA= github.com/holiman/uint256 v1.3.2/go.mod h1:EOMSn4q6Nyt9P6efbI3bueV4e1b3dGlUCXeiRV4ng7E= -github.com/klauspost/compress v1.16.0 h1:iULayQNOReoYUe+1qtKOqw9CwJv3aNQu8ivo7lw1HU4= -github.com/klauspost/compress v1.16.0/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= +github.com/klauspost/compress v1.17.8 h1:YcnTYrq7MikUT7k0Yb5eceMmALQPYBW/Xltxn0NAMnU= +github.com/klauspost/compress v1.17.8/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= github.com/klauspost/cpuid/v2 v2.0.4/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/klauspost/cpuid/v2 v2.0.9 h1:lgaqFMSdTdQYdZ04uHyN2d/eKdOMyi2YLSvlQIBFYa4= github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= diff --git a/go.mod b/go.mod index 7bfb6d25d7..306b08ff1a 100644 --- a/go.mod +++ b/go.mod @@ -121,10 +121,12 @@ require ( github.com/golang/protobuf v1.5.4 // indirect github.com/google/go-querystring v1.1.0 // indirect github.com/google/pprof v0.0.0-20230207041349-798e818bf904 // indirect + github.com/grafana/pyroscope-go v1.2.7 + github.com/grafana/pyroscope-go/godeltaprof v0.1.9 // indirect github.com/influxdata/line-protocol v0.0.0-20200327222509-2487e7298839 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect 
github.com/kilic/bls12-381 v0.1.0 // indirect - github.com/klauspost/compress v1.16.0 // indirect + github.com/klauspost/compress v1.17.8 // indirect github.com/klauspost/cpuid/v2 v2.0.9 // indirect github.com/kr/pretty v0.3.1 // indirect github.com/kr/text v0.2.0 // indirect diff --git a/go.sum b/go.sum index c9978a3d9e..dad819e09d 100644 --- a/go.sum +++ b/go.sum @@ -188,6 +188,10 @@ github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+ github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/grafana/pyroscope-go v1.2.7 h1:VWBBlqxjyR0Cwk2W6UrE8CdcdD80GOFNutj0Kb1T8ac= +github.com/grafana/pyroscope-go v1.2.7/go.mod h1:o/bpSLiJYYP6HQtvcoVKiE9s5RiNgjYTj1DhiddP2Pc= +github.com/grafana/pyroscope-go/godeltaprof v0.1.9 h1:c1Us8i6eSmkW+Ez05d3co8kasnuOY813tbMN8i/a3Og= +github.com/grafana/pyroscope-go/godeltaprof v0.1.9/go.mod h1:2+l7K7twW49Ct4wFluZD3tZ6e0SjanjcUUBPVD/UuGU= github.com/graph-gophers/graphql-go v1.3.0 h1:Eb9x/q6MFpCLz7jBCiP/WTxjSDrYLR1QY41SORZyNJ0= github.com/graph-gophers/graphql-go v1.3.0/go.mod h1:9CQHMSxwO4MprSdzoIEobiHpoLtHm77vfxsvsIN5Vuc= github.com/hashicorp/go-bexpr v0.1.10 h1:9kuI5PFotCboP3dkDYFr/wi0gg0QVbSNz5oFRpxn4uE= @@ -223,8 +227,8 @@ github.com/kilic/bls12-381 v0.1.0 h1:encrdjqKMEvabVQ7qYOKu1OvhqpK4s47wDYtNiPtlp4 github.com/kilic/bls12-381 v0.1.0/go.mod h1:vDTTHJONJ6G+P2R74EhnyotQDTliQDnFEwhdmfzw1ig= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.16.0 h1:iULayQNOReoYUe+1qtKOqw9CwJv3aNQu8ivo7lw1HU4= -github.com/klauspost/compress v1.16.0/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= +github.com/klauspost/compress v1.17.8 
h1:YcnTYrq7MikUT7k0Yb5eceMmALQPYBW/Xltxn0NAMnU= +github.com/klauspost/compress v1.17.8/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= github.com/klauspost/cpuid/v2 v2.0.4/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/klauspost/cpuid/v2 v2.0.9 h1:lgaqFMSdTdQYdZ04uHyN2d/eKdOMyi2YLSvlQIBFYa4= github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= @@ -340,6 +344,8 @@ github.com/status-im/keycard-go v0.2.0/go.mod h1:wlp8ZLbsmrF6g6WjugPAx+IzoLrkdf9 github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= diff --git a/internal/debug/flags.go b/internal/debug/flags.go index 30b0ddb3be..b4e55c46c1 100644 --- a/internal/debug/flags.go +++ b/internal/debug/flags.go @@ -162,6 +162,11 @@ var Flags = []cli.Flag{ blockprofilerateFlag, cpuprofileFlag, traceFlag, + pyroscopeFlag, + pyroscopeServerFlag, + pyroscopeAuthUsernameFlag, + pyroscopeAuthPasswordFlag, + pyroscopeTagsFlag, } var ( @@ -298,6 +303,14 @@ func Setup(ctx *cli.Context) error { // It cannot be imported because it will cause a cyclical dependency. StartPProf(address, !ctx.IsSet("metrics.addr")) } + + // Pyroscope profiling + if ctx.Bool(pyroscopeFlag.Name) { + if err := startPyroscope(ctx); err != nil { + return err + } + } + if len(logFile) > 0 || rotation { log.Info("Logging configured", context...) 
} @@ -321,6 +334,7 @@ func StartPProf(address string, withMetrics bool) { // Exit stops all running profiles, flushing their output to the // respective file. func Exit() { + stopPyroscope() Handler.StopCPUProfile() Handler.StopGoTrace() if logOutputFile != nil { diff --git a/internal/debug/pyroscope.go b/internal/debug/pyroscope.go new file mode 100644 index 0000000000..d0804cb891 --- /dev/null +++ b/internal/debug/pyroscope.go @@ -0,0 +1,134 @@ +// Copyright 2026 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . 
+ +package debug + +import ( + "fmt" + "strings" + + "github.com/ethereum/go-ethereum/internal/flags" + "github.com/ethereum/go-ethereum/log" + "github.com/grafana/pyroscope-go" + "github.com/urfave/cli/v2" +) + +var ( + pyroscopeFlag = &cli.BoolFlag{ + Name: "pyroscope", + Usage: "Enable Pyroscope profiling", + Value: false, + Category: flags.LoggingCategory, + } + pyroscopeServerFlag = &cli.StringFlag{ + Name: "pyroscope.server", + Usage: "Pyroscope server URL to push profiling data to", + Value: "http://localhost:4040", + Category: flags.LoggingCategory, + } + pyroscopeAuthUsernameFlag = &cli.StringFlag{ + Name: "pyroscope.username", + Usage: "Pyroscope basic authentication username", + Value: "", + Category: flags.LoggingCategory, + } + pyroscopeAuthPasswordFlag = &cli.StringFlag{ + Name: "pyroscope.password", + Usage: "Pyroscope basic authentication password", + Value: "", + Category: flags.LoggingCategory, + } + pyroscopeTagsFlag = &cli.StringFlag{ + Name: "pyroscope.tags", + Usage: "Comma separated list of key=value tags to add to profiling data", + Value: "", + Category: flags.LoggingCategory, + } +) + +// This holds the globally-configured Pyroscope instance. 
+var pyroscopeProfiler *pyroscope.Profiler + +func startPyroscope(ctx *cli.Context) error { + server := ctx.String(pyroscopeServerFlag.Name) + authUsername := ctx.String(pyroscopeAuthUsernameFlag.Name) + authPassword := ctx.String(pyroscopeAuthPasswordFlag.Name) + + rawTags := ctx.String(pyroscopeTagsFlag.Name) + tags := make(map[string]string) + for tag := range strings.SplitSeq(rawTags, ",") { + tag = strings.TrimSpace(tag) + if tag == "" { + continue + } + k, v, _ := strings.Cut(tag, "=") + tags[k] = v + } + + config := pyroscope.Config{ + ApplicationName: "geth", + ServerAddress: server, + BasicAuthUser: authUsername, + BasicAuthPassword: authPassword, + Logger: &pyroscopeLogger{Logger: log.Root()}, + Tags: tags, + ProfileTypes: []pyroscope.ProfileType{ + // Enabling all profile types + pyroscope.ProfileCPU, + pyroscope.ProfileAllocObjects, + pyroscope.ProfileAllocSpace, + pyroscope.ProfileInuseObjects, + pyroscope.ProfileInuseSpace, + pyroscope.ProfileGoroutines, + pyroscope.ProfileMutexCount, + pyroscope.ProfileMutexDuration, + pyroscope.ProfileBlockCount, + pyroscope.ProfileBlockDuration, + }, + } + + profiler, err := pyroscope.Start(config) + if err != nil { + return err + } + pyroscopeProfiler = profiler + log.Info("Enabled Pyroscope") + return nil +} + +func stopPyroscope() { + if pyroscopeProfiler != nil { + pyroscopeProfiler.Stop() + pyroscopeProfiler = nil + } +} + +// Small wrapper for log.Logger to satisfy pyroscope.Logger interface +type pyroscopeLogger struct { + Logger log.Logger +} + +func (l *pyroscopeLogger) Infof(format string, v ...any) { + l.Logger.Info(fmt.Sprintf("Pyroscope: "+format, v...)) +} + +func (l *pyroscopeLogger) Debugf(format string, v ...any) { + l.Logger.Debug(fmt.Sprintf("Pyroscope: "+format, v...)) +} + +func (l *pyroscopeLogger) Errorf(format string, v ...any) { + l.Logger.Error(fmt.Sprintf("Pyroscope: "+format, v...)) +} From 46d804776b4e93eda4c4da14fb8e2fd77d4670ea Mon Sep 17 00:00:00 2001 From: DeFi Junkie Date: Tue, 20 
Jan 2026 14:04:23 +0300 Subject: [PATCH 253/277] accounts/scwallet: fix panic in decryptAPDU (#33606) Validate ciphertext length in decryptAPDU, preventing runtime panics on invalid input. --- accounts/scwallet/securechannel.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/accounts/scwallet/securechannel.go b/accounts/scwallet/securechannel.go index b3a7be8df0..1e0230dc45 100644 --- a/accounts/scwallet/securechannel.go +++ b/accounts/scwallet/securechannel.go @@ -300,6 +300,10 @@ func (s *SecureChannelSession) decryptAPDU(data []byte) ([]byte, error) { return nil, err } + if len(data) == 0 || len(data)%aes.BlockSize != 0 { + return nil, fmt.Errorf("invalid ciphertext length: %d", len(data)) + } + ret := make([]byte, len(data)) crypter := cipher.NewCBCDecrypter(a, s.iv) From 2eb1ccc6c40d16722a5ee91dc23aa1255e5c2f06 Mon Sep 17 00:00:00 2001 From: forkfury Date: Tue, 20 Jan 2026 13:36:07 +0100 Subject: [PATCH 254/277] core/state: ensure deterministic hook emission order in Finalise (#33644) Fixes #33630 Sort self-destructed addresses before emitting hooks in Finalise() to ensure deterministic ordering and fix flaky test TestHooks_OnCodeChangeV2. --------- Co-authored-by: jwasinger --- core/state/statedb_hooked.go | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/core/state/statedb_hooked.go b/core/state/statedb_hooked.go index 4ffa69b419..48794a3f41 100644 --- a/core/state/statedb_hooked.go +++ b/core/state/statedb_hooked.go @@ -17,7 +17,9 @@ package state import ( + "bytes" "math/big" + "sort" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/stateless" @@ -234,13 +236,24 @@ func (s *hookedStateDB) Finalise(deleteEmptyObjects bool) { return } - // Iterate all dirty addresses and record self-destructs. 
+ // Collect all self-destructed addresses first, then sort them to ensure + // that state change hooks will be invoked in deterministic + // order when the accounts are deleted below + var selfDestructedAddrs []common.Address for addr := range s.inner.journal.dirties { obj := s.inner.stateObjects[addr] if obj == nil || !obj.selfDestructed { // Not self-destructed, keep searching. continue } + selfDestructedAddrs = append(selfDestructedAddrs, addr) + } + sort.Slice(selfDestructedAddrs, func(i, j int) bool { + return bytes.Compare(selfDestructedAddrs[i][:], selfDestructedAddrs[j][:]) < 0 + }) + + for _, addr := range selfDestructedAddrs { + obj := s.inner.stateObjects[addr] // Bingo: state object was self-destructed, call relevant hooks. // If ether was sent to account post-selfdestruct, record as burnt. From 54ab4e3c7dae9b06e84a866a5ea816f0c6081f31 Mon Sep 17 00:00:00 2001 From: Csaba Kiraly Date: Wed, 21 Jan 2026 02:23:03 +0100 Subject: [PATCH 255/277] core/txpool/legacypool: add metric for accounts in txpool (#33646) This PR adds metrics that count the number of accounts having transactions in the txpool. Together with the transaction count this can be used as a simple indicator of the diversity of transactions in the pool. Note: as an alternative implementation, we could use a periodic or event driven update of these Gauges using len. I've preferred this implementation to match what we have for the pool sizes. 
--------- Signed-off-by: Csaba Kiraly --- core/txpool/legacypool/legacypool.go | 9 +++++++++ core/txpool/legacypool/queue.go | 3 +++ 2 files changed, 12 insertions(+) diff --git a/core/txpool/legacypool/legacypool.go b/core/txpool/legacypool/legacypool.go index 5f8dd4fac8..60494b5130 100644 --- a/core/txpool/legacypool/legacypool.go +++ b/core/txpool/legacypool/legacypool.go @@ -114,6 +114,9 @@ var ( queuedGauge = metrics.NewRegisteredGauge("txpool/queued", nil) slotsGauge = metrics.NewRegisteredGauge("txpool/slots", nil) + pendingAddrsGauge = metrics.NewRegisteredGauge("txpool/pending/accounts", nil) + queuedAddrsGauge = metrics.NewRegisteredGauge("txpool/queued/accounts", nil) + reheapTimer = metrics.NewRegisteredTimer("txpool/reheap", nil) ) @@ -844,6 +847,7 @@ func (pool *LegacyPool) promoteTx(addr common.Address, hash common.Hash, tx *typ // Try to insert the transaction into the pending queue if pool.pending[addr] == nil { pool.pending[addr] = newList(true) + pendingAddrsGauge.Inc(1) } list := pool.pending[addr] @@ -1083,6 +1087,7 @@ func (pool *LegacyPool) removeTx(hash common.Hash, outofbound bool, unreserve bo // If no more pending transactions are left, remove the list if pending.Empty() { delete(pool.pending, addr) + pendingAddrsGauge.Dec(1) } // Postpone any invalidated transactions for _, tx := range invalids { @@ -1580,6 +1585,7 @@ func (pool *LegacyPool) demoteUnexecutables() { // Delete the entire pending entry if it became empty. 
if list.Empty() { delete(pool.pending, addr) + pendingAddrsGauge.Dec(1) if _, ok := pool.queue.get(addr); !ok { pool.reserver.Release(addr) } @@ -1839,6 +1845,9 @@ func (pool *LegacyPool) Clear() { pool.pending = make(map[common.Address]*list) pool.queue = newQueue(pool.config, pool.signer) pool.pendingNonces = newNoncer(pool.currentState) + + pendingAddrsGauge.Update(0) + queuedAddrsGauge.Update(0) } // HasPendingAuth returns a flag indicating whether there are pending diff --git a/core/txpool/legacypool/queue.go b/core/txpool/legacypool/queue.go index a889debe37..918a219ab6 100644 --- a/core/txpool/legacypool/queue.go +++ b/core/txpool/legacypool/queue.go @@ -114,6 +114,7 @@ func (q *queue) remove(addr common.Address, tx *types.Transaction) { if future.Empty() { delete(q.queued, addr) delete(q.beats, addr) + queuedAddrsGauge.Dec(1) } } } @@ -123,6 +124,7 @@ func (q *queue) add(tx *types.Transaction) (*common.Hash, error) { from, _ := types.Sender(q.signer, tx) // already validated if q.queued[from] == nil { q.queued[from] = newList(false) + queuedAddrsGauge.Inc(1) } inserted, old := q.queued[from].Add(tx, q.config.PriceBump) if !inserted { @@ -200,6 +202,7 @@ func (q *queue) promoteExecutables(accounts []common.Address, gasLimit uint64, c if list.Empty() { delete(q.queued, addr) delete(q.beats, addr) + queuedAddrsGauge.Dec(1) removedAddresses = append(removedAddresses, addr) } } From 8fad02ac63a1c0972f47b7c5b41d5577fa4d2932 Mon Sep 17 00:00:00 2001 From: DeFi Junkie Date: Wed, 21 Jan 2026 08:57:02 +0300 Subject: [PATCH 256/277] core/types: fix panic on invalid signature length (#33647) Replace panic with error return in decodeSignature to prevent crashes on invalid inputs, and update callers to propagate the error. 
--- core/types/transaction_signing.go | 20 +++++++++++++------- core/types/transaction_signing_test.go | 25 +++++++++++++++++++++++++ core/types/tx_setcode.go | 5 ++++- 3 files changed, 42 insertions(+), 8 deletions(-) diff --git a/core/types/transaction_signing.go b/core/types/transaction_signing.go index ef8fb194d5..5a624191cf 100644 --- a/core/types/transaction_signing.go +++ b/core/types/transaction_signing.go @@ -282,7 +282,10 @@ func (s *modernSigner) SignatureValues(tx *Transaction, sig []byte) (R, S, V *bi if tx.inner.chainID().Sign() != 0 && tx.inner.chainID().Cmp(s.chainID) != 0 { return nil, nil, nil, fmt.Errorf("%w: have %d want %d", ErrInvalidChainId, tx.inner.chainID(), s.chainID) } - R, S, _ = decodeSignature(sig) + R, S, _, err = decodeSignature(sig) + if err != nil { + return nil, nil, nil, err + } V = big.NewInt(int64(sig[64])) return R, S, V, nil } @@ -373,7 +376,10 @@ func (s EIP155Signer) SignatureValues(tx *Transaction, sig []byte) (R, S, V *big if tx.Type() != LegacyTxType { return nil, nil, nil, ErrTxTypeNotSupported } - R, S, V = decodeSignature(sig) + R, S, V, err = decodeSignature(sig) + if err != nil { + return nil, nil, nil, err + } if s.chainId.Sign() != 0 { V = big.NewInt(int64(sig[64] + 35)) V.Add(V, s.chainIdMul) @@ -442,8 +448,8 @@ func (fs FrontierSigner) SignatureValues(tx *Transaction, sig []byte) (r, s, v * if tx.Type() != LegacyTxType { return nil, nil, nil, ErrTxTypeNotSupported } - r, s, v = decodeSignature(sig) - return r, s, v, nil + r, s, v, err = decodeSignature(sig) + return r, s, v, err } // Hash returns the hash to be signed by the sender. 
@@ -459,14 +465,14 @@ func (fs FrontierSigner) Hash(tx *Transaction) common.Hash { }) } -func decodeSignature(sig []byte) (r, s, v *big.Int) { +func decodeSignature(sig []byte) (r, s, v *big.Int, err error) { if len(sig) != crypto.SignatureLength { - panic(fmt.Sprintf("wrong size for signature: got %d, want %d", len(sig), crypto.SignatureLength)) + return nil, nil, nil, fmt.Errorf("wrong size for signature: got %d, want %d", len(sig), crypto.SignatureLength) } r = new(big.Int).SetBytes(sig[:32]) s = new(big.Int).SetBytes(sig[32:64]) v = new(big.Int).SetBytes([]byte{sig[64] + 27}) - return r, s, v + return r, s, v, nil } func recoverPlain(sighash common.Hash, R, S, Vb *big.Int, homestead bool) (common.Address, error) { diff --git a/core/types/transaction_signing_test.go b/core/types/transaction_signing_test.go index 02a65fda13..252593e87b 100644 --- a/core/types/transaction_signing_test.go +++ b/core/types/transaction_signing_test.go @@ -200,3 +200,28 @@ func Benchmark_modernSigner_Equal(b *testing.B) { } } } + +func TestSignatureValuesError(t *testing.T) { + // 1. Setup a valid transaction + tx := NewTransaction(0, common.Address{}, big.NewInt(0), 0, big.NewInt(0), nil) + signer := HomesteadSigner{} + + // 2. Call WithSignature with invalid length sig (not 65 bytes) + invalidSig := make([]byte, 64) + + func() { + defer func() { + if r := recover(); r != nil { + t.Fatalf("Panicked for invalid signature length, expected error: %v", r) + } + }() + _, err := tx.WithSignature(signer, invalidSig) + if err == nil { + t.Fatal("Expected error for invalid signature length, got nil") + } else { + // This is just a sanity check to ensure we got an error, + // the exact error message is verified in unit tests elsewhere if needed. 
+ t.Logf("Got expected error: %v", err) + } + }() +} diff --git a/core/types/tx_setcode.go b/core/types/tx_setcode.go index f2281d4ae7..9487c9cc81 100644 --- a/core/types/tx_setcode.go +++ b/core/types/tx_setcode.go @@ -94,7 +94,10 @@ func SignSetCode(prv *ecdsa.PrivateKey, auth SetCodeAuthorization) (SetCodeAutho if err != nil { return SetCodeAuthorization{}, err } - r, s, _ := decodeSignature(sig) + r, s, _, err := decodeSignature(sig) + if err != nil { + return SetCodeAuthorization{}, err + } return SetCodeAuthorization{ ChainID: auth.ChainID, Address: auth.Address, From 35922bcd33ce1c3ed09cdc31ee66b80724a1fdab Mon Sep 17 00:00:00 2001 From: Csaba Kiraly Date: Wed, 21 Jan 2026 09:00:57 +0100 Subject: [PATCH 257/277] core/txpool/legacypool: reset gauges on clear (#33654) Signed-off-by: Csaba Kiraly Co-authored-by: rjl493456442 --- core/txpool/legacypool/legacypool.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/core/txpool/legacypool/legacypool.go b/core/txpool/legacypool/legacypool.go index 60494b5130..eb1fe23d5f 100644 --- a/core/txpool/legacypool/legacypool.go +++ b/core/txpool/legacypool/legacypool.go @@ -1846,6 +1846,10 @@ func (pool *LegacyPool) Clear() { pool.queue = newQueue(pool.config, pool.signer) pool.pendingNonces = newNoncer(pool.currentState) + // Reset gauges + pendingGauge.Update(0) + queuedGauge.Update(0) + slotsGauge.Update(0) pendingAddrsGauge.Update(0) queuedAddrsGauge.Update(0) } From 1022c7637dcd0cc063105f2709cadf0b88c50ae9 Mon Sep 17 00:00:00 2001 From: rjl493456442 Date: Thu, 22 Jan 2026 09:19:27 +0800 Subject: [PATCH 258/277] core, eth, internal, triedb/pathdb: enable eth_getProofs for history (#32727) This PR enables the `eth_getProofs ` endpoint against the historical states. 
--- cmd/geth/chaincmd.go | 1 + cmd/geth/main.go | 1 + cmd/keeper/go.mod | 1 - cmd/utils/flags.go | 30 ++- core/blockchain.go | 28 ++- core/rawdb/database.go | 21 +- core/state/database_history.go | 199 +++++++++++++++--- eth/backend.go | 25 +-- eth/ethconfig/config.go | 60 +++--- eth/ethconfig/gen_config.go | 6 + internal/ethapi/api.go | 6 +- rlp/raw.go | 5 + triedb/database.go | 13 +- triedb/pathdb/config.go | 51 ++++- triedb/pathdb/disklayer.go | 5 +- triedb/pathdb/history.go | 2 - triedb/pathdb/history_index_iterator.go | 45 +++++ triedb/pathdb/history_index_iterator_test.go | 46 +++++ triedb/pathdb/history_indexer.go | 5 +- triedb/pathdb/history_reader.go | 201 +++++++++++++++++++ triedb/pathdb/history_trienode.go | 13 +- triedb/pathdb/metrics.go | 5 +- triedb/pathdb/nodes.go | 21 +- triedb/pathdb/reader.go | 87 ++++++++ 24 files changed, 756 insertions(+), 121 deletions(-) diff --git a/cmd/geth/chaincmd.go b/cmd/geth/chaincmd.go index 0af0a61602..9d04dd0f1b 100644 --- a/cmd/geth/chaincmd.go +++ b/cmd/geth/chaincmd.go @@ -121,6 +121,7 @@ if one is set. 
Otherwise it prints the genesis from the datadir.`, utils.LogExportCheckpointsFlag, utils.StateHistoryFlag, utils.TrienodeHistoryFlag, + utils.TrienodeHistoryFullValueCheckpointFlag, }, utils.DatabaseFlags, debug.Flags), Before: func(ctx *cli.Context) error { flags.MigrateGlobalFlags(ctx) diff --git a/cmd/geth/main.go b/cmd/geth/main.go index e838a846a1..9aabaaba98 100644 --- a/cmd/geth/main.go +++ b/cmd/geth/main.go @@ -95,6 +95,7 @@ var ( utils.LogExportCheckpointsFlag, utils.StateHistoryFlag, utils.TrienodeHistoryFlag, + utils.TrienodeHistoryFullValueCheckpointFlag, utils.LightKDFFlag, utils.EthRequiredBlocksFlag, utils.LegacyWhitelistFlag, // deprecated diff --git a/cmd/keeper/go.mod b/cmd/keeper/go.mod index 21cdfe8c33..cee1ce05a7 100644 --- a/cmd/keeper/go.mod +++ b/cmd/keeper/go.mod @@ -33,7 +33,6 @@ require ( github.com/tklauser/go-sysconf v0.3.12 // indirect github.com/tklauser/numcpus v0.6.1 // indirect golang.org/x/crypto v0.36.0 // indirect - golang.org/x/exp v0.0.0-20230626212559-97b1e661b5df // indirect golang.org/x/sync v0.12.0 // indirect golang.org/x/sys v0.39.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index fe8375454f..844397b734 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -301,6 +301,12 @@ var ( Value: ethconfig.Defaults.TrienodeHistory, Category: flags.StateCategory, } + TrienodeHistoryFullValueCheckpointFlag = &cli.UintFlag{ + Name: "history.trienode.full-value-checkpoint", + Usage: "The frequency of full-value encoding. 
Every n-th node is stored in full-value format; all other nodes are stored as diffs relative to their predecessor", + Value: uint(ethconfig.Defaults.NodeFullValueCheckpoint), + Category: flags.StateCategory, + } TransactionHistoryFlag = &cli.Uint64Flag{ Name: "history.transactions", Usage: "Number of recent blocks to maintain transactions index for (default = about one year, 0 = entire chain)", @@ -1714,6 +1720,9 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) { if ctx.IsSet(TrienodeHistoryFlag.Name) { cfg.TrienodeHistory = ctx.Int64(TrienodeHistoryFlag.Name) } + if ctx.IsSet(TrienodeHistoryFullValueCheckpointFlag.Name) { + cfg.NodeFullValueCheckpoint = uint32(ctx.Uint(TrienodeHistoryFullValueCheckpointFlag.Name)) + } if ctx.IsSet(StateSchemeFlag.Name) { cfg.StateScheme = ctx.String(StateSchemeFlag.Name) } @@ -2318,16 +2327,17 @@ func MakeChain(ctx *cli.Context, stack *node.Node, readonly bool) (*core.BlockCh Fatalf("%v", err) } options := &core.BlockChainConfig{ - TrieCleanLimit: ethconfig.Defaults.TrieCleanCache, - NoPrefetch: ctx.Bool(CacheNoPrefetchFlag.Name), - TrieDirtyLimit: ethconfig.Defaults.TrieDirtyCache, - ArchiveMode: ctx.String(GCModeFlag.Name) == "archive", - TrieTimeLimit: ethconfig.Defaults.TrieTimeout, - SnapshotLimit: ethconfig.Defaults.SnapshotCache, - Preimages: ctx.Bool(CachePreimagesFlag.Name), - StateScheme: scheme, - StateHistory: ctx.Uint64(StateHistoryFlag.Name), - TrienodeHistory: ctx.Int64(TrienodeHistoryFlag.Name), + TrieCleanLimit: ethconfig.Defaults.TrieCleanCache, + NoPrefetch: ctx.Bool(CacheNoPrefetchFlag.Name), + TrieDirtyLimit: ethconfig.Defaults.TrieDirtyCache, + ArchiveMode: ctx.String(GCModeFlag.Name) == "archive", + TrieTimeLimit: ethconfig.Defaults.TrieTimeout, + SnapshotLimit: ethconfig.Defaults.SnapshotCache, + Preimages: ctx.Bool(CachePreimagesFlag.Name), + StateScheme: scheme, + StateHistory: ctx.Uint64(StateHistoryFlag.Name), + TrienodeHistory: ctx.Int64(TrienodeHistoryFlag.Name), + 
NodeFullValueCheckpoint: uint32(ctx.Uint(TrienodeHistoryFullValueCheckpointFlag.Name)), // Disable transaction indexing/unindexing. TxLookupLimit: -1, diff --git a/core/blockchain.go b/core/blockchain.go index fc0e70c271..8741b8b937 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -182,6 +182,12 @@ type BlockChainConfig struct { // If set to -1, no trienode history will be retained; TrienodeHistory int64 + // The frequency of full-value encoding. For example, a value of 16 means + // that, on average, for a given trie node across its 16 consecutive historical + // versions, only one version is stored in full format, while the others + // are stored in diff mode for storage compression. + NodeFullValueCheckpoint uint32 + // State snapshot related options SnapshotLimit int // Memory allowance (MB) to use for caching snapshot entries in memory SnapshotNoBuild bool // Whether the background generation is allowed @@ -259,18 +265,22 @@ func (cfg *BlockChainConfig) triedbConfig(isVerkle bool) *triedb.Config { } if cfg.StateScheme == rawdb.PathScheme { config.PathDB = &pathdb.Config{ - StateHistory: cfg.StateHistory, - TrienodeHistory: cfg.TrienodeHistory, - EnableStateIndexing: cfg.ArchiveMode, - TrieCleanSize: cfg.TrieCleanLimit * 1024 * 1024, - StateCleanSize: cfg.SnapshotLimit * 1024 * 1024, - JournalDirectory: cfg.TrieJournalDirectory, - + TrieCleanSize: cfg.TrieCleanLimit * 1024 * 1024, + StateCleanSize: cfg.SnapshotLimit * 1024 * 1024, // TODO(rjl493456442): The write buffer represents the memory limit used // for flushing both trie data and state data to disk. The config name // should be updated to eliminate the confusion. 
- WriteBufferSize: cfg.TrieDirtyLimit * 1024 * 1024, - NoAsyncFlush: cfg.TrieNoAsyncFlush, + WriteBufferSize: cfg.TrieDirtyLimit * 1024 * 1024, + JournalDirectory: cfg.TrieJournalDirectory, + + // Historical state configurations + StateHistory: cfg.StateHistory, + TrienodeHistory: cfg.TrienodeHistory, + EnableStateIndexing: cfg.ArchiveMode, + FullValueCheckpoint: cfg.NodeFullValueCheckpoint, + + // Testing configurations + NoAsyncFlush: cfg.TrieNoAsyncFlush, } } return config diff --git a/core/rawdb/database.go b/core/rawdb/database.go index 8260391802..a5335ea56b 100644 --- a/core/rawdb/database.go +++ b/core/rawdb/database.go @@ -429,7 +429,8 @@ func InspectDatabase(db ethdb.Database, keyPrefix, keyStart []byte) error { filterMapBlockLV stat // Path-mode archive data - stateIndex stat + stateIndex stat + trienodeIndex stat // Verkle statistics verkleTries stat @@ -524,8 +525,19 @@ func InspectDatabase(db ethdb.Database, keyPrefix, keyStart []byte) error { bloomBits.add(size) // Path-based historic state indexes - case bytes.HasPrefix(key, StateHistoryIndexPrefix) && len(key) >= len(StateHistoryIndexPrefix)+common.HashLength: + case bytes.HasPrefix(key, StateHistoryAccountMetadataPrefix) && len(key) == len(StateHistoryAccountMetadataPrefix)+common.HashLength: stateIndex.add(size) + case bytes.HasPrefix(key, StateHistoryStorageMetadataPrefix) && len(key) == len(StateHistoryStorageMetadataPrefix)+2*common.HashLength: + stateIndex.add(size) + case bytes.HasPrefix(key, StateHistoryAccountBlockPrefix) && len(key) == len(StateHistoryAccountBlockPrefix)+common.HashLength+4: + stateIndex.add(size) + case bytes.HasPrefix(key, StateHistoryStorageBlockPrefix) && len(key) == len(StateHistoryStorageBlockPrefix)+2*common.HashLength+4: + stateIndex.add(size) + + case bytes.HasPrefix(key, TrienodeHistoryMetadataPrefix) && len(key) >= len(TrienodeHistoryMetadataPrefix)+common.HashLength: + trienodeIndex.add(size) + case bytes.HasPrefix(key, TrienodeHistoryBlockPrefix) && len(key) 
>= len(TrienodeHistoryBlockPrefix)+common.HashLength+4: + trienodeIndex.add(size) // Verkle trie data is detected, determine the sub-category case bytes.HasPrefix(key, VerklePrefix): @@ -622,12 +634,13 @@ func InspectDatabase(db ethdb.Database, keyPrefix, keyStart []byte) error { {"Key-Value store", "Path trie state lookups", stateLookups.sizeString(), stateLookups.countString()}, {"Key-Value store", "Path trie account nodes", accountTries.sizeString(), accountTries.countString()}, {"Key-Value store", "Path trie storage nodes", storageTries.sizeString(), storageTries.countString()}, - {"Key-Value store", "Path state history indexes", stateIndex.sizeString(), stateIndex.countString()}, {"Key-Value store", "Verkle trie nodes", verkleTries.sizeString(), verkleTries.countString()}, {"Key-Value store", "Verkle trie state lookups", verkleStateLookups.sizeString(), verkleStateLookups.countString()}, {"Key-Value store", "Trie preimages", preimages.sizeString(), preimages.countString()}, {"Key-Value store", "Account snapshot", accountSnaps.sizeString(), accountSnaps.countString()}, {"Key-Value store", "Storage snapshot", storageSnaps.sizeString(), storageSnaps.countString()}, + {"Key-Value store", "Historical state index", stateIndex.sizeString(), stateIndex.countString()}, + {"Key-Value store", "Historical trie index", trienodeIndex.sizeString(), trienodeIndex.countString()}, {"Key-Value store", "Beacon sync headers", beaconHeaders.sizeString(), beaconHeaders.countString()}, {"Key-Value store", "Clique snapshots", cliqueSnaps.sizeString(), cliqueSnaps.countString()}, {"Key-Value store", "Singleton metadata", metadata.sizeString(), metadata.countString()}, @@ -672,7 +685,7 @@ var knownMetadataKeys = [][]byte{ snapshotGeneratorKey, snapshotRecoveryKey, txIndexTailKey, fastTxLookupLimitKey, uncleanShutdownKey, badBlockKey, transitionStatusKey, skeletonSyncStatusKey, persistentStateIDKey, trieJournalKey, snapshotSyncStatusKey, snapSyncStatusFlagKey, - filterMapsRangeKey, 
headStateHistoryIndexKey, VerkleTransitionStatePrefix, + filterMapsRangeKey, headStateHistoryIndexKey, headTrienodeHistoryIndexKey, VerkleTransitionStatePrefix, } // printChainMetadata prints out chain metadata to stderr. diff --git a/core/state/database_history.go b/core/state/database_history.go index f9c4a69f2f..7a2be8fe4f 100644 --- a/core/state/database_history.go +++ b/core/state/database_history.go @@ -17,40 +17,39 @@ package state import ( - "errors" + "fmt" + "sync" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/lru" "github.com/ethereum/go-ethereum/core/state/snapshot" "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/rlp" + "github.com/ethereum/go-ethereum/trie" "github.com/ethereum/go-ethereum/triedb" + "github.com/ethereum/go-ethereum/triedb/database" "github.com/ethereum/go-ethereum/triedb/pathdb" ) -// historicReader wraps a historical state reader defined in path database, -// providing historic state serving over the path scheme. -// -// TODO(rjl493456442): historicReader is not thread-safe and does not fully -// comply with the StateReader interface requirements, needs to be fixed. -// Currently, it is only used in a non-concurrent context, so it is safe for now. -type historicReader struct { +// historicStateReader implements StateReader, wrapping a historical state reader +// defined in path database and provide historic state serving over the path scheme. +type historicStateReader struct { reader *pathdb.HistoricalStateReader + lock sync.Mutex // Lock for protecting concurrent read } -// newHistoricReader constructs a reader for historic state serving. -func newHistoricReader(r *pathdb.HistoricalStateReader) *historicReader { - return &historicReader{reader: r} +// newHistoricStateReader constructs a reader for historical state serving. 
+func newHistoricStateReader(r *pathdb.HistoricalStateReader) *historicStateReader { + return &historicStateReader{reader: r} } // Account implements StateReader, retrieving the account specified by the address. -// -// An error will be returned if the associated snapshot is already stale or -// the requested account is not yet covered by the snapshot. -// -// The returned account might be nil if it's not existent. -func (r *historicReader) Account(addr common.Address) (*types.StateAccount, error) { +func (r *historicStateReader) Account(addr common.Address) (*types.StateAccount, error) { + r.lock.Lock() + defer r.lock.Unlock() + account, err := r.reader.Account(addr) if err != nil { return nil, err @@ -80,7 +79,10 @@ func (r *historicReader) Account(addr common.Address) (*types.StateAccount, erro // the requested storage slot is not yet covered by the snapshot. // // The returned storage slot might be empty if it's not existent. -func (r *historicReader) Storage(addr common.Address, key common.Hash) (common.Hash, error) { +func (r *historicStateReader) Storage(addr common.Address, key common.Hash) (common.Hash, error) { + r.lock.Lock() + defer r.lock.Unlock() + blob, err := r.reader.Storage(addr, key) if err != nil { return common.Hash{}, err @@ -97,6 +99,125 @@ func (r *historicReader) Storage(addr common.Address, key common.Hash) (common.H return slot, nil } +// historicTrieOpener is a wrapper of pathdb.HistoricalNodeReader, implementing +// the database.NodeDatabase by adding NodeReader function. +type historicTrieOpener struct { + root common.Hash + reader *pathdb.HistoricalNodeReader +} + +// newHistoricTrieOpener constructs the historical trie opener. +func newHistoricTrieOpener(root common.Hash, reader *pathdb.HistoricalNodeReader) *historicTrieOpener { + return &historicTrieOpener{ + root: root, + reader: reader, + } +} + +// NodeReader implements database.NodeDatabase, returning a node reader of a +// specified state. 
+func (o *historicTrieOpener) NodeReader(root common.Hash) (database.NodeReader, error) { + if root != o.root { + return nil, fmt.Errorf("state %x is not available", root) + } + return o.reader, nil +} + +// historicalTrieReader wraps a historical node reader defined in path database, +// providing historical node serving over the path scheme. +type historicalTrieReader struct { + root common.Hash + opener *historicTrieOpener + tr Trie + + subRoots map[common.Address]common.Hash // Set of storage roots, cached when the account is resolved + subTries map[common.Address]Trie // Group of storage tries, cached when it's resolved + lock sync.Mutex // Lock for protecting concurrent read +} + +// newHistoricalTrieReader constructs a reader for historical trie node serving. +func newHistoricalTrieReader(root common.Hash, r *pathdb.HistoricalNodeReader) (*historicalTrieReader, error) { + opener := newHistoricTrieOpener(root, r) + tr, err := trie.NewStateTrie(trie.StateTrieID(root), opener) + if err != nil { + return nil, err + } + return &historicalTrieReader{ + root: root, + opener: opener, + tr: tr, + subRoots: make(map[common.Address]common.Hash), + subTries: make(map[common.Address]Trie), + }, nil +} + +// account is the inner version of Account and assumes the r.lock is already held. +func (r *historicalTrieReader) account(addr common.Address) (*types.StateAccount, error) { + account, err := r.tr.GetAccount(addr) + if err != nil { + return nil, err + } + if account == nil { + r.subRoots[addr] = types.EmptyRootHash + } else { + r.subRoots[addr] = account.Root + } + return account, nil +} + +// Account implements StateReader, retrieving the account specified by the address. +// +// An error will be returned if the associated snapshot is already stale or +// the requested account is not yet covered by the snapshot. +// +// The returned account might be nil if it's not existent. 
+func (r *historicalTrieReader) Account(addr common.Address) (*types.StateAccount, error) { + r.lock.Lock() + defer r.lock.Unlock() + + return r.account(addr) +} + +// Storage implements StateReader, retrieving the storage slot specified by the +// address and slot key. +// +// An error will be returned if the associated snapshot is already stale or +// the requested storage slot is not yet covered by the snapshot. +// +// The returned storage slot might be empty if it's not existent. +func (r *historicalTrieReader) Storage(addr common.Address, key common.Hash) (common.Hash, error) { + r.lock.Lock() + defer r.lock.Unlock() + + tr, found := r.subTries[addr] + if !found { + root, ok := r.subRoots[addr] + + // The storage slot is accessed without account caching. It's unexpected + // behavior but try to resolve the account first anyway. + if !ok { + _, err := r.account(addr) + if err != nil { + return common.Hash{}, err + } + root = r.subRoots[addr] + } + var err error + tr, err = trie.NewStateTrie(trie.StorageTrieID(r.root, crypto.Keccak256Hash(addr.Bytes()), root), r.opener) + if err != nil { + return common.Hash{}, err + } + r.subTries[addr] = tr + } + ret, err := tr.GetStorage(addr, key.Bytes()) + if err != nil { + return common.Hash{}, err + } + var value common.Hash + value.SetBytes(ret) + return value, nil +} + // HistoricDB is the implementation of Database interface, with the ability to // access historical state. type HistoricDB struct { @@ -118,22 +239,54 @@ func NewHistoricDatabase(disk ethdb.KeyValueStore, triedb *triedb.Database) *His // Reader implements Database interface, returning a reader of the specific state. 
func (db *HistoricDB) Reader(stateRoot common.Hash) (Reader, error) { - hr, err := db.triedb.HistoricReader(stateRoot) + var readers []StateReader + sr, err := db.triedb.HistoricStateReader(stateRoot) + if err == nil { + readers = append(readers, newHistoricStateReader(sr)) + } + nr, err := db.triedb.HistoricNodeReader(stateRoot) + if err == nil { + tr, err := newHistoricalTrieReader(stateRoot, nr) + if err == nil { + readers = append(readers, tr) + } + } + if len(readers) == 0 { + return nil, fmt.Errorf("historical state %x is not available", stateRoot) + } + combined, err := newMultiStateReader(readers...) if err != nil { return nil, err } - return newReader(newCachingCodeReader(db.disk, db.codeCache, db.codeSizeCache), newHistoricReader(hr)), nil + return newReader(newCachingCodeReader(db.disk, db.codeCache, db.codeSizeCache), combined), nil } // OpenTrie opens the main account trie. It's not supported by historic database. func (db *HistoricDB) OpenTrie(root common.Hash) (Trie, error) { - return nil, errors.New("not implemented") + nr, err := db.triedb.HistoricNodeReader(root) + if err != nil { + return nil, err + } + tr, err := trie.NewStateTrie(trie.StateTrieID(root), newHistoricTrieOpener(root, nr)) + if err != nil { + return nil, err + } + return tr, nil } // OpenStorageTrie opens the storage trie of an account. It's not supported by // historic database. 
-func (db *HistoricDB) OpenStorageTrie(stateRoot common.Hash, address common.Address, root common.Hash, trie Trie) (Trie, error) { - return nil, errors.New("not implemented") +func (db *HistoricDB) OpenStorageTrie(stateRoot common.Hash, address common.Address, root common.Hash, _ Trie) (Trie, error) { + nr, err := db.triedb.HistoricNodeReader(stateRoot) + if err != nil { + return nil, err + } + id := trie.StorageTrieID(stateRoot, crypto.Keccak256Hash(address.Bytes()), root) + tr, err := trie.NewStateTrie(id, newHistoricTrieOpener(stateRoot, nr)) + if err != nil { + return nil, err + } + return tr, nil } // TrieDB returns the underlying trie database for managing trie nodes. diff --git a/eth/backend.go b/eth/backend.go index 932d1a2515..aed1542aeb 100644 --- a/eth/backend.go +++ b/eth/backend.go @@ -222,18 +222,19 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) { } var ( options = &core.BlockChainConfig{ - TrieCleanLimit: config.TrieCleanCache, - NoPrefetch: config.NoPrefetch, - TrieDirtyLimit: config.TrieDirtyCache, - ArchiveMode: config.NoPruning, - TrieTimeLimit: config.TrieTimeout, - SnapshotLimit: config.SnapshotCache, - Preimages: config.Preimages, - StateHistory: config.StateHistory, - TrienodeHistory: config.TrienodeHistory, - StateScheme: scheme, - ChainHistoryMode: config.HistoryMode, - TxLookupLimit: int64(min(config.TransactionHistory, math.MaxInt64)), + TrieCleanLimit: config.TrieCleanCache, + NoPrefetch: config.NoPrefetch, + TrieDirtyLimit: config.TrieDirtyCache, + ArchiveMode: config.NoPruning, + TrieTimeLimit: config.TrieTimeout, + SnapshotLimit: config.SnapshotCache, + Preimages: config.Preimages, + StateHistory: config.StateHistory, + TrienodeHistory: config.TrienodeHistory, + NodeFullValueCheckpoint: config.NodeFullValueCheckpoint, + StateScheme: scheme, + ChainHistoryMode: config.HistoryMode, + TxLookupLimit: int64(min(config.TransactionHistory, math.MaxInt64)), VmConfig: vm.Config{ EnablePreimageRecording: 
config.EnablePreimageRecording, EnableWitnessStats: config.EnableWitnessStats, diff --git a/eth/ethconfig/config.go b/eth/ethconfig/config.go index 9e967e45cc..e58c4b884a 100644 --- a/eth/ethconfig/config.go +++ b/eth/ethconfig/config.go @@ -35,6 +35,7 @@ import ( "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/miner" "github.com/ethereum/go-ethereum/params" + "github.com/ethereum/go-ethereum/triedb/pathdb" ) // FullNodeGPO contains default gasprice oracle settings for full node. @@ -49,32 +50,33 @@ var FullNodeGPO = gasprice.Config{ // Defaults contains default settings for use on the Ethereum main net. var Defaults = Config{ - HistoryMode: history.KeepAll, - SyncMode: SnapSync, - NetworkId: 0, // enable auto configuration of networkID == chainID - TxLookupLimit: 2350000, - TransactionHistory: 2350000, - LogHistory: 2350000, - StateHistory: params.FullImmutabilityThreshold, - TrienodeHistory: -1, - DatabaseCache: 512, - TrieCleanCache: 154, - TrieDirtyCache: 256, - TrieTimeout: 60 * time.Minute, - SnapshotCache: 102, - FilterLogCacheSize: 32, - LogQueryLimit: 1000, - Miner: miner.DefaultConfig, - TxPool: legacypool.DefaultConfig, - BlobPool: blobpool.DefaultConfig, - RPCGasCap: 50000000, - RPCEVMTimeout: 5 * time.Second, - GPO: FullNodeGPO, - RPCTxFeeCap: 1, // 1 ether - TxSyncDefaultTimeout: 20 * time.Second, - TxSyncMaxTimeout: 1 * time.Minute, - SlowBlockThreshold: time.Second * 2, - RangeLimit: 0, + HistoryMode: history.KeepAll, + SyncMode: SnapSync, + NetworkId: 0, // enable auto configuration of networkID == chainID + TxLookupLimit: 2350000, + TransactionHistory: 2350000, + LogHistory: 2350000, + StateHistory: pathdb.Defaults.StateHistory, + TrienodeHistory: pathdb.Defaults.TrienodeHistory, + NodeFullValueCheckpoint: pathdb.Defaults.FullValueCheckpoint, + DatabaseCache: 512, + TrieCleanCache: 154, + TrieDirtyCache: 256, + TrieTimeout: 60 * time.Minute, + SnapshotCache: 102, + FilterLogCacheSize: 32, + LogQueryLimit: 1000, + Miner: 
miner.DefaultConfig, + TxPool: legacypool.DefaultConfig, + BlobPool: blobpool.DefaultConfig, + RPCGasCap: 50000000, + RPCEVMTimeout: 5 * time.Second, + GPO: FullNodeGPO, + RPCTxFeeCap: 1, // 1 ether + TxSyncDefaultTimeout: 20 * time.Second, + TxSyncMaxTimeout: 1 * time.Minute, + SlowBlockThreshold: time.Second * 2, + RangeLimit: 0, } //go:generate go run github.com/fjl/gencodec -type Config -formats toml -out gen_config.go @@ -112,6 +114,12 @@ type Config struct { StateHistory uint64 `toml:",omitempty"` // The maximum number of blocks from head whose state histories are reserved. TrienodeHistory int64 `toml:",omitempty"` // Number of blocks from the chain head for which trienode histories are retained + // The frequency of full-value encoding. For example, a value of 16 means + // that, on average, for a given trie node across its 16 consecutive historical + // versions, only one version is stored in full format, while the others + // are stored in diff mode for storage compression. + NodeFullValueCheckpoint uint32 `toml:",omitempty"` + // State scheme represents the scheme used to store ethereum states and trie // nodes on top. It can be 'hash', 'path', or none which means use the scheme // consistent with persistent state. 
diff --git a/eth/ethconfig/gen_config.go b/eth/ethconfig/gen_config.go index 44b8c6306c..6f94a409e5 100644 --- a/eth/ethconfig/gen_config.go +++ b/eth/ethconfig/gen_config.go @@ -32,6 +32,7 @@ func (c Config) MarshalTOML() (interface{}, error) { LogExportCheckpoints string StateHistory uint64 `toml:",omitempty"` TrienodeHistory int64 `toml:",omitempty"` + NodeFullValueCheckpoint uint32 `toml:",omitempty"` StateScheme string `toml:",omitempty"` RequiredBlocks map[uint64]common.Hash `toml:"-"` SlowBlockThreshold time.Duration `toml:",omitempty"` @@ -84,6 +85,7 @@ func (c Config) MarshalTOML() (interface{}, error) { enc.LogExportCheckpoints = c.LogExportCheckpoints enc.StateHistory = c.StateHistory enc.TrienodeHistory = c.TrienodeHistory + enc.NodeFullValueCheckpoint = c.NodeFullValueCheckpoint enc.StateScheme = c.StateScheme enc.RequiredBlocks = c.RequiredBlocks enc.SlowBlockThreshold = c.SlowBlockThreshold @@ -140,6 +142,7 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error { LogExportCheckpoints *string StateHistory *uint64 `toml:",omitempty"` TrienodeHistory *int64 `toml:",omitempty"` + NodeFullValueCheckpoint *uint32 `toml:",omitempty"` StateScheme *string `toml:",omitempty"` RequiredBlocks map[uint64]common.Hash `toml:"-"` SlowBlockThreshold *time.Duration `toml:",omitempty"` @@ -225,6 +228,9 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error { if dec.TrienodeHistory != nil { c.TrienodeHistory = *dec.TrienodeHistory } + if dec.NodeFullValueCheckpoint != nil { + c.NodeFullValueCheckpoint = *dec.NodeFullValueCheckpoint + } if dec.StateScheme != nil { c.StateScheme = *dec.StateScheme } diff --git a/internal/ethapi/api.go b/internal/ethapi/api.go index d48bffd818..b0a79295f5 100644 --- a/internal/ethapi/api.go +++ b/internal/ethapi/api.go @@ -47,7 +47,6 @@ import ( "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/rpc" - "github.com/ethereum/go-ethereum/trie" 
) // estimateGasErrorRatio is the amount of overestimation eth_estimateGas is @@ -382,8 +381,7 @@ func (api *BlockChainAPI) GetProof(ctx context.Context, address common.Address, if len(keys) > 0 { var storageTrie state.Trie if storageRoot != types.EmptyRootHash && storageRoot != (common.Hash{}) { - id := trie.StorageTrieID(header.Root, crypto.Keccak256Hash(address.Bytes()), storageRoot) - st, err := trie.NewStateTrie(id, statedb.Database().TrieDB()) + st, err := statedb.Database().OpenStorageTrie(header.Root, address, storageRoot, nil) if err != nil { return nil, err } @@ -414,7 +412,7 @@ func (api *BlockChainAPI) GetProof(ctx context.Context, address common.Address, } } // Create the accountProof. - tr, err := trie.NewStateTrie(trie.StateTrieID(header.Root), statedb.Database().TrieDB()) + tr, err := statedb.Database().OpenTrie(header.Root) if err != nil { return nil, err } diff --git a/rlp/raw.go b/rlp/raw.go index a696cb18c9..f284da3f6d 100644 --- a/rlp/raw.go +++ b/rlp/raw.go @@ -153,6 +153,11 @@ func CountValues(b []byte) (int, error) { } // SplitListValues extracts the raw elements from the list RLP-encoding blob. +// +// Note: the returned slice must not be modified, as it shares the same +// backing array as the original slice. It's acceptable to deep-copy the elements +// out if necessary, but let's stick with this approach for less allocation +// overhead. func SplitListValues(b []byte) ([][]byte, error) { b, _, err := SplitList(b) if err != nil { diff --git a/triedb/database.go b/triedb/database.go index d2637bd909..e7e47bb91a 100644 --- a/triedb/database.go +++ b/triedb/database.go @@ -129,8 +129,8 @@ func (db *Database) StateReader(blockRoot common.Hash) (database.StateReader, er return db.backend.StateReader(blockRoot) } -// HistoricReader constructs a reader for accessing the requested historic state. 
-func (db *Database) HistoricReader(root common.Hash) (*pathdb.HistoricalStateReader, error) { +// HistoricStateReader constructs a reader for accessing the requested historic state. +func (db *Database) HistoricStateReader(root common.Hash) (*pathdb.HistoricalStateReader, error) { pdb, ok := db.backend.(*pathdb.Database) if !ok { return nil, errors.New("not supported") @@ -138,6 +138,15 @@ func (db *Database) HistoricReader(root common.Hash) (*pathdb.HistoricalStateRea return pdb.HistoricReader(root) } +// HistoricNodeReader constructs a reader for accessing the historical trie node. +func (db *Database) HistoricNodeReader(root common.Hash) (*pathdb.HistoricalNodeReader, error) { + pdb, ok := db.backend.(*pathdb.Database) + if !ok { + return nil, errors.New("not supported") + } + return pdb.HistoricNodeReader(root) +} + // Update performs a state transition by committing dirty nodes contained in the // given set in order to update state from the specified parent to the specified // root. The held pre-images accumulated up to this point will be flushed in case diff --git a/triedb/pathdb/config.go b/triedb/pathdb/config.go index 0da8604b6c..c236b34333 100644 --- a/triedb/pathdb/config.go +++ b/triedb/pathdb/config.go @@ -43,6 +43,26 @@ const ( // Do not increase the buffer size arbitrarily, otherwise the system // pause time will increase when the database writes happen. defaultBufferSize = 64 * 1024 * 1024 + + // maxFullValueCheckpoint defines the maximum allowed encoding frequency (1/16) + // for storing nodes in full format. With this setting, a node may be written + // to the trienode history as a full value at the specified frequency. + // + // Note that the frequency is not strict: the actual decision is probabilistic. + // Only the overall long-term full-value encoding rate is enforced. + // + // Values beyond this limit are considered ineffective, as the trienode history + // is already well compressed. 
Increasing it further will only degrade read + // performance linearly. + maxFullValueCheckpoint = 16 + + // defaultFullValueCheckpoint defines the default full-value encoding frequency + // (1/8) for storing nodes in full format. With this setting, nodes may be + // written to the trienode history as full values at the specified rate. + // + // This strikes a balance between effective compression of the trienode history + // and acceptable read performance. + defaultFullValueCheckpoint = 8 ) var ( @@ -54,6 +74,7 @@ var ( var Defaults = &Config{ StateHistory: params.FullImmutabilityThreshold, TrienodeHistory: -1, + FullValueCheckpoint: defaultFullValueCheckpoint, EnableStateIndexing: false, TrieCleanSize: defaultTrieCleanSize, StateCleanSize: defaultStateCleanSize, @@ -62,22 +83,26 @@ var Defaults = &Config{ // ReadOnly is the config in order to open database in read only mode. var ReadOnly = &Config{ - ReadOnly: true, - TrienodeHistory: -1, - TrieCleanSize: defaultTrieCleanSize, - StateCleanSize: defaultStateCleanSize, + ReadOnly: true, + TrienodeHistory: -1, + TrieCleanSize: defaultTrieCleanSize, + StateCleanSize: defaultStateCleanSize, + FullValueCheckpoint: defaultFullValueCheckpoint, } // Config contains the settings for database. 
type Config struct { + TrieCleanSize int // Maximum memory allowance (in bytes) for caching clean trie data + StateCleanSize int // Maximum memory allowance (in bytes) for caching clean state data + WriteBufferSize int // Maximum memory allowance (in bytes) for write buffer + ReadOnly bool // Flag whether the database is opened in read only mode + JournalDirectory string // Absolute path of journal directory (null means the journal data is persisted in key-value store) + + // Historical state configurations StateHistory uint64 // Number of recent blocks to maintain state history for, 0: full chain TrienodeHistory int64 // Number of recent blocks to maintain trienode history for, 0: full chain, negative: disable EnableStateIndexing bool // Whether to enable state history indexing for external state access - TrieCleanSize int // Maximum memory allowance (in bytes) for caching clean trie data - StateCleanSize int // Maximum memory allowance (in bytes) for caching clean state data - WriteBufferSize int // Maximum memory allowance (in bytes) for write buffer - ReadOnly bool // Flag whether the database is opened in read only mode - JournalDirectory string // Absolute path of journal directory (null means the journal data is persisted in key-value store) + FullValueCheckpoint uint32 // The rate at which trie nodes are encoded in full-value format // Testing configurations SnapshotNoBuild bool // Flag Whether the state generation is disabled @@ -93,6 +118,14 @@ func (c *Config) sanitize() *Config { log.Warn("Sanitizing invalid node buffer size", "provided", common.StorageSize(conf.WriteBufferSize), "updated", common.StorageSize(maxBufferSize)) conf.WriteBufferSize = maxBufferSize } + if conf.FullValueCheckpoint > maxFullValueCheckpoint { + log.Warn("Sanitizing trienode history full value checkpoint", "provided", conf.FullValueCheckpoint, "updated", maxFullValueCheckpoint) + conf.FullValueCheckpoint = maxFullValueCheckpoint + } + if conf.FullValueCheckpoint == 0 { + 
conf.FullValueCheckpoint = 1 + log.Info("Disabling diff mode trie node history encoding") + } return &conf } diff --git a/triedb/pathdb/disklayer.go b/triedb/pathdb/disklayer.go index d6e997e044..911959dfa9 100644 --- a/triedb/pathdb/disklayer.go +++ b/triedb/pathdb/disklayer.go @@ -346,8 +346,9 @@ func (dl *diskLayer) writeHistory(typ historyType, diff *diffLayer) (bool, error case typeTrienodeHistory: freezer = dl.db.trienodeFreezer indexer = dl.db.trienodeIndexer - writeFunc = writeTrienodeHistory - + writeFunc = func(writer ethdb.AncientWriter, diff *diffLayer) error { + return writeTrienodeHistory(writer, diff, dl.db.config.FullValueCheckpoint) + } // Skip the history commit if the trienode history is not permitted if dl.db.config.TrienodeHistory < 0 { return false, nil diff --git a/triedb/pathdb/history.go b/triedb/pathdb/history.go index 9efaa3ab24..820c3c03bf 100644 --- a/triedb/pathdb/history.go +++ b/triedb/pathdb/history.go @@ -158,8 +158,6 @@ func newStorageIdent(addressHash common.Hash, storageHash common.Hash) stateIden // newTrienodeIdent constructs a state identifier for a trie node. // The address denotes the address hash of the associated account; // the path denotes the path of the node within the trie; -// -// nolint:unused func newTrienodeIdent(addressHash common.Hash, path string) stateIdent { return stateIdent{ typ: typeTrienode, diff --git a/triedb/pathdb/history_index_iterator.go b/triedb/pathdb/history_index_iterator.go index 076baaa9e5..e4aca24f5d 100644 --- a/triedb/pathdb/history_index_iterator.go +++ b/triedb/pathdb/history_index_iterator.go @@ -609,3 +609,48 @@ func (it *indexIterator) Error() error { func (it *indexIterator) ID() uint64 { return it.blockIt.ID() } + +// seqIter provides a simple iterator over a contiguous sequence of +// unsigned integers, ending at end (end is included). 
+type seqIter struct { + cur uint64 // current position + end uint64 // iteration stops at end-1 + done bool // true when iteration is exhausted +} + +func newSeqIter(last uint64) *seqIter { + return &seqIter{end: last + 1} +} + +// SeekGT positions the iterator at the smallest element > id. +// Returns false if no such element exists. +func (it *seqIter) SeekGT(id uint64) bool { + if id+1 >= it.end { + it.done = true + return false + } + it.cur = id + 1 + it.done = false + return true +} + +// Next advances the iterator. Returns false if exhausted. +func (it *seqIter) Next() bool { + if it.done { + return false + } + if it.cur+1 < it.end { + it.cur++ + return true + } + // this was the last element + it.done = true + return false +} + +// ID returns the id of the element where the iterator is positioned at. +func (it *seqIter) ID() uint64 { return it.cur } + +// Error returns any accumulated error. Exhausting all the elements is not +// considered to be an error. +func (it *seqIter) Error() error { return nil } diff --git a/triedb/pathdb/history_index_iterator_test.go b/triedb/pathdb/history_index_iterator_test.go index 8b7591ce26..a11fd17666 100644 --- a/triedb/pathdb/history_index_iterator_test.go +++ b/triedb/pathdb/history_index_iterator_test.go @@ -313,3 +313,49 @@ func TestIndexIteratorTraversal(t *testing.T) { } } } + +func TestSeqIterBasicIteration(t *testing.T) { + it := newSeqIter(5) // iterates over [1..5] + it.SeekGT(0) + + var ( + got []uint64 + expected = []uint64{1, 2, 3, 4, 5} + ) + got = append(got, it.ID()) + for it.Next() { + got = append(got, it.ID()) + } + if len(got) != len(expected) { + t.Fatalf("iteration length mismatch: got %v, expected %v", got, expected) + } + for i := range expected { + if got[i] != expected[i] { + t.Fatalf("element mismatch at %d: got %d, expected %d", i, got[i], expected[i]) + } + } +} + +func TestSeqIterSeekGT(t *testing.T) { + it := newSeqIter(5) + + tests := []struct { + input uint64 + ok bool + expected uint64 + 
}{ + {0, true, 1}, + {1, true, 2}, + {4, true, 5}, + {5, false, 0}, // 6 is out of range + } + for _, tt := range tests { + ok := it.SeekGT(tt.input) + if ok != tt.ok { + t.Fatalf("SeekGT(%d) ok mismatch: got %v, expected %v", tt.input, ok, tt.ok) + } + if ok && it.ID() != tt.expected { + t.Fatalf("SeekGT(%d) positioned at %d, expected %d", tt.input, it.ID(), tt.expected) + } + } +} diff --git a/triedb/pathdb/history_indexer.go b/triedb/pathdb/history_indexer.go index 18d71f6dae..c987b380ed 100644 --- a/triedb/pathdb/history_indexer.go +++ b/triedb/pathdb/history_indexer.go @@ -29,7 +29,6 @@ import ( "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/rlp" - "golang.org/x/exp/maps" "golang.org/x/sync/errgroup" ) @@ -247,8 +246,8 @@ func (b *batchIndexer) finish(force bool) error { log.Debug("Committed batch indexer", "type", b.typ, "entries", len(b.index), "records", b.pending, "size", common.StorageSize(batchSize), "elapsed", common.PrettyDuration(time.Since(start))) b.pending = 0 - maps.Clear(b.index) - maps.Clear(b.ext) + clear(b.index) + clear(b.ext) return nil } diff --git a/triedb/pathdb/history_reader.go b/triedb/pathdb/history_reader.go index 04cd869d2b..a2644c8fd4 100644 --- a/triedb/pathdb/history_reader.go +++ b/triedb/pathdb/history_reader.go @@ -22,11 +22,17 @@ import ( "errors" "fmt" "math" + "slices" "sort" + "sync" + "sync/atomic" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/ethdb" + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/rlp" + "golang.org/x/sync/errgroup" ) // indexReaderWithLimitTag is a wrapper around indexReader that includes an @@ -260,9 +266,204 @@ func (r *stateHistoryReader) read(state stateIdentQuery, stateID uint64, lastID return r.readStorage(state.address, state.storageKey, state.storageHash, historyID) } +// trienodeReader is the structure to access historical trienode 
data. +type trienodeReader struct { + disk ethdb.KeyValueReader + freezer ethdb.AncientReader + readConcurrency int // The concurrency used to load trie node data from history +} + +// newTrienodeReader constructs the history reader with the supplied db +// for accessing historical trie nodes. +func newTrienodeReader(disk ethdb.KeyValueReader, freezer ethdb.AncientReader, readConcurrency int) *trienodeReader { + return &trienodeReader{ + disk: disk, + freezer: freezer, + readConcurrency: readConcurrency, + } +} + +// readTrienode retrieves the trienode data from the specified trienode history. +func (r *trienodeReader) readTrienode(addrHash common.Hash, path string, historyID uint64) ([]byte, error) { + tr, err := newTrienodeHistoryReader(historyID, r.freezer) + if err != nil { + return nil, err + } + return tr.read(addrHash, path) +} + +// assembleNode takes a complete node value as the base and applies a list of +// mutation records to assemble the final node value accordingly. +func assembleNode(blob []byte, elements [][][]byte, indices [][]int) ([]byte, error) { + if len(elements) == 0 && len(indices) == 0 { + return blob, nil + } + children, err := rlp.SplitListValues(blob) + if err != nil { + return nil, err + } + for i := 0; i < len(elements); i++ { + for j, pos := range indices[i] { + children[pos] = elements[i][j] + } + } + return rlp.MergeListValues(children) +} + +type resultQueue struct { + data [][]byte + lock sync.Mutex +} + +func newResultQueue(size int) *resultQueue { + return &resultQueue{ + data: make([][]byte, size, size*2), + } +} + +func (q *resultQueue) set(data []byte, pos int) { + q.lock.Lock() + defer q.lock.Unlock() + + if pos >= len(q.data) { + newSize := pos + 1 + if cap(q.data) < newSize { + newData := make([][]byte, newSize, newSize*2) + copy(newData, q.data) + q.data = newData + } + q.data = q.data[:newSize] + } + q.data[pos] = data +} + +func (r *trienodeReader) readOptimized(state stateIdent, it HistoryIndexIterator, latestValue 
[]byte) ([]byte, error) {
+	var (
+		elements [][][]byte
+		indices  [][]int
+		blob     = latestValue
+
+		eg    errgroup.Group
+		seq   int
+		term  atomic.Bool
+		queue = newResultQueue(r.readConcurrency * 2)
+	)
+	eg.SetLimit(r.readConcurrency)
+
+	for {
+		id, pos := it.ID(), seq
+		seq += 1
+
+		eg.Go(func() error {
+			// In optimistic readahead mode, it is theoretically possible to encounter a
+			// NotFound error, where the trie node does not actually exist and the iterator
+			// reports a false-positive mutation record. Terminate the iterator if so, as
+			// all the necessary data (checkpoints and all diffs) required has already been
+			// fetched.
+			data, err := r.readTrienode(state.addressHash, state.path, id)
+			if err != nil {
+				term.Store(true)
+				log.Debug("Failed to read the trienode", "err", err)
+				return nil
+			}
+			full, _, err := decodeNodeFull(data)
+			if err != nil {
+				term.Store(true)
+				return err
+			}
+			if full {
+				term.Store(true)
+			}
+			queue.set(data, pos)
+			return nil
+		})
+		if term.Load() || !it.Next() {
+			break
+		}
+	}
+	if err := eg.Wait(); err != nil {
+		return nil, err
+	}
+	if err := it.Error(); err != nil {
+		return nil, err
+	}
+	for i := 0; i < seq; i++ {
+		isComplete, fullBlob, err := decodeNodeFull(queue.data[i])
+		if err != nil {
+			return nil, err
+		}
+		// Terminate the loop if the node with full value has been found
+		if isComplete {
+			blob = fullBlob
+			break
+		}
+		// Decode the partially encoded node and keep iterating the node history
+		// until the node with full value is reached.
+		element, index, err := decodeNodeCompressed(queue.data[i])
+		if err != nil {
+			return nil, err
+		}
+		elements, indices = append(elements, element), append(indices, index)
+	}
+	slices.Reverse(elements)
+	slices.Reverse(indices)
+	return assembleNode(blob, elements, indices)
+}
+
+// read retrieves the trie node data associated with the stateID. 
+// stateID: represents the ID of the state of the specified version; +// lastID: represents the ID of the latest/newest trie node history; +// latestValue: represents the trie node value at the current disk layer with ID == lastID; +func (r *trienodeReader) read(state stateIdent, stateID uint64, lastID uint64, latestValue []byte) ([]byte, error) { + _, err := checkStateAvail(state, typeTrienodeHistory, r.freezer, stateID, lastID, r.disk) + if err != nil { + return nil, err + } + // Construct the index iterator to traverse the trienode history + var ( + scheme *indexScheme + it HistoryIndexIterator + ) + if state.addressHash == (common.Hash{}) { + scheme = accountIndexScheme + } else { + scheme = storageIndexScheme + } + if state.addressHash == (common.Hash{}) && state.path == "" { + it = newSeqIter(lastID) + } else { + chunkID, nodeID := scheme.splitPathLast(state.path) + + queryIdent := state + queryIdent.path = chunkID + ir, err := newIndexReader(r.disk, queryIdent, scheme.getBitmapSize(len(chunkID))) + if err != nil { + return nil, err + } + filter := extFilter(nodeID) + it = ir.newIterator(&filter) + } + // Move the iterator to the first element whose id is greater than + // the given number. + found := it.SeekGT(stateID) + if err := it.Error(); err != nil { + return nil, err + } + // The state was not found in the trie node histories, as it has not been + // modified since stateID. Use the data from the associated disk layer + // instead (full value node as always) + if !found { + return latestValue, nil + } + return r.readOptimized(state, it, latestValue) +} + // checkStateAvail determines whether the requested historical state is available // for accessing. What's more, it also returns the ID of the latest indexed history // entry for subsequent usage. +// +// TODO(rjl493456442) it's really expensive to perform the check for every state +// retrieval, please rework this later. 
func checkStateAvail(state stateIdent, exptyp historyType, freezer ethdb.AncientReader, stateID uint64, lastID uint64, db ethdb.KeyValueReader) (uint64, error) { if toHistoryType(state.typ) != exptyp { return 0, fmt.Errorf("unsupported history type: %d, want: %v", toHistoryType(state.typ), exptyp) diff --git a/triedb/pathdb/history_trienode.go b/triedb/pathdb/history_trienode.go index c584ac696c..f8ddc0665c 100644 --- a/triedb/pathdb/history_trienode.go +++ b/triedb/pathdb/history_trienode.go @@ -436,6 +436,7 @@ func decodeSingle(keySection []byte, onValue func([]byte, int, int) error) ([]st } key = unsharedKey } else { + // TODO(rjl493456442) mitigate the allocation pressure. if int(nShared) > len(prevKey) { return nil, fmt.Errorf("unexpected shared key prefix: %d, prefix key length: %d", nShared, len(prevKey)) } @@ -556,7 +557,11 @@ type singleTrienodeHistoryReader struct { valueInternalOffsets map[string]iRange // value offset within the single trie data } +// TODO(rjl493456442): This function performs a large number of allocations. +// Given the large data size, a byte pool could be used to mitigate this. func newSingleTrienodeHistoryReader(id uint64, reader ethdb.AncientReader, keyRange iRange, valueRange iRange) (*singleTrienodeHistoryReader, error) { + // TODO(rjl493456442) the data size is known in advance, allocating the + // dedicated byte slices from the pool. keyData, err := rawdb.ReadTrienodeHistoryKeySection(reader, id, uint64(keyRange.start), uint64(keyRange.len())) if err != nil { return nil, err @@ -672,9 +677,13 @@ func (r *trienodeHistoryReader) read(owner common.Hash, path string) ([]byte, er } // writeTrienodeHistory persists the trienode history associated with the given diff layer. 
-func writeTrienodeHistory(writer ethdb.AncientWriter, dl *diffLayer) error { +func writeTrienodeHistory(writer ethdb.AncientWriter, dl *diffLayer, rate uint32) error { start := time.Now() - h := newTrienodeHistory(dl.rootHash(), dl.parent.rootHash(), dl.block, dl.nodes.nodeOrigin) + nodes, err := dl.nodes.encodeNodeHistory(dl.root, rate) + if err != nil { + return err + } + h := newTrienodeHistory(dl.rootHash(), dl.parent.rootHash(), dl.block, nodes) header, keySection, valueSection, err := h.encode() if err != nil { return err diff --git a/triedb/pathdb/metrics.go b/triedb/pathdb/metrics.go index c4d6be28f7..a0a626f9b5 100644 --- a/triedb/pathdb/metrics.go +++ b/triedb/pathdb/metrics.go @@ -85,8 +85,9 @@ var ( lookupAddLayerTimer = metrics.NewRegisteredResettingTimer("pathdb/lookup/add/time", nil) lookupRemoveLayerTimer = metrics.NewRegisteredResettingTimer("pathdb/lookup/remove/time", nil) - historicalAccountReadTimer = metrics.NewRegisteredResettingTimer("pathdb/history/account/reads", nil) - historicalStorageReadTimer = metrics.NewRegisteredResettingTimer("pathdb/history/storage/reads", nil) + historicalAccountReadTimer = metrics.NewRegisteredResettingTimer("pathdb/history/account/reads", nil) + historicalStorageReadTimer = metrics.NewRegisteredResettingTimer("pathdb/history/storage/reads", nil) + historicalTrienodeReadTimer = metrics.NewRegisteredResettingTimer("pathdb/history/trienode/reads", nil) ) // Metrics in generation diff --git a/triedb/pathdb/nodes.go b/triedb/pathdb/nodes.go index b7290ed235..62c72c1953 100644 --- a/triedb/pathdb/nodes.go +++ b/triedb/pathdb/nodes.go @@ -517,6 +517,8 @@ func encodeNodeCompressed(addExtension bool, elements [][]byte, indices []int) [ // // - metadata byte layout (1 byte): 0b0 // - node value +// +// TODO(rjl493456442) it's not allocation efficient, please improve it. 
func encodeNodeFull(value []byte) []byte { enc := make([]byte, len(value)+1) copy(enc[1:], value) @@ -596,21 +598,17 @@ func decodeNodeCompressed(data []byte) ([][]byte, []int, error) { } // decodeNodeFull decodes the byte stream of full value trie node. -func decodeNodeFull(data []byte) ([]byte, error) { +func decodeNodeFull(data []byte) (bool, []byte, error) { if len(data) < 1 { - return nil, errors.New("invalid data: too short") + return false, nil, errors.New("invalid data: too short") } flag := data[0] if flag != byte(0) { - return nil, errors.New("invalid data: compressed node value") + return false, nil, nil } - return data[1:], nil + return true, data[1:], nil } -// encodeFullFrequency specifies the frequency (1/16) for encoding node in -// full format. TODO(rjl493456442) making it configurable. -const encodeFullFrequency = 16 - // encodeNodeHistory encodes the history of a node. Typically, the original values // of dirty nodes serve as the history, but this can lead to significant storage // overhead. @@ -626,7 +624,7 @@ const encodeFullFrequency = 16 // history records, which is computationally and IO intensive. To mitigate this, we // periodically record the full value of a node as a checkpoint. The frequency of // these checkpoints is a tradeoff between the compression rate and read overhead. 
-func (s *nodeSetWithOrigin) encodeNodeHistory(root common.Hash) (map[common.Hash]map[string][]byte, error) { +func (s *nodeSetWithOrigin) encodeNodeHistory(root common.Hash, rate uint32) (map[common.Hash]map[string][]byte, error) { var ( // the set of all encoded node history elements nodes = make(map[common.Hash]map[string][]byte) @@ -644,7 +642,7 @@ func (s *nodeSetWithOrigin) encodeNodeHistory(root common.Hash) (map[common.Hash h.Write(root.Bytes()) h.Write(owner.Bytes()) h.Write([]byte(path)) - return h.Sum32()%uint32(encodeFullFrequency) == 0 + return h.Sum32()%rate == 0 } ) for owner, origins := range s.nodeOrigin { @@ -664,6 +662,9 @@ func (s *nodeSetWithOrigin) encodeNodeHistory(root common.Hash) (map[common.Hash } encodeFull := encodeFullValue(owner, path) if !encodeFull { + // TODO(rjl493456442) the diff-mode reencoding can take non-trivial + // time, like 1-2ms per block, is there any way to mitigate the overhead? + // Partial encoding is required, try to find the node diffs and // fallback to the full-value encoding if fails. // diff --git a/triedb/pathdb/reader.go b/triedb/pathdb/reader.go index c76d88b594..f55e015ee6 100644 --- a/triedb/pathdb/reader.go +++ b/triedb/pathdb/reader.go @@ -318,3 +318,90 @@ func (r *HistoricalStateReader) Storage(address common.Address, key common.Hash) } return r.reader.read(newStorageIdentQuery(address, addrHash, key, keyHash), r.id, dl.stateID(), latest) } + +// HistoricalNodeReader is a wrapper over history reader, providing access to +// historical trie node data. +type HistoricalNodeReader struct { + db *Database + reader *trienodeReader + id uint64 +} + +// HistoricNodeReader constructs a reader for accessing the requested historic state. 
+func (db *Database) HistoricNodeReader(root common.Hash) (*HistoricalNodeReader, error) { + // Bail out if the state history hasn't been fully indexed + if db.trienodeIndexer == nil || db.trienodeFreezer == nil { + return nil, fmt.Errorf("historical state %x is not available", root) + } + if !db.trienodeIndexer.inited() { + return nil, errors.New("trienode histories haven't been fully indexed yet") + } + // - States at the current disk layer or above are directly accessible + // via `db.NodeReader`. + // + // - States older than the current disk layer (including the disk layer + // itself) are available via `db.HistoricalNodeReader`. + id := rawdb.ReadStateID(db.diskdb, root) + if id == nil { + return nil, fmt.Errorf("state %#x is not available", root) + } + // Ensure the requested trienode history is canonical, states on side chain + // are not accessible. + meta, err := readTrienodeMetadata(db.trienodeFreezer, *id+1) + if err != nil { + return nil, err // e.g., the referred trienode history has been pruned + } + if meta.parent != root { + return nil, fmt.Errorf("state %#x is not canonincal", root) + } + return &HistoricalNodeReader{ + id: *id, + db: db, + reader: newTrienodeReader(db.diskdb, db.trienodeFreezer, int(db.config.FullValueCheckpoint)), + }, nil +} + +// Node directly retrieves the trie node data associated with a particular path, +// within a particular account. An error will be returned if the read operation +// exits abnormally. Specifically, if the layer is already stale. +// +// Note: +// - the returned trie node data is not a copy, please don't modify it. +// - an error will be returned if the requested trie node is not found in database. 
+func (r *HistoricalNodeReader) Node(owner common.Hash, path []byte, hash common.Hash) ([]byte, error) { + defer func(start time.Time) { + historicalTrienodeReadTimer.UpdateSince(start) + }(time.Now()) + + // TODO(rjl493456442): Theoretically, the obtained disk layer could become stale + // within a very short time window. + // + // While reading the account data while holding `db.tree.lock` can resolve + // this issue, but it will introduce a heavy contention over the lock. + // + // Let's optimistically assume the situation is very unlikely to happen, + // and try to define a low granularity lock if the current approach doesn't + // work later. + dl := r.db.tree.bottom() + latest, h, _, err := dl.node(owner, path, 0) + if err != nil { + return nil, err + } + if h == hash { + return latest, nil + } + blob, err := r.reader.read(newTrienodeIdent(owner, string(path)), r.id, dl.stateID(), latest) + if err != nil { + return nil, err + } + // Error out if the local one is inconsistent with the target. + if crypto.Keccak256Hash(blob) != hash { + blobHex := "nil" + if len(blob) > 0 { + blobHex = hexutil.Encode(blob) + } + log.Error("Unexpected historical trie node", "owner", owner.Hex(), "path", path, "blob", blobHex) + return nil, fmt.Errorf("unexpected historical trie node: (%x %v), blob: %s", owner, path, blobHex) + } + return blob, nil +} From 251b86310748b5db2256104b2f53554013493fbd Mon Sep 17 00:00:00 2001 From: Jonny Rhea <5555162+jrhea@users.noreply.github.com> Date: Thu, 22 Jan 2026 13:16:02 -0600 Subject: [PATCH 259/277] core/vm: update EIP-8024 - Missing immediate byte is now treated as 0x00 (#33614) This PR updates the EIP-8024 implementation to match the latest spec clarification. 
--------- Co-authored-by: lightclient --- core/vm/instructions.go | 24 +++++++------- core/vm/instructions_test.go | 62 +++++++++++++++++++++--------------- 2 files changed, 49 insertions(+), 37 deletions(-) diff --git a/core/vm/instructions.go b/core/vm/instructions.go index baf6df8117..a57e9caa0f 100644 --- a/core/vm/instructions.go +++ b/core/vm/instructions.go @@ -973,11 +973,11 @@ func opDupN(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) { code := scope.Contract.Code i := *pc + 1 - // Ensure an immediate byte exists after DUPN - if i >= uint64(len(code)) { - return nil, &ErrInvalidOpCode{opcode: INVALID} + // If the immediate byte is missing, treat as 0x00 (same convention as PUSHn). + var x byte + if i < uint64(len(code)) { + x = code[i] } - x := code[i] // This range is excluded to preserve compatibility with existing opcodes. if x > 90 && x < 128 { @@ -1000,11 +1000,11 @@ func opSwapN(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) { code := scope.Contract.Code i := *pc + 1 - // Ensure an immediate byte exists after SWAPN - if i >= uint64(len(code)) { - return nil, &ErrInvalidOpCode{opcode: INVALID} + // If the immediate byte is missing, treat as 0x00 (same convention as PUSHn). + var x byte + if i < uint64(len(code)) { + x = code[i] } - x := code[i] // This range is excluded to preserve compatibility with existing opcodes. if x > 90 && x < 128 { @@ -1029,11 +1029,11 @@ func opExchange(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) { code := scope.Contract.Code i := *pc + 1 - // Ensure an immediate byte exists after EXCHANGE - if i >= uint64(len(code)) { - return nil, &ErrInvalidOpCode{opcode: INVALID} + // If the immediate byte is missing, treat as 0x00 (same convention as PUSHn). + var x byte + if i < uint64(len(code)) { + x = code[i] } - x := code[i] // This range is excluded both to preserve compatibility with existing opcodes // and to keep decode_pair’s 16-aligned arithmetic mapping valid (0–79, 128–255). 
diff --git a/core/vm/instructions_test.go b/core/vm/instructions_test.go index f38da7fb22..56cb2686a6 100644 --- a/core/vm/instructions_test.go +++ b/core/vm/instructions_test.go @@ -1027,6 +1027,15 @@ func TestEIP8024_Execution(t *testing.T) { 1, }, }, + { + name: "DUPN_MISSING_IMMEDIATE", + codeHex: "60016000808080808080808080808080808080e6", + wantVals: []uint64{ + 1, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 1, + }, + }, { name: "SWAPN", codeHex: "600160008080808080808080808080808080806002e700", @@ -1036,11 +1045,29 @@ func TestEIP8024_Execution(t *testing.T) { 2, }, }, + { + name: "SWAPN_MISSING_IMMEDIATE", + codeHex: "600160008080808080808080808080808080806002e7", + wantVals: []uint64{ + 1, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 2, + }, + }, { name: "EXCHANGE", codeHex: "600060016002e801", wantVals: []uint64{2, 0, 1}, }, + { + name: "EXCHANGE_MISSING_IMMEDIATE", + codeHex: "600060006000600060006000600060006000600060006000600060006000600060006000600060006000600060006000600060006000600060016002e8", + wantVals: []uint64{ + 2, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 1, + }, + }, { name: "INVALID_SWAPN_LOW", codeHex: "e75b", @@ -1092,21 +1119,6 @@ func TestEIP8024_Execution(t *testing.T) { codeHex: "60016002e801", // (n,m)=(1,2), need 3 items, have 2 wantErr: true, }, - { - name: "MISSING_IMMEDIATE_DUPN", - codeHex: "e6", // no operand - wantErr: true, - }, - { - name: "MISSING_IMMEDIATE_SWAPN", - codeHex: "e7", // no operand - wantErr: true, - }, - { - name: "MISSING_IMMEDIATE_EXCHANGE", - codeHex: "e8", // no operand - wantErr: true, - }, { name: "PC_INCREMENT", codeHex: "600060006000e80115", @@ -1123,25 +1135,25 @@ func TestEIP8024_Execution(t *testing.T) { var err error for pc < uint64(len(code)) && err == nil { op := code[pc] - switch op { - case 0x00: + switch OpCode(op) { + case STOP: return - case 0x60: + case PUSH1: _, err = opPush1(&pc, evm, scope) - case 0x80: + case DUP1: dup1 := 
makeDup(1) _, err = dup1(&pc, evm, scope) - case 0x56: + case JUMP: _, err = opJump(&pc, evm, scope) - case 0x5b: + case JUMPDEST: _, err = opJumpdest(&pc, evm, scope) - case 0x15: + case ISZERO: _, err = opIszero(&pc, evm, scope) - case 0xe6: + case DUPN: _, err = opDupN(&pc, evm, scope) - case 0xe7: + case SWAPN: _, err = opSwapN(&pc, evm, scope) - case 0xe8: + case EXCHANGE: _, err = opExchange(&pc, evm, scope) default: err = &ErrInvalidOpCode{opcode: OpCode(op)} From 9a8e14e77eb1b81297438211d8f946038888345f Mon Sep 17 00:00:00 2001 From: Csaba Kiraly Date: Fri, 23 Jan 2026 13:35:14 +0100 Subject: [PATCH 260/277] core/txpool/legacypool: fix stale counter (#33653) Calling `pool.priced.Removed` is needed to keep it in sync with `pool.all.Remove`. It was called in other occurrences, but not here. The counter is used for internal heap management. It was working even without this, just not calling reheap at the intended frequency. Signed-off-by: Csaba Kiraly --- core/txpool/legacypool/legacypool.go | 1 + 1 file changed, 1 insertion(+) diff --git a/core/txpool/legacypool/legacypool.go b/core/txpool/legacypool/legacypool.go index eb1fe23d5f..1bfc6e60b2 100644 --- a/core/txpool/legacypool/legacypool.go +++ b/core/txpool/legacypool/legacypool.go @@ -1568,6 +1568,7 @@ func (pool *LegacyPool) demoteUnexecutables() { // Internal shuffle shouldn't touch the lookup set. 
pool.enqueueTx(hash, tx, false) } + pool.priced.Removed(len(olds) + len(drops)) pendingGauge.Dec(int64(len(olds) + len(drops) + len(invalids))) // If there's a gap in front, alert (should never happen) and postpone all transactions From c2595381bf4a6638a910459a1f678f354f9b9909 Mon Sep 17 00:00:00 2001 From: rjl493456442 Date: Mon, 26 Jan 2026 18:25:53 +0800 Subject: [PATCH 261/277] core: extend the code reader statistics (#33659) This PR extends the statistics of contract code read by adding these fields: - **CacheHitBytes**: the total number of bytes served by cache - **CacheMissBytes**: the total number of bytes read on cache miss - **CodeReadBytes**: the total number of bytes for contract code read --- core/blockchain.go | 2 ++ core/blockchain_stats.go | 6 +++-- core/state/reader.go | 52 +++++++++++++++++++++++++------------- core/state/state_object.go | 1 + core/state/statedb.go | 6 +++++ 5 files changed, 48 insertions(+), 19 deletions(-) diff --git a/core/blockchain.go b/core/blockchain.go index 8741b8b937..db3f71c44d 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -76,6 +76,7 @@ var ( storageUpdateTimer = metrics.NewRegisteredResettingTimer("chain/storage/updates", nil) storageCommitTimer = metrics.NewRegisteredResettingTimer("chain/storage/commits", nil) codeReadTimer = metrics.NewRegisteredResettingTimer("chain/code/reads", nil) + codeReadBytesTimer = metrics.NewRegisteredResettingTimer("chain/code/readbytes", nil) accountCacheHitMeter = metrics.NewRegisteredMeter("chain/account/reads/cache/process/hit", nil) accountCacheMissMeter = metrics.NewRegisteredMeter("chain/account/reads/cache/process/miss", nil) @@ -2238,6 +2239,7 @@ func (bc *BlockChain) ProcessBlock(parentRoot common.Hash, block *types.Block, s stats.StorageUpdated = int(statedb.StorageUpdated.Load()) stats.StorageDeleted = int(statedb.StorageDeleted.Load()) stats.CodeLoaded = statedb.CodeLoaded + stats.CodeLoadBytes = statedb.CodeLoadBytes stats.Execution = ptime - 
(statedb.AccountReads + statedb.StorageReads + statedb.CodeReads) // The time spent on EVM processing stats.Validation = vtime - (statedb.AccountHashes + statedb.AccountUpdates + statedb.StorageUpdates) // The time spent on block validation diff --git a/core/blockchain_stats.go b/core/blockchain_stats.go index b6e9c614c5..e8c5860294 100644 --- a/core/blockchain_stats.go +++ b/core/blockchain_stats.go @@ -46,6 +46,7 @@ type ExecuteStats struct { StorageUpdated int // Number of storage slots updated StorageDeleted int // Number of storage slots deleted CodeLoaded int // Number of contract code loaded + CodeLoadBytes int // Number of bytes read from contract code Execution time.Duration // Time spent on the EVM execution Validation time.Duration // Time spent on the block validation @@ -74,6 +75,7 @@ func (s *ExecuteStats) reportMetrics() { if s.CodeLoaded != 0 { codeReadTimer.Update(s.CodeReads) codeReadSingleTimer.Update(s.CodeReads / time.Duration(s.CodeLoaded)) + codeReadBytesTimer.Update(time.Duration(s.CodeLoadBytes)) } accountUpdateTimer.Update(s.AccountUpdates) // Account updates are complete(in validation) storageUpdateTimer.Update(s.StorageUpdates) // Storage updates are complete(in validation) @@ -123,7 +125,7 @@ Validation: %v State read: %v Account read: %v(%d) Storage read: %v(%d) - Code read: %v(%d) + Code read: %v(%d %v) State write: %v Trie commit: %v @@ -145,7 +147,7 @@ State write: %v common.PrettyDuration(s.AccountReads+s.StorageReads+s.CodeReads), common.PrettyDuration(s.AccountReads), s.AccountLoaded, common.PrettyDuration(s.StorageReads), s.StorageLoaded, - common.PrettyDuration(s.CodeReads), s.CodeLoaded, + common.PrettyDuration(s.CodeReads), s.CodeLoaded, common.StorageSize(s.CodeLoadBytes), // State write common.PrettyDuration(max(s.AccountCommits, s.StorageCommits)+s.TrieDBCommit+s.SnapshotCommit+s.BlockWrite), diff --git a/core/state/reader.go b/core/state/reader.go index 2db9d1f9b4..35b732173b 100644 --- a/core/state/reader.go +++ 
b/core/state/reader.go @@ -58,11 +58,28 @@ type ContractCodeReader interface { CodeSize(addr common.Address, codeHash common.Hash) (int, error) } +// ContractCodeReaderStats aggregates statistics for the contract code reader. +type ContractCodeReaderStats struct { + CacheHit int64 // Number of cache hits + CacheMiss int64 // Number of cache misses + CacheHitBytes int64 // Total bytes served from cache + CacheMissBytes int64 // Total bytes read on cache misses +} + +// HitRate returns the cache hit rate. +func (s ContractCodeReaderStats) HitRate() float64 { + if s.CacheHit == 0 { + return 0 + } + return float64(s.CacheHit) / float64(s.CacheHit+s.CacheMiss) +} + // ContractCodeReaderWithStats extends ContractCodeReader by adding GetStats to // expose statistics of code reader. type ContractCodeReaderWithStats interface { ContractCodeReader - GetStats() (int64, int64) + + GetStats() ContractCodeReaderStats } // StateReader defines the interface for accessing accounts and storage slots @@ -99,13 +116,11 @@ type Reader interface { // ReaderStats wraps the statistics of reader. type ReaderStats struct { - // Cache stats AccountCacheHit int64 AccountCacheMiss int64 StorageCacheHit int64 StorageCacheMiss int64 - ContractCodeHit int64 - ContractCodeMiss int64 + CodeStats ContractCodeReaderStats } // String implements fmt.Stringer, returning string format statistics. 
@@ -113,7 +128,6 @@ func (s ReaderStats) String() string { var ( accountCacheHitRate float64 storageCacheHitRate float64 - contractCodeHitRate float64 ) if s.AccountCacheHit > 0 { accountCacheHitRate = float64(s.AccountCacheHit) / float64(s.AccountCacheHit+s.AccountCacheMiss) * 100 @@ -121,13 +135,10 @@ func (s ReaderStats) String() string { if s.StorageCacheHit > 0 { storageCacheHitRate = float64(s.StorageCacheHit) / float64(s.StorageCacheHit+s.StorageCacheMiss) * 100 } - if s.ContractCodeHit > 0 { - contractCodeHitRate = float64(s.ContractCodeHit) / float64(s.ContractCodeHit+s.ContractCodeMiss) * 100 - } msg := fmt.Sprintf("Reader statistics\n") msg += fmt.Sprintf("account: hit: %d, miss: %d, rate: %.2f\n", s.AccountCacheHit, s.AccountCacheMiss, accountCacheHitRate) msg += fmt.Sprintf("storage: hit: %d, miss: %d, rate: %.2f\n", s.StorageCacheHit, s.StorageCacheMiss, storageCacheHitRate) - msg += fmt.Sprintf("code: hit: %d, miss: %d, rate: %.2f\n", s.ContractCodeHit, s.ContractCodeMiss, contractCodeHitRate) + msg += fmt.Sprintf("code: hit: %d(%v), miss: %d(%v), rate: %.2f\n", s.CodeStats.CacheHit, common.StorageSize(s.CodeStats.CacheHitBytes), s.CodeStats.CacheMiss, common.StorageSize(s.CodeStats.CacheMissBytes), s.CodeStats.HitRate()) return msg } @@ -150,8 +161,10 @@ type cachingCodeReader struct { codeSizeCache *lru.Cache[common.Hash, int] // Cache statistics - hit atomic.Int64 // Number of code lookups found in the cache. - miss atomic.Int64 // Number of code lookups not found in the cache. + hit atomic.Int64 // Number of code lookups found in the cache + miss atomic.Int64 // Number of code lookups not found in the cache + hitBytes atomic.Int64 // Total number of bytes read from cache + missBytes atomic.Int64 // Total number of bytes read from database } // newCachingCodeReader constructs the code reader. 
@@ -169,6 +182,7 @@ func (r *cachingCodeReader) Code(addr common.Address, codeHash common.Hash) ([]b code, _ := r.codeCache.Get(codeHash) if len(code) > 0 { r.hit.Add(1) + r.hitBytes.Add(int64(len(code))) return code, nil } r.miss.Add(1) @@ -177,6 +191,7 @@ func (r *cachingCodeReader) Code(addr common.Address, codeHash common.Hash) ([]b if len(code) > 0 { r.codeCache.Add(codeHash, code) r.codeSizeCache.Add(codeHash, len(code)) + r.missBytes.Add(int64(len(code))) } return code, nil } @@ -202,9 +217,14 @@ func (r *cachingCodeReader) Has(addr common.Address, codeHash common.Hash) bool return len(code) > 0 } -// GetStats returns the cache statistics fo the code reader. -func (r *cachingCodeReader) GetStats() (int64, int64) { - return r.hit.Load(), r.miss.Load() +// GetStats returns the statistics of the code reader. +func (r *cachingCodeReader) GetStats() ContractCodeReaderStats { + return ContractCodeReaderStats{ + CacheHit: r.hit.Load(), + CacheMiss: r.miss.Load(), + CacheHitBytes: r.hitBytes.Load(), + CacheMissBytes: r.missBytes.Load(), + } } // flatReader wraps a database state reader and is safe for concurrent access. @@ -654,13 +674,11 @@ func (r *readerWithStats) Storage(addr common.Address, slot common.Hash) (common // GetStats implements ReaderWithStats, returning the statistics of state reader. 
func (r *readerWithStats) GetStats() ReaderStats { - codeHit, codeMiss := r.ContractCodeReaderWithStats.GetStats() return ReaderStats{ AccountCacheHit: r.accountCacheHit.Load(), AccountCacheMiss: r.accountCacheMiss.Load(), StorageCacheHit: r.storageCacheHit.Load(), StorageCacheMiss: r.storageCacheMiss.Load(), - ContractCodeHit: codeHit, - ContractCodeMiss: codeMiss, + CodeStats: r.ContractCodeReaderWithStats.GetStats(), } } diff --git a/core/state/state_object.go b/core/state/state_object.go index 3b11553f04..2873c3cb8a 100644 --- a/core/state/state_object.go +++ b/core/state/state_object.go @@ -541,6 +541,7 @@ func (s *stateObject) Code() []byte { defer func(start time.Time) { s.db.CodeLoaded += 1 s.db.CodeReads += time.Since(start) + s.db.CodeLoadBytes += len(s.code) }(time.Now()) code, err := s.db.reader.Code(s.address, common.BytesToHash(s.CodeHash())) diff --git a/core/state/statedb.go b/core/state/statedb.go index 39160aa1c7..610e7173cf 100644 --- a/core/state/statedb.go +++ b/core/state/statedb.go @@ -159,6 +159,12 @@ type StateDB struct { StorageUpdated atomic.Int64 // Number of storage slots updated during the state transition StorageDeleted atomic.Int64 // Number of storage slots deleted during the state transition CodeLoaded int // Number of contract code loaded during the state transition + + // CodeLoadBytes is the total number of bytes read from contract code. + // This value may be smaller than the actual number of bytes read, since + // some APIs (e.g. CodeSize) may load the entire code from either the + // cache or the database when the size is not available in the cache. + CodeLoadBytes int } // New creates a new state from a given trie. 
From e25083697396fabcd13163ddf43685c9c9600c61 Mon Sep 17 00:00:00 2001 From: marukai67 Date: Tue, 27 Jan 2026 05:04:12 +0100 Subject: [PATCH 262/277] trie: preallocate slice capacity (#33689) This PR optimizes memory allocation in StateTrie.PrefetchAccount() and StateTrie.PrefetchStorage() by preallocating slice capacity when the final size is known. --- trie/secure_trie.go | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/trie/secure_trie.go b/trie/secure_trie.go index 7c7bd184bf..1f150ede8c 100644 --- a/trie/secure_trie.go +++ b/trie/secure_trie.go @@ -134,9 +134,9 @@ func (t *StateTrie) GetAccountByHash(addrHash common.Hash) (*types.StateAccount, // PrefetchAccount attempts to resolve specific accounts from the database // to accelerate subsequent trie operations. func (t *StateTrie) PrefetchAccount(addresses []common.Address) error { - var keys [][]byte - for _, addr := range addresses { - keys = append(keys, crypto.Keccak256(addr.Bytes())) + keys := make([][]byte, len(addresses)) + for i, addr := range addresses { + keys[i] = crypto.Keccak256(addr.Bytes()) } return t.trie.Prefetch(keys) } @@ -157,9 +157,9 @@ func (t *StateTrie) GetStorage(_ common.Address, key []byte) ([]byte, error) { // PrefetchStorage attempts to resolve specific storage slots from the database // to accelerate subsequent trie operations. 
func (t *StateTrie) PrefetchStorage(_ common.Address, keys [][]byte) error { - var keylist [][]byte - for _, key := range keys { - keylist = append(keylist, crypto.Keccak256(key)) + keylist := make([][]byte, len(keys)) + for i, key := range keys { + keylist[i] = crypto.Keccak256(key) } return t.trie.Prefetch(keylist) } From 181a3ae9e0d6cfdb887454995d22e495fe67e9c4 Mon Sep 17 00:00:00 2001 From: rjl493456442 Date: Tue, 27 Jan 2026 20:05:35 +0800 Subject: [PATCH 263/277] triedb/pathdb: improve trienode reader for searching (#33681) This PR optimizes the historical trie node reader by reworking how data is accessed and memory is managed, reducing allocation overhead significantly. Specifically: - Instead of decoding an entire history object to locate a specific trie node, the reader now searches directly within the history. - Besides, slice pre-allocation can avoid unnecessary deep-copy significantly. --- triedb/pathdb/history_index.go | 2 +- triedb/pathdb/history_index_block.go | 4 +- triedb/pathdb/history_reader.go | 17 +- triedb/pathdb/history_trienode.go | 495 +++++++++++++++---------- triedb/pathdb/history_trienode_test.go | 124 +++++-- 5 files changed, 395 insertions(+), 247 deletions(-) diff --git a/triedb/pathdb/history_index.go b/triedb/pathdb/history_index.go index 0c5eb8db21..eee4c10273 100644 --- a/triedb/pathdb/history_index.go +++ b/triedb/pathdb/history_index.go @@ -42,7 +42,7 @@ func parseIndex(blob []byte, bitmapSize int) ([]*indexBlockDesc, error) { } var ( lastID uint32 - descList []*indexBlockDesc + descList = make([]*indexBlockDesc, 0, len(blob)/size) ) for i := 0; i < len(blob)/size; i++ { var desc indexBlockDesc diff --git a/triedb/pathdb/history_index_block.go b/triedb/pathdb/history_index_block.go index fd43d81b78..bb823bb13f 100644 --- a/triedb/pathdb/history_index_block.go +++ b/triedb/pathdb/history_index_block.go @@ -65,13 +65,13 @@ func (d *indexBlockDesc) encode() []byte { return buf[:] } -// decode unpacks index block descriptor from 
byte stream. It's safe to mutate +// decode unpacks index block descriptor from byte stream. It's unsafe to mutate // the provided byte stream after the function call. func (d *indexBlockDesc) decode(blob []byte) { d.max = binary.BigEndian.Uint64(blob[:8]) d.entries = binary.BigEndian.Uint16(blob[8:10]) d.id = binary.BigEndian.Uint32(blob[10:14]) - d.extBitmap = bytes.Clone(blob[indexBlockDescSize:]) + d.extBitmap = blob[indexBlockDescSize:] // no-deep copy! } // copy returns a deep-copied object. diff --git a/triedb/pathdb/history_reader.go b/triedb/pathdb/history_reader.go index a2644c8fd4..4ae1fb36cb 100644 --- a/triedb/pathdb/history_reader.go +++ b/triedb/pathdb/history_reader.go @@ -284,11 +284,8 @@ func newTrienodeReader(disk ethdb.KeyValueReader, freezer ethdb.AncientReader, r } // readTrienode retrieves the trienode data from the specified trienode history. -func (r *trienodeReader) readTrienode(addrHash common.Hash, path string, historyID uint64) ([]byte, error) { - tr, err := newTrienodeHistoryReader(historyID, r.freezer) - if err != nil { - return nil, err - } +func (r *trienodeReader) readTrienode(addrHash common.Hash, path string, historyID uint64) ([]byte, bool, error) { + tr := newTrienodeHistoryReader(historyID, r.freezer) return tr.read(addrHash, path) } @@ -355,15 +352,19 @@ func (r *trienodeReader) readOptimized(state stateIdent, it HistoryIndexIterator seq += 1 eg.Go(func() error { + data, found, err := r.readTrienode(state.addressHash, state.path, id) + if err != nil { + term.Store(true) + return err + } // In optimistic readahead mode, it is theoretically possible to encounter a // NotFound error, where the trie node does not actually exist and the iterator // reports a false-positive mutation record. Terminate the iterator if so, as // all the necessary data (checkpoints and all diffs) required has already been // fetching. 
- data, err := r.readTrienode(state.addressHash, state.path, id) - if err != nil { + if !found { term.Store(true) - log.Debug("Failed to read the trienode", "err", err) + log.Debug("Failed to read the trienode") return nil } full, _, err := decodeNodeFull(data) diff --git a/triedb/pathdb/history_trienode.go b/triedb/pathdb/history_trienode.go index f8ddc0665c..05a43808c2 100644 --- a/triedb/pathdb/history_trienode.go +++ b/triedb/pathdb/history_trienode.go @@ -46,7 +46,10 @@ import ( // - block number (8 bytes) // // - a lexicographically sorted list of trie IDs -// - the corresponding offsets into the key and value sections for each trie data chunk +// - the corresponding offsets into the key and value sections for each trie +// data chunk. The offsets refer to the end position of each chunk, with +// the assumption that the key and value sections for the first data chunk +// start at offset 0. // // Although some fields (e.g., parent state root, block number) are duplicated // between the state history and the trienode history, these two histories @@ -55,19 +58,16 @@ import ( // // # Key section // The key section stores trie node keys (paths) in a compressed format. -// It also contains relative offsets into the value section for resolving -// the corresponding trie node data. Note that these offsets are relative -// to the data chunk for the trie; the chunk offset must be added to obtain -// the absolute position. +// It also contains relative offsets into the value section for locating +// the corresponding trie node data. These offsets are relative to the +// beginning of the trie data chunk, the chunk's base offset must be added +// to obtain the absolute position in the value section. // // # Value section // The value section is a concatenated byte stream of all trie node data. // Each trie node can be retrieved using the offset and length specified // by its index entry. 
// -// The header and key sections are sufficient for locating a trie node, -// while a partial read of the value section is enough to retrieve its data. - // Header section: // // +----------+------------------+---------------------+---------------------+-------+------------------+---------------------+---------------------| @@ -89,9 +89,9 @@ import ( // // +---- key len ----+ // / \ -// +-------+---------+-----------+---------+-----------------------+-----------------+ -// | shared (varint) | not shared (varint) | value length (varlen) | key (varlen) | -// +-----------------+---------------------+-----------------------+-----------------+ +// +-------+---------+-----------+---------+-----------------------+-----------------------+ +// | shared (varint) | not shared (varint) | value length (varlen) | unshared key (varlen) | +// +-----------------+---------------------+-----------------------+-----------------------+ // // trailer: // @@ -101,9 +101,9 @@ import ( // | restart_1 key offset | restart_1 value offset | ... | restart number (4-bytes) | // +----------------------+------------------------+-----+--------------------------+ // -// Note: Both the key offset and the value offset are relative to the start of -// the trie data chunk. To obtain the absolute offset, add the offset of the -// trie data chunk itself. +// Note: Both the key offset and the value offset are relative to the beginning +// of the trie data chunk. The chunk's base offset must be added to obtain the +// absolute position in the value section. // // Value section: // @@ -140,9 +140,12 @@ type trienodeHistory struct { // newTrienodeHistory constructs a trienode history with the provided trie nodes. 
func newTrienodeHistory(root common.Hash, parent common.Hash, block uint64, nodes map[common.Hash]map[string][]byte) *trienodeHistory { - nodeList := make(map[common.Hash][]string) + nodeList := make(map[common.Hash][]string, len(nodes)) for owner, subset := range nodes { - keys := sort.StringSlice(slices.Collect(maps.Keys(subset))) + keys := make(sort.StringSlice, 0, len(subset)) + for k := range subset { + keys = append(keys, k) + } keys.Sort() nodeList[owner] = keys } @@ -222,11 +225,16 @@ func (h *trienodeHistory) encode() ([]byte, []byte, []byte, error) { restarts []uint32 prefixLen int - internalKeyOffset uint32 // key offset for the trie internally - internalValOffset uint32 // value offset for the trie internally + internalKeyOffset uint32 // key offset within the trie data internally + internalValOffset uint32 // value offset within the trie data internally ) for i, path := range h.nodeList[owner] { key := []byte(path) + + // Track the internal key and value offsets at the beginning of the + // restart section. The absolute offsets within the key and value + // sections should first include the offset of the trie chunk itself + // stored in the header section. if i%trienodeDataBlockRestartLen == 0 { restarts = append(restarts, internalKeyOffset) restarts = append(restarts, internalValOffset) @@ -271,18 +279,13 @@ func (h *trienodeHistory) encode() ([]byte, []byte, []byte, error) { } // Fill the header section with the offsets of the key and value sections. - // Note that the key/value offsets are intentionally tracked *after* encoding - // them into their respective sections, ensuring each offset refers to the end - // position. For n trie chunks, n offset pairs are sufficient to uniquely locate - // the corresponding data. 
- headerSection.Write(owner.Bytes()) // 32 bytes - binary.Write(&headerSection, binary.BigEndian, uint32(keySection.Len())) // 4 bytes - - // The offset to the value section is theoretically unnecessary, since the - // individual value offset is already tracked in the key section. However, - // we still keep it here for two reasons: - // - It's cheap to store (only 4 bytes for each trie). - // - It can be useful for decoding the trie data when key is not required (e.g., in hash mode). + // Note that key/value offsets are intentionally recorded *after* encoding + // into their respective sections, so each offset refers to an end position. + // For n trie chunks, n offset pairs are sufficient to uniquely locate each + // chunk's data. For example, [0, offset_0] defines the range of trie chunk 0, + // while [offset_{n-2}, offset_{n-1}] defines the range of trie chunk n-1. + headerSection.Write(owner.Bytes()) // 32 bytes + binary.Write(&headerSection, binary.BigEndian, uint32(keySection.Len())) // 4 bytes binary.Write(&headerSection, binary.BigEndian, uint32(valueSection.Len())) // 4 bytes } return headerSection.Bytes(), keySection.Bytes(), valueSection.Bytes(), nil @@ -345,32 +348,68 @@ func decodeHeader(data []byte) (*trienodeMetadata, []common.Hash, []uint32, []ui }, owners, keyOffsets, valOffsets, nil } -func decodeSingle(keySection []byte, onValue func([]byte, int, int) error) ([]string, error) { +// decodeKeyEntry resolves a single entry from the key section starting from +// the specified offset. 
+func decodeKeyEntry(keySection []byte, offset int) (uint64, uint64, []byte, int, error) { + var byteRead int + + // Resolve the length of shared key + nShared, nn := binary.Uvarint(keySection[offset:]) // key length shared (varint) + if nn <= 0 { + return 0, 0, nil, 0, fmt.Errorf("corrupted varint encoding for nShared at offset %d", offset) + } + byteRead += nn + + // Resolve the length of unshared key + nUnshared, nn := binary.Uvarint(keySection[offset+byteRead:]) // key length not shared (varint) + if nn <= 0 { + return 0, 0, nil, 0, fmt.Errorf("corrupted varint encoding for nUnshared at offset %d", offset+byteRead) + } + byteRead += nn + + // Resolve the length of value + nValue, nn := binary.Uvarint(keySection[offset+byteRead:]) // value length (varint) + if nn <= 0 { + return 0, 0, nil, 0, fmt.Errorf("corrupted varint encoding for nValue at offset %d", offset+byteRead) + } + byteRead += nn + + // Validate that the values can fit in an int to prevent overflow on 32-bit systems + if nShared > uint64(math.MaxUint32) || nUnshared > uint64(math.MaxUint32) || nValue > uint64(math.MaxUint32) { + return 0, 0, nil, 0, errors.New("key/value size too large") + } + + // Resolve the unshared key + if offset+byteRead+int(nUnshared) > len(keySection) { + return 0, 0, nil, 0, fmt.Errorf("key length too long, unshared key length: %d, off: %d, section size: %d", nUnshared, offset+byteRead, len(keySection)) + } + unsharedKey := keySection[offset+byteRead : offset+byteRead+int(nUnshared)] + byteRead += int(nUnshared) + + return nShared, nValue, unsharedKey, byteRead, nil +} + +// decodeRestartTrailer resolves all the offsets recorded at the trailer. 
+func decodeRestartTrailer(keySection []byte) ([]uint32, []uint32, int, error) { var ( - prevKey []byte - items int keyOffsets []uint32 valOffsets []uint32 - - keyOff int // the key offset within the single trie data - valOff int // the value offset within the single trie data - - keys []string ) // Decode the number of restart section if len(keySection) < 4 { - return nil, fmt.Errorf("key section too short, size: %d", len(keySection)) + return nil, nil, 0, fmt.Errorf("key section too short, size: %d", len(keySection)) } nRestarts := binary.BigEndian.Uint32(keySection[len(keySection)-4:]) + // Decode the trailer if len(keySection) < int(8*nRestarts)+4 { - return nil, fmt.Errorf("key section too short, restarts: %d, size: %d", nRestarts, len(keySection)) + return nil, nil, 0, fmt.Errorf("key section too short, restarts: %d, size: %d", nRestarts, len(keySection)) } for i := range int(nRestarts) { o := len(keySection) - 4 - (int(nRestarts)-i)*8 keyOffset := binary.BigEndian.Uint32(keySection[o : o+4]) if i != 0 && keyOffset <= keyOffsets[i-1] { - return nil, fmt.Errorf("key offset is out of order, prev: %v, cur: %v", keyOffsets[i-1], keyOffset) + return nil, nil, 0, fmt.Errorf("key offset is out of order, prev: %v, cur: %v", keyOffsets[i-1], keyOffset) } keyOffsets = append(keyOffsets, keyOffset) @@ -378,99 +417,118 @@ func decodeSingle(keySection []byte, onValue func([]byte, int, int) error) ([]st // section have zero-size value. 
valOffset := binary.BigEndian.Uint32(keySection[o+4 : o+8]) if i != 0 && valOffset < valOffsets[i-1] { - return nil, fmt.Errorf("value offset is out of order, prev: %v, cur: %v", valOffsets[i-1], valOffset) + return nil, nil, 0, fmt.Errorf("value offset is out of order, prev: %v, cur: %v", valOffsets[i-1], valOffset) } valOffsets = append(valOffsets, valOffset) } - keyLimit := len(keySection) - 4 - int(nRestarts)*8 + keyLimit := len(keySection) - 4 - int(nRestarts)*8 // End of key data + return keyOffsets, valOffsets, keyLimit, nil +} +// decodeRestartSection resolves all entries in a restart section. The keyData +// contains the encoded keys for the section. +// +// onValue is the callback function being invoked for each resolved entry. The +// start and limit are the offsets within the restart section, the base value +// offset of the restart section itself should be added by the caller itself. +// What's more, this function should return `aborted == true` if the entry +// resolution should be terminated. 
+func decodeRestartSection(keyData []byte, onValue func(key []byte, start int, limit int) (bool, error)) error { + var ( + prevKey []byte + items int + + keyOff int // the key offset within the single trie data + valOff int // the value offset within the single trie data + ) // Decode data - for keyOff < keyLimit { - // Validate the key and value offsets within the single trie data chunk - if items%trienodeDataBlockRestartLen == 0 { - restartIndex := items / trienodeDataBlockRestartLen - if restartIndex >= len(keyOffsets) { - return nil, fmt.Errorf("restart index out of range: %d, available restarts: %d", restartIndex, len(keyOffsets)) - } - if keyOff != int(keyOffsets[restartIndex]) { - return nil, fmt.Errorf("key offset is not matched, recorded: %d, want: %d", keyOffsets[restartIndex], keyOff) - } - if valOff != int(valOffsets[restartIndex]) { - return nil, fmt.Errorf("value offset is not matched, recorded: %d, want: %d", valOffsets[restartIndex], valOff) - } - } - // Resolve the entry from key section - nShared, nn := binary.Uvarint(keySection[keyOff:]) // key length shared (varint) - if nn <= 0 { - return nil, fmt.Errorf("corrupted varint encoding for nShared at offset %d", keyOff) + for keyOff < len(keyData) { + nShared, nValue, unsharedKey, nn, err := decodeKeyEntry(keyData, keyOff) + if err != nil { + return err } keyOff += nn - nUnshared, nn := binary.Uvarint(keySection[keyOff:]) // key length not shared (varint) - if nn <= 0 { - return nil, fmt.Errorf("corrupted varint encoding for nUnshared at offset %d", keyOff) - } - keyOff += nn - nValue, nn := binary.Uvarint(keySection[keyOff:]) // value length (varint) - if nn <= 0 { - return nil, fmt.Errorf("corrupted varint encoding for nValue at offset %d", keyOff) - } - keyOff += nn - - // Validate that the values can fit in an int to prevent overflow on 32-bit systems - if nShared > uint64(math.MaxUint32) || nUnshared > uint64(math.MaxUint32) || nValue > uint64(math.MaxUint32) { - return nil, errors.New("key 
size too large") - } - - // Resolve unshared key - if keyOff+int(nUnshared) > len(keySection) { - return nil, fmt.Errorf("key length too long, unshared key length: %d, off: %d, section size: %d", nUnshared, keyOff, len(keySection)) - } - unsharedKey := keySection[keyOff : keyOff+int(nUnshared)] - keyOff += int(nUnshared) // Assemble the full key var key []byte if items%trienodeDataBlockRestartLen == 0 { if nShared != 0 { - return nil, fmt.Errorf("unexpected non-zero shared key prefix: %d", nShared) + return fmt.Errorf("unexpected non-zero shared key prefix: %d", nShared) } key = unsharedKey } else { - // TODO(rjl493456442) mitigate the allocation pressure. if int(nShared) > len(prevKey) { - return nil, fmt.Errorf("unexpected shared key prefix: %d, prefix key length: %d", nShared, len(prevKey)) + return fmt.Errorf("unexpected shared key prefix: %d, prefix key length: %d", nShared, len(prevKey)) } - key = append([]byte{}, prevKey[:nShared]...) - key = append(key, unsharedKey...) + key = make([]byte, int(nShared)+len(unsharedKey)) + copy(key[:nShared], prevKey[:nShared]) + copy(key[nShared:], unsharedKey) } if items != 0 && bytes.Compare(prevKey, key) >= 0 { - return nil, fmt.Errorf("trienode paths are out of order, prev: %v, cur: %v", prevKey, key) + return fmt.Errorf("trienode paths are out of order, prev: %v, cur: %v", prevKey, key) } prevKey = key - // Resolve value - if onValue != nil { - if err := onValue(key, valOff, valOff+int(nValue)); err != nil { - return nil, err - } + valEnd := valOff + int(nValue) + abort, err := onValue(key, valOff, valEnd) + if err != nil { + return err } - valOff += int(nValue) - + if abort { + return nil + } + valOff = valEnd items++ - keys = append(keys, string(key)) } - if keyOff != keyLimit { - return nil, fmt.Errorf("excessive key data after decoding, offset: %d, size: %d", keyOff, keyLimit) + if keyOff != len(keyData) { + return fmt.Errorf("excessive key data after decoding, offset: %d, size: %d", keyOff, len(keyData)) } - 
return keys, nil + return nil +} + +// onValue is the callback function being invoked for each resolved entry. The +// start and limit are the offsets within this trie chunk, the base value +// offset of the trie chunk itself should be added by the caller itself. +func decodeSingle(keySection []byte, onValue func([]byte, int, int) error) error { + keyOffsets, valOffsets, keyLimit, err := decodeRestartTrailer(keySection) + if err != nil { + return err + } + for i := 0; i < len(keyOffsets); i++ { + var keyData []byte + if i == len(keyOffsets)-1 { + keyData = keySection[keyOffsets[i]:keyLimit] + } else { + keyData = keySection[keyOffsets[i]:keyOffsets[i+1]] + } + err := decodeRestartSection(keyData, func(key []byte, start int, limit int) (bool, error) { + valStart := int(valOffsets[i]) + start + valLimit := int(valOffsets[i]) + limit + + // Possible in tests + if onValue == nil { + return false, nil + } + if err := onValue(key, valStart, valLimit); err != nil { + return false, err + } + return false, nil // abort=false + }) + if err != nil { + return err + } + } + return nil } func decodeSingleWithValue(keySection []byte, valueSection []byte) ([]string, map[string][]byte, error) { var ( - offset int - nodes = make(map[string][]byte) + offset int + estimated = len(keySection) / 8 + nodes = make(map[string][]byte, estimated) + paths = make([]string, 0, estimated) ) - paths, err := decodeSingle(keySection, func(key []byte, start int, limit int) error { + err := decodeSingle(keySection, func(key []byte, start int, limit int) error { if start != offset { return fmt.Errorf("gapped value section offset: %d, want: %d", start, offset) } @@ -481,7 +539,9 @@ func decodeSingleWithValue(keySection []byte, valueSection []byte) ([]string, ma if start > len(valueSection) || limit > len(valueSection) { return fmt.Errorf("value section out of range: start: %d, limit: %d, size: %d", start, limit, len(valueSection)) } - nodes[string(key)] = valueSection[start:limit] + strkey := 
string(key) + paths = append(paths, strkey) + nodes[strkey] = valueSection[start:limit] offset = limit return nil @@ -507,7 +567,8 @@ func (h *trienodeHistory) decode(header []byte, keySection []byte, valueSection h.nodes = make(map[common.Hash]map[string][]byte) for i := range len(owners) { - // Resolve the boundary of key section + // Resolve the boundary of the key section, each offset referring + // to the end position of this trie chunk. var keyStart, keyLimit uint32 if i != 0 { keyStart = keyOffsets[i-1] @@ -517,7 +578,8 @@ func (h *trienodeHistory) decode(header []byte, keySection []byte, valueSection return fmt.Errorf("invalid key offsets: keyStart: %d, keyLimit: %d, size: %d", keyStart, keyLimit, len(keySection)) } - // Resolve the boundary of value section + // Resolve the boundary of the value section, each offset referring + // to the end position of this trie chunk. var valStart, valLimit uint32 if i != 0 { valStart = valueOffsets[i-1] @@ -547,133 +609,175 @@ func (ir iRange) len() uint32 { return ir.limit - ir.start } -// singleTrienodeHistoryReader provides read access to a single trie within the -// trienode history. It stores an offset to the trie's position in the history, -// along with a set of per-node offsets that can be resolved on demand. type singleTrienodeHistoryReader struct { - id uint64 - reader ethdb.AncientReader - valueRange iRange // value range within the global value section - valueInternalOffsets map[string]iRange // value offset within the single trie data + id uint64 + reader ethdb.AncientReader + keyData []byte + valueRange iRange } -// TODO(rjl493456442): This function performs a large number of allocations. -// Given the large data size, a byte pool could be used to mitigate this. 
func newSingleTrienodeHistoryReader(id uint64, reader ethdb.AncientReader, keyRange iRange, valueRange iRange) (*singleTrienodeHistoryReader, error) { - // TODO(rjl493456442) the data size is known in advance, allocating the - // dedicated byte slices from the pool. keyData, err := rawdb.ReadTrienodeHistoryKeySection(reader, id, uint64(keyRange.start), uint64(keyRange.len())) if err != nil { return nil, err } - valueOffsets := make(map[string]iRange) - _, err = decodeSingle(keyData, func(key []byte, start int, limit int) error { - valueOffsets[string(key)] = iRange{ - start: uint32(start), - limit: uint32(limit), - } - return nil - }) - if err != nil { - return nil, err - } return &singleTrienodeHistoryReader{ - id: id, - reader: reader, - valueRange: valueRange, - valueInternalOffsets: valueOffsets, + id: id, + reader: reader, + keyData: keyData, + valueRange: valueRange, }, nil } -// read retrieves the trie node data with the provided node path. -func (sr *singleTrienodeHistoryReader) read(path string) ([]byte, error) { - offset, exists := sr.valueInternalOffsets[path] - if !exists { - return nil, fmt.Errorf("trienode %v not found", []byte(path)) +// searchSingle searches for a specific trie node identified by the provided +// key within a single trie node chunk. +// +// It returns the node value's offset range (start and limit) within the +// trie node data. An error is returned if the node cannot be found. 
+func (sr *singleTrienodeHistoryReader) searchSingle(key []byte) (int, int, bool, error) { + keyOffsets, valOffsets, keyLimit, err := decodeRestartTrailer(sr.keyData) + if err != nil { + return 0, 0, false, err } - return rawdb.ReadTrienodeHistoryValueSection(sr.reader, sr.id, uint64(sr.valueRange.start+offset.start), uint64(offset.len())) + // Binary search against the boundary keys for each restart section + var ( + boundFind bool + boundValueLen uint64 + ) + pos := sort.Search(len(keyOffsets), func(i int) bool { + _, nValue, dkey, _, derr := decodeKeyEntry(sr.keyData[keyOffsets[i]:], 0) + if derr != nil { + err = derr + return false + } + n := bytes.Compare(key, dkey) + if n == 0 { + boundFind = true + boundValueLen = nValue + } + return n <= 0 + }) + if err != nil { + return 0, 0, false, err + } + // The node is found as the boundary of restart section + if boundFind { + start := valOffsets[pos] + limit := valOffsets[pos] + uint32(boundValueLen) + return int(start), int(limit), true, nil + } + // The node is not found as all others have larger key than the target + if pos == 0 { + return 0, 0, false, nil + } + // Search the target node within the restart section + var keyData []byte + if pos == len(keyOffsets) { + keyData = sr.keyData[keyOffsets[pos-1]:keyLimit] // last section + } else { + keyData = sr.keyData[keyOffsets[pos-1]:keyOffsets[pos]] // non-last section + } + var ( + nStart int + nLimit int + found bool + ) + err = decodeRestartSection(keyData, func(ikey []byte, start, limit int) (bool, error) { + if bytes.Equal(key, ikey) { + nStart = int(valOffsets[pos-1]) + start + nLimit = int(valOffsets[pos-1]) + limit + found = true + return true, nil // abort = true + } + return false, nil // abort = false + }) + if err != nil { + return 0, 0, false, err + } + if !found { + return 0, 0, false, nil + } + return nStart, nLimit, true, nil +} + +// read retrieves the trie node data with the provided node path. 
+func (sr *singleTrienodeHistoryReader) read(key []byte) ([]byte, bool, error) { + start, limit, found, err := sr.searchSingle(key) + if err != nil { + return nil, false, err + } + if !found { + return nil, false, nil + } + valStart := uint64(start) + uint64(sr.valueRange.start) + valLen := uint64(limit - start) + value, err := rawdb.ReadTrienodeHistoryValueSection(sr.reader, sr.id, valStart, valLen) + if err != nil { + return nil, false, err + } + return value, true, nil } // trienodeHistoryReader provides read access to node data in the trie node history. // It resolves data from the underlying ancient store only when needed, minimizing // I/O overhead. type trienodeHistoryReader struct { - id uint64 // ID of the associated trienode history - reader ethdb.AncientReader // Database reader of ancient store - keyRanges map[common.Hash]iRange // Key ranges identifying trie chunks - valRanges map[common.Hash]iRange // Value ranges identifying trie chunks - iReaders map[common.Hash]*singleTrienodeHistoryReader // readers for each individual trie chunk + id uint64 // ID of the associated trienode history + reader ethdb.AncientReader // Database reader of ancient store } // newTrienodeHistoryReader constructs the reader for specific trienode history. -func newTrienodeHistoryReader(id uint64, reader ethdb.AncientReader) (*trienodeHistoryReader, error) { - r := &trienodeHistoryReader{ - id: id, - reader: reader, - keyRanges: make(map[common.Hash]iRange), - valRanges: make(map[common.Hash]iRange), - iReaders: make(map[common.Hash]*singleTrienodeHistoryReader), +func newTrienodeHistoryReader(id uint64, reader ethdb.AncientReader) *trienodeHistoryReader { + return &trienodeHistoryReader{ + id: id, + reader: reader, } - if err := r.decodeHeader(); err != nil { - return nil, err - } - return r, nil } // decodeHeader decodes the header section of trienode history. 
-func (r *trienodeHistoryReader) decodeHeader() error { +func (r *trienodeHistoryReader) decodeHeader(owner common.Hash) (iRange, iRange, bool, error) { header, err := rawdb.ReadTrienodeHistoryHeader(r.reader, r.id) if err != nil { - return err + return iRange{}, iRange{}, false, err } _, owners, keyOffsets, valOffsets, err := decodeHeader(header) if err != nil { - return err + return iRange{}, iRange{}, false, err } - for i, owner := range owners { - // Decode the key range for this trie chunk - var keyStart uint32 - if i != 0 { - keyStart = keyOffsets[i-1] - } - r.keyRanges[owner] = iRange{ - start: keyStart, - limit: keyOffsets[i], - } + pos := sort.Search(len(owners), func(i int) bool { + return owner.Cmp(owners[i]) <= 0 + }) + if pos == len(owners) || owners[pos] != owner { + return iRange{}, iRange{}, false, nil + } + var keyRange iRange + if pos != 0 { + keyRange.start = keyOffsets[pos-1] + } + keyRange.limit = keyOffsets[pos] - // Decode the value range for this trie chunk - var valStart uint32 - if i != 0 { - valStart = valOffsets[i-1] - } - r.valRanges[owner] = iRange{ - start: valStart, - limit: valOffsets[i], - } + var valRange iRange + if pos != 0 { + valRange.start = valOffsets[pos-1] } - return nil + valRange.limit = valOffsets[pos] + return keyRange, valRange, true, nil } // read retrieves the trie node data with the provided TrieID and node path. 
-func (r *trienodeHistoryReader) read(owner common.Hash, path string) ([]byte, error) { - ir, ok := r.iReaders[owner] - if !ok { - keyRange, exists := r.keyRanges[owner] - if !exists { - return nil, fmt.Errorf("trie %x is unknown", owner) - } - valRange, exists := r.valRanges[owner] - if !exists { - return nil, fmt.Errorf("trie %x is unknown", owner) - } - var err error - ir, err = newSingleTrienodeHistoryReader(r.id, r.reader, keyRange, valRange) - if err != nil { - return nil, err - } - r.iReaders[owner] = ir +func (r *trienodeHistoryReader) read(owner common.Hash, path string) ([]byte, bool, error) { + keyRange, valRange, found, err := r.decodeHeader(owner) + if err != nil { + return nil, false, err } - return ir.read(path) + if !found { + return nil, false, nil + } + ir, err := newSingleTrienodeHistoryReader(r.id, r.reader, keyRange, valRange) + if err != nil { + return nil, false, err + } + return ir.read([]byte(path)) } // writeTrienodeHistory persists the trienode history associated with the given diff layer. @@ -707,7 +811,6 @@ func writeTrienodeHistory(writer ethdb.AncientWriter, dl *diffLayer, rate uint32 } // readTrienodeMetadata resolves the metadata of the specified trienode history. 
-// nolint:unused func readTrienodeMetadata(reader ethdb.AncientReader, id uint64) (*trienodeMetadata, error) { header, err := rawdb.ReadTrienodeHistoryHeader(reader, id) if err != nil { diff --git a/triedb/pathdb/history_trienode_test.go b/triedb/pathdb/history_trienode_test.go index 8f9b9c2600..05278c30b1 100644 --- a/triedb/pathdb/history_trienode_test.go +++ b/triedb/pathdb/history_trienode_test.go @@ -19,6 +19,7 @@ package pathdb import ( "bytes" "encoding/binary" + "fmt" "math/rand" "reflect" "testing" @@ -137,14 +138,11 @@ func TestTrienodeHistoryReader(t *testing.T) { } } for i, h := range hs { - tr, err := newTrienodeHistoryReader(uint64(i+1), freezer) - if err != nil { - t.Fatalf("Failed to construct the history reader: %v", err) - } + tr := newTrienodeHistoryReader(uint64(i+1), freezer) for _, owner := range h.owners { nodes := h.nodes[owner] for key, value := range nodes { - blob, err := tr.read(owner, key) + blob, _, err := tr.read(owner, key) if err != nil { t.Fatalf("Failed to read trienode history: %v", err) } @@ -417,23 +415,23 @@ func TestTrienodeHistoryReaderNonExistentPath(t *testing.T) { if err := rawdb.WriteTrienodeHistory(freezer, 1, header, keySection, valueSection); err != nil { t.Fatalf("Failed to write trienode history: %v", err) } - - tr, err := newTrienodeHistoryReader(1, freezer) - if err != nil { - t.Fatalf("Failed to construct history reader: %v", err) - } + tr := newTrienodeHistoryReader(1, freezer) // Try to read a non-existent path - _, err = tr.read(testrand.Hash(), "nonexistent") - if err == nil { - t.Fatal("Expected error for non-existent trie owner") + var ( + err error + found bool + ) + _, found, err = tr.read(testrand.Hash(), "nonexistent") + if found || err != nil { + t.Fatal("Expected not found for non-existent trie owner") } // Try to read from existing owner but non-existent path owner := h.owners[0] - _, err = tr.read(owner, "nonexistent-path") - if err == nil { - t.Fatal("Expected error for non-existent path") + _, 
found, err = tr.read(owner, "nonexistent-path") + if found || err != nil { + t.Fatal("Expected not found for non-existent path") } } @@ -457,23 +455,19 @@ func TestTrienodeHistoryReaderNilValues(t *testing.T) { if err := rawdb.WriteTrienodeHistory(freezer, 1, header, keySection, valueSection); err != nil { t.Fatalf("Failed to write trienode history: %v", err) } - - tr, err := newTrienodeHistoryReader(1, freezer) - if err != nil { - t.Fatalf("Failed to construct history reader: %v", err) - } + tr := newTrienodeHistoryReader(1, freezer) // Test reading nil values - data1, err := tr.read(owner, "nil1") - if err != nil { + data1, found, err := tr.read(owner, "nil1") + if err != nil || !found { t.Fatalf("Failed to read nil value: %v", err) } if len(data1) != 0 { t.Fatal("Expected nil data for nil value") } - data2, err := tr.read(owner, "nil2") - if err != nil { + data2, found, err := tr.read(owner, "nil2") + if err != nil || !found { t.Fatalf("Failed to read nil value: %v", err) } if len(data2) != 0 { @@ -481,8 +475,8 @@ func TestTrienodeHistoryReaderNilValues(t *testing.T) { } // Test reading non-nil value - data3, err := tr.read(owner, "data1") - if err != nil { + data3, found, err := tr.read(owner, "data1") + if err != nil || !found { t.Fatalf("Failed to read non-nil value: %v", err) } if !bytes.Equal(data3, []byte("some data")) { @@ -498,7 +492,7 @@ func TestTrienodeHistoryReaderNilKey(t *testing.T) { // Add some nil values nodes[owner][""] = []byte("some data") - nodes[owner]["data1"] = []byte("some data") + nodes[owner]["data1"] = []byte("some data1") h := newTrienodeHistory(common.Hash{}, common.Hash{}, 1, nodes) @@ -509,14 +503,10 @@ func TestTrienodeHistoryReaderNilKey(t *testing.T) { if err := rawdb.WriteTrienodeHistory(freezer, 1, header, keySection, valueSection); err != nil { t.Fatalf("Failed to write trienode history: %v", err) } - - tr, err := newTrienodeHistoryReader(1, freezer) - if err != nil { - t.Fatalf("Failed to construct history reader: %v", err) 
- } + tr := newTrienodeHistoryReader(1, freezer) // Test reading nil values - data1, err := tr.read(owner, "") + data1, _, err := tr.read(owner, "") if err != nil { t.Fatalf("Failed to read nil value: %v", err) } @@ -525,11 +515,11 @@ func TestTrienodeHistoryReaderNilKey(t *testing.T) { } // Test reading non-nil value - data2, err := tr.read(owner, "data1") + data2, _, err := tr.read(owner, "data1") if err != nil { t.Fatalf("Failed to read non-nil value: %v", err) } - if !bytes.Equal(data2, []byte("some data")) { + if !bytes.Equal(data2, []byte("some data1")) { t.Fatal("Data mismatch for non-nil key") } } @@ -632,14 +622,14 @@ func TestDecodeSingleCorruptedData(t *testing.T) { _, keySection, _, _ := h.encode() // Test with empty key section - _, err := decodeSingle([]byte{}, nil) + err := decodeSingle([]byte{}, nil) if err == nil { t.Fatal("Expected error for empty key section") } // Test with key section too small for trailer if len(keySection) > 0 { - _, err := decodeSingle(keySection[:3], nil) // Less than 4 bytes for trailer + err := decodeSingle(keySection[:3], nil) // Less than 4 bytes for trailer if err == nil { t.Fatal("Expected error for key section too small for trailer") } @@ -652,7 +642,7 @@ func TestDecodeSingleCorruptedData(t *testing.T) { for i := range 10 { corrupted[i] = 0xFF } - _, err = decodeSingle(corrupted, nil) + err = decodeSingle(corrupted, nil) if err == nil { t.Fatal("Expected error for corrupted varint") } @@ -662,7 +652,7 @@ func TestDecodeSingleCorruptedData(t *testing.T) { copy(corrupted, keySection) // Set restart count to something too large binary.BigEndian.PutUint32(corrupted[len(corrupted)-4:], 10000) - _, err = decodeSingle(corrupted, nil) + err = decodeSingle(corrupted, nil) if err == nil { t.Fatal("Expected error for invalid restart count") } @@ -691,3 +681,57 @@ func testEncodeDecode(t *testing.T, h *trienodeHistory) { t.Fatal("Trienode content mismatch") } } + +func TestSearchSingle(t *testing.T) { + nodes := 
make(map[common.Hash]map[string][]byte)
+	ownerA, ownerB := testrand.Hash(), testrand.Hash()
+	nodes[ownerA] = make(map[string][]byte)
+	nodes[ownerB] = make(map[string][]byte)
+
+	for i := 0; i < trienodeDataBlockRestartLen*2; i++ {
+		nodes[ownerA][fmt.Sprintf("%d", 2*i+1)] = testrand.Bytes(rand.Intn(5))
+		nodes[ownerB][fmt.Sprintf("%d", 2*i+1)] = testrand.Bytes(rand.Intn(5))
+	}
+	h := newTrienodeHistory(common.Hash{}, common.Hash{}, 1, nodes)
+
+	var freezer, _ = rawdb.NewTrienodeFreezer(t.TempDir(), false, false)
+	defer freezer.Close()
+
+	header, keySection, valueSection, _ := h.encode()
+	if err := rawdb.WriteTrienodeHistory(freezer, 1, header, keySection, valueSection); err != nil {
+		t.Fatalf("Failed to write trienode history: %v", err)
+	}
+	tr := newTrienodeHistoryReader(1, freezer)
+
+	// Test reading non-existent entry
+	keys := []string{
+		"0",
+		"2",
+		"30",
+		"32",
+		"64",
+		"1000",
+	}
+	for _, key := range keys {
+		_, found, err := tr.read(ownerA, key)
+		if err != nil || found {
+			t.Fatalf("Expected non-existent entry %v", err)
+		}
+		_, found, err = tr.read(ownerB, key)
+		if err != nil || found {
+			t.Fatalf("Expected non-existent entry %v", err)
+		}
+	}
+
+	for owner, subnodes := range nodes {
+		for key, value := range subnodes {
+			got, found, err := tr.read(owner, key)
+			if err != nil || !found {
+				t.Fatal("Failed to read trienode")
+			}
+			if bytes.Compare(got, value) != 0 {
+				t.Fatalf("Unexpected value for key %v, got %v, expected %v", []byte(key), got, value)
+			}
+		}
+	}
+}

From 56be36f67291a7baea2fe52331e86201bfff1304 Mon Sep 17 00:00:00 2001
From: Guillaume Ballet <3272758+gballet@users.noreply.github.com>
Date: Wed, 28 Jan 2026 02:50:15 +0100
Subject: [PATCH 264/277] cmd/keeper: export getInput in wasm builds (#33686)

This is a tweak to the wasm build: it expects the host runtime to
provide a `geth_io` module exposing `len` and `read` methods. This
will be provided by the WASM interface in sp1.

This forces an API change on the OpenVM side, but the interface on their side is still being designed, so we should proceed with this change, and we'll make a different tag for OpenVM if this can't work for them. Co-authored-by: wakabat --- cmd/keeper/getpayload_wasm.go | 36 +++++++++++++++++++++++++++++++++++ cmd/keeper/stubs.go | 3 ++- 2 files changed, 38 insertions(+), 1 deletion(-) create mode 100644 cmd/keeper/getpayload_wasm.go diff --git a/cmd/keeper/getpayload_wasm.go b/cmd/keeper/getpayload_wasm.go new file mode 100644 index 0000000000..b912678825 --- /dev/null +++ b/cmd/keeper/getpayload_wasm.go @@ -0,0 +1,36 @@ +// Copyright 2026 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +//go:build wasm +// +build wasm + +package main + +import ( + "unsafe" +) + +//go:wasmimport geth_io len +func hintLen() uint32 + +//go:wasmimport geth_io read +func hintRead(data unsafe.Pointer) + +func getInput() []byte { + data := make([]byte, hintLen()) + hintRead(unsafe.Pointer(&data[0])) + return data +} diff --git a/cmd/keeper/stubs.go b/cmd/keeper/stubs.go index 04a3bc735b..407a21a145 100644 --- a/cmd/keeper/stubs.go +++ b/cmd/keeper/stubs.go @@ -14,7 +14,8 @@ // You should have received a copy of the GNU Lesser General Public License // along with the go-ethereum library. 
If not, see . -//go:build !example && !ziren +//go:build !example && !ziren && !wasm +// +build !example,!ziren,!wasm package main From 344d01e2be231e328935c4c215e5c54c1c9e3761 Mon Sep 17 00:00:00 2001 From: Lessa <230214854+adblesss@users.noreply.github.com> Date: Tue, 27 Jan 2026 20:51:15 -0500 Subject: [PATCH 265/277] core/rawdb: preallocate slice in iterateTransactions (#33690) Preallocate hashes slice with known length instead of using append in a loop. This avoids multiple reallocations during transaction indexing. --- core/rawdb/chain_iterator.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/core/rawdb/chain_iterator.go b/core/rawdb/chain_iterator.go index 713c3d8ae2..f846d4d16c 100644 --- a/core/rawdb/chain_iterator.go +++ b/core/rawdb/chain_iterator.go @@ -153,9 +153,9 @@ func iterateTransactions(db ethdb.Database, from uint64, to uint64, reverse bool err: err, } } else { - var hashes []common.Hash - for _, tx := range body.Transactions { - hashes = append(hashes, tx.Hash()) + hashes := make([]common.Hash, len(body.Transactions)) + for i, tx := range body.Transactions { + hashes[i] = tx.Hash() } result = &blockTxHashes{ hashes: hashes, From 3d05284928959220a993e640c07b3d01f59fafd4 Mon Sep 17 00:00:00 2001 From: Ng Wei Han <47109095+weiihann@users.noreply.github.com> Date: Wed, 28 Jan 2026 18:51:02 +0800 Subject: [PATCH 266/277] trie/bintrie: fix tree key hashing to match spec (#33694) Based on [EIP-7864](https://eips.ethereum.org/EIPS/eip-7864), the tree index should be 32 bytes instead of 31 bytes. 
``` def get_tree_key(address: Address32, tree_index: int, sub_index: int): # Assumes STEM_SUBTREE_WIDTH = 256 return tree_hash(address + tree_index.to_bytes(32, "little"))[:31] + bytes( [sub_index] ) ``` --- core/genesis_test.go | 2 +- trie/bintrie/key_encoding.go | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/core/genesis_test.go b/core/genesis_test.go index 821c71feb9..ba00581b06 100644 --- a/core/genesis_test.go +++ b/core/genesis_test.go @@ -308,7 +308,7 @@ func TestVerkleGenesisCommit(t *testing.T) { }, } - expected := common.FromHex("19056b480530799a4fdaa9fd9407043b965a3a5c37b4d2a1a9a4f3395a327561") + expected := common.FromHex("b94812c1674dcf4f2bc98f4503d15f4cc674265135bcf3be6e4417b60881042a") got := genesis.ToBlock().Root().Bytes() if !bytes.Equal(got, expected) { t.Fatalf("invalid genesis state root, expected %x, got %x", expected, got) diff --git a/trie/bintrie/key_encoding.go b/trie/bintrie/key_encoding.go index 5a93fcde9a..94a22d52d0 100644 --- a/trie/bintrie/key_encoding.go +++ b/trie/bintrie/key_encoding.go @@ -51,6 +51,7 @@ func GetBinaryTreeKey(addr common.Address, key []byte) []byte { hasher.Write(zeroHash[:12]) hasher.Write(addr[:]) hasher.Write(key[:31]) + hasher.Write([]byte{0}) k := hasher.Sum(nil) k[31] = key[31] return k From 0a8fd6841c826a4ea448f8ac52f4b694a9aa2efc Mon Sep 17 00:00:00 2001 From: Marius van der Wijden Date: Wed, 28 Jan 2026 13:32:27 +0100 Subject: [PATCH 267/277] eth/tracers/native: add index to callTracer log (#33629) closes https://github.com/ethereum/go-ethereum/issues/33566 --- .../internal/tracetest/calltrace_test.go | 3 +- .../call_tracer_withLog/calldata.json | 8 +- .../call_tracer_withLog/delegatecall.json | 7 +- .../call_tracer_withLog/multi_contracts.json | 34 +++++- .../call_tracer_withLog/multilogs.json | 108 +++++++++++++----- .../testdata/call_tracer_withLog/notopic.json | 4 +- .../testdata/call_tracer_withLog/simple.json | 3 +- eth/tracers/native/call.go | 2 + 8 files changed, 132 
insertions(+), 37 deletions(-) diff --git a/eth/tracers/internal/tracetest/calltrace_test.go b/eth/tracers/internal/tracetest/calltrace_test.go index ba0706c598..08bdafd91f 100644 --- a/eth/tracers/internal/tracetest/calltrace_test.go +++ b/eth/tracers/internal/tracetest/calltrace_test.go @@ -43,6 +43,7 @@ type callLog struct { Address common.Address `json:"address"` Topics []common.Hash `json:"topics"` Data hexutil.Bytes `json:"data"` + Index hexutil.Uint `json:"index"` Position hexutil.Uint `json:"position"` } @@ -300,7 +301,7 @@ func TestInternals(t *testing.T) { byte(vm.LOG0), }, tracer: mkTracer("callTracer", json.RawMessage(`{ "withLog": true }`)), - want: fmt.Sprintf(`{"from":"%s","gas":"0x13880","gasUsed":"0x5b9e","to":"0x00000000000000000000000000000000deadbeef","input":"0x","logs":[{"address":"0x00000000000000000000000000000000deadbeef","topics":[],"data":"0x000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","position":"0x0"}],"value":"0x0","type":"CALL"}`, originHex), + want: 
fmt.Sprintf(`{"from":"%s","gas":"0x13880","gasUsed":"0x5b9e","to":"0x00000000000000000000000000000000deadbeef","input":"0x","logs":[{"address":"0x00000000000000000000000000000000deadbeef","topics":[],"data":"0x000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","index":"0x0","position":"0x0"}],"value":"0x0","type":"CALL"}`, originHex), }, { // Leads to OOM on the prestate tracer diff --git a/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/calldata.json b/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/calldata.json index 30991edafb..8542524703 100644 --- a/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/calldata.json +++ b/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/calldata.json @@ -94,7 +94,8 @@ "0xe1c52dc63b719ade82e8bea94cc41a0d5d28e4aaf536adb5e9cccc9ff8c1aeda" ], "data": "0x0000000000000000000000004f5777744b500616697cb655dcb02ee6cd51deb5be96016bb57376da7a6d296e0a405ee1501778227dfa604df0a81cb1ae018598", - "position": "0x0" + "position": "0x0", + "index": "0x0" }, { "address": "0x200edd17f30485a8735878661960cd7a9a95733f", @@ -102,7 +103,8 @@ "0xacbdb084c721332ac59f9b8e392196c9eb0e4932862da8eb9beaf0dad4f550da" ], "data": "0x0000000000000000000000000000000000000000000000000000000000000000", - "position": "0x0" + "position": "0x0", + "index": "0x1" } ], "value": "0x8ac7230489e80000", @@ -112,4 +114,4 @@ "value": "0x8ac7230489e80000", "type": "CALL" } -} +} \ No newline at end of file diff --git 
a/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/delegatecall.json b/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/delegatecall.json index cdb0dda5f8..8e991f2ba2 100644 --- a/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/delegatecall.json +++ b/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/delegatecall.json @@ -256,6 +256,7 @@ "0x0000000000000000000000006ca7f214ab2ddbb9a8e1a1e2c8550e3164e9dba5" ], "data": "0x00000000000000000000000000000000000000000000000080d29fa5cccfadac", + "index": "0x0", "position": "0x0" } ], @@ -278,6 +279,7 @@ "0x0000000000000000000000005aae5c59d642e5fd45b427df6ed478b49d55fefd" ], "data": "0x00000000000000000000000000000000000000000000000080d29fa5cccfadac", + "index": "0x1", "position": "0x0" } ], @@ -308,6 +310,7 @@ "0x0000000000000000000000005aae5c59d642e5fd45b427df6ed478b49d55fefd" ], "data": "0x00000000000000000000000000000000000000000000000080d29fa5cccfadac", + "index": "0x2", "position": "0x0" } ], @@ -330,6 +333,7 @@ "0x000000000000000000000000950ca4a06c78934a148b7a3ff3ea8fc366f77a06" ], "data": "0x0000000000000000000000000000000000000000000000000041f50e27d56848", + "index": "0x3", "position": "0x0" } ], @@ -394,6 +398,7 @@ "0x0000000000000000000000003de712784baf97260455ae25fb74f574ec9c1add" ], "data": "0x000000000000000000000000000000000000000000000000de0b6b3a76400000", + "index": "0x4", "position": "0x0" } ], @@ -408,4 +413,4 @@ "value": "0x0", "type": "CALL" } -} +} \ No newline at end of file diff --git a/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/multi_contracts.json b/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/multi_contracts.json index 43d6be2be9..32a267fed0 100644 --- a/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/multi_contracts.json +++ b/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/multi_contracts.json @@ -356,6 +356,7 @@ "0x0000000000000000000000004fd27b205895e698fa350f7ea57cec8a21927fcd" ], "data": 
"0x00000000000000000000000000000000000000000001819451f999d617dafa93", + "index": "0x0", "position": "0x0" } ], @@ -370,6 +371,7 @@ "0x69ca02dd4edd7bf0a4abb9ed3b7af3f14778db5d61921c7dc7cd545266326de2" ], "data": "0x0000000000000000000000004fd27b205895e698fa350f7ea57cec8a21927fcd00000000000000000000000000000000000000000001819451f999d617dafa93", + "index": "0x1", "position": "0x1" } ], @@ -492,6 +494,7 @@ "0x0000000000000000000000004fd27b205895e698fa350f7ea57cec8a21927fcd" ], "data": "0x00000000000000000000000000000000000000000001819451f999d617dafa76", + "index": "0x2", "position": "0x0" } ], @@ -506,6 +509,7 @@ "0x69ca02dd4edd7bf0a4abb9ed3b7af3f14778db5d61921c7dc7cd545266326de2" ], "data": "0x0000000000000000000000004fd27b205895e698fa350f7ea57cec8a21927fcd00000000000000000000000000000000000000000001819451f999d617dafa76", + "index": "0x3", "position": "0x1" } ], @@ -695,6 +699,7 @@ "0x0000000000000000000000006e715ab4f598eacf0016b9b35ef33e4141844ccc" ], "data": "0x0000000000000000000000000000000000000000000181a7ae53ea2f0bef8ccd", + "index": "0x5", "position": "0x0" } ], @@ -878,6 +883,7 @@ "0x0000000000000000000000004fd27b205895e698fa350f7ea57cec8a21927fcd" ], "data": "0x0000000000000000000000000000000000000000000181a7ae53ea2f0bef8ccc", + "index": "0x7", "position": "0x0" } ], @@ -897,6 +903,7 @@ "0x0000000000000000000000006e715ab4f598eacf0016b9b35ef33e4141844ccc" ], "data": "0x0000000000000000000000000000000000000000000000022b1c8c12279fffff", + "index": "0x8", "position": "0x1" } ], @@ -920,6 +927,7 @@ "0x0000000000000000000000006e715ab4f598eacf0016b9b35ef33e4141844ccc" ], "data": "0x0000000000000000000000000000000000000000000000022b1c8c12279fffff", + "index": "0x9", "position": "0x1" } ], @@ -946,6 +954,7 @@ "0x0000000000000000000000006dbfc63479ffc031f23e94dc91befa38bec2c25f" ], "data": "0x0000000000000000000000000000000000000000000000000000000000000001", + "index": "0xa", "position": "0x0" } ], @@ -960,6 +969,7 @@ 
"0x07cf7e805770612a8b2ee8e0bcbba8aa908df5f85fbc4f9e2ef384cf75315038" ], "data": "0x0000000000000000000000000000000000000000000000000000000000000000", + "index": "0x4", "position": "0x6" }, { @@ -968,6 +978,7 @@ "0x7027eecbd2a688fc1fa281702b311ed7168571514adfd17014a55d828cb43382" ], "data": "0x000000000000000000000000000000000000000000000004563918244f400000", + "index": "0x6", "position": "0x8" } ], @@ -1045,6 +1056,7 @@ "0x0000000000000000000000006dbfc63479ffc031f23e94dc91befa38bec2c25f" ], "data": "0x0000000000000000000000000000000000000000000000000000000000000063", + "index": "0xb", "position": "0x0" } ], @@ -1173,6 +1185,7 @@ "0x000000000000000000000000da4a4626d3e16e094de3225a751aab7128e96526" ], "data": "0x0000000000000000000000000000000000000000000000000000000000000064", + "index": "0xe", "position": "0x0" } ], @@ -1187,6 +1200,7 @@ "0x4b0bc4f25f8d0b92d2e12b686ba96cd75e4e69325e6cf7b1f3119d14eaf2cbdf" ], "data": "0x000000000000000000000000da4a4626d3e16e094de3225a751aab7128e96526", + "index": "0xc", "position": "0x6" }, { @@ -1195,6 +1209,7 @@ "0xf340c079d598119636d42046c6a2d2faf7a68c04aecee516f0e0b8a9e79b8666" ], "data": "0x000000000000000000000000da4a4626d3e16e094de3225a751aab7128e9652600000000000000000000000000000000000000000000000000000000000000640000000000000000000000000000000000000000000000000000000000000000", + "index": "0xd", "position": "0x9" } ], @@ -1245,6 +1260,7 @@ "0x0000000000000000000000007498bb5749c9801f1f7e490baf5f966dbfe4e97b" ], "data": "0x0000000000000000000000000000000000000000000000000000000000000001", + "index": "0xf", "position": "0x0" } ], @@ -1339,6 +1355,7 @@ "0x0000000000000000000000000000000000000000000000000000000000000001" ], "data": 
"0x000000000000000000000000be3ae5cb97c253dda67181c6e34e43f5c275e08b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000", + "index": "0x10", "position": "0x2" } ], @@ -1433,6 +1450,7 @@ "0x0000000000000000000000000000000000000000000000000000000000000002" ], "data": "0x000000000000000000000000be3ae5cb97c253dda67181c6e34e43f5c275e08b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000", + "index": "0x11", "position": "0x2" } ], @@ -1527,6 +1545,7 @@ "0x0000000000000000000000000000000000000000000000000000000000000003" ], "data": "0x000000000000000000000000be3ae5cb97c253dda67181c6e34e43f5c275e08b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000", + "index": "0x12", "position": "0x2" } ], @@ -1621,6 +1640,7 @@ "0x0000000000000000000000000000000000000000000000000000000000000004" ], "data": "0x000000000000000000000000be3ae5cb97c253dda67181c6e34e43f5c275e08b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000", + "index": "0x13", "position": "0x2" } ], @@ -1715,6 +1735,7 @@ "0x0000000000000000000000000000000000000000000000000000000000000005" ], "data": 
"0x000000000000000000000000be3ae5cb97c253dda67181c6e34e43f5c275e08b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000", + "index": "0x14", "position": "0x2" } ], @@ -1809,6 +1830,7 @@ "0x0000000000000000000000000000000000000000000000000000000000000006" ], "data": "0x000000000000000000000000be3ae5cb97c253dda67181c6e34e43f5c275e08b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000", + "index": "0x15", "position": "0x2" } ], @@ -1903,6 +1925,7 @@ "0x0000000000000000000000000000000000000000000000000000000000000007" ], "data": "0x000000000000000000000000be3ae5cb97c253dda67181c6e34e43f5c275e08b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000", + "index": "0x16", "position": "0x2" } ], @@ -1997,6 +2020,7 @@ "0x0000000000000000000000000000000000000000000000000000000000000008" ], "data": "0x000000000000000000000000be3ae5cb97c253dda67181c6e34e43f5c275e08b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000", + "index": "0x17", "position": "0x2" } ], @@ -2091,6 +2115,7 @@ "0x0000000000000000000000000000000000000000000000000000000000000009" ], "data": 
"0x000000000000000000000000be3ae5cb97c253dda67181c6e34e43f5c275e08b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000", + "index": "0x18", "position": "0x2" } ], @@ -2185,6 +2210,7 @@ "0x000000000000000000000000000000000000000000000000000000000000000a" ], "data": "0x000000000000000000000000be3ae5cb97c253dda67181c6e34e43f5c275e08b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000", + "index": "0x19", "position": "0x2" } ], @@ -2238,6 +2264,7 @@ "0x0000000000000000000000007ccbc69292c7a6d7b538c91f3b283de97906cf30" ], "data": "0x00000000000000000000000000000000000000000001010d8bfbbbe40fe7518c", + "index": "0x1a", "position": "0x0" } ], @@ -2260,6 +2287,7 @@ "0x0000000000000000000000001b9ec8ba24630b75a7a958153ffff56dd6d4b6a2" ], "data": "0x00000000000000000000000000000000000000000001010d8bfbbbe40fe7518c", + "index": "0x1c", "position": "0x0" } ], @@ -2282,6 +2310,7 @@ "0x000000000000000000000000c3a2c744ad1f5253c736875b93bacce5b01b060b" ], "data": "0x00000000000000000000000000000000000000000001010d8bfbbbe40fe7518c", + "index": "0x1e", "position": "0x0" } ], @@ -2296,6 +2325,7 @@ "0xc6d8c0af6d21f291e7c359603aa97e0ed500f04db6e983b9fce75a91c6b8da6b" ], "data": "0x00000000000000000000000000000000000000000001010d8bfbbbe40fe7518c", + "index": "0x1b", "position": "0x2" }, { @@ -2304,6 +2334,7 @@ "0xc6d8c0af6d21f291e7c359603aa97e0ed500f04db6e983b9fce75a91c6b8da6b" ], "data": "0x00000000000000000000000000000000000000000001010d8bfbbbe40fe7518c", + "index": "0x1d", "position": "0x3" }, { @@ -2312,6 +2343,7 @@ "0xc6d8c0af6d21f291e7c359603aa97e0ed500f04db6e983b9fce75a91c6b8da6b" 
], "data": "0x00000000000000000000000000000000000000000001010d8bfbbbe40fe7518c", + "index": "0x1f", "position": "0x4" } ], @@ -2322,4 +2354,4 @@ "value": "0x0", "type": "CALL" } -} +} \ No newline at end of file diff --git a/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/multilogs.json b/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/multilogs.json index 3434dd3103..721e300774 100644 --- a/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/multilogs.json +++ b/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/multilogs.json @@ -177,6 +177,7 @@ "0xcacb62d8acea4678658eb5dc4aaa889b34d893b967c96a5f8c066e6549fa3f42" ], "data": "0x000000000000000000000000000000000000000000000000000000000000034100000000000000000000000000000000000000000000000000000000000000390000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffefefe0000000000000000000000000000000000000000000000000011c37937e08000", + "index": "0x0", "position": "0x0" }, { @@ -185,6 +186,7 @@ "0xcacb62d8acea4678658eb5dc4aaa889b34d893b967c96a5f8c066e6549fa3f42" ], "data": "0x00000000000000000000000000000000000000000000000000000000000002ff00000000000000000000000000000000000000000000000000000000000000360000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffdfdfd0000000000000000000000000000000000000000000000000011c37937e08000", + "index": "0x1", "position": "0x0" }, { @@ -193,6 +195,7 @@ "0xcacb62d8acea4678658eb5dc4aaa889b34d893b967c96a5f8c066e6549fa3f42" ], "data": 
"0x000000000000000000000000000000000000000000000000000000000000034100000000000000000000000000000000000000000000000000000000000000360000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffebebeb0000000000000000000000000000000000000000000000000011c37937e08000", + "index": "0x2", "position": "0x0" }, { @@ -201,6 +204,7 @@ "0xcacb62d8acea4678658eb5dc4aaa889b34d893b967c96a5f8c066e6549fa3f42" ], "data": "0x00000000000000000000000000000000000000000000000000000000000002ff000000000000000000000000000000000000000000000000000000000000003a0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08ffffffffffffffffffffffffffffffffffffffffffffffffffffffffff8888880000000000000000000000000000000000000000000000000011c37937e08000", + "index": "0x3", "position": "0x0" }, { @@ -209,6 +213,7 @@ "0xcacb62d8acea4678658eb5dc4aaa889b34d893b967c96a5f8c066e6549fa3f42" ], "data": "0x0000000000000000000000000000000000000000000000000000000000000341000000000000000000000000000000000000000000000000000000000000003a0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffb3b3b30000000000000000000000000000000000000000000000000011c37937e08000", + "index": "0x4", "position": "0x0" }, { @@ -217,6 +222,7 @@ "0xcacb62d8acea4678658eb5dc4aaa889b34d893b967c96a5f8c066e6549fa3f42" ], "data": 
"0x00000000000000000000000000000000000000000000000000000000000002ff00000000000000000000000000000000000000000000000000000000000000350000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffcfcfc0000000000000000000000000000000000000000000000000011c37937e08000", + "index": "0x5", "position": "0x0" }, { @@ -225,6 +231,7 @@ "0xcacb62d8acea4678658eb5dc4aaa889b34d893b967c96a5f8c066e6549fa3f42" ], "data": "0x000000000000000000000000000000000000000000000000000000000000034100000000000000000000000000000000000000000000000000000000000000350000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000000000000000000000000000000000000000000000000011c37937e08000", + "index": "0x6", "position": "0x0" }, { @@ -233,6 +240,7 @@ "0xcacb62d8acea4678658eb5dc4aaa889b34d893b967c96a5f8c066e6549fa3f42" ], "data": "0x00000000000000000000000000000000000000000000000000000000000002ff000000000000000000000000000000000000000000000000000000000000003b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe3e3e30000000000000000000000000000000000000000000000000011c37937e08000", + "index": "0x7", "position": "0x0" }, { @@ -241,6 +249,7 @@ "0xcacb62d8acea4678658eb5dc4aaa889b34d893b967c96a5f8c066e6549fa3f42" ], "data": 
"0x0000000000000000000000000000000000000000000000000000000000000341000000000000000000000000000000000000000000000000000000000000003b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08ffffffffffffffffffffffffffffffffffffffffffffffffffffffffff3e3e3e0000000000000000000000000000000000000000000000000011c37937e08000", + "index": "0x8", "position": "0x0" }, { @@ -249,6 +258,7 @@ "0xcacb62d8acea4678658eb5dc4aaa889b34d893b967c96a5f8c066e6549fa3f42" ], "data": "0x00000000000000000000000000000000000000000000000000000000000002ff00000000000000000000000000000000000000000000000000000000000000340000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000000000000000000000000000000000000000000000000011c37937e08000", + "index": "0x9", "position": "0x0" }, { @@ -257,6 +267,7 @@ "0xcacb62d8acea4678658eb5dc4aaa889b34d893b967c96a5f8c066e6549fa3f42" ], "data": "0x000000000000000000000000000000000000000000000000000000000000034100000000000000000000000000000000000000000000000000000000000000340000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000000000000000000000000000000000000000000000000011c37937e08000", + "index": "0xa", "position": "0x0" }, { @@ -265,6 +276,7 @@ "0xcacb62d8acea4678658eb5dc4aaa889b34d893b967c96a5f8c066e6549fa3f42" ], "data": 
"0x00000000000000000000000000000000000000000000000000000000000002ff000000000000000000000000000000000000000000000000000000000000003c0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000000000000000000000000000000000000000000000000011c37937e08000", + "index": "0xb", "position": "0x0" }, { @@ -273,6 +285,7 @@ "0xcacb62d8acea4678658eb5dc4aaa889b34d893b967c96a5f8c066e6549fa3f42" ], "data": "0x0000000000000000000000000000000000000000000000000000000000000341000000000000000000000000000000000000000000000000000000000000003c0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffdbdbdb0000000000000000000000000000000000000000000000000011c37937e08000", + "index": "0xc", "position": "0x0" }, { @@ -281,6 +294,7 @@ "0xcacb62d8acea4678658eb5dc4aaa889b34d893b967c96a5f8c066e6549fa3f42" ], "data": "0x00000000000000000000000000000000000000000000000000000000000002ff00000000000000000000000000000000000000000000000000000000000000330000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000000000000000000000000000000000000000000000000011c37937e08000", + "index": "0xd", "position": "0x0" }, { @@ -289,6 +303,7 @@ "0xcacb62d8acea4678658eb5dc4aaa889b34d893b967c96a5f8c066e6549fa3f42" ], "data": 
"0x000000000000000000000000000000000000000000000000000000000000034100000000000000000000000000000000000000000000000000000000000000330000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08fffffffffffffffffffffffffffffffffffffffffffffffffffffffffff4f4f40000000000000000000000000000000000000000000000000011c37937e08000", + "index": "0xe", "position": "0x0" }, { @@ -297,6 +312,7 @@ "0xcacb62d8acea4678658eb5dc4aaa889b34d893b967c96a5f8c066e6549fa3f42" ], "data": "0x00000000000000000000000000000000000000000000000000000000000002ff000000000000000000000000000000000000000000000000000000000000003d0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000000000000000000000000000000000000000000000000011c37937e08000", + "index": "0xf", "position": "0x0" }, { @@ -305,6 +321,7 @@ "0xcacb62d8acea4678658eb5dc4aaa889b34d893b967c96a5f8c066e6549fa3f42" ], "data": "0x0000000000000000000000000000000000000000000000000000000000000341000000000000000000000000000000000000000000000000000000000000003d0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000000000000000000000000000000000000000000000000011c37937e08000", + "index": "0x10", "position": "0x0" }, { @@ -313,6 +330,7 @@ "0xcacb62d8acea4678658eb5dc4aaa889b34d893b967c96a5f8c066e6549fa3f42" ], "data": 
"0x00000000000000000000000000000000000000000000000000000000000002ff00000000000000000000000000000000000000000000000000000000000000320000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000000000000000000000000000000000000000000000000011c37937e08000", + "index": "0x11", "position": "0x0" }, { @@ -321,6 +339,7 @@ "0xcacb62d8acea4678658eb5dc4aaa889b34d893b967c96a5f8c066e6549fa3f42" ], "data": "0x000000000000000000000000000000000000000000000000000000000000034100000000000000000000000000000000000000000000000000000000000000320000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffbfbfb0000000000000000000000000000000000000000000000000011c37937e08000", + "index": "0x12", "position": "0x0" }, { @@ -329,6 +348,7 @@ "0xcacb62d8acea4678658eb5dc4aaa889b34d893b967c96a5f8c066e6549fa3f42" ], "data": "0x00000000000000000000000000000000000000000000000000000000000002ff000000000000000000000000000000000000000000000000000000000000003e0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000000000000000000000000000000000000000000000000011c37937e08000", + "index": "0x13", "position": "0x0" }, { @@ -337,6 +357,7 @@ "0xcacb62d8acea4678658eb5dc4aaa889b34d893b967c96a5f8c066e6549fa3f42" ], "data": 
"0x0000000000000000000000000000000000000000000000000000000000000341000000000000000000000000000000000000000000000000000000000000003e0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000000000000000000000000000000000000000000000000011c37937e08000", + "index": "0x14", "position": "0x0" }, { @@ -345,6 +366,7 @@ "0xcacb62d8acea4678658eb5dc4aaa889b34d893b967c96a5f8c066e6549fa3f42" ], "data": "0x00000000000000000000000000000000000000000000000000000000000002fe00000000000000000000000000000000000000000000000000000000000000380000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffb0b0b00000000000000000000000000000000000000000000000000011c37937e08000", + "index": "0x15", "position": "0x0" }, { @@ -353,7 +375,8 @@ "0xcacb62d8acea4678658eb5dc4aaa889b34d893b967c96a5f8c066e6549fa3f42" ], "data": "0x000000000000000000000000000000000000000000000000000000000000034200000000000000000000000000000000000000000000000000000000000000380000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffefefe0000000000000000000000000000000000000000000000000011c37937e08000", - "position": "0x0" + "index": "0x16", + "position": "0x0" }, { "address": "0x350e0ffc780a6a75b44cc52e1ff9092870668945", @@ -361,7 +384,8 @@ 
"0xcacb62d8acea4678658eb5dc4aaa889b34d893b967c96a5f8c066e6549fa3f42" ], "data": "0x00000000000000000000000000000000000000000000000000000000000002fe00000000000000000000000000000000000000000000000000000000000000370000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffa0a0a00000000000000000000000000000000000000000000000000011c37937e08000", - "position": "0x0" + "index": "0x17", + "position": "0x0" }, { "address": "0x350e0ffc780a6a75b44cc52e1ff9092870668945", @@ -369,7 +393,8 @@ "0xcacb62d8acea4678658eb5dc4aaa889b34d893b967c96a5f8c066e6549fa3f42" ], "data": "0x000000000000000000000000000000000000000000000000000000000000034200000000000000000000000000000000000000000000000000000000000000370000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08ffffffffffffffffffffffffffffffffffffffffffffffffffffffffff5b5b5b0000000000000000000000000000000000000000000000000011c37937e08000", - "position": "0x0" + "index": "0x18", + "position": "0x0" }, { "address": "0x350e0ffc780a6a75b44cc52e1ff9092870668945", @@ -377,7 +402,8 @@ "0xcacb62d8acea4678658eb5dc4aaa889b34d893b967c96a5f8c066e6549fa3f42" ], "data": 
"0x00000000000000000000000000000000000000000000000000000000000002fe00000000000000000000000000000000000000000000000000000000000000390000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffbababa0000000000000000000000000000000000000000000000000011c37937e08000", - "position": "0x0" + "index": "0x19", + "position": "0x0" }, { "address": "0x350e0ffc780a6a75b44cc52e1ff9092870668945", @@ -385,7 +411,8 @@ "0xcacb62d8acea4678658eb5dc4aaa889b34d893b967c96a5f8c066e6549fa3f42" ], "data": "0x000000000000000000000000000000000000000000000000000000000000034200000000000000000000000000000000000000000000000000000000000000390000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000000000000000000000000000000000000000000000000011c37937e08000", - "position": "0x0" + "index": "0x1a", + "position": "0x0" }, { "address": "0x350e0ffc780a6a75b44cc52e1ff9092870668945", @@ -393,7 +420,8 @@ "0xcacb62d8acea4678658eb5dc4aaa889b34d893b967c96a5f8c066e6549fa3f42" ], "data": "0x00000000000000000000000000000000000000000000000000000000000002fe00000000000000000000000000000000000000000000000000000000000000360000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffeaeaea0000000000000000000000000000000000000000000000000011c37937e08000", - 
"position": "0x0" + "index": "0x1b", + "position": "0x0" }, { "address": "0x350e0ffc780a6a75b44cc52e1ff9092870668945", @@ -401,7 +429,8 @@ "0xcacb62d8acea4678658eb5dc4aaa889b34d893b967c96a5f8c066e6549fa3f42" ], "data": "0x000000000000000000000000000000000000000000000000000000000000034200000000000000000000000000000000000000000000000000000000000000360000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffa9a9a90000000000000000000000000000000000000000000000000011c37937e08000", - "position": "0x0" + "index": "0x1c", + "position": "0x0" }, { "address": "0x350e0ffc780a6a75b44cc52e1ff9092870668945", @@ -409,7 +438,8 @@ "0xcacb62d8acea4678658eb5dc4aaa889b34d893b967c96a5f8c066e6549fa3f42" ], "data": "0x00000000000000000000000000000000000000000000000000000000000002fe000000000000000000000000000000000000000000000000000000000000003a0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffb9b9b90000000000000000000000000000000000000000000000000011c37937e08000", - "position": "0x0" + "index": "0x1d", + "position": "0x0" }, { "address": "0x350e0ffc780a6a75b44cc52e1ff9092870668945", @@ -417,7 +447,8 @@ "0xcacb62d8acea4678658eb5dc4aaa889b34d893b967c96a5f8c066e6549fa3f42" ], "data": 
"0x0000000000000000000000000000000000000000000000000000000000000342000000000000000000000000000000000000000000000000000000000000003a0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffbfbfb0000000000000000000000000000000000000000000000000011c37937e08000", - "position": "0x0" + "index": "0x1e", + "position": "0x0" }, { "address": "0x350e0ffc780a6a75b44cc52e1ff9092870668945", @@ -425,7 +456,8 @@ "0xcacb62d8acea4678658eb5dc4aaa889b34d893b967c96a5f8c066e6549fa3f42" ], "data": "0x00000000000000000000000000000000000000000000000000000000000002fe00000000000000000000000000000000000000000000000000000000000000350000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffefefe0000000000000000000000000000000000000000000000000011c37937e08000", - "position": "0x0" + "index": "0x1f", + "position": "0x0" }, { "address": "0x350e0ffc780a6a75b44cc52e1ff9092870668945", @@ -433,7 +465,8 @@ "0xcacb62d8acea4678658eb5dc4aaa889b34d893b967c96a5f8c066e6549fa3f42" ], "data": "0x000000000000000000000000000000000000000000000000000000000000034200000000000000000000000000000000000000000000000000000000000000350000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffefefe0000000000000000000000000000000000000000000000000011c37937e08000", - 
"position": "0x0" + "index": "0x20", + "position": "0x0" }, { "address": "0x350e0ffc780a6a75b44cc52e1ff9092870668945", @@ -441,7 +474,8 @@ "0xcacb62d8acea4678658eb5dc4aaa889b34d893b967c96a5f8c066e6549fa3f42" ], "data": "0x00000000000000000000000000000000000000000000000000000000000002fe000000000000000000000000000000000000000000000000000000000000003b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffbababa0000000000000000000000000000000000000000000000000011c37937e08000", - "position": "0x0" + "index": "0x21", + "position": "0x0" }, { "address": "0x350e0ffc780a6a75b44cc52e1ff9092870668945", @@ -449,7 +483,8 @@ "0xcacb62d8acea4678658eb5dc4aaa889b34d893b967c96a5f8c066e6549fa3f42" ], "data": "0x0000000000000000000000000000000000000000000000000000000000000342000000000000000000000000000000000000000000000000000000000000003b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08ffffffffffffffffffffffffffffffffffffffffffffffffffffffffff6363630000000000000000000000000000000000000000000000000011c37937e08000", - "position": "0x0" + "index": "0x22", + "position": "0x0" }, { "address": "0x350e0ffc780a6a75b44cc52e1ff9092870668945", @@ -457,7 +492,8 @@ "0xcacb62d8acea4678658eb5dc4aaa889b34d893b967c96a5f8c066e6549fa3f42" ], "data": 
"0x00000000000000000000000000000000000000000000000000000000000002fe00000000000000000000000000000000000000000000000000000000000000340000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000000000000000000000000000000000000000000000000011c37937e08000", - "position": "0x0" + "index": "0x23", + "position": "0x0" }, { "address": "0x350e0ffc780a6a75b44cc52e1ff9092870668945", @@ -465,7 +501,8 @@ "0xcacb62d8acea4678658eb5dc4aaa889b34d893b967c96a5f8c066e6549fa3f42" ], "data": "0x000000000000000000000000000000000000000000000000000000000000034200000000000000000000000000000000000000000000000000000000000000340000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08fffffffffffffffffffffffffffffffffffffffffffffffffffffffffff9f9f90000000000000000000000000000000000000000000000000011c37937e08000", - "position": "0x0" + "index": "0x24", + "position": "0x0" }, { "address": "0x350e0ffc780a6a75b44cc52e1ff9092870668945", @@ -473,7 +510,8 @@ "0xcacb62d8acea4678658eb5dc4aaa889b34d893b967c96a5f8c066e6549fa3f42" ], "data": "0x00000000000000000000000000000000000000000000000000000000000002fe000000000000000000000000000000000000000000000000000000000000003c0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffeaeaea0000000000000000000000000000000000000000000000000011c37937e08000", - 
"position": "0x0" + "index": "0x25", + "position": "0x0" }, { "address": "0x350e0ffc780a6a75b44cc52e1ff9092870668945", @@ -481,7 +519,8 @@ "0xcacb62d8acea4678658eb5dc4aaa889b34d893b967c96a5f8c066e6549fa3f42" ], "data": "0x0000000000000000000000000000000000000000000000000000000000000342000000000000000000000000000000000000000000000000000000000000003c0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08ffffffffffffffffffffffffffffffffffffffffffffffffffffffffff9c9c9c0000000000000000000000000000000000000000000000000011c37937e08000", - "position": "0x0" + "index": "0x26", + "position": "0x0" }, { "address": "0x350e0ffc780a6a75b44cc52e1ff9092870668945", @@ -489,7 +528,8 @@ "0xcacb62d8acea4678658eb5dc4aaa889b34d893b967c96a5f8c066e6549fa3f42" ], "data": "0x00000000000000000000000000000000000000000000000000000000000002fe00000000000000000000000000000000000000000000000000000000000000330000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08fffffffffffffffffffffffffffffffffffffffffffffffffffffffffff8f8f80000000000000000000000000000000000000000000000000011c37937e08000", - "position": "0x0" + "index": "0x27", + "position": "0x0" }, { "address": "0x350e0ffc780a6a75b44cc52e1ff9092870668945", @@ -497,7 +537,8 @@ "0xcacb62d8acea4678658eb5dc4aaa889b34d893b967c96a5f8c066e6549fa3f42" ], "data": 
"0x000000000000000000000000000000000000000000000000000000000000034200000000000000000000000000000000000000000000000000000000000000330000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000000000000000000000000000000000000000000000000011c37937e08000", - "position": "0x0" + "index": "0x28", + "position": "0x0" }, { "address": "0x350e0ffc780a6a75b44cc52e1ff9092870668945", @@ -505,7 +546,8 @@ "0xcacb62d8acea4678658eb5dc4aaa889b34d893b967c96a5f8c066e6549fa3f42" ], "data": "0x00000000000000000000000000000000000000000000000000000000000002fe000000000000000000000000000000000000000000000000000000000000003d0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffdfdfd0000000000000000000000000000000000000000000000000011c37937e08000", - "position": "0x0" + "index": "0x29", + "position": "0x0" }, { "address": "0x350e0ffc780a6a75b44cc52e1ff9092870668945", @@ -513,7 +555,8 @@ "0xcacb62d8acea4678658eb5dc4aaa889b34d893b967c96a5f8c066e6549fa3f42" ], "data": "0x0000000000000000000000000000000000000000000000000000000000000342000000000000000000000000000000000000000000000000000000000000003d0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000000000000000000000000000000000000000000000000011c37937e08000", - 
"position": "0x0" + "index": "0x2a", + "position": "0x0" }, { "address": "0x350e0ffc780a6a75b44cc52e1ff9092870668945", @@ -521,7 +564,8 @@ "0xcacb62d8acea4678658eb5dc4aaa889b34d893b967c96a5f8c066e6549fa3f42" ], "data": "0x00000000000000000000000000000000000000000000000000000000000002fe00000000000000000000000000000000000000000000000000000000000000320000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000000000000000000000000000000000000000000000000011c37937e08000", - "position": "0x0" + "index": "0x2b", + "position": "0x0" }, { "address": "0x350e0ffc780a6a75b44cc52e1ff9092870668945", @@ -529,7 +573,8 @@ "0xcacb62d8acea4678658eb5dc4aaa889b34d893b967c96a5f8c066e6549fa3f42" ], "data": "0x000000000000000000000000000000000000000000000000000000000000034200000000000000000000000000000000000000000000000000000000000000320000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffcfcfc0000000000000000000000000000000000000000000000000011c37937e08000", - "position": "0x0" + "index": "0x2c", + "position": "0x0" }, { "address": "0x350e0ffc780a6a75b44cc52e1ff9092870668945", @@ -537,7 +582,8 @@ "0xcacb62d8acea4678658eb5dc4aaa889b34d893b967c96a5f8c066e6549fa3f42" ], "data": 
"0x00000000000000000000000000000000000000000000000000000000000002fe000000000000000000000000000000000000000000000000000000000000003e0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffdfdfd0000000000000000000000000000000000000000000000000011c37937e08000", - "position": "0x0" + "index": "0x2d", + "position": "0x0" }, { "address": "0x350e0ffc780a6a75b44cc52e1ff9092870668945", @@ -545,7 +591,8 @@ "0xcacb62d8acea4678658eb5dc4aaa889b34d893b967c96a5f8c066e6549fa3f42" ], "data": "0x0000000000000000000000000000000000000000000000000000000000000342000000000000000000000000000000000000000000000000000000000000003e0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000000000000000000000000000000000000000000000000011c37937e08000", - "position": "0x0" + "index": "0x2e", + "position": "0x0" }, { "address": "0x350e0ffc780a6a75b44cc52e1ff9092870668945", @@ -553,7 +600,8 @@ "0xcacb62d8acea4678658eb5dc4aaa889b34d893b967c96a5f8c066e6549fa3f42" ], "data": "0x00000000000000000000000000000000000000000000000000000000000002fd00000000000000000000000000000000000000000000000000000000000000380000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08ffffffffffffffffffffffffffffffffffffffffffffffffffffffffff4d4e530000000000000000000000000000000000000000000000000011c37937e08000", - 
"position": "0x0" + "index": "0x2f", + "position": "0x0" }, { "address": "0x350e0ffc780a6a75b44cc52e1ff9092870668945", @@ -561,7 +609,8 @@ "0xcacb62d8acea4678658eb5dc4aaa889b34d893b967c96a5f8c066e6549fa3f42" ], "data": "0x000000000000000000000000000000000000000000000000000000000000034300000000000000000000000000000000000000000000000000000000000000380000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000000000000000000000000000000000000000000000000011c37937e08000", - "position": "0x0" + "index": "0x30", + "position": "0x0" }, { "address": "0x350e0ffc780a6a75b44cc52e1ff9092870668945", @@ -569,10 +618,11 @@ "0xcacb62d8acea4678658eb5dc4aaa889b34d893b967c96a5f8c066e6549fa3f42" ], "data": "0x00000000000000000000000000000000000000000000000000000000000002fd00000000000000000000000000000000000000000000000000000000000000370000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08ffffffffffffffffffffffffffffffffffffffffffffffffffffffffff4f494b0000000000000000000000000000000000000000000000000011c37937e08000", - "position": "0x0" + "index": "0x31", + "position": "0x0" } ], "value": "0x3782dace9d90000", "type": "CALL" } -} +} \ No newline at end of file diff --git a/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/notopic.json b/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/notopic.json index 814189dc6b..13f286b7ca 100644 --- a/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/notopic.json +++ b/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/notopic.json @@ -265,6 +265,7 @@ 
"0xaf30e4d66b2f1f23e63ef4591058a897f67e6867233e33ca3508b982dcc4129b" ], "data": "0x00000000000000000000000050739060a2c32dc076e507ae1a893aab28ecfe68d1b13c1538a940417bf0e73b2498634436753c854c7fb971224d971bd2ae3e8800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000120000000000000000000000000000000000000000000000000000000000000016000000000000000000000000000000000000000000000000000000000000001e000000000000000000000000000000000000000000000000000000000000249f011000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000355524c000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000436a736f6e2868747470733a2f2f6170692e72616e646f6d2e6f72672f6a736f6e2d7270632f312f696e766f6b65292e726573756c742e72616e646f6d2e646174612e300000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000012c4244584a68725670424a35336f3243786c4a526c51745a4a4b5a714c5974354951652b37335944533448744e6a5335486f64624942337476666f773755717579416b303835566b4c6e4c3945704b67777157517a375a4c64477673516c526432734b78496f6c4e673944626e6650737047714c684c62625953566e4e38437776736a7041586353536f33632b34634e774339307946346f4e69626b764433797461706f5a37676f5453796f5559546677536a6e773374692b484a5648374e332b633069774f43715a6a4464734751556358336d33532f494857624f4f5151356f734f344c626a33476730783155644e7466557a5943465937396e7a596757495145464375524249306e364e42764251573732372b4f73445259304a2f392f676a74387563696248576963303d0000000000000000000000000000000000000000", + "index": "0x1", "position": "0x4" } ], @@ -277,10 +278,11 @@ "address": "0x50739060a2c32dc076e507ae1a893aab28ecfe68", "topics": [], "data": "0x62616e6b726f6c6c5f6d69736d61746368", + "index": "0x0", "position": "0x2" } ], "value": 
"0x429d069189e0000", "type": "CALL" } -} +} \ No newline at end of file diff --git a/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/simple.json b/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/simple.json index c00f083159..c9de111a2c 100644 --- a/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/simple.json +++ b/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/simple.json @@ -74,10 +74,11 @@ "0x000000000000000000000000dbf03b407c01e7cd3cbea99509d93f8dddc8c6fb" ], "data": "0x0000000000000000000000000000000000000000000000000000000000989680", + "index": "0x0", "position": "0x0" } ], "value": "0x0", "type": "CALL" } -} +} \ No newline at end of file diff --git a/eth/tracers/native/call.go b/eth/tracers/native/call.go index c2247d1ce4..06220da84d 100644 --- a/eth/tracers/native/call.go +++ b/eth/tracers/native/call.go @@ -42,6 +42,7 @@ type callLog struct { Address common.Address `json:"address"` Topics []common.Hash `json:"topics"` Data hexutil.Bytes `json:"data"` + Index hexutil.Uint `json:"index"` // Position of the log relative to subcalls within the same trace // See https://github.com/ethereum/go-ethereum/pull/28389 for details Position hexutil.Uint `json:"position"` @@ -250,6 +251,7 @@ func (t *callTracer) OnLog(log *types.Log) { Address: log.Address, Topics: log.Topics, Data: log.Data, + Index: hexutil.Uint(log.Index), Position: hexutil.Uint(len(t.callstack[len(t.callstack)-1].Calls)), } t.callstack[len(t.callstack)-1].Logs = append(t.callstack[len(t.callstack)-1].Logs, l) From 1e9dfd5bb0f9913ee80b5b69e592a87ff64163d2 Mon Sep 17 00:00:00 2001 From: CPerezz <37264926+CPerezz@users.noreply.github.com> Date: Wed, 28 Jan 2026 13:58:41 +0100 Subject: [PATCH 268/277] core: standardize slow block JSON output for cross-client metrics (#33655) Implement standardized JSON format for slow block logging to enable cross-client performance analysis and protocol research. 
This change is part of the Cross-Client Execution Metrics initiative proposed by Gary Rong: https://hackmd.io/dg7rizTyTXuCf2LSa2LsyQ The standardized metrics enabled data-driven analysis like the EIP-7907 research: https://ethresear.ch/t/data-driven-analysis-on-eip-7907/23850 JSON format includes: - block: number, hash, gas_used, tx_count - timing: execution_ms, total_ms - throughput: mgas_per_sec - state_reads: accounts, storage_slots, bytecodes, code_bytes - state_writes: accounts, storage_slots, bytecodes - cache: account/storage/code hits, misses, hit_rate This should come after merging #33522 --------- Co-authored-by: Gary Rong --- cmd/utils/flags.go | 10 +- core/blockchain.go | 23 ++-- core/blockchain_stats.go | 238 +++++++++++++++++++++++++++------------ core/state/statedb.go | 15 ++- eth/ethconfig/config.go | 7 +- 5 files changed, 198 insertions(+), 95 deletions(-) diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index 844397b734..91448c520c 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -692,7 +692,7 @@ var ( } LogSlowBlockFlag = &cli.DurationFlag{ Name: "debug.logslowblock", - Usage: "Block execution time threshold beyond which detailed statistics will be logged (0 means disable)", + Usage: "Block execution time threshold beyond which detailed statistics will be logged (0 logs all blocks, negative means disable)", Value: ethconfig.Defaults.SlowBlockThreshold, Category: flags.LoggingCategory, } @@ -2351,8 +2351,12 @@ func MakeChain(ctx *cli.Context, stack *node.Node, readonly bool) (*core.BlockCh // Enable state size tracking if enabled StateSizeTracking: ctx.Bool(StateSizeTrackingFlag.Name), - // Configure the slow block statistic logger - SlowBlockThreshold: ctx.Duration(LogSlowBlockFlag.Name), + // Configure the slow block statistic logger (disabled by default) + SlowBlockThreshold: ethconfig.Defaults.SlowBlockThreshold, + } + // Only enable slow block logging if the flag was explicitly set + if ctx.IsSet(LogSlowBlockFlag.Name) { + 
options.SlowBlockThreshold = ctx.Duration(LogSlowBlockFlag.Name) } if options.ArchiveMode && !options.Preimages { options.Preimages = true diff --git a/core/blockchain.go b/core/blockchain.go index db3f71c44d..6f1db96463 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -214,7 +214,8 @@ type BlockChainConfig struct { StateSizeTracking bool // SlowBlockThreshold is the block execution time threshold beyond which - // detailed statistics will be logged. + // detailed statistics will be logged. Negative value means disabled (default), + // zero logs all blocks, positive value filters blocks by execution time. SlowBlockThreshold time.Duration } @@ -2106,24 +2107,11 @@ func (bc *BlockChain) ProcessBlock(parentRoot common.Hash, block *types.Block, s } // Upload the statistics of reader at the end defer func() { - pStat := prefetch.GetStats() - accountCacheHitPrefetchMeter.Mark(pStat.AccountCacheHit) - accountCacheMissPrefetchMeter.Mark(pStat.AccountCacheMiss) - storageCacheHitPrefetchMeter.Mark(pStat.StorageCacheHit) - storageCacheMissPrefetchMeter.Mark(pStat.StorageCacheMiss) - - rStat := process.GetStats() - accountCacheHitMeter.Mark(rStat.AccountCacheHit) - accountCacheMissMeter.Mark(rStat.AccountCacheMiss) - storageCacheHitMeter.Mark(rStat.StorageCacheHit) - storageCacheMissMeter.Mark(rStat.StorageCacheMiss) - if result != nil { - result.stats.StatePrefetchCacheStats = pStat - result.stats.StateReadCacheStats = rStat + result.stats.StatePrefetchCacheStats = prefetch.GetStats() + result.stats.StateReadCacheStats = process.GetStats() } }() - go func(start time.Time, throwaway *state.StateDB, block *types.Block) { // Disable tracing for prefetcher executions. 
vmCfg := bc.cfg.VmConfig @@ -2238,8 +2226,11 @@ func (bc *BlockChain) ProcessBlock(parentRoot common.Hash, block *types.Block, s stats.StorageLoaded = statedb.StorageLoaded stats.StorageUpdated = int(statedb.StorageUpdated.Load()) stats.StorageDeleted = int(statedb.StorageDeleted.Load()) + stats.CodeLoaded = statedb.CodeLoaded stats.CodeLoadBytes = statedb.CodeLoadBytes + stats.CodeUpdated = statedb.CodeUpdated + stats.CodeUpdateBytes = statedb.CodeUpdateBytes stats.Execution = ptime - (statedb.AccountReads + statedb.StorageReads + statedb.CodeReads) // The time spent on EVM processing stats.Validation = vtime - (statedb.AccountHashes + statedb.AccountUpdates + statedb.StorageUpdates) // The time spent on block validation diff --git a/core/blockchain_stats.go b/core/blockchain_stats.go index e8c5860294..adc66266c4 100644 --- a/core/blockchain_stats.go +++ b/core/blockchain_stats.go @@ -17,8 +17,7 @@ package core import ( - "fmt" - "strings" + "encoding/json" "time" "github.com/ethereum/go-ethereum/common" @@ -39,14 +38,16 @@ type ExecuteStats struct { StorageCommits time.Duration // Time spent on the storage trie commit CodeReads time.Duration // Time spent on the contract code read - AccountLoaded int // Number of accounts loaded - AccountUpdated int // Number of accounts updated - AccountDeleted int // Number of accounts deleted - StorageLoaded int // Number of storage slots loaded - StorageUpdated int // Number of storage slots updated - StorageDeleted int // Number of storage slots deleted - CodeLoaded int // Number of contract code loaded - CodeLoadBytes int // Number of bytes read from contract code + AccountLoaded int // Number of accounts loaded + AccountUpdated int // Number of accounts updated + AccountDeleted int // Number of accounts deleted + StorageLoaded int // Number of storage slots loaded + StorageUpdated int // Number of storage slots updated + StorageDeleted int // Number of storage slots deleted + CodeLoaded int // Number of contract code 
loaded + CodeLoadBytes int // Number of bytes read from contract code + CodeUpdated int // Number of contract code written (CREATE/CREATE2 + EIP-7702) + CodeUpdateBytes int // Total bytes of code written Execution time.Duration // Time spent on the EVM execution Validation time.Duration // Time spent on the block validation @@ -104,64 +105,161 @@ func (s *ExecuteStats) reportMetrics() { storageCacheMissMeter.Mark(s.StateReadCacheStats.StorageCacheMiss) } -// logSlow prints the detailed execution statistics if the block is regarded as slow. -func (s *ExecuteStats) logSlow(block *types.Block, slowBlockThreshold time.Duration) { - if slowBlockThreshold == 0 { - return - } - if s.TotalTime < slowBlockThreshold { - return - } - msg := fmt.Sprintf(` -########## SLOW BLOCK ######### -Block: %v (%#x) txs: %d, mgasps: %.2f, elapsed: %v - -EVM execution: %v - -Validation: %v - Account hash: %v - Storage hash: %v - -State read: %v - Account read: %v(%d) - Storage read: %v(%d) - Code read: %v(%d %v) - -State write: %v - Trie commit: %v - State write: %v - Block write: %v - -%s -############################## -`, block.Number(), block.Hash(), len(block.Transactions()), s.MgasPerSecond, common.PrettyDuration(s.TotalTime), - // EVM execution - common.PrettyDuration(s.Execution), - - // Block validation - common.PrettyDuration(s.Validation+s.CrossValidation+s.AccountHashes+s.AccountUpdates+s.StorageUpdates), - common.PrettyDuration(s.AccountHashes+s.AccountUpdates), - common.PrettyDuration(s.StorageUpdates), - - // State read - common.PrettyDuration(s.AccountReads+s.StorageReads+s.CodeReads), - common.PrettyDuration(s.AccountReads), s.AccountLoaded, - common.PrettyDuration(s.StorageReads), s.StorageLoaded, - common.PrettyDuration(s.CodeReads), s.CodeLoaded, common.StorageSize(s.CodeLoadBytes), - - // State write - common.PrettyDuration(max(s.AccountCommits, s.StorageCommits)+s.TrieDBCommit+s.SnapshotCommit+s.BlockWrite), - common.PrettyDuration(max(s.AccountCommits, 
s.StorageCommits)), - common.PrettyDuration(s.TrieDBCommit+s.SnapshotCommit), - common.PrettyDuration(s.BlockWrite), - - // cache statistics - s.StateReadCacheStats, - ) - for _, line := range strings.Split(msg, "\n") { - if line == "" { - continue - } - log.Info(line) - } +// slowBlockLog represents the JSON structure for slow block logging. +// This format is designed for cross-client compatibility with other +// Ethereum execution clients (reth, Besu, Nethermind). +type slowBlockLog struct { + Level string `json:"level"` + Msg string `json:"msg"` + Block slowBlockInfo `json:"block"` + Timing slowBlockTime `json:"timing"` + Throughput slowBlockThru `json:"throughput"` + StateReads slowBlockReads `json:"state_reads"` + StateWrites slowBlockWrites `json:"state_writes"` + Cache slowBlockCache `json:"cache"` +} + +type slowBlockInfo struct { + Number uint64 `json:"number"` + Hash common.Hash `json:"hash"` + GasUsed uint64 `json:"gas_used"` + TxCount int `json:"tx_count"` +} + +type slowBlockTime struct { + ExecutionMs float64 `json:"execution_ms"` + StateReadMs float64 `json:"state_read_ms"` + StateHashMs float64 `json:"state_hash_ms"` + CommitMs float64 `json:"commit_ms"` + TotalMs float64 `json:"total_ms"` +} + +type slowBlockThru struct { + MgasPerSec float64 `json:"mgas_per_sec"` +} + +type slowBlockReads struct { + Accounts int `json:"accounts"` + StorageSlots int `json:"storage_slots"` + Code int `json:"code"` + CodeBytes int `json:"code_bytes"` +} + +type slowBlockWrites struct { + Accounts int `json:"accounts"` + AccountsDeleted int `json:"accounts_deleted"` + StorageSlots int `json:"storage_slots"` + StorageSlotsDeleted int `json:"storage_slots_deleted"` + Code int `json:"code"` + CodeBytes int `json:"code_bytes"` +} + +// slowBlockCache represents cache hit/miss statistics for cross-client analysis. 
+type slowBlockCache struct { + Account slowBlockCacheEntry `json:"account"` + Storage slowBlockCacheEntry `json:"storage"` + Code slowBlockCodeCacheEntry `json:"code"` +} + +// slowBlockCacheEntry represents cache statistics for account/storage caches. +type slowBlockCacheEntry struct { + Hits int64 `json:"hits"` + Misses int64 `json:"misses"` + HitRate float64 `json:"hit_rate"` +} + +// slowBlockCodeCacheEntry represents cache statistics for code cache with byte-level granularity. +type slowBlockCodeCacheEntry struct { + Hits int64 `json:"hits"` + Misses int64 `json:"misses"` + HitRate float64 `json:"hit_rate"` + HitBytes int64 `json:"hit_bytes"` + MissBytes int64 `json:"miss_bytes"` +} + +// calculateHitRate computes the cache hit rate as a percentage (0-100). +func calculateHitRate(hits, misses int64) float64 { + if total := hits + misses; total > 0 { + return float64(hits) / float64(total) * 100.0 + } + return 0.0 +} + +// durationToMs converts a time.Duration to milliseconds as a float64 +// with sub-millisecond precision for accurate cross-client metrics. +func durationToMs(d time.Duration) float64 { + return float64(d.Nanoseconds()) / 1e6 +} + +// logSlow prints the detailed execution statistics in JSON format if the block +// is regarded as slow. The JSON format is designed for cross-client compatibility +// with other Ethereum execution clients. 
+func (s *ExecuteStats) logSlow(block *types.Block, slowBlockThreshold time.Duration) { + // Negative threshold means disabled (default when flag not set) + if slowBlockThreshold < 0 { + return + } + // Threshold of 0 logs all blocks; positive threshold filters + if slowBlockThreshold > 0 && s.TotalTime < slowBlockThreshold { + return + } + logEntry := slowBlockLog{ + Level: "warn", + Msg: "Slow block", + Block: slowBlockInfo{ + Number: block.NumberU64(), + Hash: block.Hash(), + GasUsed: block.GasUsed(), + TxCount: len(block.Transactions()), + }, + Timing: slowBlockTime{ + ExecutionMs: durationToMs(s.Execution), + StateReadMs: durationToMs(s.AccountReads + s.StorageReads + s.CodeReads), + StateHashMs: durationToMs(s.AccountHashes + s.AccountUpdates + s.StorageUpdates), + CommitMs: durationToMs(max(s.AccountCommits, s.StorageCommits) + s.TrieDBCommit + s.SnapshotCommit + s.BlockWrite), + TotalMs: durationToMs(s.TotalTime), + }, + Throughput: slowBlockThru{ + MgasPerSec: s.MgasPerSecond, + }, + StateReads: slowBlockReads{ + Accounts: s.AccountLoaded, + StorageSlots: s.StorageLoaded, + Code: s.CodeLoaded, + CodeBytes: s.CodeLoadBytes, + }, + StateWrites: slowBlockWrites{ + Accounts: s.AccountUpdated, + AccountsDeleted: s.AccountDeleted, + StorageSlots: s.StorageUpdated, + StorageSlotsDeleted: s.StorageDeleted, + Code: s.CodeUpdated, + CodeBytes: s.CodeUpdateBytes, + }, + Cache: slowBlockCache{ + Account: slowBlockCacheEntry{ + Hits: s.StateReadCacheStats.AccountCacheHit, + Misses: s.StateReadCacheStats.AccountCacheMiss, + HitRate: calculateHitRate(s.StateReadCacheStats.AccountCacheHit, s.StateReadCacheStats.AccountCacheMiss), + }, + Storage: slowBlockCacheEntry{ + Hits: s.StateReadCacheStats.StorageCacheHit, + Misses: s.StateReadCacheStats.StorageCacheMiss, + HitRate: calculateHitRate(s.StateReadCacheStats.StorageCacheHit, s.StateReadCacheStats.StorageCacheMiss), + }, + Code: slowBlockCodeCacheEntry{ + Hits: s.StateReadCacheStats.CodeStats.CacheHit, + Misses: 
s.StateReadCacheStats.CodeStats.CacheMiss, + HitRate: calculateHitRate(s.StateReadCacheStats.CodeStats.CacheHit, s.StateReadCacheStats.CodeStats.CacheMiss), + HitBytes: s.StateReadCacheStats.CodeStats.CacheHitBytes, + MissBytes: s.StateReadCacheStats.CodeStats.CacheMissBytes, + }, + }, + } + jsonBytes, err := json.Marshal(logEntry) + if err != nil { + log.Error("Failed to marshal slow block log", "error", err) + return + } + log.Warn(string(jsonBytes)) } diff --git a/core/state/statedb.go b/core/state/statedb.go index 610e7173cf..3d329bab64 100644 --- a/core/state/statedb.go +++ b/core/state/statedb.go @@ -158,13 +158,15 @@ type StateDB struct { StorageLoaded int // Number of storage slots retrieved from the database during the state transition StorageUpdated atomic.Int64 // Number of storage slots updated during the state transition StorageDeleted atomic.Int64 // Number of storage slots deleted during the state transition - CodeLoaded int // Number of contract code loaded during the state transition // CodeLoadBytes is the total number of bytes read from contract code. // This value may be smaller than the actual number of bytes read, since // some APIs (e.g. CodeSize) may load the entire code from either the // cache or the database when the size is not available in the cache. - CodeLoadBytes int + CodeLoaded int // Number of contract code loaded during the state transition + CodeLoadBytes int // Total bytes of resolved code + CodeUpdated int // Number of contracts with code changes that persisted + CodeUpdateBytes int // Total bytes of persisted code written } // New creates a new state from a given trie. @@ -941,8 +943,15 @@ func (s *StateDB) IntermediateRoot(deleteEmptyObjects bool) common.Hash { if op.isDelete() { deletedAddrs = append(deletedAddrs, addr) } else { - s.updateStateObject(s.stateObjects[addr]) + obj := s.stateObjects[addr] + s.updateStateObject(obj) s.AccountUpdated += 1 + + // Count code writes post-Finalise so reverted CREATEs are excluded. 
+ if obj.dirtyCode { + s.CodeUpdated += 1 + s.CodeUpdateBytes += len(obj.code) + } } usedAddrs = append(usedAddrs, addr) // Copy needed for closure } diff --git a/eth/ethconfig/config.go b/eth/ethconfig/config.go index e58c4b884a..8aa6e4ef09 100644 --- a/eth/ethconfig/config.go +++ b/eth/ethconfig/config.go @@ -75,7 +75,7 @@ var Defaults = Config{ RPCTxFeeCap: 1, // 1 ether TxSyncDefaultTimeout: 20 * time.Second, TxSyncMaxTimeout: 1 * time.Minute, - SlowBlockThreshold: time.Second * 2, + SlowBlockThreshold: -1, // Disabled by default; set via --debug.logslowblock flag RangeLimit: 0, } @@ -130,8 +130,9 @@ type Config struct { // presence of these blocks for every new peer connection. RequiredBlocks map[uint64]common.Hash `toml:"-"` - // SlowBlockThreshold is the block execution speed threshold (Mgas/s) - // below which detailed statistics are logged. + // SlowBlockThreshold is the block execution time threshold beyond which + // detailed statistics are logged. Negative means disabled (default), zero + // logs all blocks, positive filters by execution time. SlowBlockThreshold time.Duration `toml:",omitempty"` // Database options From 424bc22ab8e5d5f6cefd28cece7b5bdebac838d8 Mon Sep 17 00:00:00 2001 From: Marius van der Wijden Date: Wed, 28 Jan 2026 20:33:04 +0100 Subject: [PATCH 269/277] eth/gasprice: reduce allocations (#33698) Recent pprof from our validator shows ~6% of all allocations because of the gas price oracle. This PR reduces that. 
--- eth/gasprice/gasprice.go | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/eth/gasprice/gasprice.go b/eth/gasprice/gasprice.go index 4fd3df7428..a922eab675 100644 --- a/eth/gasprice/gasprice.go +++ b/eth/gasprice/gasprice.go @@ -31,6 +31,7 @@ import ( "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/rpc" + "github.com/holiman/uint256" ) const sampleNumber = 3 // Number of transactions sampled in a block @@ -257,12 +258,12 @@ func (oracle *Oracle) getBlockValues(ctx context.Context, blockNum uint64, limit sortedTxs := make([]*types.Transaction, len(txs)) copy(sortedTxs, txs) baseFee := block.BaseFee() + baseFee256 := new(uint256.Int) + if baseFee != nil { + baseFee256.SetFromBig(baseFee) + } slices.SortFunc(sortedTxs, func(a, b *types.Transaction) int { - // It's okay to discard the error because a tx would never be - // accepted into a block with an invalid effective tip. - tip1, _ := a.EffectiveGasTip(baseFee) - tip2, _ := b.EffectiveGasTip(baseFee) - return tip1.Cmp(tip2) + return a.EffectiveGasTipCmp(b, baseFee256) }) var prices []*big.Int From 2513feddf804e7916bb87c909374fef79714a93e Mon Sep 17 00:00:00 2001 From: Lessa <230214854+adblesss@users.noreply.github.com> Date: Thu, 29 Jan 2026 02:49:10 -0500 Subject: [PATCH 270/277] crypto/kzg4844: preallocate proof slice in ComputeCellProofs (#33703) Preallocate the proof slice with the known size instead of growing it via append in a loop. The length is already known from the source slice. 
--- crypto/kzg4844/kzg4844_ckzg_cgo.go | 6 +++--- crypto/kzg4844/kzg4844_gokzg.go | 6 +++--- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/crypto/kzg4844/kzg4844_ckzg_cgo.go b/crypto/kzg4844/kzg4844_ckzg_cgo.go index 46509674b6..93d5f4ff94 100644 --- a/crypto/kzg4844/kzg4844_ckzg_cgo.go +++ b/crypto/kzg4844/kzg4844_ckzg_cgo.go @@ -143,9 +143,9 @@ func ckzgComputeCellProofs(blob *Blob) ([]Proof, error) { if err != nil { return []Proof{}, err } - var p []Proof - for _, proof := range proofs { - p = append(p, (Proof)(proof)) + p := make([]Proof, len(proofs)) + for i, proof := range proofs { + p[i] = (Proof)(proof) } return p, nil } diff --git a/crypto/kzg4844/kzg4844_gokzg.go b/crypto/kzg4844/kzg4844_gokzg.go index e9676ff1b8..03627ebafb 100644 --- a/crypto/kzg4844/kzg4844_gokzg.go +++ b/crypto/kzg4844/kzg4844_gokzg.go @@ -108,9 +108,9 @@ func gokzgComputeCellProofs(blob *Blob) ([]Proof, error) { if err != nil { return []Proof{}, err } - var p []Proof - for _, proof := range proofs { - p = append(p, (Proof)(proof)) + p := make([]Proof, len(proofs)) + for i, proof := range proofs { + p[i] = (Proof)(proof) } return p, nil } From 7046e6324414122b70fba00f5389d6899d77c5cb Mon Sep 17 00:00:00 2001 From: rjl493456442 Date: Thu, 29 Jan 2026 17:22:15 +0800 Subject: [PATCH 271/277] trie: fix flaky test (#33711) --- trie/node_test.go | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/trie/node_test.go b/trie/node_test.go index 875f6e38dc..b4f427ca77 100644 --- a/trie/node_test.go +++ b/trie/node_test.go @@ -20,6 +20,7 @@ import ( "bytes" "math/rand" "reflect" + "slices" "testing" "github.com/ethereum/go-ethereum/crypto" @@ -248,7 +249,7 @@ func TestNodeDifference(t *testing.T) { old: nil, new: testrand.Bytes(32), expErr: true, }, { - old: testrand.Bytes(32), new: testrand.Bytes(32), expErr: true, + old: bytes.Repeat([]byte{0x1}, 32), new: bytes.Repeat([]byte{0x2}, 32), expErr: true, }, // Different node type { @@ -276,19 +277,19 @@ func 
TestNodeDifference(t *testing.T) { }) } - for _, test := range tests { + for i, test := range tests { _, indices, values, err := NodeDifference(test.old, test.new) if test.expErr && err == nil { - t.Fatal("Expect error, got nil") + t.Fatalf("Expect error, got nil %d", i) } if !test.expErr && err != nil { t.Fatalf("Unexpect error, %v", err) } if err == nil { - if !reflect.DeepEqual(indices, test.expIndices) { + if !slices.Equal(indices, test.expIndices) { t.Fatalf("Unexpected indices, want: %v, got: %v", test.expIndices, indices) } - if !reflect.DeepEqual(values, test.expValues) { + if !slices.EqualFunc(values, test.expValues, bytes.Equal) { t.Fatalf("Unexpected values, want: %v, got: %v", test.expValues, values) } } From 628ff79be3c0a4415cf9429bbd52118980915a66 Mon Sep 17 00:00:00 2001 From: rjl493456442 Date: Thu, 29 Jan 2026 17:48:34 +0800 Subject: [PATCH 272/277] ethdb/pebble: disable seek compaction for Pebble (#33697) This PR restores the previous Pebble configuration, disabling seek compaction. This feature is still needed by hash mode archive node, mitigating the overhead of frequent compaction. --- ethdb/pebble/pebble.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/ethdb/pebble/pebble.go b/ethdb/pebble/pebble.go index 800559ab4b..395daa6cf4 100644 --- a/ethdb/pebble/pebble.go +++ b/ethdb/pebble/pebble.go @@ -300,6 +300,10 @@ func New(file string, cache int, handles int, namespace string, readonly bool) ( // debt will be less than 1GB, but with more frequent compactions scheduled. L0CompactionThreshold: 2, } + // Disable seek compaction explicitly. Check https://github.com/ethereum/go-ethereum/pull/20130 + // for more details. + opt.Experimental.ReadSamplingMultiplier = -1 + // These two settings define the conditions under which compaction concurrency // is increased. 
Specifically, one additional compaction job will be enabled when: // - there is one more overlapping sub-level0; From 9a6905318a8381fe282bf9e546a10511f6b4de82 Mon Sep 17 00:00:00 2001 From: Csaba Kiraly Date: Thu, 29 Jan 2026 10:53:55 +0100 Subject: [PATCH 273/277] core/txpool/legacypool: clarify and fix non-executable tx heartbeat (#33704) Heartbeats are used to drop non-executable transactions from the queue. The timeout mechanism was not clearly documented, and it was updated also when not necessary. --- core/txpool/legacypool/legacypool.go | 6 +++--- core/txpool/legacypool/queue.go | 6 +++++- 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/core/txpool/legacypool/legacypool.go b/core/txpool/legacypool/legacypool.go index 1bfc6e60b2..e17b12f7f2 100644 --- a/core/txpool/legacypool/legacypool.go +++ b/core/txpool/legacypool/legacypool.go @@ -151,7 +151,7 @@ type Config struct { AccountQueue uint64 // Maximum number of non-executable transaction slots permitted per account GlobalQueue uint64 // Maximum number of non-executable transaction slots for all accounts - Lifetime time.Duration // Maximum amount of time non-executable transaction are queued + Lifetime time.Duration // Maximum amount of time an account can remain stale in the non-executable pool } // DefaultConfig contains the default configurations for the transaction pool. @@ -778,7 +778,7 @@ func (pool *LegacyPool) add(tx *types.Transaction) (replaced bool, err error) { pool.queueTxEvent(tx) log.Trace("Pooled new executable transaction", "hash", hash, "from", from, "to", tx.To()) - // Successful promotion, bump the heartbeat + // Successful replacement. If needed, bump the heartbeat giving more time to queued txs. 
pool.queue.bump(from) return old != nil, nil } @@ -871,7 +871,7 @@ func (pool *LegacyPool) promoteTx(addr common.Address, hash common.Hash, tx *typ // Set the potentially new pending nonce and notify any subsystems of the new tx pool.pendingNonces.set(addr, tx.Nonce()+1) - // Successful promotion, bump the heartbeat + // Successful promotion, bump the heartbeat, giving more time to queued txs. pool.queue.bump(addr) return true } diff --git a/core/txpool/legacypool/queue.go b/core/txpool/legacypool/queue.go index 918a219ab6..35beaded12 100644 --- a/core/txpool/legacypool/queue.go +++ b/core/txpool/legacypool/queue.go @@ -88,8 +88,12 @@ func (q *queue) get(addr common.Address) (*list, bool) { return l, ok } +// bump updates the heartbeat for the given account address. +// If the address is unknown, the call is a no-op. func (q *queue) bump(addr common.Address) { - q.beats[addr] = time.Now() + if _, ok := q.beats[addr]; ok { + q.beats[addr] = time.Now() + } } func (q *queue) addresses() []common.Address { From c974722dc0fa85b780b02de76fe0f7e9ba51fa67 Mon Sep 17 00:00:00 2001 From: fengjian <445077+fengjian@users.noreply.github.com> Date: Thu, 29 Jan 2026 17:56:12 +0800 Subject: [PATCH 274/277] crypto/ecies: fix ECIES invalid-curve handling (#33669) Fix ECIES invalid-curve handling in RLPx handshake (reject invalid ephemeral pubkeys early) - Add curve validation in crypto/ecies.GenerateShared to reject invalid public keys before ECDH. - Update RLPx PoC test to assert invalid curve points fail with ErrInvalidPublicKey. Motivation / Context RLPx handshake uses ECIES decryption on unauthenticated network input. Prior to this change, an invalid-curve ephemeral public key would proceed into ECDH and only fail at MAC verification, returning ErrInvalidMessage. This allows an oracle on decrypt success/failure and leaves the code path vulnerable to invalid-curve/small-subgroup attacks. The fix enforces IsOnCurve validation up front. 
--- crypto/ecies/ecies.go | 3 ++ p2p/rlpx/rlpx_oracle_poc_test.go | 57 ++++++++++++++++++++++++++++++++ 2 files changed, 60 insertions(+) create mode 100644 p2p/rlpx/rlpx_oracle_poc_test.go diff --git a/crypto/ecies/ecies.go b/crypto/ecies/ecies.go index 9a892781f4..378d764a19 100644 --- a/crypto/ecies/ecies.go +++ b/crypto/ecies/ecies.go @@ -124,6 +124,9 @@ func (prv *PrivateKey) GenerateShared(pub *PublicKey, skLen, macLen int) (sk []b if prv.PublicKey.Curve != pub.Curve { return nil, ErrInvalidCurve } + if pub.X == nil || pub.Y == nil || !pub.Curve.IsOnCurve(pub.X, pub.Y) { + return nil, ErrInvalidPublicKey + } if skLen+macLen > MaxSharedKeyLength(pub) { return nil, ErrSharedKeyTooBig } diff --git a/p2p/rlpx/rlpx_oracle_poc_test.go b/p2p/rlpx/rlpx_oracle_poc_test.go new file mode 100644 index 0000000000..3497f0026e --- /dev/null +++ b/p2p/rlpx/rlpx_oracle_poc_test.go @@ -0,0 +1,57 @@ +package rlpx + +import ( + "bytes" + "errors" + "testing" + + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/crypto/ecies" +) + +func TestHandshakeECIESInvalidCurveOracle(t *testing.T) { + initKey, err := crypto.GenerateKey() + if err != nil { + t.Fatal(err) + } + respKey, err := crypto.GenerateKey() + if err != nil { + t.Fatal(err) + } + + init := handshakeState{ + initiator: true, + remote: ecies.ImportECDSAPublic(&respKey.PublicKey), + } + authMsg, err := init.makeAuthMsg(initKey) + if err != nil { + t.Fatal(err) + } + packet, err := init.sealEIP8(authMsg) + if err != nil { + t.Fatal(err) + } + + var recv handshakeState + if _, err := recv.readMsg(new(authMsgV4), respKey, bytes.NewReader(packet)); err != nil { + t.Fatalf("expected valid packet to decrypt: %v", err) + } + + tampered := append([]byte(nil), packet...) 
+ if len(tampered) < 2+65 { + t.Fatalf("unexpected packet length %d", len(tampered)) + } + tampered[2] = 0x04 + for i := 1; i < 65; i++ { + tampered[2+i] = 0x00 + } + + var recv2 handshakeState + _, err = recv2.readMsg(new(authMsgV4), respKey, bytes.NewReader(tampered)) + if err == nil { + t.Fatal("expected decryption failure for invalid curve point") + } + if !errors.Is(err, ecies.ErrInvalidPublicKey) { + t.Fatalf("unexpected error: %v", err) + } +} From a179ccf6f002024268d524a4254abb0011f3d964 Mon Sep 17 00:00:00 2001 From: Noisy <125606576+donatik27@users.noreply.github.com> Date: Thu, 29 Jan 2026 14:08:04 +0100 Subject: [PATCH 275/277] core/state: add bounds check in heap eviction loop (#33712) core/state: add bounds check in heap eviction loop Add len(h) > 0 check before accessing h[0] to prevent potential panic and align with existing heap access patterns in txpool, p2p, and mclock packages. --- core/state/state_sizer.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/state/state_sizer.go b/core/state/state_sizer.go index fc6781ad93..02b73e5575 100644 --- a/core/state/state_sizer.go +++ b/core/state/state_sizer.go @@ -346,7 +346,7 @@ func (t *SizeTracker) run() { // Evict the stale statistics heap.Push(&h, stats[u.root]) - for u.blockNumber-h[0].BlockNumber > statEvictThreshold { + for len(h) > 0 && u.blockNumber-h[0].BlockNumber > statEvictThreshold { delete(stats, h[0].StateRoot) heap.Pop(&h) } From 845009f684a99901d176c4e6467eed24bccce316 Mon Sep 17 00:00:00 2001 From: milan-cb Date: Thu, 29 Jan 2026 12:41:18 -0800 Subject: [PATCH 276/277] ethclient: fix timeout param for eth_sendRawTransactionSync (#33693) Fix timeout parameter in eth_sendRawTransactionSync to be an integer instead of hex. The spec has now been clarified on this point. 
--- ethclient/ethclient.go | 6 ++++-- internal/ethapi/api.go | 2 +- internal/ethapi/api_test.go | 2 +- 3 files changed, 6 insertions(+), 4 deletions(-) diff --git a/ethclient/ethclient.go b/ethclient/ethclient.go index 6f2fb5ebc8..bc4eaad6fa 100644 --- a/ethclient/ethclient.go +++ b/ethclient/ethclient.go @@ -729,9 +729,11 @@ func (ec *Client) SendRawTransactionSync( rawTx []byte, timeout *time.Duration, ) (*types.Receipt, error) { - var ms *hexutil.Uint64 + var ms *uint64 if timeout != nil { - if d := hexutil.Uint64(timeout.Milliseconds()); d > 0 { + msInt := timeout.Milliseconds() + if msInt > 0 { + d := uint64(msInt) ms = &d } } diff --git a/internal/ethapi/api.go b/internal/ethapi/api.go index b0a79295f5..e0baccbfe7 100644 --- a/internal/ethapi/api.go +++ b/internal/ethapi/api.go @@ -1661,7 +1661,7 @@ func (api *TransactionAPI) SendRawTransaction(ctx context.Context, input hexutil // SendRawTransactionSync will add the signed transaction to the transaction pool // and wait until the transaction has been included in a block and return the receipt, or the timeout. 
-func (api *TransactionAPI) SendRawTransactionSync(ctx context.Context, input hexutil.Bytes, timeoutMs *hexutil.Uint64) (map[string]interface{}, error) { +func (api *TransactionAPI) SendRawTransactionSync(ctx context.Context, input hexutil.Bytes, timeoutMs *uint64) (map[string]interface{}, error) { tx := new(types.Transaction) if err := tx.UnmarshalBinary(input); err != nil { return nil, err diff --git a/internal/ethapi/api_test.go b/internal/ethapi/api_test.go index aaa002b5ec..3668bd0e14 100644 --- a/internal/ethapi/api_test.go +++ b/internal/ethapi/api_test.go @@ -4026,7 +4026,7 @@ func TestSendRawTransactionSync_Timeout(t *testing.T) { raw, _ := makeSelfSignedRaw(t, api, b.acc.Address) - timeout := hexutil.Uint64(200) // 200ms + timeout := uint64(200) // 200ms receipt, err := api.SendRawTransactionSync(context.Background(), raw, &timeout) if receipt != nil { From cb97c48cb6a84703dcc269fbed2cd8b61c464a2d Mon Sep 17 00:00:00 2001 From: alex017 Date: Fri, 30 Jan 2026 14:14:15 +0100 Subject: [PATCH 277/277] triedb/pathdb: preallocate slices in decodeRestartTrailer (#33715) Preallocate capacity for `keyOffsets` and `valOffsets` slices in `decodeRestartTrailer` since the exact size (`nRestarts`) is known upfront. --------- Co-authored-by: rjl493456442 --- triedb/pathdb/history_trienode.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/triedb/pathdb/history_trienode.go b/triedb/pathdb/history_trienode.go index 05a43808c2..11d6112806 100644 --- a/triedb/pathdb/history_trienode.go +++ b/triedb/pathdb/history_trienode.go @@ -391,10 +391,6 @@ func decodeKeyEntry(keySection []byte, offset int) (uint64, uint64, []byte, int, // decodeRestartTrailer resolves all the offsets recorded at the trailer. 
func decodeRestartTrailer(keySection []byte) ([]uint32, []uint32, int, error) { - var ( - keyOffsets []uint32 - valOffsets []uint32 - ) // Decode the number of restart section if len(keySection) < 4 { return nil, nil, 0, fmt.Errorf("key section too short, size: %d", len(keySection)) @@ -402,6 +398,10 @@ func decodeRestartTrailer(keySection []byte) ([]uint32, []uint32, int, error) { nRestarts := binary.BigEndian.Uint32(keySection[len(keySection)-4:]) // Decode the trailer + var ( + keyOffsets = make([]uint32, 0, int(nRestarts)) + valOffsets = make([]uint32, 0, int(nRestarts)) + ) if len(keySection) < int(8*nRestarts)+4 { return nil, nil, 0, fmt.Errorf("key section too short, restarts: %d, size: %d", nRestarts, len(keySection)) }