Merge branch 'ethereum:master' into fix/p2p

This commit is contained in:
shhhh 2025-12-30 23:36:47 +05:30 committed by GitHub
commit c3681a4a3a
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
88 changed files with 1652 additions and 2892 deletions

View file

@ -8,6 +8,45 @@ jobs:
validate-pr:
runs-on: ubuntu-latest
steps:
- name: Check for Spam PR
uses: actions/github-script@v7
with:
script: |
const prTitle = context.payload.pull_request.title;
const spamRegex = /^(feat|chore|fix)(\(.*\))?\s*:/i;
if (spamRegex.test(prTitle)) {
// Leave a comment explaining why
await github.rest.issues.createComment({
owner: context.repo.owner,
repo: context.repo.repo,
issue_number: context.payload.pull_request.number,
body: `## PR Closed as Spam
This PR was automatically closed because the title format \`feat:\`, \`fix:\`, or \`chore:\` is commonly associated with spam contributions.
If this is a legitimate contribution, please:
1. Review our contribution guidelines
2. Use the correct PR title format: \`directory, ...: description\`
3. Open a new PR with the proper title format
Thank you for your understanding.`
});
// Close the PR
await github.rest.pulls.update({
owner: context.repo.owner,
repo: context.repo.repo,
pull_number: context.payload.pull_request.number,
state: 'closed'
});
core.setFailed('PR closed as spam due to suspicious title format');
return;
}
console.log('✅ PR passed spam check');
- name: Checkout repository
uses: actions/checkout@v4

View file

@ -17,24 +17,23 @@ var _ = (*executableDataMarshaling)(nil)
// MarshalJSON marshals as JSON.
func (e ExecutableData) MarshalJSON() ([]byte, error) {
type ExecutableData struct {
ParentHash common.Hash `json:"parentHash" gencodec:"required"`
FeeRecipient common.Address `json:"feeRecipient" gencodec:"required"`
StateRoot common.Hash `json:"stateRoot" gencodec:"required"`
ReceiptsRoot common.Hash `json:"receiptsRoot" gencodec:"required"`
LogsBloom hexutil.Bytes `json:"logsBloom" gencodec:"required"`
Random common.Hash `json:"prevRandao" gencodec:"required"`
Number hexutil.Uint64 `json:"blockNumber" gencodec:"required"`
GasLimit hexutil.Uint64 `json:"gasLimit" gencodec:"required"`
GasUsed hexutil.Uint64 `json:"gasUsed" gencodec:"required"`
Timestamp hexutil.Uint64 `json:"timestamp" gencodec:"required"`
ExtraData hexutil.Bytes `json:"extraData" gencodec:"required"`
BaseFeePerGas *hexutil.Big `json:"baseFeePerGas" gencodec:"required"`
BlockHash common.Hash `json:"blockHash" gencodec:"required"`
Transactions []hexutil.Bytes `json:"transactions" gencodec:"required"`
Withdrawals []*types.Withdrawal `json:"withdrawals"`
BlobGasUsed *hexutil.Uint64 `json:"blobGasUsed"`
ExcessBlobGas *hexutil.Uint64 `json:"excessBlobGas"`
ExecutionWitness *types.ExecutionWitness `json:"executionWitness,omitempty"`
ParentHash common.Hash `json:"parentHash" gencodec:"required"`
FeeRecipient common.Address `json:"feeRecipient" gencodec:"required"`
StateRoot common.Hash `json:"stateRoot" gencodec:"required"`
ReceiptsRoot common.Hash `json:"receiptsRoot" gencodec:"required"`
LogsBloom hexutil.Bytes `json:"logsBloom" gencodec:"required"`
Random common.Hash `json:"prevRandao" gencodec:"required"`
Number hexutil.Uint64 `json:"blockNumber" gencodec:"required"`
GasLimit hexutil.Uint64 `json:"gasLimit" gencodec:"required"`
GasUsed hexutil.Uint64 `json:"gasUsed" gencodec:"required"`
Timestamp hexutil.Uint64 `json:"timestamp" gencodec:"required"`
ExtraData hexutil.Bytes `json:"extraData" gencodec:"required"`
BaseFeePerGas *hexutil.Big `json:"baseFeePerGas" gencodec:"required"`
BlockHash common.Hash `json:"blockHash" gencodec:"required"`
Transactions []hexutil.Bytes `json:"transactions" gencodec:"required"`
Withdrawals []*types.Withdrawal `json:"withdrawals"`
BlobGasUsed *hexutil.Uint64 `json:"blobGasUsed"`
ExcessBlobGas *hexutil.Uint64 `json:"excessBlobGas"`
}
var enc ExecutableData
enc.ParentHash = e.ParentHash
@ -59,31 +58,29 @@ func (e ExecutableData) MarshalJSON() ([]byte, error) {
enc.Withdrawals = e.Withdrawals
enc.BlobGasUsed = (*hexutil.Uint64)(e.BlobGasUsed)
enc.ExcessBlobGas = (*hexutil.Uint64)(e.ExcessBlobGas)
enc.ExecutionWitness = e.ExecutionWitness
return json.Marshal(&enc)
}
// UnmarshalJSON unmarshals from JSON.
func (e *ExecutableData) UnmarshalJSON(input []byte) error {
type ExecutableData struct {
ParentHash *common.Hash `json:"parentHash" gencodec:"required"`
FeeRecipient *common.Address `json:"feeRecipient" gencodec:"required"`
StateRoot *common.Hash `json:"stateRoot" gencodec:"required"`
ReceiptsRoot *common.Hash `json:"receiptsRoot" gencodec:"required"`
LogsBloom *hexutil.Bytes `json:"logsBloom" gencodec:"required"`
Random *common.Hash `json:"prevRandao" gencodec:"required"`
Number *hexutil.Uint64 `json:"blockNumber" gencodec:"required"`
GasLimit *hexutil.Uint64 `json:"gasLimit" gencodec:"required"`
GasUsed *hexutil.Uint64 `json:"gasUsed" gencodec:"required"`
Timestamp *hexutil.Uint64 `json:"timestamp" gencodec:"required"`
ExtraData *hexutil.Bytes `json:"extraData" gencodec:"required"`
BaseFeePerGas *hexutil.Big `json:"baseFeePerGas" gencodec:"required"`
BlockHash *common.Hash `json:"blockHash" gencodec:"required"`
Transactions []hexutil.Bytes `json:"transactions" gencodec:"required"`
Withdrawals []*types.Withdrawal `json:"withdrawals"`
BlobGasUsed *hexutil.Uint64 `json:"blobGasUsed"`
ExcessBlobGas *hexutil.Uint64 `json:"excessBlobGas"`
ExecutionWitness *types.ExecutionWitness `json:"executionWitness,omitempty"`
ParentHash *common.Hash `json:"parentHash" gencodec:"required"`
FeeRecipient *common.Address `json:"feeRecipient" gencodec:"required"`
StateRoot *common.Hash `json:"stateRoot" gencodec:"required"`
ReceiptsRoot *common.Hash `json:"receiptsRoot" gencodec:"required"`
LogsBloom *hexutil.Bytes `json:"logsBloom" gencodec:"required"`
Random *common.Hash `json:"prevRandao" gencodec:"required"`
Number *hexutil.Uint64 `json:"blockNumber" gencodec:"required"`
GasLimit *hexutil.Uint64 `json:"gasLimit" gencodec:"required"`
GasUsed *hexutil.Uint64 `json:"gasUsed" gencodec:"required"`
Timestamp *hexutil.Uint64 `json:"timestamp" gencodec:"required"`
ExtraData *hexutil.Bytes `json:"extraData" gencodec:"required"`
BaseFeePerGas *hexutil.Big `json:"baseFeePerGas" gencodec:"required"`
BlockHash *common.Hash `json:"blockHash" gencodec:"required"`
Transactions []hexutil.Bytes `json:"transactions" gencodec:"required"`
Withdrawals []*types.Withdrawal `json:"withdrawals"`
BlobGasUsed *hexutil.Uint64 `json:"blobGasUsed"`
ExcessBlobGas *hexutil.Uint64 `json:"excessBlobGas"`
}
var dec ExecutableData
if err := json.Unmarshal(input, &dec); err != nil {
@ -157,8 +154,5 @@ func (e *ExecutableData) UnmarshalJSON(input []byte) error {
if dec.ExcessBlobGas != nil {
e.ExcessBlobGas = (*uint64)(dec.ExcessBlobGas)
}
if dec.ExecutionWitness != nil {
e.ExecutionWitness = dec.ExecutionWitness
}
return nil
}

View file

@ -73,24 +73,23 @@ type payloadAttributesMarshaling struct {
// ExecutableData is the data necessary to execute an EL payload.
type ExecutableData struct {
ParentHash common.Hash `json:"parentHash" gencodec:"required"`
FeeRecipient common.Address `json:"feeRecipient" gencodec:"required"`
StateRoot common.Hash `json:"stateRoot" gencodec:"required"`
ReceiptsRoot common.Hash `json:"receiptsRoot" gencodec:"required"`
LogsBloom []byte `json:"logsBloom" gencodec:"required"`
Random common.Hash `json:"prevRandao" gencodec:"required"`
Number uint64 `json:"blockNumber" gencodec:"required"`
GasLimit uint64 `json:"gasLimit" gencodec:"required"`
GasUsed uint64 `json:"gasUsed" gencodec:"required"`
Timestamp uint64 `json:"timestamp" gencodec:"required"`
ExtraData []byte `json:"extraData" gencodec:"required"`
BaseFeePerGas *big.Int `json:"baseFeePerGas" gencodec:"required"`
BlockHash common.Hash `json:"blockHash" gencodec:"required"`
Transactions [][]byte `json:"transactions" gencodec:"required"`
Withdrawals []*types.Withdrawal `json:"withdrawals"`
BlobGasUsed *uint64 `json:"blobGasUsed"`
ExcessBlobGas *uint64 `json:"excessBlobGas"`
ExecutionWitness *types.ExecutionWitness `json:"executionWitness,omitempty"`
ParentHash common.Hash `json:"parentHash" gencodec:"required"`
FeeRecipient common.Address `json:"feeRecipient" gencodec:"required"`
StateRoot common.Hash `json:"stateRoot" gencodec:"required"`
ReceiptsRoot common.Hash `json:"receiptsRoot" gencodec:"required"`
LogsBloom []byte `json:"logsBloom" gencodec:"required"`
Random common.Hash `json:"prevRandao" gencodec:"required"`
Number uint64 `json:"blockNumber" gencodec:"required"`
GasLimit uint64 `json:"gasLimit" gencodec:"required"`
GasUsed uint64 `json:"gasUsed" gencodec:"required"`
Timestamp uint64 `json:"timestamp" gencodec:"required"`
ExtraData []byte `json:"extraData" gencodec:"required"`
BaseFeePerGas *big.Int `json:"baseFeePerGas" gencodec:"required"`
BlockHash common.Hash `json:"blockHash" gencodec:"required"`
Transactions [][]byte `json:"transactions" gencodec:"required"`
Withdrawals []*types.Withdrawal `json:"withdrawals"`
BlobGasUsed *uint64 `json:"blobGasUsed"`
ExcessBlobGas *uint64 `json:"excessBlobGas"`
}
// JSON type overrides for executableData.
@ -316,8 +315,7 @@ func ExecutableDataToBlockNoHash(data ExecutableData, versionedHashes []common.H
RequestsHash: requestsHash,
}
return types.NewBlockWithHeader(header).
WithBody(types.Body{Transactions: txs, Uncles: nil, Withdrawals: data.Withdrawals}).
WithWitness(data.ExecutionWitness),
WithBody(types.Body{Transactions: txs, Uncles: nil, Withdrawals: data.Withdrawals}),
nil
}
@ -325,24 +323,23 @@ func ExecutableDataToBlockNoHash(data ExecutableData, versionedHashes []common.H
// fields from the given block. It assumes the given block is post-merge block.
func BlockToExecutableData(block *types.Block, fees *big.Int, sidecars []*types.BlobTxSidecar, requests [][]byte) *ExecutionPayloadEnvelope {
data := &ExecutableData{
BlockHash: block.Hash(),
ParentHash: block.ParentHash(),
FeeRecipient: block.Coinbase(),
StateRoot: block.Root(),
Number: block.NumberU64(),
GasLimit: block.GasLimit(),
GasUsed: block.GasUsed(),
BaseFeePerGas: block.BaseFee(),
Timestamp: block.Time(),
ReceiptsRoot: block.ReceiptHash(),
LogsBloom: block.Bloom().Bytes(),
Transactions: encodeTransactions(block.Transactions()),
Random: block.MixDigest(),
ExtraData: block.Extra(),
Withdrawals: block.Withdrawals(),
BlobGasUsed: block.BlobGasUsed(),
ExcessBlobGas: block.ExcessBlobGas(),
ExecutionWitness: block.ExecutionWitness(),
BlockHash: block.Hash(),
ParentHash: block.ParentHash(),
FeeRecipient: block.Coinbase(),
StateRoot: block.Root(),
Number: block.NumberU64(),
GasLimit: block.GasLimit(),
GasUsed: block.GasUsed(),
BaseFeePerGas: block.BaseFee(),
Timestamp: block.Time(),
ReceiptsRoot: block.ReceiptHash(),
LogsBloom: block.Bloom().Bytes(),
Transactions: encodeTransactions(block.Transactions()),
Random: block.MixDigest(),
ExtraData: block.Extra(),
Withdrawals: block.Withdrawals(),
BlobGasUsed: block.BlobGasUsed(),
ExcessBlobGas: block.ExcessBlobGas(),
}
// Add blobs.

View file

@ -105,6 +105,7 @@ func (s *HeadSync) Process(requester request.Requester, events []request.Event)
delete(s.serverHeads, event.Server)
delete(s.unvalidatedOptimistic, event.Server)
delete(s.unvalidatedFinality, event.Server)
delete(s.reqFinalityEpoch, event.Server)
}
}
}

View file

@ -251,8 +251,6 @@ func init() {
utils.ShowDeprecated,
// See snapshot.go
snapshotCommand,
// See verkle.go
verkleCommand,
}
if logTestCommand != nil {
app.Commands = append(app.Commands, logTestCommand)

View file

@ -1,214 +0,0 @@
// Copyright 2022 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
package main
import (
"bytes"
"encoding/hex"
"errors"
"fmt"
"os"
"slices"
"github.com/ethereum/go-ethereum/cmd/utils"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-verkle"
"github.com/urfave/cli/v2"
)
var (
	// zero is the all-zero serialized commitment; a child whose commitment
	// equals it is treated as absent during traversal.
	zero [32]byte

	// verkleCommand bundles the experimental verkle tree management
	// subcommands (verify, dump).
	verkleCommand = &cli.Command{
		Name:        "verkle",
		Usage:       "A set of experimental verkle tree management commands",
		Description: "",
		Subcommands: []*cli.Command{
			{
				Name:      "verify",
				Usage:     "verify the conversion of a MPT into a verkle tree",
				ArgsUsage: "<root>",
				Action:    verifyVerkle,
				Flags:     slices.Concat(utils.NetworkFlags, utils.DatabaseFlags),
				Description: `
geth verkle verify <state-root>
This command takes a root commitment and attempts to rebuild the tree.
`,
			},
			{
				Name:      "dump",
				Usage:     "Dump a verkle tree to a DOT file",
				ArgsUsage: "<root> <key1> [<key 2> ...]",
				Action:    expandVerkle,
				Flags:     slices.Concat(utils.NetworkFlags, utils.DatabaseFlags),
				Description: `
geth verkle dump <state-root> <key 1> [<key 2> ...]
This command will produce a dot file representing the tree, rooted at <root>.
in which key1, key2, ... are expanded.
`,
			},
		},
	}
)
// recurse into each child to ensure they can be loaded from the db. The tree isn't rebuilt
// (only its nodes are loaded) so there is no need to flush them, the garbage collector should
// take care of that for us.
//
// On failure the returned error is prefixed with the hex digits of the path
// from the root down to the node that could not be resolved or decoded.
func checkChildren(root verkle.VerkleNode, resolver verkle.NodeResolverFn) error {
	switch node := root.(type) {
	case *verkle.InternalNode:
		for i, child := range node.Children() {
			childC := child.Commit().Bytes()
			// An all-zero commitment marks an absent child; skip it.
			if bytes.Equal(childC[:], zero[:]) {
				continue
			}
			childS, err := resolver(childC[:])
			if err != nil {
				return fmt.Errorf("could not find child %x in db: %w", childC, err)
			}
			// depth is set to 0, the tree isn't rebuilt so it's not a problem
			childN, err := verkle.ParseNode(childS, 0)
			if err != nil {
				return fmt.Errorf("decode error child %x in db: %w", child.Commitment().Bytes(), err)
			}
			if err := checkChildren(childN, resolver); err != nil {
				return fmt.Errorf("%x%w", i, err) // write the path to the erroring node
			}
		}
	case *verkle.LeafNode:
		// sanity check: ensure at least one value is non-zero
		for i := 0; i < verkle.NodeWidth; i++ {
			if len(node.Value(i)) != 0 {
				return nil
			}
		}
		return errors.New("both balance and nonce are 0")
	case verkle.Empty:
		// nothing to do
	default:
		return fmt.Errorf("unsupported type encountered %v", root)
	}
	return nil
}
// verifyVerkle walks the verkle tree stored in the database to verify that
// every node reachable from the root commitment can be resolved and decoded.
// With one CLI argument that argument is parsed as the state root; with none,
// the head block's root is used. More than one argument is rejected.
func verifyVerkle(ctx *cli.Context) error {
	stack, _ := makeConfigNode(ctx)
	defer stack.Close()

	chaindb := utils.MakeChainDatabase(ctx, stack, true)
	defer chaindb.Close()

	headBlock := rawdb.ReadHeadBlock(chaindb)
	if headBlock == nil {
		log.Error("Failed to load head block")
		return errors.New("no head block")
	}
	if ctx.NArg() > 1 {
		log.Error("Too many arguments given")
		return errors.New("too many arguments")
	}
	// Determine which state root to verify.
	var stateRoot common.Hash
	if ctx.NArg() == 1 {
		parsed, err := parseRoot(ctx.Args().First())
		if err != nil {
			log.Error("Failed to resolve state root", "error", err)
			return err
		}
		stateRoot = parsed
		log.Info("Rebuilding the tree", "root", stateRoot)
	} else {
		stateRoot = headBlock.Root()
		log.Info("Rebuilding the tree", "root", stateRoot, "number", headBlock.NumberU64())
	}
	// Load and decode the root node, then recurse through its children.
	payload, err := chaindb.Get(stateRoot[:])
	if err != nil {
		return err
	}
	rootNode, err := verkle.ParseNode(payload, 0)
	if err != nil {
		return err
	}
	if err := checkChildren(rootNode, chaindb.Get); err != nil {
		log.Error("Could not rebuild the tree from the database", "err", err)
		return err
	}
	log.Info("Tree was rebuilt from the database")
	return nil
}
// expandVerkle loads the verkle tree rooted at the given state root, expands
// it along the paths of the given keys, and dumps the result to "dump.dot" in
// graphviz DOT format.
//
// Usage: geth verkle dump <state-root> <key 1> [<key 2> ...]
func expandVerkle(ctx *cli.Context) error {
	stack, _ := makeConfigNode(ctx)
	defer stack.Close()
	chaindb := utils.MakeChainDatabase(ctx, stack, true)
	defer chaindb.Close()
	var (
		rootC   common.Hash
		keylist [][]byte
		err     error
	)
	if ctx.NArg() >= 2 {
		rootC, err = parseRoot(ctx.Args().First())
		if err != nil {
			log.Error("Failed to resolve state root", "error", err)
			return err
		}
		keylist = make([][]byte, 0, ctx.Args().Len()-1)
		args := ctx.Args().Slice()
		for i := range args[1:] {
			key, err := hex.DecodeString(args[i+1])
			if err != nil {
				// Check the decode error before logging or using the key:
				// on failure the key bytes are meaningless.
				return fmt.Errorf("error decoding key #%d: %w", i+1, err)
			}
			log.Info("decoded key", "arg", args[i+1], "key", key)
			keylist = append(keylist, key)
		}
		log.Info("Rebuilding the tree", "root", rootC)
	} else {
		return fmt.Errorf("usage: %s root key1 [key 2...]", ctx.App.Name)
	}
	serializedRoot, err := chaindb.Get(rootC[:])
	if err != nil {
		return err
	}
	root, err := verkle.ParseNode(serializedRoot, 0)
	if err != nil {
		return err
	}
	for i, key := range keylist {
		log.Info("Reading key", "index", i, "key", key)
		// Get is called for its side effect of expanding the tree along the
		// key's path; surface resolution failures instead of dropping them.
		if _, err := root.Get(key, chaindb.Get); err != nil {
			log.Error("Failed to resolve key", "key", key, "err", err)
		}
	}
	if err := os.WriteFile("dump.dot", []byte(verkle.ToDot(root)), 0600); err != nil {
		log.Error("Failed to dump file", "err", err)
	} else {
		log.Info("Tree was dumped to file", "file", "dump.dot")
	}
	return nil
}

View file

@ -14,13 +14,11 @@ require (
github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/consensys/gnark-crypto v0.18.1 // indirect
github.com/crate-crypto/go-eth-kzg v1.4.0 // indirect
github.com/crate-crypto/go-ipa v0.0.0-20240724233137-53bbb0ceb27a // indirect
github.com/deckarep/golang-set/v2 v2.6.0 // indirect
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1 // indirect
github.com/emicklei/dot v1.6.2 // indirect
github.com/ethereum/c-kzg-4844/v2 v2.1.5 // indirect
github.com/ethereum/go-bigmodexpfix v0.0.0-20250911101455-f9e208c548ab // indirect
github.com/ethereum/go-verkle v0.2.2 // indirect
github.com/ferranbt/fastssz v0.1.4 // indirect
github.com/go-ole/go-ole v1.3.0 // indirect
github.com/gofrs/flock v0.12.1 // indirect

View file

@ -30,8 +30,6 @@ github.com/consensys/gnark-crypto v0.18.1 h1:RyLV6UhPRoYYzaFnPQA4qK3DyuDgkTgskDd
github.com/consensys/gnark-crypto v0.18.1/go.mod h1:L3mXGFTe1ZN+RSJ+CLjUt9x7PNdx8ubaYfDROyp2Z8c=
github.com/crate-crypto/go-eth-kzg v1.4.0 h1:WzDGjHk4gFg6YzV0rJOAsTK4z3Qkz5jd4RE3DAvPFkg=
github.com/crate-crypto/go-eth-kzg v1.4.0/go.mod h1:J9/u5sWfznSObptgfa92Jq8rTswn6ahQWEuiLHOjCUI=
github.com/crate-crypto/go-ipa v0.0.0-20240724233137-53bbb0ceb27a h1:W8mUrRp6NOVl3J+MYp5kPMoUZPp7aOYHtaua31lwRHg=
github.com/crate-crypto/go-ipa v0.0.0-20240724233137-53bbb0ceb27a/go.mod h1:sTwzHBvIzm2RfVCGNEBZgRyjwK40bVoun3ZnGOCafNM=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/deckarep/golang-set/v2 v2.6.0 h1:XfcQbWM1LlMB8BsJ8N9vW5ehnnPVIw0je80NsVHagjM=
@ -46,8 +44,6 @@ github.com/ethereum/c-kzg-4844/v2 v2.1.5 h1:aVtoLK5xwJ6c5RiqO8g8ptJ5KU+2Hdquf6G3
github.com/ethereum/c-kzg-4844/v2 v2.1.5/go.mod h1:u59hRTTah4Co6i9fDWtiCjTrblJv0UwsqZKCc0GfgUs=
github.com/ethereum/go-bigmodexpfix v0.0.0-20250911101455-f9e208c548ab h1:rvv6MJhy07IMfEKuARQ9TKojGqLVNxQajaXEp/BoqSk=
github.com/ethereum/go-bigmodexpfix v0.0.0-20250911101455-f9e208c548ab/go.mod h1:IuLm4IsPipXKF7CW5Lzf68PIbZ5yl7FFd74l/E0o9A8=
github.com/ethereum/go-verkle v0.2.2 h1:I2W0WjnrFUIzzVPwm8ykY+7pL2d4VhlsePn4j7cnFk8=
github.com/ethereum/go-verkle v0.2.2/go.mod h1:M3b90YRnzqKyyzBEWJGqj8Qff4IDeXnzFw0P9bFw3uk=
github.com/ferranbt/fastssz v0.1.4 h1:OCDB+dYDEQDvAgtAGnTSidK1Pe2tW3nFV40XyMkTeDY=
github.com/ferranbt/fastssz v0.1.4/go.mod h1:Ea3+oeoRGGLGm5shYAeDgu6PGUlcvQhE2fILyD9+tGg=
github.com/getsentry/sentry-go v0.27.0 h1:Pv98CIbtB3LkMWmXi4Joa5OOcwbmnX88sF5qbK3r3Ps=

View file

@ -1430,7 +1430,7 @@ func SetNodeConfig(ctx *cli.Context, cfg *node.Config) {
cfg.KeyStoreDir = ctx.String(KeyStoreDirFlag.Name)
}
if ctx.IsSet(DeveloperFlag.Name) {
cfg.UseLightweightKDF = true
cfg.UseLightweightKDF = ctx.Bool(DeveloperFlag.Name)
}
if ctx.IsSet(LightKDFFlag.Name) {
cfg.UseLightweightKDF = ctx.Bool(LightKDFFlag.Name)

View file

@ -34,4 +34,5 @@ the following commands (in this directory) against a synced mainnet node:
> go run . filtergen --queries queries/filter_queries_mainnet.json http://host:8545
> go run . historygen --history-tests queries/history_mainnet.json http://host:8545
> go run . tracegen --trace-tests queries/trace_mainnet.json --trace-start 4000000 --trace-end 4000100 http://host:8545
> go run . proofgen --proof-tests queries/proof_mainnet.json --proof-states 3000 http://host:8545
```

View file

@ -48,6 +48,7 @@ func init() {
historyGenerateCommand,
filterGenerateCommand,
traceGenerateCommand,
proofGenerateCommand,
filterPerfCommand,
filterFuzzCommand,
}

105
cmd/workload/prooftest.go Normal file
View file

@ -0,0 +1,105 @@
// Copyright 2025 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
package main
import (
"context"
"encoding/json"
"fmt"
"math/big"
"os"
"strings"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/internal/utesting"
"github.com/urfave/cli/v2"
)
// proofTest is the content of a state-proof test.
type proofTest struct {
	// BlockNumbers lists the block heights the proofs are generated against.
	BlockNumbers []uint64 `json:"blockNumbers"`
	// Addresses[i] holds the accounts proven at BlockNumbers[i].
	Addresses [][]common.Address `json:"addresses"`
	// StorageKeys[i][j] holds the storage slots proven for Addresses[i][j];
	// a nil entry means only the account itself is proven.
	StorageKeys [][][]string `json:"storageKeys"`
	// Results[i][j] is the keccak256 hash of the JSON-encoded eth_getProof
	// response expected for Addresses[i][j] at BlockNumbers[i].
	Results [][]common.Hash `json:"results"`
}
// proofTestSuite replays recorded eth_getProof queries against a live node
// and checks the responses against the recorded result hashes.
type proofTestSuite struct {
	// cfg supplies the RPC client and the location of the test file.
	cfg testConfig
	// tests holds the decoded proof queries and expected result hashes.
	tests proofTest
	// invalidDir is the folder for mismatched state proof output files
	// (set from the proof-invalid flag); empty disables the dump.
	invalidDir string
}
// newProofTestSuite builds a proof test suite from the given configuration
// and CLI context, eagerly loading the recorded test queries. The process
// exits if the tests cannot be loaded.
func newProofTestSuite(cfg testConfig, ctx *cli.Context) *proofTestSuite {
	suite := new(proofTestSuite)
	suite.cfg = cfg
	suite.invalidDir = ctx.String(proofTestInvalidOutputFlag.Name)
	err := suite.loadTests()
	if err != nil {
		exit(err)
	}
	return suite
}
// loadTests reads and decodes the proof test definitions from the configured
// test file. The embedded filesystem is consulted first; if the file is not
// embedded, it is loaded from disk instead.
func (s *proofTestSuite) loadTests() error {
	file, err := s.cfg.fsys.Open(s.cfg.proofTestFile)
	if err != nil {
		// If not found in embedded FS, try to load it from disk.
		if !os.IsNotExist(err) {
			return err
		}
		// os.Open is the idiomatic read-only open (O_RDONLY with no
		// creation mode needed).
		file, err = os.Open(s.cfg.proofTestFile)
		if err != nil {
			return fmt.Errorf("can't open proofTestFile: %w", err)
		}
	}
	defer file.Close()

	if err := json.NewDecoder(file).Decode(&s.tests); err != nil {
		return fmt.Errorf("invalid JSON in %s: %w", s.cfg.proofTestFile, err)
	}
	if len(s.tests.BlockNumbers) == 0 {
		return fmt.Errorf("proofTestFile %s has no test data", s.cfg.proofTestFile)
	}
	return nil
}
// allTests returns the workload tests provided by this suite; the proof test
// is registered via newArchiveWorkloadTest.
func (s *proofTestSuite) allTests() []workloadTest {
	tests := make([]workloadTest, 0, 1)
	tests = append(tests, newArchiveWorkloadTest("Proof/GetProof", s.getProof))
	return tests
}
// getProof re-executes eth_getProof for every recorded query and checks that
// the keccak256 hash of the JSON-encoded response matches the recorded result.
func (s *proofTestSuite) getProof(t *utesting.T) {
	ctx := context.Background()
	for i, blockNumber := range s.tests.BlockNumbers {
		for j := 0; j < len(s.tests.Addresses[i]); j++ {
			res, err := s.cfg.client.Geth.GetProof(ctx, s.tests.Addresses[i][j], s.tests.StorageKeys[i][j], big.NewInt(int64(blockNumber)))
			if err != nil {
				t.Errorf("State proving fails, blockNumber: %d, address: %x, keys: %v, err: %v\n", blockNumber, s.tests.Addresses[i][j], strings.Join(s.tests.StorageKeys[i][j], " "), err)
				continue
			}
			blob, err := json.Marshal(res)
			if err != nil {
				// Report marshal failures like any other failed query and keep
				// going: the original t.Fatalf aborted the whole run, which
				// made its trailing `continue` unreachable and skipped all
				// remaining queries on a single failure.
				t.Errorf("State proving fails: error %v", err)
				continue
			}
			if crypto.Keccak256Hash(blob) != s.tests.Results[i][j] {
				t.Errorf("State proof mismatch, %d, number: %d, address: %x, keys: %v: invalid result", i, blockNumber, s.tests.Addresses[i][j], strings.Join(s.tests.StorageKeys[i][j], " "))
			}
		}
	}
}

View file

@ -0,0 +1,355 @@
// Copyright 2025 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
package main
import (
"context"
"encoding/json"
"fmt"
"math/big"
"math/rand"
"os"
"path/filepath"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/eth/tracers"
"github.com/ethereum/go-ethereum/eth/tracers/native"
"github.com/ethereum/go-ethereum/internal/flags"
"github.com/ethereum/go-ethereum/internal/testrand"
"github.com/ethereum/go-ethereum/log"
"github.com/urfave/cli/v2"
)
var (
	// proofGenerateCommand generates eth_getProof test queries against a live
	// node and records the hash of each response for later verification.
	proofGenerateCommand = &cli.Command{
		Name:      "proofgen",
		Usage:     "Generates tests for state proof verification",
		ArgsUsage: "<RPC endpoint URL>",
		Action:    generateProofTests,
		Flags: []cli.Flag{
			proofTestFileFlag,
			proofTestResultOutputFlag,
			proofTestStatesFlag,
			proofTestStartBlockFlag,
			proofTestEndBlockFlag,
		},
	}
	// proofTestFileFlag names the JSON file the proof queries are written to
	// during generation and read from during a test run.
	proofTestFileFlag = &cli.StringFlag{
		Name:     "proof-tests",
		Usage:    "JSON file containing proof test queries",
		Value:    "proof_tests.json",
		Category: flags.TestingCategory,
	}
	// proofTestResultOutputFlag optionally names a folder where the full
	// proof responses are dumped during generation; empty disables the dump.
	proofTestResultOutputFlag = &cli.StringFlag{
		Name:     "proof-output",
		Usage:    "Folder containing detailed trace output files",
		Value:    "",
		Category: flags.TestingCategory,
	}
	// proofTestStatesFlag bounds how many states to generate proofs for.
	proofTestStatesFlag = &cli.Int64Flag{
		Name:     "proof-states",
		Usage:    "Number of states to generate proof against",
		Value:    10000,
		Category: flags.TestingCategory,
	}
	// proofTestInvalidOutputFlag optionally names a folder where mismatched
	// proof outputs are written during a test run.
	proofTestInvalidOutputFlag = &cli.StringFlag{
		Name:     "proof-invalid",
		Usage:    "Folder containing the mismatched state proof output files",
		Value:    "",
		Category: flags.TestingCategory,
	}
	// proofTestStartBlockFlag is the first block (inclusive) of the sampled range.
	proofTestStartBlockFlag = &cli.Uint64Flag{
		Name:     "proof-start",
		Usage:    "The number of starting block for proof verification (included)",
		Category: flags.TestingCategory,
	}
	// proofTestEndBlockFlag is the last block (exclusive) of the sampled
	// range; zero means the current chain head (see generateProofTests).
	proofTestEndBlockFlag = &cli.Uint64Flag{
		Name:     "proof-end",
		Usage:    "The number of ending block for proof verification (excluded)",
		Category: flags.TestingCategory,
	}
)

// proofGenerator produces proof queries (block numbers, per-block account
// addresses and per-account storage keys) totaling roughly `number` states
// within the block range [startBlock, endBlock).
type proofGenerator func(cli *client, startBlock uint64, endBlock uint64, number int) ([]uint64, [][]common.Address, [][][]string, error)
// genAccountProof generates account-only proof queries by sampling random
// blocks in [startBlock, endBlock) and collecting the sender and recipient
// addresses of their transactions, until roughly `number` accounts have been
// gathered.
//
// NOTE(review): rand.Intn panics when endBlock == startBlock, and the loop
// never terminates if the sampled range contains no transactions — callers
// must ensure a sane, non-empty block range.
func genAccountProof(cli *client, startBlock uint64, endBlock uint64, number int) ([]uint64, [][]common.Address, [][][]string, error) {
	var (
		blockNumbers     []uint64
		accountAddresses [][]common.Address
		storageKeys      [][][]string
		nAccounts        int
		ctx              = context.Background()
		start            = time.Now()
	)
	chainID, err := cli.Eth.ChainID(ctx)
	if err != nil {
		return nil, nil, nil, err
	}
	signer := types.LatestSignerForChainID(chainID)
	for {
		if nAccounts >= number {
			break
		}
		// Sample a block uniformly from [startBlock, endBlock).
		blockNumber := uint64(rand.Intn(int(endBlock-startBlock))) + startBlock
		block, err := cli.Eth.BlockByNumber(context.Background(), big.NewInt(int64(blockNumber)))
		if err != nil {
			// Fetch failures are retried with a freshly sampled block.
			continue
		}
		var (
			addresses []common.Address
			keys      [][]string
			// gather records an address with no storage keys, i.e. an
			// account-only proof query.
			gather = func(address common.Address) {
				addresses = append(addresses, address)
				keys = append(keys, nil)
				nAccounts++
			}
		)
		for _, tx := range block.Transactions() {
			if nAccounts >= number {
				break
			}
			sender, err := signer.Sender(tx)
			if err != nil {
				log.Error("Failed to resolve the sender address", "hash", tx.Hash(), "err", err)
				continue
			}
			gather(sender)
			// Contract creations have no recipient; only record To when set.
			if tx.To() != nil {
				gather(*tx.To())
			}
		}
		blockNumbers = append(blockNumbers, blockNumber)
		accountAddresses = append(accountAddresses, addresses)
		storageKeys = append(storageKeys, keys)
	}
	log.Info("Generated tests for account proof", "blocks", len(blockNumbers), "accounts", nAccounts, "elapsed", common.PrettyDuration(time.Since(start)))
	return blockNumbers, accountAddresses, storageKeys, nil
}
// genNonExistentAccountProof generates proof queries for randomly generated
// (and therefore almost certainly non-existent) accounts, five per sampled
// block, covering roughly number/5 blocks in [startBlock, endBlock).
func genNonExistentAccountProof(cli *client, startBlock uint64, endBlock uint64, number int) ([]uint64, [][]common.Address, [][][]string, error) {
	const accountsPerBlock = 5
	var (
		numbers   []uint64
		addrSets  [][]common.Address
		keySets   [][][]string
		generated int
	)
	for i := 0; i < number/accountsPerBlock; i++ {
		// Sample a block uniformly from [startBlock, endBlock).
		pick := startBlock + uint64(rand.Intn(int(endBlock-startBlock)))
		addrs := make([]common.Address, 0, accountsPerBlock)
		keys := make([][]string, 0, accountsPerBlock)
		for j := 0; j < accountsPerBlock; j++ {
			addrs = append(addrs, testrand.Address())
			keys = append(keys, nil)
		}
		generated += len(addrs)
		numbers = append(numbers, pick)
		addrSets = append(addrSets, addrs)
		keySets = append(keySets, keys)
	}
	log.Info("Generated tests for non-existing account proof", "blocks", len(numbers), "accounts", generated)
	return numbers, addrSets, keySets, nil
}
// genStorageProof generates storage proof queries by sampling random blocks
// in [startBlock, endBlock) and tracing their transactions with the prestate
// tracer to discover accounts with touched storage slots. For each such
// account, all touched slots plus three random (presumably empty) slots are
// recorded. The loop runs until roughly `number` states (accounts + slots)
// have been gathered.
//
// NOTE(review): rand.Intn panics when endBlock == startBlock, and the loop
// never terminates if no traced transaction touches storage — callers must
// supply a sane, non-empty range.
func genStorageProof(cli *client, startBlock uint64, endBlock uint64, number int) ([]uint64, [][]common.Address, [][][]string, error) {
	var (
		blockNumbers     []uint64
		accountAddresses [][]common.Address
		storageKeys      [][][]string
		nAccounts        int
		nStorages        int
		start            = time.Now()
	)
	for {
		if nAccounts+nStorages >= number {
			break
		}
		// Sample a block uniformly from [startBlock, endBlock).
		blockNumber := uint64(rand.Intn(int(endBlock-startBlock))) + startBlock
		block, err := cli.Eth.BlockByNumber(context.Background(), big.NewInt(int64(blockNumber)))
		if err != nil {
			// Fetch failures are retried with a freshly sampled block.
			continue
		}
		var (
			addresses []common.Address
			slots     [][]string
			tracer    = "prestateTracer"
			// Trace with storage enabled but code disabled: only the touched
			// slots are needed here.
			configBlob, _ = json.Marshal(native.PrestateTracerConfig{
				DiffMode:       false,
				DisableCode:    true,
				DisableStorage: false,
			})
		)
		for _, tx := range block.Transactions() {
			if nAccounts+nStorages >= number {
				break
			}
			// Contract creations are skipped; only calls are traced.
			if tx.To() == nil {
				continue
			}
			ret, err := cli.Geth.TraceTransaction(context.Background(), tx.Hash(), &tracers.TraceConfig{
				Tracer:       &tracer,
				TracerConfig: configBlob,
			})
			if err != nil {
				log.Error("Failed to trace the transaction", "blockNumber", blockNumber, "hash", tx.Hash(), "err", err)
				continue
			}
			// Round-trip the tracer result through JSON to decode it into a
			// typed address->account map.
			blob, err := json.Marshal(ret)
			if err != nil {
				log.Error("Failed to marshal data", "err", err)
				continue
			}
			var accounts map[common.Address]*types.Account
			if err := json.Unmarshal(blob, &accounts); err != nil {
				log.Error("Failed to decode trace result", "blockNumber", blockNumber, "hash", tx.Hash(), "err", err)
				continue
			}
			for addr, account := range accounts {
				// Only accounts with touched storage are interesting here.
				if len(account.Storage) == 0 {
					continue
				}
				addresses = append(addresses, addr)
				nAccounts += 1
				var keys []string
				for k := range account.Storage {
					keys = append(keys, k.Hex())
				}
				nStorages += len(keys)
				// Mix in three random slots, which are very likely empty, to
				// also exercise non-existence proofs.
				var emptyKeys []string
				for i := 0; i < 3; i++ {
					emptyKeys = append(emptyKeys, testrand.Hash().Hex())
				}
				nStorages += len(emptyKeys)
				slots = append(slots, append(keys, emptyKeys...))
			}
		}
		blockNumbers = append(blockNumbers, blockNumber)
		accountAddresses = append(accountAddresses, addresses)
		storageKeys = append(storageKeys, slots)
	}
	log.Info("Generated tests for storage proof", "blocks", len(blockNumbers), "accounts", nAccounts, "storages", nStorages, "elapsed", common.PrettyDuration(time.Since(start)))
	return blockNumbers, accountAddresses, storageKeys, nil
}
// genProofRequests assembles the full proof-test corpus by delegating to the
// individual generators: 20% plain account proofs, 10% non-existent account
// proofs and 70% storage proofs of the requested state count.
func genProofRequests(cli *client, startBlock, endBlock uint64, states int) (*proofTest, error) {
	generators := []struct {
		fn    proofGenerator
		share float64
	}{
		{genAccountProof, 0.2},
		{genNonExistentAccountProof, 0.1},
		{genStorageProof, 0.7},
	}
	test := &proofTest{}
	for _, gen := range generators {
		numbers, addresses, keys, err := gen.fn(cli, startBlock, endBlock, int(float64(states)*gen.share))
		if err != nil {
			return nil, err
		}
		test.BlockNumbers = append(test.BlockNumbers, numbers...)
		test.Addresses = append(test.Addresses, addresses...)
		test.StorageKeys = append(test.StorageKeys, keys...)
	}
	return test, nil
}
// generateProofTests implements the proofgen command: it generates proof
// queries over the configured block range, executes them against the remote
// node, records the keccak256 hash of each response and writes the resulting
// test file to disk.
func generateProofTests(clictx *cli.Context) error {
	var (
		client     = makeClient(clictx)
		ctx        = context.Background()
		states     = clictx.Int(proofTestStatesFlag.Name)
		outputFile = clictx.String(proofTestFileFlag.Name)
		outputDir  = clictx.String(proofTestResultOutputFlag.Name)
		startBlock = clictx.Uint64(proofTestStartBlockFlag.Name)
		endBlock   = clictx.Uint64(proofTestEndBlockFlag.Name)
	)
	head, err := client.Eth.BlockNumber(ctx)
	if err != nil {
		exit(err)
	}
	if startBlock > head || endBlock > head {
		return fmt.Errorf("chain is out of proof range, head %d, start: %d, limit: %d", head, startBlock, endBlock)
	}
	// An unset end block means "up to the current head".
	if endBlock == 0 {
		endBlock = head
	}
	// Reject an empty range up front: the generators sample block numbers
	// with rand.Intn(endBlock-startBlock), which panics when the range is
	// not positive.
	if startBlock >= endBlock {
		return fmt.Errorf("invalid proof range, start: %d, end: %d", startBlock, endBlock)
	}
	log.Info("Generating proof states", "startBlock", startBlock, "endBlock", endBlock, "states", states)
	test, err := genProofRequests(client, startBlock, endBlock, states)
	if err != nil {
		exit(err)
	}
	for i, blockNumber := range test.BlockNumbers {
		var hashes []common.Hash
		for j := 0; j < len(test.Addresses[i]); j++ {
			res, err := client.Geth.GetProof(ctx, test.Addresses[i][j], test.StorageKeys[i][j], big.NewInt(int64(blockNumber)))
			if err != nil {
				// NOTE(review): skipping here leaves Results[i] shorter than
				// Addresses[i], which the replay side indexes positionally —
				// verify a failed query cannot poison the recorded file.
				log.Error("Failed to prove the state", "number", blockNumber, "address", test.Addresses[i][j], "slots", len(test.StorageKeys[i][j]), "err", err)
				continue
			}
			blob, err := json.Marshal(res)
			if err != nil {
				return err
			}
			hashes = append(hashes, crypto.Keccak256Hash(blob))
			writeStateProof(outputDir, blockNumber, test.Addresses[i][j], res)
		}
		test.Results = append(test.Results, hashes)
	}
	writeJSON(outputFile, test)
	return nil
}
// writeStateProof dumps a single proof result as indented JSON into dir,
// using the file name "<blockNumber>-<address hex>". It is a no-op when
// dir is empty. Any failure aborts via exit, matching the fail-fast
// convention of the surrounding generators.
//
// Fixes: the original discarded the json.MarshalIndent error with a blank
// identifier; encoding failures are now reported. File writing uses
// os.WriteFile instead of manual Create/Write/Close.
func writeStateProof(dir string, blockNumber uint64, address common.Address, result any) {
	if dir == "" {
		return
	}
	// Ensure the directory exists
	if err := os.MkdirAll(dir, os.ModePerm); err != nil {
		exit(fmt.Errorf("failed to create directories: %w", err))
		return
	}
	data, err := json.MarshalIndent(result, "", " ")
	if err != nil {
		exit(fmt.Errorf("error encoding proof result: %v", err))
		return
	}
	name := filepath.Join(dir, fmt.Sprintf("%d-%x", blockNumber, address))
	// 0666 matches the default mode previously used by os.Create.
	if err := os.WriteFile(name, data, 0666); err != nil {
		exit(fmt.Errorf("error writing %s: %v", name, err))
		return
	}
}

View file

@ -50,7 +50,9 @@ var (
filterQueryFileFlag,
historyTestFileFlag,
traceTestFileFlag,
proofTestFileFlag,
traceTestInvalidOutputFlag,
proofTestInvalidOutputFlag,
},
}
testPatternFlag = &cli.StringFlag{
@ -95,6 +97,7 @@ type testConfig struct {
historyTestFile string
historyPruneBlock *uint64
traceTestFile string
proofTestFile string
}
var errPrunedHistory = errors.New("attempt to access pruned history")
@ -145,6 +148,12 @@ func testConfigFromCLI(ctx *cli.Context) (cfg testConfig) {
} else {
cfg.traceTestFile = "queries/trace_mainnet.json"
}
if ctx.IsSet(proofTestFileFlag.Name) {
cfg.proofTestFile = ctx.String(proofTestFileFlag.Name)
} else {
cfg.proofTestFile = "queries/proof_mainnet.json"
}
cfg.historyPruneBlock = new(uint64)
*cfg.historyPruneBlock = history.PrunePoints[params.MainnetGenesisHash].BlockNumber
case ctx.Bool(testSepoliaFlag.Name):
@ -164,6 +173,12 @@ func testConfigFromCLI(ctx *cli.Context) (cfg testConfig) {
} else {
cfg.traceTestFile = "queries/trace_sepolia.json"
}
if ctx.IsSet(proofTestFileFlag.Name) {
cfg.proofTestFile = ctx.String(proofTestFileFlag.Name)
} else {
cfg.proofTestFile = "queries/proof_sepolia.json"
}
cfg.historyPruneBlock = new(uint64)
*cfg.historyPruneBlock = history.PrunePoints[params.SepoliaGenesisHash].BlockNumber
default:
@ -171,6 +186,7 @@ func testConfigFromCLI(ctx *cli.Context) (cfg testConfig) {
cfg.filterQueryFile = ctx.String(filterQueryFileFlag.Name)
cfg.historyTestFile = ctx.String(historyTestFileFlag.Name)
cfg.traceTestFile = ctx.String(traceTestFileFlag.Name)
cfg.proofTestFile = ctx.String(proofTestFileFlag.Name)
}
return cfg
}
@ -222,11 +238,13 @@ func runTestCmd(ctx *cli.Context) error {
filterSuite := newFilterTestSuite(cfg)
historySuite := newHistoryTestSuite(cfg)
traceSuite := newTraceTestSuite(cfg, ctx)
proofSuite := newProofTestSuite(cfg, ctx)
// Filter test cases.
tests := filterSuite.allTests()
tests = append(tests, historySuite.allTests()...)
tests = append(tests, traceSuite.allTests()...)
tests = append(tests, proofSuite.allTests()...)
utests := filterTests(tests, ctx.String(testPatternFlag.Name), func(t workloadTest) bool {
if t.Slow && !ctx.Bool(testSlowFlag.Name) {

View file

@ -365,46 +365,7 @@ func (beacon *Beacon) FinalizeAndAssemble(chain consensus.ChainHeaderReader, hea
header.Root = state.IntermediateRoot(true)
// Assemble the final block.
block := types.NewBlock(header, body, receipts, trie.NewStackTrie(nil))
// Create the block witness and attach to block.
// This step needs to happen as late as possible to catch all access events.
if chain.Config().IsVerkle(header.Number, header.Time) {
keys := state.AccessEvents().Keys()
// Open the pre-tree to prove the pre-state against
parent := chain.GetHeaderByNumber(header.Number.Uint64() - 1)
if parent == nil {
return nil, fmt.Errorf("nil parent header for block %d", header.Number)
}
preTrie, err := state.Database().OpenTrie(parent.Root)
if err != nil {
return nil, fmt.Errorf("error opening pre-state tree root: %w", err)
}
postTrie := state.GetTrie()
if postTrie == nil {
return nil, errors.New("post-state tree is not available")
}
vktPreTrie, okpre := preTrie.(*trie.VerkleTrie)
vktPostTrie, okpost := postTrie.(*trie.VerkleTrie)
// The witness is only attached iff both parent and current block are
// using verkle tree.
if okpre && okpost {
if len(keys) > 0 {
verkleProof, stateDiff, err := vktPreTrie.Proof(vktPostTrie, keys)
if err != nil {
return nil, fmt.Errorf("error generating verkle proof for block %d: %w", header.Number, err)
}
block = block.WithWitness(&types.ExecutionWitness{
StateDiff: stateDiff,
VerkleProof: verkleProof,
})
}
}
}
return block, nil
return types.NewBlock(header, body, receipts, trie.NewStackTrie(nil)), nil
}
// Seal generates a new sealing request for the given input block and pushes

View file

@ -142,7 +142,7 @@ func (p *terminalPrompter) PromptPassword(prompt string) (passwd string, err err
// PromptConfirm displays the given prompt to the user and requests a boolean
// choice to be made, returning that choice.
func (p *terminalPrompter) PromptConfirm(prompt string) (bool, error) {
input, err := p.Prompt(prompt + " [y/n] ")
input, err := p.PromptInput(prompt + " [y/n] ")
if len(input) > 0 && strings.EqualFold(input[:1], "y") {
return true, nil
}

View file

@ -75,6 +75,7 @@ var (
storageReadTimer = metrics.NewRegisteredResettingTimer("chain/storage/reads", nil)
storageUpdateTimer = metrics.NewRegisteredResettingTimer("chain/storage/updates", nil)
storageCommitTimer = metrics.NewRegisteredResettingTimer("chain/storage/commits", nil)
codeReadTimer = metrics.NewRegisteredResettingTimer("chain/code/reads", nil)
accountCacheHitMeter = metrics.NewRegisteredMeter("chain/account/reads/cache/process/hit", nil)
accountCacheMissMeter = metrics.NewRegisteredMeter("chain/account/reads/cache/process/miss", nil)
@ -88,6 +89,7 @@ var (
accountReadSingleTimer = metrics.NewRegisteredResettingTimer("chain/account/single/reads", nil)
storageReadSingleTimer = metrics.NewRegisteredResettingTimer("chain/storage/single/reads", nil)
codeReadSingleTimer = metrics.NewRegisteredResettingTimer("chain/code/single/reads", nil)
snapshotCommitTimer = metrics.NewRegisteredResettingTimer("chain/snapshot/commits", nil)
triedbCommitTimer = metrics.NewRegisteredResettingTimer("chain/triedb/commits", nil)
@ -951,7 +953,8 @@ func (bc *BlockChain) rewindPathHead(head *types.Header, root common.Hash) (*typ
// Recover the target state if it's not available yet.
if !bc.HasState(head.Root) {
if err := bc.triedb.Recover(head.Root); err != nil {
log.Crit("Failed to rollback state", "err", err)
log.Error("Failed to rollback state, resetting to genesis", "err", err)
return bc.genesisBlock.Header(), rootNumber
}
}
log.Info("Rewound to block with state", "number", head.Number, "hash", head.Hash())
@ -1113,14 +1116,48 @@ func (bc *BlockChain) setHeadBeyondRoot(head uint64, time uint64, root common.Ha
return rootNumber, bc.loadLastState()
}
// SnapSyncCommitHead sets the current head block to the one defined by the hash
// irrelevant what the chain contents were prior.
func (bc *BlockChain) SnapSyncCommitHead(hash common.Hash) error {
// SnapSyncStart disables the underlying databases (such as the trie DB and the
// optional state snapshot) to prevent potential concurrent mutations between
// snap sync and other chain operations.
func (bc *BlockChain) SnapSyncStart() error {
if !bc.chainmu.TryLock() {
return errChainStopped
}
defer bc.chainmu.Unlock()
// Snap sync will directly modify the persistent state, making the entire
// trie database unusable until the state is fully synced. To prevent any
// subsequent state reads, explicitly disable the trie database and state
// syncer is responsible for addressing and correcting any missing state.
if bc.TrieDB().Scheme() == rawdb.PathScheme {
if err := bc.TrieDB().Disable(); err != nil {
return err
}
}
// Snap sync uses the snapshot namespace to store potentially flaky data until
// sync completely heals and finishes. Pause snapshot maintenance in the mean-
// time to prevent access.
if snapshots := bc.Snapshots(); snapshots != nil { // Only nil in tests
snapshots.Disable()
}
return nil
}
// SnapSyncComplete sets the current head block to the block identified by the
// given hash, regardless of the chain contents prior to snap sync. It is
// invoked once snap sync completes and assumes that SnapSyncStart was called
// previously.
func (bc *BlockChain) SnapSyncComplete(hash common.Hash) error {
// Make sure that both the block as well at its state trie exists
block := bc.GetBlockByHash(hash)
if block == nil {
return fmt.Errorf("non existent block [%x..]", hash[:4])
}
if !bc.chainmu.TryLock() {
return errChainStopped
}
defer bc.chainmu.Unlock()
// Reset the trie database with the fresh snap synced state.
root := block.Root()
if bc.triedb.Scheme() == rawdb.PathScheme {
@ -1131,19 +1168,16 @@ func (bc *BlockChain) SnapSyncCommitHead(hash common.Hash) error {
if !bc.HasState(root) {
return fmt.Errorf("non existent state [%x..]", root[:4])
}
// If all checks out, manually set the head block.
if !bc.chainmu.TryLock() {
return errChainStopped
}
bc.currentBlock.Store(block.Header())
headBlockGauge.Update(int64(block.NumberU64()))
bc.chainmu.Unlock()
// Destroy any existing state snapshot and regenerate it in the background,
// also resuming the normal maintenance of any previously paused snapshot.
if bc.snaps != nil {
bc.snaps.Rebuild(root)
}
// If all checks out, manually set the head block.
bc.currentBlock.Store(block.Header())
headBlockGauge.Update(int64(block.NumberU64()))
log.Info("Committed new head block", "number", block.Number(), "hash", hash)
return nil
}
@ -1602,22 +1636,33 @@ func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types.
//
// Note all the components of block(hash->number map, header, body, receipts)
// should be written atomically. BlockBatch is used for containing all components.
blockBatch := bc.db.NewBatch()
rawdb.WriteBlock(blockBatch, block)
rawdb.WriteReceipts(blockBatch, block.Hash(), block.NumberU64(), receipts)
rawdb.WritePreimages(blockBatch, statedb.Preimages())
if err := blockBatch.Write(); err != nil {
var (
batch = bc.db.NewBatch()
start = time.Now()
)
rawdb.WriteBlock(batch, block)
rawdb.WriteReceipts(batch, block.Hash(), block.NumberU64(), receipts)
rawdb.WritePreimages(batch, statedb.Preimages())
if err := batch.Write(); err != nil {
log.Crit("Failed to write block into disk", "err", err)
}
// Commit all cached state changes into underlying memory database.
root, stateUpdate, err := statedb.CommitWithUpdate(block.NumberU64(), bc.chainConfig.IsEIP158(block.Number()), bc.chainConfig.IsCancun(block.Number(), block.Time()))
log.Debug("Committed block data", "size", common.StorageSize(batch.ValueSize()), "elapsed", common.PrettyDuration(time.Since(start)))
var (
err error
root common.Hash
isEIP158 = bc.chainConfig.IsEIP158(block.Number())
isCancun = bc.chainConfig.IsCancun(block.Number(), block.Time())
)
if bc.stateSizer == nil {
root, err = statedb.Commit(block.NumberU64(), isEIP158, isCancun)
} else {
root, err = statedb.CommitAndTrack(block.NumberU64(), isEIP158, isCancun, bc.stateSizer)
}
if err != nil {
return err
}
// Emit the state update to the state sizestats if it's active
if bc.stateSizer != nil {
bc.stateSizer.Notify(stateUpdate)
}
// If node is running in path mode, skip explicit gc operation
// which is unnecessary in this mode.
if bc.triedb.Scheme() == rawdb.PathScheme {
@ -2160,6 +2205,7 @@ func (bc *BlockChain) ProcessBlock(parentRoot common.Hash, block *types.Block, s
stats.AccountUpdates = statedb.AccountUpdates // Account updates are complete(in validation)
stats.StorageUpdates = statedb.StorageUpdates // Storage updates are complete(in validation)
stats.AccountHashes = statedb.AccountHashes // Account hashes are complete(in validation)
stats.CodeReads = statedb.CodeReads
stats.AccountLoaded = statedb.AccountLoaded
stats.AccountUpdated = statedb.AccountUpdated
@ -2167,8 +2213,9 @@ func (bc *BlockChain) ProcessBlock(parentRoot common.Hash, block *types.Block, s
stats.StorageLoaded = statedb.StorageLoaded
stats.StorageUpdated = int(statedb.StorageUpdated.Load())
stats.StorageDeleted = int(statedb.StorageDeleted.Load())
stats.CodeLoaded = statedb.CodeLoaded
stats.Execution = ptime - (statedb.AccountReads + statedb.StorageReads) // The time spent on EVM processing
stats.Execution = ptime - (statedb.AccountReads + statedb.StorageReads + statedb.CodeReads) // The time spent on EVM processing
stats.Validation = vtime - (statedb.AccountHashes + statedb.AccountUpdates + statedb.StorageUpdates) // The time spent on block validation
stats.CrossValidation = xvtime // The time spent on stateless cross validation

View file

@ -37,6 +37,7 @@ type ExecuteStats struct {
AccountCommits time.Duration // Time spent on the account trie commit
StorageUpdates time.Duration // Time spent on the storage trie update
StorageCommits time.Duration // Time spent on the storage trie commit
CodeReads time.Duration // Time spent on the contract code read
AccountLoaded int // Number of accounts loaded
AccountUpdated int // Number of accounts updated
@ -44,6 +45,7 @@ type ExecuteStats struct {
StorageLoaded int // Number of storage slots loaded
StorageUpdated int // Number of storage slots updated
StorageDeleted int // Number of storage slots deleted
CodeLoaded int // Number of contract code loaded
Execution time.Duration // Time spent on the EVM execution
Validation time.Duration // Time spent on the block validation
@ -61,19 +63,21 @@ type ExecuteStats struct {
// reportMetrics uploads execution statistics to the metrics system.
func (s *ExecuteStats) reportMetrics() {
accountReadTimer.Update(s.AccountReads) // Account reads are complete(in processing)
storageReadTimer.Update(s.StorageReads) // Storage reads are complete(in processing)
if s.AccountLoaded != 0 {
accountReadTimer.Update(s.AccountReads)
accountReadSingleTimer.Update(s.AccountReads / time.Duration(s.AccountLoaded))
}
if s.StorageLoaded != 0 {
storageReadTimer.Update(s.StorageReads)
storageReadSingleTimer.Update(s.StorageReads / time.Duration(s.StorageLoaded))
}
if s.CodeLoaded != 0 {
codeReadTimer.Update(s.CodeReads)
codeReadSingleTimer.Update(s.CodeReads / time.Duration(s.CodeLoaded))
}
accountUpdateTimer.Update(s.AccountUpdates) // Account updates are complete(in validation)
storageUpdateTimer.Update(s.StorageUpdates) // Storage updates are complete(in validation)
accountHashTimer.Update(s.AccountHashes) // Account hashes are complete(in validation)
accountCommitTimer.Update(s.AccountCommits) // Account commits are complete, we can mark them
storageCommitTimer.Update(s.StorageCommits) // Storage commits are complete, we can mark them
@ -112,22 +116,44 @@ Block: %v (%#x) txs: %d, mgasps: %.2f, elapsed: %v
EVM execution: %v
Validation: %v
Account read: %v(%d)
Storage read: %v(%d)
Account hash: %v
Storage hash: %v
DB commit: %v
Block write: %v
State read: %v
Account read: %v(%d)
Storage read: %v(%d)
Code read: %v(%d)
State hash: %v
Account hash: %v
Storage hash: %v
Trie commit: %v
DB write: %v
State write: %v
Block write: %v
%s
##############################
`, block.Number(), block.Hash(), len(block.Transactions()), s.MgasPerSecond, common.PrettyDuration(s.TotalTime),
common.PrettyDuration(s.Execution), common.PrettyDuration(s.Validation+s.CrossValidation),
common.PrettyDuration(s.Execution),
common.PrettyDuration(s.Validation+s.CrossValidation),
// State read
common.PrettyDuration(s.AccountReads+s.StorageReads+s.CodeReads),
common.PrettyDuration(s.AccountReads), s.AccountLoaded,
common.PrettyDuration(s.StorageReads), s.StorageLoaded,
common.PrettyDuration(s.AccountHashes+s.AccountCommits+s.AccountUpdates),
common.PrettyDuration(s.StorageCommits+s.StorageUpdates),
common.PrettyDuration(s.TrieDBCommit+s.SnapshotCommit), common.PrettyDuration(s.BlockWrite),
common.PrettyDuration(s.CodeReads), s.CodeLoaded,
// State hash
common.PrettyDuration(s.AccountHashes+s.AccountUpdates+s.StorageUpdates+max(s.AccountCommits, s.StorageCommits)),
common.PrettyDuration(s.AccountHashes+s.AccountUpdates),
common.PrettyDuration(s.StorageUpdates),
common.PrettyDuration(max(s.AccountCommits, s.StorageCommits)),
// Database commit
common.PrettyDuration(s.TrieDBCommit+s.SnapshotCommit+s.BlockWrite),
common.PrettyDuration(s.TrieDBCommit+s.SnapshotCommit),
common.PrettyDuration(s.BlockWrite),
// cache statistics
s.StateReadCacheStats)
for _, line := range strings.Split(msg, "\n") {
if line == "" {

View file

@ -97,7 +97,7 @@ func LoadTransitionState(db ethdb.KeyValueReader, root common.Hash, isVerkle boo
// Initialize the first transition state, with the "ended"
// field set to true if the database was created
// as a verkle database.
log.Debug("no transition state found, starting fresh", "is verkle", db)
log.Debug("no transition state found, starting fresh", "verkle", isVerkle)
// Start with a fresh state
ts = &TransitionState{Ended: isVerkle}

View file

@ -91,6 +91,13 @@ func (t *memoryTable) truncateHead(items uint64) error {
if items < t.offset {
return errors.New("truncation below tail")
}
for i := int(items - t.offset); i < len(t.data); i++ {
if t.size > uint64(len(t.data[i])) {
t.size -= uint64(len(t.data[i]))
} else {
t.size = 0
}
}
t.data = t.data[:items-t.offset]
t.items = items
return nil
@ -108,6 +115,13 @@ func (t *memoryTable) truncateTail(items uint64) error {
if t.items < items {
return errors.New("truncation above head")
}
for i := uint64(0); i < items-t.offset; i++ {
if t.size > uint64(len(t.data[i])) {
t.size -= uint64(len(t.data[i]))
} else {
t.size = 0
}
}
t.data = t.data[items-t.offset:]
t.offset = items
return nil

View file

@ -23,7 +23,7 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/math"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/trie/utils"
"github.com/ethereum/go-ethereum/trie/bintrie"
"github.com/holiman/uint256"
)
@ -45,15 +45,12 @@ var zeroTreeIndex uint256.Int
type AccessEvents struct {
branches map[branchAccessKey]mode
chunks map[chunkAccessKey]mode
pointCache *utils.PointCache
}
func NewAccessEvents(pointCache *utils.PointCache) *AccessEvents {
func NewAccessEvents() *AccessEvents {
return &AccessEvents{
branches: make(map[branchAccessKey]mode),
chunks: make(map[chunkAccessKey]mode),
pointCache: pointCache,
branches: make(map[branchAccessKey]mode),
chunks: make(map[chunkAccessKey]mode),
}
}
@ -75,8 +72,11 @@ func (ae *AccessEvents) Keys() [][]byte {
// TODO: consider if parallelizing this is worth it, probably depending on len(ae.chunks).
keys := make([][]byte, 0, len(ae.chunks))
for chunk := range ae.chunks {
basePoint := ae.pointCache.Get(chunk.addr[:])
key := utils.GetTreeKeyWithEvaluatedAddress(basePoint, &chunk.treeIndex, chunk.leafKey)
var offset [32]byte
treeIndexBytes := chunk.treeIndex.Bytes32()
copy(offset[:31], treeIndexBytes[1:])
offset[31] = chunk.leafKey
key := bintrie.GetBinaryTreeKey(chunk.addr, offset[:])
keys = append(keys, key)
}
return keys
@ -84,9 +84,8 @@ func (ae *AccessEvents) Keys() [][]byte {
func (ae *AccessEvents) Copy() *AccessEvents {
cpy := &AccessEvents{
branches: maps.Clone(ae.branches),
chunks: maps.Clone(ae.chunks),
pointCache: ae.pointCache,
branches: maps.Clone(ae.branches),
chunks: maps.Clone(ae.chunks),
}
return cpy
}
@ -95,12 +94,12 @@ func (ae *AccessEvents) Copy() *AccessEvents {
// member fields of an account.
func (ae *AccessEvents) AddAccount(addr common.Address, isWrite bool, availableGas uint64) uint64 {
var gas uint64 // accumulate the consumed gas
consumed, expected := ae.touchAddressAndChargeGas(addr, zeroTreeIndex, utils.BasicDataLeafKey, isWrite, availableGas)
consumed, expected := ae.touchAddressAndChargeGas(addr, zeroTreeIndex, bintrie.BasicDataLeafKey, isWrite, availableGas)
if consumed < expected {
return expected
}
gas += consumed
consumed, expected = ae.touchAddressAndChargeGas(addr, zeroTreeIndex, utils.CodeHashLeafKey, isWrite, availableGas-consumed)
consumed, expected = ae.touchAddressAndChargeGas(addr, zeroTreeIndex, bintrie.CodeHashLeafKey, isWrite, availableGas-consumed)
if consumed < expected {
return expected + gas
}
@ -112,7 +111,7 @@ func (ae *AccessEvents) AddAccount(addr common.Address, isWrite bool, availableG
// cold member fields of an account, that need to be touched when making a message
// call to that account.
func (ae *AccessEvents) MessageCallGas(destination common.Address, availableGas uint64) uint64 {
_, expected := ae.touchAddressAndChargeGas(destination, zeroTreeIndex, utils.BasicDataLeafKey, false, availableGas)
_, expected := ae.touchAddressAndChargeGas(destination, zeroTreeIndex, bintrie.BasicDataLeafKey, false, availableGas)
if expected == 0 {
expected = params.WarmStorageReadCostEIP2929
}
@ -122,11 +121,11 @@ func (ae *AccessEvents) MessageCallGas(destination common.Address, availableGas
// ValueTransferGas returns the gas to be charged for each of the currently
// cold balance member fields of the caller and the callee accounts.
func (ae *AccessEvents) ValueTransferGas(callerAddr, targetAddr common.Address, availableGas uint64) uint64 {
_, expected1 := ae.touchAddressAndChargeGas(callerAddr, zeroTreeIndex, utils.BasicDataLeafKey, true, availableGas)
_, expected1 := ae.touchAddressAndChargeGas(callerAddr, zeroTreeIndex, bintrie.BasicDataLeafKey, true, availableGas)
if expected1 > availableGas {
return expected1
}
_, expected2 := ae.touchAddressAndChargeGas(targetAddr, zeroTreeIndex, utils.BasicDataLeafKey, true, availableGas-expected1)
_, expected2 := ae.touchAddressAndChargeGas(targetAddr, zeroTreeIndex, bintrie.BasicDataLeafKey, true, availableGas-expected1)
if expected1+expected2 == 0 {
return params.WarmStorageReadCostEIP2929
}
@ -138,8 +137,8 @@ func (ae *AccessEvents) ValueTransferGas(callerAddr, targetAddr common.Address,
// address collision is done before the transfer, and so no write
// are guaranteed to happen at this point.
func (ae *AccessEvents) ContractCreatePreCheckGas(addr common.Address, availableGas uint64) uint64 {
consumed, expected1 := ae.touchAddressAndChargeGas(addr, zeroTreeIndex, utils.BasicDataLeafKey, false, availableGas)
_, expected2 := ae.touchAddressAndChargeGas(addr, zeroTreeIndex, utils.CodeHashLeafKey, false, availableGas-consumed)
consumed, expected1 := ae.touchAddressAndChargeGas(addr, zeroTreeIndex, bintrie.BasicDataLeafKey, false, availableGas)
_, expected2 := ae.touchAddressAndChargeGas(addr, zeroTreeIndex, bintrie.CodeHashLeafKey, false, availableGas-consumed)
return expected1 + expected2
}
@ -147,9 +146,9 @@ func (ae *AccessEvents) ContractCreatePreCheckGas(addr common.Address, available
// a contract creation.
func (ae *AccessEvents) ContractCreateInitGas(addr common.Address, availableGas uint64) (uint64, uint64) {
var gas uint64
consumed, expected1 := ae.touchAddressAndChargeGas(addr, zeroTreeIndex, utils.BasicDataLeafKey, true, availableGas)
consumed, expected1 := ae.touchAddressAndChargeGas(addr, zeroTreeIndex, bintrie.BasicDataLeafKey, true, availableGas)
gas += consumed
consumed, expected2 := ae.touchAddressAndChargeGas(addr, zeroTreeIndex, utils.CodeHashLeafKey, true, availableGas-consumed)
consumed, expected2 := ae.touchAddressAndChargeGas(addr, zeroTreeIndex, bintrie.CodeHashLeafKey, true, availableGas-consumed)
gas += consumed
return gas, expected1 + expected2
}
@ -157,20 +156,20 @@ func (ae *AccessEvents) ContractCreateInitGas(addr common.Address, availableGas
// AddTxOrigin adds the member fields of the sender account to the access event list,
// so that cold accesses are not charged, since they are covered by the 21000 gas.
func (ae *AccessEvents) AddTxOrigin(originAddr common.Address) {
ae.touchAddressAndChargeGas(originAddr, zeroTreeIndex, utils.BasicDataLeafKey, true, gomath.MaxUint64)
ae.touchAddressAndChargeGas(originAddr, zeroTreeIndex, utils.CodeHashLeafKey, false, gomath.MaxUint64)
ae.touchAddressAndChargeGas(originAddr, zeroTreeIndex, bintrie.BasicDataLeafKey, true, gomath.MaxUint64)
ae.touchAddressAndChargeGas(originAddr, zeroTreeIndex, bintrie.CodeHashLeafKey, false, gomath.MaxUint64)
}
// AddTxDestination adds the member fields of the sender account to the access event list,
// so that cold accesses are not charged, since they are covered by the 21000 gas.
func (ae *AccessEvents) AddTxDestination(addr common.Address, sendsValue, doesntExist bool) {
ae.touchAddressAndChargeGas(addr, zeroTreeIndex, utils.BasicDataLeafKey, sendsValue, gomath.MaxUint64)
ae.touchAddressAndChargeGas(addr, zeroTreeIndex, utils.CodeHashLeafKey, doesntExist, gomath.MaxUint64)
ae.touchAddressAndChargeGas(addr, zeroTreeIndex, bintrie.BasicDataLeafKey, sendsValue, gomath.MaxUint64)
ae.touchAddressAndChargeGas(addr, zeroTreeIndex, bintrie.CodeHashLeafKey, doesntExist, gomath.MaxUint64)
}
// SlotGas returns the amount of gas to be charged for a cold storage access.
func (ae *AccessEvents) SlotGas(addr common.Address, slot common.Hash, isWrite bool, availableGas uint64, chargeWarmCosts bool) uint64 {
treeIndex, subIndex := utils.StorageIndex(slot.Bytes())
treeIndex, subIndex := bintrie.StorageIndex(slot.Bytes())
_, expected := ae.touchAddressAndChargeGas(addr, *treeIndex, subIndex, isWrite, availableGas)
if expected == 0 && chargeWarmCosts {
expected = params.WarmStorageReadCostEIP2929
@ -313,7 +312,7 @@ func (ae *AccessEvents) CodeChunksRangeGas(contractAddr common.Address, startPC,
// Note that an access in write mode implies an access in read mode, whereas an
// access in read mode does not imply an access in write mode.
func (ae *AccessEvents) BasicDataGas(addr common.Address, isWrite bool, availableGas uint64, chargeWarmCosts bool) uint64 {
_, expected := ae.touchAddressAndChargeGas(addr, zeroTreeIndex, utils.BasicDataLeafKey, isWrite, availableGas)
_, expected := ae.touchAddressAndChargeGas(addr, zeroTreeIndex, bintrie.BasicDataLeafKey, isWrite, availableGas)
if expected == 0 && chargeWarmCosts {
if availableGas < params.WarmStorageReadCostEIP2929 {
return availableGas
@ -329,7 +328,7 @@ func (ae *AccessEvents) BasicDataGas(addr common.Address, isWrite bool, availabl
// Note that an access in write mode implies an access in read mode, whereas an access in
// read mode does not imply an access in write mode.
func (ae *AccessEvents) CodeHashGas(addr common.Address, isWrite bool, availableGas uint64, chargeWarmCosts bool) uint64 {
_, expected := ae.touchAddressAndChargeGas(addr, zeroTreeIndex, utils.CodeHashLeafKey, isWrite, availableGas)
_, expected := ae.touchAddressAndChargeGas(addr, zeroTreeIndex, bintrie.CodeHashLeafKey, isWrite, availableGas)
if expected == 0 && chargeWarmCosts {
if availableGas < params.WarmStorageReadCostEIP2929 {
return availableGas

View file

@ -22,7 +22,6 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/trie/utils"
)
var (
@ -38,7 +37,7 @@ func init() {
}
func TestAccountHeaderGas(t *testing.T) {
ae := NewAccessEvents(utils.NewPointCache(1024))
ae := NewAccessEvents()
// Check cold read cost
gas := ae.BasicDataGas(testAddr, false, math.MaxUint64, false)
@ -93,7 +92,7 @@ func TestAccountHeaderGas(t *testing.T) {
// TestContractCreateInitGas checks that the gas cost of contract creation is correctly
// calculated.
func TestContractCreateInitGas(t *testing.T) {
ae := NewAccessEvents(utils.NewPointCache(1024))
ae := NewAccessEvents()
var testAddr [20]byte
for i := byte(0); i < 20; i++ {
@ -116,7 +115,7 @@ func TestContractCreateInitGas(t *testing.T) {
// TestMessageCallGas checks that the gas cost of message calls is correctly
// calculated.
func TestMessageCallGas(t *testing.T) {
ae := NewAccessEvents(utils.NewPointCache(1024))
ae := NewAccessEvents()
// Check cold read cost, without a value
gas := ae.MessageCallGas(testAddr, math.MaxUint64)

View file

@ -31,7 +31,6 @@ import (
"github.com/ethereum/go-ethereum/trie/bintrie"
"github.com/ethereum/go-ethereum/trie/transitiontrie"
"github.com/ethereum/go-ethereum/trie/trienode"
"github.com/ethereum/go-ethereum/trie/utils"
"github.com/ethereum/go-ethereum/triedb"
)
@ -41,9 +40,6 @@ const (
// Cache size granted for caching clean code.
codeCacheSize = 256 * 1024 * 1024
// Number of address->curve point associations to keep.
pointCacheSize = 4096
)
// Database wraps access to tries and contract code.
@ -57,9 +53,6 @@ type Database interface {
// OpenStorageTrie opens the storage trie of an account.
OpenStorageTrie(stateRoot common.Hash, address common.Address, root common.Hash, trie Trie) (Trie, error)
// PointCache returns the cache holding points used in verkle tree key computation
PointCache() *utils.PointCache
// TrieDB returns the underlying trie database for managing trie nodes.
TrieDB() *triedb.Database
@ -161,7 +154,6 @@ type CachingDB struct {
snap *snapshot.Tree
codeCache *lru.SizeConstrainedCache[common.Hash, []byte]
codeSizeCache *lru.Cache[common.Hash, int]
pointCache *utils.PointCache
// Transition-specific fields
TransitionStatePerRoot *lru.Cache[common.Hash, *overlay.TransitionState]
@ -175,7 +167,6 @@ func NewDatabase(triedb *triedb.Database, snap *snapshot.Tree) *CachingDB {
snap: snap,
codeCache: lru.NewSizeConstrainedCache[common.Hash, []byte](codeCacheSize),
codeSizeCache: lru.NewCache[common.Hash, int](codeSizeCacheSize),
pointCache: utils.NewPointCache(pointCacheSize),
TransitionStatePerRoot: lru.NewCache[common.Hash, *overlay.TransitionState](1000),
}
}
@ -211,7 +202,7 @@ func (db *CachingDB) Reader(stateRoot common.Hash) (Reader, error) {
}
// Configure the trie reader, which is expected to be available as the
// gatekeeper unless the state is corrupted.
tr, err := newTrieReader(stateRoot, db.triedb, db.pointCache)
tr, err := newTrieReader(stateRoot, db.triedb)
if err != nil {
return nil, err
}
@ -289,11 +280,6 @@ func (db *CachingDB) TrieDB() *triedb.Database {
return db.triedb
}
// PointCache returns the cache of evaluated curve points.
func (db *CachingDB) PointCache() *utils.PointCache {
return db.pointCache
}
// Snapshot returns the underlying state snapshot.
func (db *CachingDB) Snapshot() *snapshot.Tree {
return db.snap
@ -304,8 +290,6 @@ func mustCopyTrie(t Trie) Trie {
switch t := t.(type) {
case *trie.StateTrie:
return t.Copy()
case *trie.VerkleTrie:
return t.Copy()
case *transitiontrie.TransitionTrie:
return t.Copy()
default:

View file

@ -25,7 +25,6 @@ import (
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/trie/utils"
"github.com/ethereum/go-ethereum/triedb"
"github.com/ethereum/go-ethereum/triedb/pathdb"
)
@ -105,7 +104,6 @@ type HistoricDB struct {
triedb *triedb.Database
codeCache *lru.SizeConstrainedCache[common.Hash, []byte]
codeSizeCache *lru.Cache[common.Hash, int]
pointCache *utils.PointCache
}
// NewHistoricDatabase creates a historic state database.
@ -115,7 +113,6 @@ func NewHistoricDatabase(disk ethdb.KeyValueStore, triedb *triedb.Database) *His
triedb: triedb,
codeCache: lru.NewSizeConstrainedCache[common.Hash, []byte](codeCacheSize),
codeSizeCache: lru.NewCache[common.Hash, int](codeSizeCacheSize),
pointCache: utils.NewPointCache(pointCacheSize),
}
}
@ -139,11 +136,6 @@ func (db *HistoricDB) OpenStorageTrie(stateRoot common.Hash, address common.Addr
return nil, errors.New("not implemented")
}
// PointCache returns the cache holding points used in verkle tree key computation
func (db *HistoricDB) PointCache() *utils.PointCache {
return db.pointCache
}
// TrieDB returns the underlying trie database for managing trie nodes.
func (db *HistoricDB) TrieDB() *triedb.Database {
return db.triedb

View file

@ -33,13 +33,16 @@ import (
"github.com/ethereum/go-ethereum/trie"
"github.com/ethereum/go-ethereum/trie/bintrie"
"github.com/ethereum/go-ethereum/trie/transitiontrie"
"github.com/ethereum/go-ethereum/trie/utils"
"github.com/ethereum/go-ethereum/triedb"
"github.com/ethereum/go-ethereum/triedb/database"
)
// ContractCodeReader defines the interface for accessing contract code.
type ContractCodeReader interface {
// Has returns the flag indicating whether the contract code with
// specified address and hash exists or not.
Has(addr common.Address, codeHash common.Hash) bool
// Code retrieves a particular contract's code.
//
// - Returns nil code along with nil error if the requested contract code
@ -170,6 +173,13 @@ func (r *cachingCodeReader) CodeSize(addr common.Address, codeHash common.Hash)
return len(code), nil
}
// Has reports whether contract code with the specified address and hash
// can be resolved, i.e. a non-empty code blob exists for it.
func (r *cachingCodeReader) Has(addr common.Address, codeHash common.Hash) bool {
	blob, _ := r.Code(addr, codeHash)
	return len(blob) != 0
}
// flatReader wraps a database state reader and is safe for concurrent access.
type flatReader struct {
reader database.StateReader
@ -256,7 +266,7 @@ type trieReader struct {
// newTrieReader constructs a trie reader of the specific state. An error will be
// returned if the associated trie specified by root is not existent.
func newTrieReader(root common.Hash, db *triedb.Database, cache *utils.PointCache) (*trieReader, error) {
func newTrieReader(root common.Hash, db *triedb.Database) (*trieReader, error) {
var (
tr Trie
err error

View file

@ -29,6 +29,7 @@ import (
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/trie"
"github.com/ethereum/go-ethereum/trie/bintrie"
"github.com/ethereum/go-ethereum/trie/transitiontrie"
"github.com/ethereum/go-ethereum/trie/trienode"
"github.com/holiman/uint256"
@ -498,8 +499,8 @@ func (s *stateObject) deepCopy(db *StateDB) *stateObject {
}
switch s.trie.(type) {
case *trie.VerkleTrie:
// Verkle uses only one tree, and the copy has already been
case *bintrie.BinaryTrie:
// UBT uses only one tree, and the copy has already been
// made in mustCopyTrie.
obj.trie = db.trie
case *transitiontrie.TransitionTrie:
@ -531,6 +532,11 @@ func (s *stateObject) Code() []byte {
if bytes.Equal(s.CodeHash(), types.EmptyCodeHash.Bytes()) {
return nil
}
defer func(start time.Time) {
s.db.CodeLoaded += 1
s.db.CodeReads += time.Since(start)
}(time.Now())
code, err := s.db.reader.Code(s.address, common.BytesToHash(s.CodeHash()))
if err != nil {
s.db.setError(fmt.Errorf("can't load code hash %x: %v", s.CodeHash(), err))
@ -552,6 +558,11 @@ func (s *stateObject) CodeSize() int {
if bytes.Equal(s.CodeHash(), types.EmptyCodeHash.Bytes()) {
return 0
}
defer func(start time.Time) {
s.db.CodeLoaded += 1
s.db.CodeReads += time.Since(start)
}(time.Now())
size, err := s.db.reader.CodeSize(s.address, common.BytesToHash(s.CodeHash()))
if err != nil {
s.db.setError(fmt.Errorf("can't load code size %x: %v", s.CodeHash(), err))

View file

@ -243,12 +243,14 @@ func calSizeStats(update *stateUpdate) (SizeStats, error) {
}
}
// Measure code changes. Note that the reported contract code size may be slightly
// inaccurate due to database deduplication (code is stored by its hash). However,
// this deviation is negligible and acceptable for measurement purposes.
codeExists := make(map[common.Hash]struct{})
for _, code := range update.codes {
if _, ok := codeExists[code.hash]; ok || code.exists {
continue
}
stats.ContractCodes += 1
stats.ContractCodeBytes += codeKeySize + int64(len(code.blob))
codeExists[code.hash] = struct{}{}
}
return stats, nil
}

View file

@ -58,7 +58,7 @@ func TestSizeTracker(t *testing.T) {
state.AddBalance(addr3, uint256.NewInt(3000), tracing.BalanceChangeUnspecified)
state.SetNonce(addr3, 3, tracing.NonceChangeUnspecified)
currentRoot, _, err := state.CommitWithUpdate(1, true, false)
currentRoot, err := state.Commit(1, true, false)
if err != nil {
t.Fatalf("Failed to commit initial state: %v", err)
}
@ -83,7 +83,7 @@ func TestSizeTracker(t *testing.T) {
if i%3 == 0 {
newState.SetCode(testAddr, []byte{byte(i), 0x60, 0x80, byte(i + 1), 0x52}, tracing.CodeChangeUnspecified)
}
root, _, err := newState.CommitWithUpdate(blockNum, true, false)
root, err := newState.Commit(blockNum, true, false)
if err != nil {
t.Fatalf("Failed to commit state at block %d: %v", blockNum, err)
}
@ -154,21 +154,22 @@ func TestSizeTracker(t *testing.T) {
if i%3 == 0 {
newState.SetCode(testAddr, []byte{byte(i), 0x60, 0x80, byte(i + 1), 0x52}, tracing.CodeChangeUnspecified)
}
root, update, err := newState.CommitWithUpdate(blockNum, true, false)
ret, err := newState.commitAndFlush(blockNum, true, false, true)
if err != nil {
t.Fatalf("Failed to commit state at block %d: %v", blockNum, err)
}
if err := tdb.Commit(root, false); err != nil {
tracker.Notify(ret)
if err := tdb.Commit(ret.root, false); err != nil {
t.Fatalf("Failed to commit trie at block %d: %v", blockNum, err)
}
diff, err := calSizeStats(update)
diff, err := calSizeStats(ret)
if err != nil {
t.Fatalf("Failed to calculate size stats for block %d: %v", blockNum, err)
}
trackedUpdates = append(trackedUpdates, diff)
tracker.Notify(update)
currentRoot = root
currentRoot = ret.root
}
finalRoot := rawdb.ReadSnapshotRoot(db)

View file

@ -38,7 +38,6 @@ import (
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/trie"
"github.com/ethereum/go-ethereum/trie/trienode"
"github.com/ethereum/go-ethereum/trie/utils"
"github.com/holiman/uint256"
"golang.org/x/sync/errgroup"
)
@ -151,6 +150,7 @@ type StateDB struct {
StorageCommits time.Duration
SnapshotCommits time.Duration
TrieDBCommits time.Duration
CodeReads time.Duration
AccountLoaded int // Number of accounts retrieved from the database during the state transition
AccountUpdated int // Number of accounts updated during the state transition
@ -158,6 +158,7 @@ type StateDB struct {
StorageLoaded int // Number of storage slots retrieved from the database during the state transition
StorageUpdated atomic.Int64 // Number of storage slots updated during the state transition
StorageDeleted atomic.Int64 // Number of storage slots deleted during the state transition
CodeLoaded int // Number of contract code loaded during the state transition
}
// New creates a new state from a given trie.
@ -186,7 +187,7 @@ func NewWithReader(root common.Hash, db Database, reader Reader) (*StateDB, erro
transientStorage: newTransientStorage(),
}
if db.TrieDB().IsVerkle() {
sdb.accessEvents = NewAccessEvents(db.PointCache())
sdb.accessEvents = NewAccessEvents()
}
return sdb, nil
}
@ -1317,11 +1318,16 @@ func (s *StateDB) commit(deleteEmptyObjects bool, noStorageWiping bool, blockNum
// commitAndFlush is a wrapper of commit which also commits the state mutations
// to the configured data stores.
func (s *StateDB) commitAndFlush(block uint64, deleteEmptyObjects bool, noStorageWiping bool) (*stateUpdate, error) {
func (s *StateDB) commitAndFlush(block uint64, deleteEmptyObjects bool, noStorageWiping bool, dedupCode bool) (*stateUpdate, error) {
ret, err := s.commit(deleteEmptyObjects, noStorageWiping, block)
if err != nil {
return nil, err
}
if dedupCode {
ret.markCodeExistence(s.reader)
}
// Commit dirty contract code if any exists
if db := s.db.TrieDB().Disk(); db != nil && len(ret.codes) > 0 {
batch := db.NewBatch()
@ -1376,21 +1382,21 @@ func (s *StateDB) commitAndFlush(block uint64, deleteEmptyObjects bool, noStorag
// no empty accounts left that could be deleted by EIP-158, storage wiping
// should not occur.
func (s *StateDB) Commit(block uint64, deleteEmptyObjects bool, noStorageWiping bool) (common.Hash, error) {
ret, err := s.commitAndFlush(block, deleteEmptyObjects, noStorageWiping)
ret, err := s.commitAndFlush(block, deleteEmptyObjects, noStorageWiping, false)
if err != nil {
return common.Hash{}, err
}
return ret.root, nil
}
// CommitWithUpdate writes the state mutations and returns both the root hash and the state update.
// This is useful for tracking state changes at the blockchain level.
func (s *StateDB) CommitWithUpdate(block uint64, deleteEmptyObjects bool, noStorageWiping bool) (common.Hash, *stateUpdate, error) {
ret, err := s.commitAndFlush(block, deleteEmptyObjects, noStorageWiping)
// CommitAndTrack writes the state mutations and notifies the size tracker of the state changes.
func (s *StateDB) CommitAndTrack(block uint64, deleteEmptyObjects bool, noStorageWiping bool, sizer *SizeTracker) (common.Hash, error) {
ret, err := s.commitAndFlush(block, deleteEmptyObjects, noStorageWiping, true)
if err != nil {
return common.Hash{}, nil, err
return common.Hash{}, err
}
return ret.root, ret, nil
sizer.Notify(ret)
return ret.root, nil
}
// Prepare handles the preparatory steps for executing a state transition with.
@ -1488,11 +1494,6 @@ func (s *StateDB) markUpdate(addr common.Address) {
s.mutations[addr].typ = update
}
// PointCache returns the point cache used by verkle tree.
func (s *StateDB) PointCache() *utils.PointCache {
return s.db.PointCache()
}
// Witness retrieves the current state witness being collected.
func (s *StateDB) Witness() *stateless.Witness {
return s.witness

View file

@ -228,7 +228,7 @@ func (test *stateTest) run() bool {
} else {
state.IntermediateRoot(true) // call intermediateRoot at the transaction boundary
}
ret, err := state.commitAndFlush(0, true, false) // call commit at the block boundary
ret, err := state.commitAndFlush(0, true, false, false) // call commit at the block boundary
if err != nil {
panic(err)
}

View file

@ -25,7 +25,6 @@ import (
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/trie/utils"
"github.com/holiman/uint256"
)
@ -133,10 +132,6 @@ func (s *hookedStateDB) AddSlotToAccessList(addr common.Address, slot common.Has
s.inner.AddSlotToAccessList(addr, slot)
}
func (s *hookedStateDB) PointCache() *utils.PointCache {
return s.inner.PointCache()
}
func (s *hookedStateDB) Prepare(rules params.Rules, sender, coinbase common.Address, dest *common.Address, precompiles []common.Address, txAccesses types.AccessList) {
s.inner.Prepare(rules, sender, coinbase, dest, precompiles, txAccesses)
}

View file

@ -129,7 +129,7 @@ func TestHooks(t *testing.T) {
for i, want := range wants {
if have := result[i]; have != want {
t.Fatalf("error event %d, have\n%v\nwant%v\n", i, have, want)
t.Fatalf("error event %d\nhave: %v\nwant: %v", i, have, want)
}
}
}
@ -165,7 +165,7 @@ func TestHooks_OnCodeChangeV2(t *testing.T) {
for i, want := range wants {
if have := result[i]; have != want {
t.Fatalf("error event %d, have\n%v\nwant%v\n", i, have, want)
t.Fatalf("error event %d\nhave: %v\nwant: %v", i, have, want)
}
}
}

View file

@ -26,8 +26,9 @@ import (
// contractCode represents a contract code with associated metadata.
type contractCode struct {
hash common.Hash // hash is the cryptographic hash of the contract code.
blob []byte // blob is the binary representation of the contract code.
hash common.Hash // hash is the cryptographic hash of the contract code.
blob []byte // blob is the binary representation of the contract code.
exists bool // flag whether the code has been existent
}
// accountDelete represents an operation for deleting an Ethereum account.
@ -82,8 +83,8 @@ type stateUpdate struct {
storagesOrigin map[common.Address]map[common.Hash][]byte
rawStorageKey bool
codes map[common.Address]contractCode // codes contains the set of dirty codes
nodes *trienode.MergedNodeSet // Aggregated dirty nodes caused by state changes
codes map[common.Address]*contractCode // codes contains the set of dirty codes
nodes *trienode.MergedNodeSet // Aggregated dirty nodes caused by state changes
}
// empty returns a flag indicating the state transition is empty or not.
@ -103,7 +104,7 @@ func newStateUpdate(rawStorageKey bool, originRoot common.Hash, root common.Hash
accountsOrigin = make(map[common.Address][]byte)
storages = make(map[common.Hash]map[common.Hash][]byte)
storagesOrigin = make(map[common.Address]map[common.Hash][]byte)
codes = make(map[common.Address]contractCode)
codes = make(map[common.Address]*contractCode)
)
// Since some accounts might be destroyed and recreated within the same
// block, deletions must be aggregated first.
@ -125,7 +126,7 @@ func newStateUpdate(rawStorageKey bool, originRoot common.Hash, root common.Hash
// Aggregate dirty contract codes if they are available.
addr := op.address
if op.code != nil {
codes[addr] = *op.code
codes[addr] = op.code
}
accounts[addrHash] = op.data
@ -190,3 +191,22 @@ func (sc *stateUpdate) stateSet() *triedb.StateSet {
RawStorageKey: sc.rawStorageKey,
}
}
// markCodeExistence resolves, for each piece of contract code referenced by
// this state update, whether the code already exists in the database.
//
// Note: this lookup is expensive and not needed during normal state
// transitions. It is only required when the SizeTracker is enabled to
// produce accurate state statistics.
func (sc *stateUpdate) markCodeExistence(reader ContractCodeReader) {
	seen := make(map[common.Hash]bool)
	for addr, c := range sc.codes {
		exists, cached := seen[c.hash]
		if !cached {
			// First encounter of this hash: consult the database once and
			// memoize the answer, since code is deduplicated by hash.
			exists = reader.Has(addr, c.hash)
			seen[c.hash] = exists
		}
		c.exists = exists
	}
}

View file

@ -62,10 +62,17 @@ func (s *WitnessStats) Add(nodes map[string][]byte, owner common.Hash) {
// If current path is a prefix of the next path, it's not a leaf.
// The last path is always a leaf.
if i == len(paths)-1 || !strings.HasPrefix(paths[i+1], paths[i]) {
depth := len(path)
if owner == (common.Hash{}) {
s.accountTrieLeaves[len(path)] += 1
if depth >= len(s.accountTrieLeaves) {
depth = len(s.accountTrieLeaves) - 1
}
s.accountTrieLeaves[depth] += 1
} else {
s.storageTrieLeaves[len(path)] += 1
if depth >= len(s.storageTrieLeaves) {
depth = len(s.storageTrieLeaves) - 1
}
s.storageTrieLeaves[depth] += 1
}
}
}

View file

@ -21,12 +21,10 @@ import (
"container/heap"
"errors"
"fmt"
"maps"
"math"
"math/big"
"os"
"path/filepath"
"slices"
"sort"
"sync"
"sync/atomic"
@ -96,11 +94,6 @@ const (
// storeVersion is the current slotter layout used for the billy.Database
// store.
storeVersion = 1
// conversionTimeWindow defines the period after the Osaka fork during which
// the pool will still accept and convert legacy blob transactions. After this
// window, all legacy blob transactions will be rejected.
conversionTimeWindow = time.Hour * 2
)
// blobTxMeta is the minimal subset of types.BlobTx necessary to validate and
@ -337,9 +330,8 @@ type BlobPool struct {
stored uint64 // Useful data size of all transactions on disk
limbo *limbo // Persistent data store for the non-finalized blobs
signer types.Signer // Transaction signer to use for sender recovery
chain BlockChain // Chain object to access the state through
cQueue *conversionQueue // The queue for performing legacy sidecar conversion (TODO: remove after Osaka)
signer types.Signer // Transaction signer to use for sender recovery
chain BlockChain // Chain object to access the state through
head atomic.Pointer[types.Header] // Current head of the chain
state *state.StateDB // Current state at the head of the chain
@ -368,7 +360,6 @@ func New(config Config, chain BlockChain, hasPendingAuth func(common.Address) bo
hasPendingAuth: hasPendingAuth,
signer: types.LatestSigner(chain.Config()),
chain: chain,
cQueue: newConversionQueue(), // Deprecate it after the osaka fork
lookup: newLookup(),
index: make(map[common.Address][]*blobTxMeta),
spent: make(map[common.Address]*uint256.Int),
@ -377,7 +368,12 @@ func New(config Config, chain BlockChain, hasPendingAuth func(common.Address) bo
// Filter returns whether the given transaction can be consumed by the blob pool.
func (p *BlobPool) Filter(tx *types.Transaction) bool {
return tx.Type() == types.BlobTxType
return p.FilterType(tx.Type())
}
// FilterType returns whether the blob pool supports the given transaction type.
func (p *BlobPool) FilterType(kind byte) bool {
return kind == types.BlobTxType
}
// Init sets the gas price needed to keep a transaction in the pool and the chain
@ -414,7 +410,7 @@ func (p *BlobPool) Init(gasTip uint64, head *types.Header, reserver txpool.Reser
p.state = state
// Create new slotter for pre-Osaka blob configuration.
slotter := newSlotter(eip4844.LatestMaxBlobsPerBlock(p.chain.Config()))
slotter := newSlotter(params.BlobTxMaxBlobs)
// See if we need to migrate the queue blob store after fusaka
slotter, err = tryMigrate(p.chain.Config(), slotter, queuedir)
@ -485,9 +481,6 @@ func (p *BlobPool) Init(gasTip uint64, head *types.Header, reserver txpool.Reser
// Close closes down the underlying persistent store.
func (p *BlobPool) Close() error {
// Terminate the conversion queue
p.cQueue.close()
var errs []error
if p.limbo != nil { // Close might be invoked due to error in constructor, before p,limbo is set
if err := p.limbo.Close(); err != nil {
@ -885,172 +878,6 @@ func (p *BlobPool) Reset(oldHead, newHead *types.Header) {
basefeeGauge.Update(int64(basefee.Uint64()))
blobfeeGauge.Update(int64(blobfee.Uint64()))
p.updateStorageMetrics()
// Perform the conversion logic at the fork boundary
if !p.chain.Config().IsOsaka(oldHead.Number, oldHead.Time) && p.chain.Config().IsOsaka(newHead.Number, newHead.Time) {
// Deep copy all indexed transaction metadata.
var (
ids = make(map[common.Address]map[uint64]uint64)
txs = make(map[common.Address]map[uint64]common.Hash)
)
for sender, list := range p.index {
ids[sender] = make(map[uint64]uint64)
txs[sender] = make(map[uint64]common.Hash)
for _, m := range list {
ids[sender][m.nonce] = m.id
txs[sender][m.nonce] = m.hash
}
}
// Initiate the background conversion thread.
p.cQueue.launchBillyConversion(func() {
p.convertLegacySidecars(ids, txs)
})
}
}
// compareAndSwap checks if the specified transaction is still tracked in the
// pool and replaces the metadata accordingly. It should only be used in the
// fork boundary bulk conversion. If it fails for some reason, the subsequent
// txs won't be dropped for simplicity, as we assume failure is very unlikely
// to happen.
//
// The returned flag indicates whether the replacement succeeded.
func (p *BlobPool) compareAndSwap(address common.Address, hash common.Hash, blob []byte, oldID uint64, oldStorageSize uint32) bool {
	p.lock.Lock()
	defer p.lock.Unlock()
	// Persist the converted blob first; the metadata is only swung over to
	// the new slot once the write has succeeded.
	newId, err := p.store.Put(blob)
	if err != nil {
		log.Error("Failed to store transaction", "hash", hash, "err", err)
		return false
	}
	newSize := uint64(len(blob))
	newStorageSize := p.store.Size(newId)
	// Terminate the procedure if the transaction was already evicted. The
	// newly added blob should be removed before return.
	if !p.lookup.update(hash, newId, newSize) {
		if derr := p.store.Delete(newId); derr != nil {
			log.Error("Failed to delete the dangling blob tx", "err", derr)
		} else {
			log.Warn("Deleted the dangling blob tx", "id", newId)
		}
		return false
	}
	// Update the metadata of blob transaction
	for _, meta := range p.index[address] {
		if meta.hash == hash {
			meta.id = newId
			meta.version = types.BlobSidecarVersion1
			meta.storageSize = newStorageSize
			meta.size = newSize
			// Keep the pool-wide storage counter in sync with the swap:
			// account for the new slot, release the old one.
			p.stored += uint64(newStorageSize)
			p.stored -= uint64(oldStorageSize)
			break
		}
	}
	// Best-effort removal of the superseded legacy slot; a failure here only
	// wastes store space, it does not affect pool consistency.
	if err := p.store.Delete(oldID); err != nil {
		log.Error("Failed to delete the legacy transaction", "hash", hash, "id", oldID, "err", err)
	}
	return true
}
// convertLegacySidecar fetches transaction data from the store and performs an
// on-the-fly conversion. This function is intended for use only during the
// Osaka fork transition period.
//
// The returned flag indicates whether the replacement succeeds or not.
func (p *BlobPool) convertLegacySidecar(sender common.Address, hash common.Hash, id uint64) bool {
	start := time.Now()
	// Retrieves the legacy blob transaction from the underlying store with
	// read lock held, preventing any potential data race around the slot
	// specified by the id.
	p.lock.RLock()
	data, err := p.store.Get(id)
	if err != nil {
		p.lock.RUnlock()
		// The transaction may have been evicted simultaneously, safe to skip conversion.
		log.Debug("Blob transaction is missing", "hash", hash, "id", id, "err", err)
		return false
	}
	oldStorageSize := p.store.Size(id)
	p.lock.RUnlock()
	// Decode the transaction, the failure is not expected and report the error
	// loudly if possible. If the blob transaction in this slot is corrupted,
	// leave it in the store, it will be dropped during the next pool
	// initialization.
	var tx types.Transaction
	if err = rlp.DecodeBytes(data, &tx); err != nil {
		log.Error("Blob transaction is corrupted", "hash", hash, "id", id, "err", err)
		return false
	}
	// Skip conversion if the transaction does not match the expected hash, or if it was
	// already converted. This can occur if the original transaction was evicted from the
	// pool and the slot was reused by a new one.
	if tx.Hash() != hash {
		log.Warn("Blob transaction was replaced", "hash", hash, "id", id, "stored", tx.Hash())
		return false
	}
	sc := tx.BlobTxSidecar()
	if sc.Version >= types.BlobSidecarVersion1 {
		log.Debug("Skipping conversion of blob tx", "hash", hash, "id", id)
		return false
	}
	// Perform the sidecar conversion, the failure is not expected and report the error
	// loudly if possible.
	if err := tx.BlobTxSidecar().ToV1(); err != nil {
		log.Error("Failed to convert blob transaction", "hash", hash, "err", err)
		return false
	}
	// Encode the converted transaction, the failure is not expected and report
	// the error loudly if possible.
	blob, err := rlp.EncodeToBytes(&tx)
	if err != nil {
		log.Error("Failed to encode blob transaction", "hash", tx.Hash(), "err", err)
		return false
	}
	// Replace the legacy blob transaction with the converted format. The swap
	// re-validates under the write lock that the tx is still tracked.
	if !p.compareAndSwap(sender, hash, blob, id, oldStorageSize) {
		log.Error("Failed to replace the legacy transaction", "hash", hash)
		return false
	}
	log.Debug("Converted legacy blob transaction", "hash", hash, "elapsed", common.PrettyDuration(time.Since(start)))
	return true
}
// convertLegacySidecars upgrades all given transactions to sidecar version 1,
// handling each sender's transactions in ascending nonce order.
//
// A failed conversion does not abort the remaining ones, as failures are
// assumed to be very unlikely. Any transaction left unconverted will simply
// stay in the pool until eviction.
func (p *BlobPool) convertLegacySidecars(ids map[common.Address]map[uint64]uint64, txs map[common.Address]map[uint64]common.Hash) {
	start := time.Now()
	var converted, skipped int
	for addr, hashes := range txs {
		// Transactions evicted from the pool must be contiguous; if the
		// transactions are gapped with each other, they will be discarded.
		nonces := slices.Collect(maps.Keys(hashes))
		slices.Sort(nonces)
		// Convert the txs in nonce order
		for _, nonce := range nonces {
			if p.convertLegacySidecar(addr, hashes[nonce], ids[addr][nonce]) {
				converted++
			} else {
				skipped++
			}
		}
	}
	log.Info("Completed blob transaction conversion", "discarded", skipped, "injected", converted, "elapsed", common.PrettyDuration(time.Since(start)))
}
// reorg assembles all the transactors and missing transactions between an old
@ -1530,8 +1357,8 @@ func (p *BlobPool) GetMetadata(hash common.Hash) *txpool.TxMetadata {
//
// The version argument specifies the type of proofs to return, either the
// blob proofs (version 0) or the cell proofs (version 1). Proofs conversion is
// CPU intensive, so only done if explicitly requested with the convert flag.
func (p *BlobPool) GetBlobs(vhashes []common.Hash, version byte, convert bool) ([]*kzg4844.Blob, []kzg4844.Commitment, [][]kzg4844.Proof, error) {
// CPU intensive and prohibited in the blobpool explicitly.
func (p *BlobPool) GetBlobs(vhashes []common.Hash, version byte) ([]*kzg4844.Blob, []kzg4844.Commitment, [][]kzg4844.Proof, error) {
var (
blobs = make([]*kzg4844.Blob, len(vhashes))
commitments = make([]kzg4844.Commitment, len(vhashes))
@ -1582,7 +1409,7 @@ func (p *BlobPool) GetBlobs(vhashes []common.Hash, version byte, convert bool) (
}
// Mark hash as seen.
filled[hash] = struct{}{}
if sidecar.Version != version && !convert {
if sidecar.Version != version {
// Skip blobs with incompatible version. Note we still track the blob hash
// in `filled` here, ensuring that we do not resolve this tx another time.
continue
@ -1591,29 +1418,13 @@ func (p *BlobPool) GetBlobs(vhashes []common.Hash, version byte, convert bool) (
var pf []kzg4844.Proof
switch version {
case types.BlobSidecarVersion0:
if sidecar.Version == types.BlobSidecarVersion0 {
pf = []kzg4844.Proof{sidecar.Proofs[i]}
} else {
proof, err := kzg4844.ComputeBlobProof(&sidecar.Blobs[i], sidecar.Commitments[i])
if err != nil {
return nil, nil, nil, err
}
pf = []kzg4844.Proof{proof}
}
pf = []kzg4844.Proof{sidecar.Proofs[i]}
case types.BlobSidecarVersion1:
if sidecar.Version == types.BlobSidecarVersion0 {
cellProofs, err := kzg4844.ComputeCellProofs(&sidecar.Blobs[i])
if err != nil {
return nil, nil, nil, err
}
pf = cellProofs
} else {
cellProofs, err := sidecar.CellProofsAt(i)
if err != nil {
return nil, nil, nil, err
}
pf = cellProofs
cellProofs, err := sidecar.CellProofsAt(i)
if err != nil {
return nil, nil, nil, err
}
pf = cellProofs
}
for _, index := range list {
blobs[index] = &sidecar.Blobs[i]
@ -1640,56 +1451,15 @@ func (p *BlobPool) AvailableBlobs(vhashes []common.Hash) int {
return available
}
// preCheck performs the static validation upon the provided tx and converts
// the legacy sidecars if the Osaka fork has been activated within a short
// time window.
//
// This function is purely static and lock free.
func (p *BlobPool) preCheck(tx *types.Transaction) error {
	var (
		head     = p.head.Load()
		isOsaka  = p.chain.Config().IsOsaka(head.Number, head.Time)
		deadline time.Time
	)
	if isOsaka {
		// The acceptance deadline is anchored at the fork timestamp itself,
		// not at the moment this node observed the fork.
		deadline = time.Unix(int64(*p.chain.Config().OsakaTime), 0).Add(conversionTimeWindow)
	}
	// Validate the transaction statically at first to avoid unnecessary
	// conversion. This step doesn't require lock protection.
	if err := p.ValidateTxBasics(tx); err != nil {
		return err
	}
	// Before the Osaka fork, reject the blob txs with cell proofs
	if !isOsaka {
		if tx.BlobTxSidecar().Version == types.BlobSidecarVersion0 {
			return nil
		} else {
			return errors.New("cell proof is not supported yet")
		}
	}
	// After the Osaka fork, reject the legacy blob txs if the conversion
	// time window is passed.
	if tx.BlobTxSidecar().Version == types.BlobSidecarVersion1 {
		return nil
	}
	if head.Time > uint64(deadline.Unix()) {
		return errors.New("legacy blob tx is not supported")
	}
	// Convert the legacy sidecar after Osaka fork. This could be a long
	// procedure which takes a few seconds, even minutes if there is a long
	// queue. Fortunately it will only block the routine of the source peer
	// announcing the tx, without affecting other parts.
	return p.cQueue.convert(tx)
}
// Add inserts a set of blob transactions into the pool if they pass validation (both
// consensus validity and pool restrictions).
func (p *BlobPool) Add(txs []*types.Transaction, sync bool) []error {
var (
errs []error = make([]error, len(txs))
adds = make([]*types.Transaction, 0, len(txs))
errs = make([]error, len(txs))
adds = make([]*types.Transaction, 0, len(txs))
)
for i, tx := range txs {
if errs[i] = p.preCheck(tx); errs[i] != nil {
if errs[i] = p.ValidateTxBasics(tx); errs[i] != nil {
continue
}
if errs[i] = p.add(tx); errs[i] == nil {

View file

@ -92,10 +92,6 @@ type testBlockChain struct {
blockTime *uint64
}
// setHeadTime pins the timestamp reported for the test chain's head block.
// The parameter is renamed from `time` to avoid shadowing the stdlib time
// package used elsewhere in this file; a pointer to the (per-call) parameter
// copy is stored, so callers' variables are never aliased.
func (bc *testBlockChain) setHeadTime(timestamp uint64) {
	bc.blockTime = &timestamp
}
// Config returns the chain configuration backing the test chain.
func (bc *testBlockChain) Config() *params.ChainConfig {
	return bc.config
}
@ -433,11 +429,11 @@ func verifyBlobRetrievals(t *testing.T, pool *BlobPool) {
hashes = append(hashes, tx.vhashes...)
}
}
blobs1, _, proofs1, err := pool.GetBlobs(hashes, types.BlobSidecarVersion0, false)
blobs1, _, proofs1, err := pool.GetBlobs(hashes, types.BlobSidecarVersion0)
if err != nil {
t.Fatal(err)
}
blobs2, _, proofs2, err := pool.GetBlobs(hashes, types.BlobSidecarVersion1, false)
blobs2, _, proofs2, err := pool.GetBlobs(hashes, types.BlobSidecarVersion1)
if err != nil {
t.Fatal(err)
}
@ -1329,7 +1325,7 @@ func TestBlobCountLimit(t *testing.T) {
// Check that first succeeds second fails.
if errs[0] != nil {
t.Fatalf("expected tx with 7 blobs to succeed")
t.Fatalf("expected tx with 7 blobs to succeed, got %v", errs[0])
}
if !errors.Is(errs[1], txpool.ErrTxBlobLimitExceeded) {
t.Fatalf("expected tx with 8 blobs to fail, got: %v", errs[1])
@ -1806,66 +1802,6 @@ func TestAdd(t *testing.T) {
}
}
// Tests that transactions with legacy sidecars are accepted within the
// conversion window but rejected after it has passed.
func TestAddLegacyBlobTx(t *testing.T) {
	testAddLegacyBlobTx(t, true)  // conversion window has not yet passed
	testAddLegacyBlobTx(t, false) // conversion window passed
}
// testAddLegacyBlobTx builds a pool whose head timestamp sits either just
// inside (accept=true) or just outside (accept=false) the post-Osaka legacy
// conversion window, then verifies that version-0 blob transactions are
// accepted or rejected accordingly.
func testAddLegacyBlobTx(t *testing.T, accept bool) {
	var (
		key1, _ = crypto.GenerateKey()
		key2, _ = crypto.GenerateKey()
		addr1   = crypto.PubkeyToAddress(key1.PublicKey)
		addr2   = crypto.PubkeyToAddress(key2.PublicKey)
	)
	// Fund both senders so the txs pass balance validation.
	statedb, _ := state.New(types.EmptyRootHash, state.NewDatabaseForTesting())
	statedb.AddBalance(addr1, uint256.NewInt(1_000_000_000), tracing.BalanceChangeUnspecified)
	statedb.AddBalance(addr2, uint256.NewInt(1_000_000_000), tracing.BalanceChangeUnspecified)
	statedb.Commit(0, true, false)
	chain := &testBlockChain{
		config:  params.MergedTestChainConfig,
		basefee: uint256.NewInt(1050),
		blobfee: uint256.NewInt(105),
		statedb: statedb,
	}
	// Position the head timestamp one second inside or outside the window.
	var timeDiff uint64
	if accept {
		timeDiff = uint64(conversionTimeWindow.Seconds()) - 1
	} else {
		timeDiff = uint64(conversionTimeWindow.Seconds()) + 1
	}
	// NOTE: the local `time` shadows the stdlib time package for the rest of
	// this function.
	time := *params.MergedTestChainConfig.OsakaTime + timeDiff
	chain.setHeadTime(time)
	pool := New(Config{Datadir: t.TempDir()}, chain, nil)
	if err := pool.Init(1, chain.CurrentBlock(), newReserver()); err != nil {
		t.Fatalf("failed to create blob pool: %v", err)
	}
	// Attempt to add legacy blob transactions.
	var (
		tx1 = makeMultiBlobTx(0, 1, 1000, 100, 6, 0, key1, types.BlobSidecarVersion0)
		tx2 = makeMultiBlobTx(0, 1, 800, 70, 6, 6, key2, types.BlobSidecarVersion0)
		txs = []*types.Transaction{tx1, tx2}
	)
	errs := pool.Add(txs, true)
	for _, err := range errs {
		if accept && err != nil {
			t.Fatalf("expected tx add to succeed, %v", err)
		}
		if !accept && err == nil {
			t.Fatal("expected tx add to fail")
		}
	}
	verifyPoolInternals(t, pool)
	pool.Close()
}
func TestGetBlobs(t *testing.T) {
//log.SetDefault(log.NewLogger(log.NewTerminalHandlerWithLevel(os.Stderr, log.LevelTrace, true)))
@ -1952,7 +1888,6 @@ func TestGetBlobs(t *testing.T) {
limit int
fillRandom bool // Whether to randomly fill some of the requested blobs with unknowns
version byte // Blob sidecar version to request
convert bool // Whether to convert version on retrieval
}{
{
start: 0, limit: 6,
@ -2018,11 +1953,6 @@ func TestGetBlobs(t *testing.T) {
start: 0, limit: 18, fillRandom: true,
version: types.BlobSidecarVersion1,
},
{
start: 0, limit: 18, fillRandom: true,
version: types.BlobSidecarVersion1,
convert: true, // Convert some version 0 blobs to version 1 while retrieving
},
}
for i, c := range cases {
var (
@ -2044,7 +1974,7 @@ func TestGetBlobs(t *testing.T) {
filled[len(vhashes)] = struct{}{}
vhashes = append(vhashes, testrand.Hash())
}
blobs, _, proofs, err := pool.GetBlobs(vhashes, c.version, c.convert)
blobs, _, proofs, err := pool.GetBlobs(vhashes, c.version)
if err != nil {
t.Errorf("Unexpected error for case %d, %v", i, err)
}
@ -2070,8 +2000,7 @@ func TestGetBlobs(t *testing.T) {
// If an item is missing, but shouldn't, error
if blobs[j] == nil || proofs[j] == nil {
// This is only an error if there was no version mismatch
if c.convert ||
(c.version == types.BlobSidecarVersion1 && 6 <= testBlobIndex && testBlobIndex < 12) ||
if (c.version == types.BlobSidecarVersion1 && 6 <= testBlobIndex && testBlobIndex < 12) ||
(c.version == types.BlobSidecarVersion0 && (testBlobIndex < 6 || 12 <= testBlobIndex)) {
t.Errorf("tracked blob retrieval failed: item %d, hash %x", j, vhashes[j])
}
@ -2098,185 +2027,6 @@ func TestGetBlobs(t *testing.T) {
pool.Close()
}
// TestSidecarConversion verifies that after the Osaka fork, all legacy
// sidecars in the pool are successfully converted to v1 sidecars.
func TestSidecarConversion(t *testing.T) {
	// log.SetDefault(log.NewLogger(log.NewTerminalHandlerWithLevel(os.Stderr, log.LevelTrace, true)))

	// Create a temporary folder for the persistent backend
	storage := t.TempDir()
	os.MkdirAll(filepath.Join(storage, pendingTransactionStore), 0700)

	var (
		preOsakaTxs  = make(types.Transactions, 10)
		postOsakaTxs = make(types.Transactions, 3)
		keys         = make([]*ecdsa.PrivateKey, len(preOsakaTxs)+len(postOsakaTxs))
		addrs        = make([]common.Address, len(preOsakaTxs)+len(postOsakaTxs))
		statedb, _   = state.New(types.EmptyRootHash, state.NewDatabaseForTesting())
	)
	for i := range keys {
		keys[i], _ = crypto.GenerateKey()
		addrs[i] = crypto.PubkeyToAddress(keys[i].PublicKey)
		statedb.AddBalance(addrs[i], uint256.NewInt(1_000_000_000), tracing.BalanceChangeUnspecified)
	}
	for i := range preOsakaTxs {
		preOsakaTxs[i] = makeMultiBlobTx(0, 1, 1000, 100, 2, 0, keys[i], types.BlobSidecarVersion0)
	}
	for i := range postOsakaTxs {
		if i == 0 {
			// The first post-Osaka tx carries a legacy v0 sidecar so that the
			// on-demand conversion path in pool.Add is exercised below.
			// BUGFIX: the `continue` was missing, so this assignment was
			// immediately overwritten with a v1 transaction and the v0 path
			// was never actually tested.
			postOsakaTxs[i] = makeMultiBlobTx(0, 1, 1000, 100, 1, 0, keys[len(preOsakaTxs)+i], types.BlobSidecarVersion0)
			continue
		}
		postOsakaTxs[i] = makeMultiBlobTx(0, 1, 1000, 100, 1, 0, keys[len(preOsakaTxs)+i], types.BlobSidecarVersion1)
	}
	statedb.Commit(0, true, false)

	// Test plan:
	// 1) Create a bunch v0 sidecar txs and add to pool before Osaka.
	// 2) Pass in new Osaka header to activate the conversion thread.
	// 3) Continue adding both v0 and v1 transactions to the pool.
	// 4) Verify that as additional blocks come in, transactions involved in the
	//    migration are correctly discarded.
	config := &params.ChainConfig{
		ChainID:            big.NewInt(1),
		LondonBlock:        big.NewInt(0),
		BerlinBlock:        big.NewInt(0),
		CancunTime:         newUint64(0),
		PragueTime:         newUint64(0),
		OsakaTime:          newUint64(1),
		BlobScheduleConfig: params.DefaultBlobSchedule,
	}
	chain := &testBlockChain{
		config:  config,
		basefee: uint256.NewInt(1050),
		blobfee: uint256.NewInt(105),
		statedb: statedb,
		blocks:  make(map[uint64]*types.Block),
	}
	// Create 3 blocks:
	// - the current block, before Osaka
	// - the first block after Osaka
	// - another post-Osaka block with several transactions in it
	header0 := chain.CurrentBlock()
	header0.Time = 0
	chain.blocks[0] = types.NewBlockWithHeader(header0)

	header1 := chain.CurrentBlock()
	header1.Number = big.NewInt(1)
	header1.Time = 1
	chain.blocks[1] = types.NewBlockWithHeader(header1)

	header2 := chain.CurrentBlock()
	header2.Time = 2
	header2.Number = big.NewInt(2)

	// Make a copy of one of the pre-Osaka transactions and convert it to v1 here
	// so that we can add it to the pool later and ensure a duplicate is not added
	// by the conversion queue.
	// NOTE(review): the return value of WithBlobTxSidecar is discarded — if
	// that API returns a copy rather than mutating tx, the v1 sidecar never
	// reaches block2; confirm against the types package.
	tx := preOsakaTxs[len(preOsakaTxs)-1]
	sc := *tx.BlobTxSidecar() // copy sidecar
	sc.ToV1()
	tx.WithBlobTxSidecar(&sc)
	block2 := types.NewBlockWithHeader(header2).WithBody(types.Body{Transactions: append(postOsakaTxs, tx)})
	chain.blocks[2] = block2

	pool := New(Config{Datadir: storage}, chain, nil)
	if err := pool.Init(1, header0, newReserver()); err != nil {
		t.Fatalf("failed to create blob pool: %v", err)
	}
	errs := pool.Add(preOsakaTxs, true)
	for i, err := range errs {
		if err != nil {
			t.Errorf("failed to insert blob tx from %s: %s", addrs[i], errs[i])
		}
	}
	// Kick off migration.
	pool.Reset(header0, header1)

	// Add the v0 sidecar tx, but don't block so we can keep doing other stuff
	// while it converts the sidecar.
	addDone := make(chan struct{})
	go func() {
		pool.Add(types.Transactions{postOsakaTxs[0]}, false)
		close(addDone)
	}()
	// Add the post-Osaka v1 sidecar txs.
	errs = pool.Add(postOsakaTxs[1:], false)
	for _, err := range errs {
		if err != nil {
			t.Fatalf("expected tx add to succeed: %v", err)
		}
	}
	// Wait for the first tx's conversion to complete, then check that all
	// transactions added after Osaka can be accounted for in the pool.
	<-addDone
	pending := pool.Pending(txpool.PendingFilter{BlobTxs: true, BlobVersion: types.BlobSidecarVersion1})
	for _, tx := range postOsakaTxs {
		from, _ := pool.signer.Sender(tx)
		if len(pending[from]) != 1 || pending[from][0].Hash != tx.Hash() {
			t.Fatalf("expected post-Osaka txs to be pending")
		}
	}
	// Now update the pool with the next block. This should cause the pool to
	// clear out the post-Osaka txs since they were included in block 2. Since the
	// test blockchain doesn't manage nonces, we'll just do that manually before
	// the reset is called. Don't forget about the pre-Osaka transaction we also
	// added to block 2!
	for i := range postOsakaTxs {
		statedb.SetNonce(addrs[len(preOsakaTxs)+i], 1, tracing.NonceChangeEoACall)
	}
	statedb.SetNonce(addrs[len(preOsakaTxs)-1], 1, tracing.NonceChangeEoACall)
	pool.Reset(header1, block2.Header())

	// Now verify no post-Osaka transactions are tracked by the pool.
	for i, tx := range postOsakaTxs {
		if pool.Get(tx.Hash()) != nil {
			t.Fatalf("expected txs added post-osaka to have been placed in limbo due to inclusion in a block: index %d, hash %s", i, tx.Hash())
		}
	}
	// Wait for the pool migration to complete.
	<-pool.cQueue.anyBillyConversionDone

	// Verify all transactions in the pool were converted and verify the
	// subsequent cell proofs.
	count, _ := pool.Stats()
	if count != len(preOsakaTxs)-1 {
		t.Errorf("expected pending count to match initial tx count: pending=%d, expected=%d", count, len(preOsakaTxs)-1)
	}
	for addr, acc := range pool.index {
		for _, m := range acc {
			if m.version != types.BlobSidecarVersion1 {
				t.Errorf("expected sidecar to have been converted: from %s, hash %s", addr, m.hash)
			}
			tx := pool.Get(m.hash)
			if tx == nil {
				t.Errorf("failed to get tx by hash: %s", m.hash)
			}
			sc := tx.BlobTxSidecar()
			if err := kzg4844.VerifyCellProofs(sc.Blobs, sc.Commitments, sc.Proofs); err != nil {
				t.Errorf("failed to verify cell proofs for tx %s after conversion: %s", m.hash, err)
			}
		}
	}
	verifyPoolInternals(t, pool)

	// Launch conversion a second time.
	// This is just a sanity check to ensure we can handle it.
	pool.Reset(header0, header1)
	pool.Close()
}
// fakeBilly is a billy.Database implementation which just drops data on the floor.
type fakeBilly struct {
billy.Database
@ -2360,5 +2110,3 @@ func benchmarkPoolPending(b *testing.B, datacap uint64) {
}
}
}
func newUint64(val uint64) *uint64 { return &val }

View file

@ -1,218 +0,0 @@
// Copyright 2025 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package blobpool
import (
"errors"
"slices"
"sync/atomic"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/log"
)
// maxPendingConversionTasks caps the number of pending conversion tasks. This
// prevents excessive memory usage; the worst-case scenario (2k transactions
// with 6 blobs each) would consume approximately 1.5GB of memory.
const maxPendingConversionTasks = 2048
// txConvert represents a conversion task with an attached legacy blob transaction.
// Instances are produced by convert() and consumed by the queue's worker batches.
type txConvert struct {
	tx   *types.Transaction // Legacy blob transaction
	done chan error         // Channel for signaling back if the conversion succeeds
}
// conversionQueue is a dedicated queue for converting legacy blob transactions
// received from the network after the Osaka fork. Since conversion is expensive,
// it is performed in the background by a single thread, ensuring the main Geth
// process is not overloaded.
type conversionQueue struct {
	tasks      chan *txConvert // Per-transaction conversion requests from convert()
	startBilly chan func()     // Background store-conversion requests from launchBillyConversion()
	quit       chan struct{}   // Closed by close() to ask the event loop to shut down
	closed     chan struct{}   // Closed by loop() once it has fully terminated

	billyQueue    []func()      // FIFO of queued billy conversion tasks (loop-owned, no locking)
	billyTaskDone chan struct{} // Completion signal of the running billy task; nil when idle
	// This channel will be closed when the first billy conversion finishes.
	// It's added for unit tests to synchronize with the conversion progress.
	anyBillyConversionDone chan struct{}
}
// newConversionQueue constructs the conversion queue and spawns its event loop.
func newConversionQueue() *conversionQueue {
	queue := &conversionQueue{
		tasks:                  make(chan *txConvert),
		startBilly:             make(chan func()),
		quit:                   make(chan struct{}),
		closed:                 make(chan struct{}),
		anyBillyConversionDone: make(chan struct{}),
	}
	go queue.loop()
	return queue
}
// convert accepts a legacy blob transaction with version-0 blobs and queues it
// for conversion.
//
// This function may block for a long time until the transaction is processed.
func (q *conversionQueue) convert(tx *types.Transaction) error {
	result := make(chan error, 1)
	task := &txConvert{tx: tx, done: result}
	select {
	case <-q.closed:
		return errors.New("conversion queue closed")
	case q.tasks <- task:
	}
	// The worker (or the shutdown path) sends exactly one result per task.
	return <-result
}
// launchBillyConversion starts a conversion task in the background. It returns
// an error if the queue has already been shut down.
func (q *conversionQueue) launchBillyConversion(fn func()) error {
	select {
	case <-q.closed:
		return errors.New("conversion queue closed")
	case q.startBilly <- fn:
		return nil
	}
}
// close terminates the conversion queue and waits for the event loop to exit.
// A second sequential call is a no-op because q.closed is already closed.
//
// NOTE(review): two goroutines calling close concurrently could both observe
// q.closed open and both reach close(q.quit), which panics on the second close.
// Confirm close is only ever invoked from a single goroutine (or guard it).
func (q *conversionQueue) close() {
	select {
	case <-q.closed:
		// Event loop already terminated, nothing to do.
		return
	default:
		close(q.quit)
		<-q.closed // wait for loop() to signal full termination
	}
}
// run converts a batch of legacy blob txs to the new cell proof format.
// Every task receives exactly one result on its done channel; close(done)
// signals batch completion back to the event loop.
func (q *conversionQueue) run(tasks []*txConvert, done chan struct{}, interrupt *atomic.Int32) {
	defer close(done)

	for _, task := range tasks {
		// Bail out on the remaining tasks if shutdown was requested.
		if interrupt != nil && interrupt.Load() != 0 {
			task.done <- errors.New("conversion is interrupted")
			continue
		}
		sc := task.tx.BlobTxSidecar()
		if sc == nil {
			task.done <- errors.New("tx without sidecar")
			continue
		}
		// Run the conversion, the original sidecar will be mutated in place
		begin := time.Now()
		err := sc.ToV1()
		task.done <- err
		log.Trace("Converted legacy blob tx", "hash", task.tx.Hash(), "err", err, "elapsed", common.PrettyDuration(time.Since(begin)))
	}
}
// loop is the event loop of the conversion queue. It owns all queue state
// (txTasks, billyQueue, billyTaskDone) and runs until q.quit is closed,
// closing q.closed on exit so close() can wait for full termination.
func (q *conversionQueue) loop() {
	defer close(q.closed)
	var (
		done      chan struct{} // Non-nil if background routine is active
		interrupt *atomic.Int32 // Flag to signal conversion interruption

		// The pending tasks for sidecar conversion. We assume the number of legacy
		// blob transactions requiring conversion will not be excessive. However,
		// a hard cap is applied as a protective measure.
		txTasks []*txConvert

		firstBilly = true // Whether anyBillyConversionDone is still open
	)
	for {
		select {
		case t := <-q.tasks:
			// Reject new work beyond the hard cap to bound memory usage.
			if len(txTasks) >= maxPendingConversionTasks {
				t.done <- errors.New("conversion queue is overloaded")
				continue
			}
			txTasks = append(txTasks, t)
			// Launch the background conversion thread if it's idle
			if done == nil {
				done, interrupt = make(chan struct{}), new(atomic.Int32)
				tasks := slices.Clone(txTasks)
				txTasks = txTasks[:0]
				go q.run(tasks, done, interrupt)
			}
		case <-done:
			// Current batch finished; immediately start the next batch if
			// tasks accumulated while the worker was busy.
			done, interrupt = nil, nil
			if len(txTasks) > 0 {
				done, interrupt = make(chan struct{}), new(atomic.Int32)
				tasks := slices.Clone(txTasks)
				txTasks = txTasks[:0]
				go q.run(tasks, done, interrupt)
			}
		case fn := <-q.startBilly:
			q.billyQueue = append(q.billyQueue, fn)
			q.runNextBillyTask()
		case <-q.billyTaskDone:
			// Note: a nil billyTaskDone (idle) blocks this case forever,
			// which is the intended behavior for a nil channel in select.
			if firstBilly {
				close(q.anyBillyConversionDone)
				firstBilly = false
			}
			q.runNextBillyTask()
		case <-q.quit:
			if done != nil {
				log.Debug("Waiting for blob proof conversion to exit")
				interrupt.Store(1)
				<-done
			}
			if q.billyTaskDone != nil {
				log.Debug("Waiting for blobpool billy conversion to exit")
				<-q.billyTaskDone
			}
			// Signal any tasks that were queued for the next batch but never started
			// so callers blocked in convert() receive an error instead of hanging.
			for _, t := range txTasks {
				// Best-effort notify; t.done is a buffered channel of size 1
				// created by convert(), and we send exactly once per task.
				t.done <- errors.New("conversion queue closed")
			}
			// Drop references to allow GC of the backing array.
			txTasks = txTasks[:0]
			return
		}
	}
}
// runNextBillyTask pops the next queued billy conversion (if any) and runs it
// on a fresh goroutine, wiring its completion into q.billyTaskDone. When the
// queue is empty, billyTaskDone is reset to nil so the loop's receive blocks.
func (q *conversionQueue) runNextBillyTask() {
	if len(q.billyQueue) == 0 {
		q.billyTaskDone = nil
		return
	}
	// Shift the queue left in place, reusing the backing array.
	next := q.billyQueue[0]
	copy(q.billyQueue, q.billyQueue[1:])
	q.billyQueue = q.billyQueue[:len(q.billyQueue)-1]

	finished := make(chan struct{})
	go func() {
		defer close(finished)
		next()
	}()
	q.billyTaskDone = finished
}

View file

@ -1,171 +0,0 @@
// Copyright 2025 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package blobpool
import (
"crypto/ecdsa"
"crypto/sha256"
"sync"
"testing"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/crypto/kzg4844"
"github.com/ethereum/go-ethereum/params"
"github.com/holiman/uint256"
)
// createV1BlobTx creates a blob transaction with version 1 sidecar for testing.
// The nonce doubles as the blob's first byte so every call yields distinct data.
func createV1BlobTx(nonce uint64, key *ecdsa.PrivateKey) *types.Transaction {
	payload := &kzg4844.Blob{byte(nonce)}
	commitment, _ := kzg4844.BlobToCommitment(payload)
	proofs, _ := kzg4844.ComputeCellProofs(payload)

	sidecar := types.NewBlobTxSidecar(types.BlobSidecarVersion1, []kzg4844.Blob{*payload}, []kzg4844.Commitment{commitment}, proofs)
	inner := &types.BlobTx{
		ChainID:    uint256.MustFromBig(params.MainnetChainConfig.ChainID),
		Nonce:      nonce,
		GasTipCap:  uint256.NewInt(1),
		GasFeeCap:  uint256.NewInt(1000),
		Gas:        21000,
		BlobFeeCap: uint256.NewInt(100),
		BlobHashes: []common.Hash{kzg4844.CalcBlobHashV1(sha256.New(), &commitment)},
		Value:      uint256.NewInt(100),
		Sidecar:    sidecar,
	}
	return types.MustSignNewTx(key, types.LatestSigner(params.MainnetChainConfig), inner)
}
// TestConversionQueueBasic checks that a legacy (v0) blob transaction pushed
// through the queue comes back with its sidecar upgraded to version 1.
func TestConversionQueueBasic(t *testing.T) {
	q := newConversionQueue()
	defer q.close()

	key, _ := crypto.GenerateKey()
	tx := makeTx(0, 1, 1, 1, key)

	if err := q.convert(tx); err != nil {
		t.Fatalf("Expected successful conversion, got error: %v", err)
	}
	if got := tx.BlobTxSidecar().Version; got != types.BlobSidecarVersion1 {
		t.Errorf("Expected sidecar version to be %d, got %d", types.BlobSidecarVersion1, got)
	}
}
// TestConversionQueueV1BlobTx checks that a transaction which already carries
// a version-1 sidecar passes through the queue without its version changing.
func TestConversionQueueV1BlobTx(t *testing.T) {
	q := newConversionQueue()
	defer q.close()

	key, _ := crypto.GenerateKey()
	tx := createV1BlobTx(0, key)
	before := tx.BlobTxSidecar().Version

	if err := q.convert(tx); err != nil {
		t.Fatalf("Expected successful conversion, got error: %v", err)
	}
	if after := tx.BlobTxSidecar().Version; after != before {
		t.Errorf("Expected sidecar version to remain %d, got %d", before, after)
	}
}
// TestConversionQueueClosed checks that submitting work to an already-closed
// queue fails fast with an error rather than blocking forever.
func TestConversionQueueClosed(t *testing.T) {
	q := newConversionQueue()
	q.close() // shut down before submitting anything

	key, _ := crypto.GenerateKey()
	if err := q.convert(makeTx(0, 1, 1, 1, key)); err == nil {
		t.Fatal("Expected error when converting on closed queue, got nil")
	}
}
// TestConversionQueueDoubleClose ensures a second sequential close is a no-op
// (close guards on q.closed) rather than a panic from re-closing q.quit.
func TestConversionQueueDoubleClose(t *testing.T) {
	queue := newConversionQueue()
	queue.close()
	queue.close() // Should not panic
}
// TestConversionQueueAutoRestartBatch verifies that tasks queued while a batch
// is already running are picked up automatically in a follow-up batch once the
// first one completes (the event loop restarts the worker on batch completion).
func TestConversionQueueAutoRestartBatch(t *testing.T) {
	queue := newConversionQueue()
	defer queue.close()

	key, _ := crypto.GenerateKey()
	// Create a heavy transaction to ensure the first batch runs long enough
	// for subsequent tasks to be queued while it is active.
	heavy := makeMultiBlobTx(0, 1, 1, 1, int(params.BlobTxMaxBlobs), 0, key, types.BlobSidecarVersion0)

	var wg sync.WaitGroup
	wg.Add(1)
	heavyDone := make(chan error, 1)
	go func() {
		defer wg.Done()
		heavyDone <- queue.convert(heavy)
	}()
	// Give the conversion worker a head start so that the following tasks are
	// enqueued while the first batch is running.
	time.Sleep(200 * time.Millisecond)

	tx1 := makeTx(1, 1, 1, 1, key)
	tx2 := makeTx(2, 1, 1, 1, key)

	wg.Add(2)
	done1 := make(chan error, 1)
	done2 := make(chan error, 1)
	go func() { defer wg.Done(); done1 <- queue.convert(tx1) }()
	go func() { defer wg.Done(); done2 <- queue.convert(tx2) }()

	// Generous timeouts: proof conversion is CPU-heavy and may be slow on CI.
	select {
	case err := <-done1:
		if err != nil {
			t.Fatalf("tx1 conversion error: %v", err)
		}
	case <-time.After(30 * time.Second):
		t.Fatal("timeout waiting for tx1 conversion")
	}
	select {
	case err := <-done2:
		if err != nil {
			t.Fatalf("tx2 conversion error: %v", err)
		}
	case <-time.After(30 * time.Second):
		t.Fatal("timeout waiting for tx2 conversion")
	}
	select {
	case err := <-heavyDone:
		if err != nil {
			t.Fatalf("heavy conversion error: %v", err)
		}
	case <-time.After(30 * time.Second):
		t.Fatal("timeout waiting for heavy conversion")
	}
	wg.Wait()

	// Both late-queued transactions must have been converted to v1.
	if tx1.BlobTxSidecar().Version != types.BlobSidecarVersion1 {
		t.Fatalf("tx1 sidecar version mismatch: have %d, want %d", tx1.BlobTxSidecar().Version, types.BlobSidecarVersion1)
	}
	if tx2.BlobTxSidecar().Version != types.BlobSidecarVersion1 {
		t.Fatalf("tx2 sidecar version mismatch: have %d, want %d", tx2.BlobTxSidecar().Version, types.BlobSidecarVersion1)
	}
}

View file

@ -20,7 +20,6 @@ import (
"errors"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/consensus/misc/eip4844"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/params"
@ -57,7 +56,7 @@ func newLimbo(config *params.ChainConfig, datadir string) (*limbo, error) {
}
// Create new slotter for pre-Osaka blob configuration.
slotter := newSlotter(eip4844.LatestMaxBlobsPerBlock(config))
slotter := newSlotter(params.BlobTxMaxBlobs)
// See if we need to migrate the limbo after fusaka.
slotter, err := tryMigrate(config, slotter, datadir)

View file

@ -110,13 +110,3 @@ func (l *lookup) untrack(tx *blobTxMeta) {
}
}
}
// update updates the transaction index. It should only be used in the conversion.
func (l *lookup) update(hash common.Hash, id uint64, size uint64) bool {
meta, exists := l.txIndex[hash]
if !exists {
return false
}
meta.id, meta.size = id, size
return true
}

View file

@ -17,7 +17,6 @@
package blobpool
import (
"github.com/ethereum/go-ethereum/consensus/misc/eip4844"
"github.com/ethereum/go-ethereum/params"
"github.com/holiman/billy"
)
@ -42,7 +41,7 @@ func tryMigrate(config *params.ChainConfig, slotter billy.SlotSizeFn, datadir st
// If the version found is less than the currently configured store version,
// perform a migration then write the updated version of the store.
if version < storeVersion {
newSlotter := newSlotterEIP7594(eip4844.LatestMaxBlobsPerBlock(config))
newSlotter := newSlotterEIP7594(params.BlobTxMaxBlobs)
if err := billy.Migrate(billy.Options{Path: datadir, Repair: true}, slotter, newSlotter); err != nil {
return nil, err
}
@ -54,7 +53,7 @@ func tryMigrate(config *params.ChainConfig, slotter billy.SlotSizeFn, datadir st
store.Close()
}
// Set the slotter to the format now that the Osaka is active.
slotter = newSlotterEIP7594(eip4844.LatestMaxBlobsPerBlock(config))
slotter = newSlotterEIP7594(params.BlobTxMaxBlobs)
}
return slotter, nil
}

View file

@ -288,7 +288,12 @@ func New(config Config, chain BlockChain) *LegacyPool {
// Filter returns whether the given transaction can be consumed by the legacy
// pool, specifically, whether it is a Legacy, AccessList or Dynamic transaction.
func (pool *LegacyPool) Filter(tx *types.Transaction) bool {
switch tx.Type() {
return pool.FilterType(tx.Type())
}
// FilterType returns whether the legacy pool supports the given transaction type.
func (pool *LegacyPool) FilterType(kind byte) bool {
switch kind {
case types.LegacyTxType, types.AccessListTxType, types.DynamicFeeTxType, types.SetCodeTxType:
return true
default:

View file

@ -100,6 +100,9 @@ type SubPool interface {
// to this particular subpool.
Filter(tx *types.Transaction) bool
// FilterType returns whether the subpool supports the given transaction type.
FilterType(kind byte) bool
// Init sets the base parameters of the subpool, allowing it to load any saved
// transactions from disk and also permitting internal maintenance routines to
// start up.

View file

@ -489,3 +489,14 @@ func (p *TxPool) Clear() {
subpool.Clear()
}
}
// FilterType returns whether a transaction with the given type is supported
// (can be added) by the pool.
func (p *TxPool) FilterType(kind byte) bool {
for _, subpool := range p.subpools {
if subpool.FilterType(kind) {
return true
}
}
return false
}

View file

@ -130,7 +130,7 @@ func ValidateTransaction(tx *types.Transaction, head *types.Header, signer types
return fmt.Errorf("%w: gas %v, minimum needed %v", core.ErrIntrinsicGas, tx.Gas(), intrGas)
}
// Ensure the transaction can cover floor data gas.
if opts.Config.IsPrague(head.Number, head.Time) {
if rules.IsPrague {
floorDataGas, err := core.FloorDataGas(tx.Data())
if err != nil {
return err
@ -160,6 +160,15 @@ func validateBlobTx(tx *types.Transaction, head *types.Header, opts *ValidationO
if sidecar == nil {
return errors.New("missing sidecar in blob transaction")
}
// Ensure the sidecar is constructed with the correct version, consistent
// with the current fork.
version := types.BlobSidecarVersion0
if opts.Config.IsOsaka(head.Number, head.Time) {
version = types.BlobSidecarVersion1
}
if sidecar.Version != version {
return fmt.Errorf("unexpected sidecar version, want: %d, got: %d", version, sidecar.Version)
}
// Ensure the blob fee cap satisfies the minimum blob gas price
if tx.BlobGasFeeCapIntCmp(blobTxMinBlobGasPrice) < 0 {
return fmt.Errorf("%w: blob fee cap %v, minimum needed %v", ErrTxGasPriceTooLow, tx.BlobGasFeeCap(), blobTxMinBlobGasPrice)

View file

@ -31,7 +31,6 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-verkle"
)
// A BlockNonce is a 64-bit hash which proves (combined with the
@ -61,13 +60,6 @@ func (n *BlockNonce) UnmarshalText(input []byte) error {
return hexutil.UnmarshalFixedText("BlockNonce", input, n[:])
}
// ExecutionWitness represents the witness + proof used in a verkle context,
// to provide the ability to execute a block statelessly.
type ExecutionWitness struct {
StateDiff verkle.StateDiff `json:"stateDiff"`
VerkleProof *verkle.VerkleProof `json:"verkleProof"`
}
//go:generate go run github.com/fjl/gencodec -type Header -field-override headerMarshaling -out gen_header_json.go
//go:generate go run ../../rlp/rlpgen -type Header -out gen_header_rlp.go
@ -209,11 +201,6 @@ type Block struct {
transactions Transactions
withdrawals Withdrawals
// witness is not an encoded part of the block body.
// It is held in Block in order for easy relaying to the places
// that process it.
witness *ExecutionWitness
// caches
hash atomic.Pointer[common.Hash]
size atomic.Uint64
@ -429,9 +416,6 @@ func (b *Block) BlobGasUsed() *uint64 {
return blobGasUsed
}
// ExecutionWitness returns the verkle execution witneess + proof for a block
func (b *Block) ExecutionWitness() *ExecutionWitness { return b.witness }
// Size returns the true RLP encoded storage size of the block, either by encoding
// and returning it, or returning a previously cached value.
func (b *Block) Size() uint64 {
@ -494,7 +478,6 @@ func (b *Block) WithSeal(header *Header) *Block {
transactions: b.transactions,
uncles: b.uncles,
withdrawals: b.withdrawals,
witness: b.witness,
}
}
@ -506,7 +489,6 @@ func (b *Block) WithBody(body Body) *Block {
transactions: slices.Clone(body.Transactions),
uncles: make([]*Header, len(body.Uncles)),
withdrawals: slices.Clone(body.Withdrawals),
witness: b.witness,
}
for i := range body.Uncles {
block.uncles[i] = CopyHeader(body.Uncles[i])
@ -514,16 +496,6 @@ func (b *Block) WithBody(body Body) *Block {
return block
}
func (b *Block) WithWitness(witness *ExecutionWitness) *Block {
return &Block{
header: b.header,
transactions: b.transactions,
uncles: b.uncles,
withdrawals: b.withdrawals,
witness: witness,
}
}
// Hash returns the keccak256 hash of b's header.
// The hash is computed on the first call and cached thereafter.
func (b *Block) Hash() common.Hash {

View file

@ -214,7 +214,7 @@ func (evm *EVM) SetJumpDestCache(jumpDests JumpDestCache) {
// This is not threadsafe and should only be done very cautiously.
func (evm *EVM) SetTxContext(txCtx TxContext) {
if evm.chainRules.IsEIP4762 {
txCtx.AccessEvents = state.NewAccessEvents(evm.StateDB.PointCache())
txCtx.AccessEvents = state.NewAccessEvents()
}
evm.TxContext = txCtx
}

View file

@ -23,7 +23,6 @@ import (
"github.com/ethereum/go-ethereum/core/tracing"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/trie/utils"
"github.com/holiman/uint256"
)
@ -84,9 +83,6 @@ type StateDB interface {
// even if the feature/fork is not active yet
AddSlotToAccessList(addr common.Address, slot common.Hash)
// PointCache returns the point cache used in computations
PointCache() *utils.PointCache
Prepare(rules params.Rules, sender, coinbase common.Address, dest *common.Address, precompiles []common.Address, txAccesses types.AccessList)
RevertToSnapshot(int)

View file

@ -56,7 +56,7 @@ func Fuzz(data []byte) int {
fmt.Printf("untrusted: %v\n", untrustedComment)
fmt.Printf("trusted: %v\n", trustedComment)
err = SignifySignFile(tmpFile.Name(), tmpFile.Name()+".sig", testSecKey, untrustedComment, trustedComment)
err = SignFile(tmpFile.Name(), tmpFile.Name()+".sig", testSecKey, untrustedComment, trustedComment)
if err != nil {
panic(err)
}
@ -68,7 +68,7 @@ func Fuzz(data []byte) int {
signify = path
}
_, err := exec.LookPath(signify)
_, err = exec.LookPath(signify)
if err != nil {
panic(err)
}

View file

@ -36,7 +36,6 @@ import (
"github.com/ethereum/go-ethereum/eth/ethconfig"
"github.com/ethereum/go-ethereum/internal/version"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/metrics"
"github.com/ethereum/go-ethereum/miner"
"github.com/ethereum/go-ethereum/node"
"github.com/ethereum/go-ethereum/params"
@ -81,20 +80,6 @@ const (
beaconUpdateWarnFrequency = 5 * time.Minute
)
var (
// Number of blobs requested via getBlobsV2
getBlobsRequestedCounter = metrics.NewRegisteredCounter("engine/getblobs/requested", nil)
// Number of blobs requested via getBlobsV2 that are present in the blobpool
getBlobsAvailableCounter = metrics.NewRegisteredCounter("engine/getblobs/available", nil)
// Number of times getBlobsV2 responded with “hit”
getBlobsV2RequestHit = metrics.NewRegisteredCounter("engine/getblobs/hit", nil)
// Number of times getBlobsV2 responded with “miss”
getBlobsV2RequestMiss = metrics.NewRegisteredCounter("engine/getblobs/miss", nil)
)
type ConsensusAPI struct {
eth *eth.Ethereum
@ -137,6 +122,9 @@ type ConsensusAPI struct {
// NewConsensusAPI creates a new consensus api for the given backend.
// The underlying blockchain needs to have a valid terminal total difficulty set.
//
// This function creates a long-lived object with an attached background thread.
// For testing or other short-term use cases, please use newConsensusAPIWithoutHeartbeat.
func NewConsensusAPI(eth *eth.Ethereum) *ConsensusAPI {
api := newConsensusAPIWithoutHeartbeat(eth)
go api.heartbeat()
@ -517,7 +505,7 @@ func (api *ConsensusAPI) GetBlobsV1(hashes []common.Hash) ([]*engine.BlobAndProo
if len(hashes) > 128 {
return nil, engine.TooLargeRequest.With(fmt.Errorf("requested blob count too large: %v", len(hashes)))
}
blobs, _, proofs, err := api.eth.BlobTxPool().GetBlobs(hashes, types.BlobSidecarVersion0, false)
blobs, _, proofs, err := api.eth.BlobTxPool().GetBlobs(hashes, types.BlobSidecarVersion0)
if err != nil {
return nil, engine.InvalidParams.With(err)
}
@ -563,7 +551,7 @@ func (api *ConsensusAPI) GetBlobsV1(hashes []common.Hash) ([]*engine.BlobAndProo
func (api *ConsensusAPI) GetBlobsV2(hashes []common.Hash) ([]*engine.BlobAndProofV2, error) {
head := api.eth.BlockChain().CurrentHeader()
if api.config().LatestFork(head.Time) < forks.Osaka {
return nil, unsupportedForkErr("engine_getBlobsV2 is not available before Osaka fork")
return nil, nil
}
if len(hashes) > 128 {
return nil, engine.TooLargeRequest.With(fmt.Errorf("requested blob count too large: %v", len(hashes)))
@ -578,7 +566,7 @@ func (api *ConsensusAPI) GetBlobsV2(hashes []common.Hash) ([]*engine.BlobAndProo
return nil, nil
}
blobs, _, proofs, err := api.eth.BlobTxPool().GetBlobs(hashes, types.BlobSidecarVersion1, false)
blobs, _, proofs, err := api.eth.BlobTxPool().GetBlobs(hashes, types.BlobSidecarVersion1)
if err != nil {
return nil, engine.InvalidParams.With(err)
}
@ -757,7 +745,7 @@ func (api *ConsensusAPI) newPayload(params engine.ExecutableData, versionedHashe
return api.delayPayloadImport(block), nil
}
if block.Time() <= parent.Time() {
log.Warn("Invalid timestamp", "parent", block.Time(), "block", block.Time())
log.Warn("Invalid timestamp", "parent", parent.Time(), "block", block.Time())
return api.invalid(errors.New("invalid timestamp"), parent.Header()), nil
}
// Another corner case: if the node is in snap sync mode, but the CL client
@ -818,7 +806,7 @@ func (api *ConsensusAPI) delayPayloadImport(block *types.Block) engine.PayloadSt
return engine.PayloadStatusV1{Status: engine.SYNCING}
}
// Either no beacon sync was started yet, or it rejected the delivered
// payload as non-integratable on top of the existing sync. We'll just
// payload as non-integrable on top of the existing sync. We'll just
// have to rely on the beacon client to forcefully update the head with
// a forkchoice update request.
if api.eth.Downloader().ConfigSyncMode() == ethconfig.FullSync {
@ -916,8 +904,6 @@ func (api *ConsensusAPI) invalid(err error, latestValid *types.Header) engine.Pa
// heartbeat loops indefinitely, and checks if there have been beacon client updates
// received in the last while. If not - or if they but strange ones - it warns the
// user that something might be off with their consensus node.
//
// TODO(karalabe): Spin this goroutine down somehow
func (api *ConsensusAPI) heartbeat() {
// Sleep a bit on startup since there's obviously no beacon client yet
// attached, so no need to print scary warnings to the user.

33
eth/catalyst/metrics.go Normal file
View file

@ -0,0 +1,33 @@
// Copyright 2025 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package catalyst
import "github.com/ethereum/go-ethereum/metrics"
var (
	// getBlobsRequestedCounter counts the total number of blobs requested
	// via the getBlobsV2 engine API call.
	getBlobsRequestedCounter = metrics.NewRegisteredCounter("engine/getblobs/requested", nil)
	// getBlobsAvailableCounter counts how many of the blobs requested via
	// getBlobsV2 were present in the local blob pool.
	getBlobsAvailableCounter = metrics.NewRegisteredCounter("engine/getblobs/available", nil)
	// getBlobsV2RequestHit counts the getBlobsV2 calls that responded with "hit".
	getBlobsV2RequestHit = metrics.NewRegisteredCounter("engine/getblobs/hit", nil)
	// getBlobsV2RequestMiss counts the getBlobsV2 calls that responded with "miss".
	getBlobsV2RequestMiss = metrics.NewRegisteredCounter("engine/getblobs/miss", nil)
)

View file

@ -36,7 +36,6 @@ type beaconBackfiller struct {
downloader *Downloader // Downloader to direct via this callback implementation
success func() // Callback to run on successful sync cycle completion
filling bool // Flag whether the downloader is backfilling or not
filled *types.Header // Last header filled by the last terminated sync loop
started chan struct{} // Notification channel whether the downloader inited
lock sync.Mutex // Mutex protecting the sync lock
}
@ -56,12 +55,15 @@ func (b *beaconBackfiller) suspend() *types.Header {
// If no filling is running, don't waste cycles
b.lock.Lock()
filling := b.filling
filled := b.filled
started := b.started
b.lock.Unlock()
if !filling {
return filled // Return the filled header on the previous sync completion
// Sync cycle was inactive, retrieve and return the latest snap block
// as the filled header.
log.Debug("Backfiller was inactive")
return b.downloader.blockchain.CurrentSnapBlock()
}
// A previous filling should be running, though it may happen that it hasn't
// yet started (being done on a new goroutine). Many concurrent beacon head
@ -73,9 +75,9 @@ func (b *beaconBackfiller) suspend() *types.Header {
// Now that we're sure the downloader successfully started up, we can cancel
// it safely without running the risk of data races.
b.downloader.Cancel()
log.Debug("Backfiller has been suspended")
// Sync cycle was just terminated, retrieve and return the last filled header.
// Can't use `filled` as that contains a stale value from before cancellation.
return b.downloader.blockchain.CurrentSnapBlock()
}
@ -86,10 +88,10 @@ func (b *beaconBackfiller) resume() {
// If a previous filling cycle is still running, just ignore this start
// request. // TODO(karalabe): We should make this channel driven
b.lock.Unlock()
log.Debug("Backfiller is running")
return
}
b.filling = true
b.filled = nil
b.started = make(chan struct{})
b.lock.Unlock()
@ -100,7 +102,6 @@ func (b *beaconBackfiller) resume() {
defer func() {
b.lock.Lock()
b.filling = false
b.filled = b.downloader.blockchain.CurrentSnapBlock()
b.lock.Unlock()
}()
// If the downloader fails, report an error as in beacon chain mode there
@ -110,11 +111,13 @@ func (b *beaconBackfiller) resume() {
return
}
// Synchronization succeeded. Since this happens async, notify the outer
// context to disable snap syncing and enable transaction propagation.
// context to enable transaction propagation.
if b.success != nil {
b.success()
}
log.Debug("Backfilling completed")
}()
log.Debug("Backfilling started")
}
// SetBadBlockCallback sets the callback to run when a bad block is hit by the
@ -183,6 +186,8 @@ func (d *Downloader) findBeaconAncestor() (uint64, error) {
log.Error("Failed to retrieve beacon bounds", "err", err)
return 0, err
}
log.Debug("Searching beacon ancestor", "local", number, "beaconhead", beaconHead.Number, "beacontail", beaconTail.Number)
var linked bool
switch d.getMode() {
case ethconfig.FullSync:
@ -236,6 +241,7 @@ func (d *Downloader) findBeaconAncestor() (uint64, error) {
}
start = check
}
log.Debug("Found beacon ancestor", "number", start)
return start, nil
}

View file

@ -193,8 +193,12 @@ type BlockChain interface {
// CurrentSnapBlock retrieves the head snap block from the local chain.
CurrentSnapBlock() *types.Header
// SnapSyncCommitHead directly commits the head block to a certain entity.
SnapSyncCommitHead(common.Hash) error
// SnapSyncStart explicitly notifies the chain that snap sync is scheduled and
// marks chain mutations as disallowed.
SnapSyncStart() error
// SnapSyncComplete directly commits the head block to a certain entity.
SnapSyncComplete(common.Hash) error
// InsertHeadersBeforeCutoff inserts a batch of headers before the configured
// chain cutoff into the ancient store.
@ -244,7 +248,7 @@ func New(stateDb ethdb.Database, mode ethconfig.SyncMode, mux *event.TypeMux, ch
syncStartBlock: chain.CurrentSnapBlock().Number.Uint64(),
}
// Create the post-merge skeleton syncer and start the process
dl.skeleton = newSkeleton(stateDb, dl.peers, dropPeer, newBeaconBackfiller(dl, success))
dl.skeleton = newSkeleton(stateDb, dl.peers, dropPeer, newBeaconBackfiller(dl, success), chain)
go dl.stateFetcher()
return dl
@ -361,28 +365,21 @@ func (d *Downloader) synchronise(beaconPing chan struct{}) (err error) {
if d.notified.CompareAndSwap(false, true) {
log.Info("Block synchronisation started")
}
mode := d.moder.get()
// Obtain the sync mode used in this cycle
mode := d.moder.get(true)
defer func() {
if err == nil && mode == ethconfig.SnapSync {
d.moder.disableSnap()
log.Info("Disabled snap-sync after the initial sync cycle")
}
}()
// Disable chain mutations when snap sync is selected, ensuring the
// downloader is the sole mutator.
if mode == ethconfig.SnapSync {
// Snap sync will directly modify the persistent state, making the entire
// trie database unusable until the state is fully synced. To prevent any
// subsequent state reads, explicitly disable the trie database and state
// syncer is responsible to address and correct any state missing.
if d.blockchain.TrieDB().Scheme() == rawdb.PathScheme {
if err := d.blockchain.TrieDB().Disable(); err != nil {
return err
}
}
// Snap sync uses the snapshot namespace to store potentially flaky data until
// sync completely heals and finishes. Pause snapshot maintenance in the mean-
// time to prevent access.
if snapshots := d.blockchain.Snapshots(); snapshots != nil { // Only nil in tests
snapshots.Disable()
if err := d.blockchain.SnapSyncStart(); err != nil {
return err
}
}
// Reset the queue, peer set and wake channels to clean any internal leftover state
@ -427,7 +424,7 @@ func (d *Downloader) getMode() SyncMode {
// ConfigSyncMode returns the sync mode configured for the node.
// The actual running sync mode can differ from this.
func (d *Downloader) ConfigSyncMode() SyncMode {
return d.moder.get()
return d.moder.get(false)
}
// syncToHead starts a block synchronization based on the hash chain from
@ -1086,7 +1083,7 @@ func (d *Downloader) commitPivotBlock(result *fetchResult) error {
if _, err := d.blockchain.InsertReceiptChain([]*types.Block{block}, []rlp.RawValue{result.Receipts}, d.ancientLimit); err != nil {
return err
}
if err := d.blockchain.SnapSyncCommitHead(block.Hash()); err != nil {
if err := d.blockchain.SnapSyncComplete(block.Hash()); err != nil {
return err
}
d.committed.Store(true)

View file

@ -64,6 +64,12 @@ var errSyncMerged = errors.New("sync merged")
// should abort and restart with the new state.
var errSyncReorged = errors.New("sync reorged")
// errSyncTrimmed is an internal helper error to signal that the local chain
// has been trimmed (e.g, via debug_setHead explicitly) and the skeleton chain
// is no longer linked with the local chain. In this case, the skeleton sync
// should be re-scheduled again.
var errSyncTrimmed = errors.New("sync trimmed")
// errTerminated is returned if the sync mechanism was terminated for this run of
// the process. This is usually the case when Geth is shutting down and some events
// might still be propagating.
@ -201,6 +207,7 @@ type backfiller interface {
type skeleton struct {
db ethdb.Database // Database backing the skeleton
filler backfiller // Chain syncer suspended/resumed by head events
chain chainReader // Underlying block chain
peers *peerSet // Set of peers we can sync from
idles map[string]*peerConnection // Set of idle peers in the current sync cycle
@ -225,12 +232,19 @@ type skeleton struct {
syncStarting func() // callback triggered after a sync cycle is inited but before started
}
// chainReader wraps the method to retrieve the head of the local chain. It
// abstracts the blockchain away from the skeleton so that the sync logic can
// check whether the skeleton chain still links at or below the locally synced
// snap head (and so tests can substitute a stub implementation).
type chainReader interface {
	// CurrentSnapBlock retrieves the head snap block from the local chain.
	CurrentSnapBlock() *types.Header
}
// newSkeleton creates a new sync skeleton that tracks a potentially dangling
// header chain until it's linked into an existing set of blocks.
func newSkeleton(db ethdb.Database, peers *peerSet, drop peerDropFn, filler backfiller) *skeleton {
func newSkeleton(db ethdb.Database, peers *peerSet, drop peerDropFn, filler backfiller, chain chainReader) *skeleton {
sk := &skeleton{
db: db,
filler: filler,
chain: chain,
peers: peers,
drop: drop,
requests: make(map[uint64]*headerRequest),
@ -296,6 +310,11 @@ func (s *skeleton) startup() {
// head to force a cleanup.
head = newhead
case err == errSyncTrimmed:
// The skeleton chain is not linked with the local chain anymore,
// restart the sync.
head = nil
case err == errTerminated:
// Sync was requested to be terminated from within, stop and
// return (no need to pass a message, was already done internally)
@ -343,6 +362,29 @@ func (s *skeleton) Sync(head *types.Header, final *types.Header, force bool) err
}
}
// linked reports whether the block at the given number/hash is fully available
// locally (header, body and receipts all stored), i.e. whether the skeleton
// chain joins up with the local chain at that position.
func (s *skeleton) linked(number uint64, hash common.Hash) bool {
	// All three chain components must be present for the link to count.
	if !rawdb.HasHeader(s.db, hash, number) ||
		!rawdb.HasBody(s.db, hash, number) ||
		!rawdb.HasReceipts(s.db, hash, number) {
		return false
	}
	// Additionally require the link point to sit at or below the current snap
	// head. This accounts for edge cases where leftover chain segments above
	// the head may still link to the skeleton chain; in such cases the sync is
	// likely to fail due to potentially missing segments in the middle.
	//
	// You can try to produce the edge case by these steps:
	//   - sync the chain
	//   - debug.setHead(`0x1`)
	//   - kill the geth process (the chain segment will be left with chain head rewound)
	//   - restart
	if head := s.chain.CurrentSnapBlock(); head != nil && head.Number.Uint64() < number {
		return false
	}
	return true
}
// sync is the internal version of Sync that executes a single sync cycle, either
// until some termination condition is reached, or until the current cycle merges
// with a previously aborted run.
@ -367,10 +409,7 @@ func (s *skeleton) sync(head *types.Header) (*types.Header, error) {
// If the sync is already done, resume the backfiller. When the loop stops,
// terminate the backfiller too.
linked := len(s.progress.Subchains) == 1 &&
rawdb.HasHeader(s.db, s.progress.Subchains[0].Next, s.scratchHead) &&
rawdb.HasBody(s.db, s.progress.Subchains[0].Next, s.scratchHead) &&
rawdb.HasReceipts(s.db, s.progress.Subchains[0].Next, s.scratchHead)
linked := len(s.progress.Subchains) == 1 && s.linked(s.scratchHead, s.progress.Subchains[0].Next)
if linked {
s.filler.resume()
}
@ -486,7 +525,17 @@ func (s *skeleton) sync(head *types.Header) (*types.Header, error) {
// is still running, it will pick it up. If it already terminated,
// a new cycle needs to be spun up.
if linked {
s.filler.resume()
if len(s.progress.Subchains) == 1 && s.linked(s.scratchHead, s.progress.Subchains[0].Next) {
// The skeleton chain has been extended and is still linked with the local
// chain, try to re-schedule the backfiller if it's already terminated.
s.filler.resume()
} else {
// The skeleton chain is no longer linked to the local chain for some reason
// (e.g. debug_setHead was used to trim the local chain). Re-schedule the
// skeleton sync to fill the chain gap.
log.Warn("Local chain has been trimmed", "tailnumber", s.scratchHead, "tailhash", s.progress.Subchains[0].Next)
return nil, errSyncTrimmed
}
}
case req := <-requestFails:
@ -649,9 +698,19 @@ func (s *skeleton) processNewHead(head *types.Header, final *types.Header) error
// Not a noop / double head announce, abort with a reorg
return fmt.Errorf("%w, tail: %d, head: %d, newHead: %d", errChainReorged, lastchain.Tail, lastchain.Head, number)
}
// Terminate the sync if the chain head is gapped
if lastchain.Head+1 < number {
return fmt.Errorf("%w, head: %d, newHead: %d", errChainGapped, lastchain.Head, number)
}
// Ignore the duplicated beacon header announcement
if lastchain.Head == number {
local := rawdb.ReadSkeletonHeader(s.db, number)
if local != nil && local.Hash() == head.Hash() {
log.Debug("Ignored the identical beacon header", "number", number, "hash", local.Hash())
return nil
}
}
// Terminate the sync if the chain head is forked
if parent := rawdb.ReadSkeletonHeader(s.db, number-1); parent.Hash() != head.ParentHash {
return fmt.Errorf("%w, ancestor: %d, hash: %s, want: %s", errChainForked, number-1, parent.Hash(), head.ParentHash)
}
@ -669,6 +728,7 @@ func (s *skeleton) processNewHead(head *types.Header, final *types.Header) error
if err := batch.Write(); err != nil {
log.Crit("Failed to write skeleton sync status", "err", err)
}
log.Debug("Extended beacon header chain", "number", head.Number, "hash", head.Hash())
return nil
}
@ -909,6 +969,45 @@ func (s *skeleton) revertRequest(req *headerRequest) {
s.scratchOwners[(s.scratchHead-req.head)/requestHeaders] = ""
}
// mergeSubchains is invoked once certain beacon headers have been persisted
// locally and collapses any subchains that the leading subchain has extended
// into. It reports whether the trailing subchain was merged into its
// predecessor (in which case further scratch-space processing must stop,
// since the sync skipped ahead).
func (s *skeleton) mergeSubchains() bool {
	// If the subchain extended into the next subchain, we need to handle
	// the overlap. Since there could be many overlaps, do this in a loop.
	var merged bool
	for len(s.progress.Subchains) > 1 && s.progress.Subchains[1].Head >= s.progress.Subchains[0].Tail {
		// Extract some stats from the second subchain
		head := s.progress.Subchains[1].Head
		tail := s.progress.Subchains[1].Tail
		next := s.progress.Subchains[1].Next

		// Since we just overwrote part of the next subchain, we need to trim
		// its head independent of matching or mismatching content
		if s.progress.Subchains[1].Tail >= s.progress.Subchains[0].Tail {
			// Fully overwritten, get rid of the subchain as a whole
			log.Debug("Previous subchain fully overwritten", "head", head, "tail", tail, "next", next)
			s.progress.Subchains = append(s.progress.Subchains[:1], s.progress.Subchains[2:]...)
			continue
		}
		// Partially overwritten, trim the head to the overwritten size
		log.Debug("Previous subchain partially overwritten", "head", head, "tail", tail, "next", next)
		s.progress.Subchains[1].Head = s.progress.Subchains[0].Tail - 1

		// If the old subchain is an extension of the new one, merge the two
		// and let the skeleton syncer restart (to clean internal state)
		if rawdb.ReadSkeletonHeader(s.db, s.progress.Subchains[1].Head).Hash() == s.progress.Subchains[0].Next {
			log.Debug("Previous subchain merged", "head", head, "tail", tail, "next", next)
			s.progress.Subchains[0].Tail = s.progress.Subchains[1].Tail
			s.progress.Subchains[0].Next = s.progress.Subchains[1].Next
			s.progress.Subchains = append(s.progress.Subchains[:1], s.progress.Subchains[2:]...)
			merged = true
		}
	}
	return merged
}
func (s *skeleton) processResponse(res *headerResponse) (linked bool, merged bool) {
res.peer.log.Trace("Processing header response", "head", res.headers[0].Number, "hash", res.headers[0].Hash(), "count", len(res.headers))
@ -982,10 +1081,9 @@ func (s *skeleton) processResponse(res *headerResponse) (linked bool, merged boo
// processing is done, so it's just one more "needless" check.
//
// The weird cascading checks are done to minimize the database reads.
linked = rawdb.HasHeader(s.db, header.ParentHash, header.Number.Uint64()-1) &&
rawdb.HasBody(s.db, header.ParentHash, header.Number.Uint64()-1) &&
rawdb.HasReceipts(s.db, header.ParentHash, header.Number.Uint64()-1)
linked = s.linked(header.Number.Uint64()-1, header.ParentHash)
if linked {
log.Debug("Primary subchain linked", "number", header.Number.Uint64()-1, "hash", header.ParentHash)
break
}
}
@ -999,6 +1097,9 @@ func (s *skeleton) processResponse(res *headerResponse) (linked bool, merged boo
// If the beacon chain was linked to the local chain, completely swap out
// all internal progress and abort header synchronization.
if linked {
// Merge all overlapped subchains beforehand
s.mergeSubchains()
// Linking into the local chain should also mean that there are no
// leftover subchains, but in the case of importing the blocks via
// the engine API, we will not push the subchains forward. This will
@ -1056,41 +1157,10 @@ func (s *skeleton) processResponse(res *headerResponse) (linked bool, merged boo
s.scratchHead -= uint64(consumed)
// If the subchain extended into the next subchain, we need to handle
// the overlap. Since there could be many overlaps (come on), do this
// in a loop.
for len(s.progress.Subchains) > 1 && s.progress.Subchains[1].Head >= s.progress.Subchains[0].Tail {
// Extract some stats from the second subchain
head := s.progress.Subchains[1].Head
tail := s.progress.Subchains[1].Tail
next := s.progress.Subchains[1].Next
// Since we just overwrote part of the next subchain, we need to trim
// its head independent of matching or mismatching content
if s.progress.Subchains[1].Tail >= s.progress.Subchains[0].Tail {
// Fully overwritten, get rid of the subchain as a whole
log.Debug("Previous subchain fully overwritten", "head", head, "tail", tail, "next", next)
s.progress.Subchains = append(s.progress.Subchains[:1], s.progress.Subchains[2:]...)
continue
} else {
// Partially overwritten, trim the head to the overwritten size
log.Debug("Previous subchain partially overwritten", "head", head, "tail", tail, "next", next)
s.progress.Subchains[1].Head = s.progress.Subchains[0].Tail - 1
}
// If the old subchain is an extension of the new one, merge the two
// and let the skeleton syncer restart (to clean internal state)
if rawdb.ReadSkeletonHeader(s.db, s.progress.Subchains[1].Head).Hash() == s.progress.Subchains[0].Next {
log.Debug("Previous subchain merged", "head", head, "tail", tail, "next", next)
s.progress.Subchains[0].Tail = s.progress.Subchains[1].Tail
s.progress.Subchains[0].Next = s.progress.Subchains[1].Next
s.progress.Subchains = append(s.progress.Subchains[:1], s.progress.Subchains[2:]...)
merged = true
}
}
// If subchains were merged, all further available headers in the scratch
// space are invalid since we skipped ahead. Stop processing the scratch
// space to avoid dropping peers thinking they delivered invalid data.
merged = s.mergeSubchains()
if merged {
break
}
@ -1121,15 +1191,17 @@ func (s *skeleton) processResponse(res *headerResponse) (linked bool, merged boo
// due to the downloader backfilling past the tracked tail.
func (s *skeleton) cleanStales(filled *types.Header) error {
number := filled.Number.Uint64()
log.Trace("Cleaning stale beacon headers", "filled", number, "hash", filled.Hash())
log.Debug("Cleaning stale beacon headers", "filled", number, "hash", filled.Hash())
// If the filled header is below the linked subchain, something's corrupted
// internally. Report and error and refuse to do anything.
// If the filled header is below the subchain, it means the skeleton is not
// linked with local chain yet, don't bother to do cleanup.
if number+1 < s.progress.Subchains[0].Tail {
return fmt.Errorf("filled header below beacon header tail: %d < %d", number, s.progress.Subchains[0].Tail)
log.Debug("filled header below beacon header tail", "filled", number, "tail", s.progress.Subchains[0].Tail)
return nil
}
// If nothing in subchain is filled, don't bother to do cleanup.
if number+1 == s.progress.Subchains[0].Tail {
log.Debug("Skeleton chain not yet consumed", "filled", number, "hash", filled.Hash(), "tail", s.progress.Subchains[0].Tail)
return nil
}
// If the latest fill was on a different subchain, it means the backfiller
@ -1206,6 +1278,7 @@ func (s *skeleton) cleanStales(filled *types.Header) error {
if err := batch.Write(); err != nil {
log.Crit("Failed to write beacon trim data", "err", err)
}
log.Debug("Cleaned stale beacon headers", "start", start, "end", end)
return nil
}

View file

@ -20,6 +20,7 @@ import (
"encoding/json"
"errors"
"fmt"
"math"
"math/big"
"sync/atomic"
"testing"
@ -71,6 +72,12 @@ func (hf *hookedBackfiller) resume() {
}
}
// fakeChainReader is a chainReader stub for the skeleton tests.
type fakeChainReader struct{}

// CurrentSnapBlock returns a dummy header whose number is math.MaxInt64, so
// head-position comparisons against it effectively always succeed in tests.
func (fc *fakeChainReader) CurrentSnapBlock() *types.Header {
	return &types.Header{Number: big.NewInt(math.MaxInt64)}
}
// skeletonTestPeer is a mock peer that can only serve header requests from a
// pre-prepared header chain (which may be arbitrarily wrong for testing).
//
@ -369,7 +376,7 @@ func TestSkeletonSyncInit(t *testing.T) {
// Create a skeleton sync and run a cycle
wait := make(chan struct{})
skeleton := newSkeleton(db, newPeerSet(), nil, newHookedBackfiller())
skeleton := newSkeleton(db, newPeerSet(), nil, newHookedBackfiller(), &fakeChainReader{})
skeleton.syncStarting = func() { close(wait) }
skeleton.Sync(tt.head, nil, true)
@ -472,7 +479,7 @@ func TestSkeletonSyncExtend(t *testing.T) {
// Create a skeleton sync and run a cycle
wait := make(chan struct{})
skeleton := newSkeleton(db, newPeerSet(), nil, newHookedBackfiller())
skeleton := newSkeleton(db, newPeerSet(), nil, newHookedBackfiller(), &fakeChainReader{})
skeleton.syncStarting = func() { close(wait) }
skeleton.Sync(tt.head, nil, true)
@ -885,7 +892,7 @@ func TestSkeletonSyncRetrievals(t *testing.T) {
}
}
// Create a skeleton sync and run a cycle
skeleton := newSkeleton(db, peerset, drop, filler)
skeleton := newSkeleton(db, peerset, drop, filler, &fakeChainReader{})
skeleton.Sync(tt.head, nil, true)
// Wait a bit (bleah) for the initial sync loop to go to idle. This might

View file

@ -75,7 +75,7 @@ func newSyncModer(mode ethconfig.SyncMode, chain BlockChain, disk ethdb.KeyValue
// get retrieves the current sync mode, either explicitly set, or derived
// from the chain status.
func (m *syncModer) get() ethconfig.SyncMode {
func (m *syncModer) get(report bool) ethconfig.SyncMode {
m.lock.Lock()
defer m.lock.Unlock()
@ -83,12 +83,16 @@ func (m *syncModer) get() ethconfig.SyncMode {
if m.mode == ethconfig.SnapSync {
return ethconfig.SnapSync
}
logger := log.Debug
if report {
logger = log.Info
}
// We are probably in full sync, but we might have rewound to before the
// snap sync pivot, check if we should re-enable snap sync.
head := m.chain.CurrentBlock()
if pivot := rawdb.ReadLastPivotNumber(m.disk); pivot != nil {
if head.Number.Uint64() < *pivot {
log.Info("Reenabled snap-sync as chain is lagging behind the pivot", "head", head.Number, "pivot", pivot)
logger("Reenabled snap-sync as chain is lagging behind the pivot", "head", head.Number, "pivot", pivot)
return ethconfig.SnapSync
}
}
@ -96,7 +100,7 @@ func (m *syncModer) get() ethconfig.SyncMode {
// the head state, forcefully rerun the snap sync. Note it doesn't mean the
// persistent state is corrupted, just mismatch with the head block.
if !m.chain.HasState(head.Root) {
log.Info("Reenabled snap-sync as chain is stateless")
logger("Reenabled snap-sync as chain is stateless")
return ethconfig.SnapSync
}
// Nope, we're really full syncing

View file

@ -170,10 +170,10 @@ type TxFetcher struct {
alternates map[common.Hash]map[string]struct{} // In-flight transaction alternate origins if retrieval fails
// Callbacks
hasTx func(common.Hash) bool // Retrieves a tx from the local txpool
addTxs func([]*types.Transaction) []error // Insert a batch of transactions into local txpool
fetchTxs func(string, []common.Hash) error // Retrieves a set of txs from a remote peer
dropPeer func(string) // Drops a peer in case of announcement violation
validateMeta func(common.Hash, byte) error // Validate a tx metadata based on the local txpool
addTxs func([]*types.Transaction) []error // Insert a batch of transactions into local txpool
fetchTxs func(string, []common.Hash) error // Retrieves a set of txs from a remote peer
dropPeer func(string) // Drops a peer in case of announcement violation
step chan struct{} // Notification channel when the fetcher loop iterates
clock mclock.Clock // Monotonic clock or simulated clock for tests
@ -183,36 +183,36 @@ type TxFetcher struct {
// NewTxFetcher creates a transaction fetcher to retrieve transaction
// based on hash announcements.
func NewTxFetcher(hasTx func(common.Hash) bool, addTxs func([]*types.Transaction) []error, fetchTxs func(string, []common.Hash) error, dropPeer func(string)) *TxFetcher {
return NewTxFetcherForTests(hasTx, addTxs, fetchTxs, dropPeer, mclock.System{}, time.Now, nil)
func NewTxFetcher(validateMeta func(common.Hash, byte) error, addTxs func([]*types.Transaction) []error, fetchTxs func(string, []common.Hash) error, dropPeer func(string)) *TxFetcher {
return NewTxFetcherForTests(validateMeta, addTxs, fetchTxs, dropPeer, mclock.System{}, time.Now, nil)
}
// NewTxFetcherForTests is a testing method to mock out the realtime clock with
// a simulated version and the internal randomness with a deterministic one.
func NewTxFetcherForTests(
hasTx func(common.Hash) bool, addTxs func([]*types.Transaction) []error, fetchTxs func(string, []common.Hash) error, dropPeer func(string),
validateMeta func(common.Hash, byte) error, addTxs func([]*types.Transaction) []error, fetchTxs func(string, []common.Hash) error, dropPeer func(string),
clock mclock.Clock, realTime func() time.Time, rand *mrand.Rand) *TxFetcher {
return &TxFetcher{
notify: make(chan *txAnnounce),
cleanup: make(chan *txDelivery),
drop: make(chan *txDrop),
quit: make(chan struct{}),
waitlist: make(map[common.Hash]map[string]struct{}),
waittime: make(map[common.Hash]mclock.AbsTime),
waitslots: make(map[string]map[common.Hash]*txMetadataWithSeq),
announces: make(map[string]map[common.Hash]*txMetadataWithSeq),
announced: make(map[common.Hash]map[string]struct{}),
fetching: make(map[common.Hash]string),
requests: make(map[string]*txRequest),
alternates: make(map[common.Hash]map[string]struct{}),
underpriced: lru.NewCache[common.Hash, time.Time](maxTxUnderpricedSetSize),
hasTx: hasTx,
addTxs: addTxs,
fetchTxs: fetchTxs,
dropPeer: dropPeer,
clock: clock,
realTime: realTime,
rand: rand,
notify: make(chan *txAnnounce),
cleanup: make(chan *txDelivery),
drop: make(chan *txDrop),
quit: make(chan struct{}),
waitlist: make(map[common.Hash]map[string]struct{}),
waittime: make(map[common.Hash]mclock.AbsTime),
waitslots: make(map[string]map[common.Hash]*txMetadataWithSeq),
announces: make(map[string]map[common.Hash]*txMetadataWithSeq),
announced: make(map[common.Hash]map[string]struct{}),
fetching: make(map[common.Hash]string),
requests: make(map[string]*txRequest),
alternates: make(map[common.Hash]map[string]struct{}),
underpriced: lru.NewCache[common.Hash, time.Time](maxTxUnderpricedSetSize),
validateMeta: validateMeta,
addTxs: addTxs,
fetchTxs: fetchTxs,
dropPeer: dropPeer,
clock: clock,
realTime: realTime,
rand: rand,
}
}
@ -235,19 +235,26 @@ func (f *TxFetcher) Notify(peer string, types []byte, sizes []uint32, hashes []c
underpriced int64
)
for i, hash := range hashes {
switch {
case f.hasTx(hash):
err := f.validateMeta(hash, types[i])
if errors.Is(err, txpool.ErrAlreadyKnown) {
duplicate++
case f.isKnownUnderpriced(hash):
underpriced++
default:
unknownHashes = append(unknownHashes, hash)
// Transaction metadata has been available since eth68, and all
// legacy eth protocols (prior to eth68) have been deprecated.
// Therefore, metadata is always expected in the announcement.
unknownMetas = append(unknownMetas, txMetadata{kind: types[i], size: sizes[i]})
continue
}
if err != nil {
continue
}
if f.isKnownUnderpriced(hash) {
underpriced++
continue
}
unknownHashes = append(unknownHashes, hash)
// Transaction metadata has been available since eth68, and all
// legacy eth protocols (prior to eth68) have been deprecated.
// Therefore, metadata is always expected in the announcement.
unknownMetas = append(unknownMetas, txMetadata{kind: types[i], size: sizes[i]})
}
txAnnounceKnownMeter.Mark(duplicate)
txAnnounceUnderpricedMeter.Mark(underpriced)

View file

@ -93,7 +93,7 @@ func TestTransactionFetcherWaiting(t *testing.T) {
testTransactionFetcherParallel(t, txFetcherTest{
init: func() *TxFetcher {
return NewTxFetcher(
func(common.Hash) bool { return false },
func(common.Hash, byte) error { return nil },
nil,
func(string, []common.Hash) error { return nil },
nil,
@ -295,7 +295,7 @@ func TestTransactionFetcherSkipWaiting(t *testing.T) {
testTransactionFetcherParallel(t, txFetcherTest{
init: func() *TxFetcher {
return NewTxFetcher(
func(common.Hash) bool { return false },
func(common.Hash, byte) error { return nil },
nil,
func(string, []common.Hash) error { return nil },
nil,
@ -385,7 +385,7 @@ func TestTransactionFetcherSingletonRequesting(t *testing.T) {
testTransactionFetcherParallel(t, txFetcherTest{
init: func() *TxFetcher {
return NewTxFetcher(
func(common.Hash) bool { return false },
func(common.Hash, byte) error { return nil },
nil,
func(string, []common.Hash) error { return nil },
nil,
@ -490,7 +490,7 @@ func TestTransactionFetcherFailedRescheduling(t *testing.T) {
testTransactionFetcherParallel(t, txFetcherTest{
init: func() *TxFetcher {
return NewTxFetcher(
func(common.Hash) bool { return false },
func(common.Hash, byte) error { return nil },
nil,
func(origin string, hashes []common.Hash) error {
<-proceed
@ -574,7 +574,7 @@ func TestTransactionFetcherCleanup(t *testing.T) {
testTransactionFetcherParallel(t, txFetcherTest{
init: func() *TxFetcher {
return NewTxFetcher(
func(common.Hash) bool { return false },
func(common.Hash, byte) error { return nil },
func(txs []*types.Transaction) []error {
return make([]error, len(txs))
},
@ -618,7 +618,7 @@ func TestTransactionFetcherCleanupEmpty(t *testing.T) {
testTransactionFetcherParallel(t, txFetcherTest{
init: func() *TxFetcher {
return NewTxFetcher(
func(common.Hash) bool { return false },
func(common.Hash, byte) error { return nil },
func(txs []*types.Transaction) []error {
return make([]error, len(txs))
},
@ -661,7 +661,7 @@ func TestTransactionFetcherMissingRescheduling(t *testing.T) {
testTransactionFetcherParallel(t, txFetcherTest{
init: func() *TxFetcher {
return NewTxFetcher(
func(common.Hash) bool { return false },
func(common.Hash, byte) error { return nil },
func(txs []*types.Transaction) []error {
return make([]error, len(txs))
},
@ -722,7 +722,7 @@ func TestTransactionFetcherMissingCleanup(t *testing.T) {
testTransactionFetcherParallel(t, txFetcherTest{
init: func() *TxFetcher {
return NewTxFetcher(
func(common.Hash) bool { return false },
func(common.Hash, byte) error { return nil },
func(txs []*types.Transaction) []error {
return make([]error, len(txs))
},
@ -771,7 +771,7 @@ func TestTransactionFetcherBroadcasts(t *testing.T) {
testTransactionFetcherParallel(t, txFetcherTest{
init: func() *TxFetcher {
return NewTxFetcher(
func(common.Hash) bool { return false },
func(common.Hash, byte) error { return nil },
func(txs []*types.Transaction) []error {
return make([]error, len(txs))
},
@ -827,7 +827,7 @@ func TestTransactionFetcherWaitTimerResets(t *testing.T) {
testTransactionFetcherParallel(t, txFetcherTest{
init: func() *TxFetcher {
return NewTxFetcher(
func(common.Hash) bool { return false },
func(common.Hash, byte) error { return nil },
nil,
func(string, []common.Hash) error { return nil },
nil,
@ -897,7 +897,7 @@ func TestTransactionFetcherTimeoutRescheduling(t *testing.T) {
testTransactionFetcherParallel(t, txFetcherTest{
init: func() *TxFetcher {
return NewTxFetcher(
func(common.Hash) bool { return false },
func(common.Hash, byte) error { return nil },
func(txs []*types.Transaction) []error {
return make([]error, len(txs))
},
@ -975,7 +975,7 @@ func TestTransactionFetcherTimeoutTimerResets(t *testing.T) {
testTransactionFetcherParallel(t, txFetcherTest{
init: func() *TxFetcher {
return NewTxFetcher(
func(common.Hash) bool { return false },
func(common.Hash, byte) error { return nil },
nil,
func(string, []common.Hash) error { return nil },
nil,
@ -1053,7 +1053,7 @@ func TestTransactionFetcherRateLimiting(t *testing.T) {
testTransactionFetcherParallel(t, txFetcherTest{
init: func() *TxFetcher {
return NewTxFetcher(
func(common.Hash) bool { return false },
func(common.Hash, byte) error { return nil },
nil,
func(string, []common.Hash) error { return nil },
nil,
@ -1083,7 +1083,7 @@ func TestTransactionFetcherBandwidthLimiting(t *testing.T) {
testTransactionFetcherParallel(t, txFetcherTest{
init: func() *TxFetcher {
return NewTxFetcher(
func(common.Hash) bool { return false },
func(common.Hash, byte) error { return nil },
nil,
func(string, []common.Hash) error { return nil },
nil,
@ -1200,7 +1200,7 @@ func TestTransactionFetcherDoSProtection(t *testing.T) {
testTransactionFetcherParallel(t, txFetcherTest{
init: func() *TxFetcher {
return NewTxFetcher(
func(common.Hash) bool { return false },
func(common.Hash, byte) error { return nil },
nil,
func(string, []common.Hash) error { return nil },
nil,
@ -1267,7 +1267,7 @@ func TestTransactionFetcherUnderpricedDedup(t *testing.T) {
testTransactionFetcherParallel(t, txFetcherTest{
init: func() *TxFetcher {
return NewTxFetcher(
func(common.Hash) bool { return false },
func(common.Hash, byte) error { return nil },
func(txs []*types.Transaction) []error {
errs := make([]error, len(txs))
for i := 0; i < len(errs); i++ {
@ -1368,7 +1368,7 @@ func TestTransactionFetcherUnderpricedDoSProtection(t *testing.T) {
testTransactionFetcher(t, txFetcherTest{
init: func() *TxFetcher {
return NewTxFetcher(
func(common.Hash) bool { return false },
func(common.Hash, byte) error { return nil },
func(txs []*types.Transaction) []error {
errs := make([]error, len(txs))
for i := 0; i < len(errs); i++ {
@ -1400,7 +1400,7 @@ func TestTransactionFetcherOutOfBoundDeliveries(t *testing.T) {
testTransactionFetcherParallel(t, txFetcherTest{
init: func() *TxFetcher {
return NewTxFetcher(
func(common.Hash) bool { return false },
func(common.Hash, byte) error { return nil },
func(txs []*types.Transaction) []error {
return make([]error, len(txs))
},
@ -1459,7 +1459,7 @@ func TestTransactionFetcherDrop(t *testing.T) {
testTransactionFetcherParallel(t, txFetcherTest{
init: func() *TxFetcher {
return NewTxFetcher(
func(common.Hash) bool { return false },
func(common.Hash, byte) error { return nil },
func(txs []*types.Transaction) []error {
return make([]error, len(txs))
},
@ -1533,7 +1533,7 @@ func TestTransactionFetcherDropRescheduling(t *testing.T) {
testTransactionFetcherParallel(t, txFetcherTest{
init: func() *TxFetcher {
return NewTxFetcher(
func(common.Hash) bool { return false },
func(common.Hash, byte) error { return nil },
func(txs []*types.Transaction) []error {
return make([]error, len(txs))
},
@ -1579,7 +1579,7 @@ func TestInvalidAnnounceMetadata(t *testing.T) {
testTransactionFetcherParallel(t, txFetcherTest{
init: func() *TxFetcher {
return NewTxFetcher(
func(common.Hash) bool { return false },
func(common.Hash, byte) error { return nil },
func(txs []*types.Transaction) []error {
return make([]error, len(txs))
},
@ -1662,7 +1662,7 @@ func TestTransactionFetcherFuzzCrash01(t *testing.T) {
testTransactionFetcherParallel(t, txFetcherTest{
init: func() *TxFetcher {
return NewTxFetcher(
func(common.Hash) bool { return false },
func(common.Hash, byte) error { return nil },
func(txs []*types.Transaction) []error {
return make([]error, len(txs))
},
@ -1690,7 +1690,7 @@ func TestTransactionFetcherFuzzCrash02(t *testing.T) {
testTransactionFetcherParallel(t, txFetcherTest{
init: func() *TxFetcher {
return NewTxFetcher(
func(common.Hash) bool { return false },
func(common.Hash, byte) error { return nil },
func(txs []*types.Transaction) []error {
return make([]error, len(txs))
},
@ -1720,7 +1720,7 @@ func TestTransactionFetcherFuzzCrash03(t *testing.T) {
testTransactionFetcherParallel(t, txFetcherTest{
init: func() *TxFetcher {
return NewTxFetcher(
func(common.Hash) bool { return false },
func(common.Hash, byte) error { return nil },
func(txs []*types.Transaction) []error {
return make([]error, len(txs))
},
@ -1759,7 +1759,7 @@ func TestTransactionFetcherFuzzCrash04(t *testing.T) {
testTransactionFetcherParallel(t, txFetcherTest{
init: func() *TxFetcher {
return NewTxFetcher(
func(common.Hash) bool { return false },
func(common.Hash, byte) error { return nil },
func(txs []*types.Transaction) []error {
return make([]error, len(txs))
},
@ -1794,7 +1794,7 @@ func TestBlobTransactionAnnounce(t *testing.T) {
testTransactionFetcherParallel(t, txFetcherTest{
init: func() *TxFetcher {
return NewTxFetcher(
func(common.Hash) bool { return false },
func(common.Hash, byte) error { return nil },
nil,
func(string, []common.Hash) error { return nil },
nil,
@ -1862,7 +1862,7 @@ func TestTransactionFetcherDropAlternates(t *testing.T) {
testTransactionFetcherParallel(t, txFetcherTest{
init: func() *TxFetcher {
return NewTxFetcher(
func(common.Hash) bool { return false },
func(common.Hash, byte) error { return nil },
func(txs []*types.Transaction) []error {
return make([]error, len(txs))
},
@ -1908,6 +1908,35 @@ func TestTransactionFetcherDropAlternates(t *testing.T) {
})
}
func TestTransactionFetcherWrongMetadata(t *testing.T) {
testTransactionFetcherParallel(t, txFetcherTest{
init: func() *TxFetcher {
return NewTxFetcher(
func(_ common.Hash, kind byte) error {
switch kind {
case types.LegacyTxType, types.AccessListTxType, types.DynamicFeeTxType, types.BlobTxType, types.SetCodeTxType:
return nil
}
return types.ErrTxTypeNotSupported
},
func(txs []*types.Transaction) []error {
return make([]error, len(txs))
},
func(string, []common.Hash) error { return nil },
nil,
)
},
steps: []interface{}{
doTxNotify{peer: "A", hashes: []common.Hash{{0x01}, {0x02}}, types: []byte{0xff, types.LegacyTxType}, sizes: []uint32{111, 222}},
isWaiting(map[string][]announce{
"A": {
{common.Hash{0x02}, types.LegacyTxType, 222},
},
}),
},
})
}
func testTransactionFetcherParallel(t *testing.T, tt txFetcherTest) {
t.Parallel()
testTransactionFetcher(t, tt)
@ -2245,7 +2274,7 @@ func TestTransactionForgotten(t *testing.T) {
}
fetcher := NewTxFetcherForTests(
func(common.Hash) bool { return false },
func(common.Hash, byte) error { return nil },
func(txs []*types.Transaction) []error {
errs := make([]error, len(txs))
for i := 0; i < len(errs); i++ {

View file

@ -92,6 +92,9 @@ type txPool interface {
// can decide whether to receive notifications only for newly seen transactions
// or also for reorged out ones.
SubscribeTransactions(ch chan<- core.NewTxsEvent, reorgs bool) event.Subscription
// FilterType returns whether the given tx type is supported by the txPool.
FilterType(kind byte) bool
}
// handlerConfig is the collection of initialization parameters to create a full
@ -176,7 +179,18 @@ func newHandler(config *handlerConfig) (*handler, error) {
addTxs := func(txs []*types.Transaction) []error {
return h.txpool.Add(txs, false)
}
h.txFetcher = fetcher.NewTxFetcher(h.txpool.Has, addTxs, fetchTx, h.removePeer)
validateMeta := func(tx common.Hash, kind byte) error {
if h.txpool.Has(tx) {
return txpool.ErrAlreadyKnown
}
if !h.txpool.FilterType(kind) {
return types.ErrTxTypeNotSupported
}
return nil
}
h.txFetcher = fetcher.NewTxFetcher(validateMeta, addTxs, fetchTx, h.removePeer)
return h, nil
}

View file

@ -163,6 +163,15 @@ func (p *testTxPool) SubscribeTransactions(ch chan<- core.NewTxsEvent, reorgs bo
return p.txFeed.Subscribe(ch)
}
// FilterType should check whether the pool supports the given type of transactions.
func (p *testTxPool) FilterType(kind byte) bool {
switch kind {
case types.LegacyTxType, types.AccessListTxType, types.DynamicFeeTxType, types.BlobTxType, types.SetCodeTxType:
return true
}
return false
}
// testHandler is a live implementation of the Ethereum protocol handler, just
// preinitialized with some sane testing defaults and the transaction pool mocked
// out.

View file

@ -513,7 +513,7 @@ func defaultIgnoredOpcodes() []hexutil.Uint64 {
ignored := make([]hexutil.Uint64, 0, 64)
// Allow all PUSHx, DUPx and SWAPx opcodes as they have sequential codes
for op := vm.PUSH0; op < vm.SWAP16; op++ {
for op := vm.PUSH0; op <= vm.SWAP16; op++ {
ignored = append(ignored, hexutil.Uint64(op))
}

View file

@ -66,7 +66,7 @@ type prestateTracer struct {
pre stateMap
post stateMap
to common.Address
config prestateTracerConfig
config PrestateTracerConfig
chainConfig *params.ChainConfig
interrupt atomic.Bool // Atomic flag to signal execution interruption
reason error // Textual reason for the interruption
@ -74,7 +74,7 @@ type prestateTracer struct {
deleted map[common.Address]bool
}
type prestateTracerConfig struct {
type PrestateTracerConfig struct {
DiffMode bool `json:"diffMode"` // If true, this tracer will return state modifications
DisableCode bool `json:"disableCode"` // If true, this tracer will not return the contract code
DisableStorage bool `json:"disableStorage"` // If true, this tracer will not return the contract storage
@ -82,7 +82,7 @@ type prestateTracerConfig struct {
}
func newPrestateTracer(ctx *tracers.Context, cfg json.RawMessage, chainConfig *params.ChainConfig) (*tracers.Tracer, error) {
var config prestateTracerConfig
var config PrestateTracerConfig
if err := json.Unmarshal(cfg, &config); err != nil {
return nil, err
}

View file

@ -104,7 +104,10 @@ func (ec *Client) GetProof(ctx context.Context, account common.Address, keys []s
var res accountResult
err := ec.c.CallContext(ctx, &res, "eth_getProof", account, keys, toBlockNumArg(blockNumber))
// Turn hexutils back to normal datatypes
if err != nil {
return nil, err
}
// Turn hexutils back to normal data types
storageResults := make([]StorageResult, 0, len(res.StorageProof))
for _, st := range res.StorageProof {
storageResults = append(storageResults, StorageResult{
@ -122,7 +125,7 @@ func (ec *Client) GetProof(ctx context.Context, account common.Address, keys []s
StorageHash: res.StorageHash,
StorageProof: storageResults,
}
return &result, err
return &result, nil
}
// CallContract executes a message call transaction, which is directly executed in the VM

2
go.mod
View file

@ -15,7 +15,6 @@ require (
github.com/cockroachdb/pebble v1.1.5
github.com/consensys/gnark-crypto v0.18.1
github.com/crate-crypto/go-eth-kzg v1.4.0
github.com/crate-crypto/go-ipa v0.0.0-20240724233137-53bbb0ceb27a
github.com/davecgh/go-spew v1.1.1
github.com/dchest/siphash v1.2.3
github.com/deckarep/golang-set/v2 v2.6.0
@ -24,7 +23,6 @@ require (
github.com/dop251/goja v0.0.0-20230605162241-28ee0ee714f3
github.com/ethereum/c-kzg-4844/v2 v2.1.5
github.com/ethereum/go-bigmodexpfix v0.0.0-20250911101455-f9e208c548ab
github.com/ethereum/go-verkle v0.2.2
github.com/fatih/color v1.16.0
github.com/ferranbt/fastssz v0.1.4
github.com/fsnotify/fsnotify v1.6.0

4
go.sum
View file

@ -81,8 +81,6 @@ github.com/cpuguy83/go-md2man/v2 v2.0.5 h1:ZtcqGrnekaHpVLArFSe4HK5DoKx1T0rq2DwVB
github.com/cpuguy83/go-md2man/v2 v2.0.5/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/crate-crypto/go-eth-kzg v1.4.0 h1:WzDGjHk4gFg6YzV0rJOAsTK4z3Qkz5jd4RE3DAvPFkg=
github.com/crate-crypto/go-eth-kzg v1.4.0/go.mod h1:J9/u5sWfznSObptgfa92Jq8rTswn6ahQWEuiLHOjCUI=
github.com/crate-crypto/go-ipa v0.0.0-20240724233137-53bbb0ceb27a h1:W8mUrRp6NOVl3J+MYp5kPMoUZPp7aOYHtaua31lwRHg=
github.com/crate-crypto/go-ipa v0.0.0-20240724233137-53bbb0ceb27a/go.mod h1:sTwzHBvIzm2RfVCGNEBZgRyjwK40bVoun3ZnGOCafNM=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/cyberdelia/templates v0.0.0-20141128023046-ca7fffd4298c/go.mod h1:GyV+0YP4qX0UQ7r2MoYZ+AvYDp12OF5yg4q8rGnyNh4=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
@ -117,8 +115,6 @@ github.com/ethereum/c-kzg-4844/v2 v2.1.5 h1:aVtoLK5xwJ6c5RiqO8g8ptJ5KU+2Hdquf6G3
github.com/ethereum/c-kzg-4844/v2 v2.1.5/go.mod h1:u59hRTTah4Co6i9fDWtiCjTrblJv0UwsqZKCc0GfgUs=
github.com/ethereum/go-bigmodexpfix v0.0.0-20250911101455-f9e208c548ab h1:rvv6MJhy07IMfEKuARQ9TKojGqLVNxQajaXEp/BoqSk=
github.com/ethereum/go-bigmodexpfix v0.0.0-20250911101455-f9e208c548ab/go.mod h1:IuLm4IsPipXKF7CW5Lzf68PIbZ5yl7FFd74l/E0o9A8=
github.com/ethereum/go-verkle v0.2.2 h1:I2W0WjnrFUIzzVPwm8ykY+7pL2d4VhlsePn4j7cnFk8=
github.com/ethereum/go-verkle v0.2.2/go.mod h1:M3b90YRnzqKyyzBEWJGqj8Qff4IDeXnzFw0P9bFw3uk=
github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM=
github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE=
github.com/ferranbt/fastssz v0.1.4 h1:OCDB+dYDEQDvAgtAGnTSidK1Pe2tW3nFV40XyMkTeDY=

View file

@ -707,6 +707,9 @@ func (b *Block) resolveHeader(ctx context.Context) (*types.Header, error) {
if err != nil {
return nil, err
}
if b.header == nil {
return nil, nil
}
if b.hash == (common.Hash{}) {
b.hash = b.header.Hash()
}

View file

@ -233,7 +233,7 @@ func (sim *simulator) processBlock(ctx context.Context, block *simBlock, header,
if block.BlockOverrides.BlobBaseFee != nil {
blockContext.BlobBaseFee = block.BlockOverrides.BlobBaseFee.ToInt()
}
precompiles := sim.activePrecompiles(sim.base)
precompiles := sim.activePrecompiles(header)
// State overrides are applied prior to execution of a block
if err := block.StateOverrides.Apply(sim.state, precompiles); err != nil {
return nil, nil, nil, err

View file

@ -185,9 +185,10 @@ func (t *Tracker) Fulfil(peer string, version uint, code uint64, id uint64) {
return
}
// Everything matches, mark the request serviced and meter it
wasHead := req.expire.Prev() == nil
t.expire.Remove(req.expire)
delete(t.pending, id)
if req.expire.Prev() == nil {
if wasHead {
if t.wake.Stop() {
t.schedule()
}

View file

@ -32,7 +32,7 @@ const (
MaximumExtraDataSize uint64 = 32 // Maximum size extra data may be after Genesis.
ExpByteGas uint64 = 10 // Times ceil(log256(exponent)) for the EXP instruction.
SloadGas uint64 = 50 // Multiplied by the number of 32-byte words that are copied (round up) for any *COPY operation and added.
SloadGas uint64 = 50 //
CallValueTransferGas uint64 = 9000 // Paid for CALL when the value transfer is non-zero.
CallNewAccountGas uint64 = 25000 // Paid for CALL when the destination address didn't exist prior.
TxGas uint64 = 21000 // Per transaction not creating a contract. NOTE: Not payable on data of calls between transactions.
@ -82,7 +82,7 @@ const (
CallCreateDepth uint64 = 1024 // Maximum depth of call/create stack.
ExpGas uint64 = 10 // Once per EXP instruction
LogGas uint64 = 375 // Per LOG* operation.
CopyGas uint64 = 3 //
CopyGas uint64 = 3 // Multiplied by the number of 32-byte words that are copied (round up) for any *COPY operation and added.
StackLimit uint64 = 1024 // Maximum size of VM stack allowed.
TierStepGas uint64 = 0 // Once per operation, for a selection of them.
LogTopicGas uint64 = 375 // Multiplied by the * of the LOG*, per LOG transaction. e.g. LOG0 incurs 0 * c_txLogTopicGas, LOG4 incurs 4 * c_txLogTopicGas.

View file

@ -116,15 +116,7 @@ func (t *BlockTest) Run(snapshotter bool, scheme string, witness bool, tracer *t
if !ok {
return UnsupportedForkError{t.json.Network}
}
return t.run(config, snapshotter, scheme, witness, tracer, postCheck)
}
// Network returns the network/fork name for this test.
func (t *BlockTest) Network() string {
return t.json.Network
}
func (t *BlockTest) run(config *params.ChainConfig, snapshotter bool, scheme string, witness bool, tracer *tracing.Hooks, postCheck func(error, *core.BlockChain)) (result error) {
// import pre accounts & construct test genesis block & state root
// Commit genesis state
var (
@ -212,6 +204,11 @@ func (t *BlockTest) run(config *params.ChainConfig, snapshotter bool, scheme str
return t.validateImportedHeaders(chain, validBlocks)
}
// Network returns the network/fork name for this test.
func (t *BlockTest) Network() string {
return t.json.Network
}
func (t *BlockTest) genesis(config *params.ChainConfig) *core.Genesis {
return &core.Genesis{
Config: config,

View file

@ -32,7 +32,6 @@ import (
type kv struct {
k, v []byte
t bool
}
type fuzzer struct {
@ -62,8 +61,8 @@ func (f *fuzzer) randomTrie(n int) (*trie.Trie, map[string]*kv) {
size := f.readInt()
// Fill it with some fluff
for i := byte(0); i < byte(size); i++ {
value := &kv{common.LeftPadBytes([]byte{i}, 32), []byte{i}, false}
value2 := &kv{common.LeftPadBytes([]byte{i + 10}, 32), []byte{i}, false}
value := &kv{common.LeftPadBytes([]byte{i}, 32), []byte{i}}
value2 := &kv{common.LeftPadBytes([]byte{i + 10}, 32), []byte{i}}
trie.MustUpdate(value.k, value.v)
trie.MustUpdate(value2.k, value2.v)
vals[string(value.k)] = value
@ -76,7 +75,7 @@ func (f *fuzzer) randomTrie(n int) (*trie.Trie, map[string]*kv) {
for i := 0; i < n; i++ {
k := f.randBytes(32)
v := f.randBytes(20)
value := &kv{k, v, false}
value := &kv{k, v}
trie.MustUpdate(k, v)
vals[string(k)] = value
if f.exhausted {

View file

@ -78,7 +78,7 @@ func fuzz(input []byte) int {
rand := rand.New(rand.NewSource(0x3a29)) // Same used in package tests!!!
f := fetcher.NewTxFetcherForTests(
func(common.Hash) bool { return false },
func(common.Hash, byte) error { return nil },
func(txs []*types.Transaction) []error {
return make([]error, len(txs))
},

View file

@ -33,8 +33,17 @@ const (
)
var (
zeroHash = common.Hash{}
codeOffset = uint256.NewInt(128)
zeroInt = uint256.NewInt(0)
zeroHash = common.Hash{}
verkleNodeWidthLog2 = 8
headerStorageOffset = uint256.NewInt(64)
codeOffset = uint256.NewInt(128)
codeStorageDelta = uint256.NewInt(0).Sub(codeOffset, headerStorageOffset)
mainStorageOffsetLshVerkleNodeWidth = new(uint256.Int).Lsh(uint256.NewInt(1), 248-uint(verkleNodeWidthLog2))
CodeOffset = uint256.NewInt(128)
VerkleNodeWidth = uint256.NewInt(256)
HeaderStorageOffset = uint256.NewInt(64)
VerkleNodeWidthLog2 = 8
)
func GetBinaryTreeKey(addr common.Address, key []byte) []byte {
@ -83,3 +92,38 @@ func GetBinaryTreeKeyCodeChunk(address common.Address, chunknr *uint256.Int) []b
chunkOffset := new(uint256.Int).Add(codeOffset, chunknr).Bytes()
return GetBinaryTreeKey(address, chunkOffset)
}
func StorageIndex(storageKey []byte) (*uint256.Int, byte) {
// If the storage slot is in the header, we need to add the header offset.
var key uint256.Int
key.SetBytes(storageKey)
if key.Cmp(codeStorageDelta) < 0 {
// This addition is always safe; it can't ever overflow since pos<codeStorageDelta.
key.Add(headerStorageOffset, &key)
// In this branch, the tree-index is zero since we're in the account header,
// and the sub-index is the LSB of the modified storage key.
return zeroInt, byte(key[0] & 0xFF)
}
// If the storage slot is in the main storage, we need to add the main storage offset.
// The first MAIN_STORAGE_OFFSET group will see its
// first 64 slots unreachable. This is either a typo in the
// spec or intended to conserve the 256-u256
// alignment. If we decide to ever access these 64
// slots, uncomment this.
// // Get the new offset since we now know that we are above 64.
// pos.Sub(&pos, codeStorageDelta)
// suffix := byte(pos[0] & 0xFF)
suffix := storageKey[len(storageKey)-1]
// We first divide by VerkleNodeWidth to create room to avoid an overflow next.
key.Rsh(&key, uint(verkleNodeWidthLog2))
// We add mainStorageOffset/VerkleNodeWidth which can't overflow.
key.Add(&key, mainStorageOffsetLshVerkleNodeWidth)
// The sub-index is the LSB of the original storage key, since mainStorageOffset
// doesn't affect this byte, so we can avoid masks or shifts.
return &key, suffix
}

View file

@ -79,10 +79,7 @@ func ChunkifyCode(code []byte) ChunkedCode {
chunks := make([]byte, chunkCount*HashSize)
for i := 0; i < chunkCount; i++ {
// number of bytes to copy, StemSize unless the end of the code has been reached.
end := StemSize * (i + 1)
if len(code) < end {
end = len(code)
}
end := min(len(code), StemSize*(i+1))
copy(chunks[i*HashSize+1:], code[StemSize*i:end]) // copy the code itself
// chunk offset = taken from the last chunk.

View file

@ -1,413 +0,0 @@
// Copyright 2023 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package utils
import (
"encoding/binary"
"sync"
"github.com/crate-crypto/go-ipa/bandersnatch/fr"
"github.com/ethereum/go-ethereum/common/lru"
"github.com/ethereum/go-ethereum/metrics"
"github.com/ethereum/go-verkle"
"github.com/holiman/uint256"
)
const (
BasicDataLeafKey = 0
CodeHashLeafKey = 1
BasicDataVersionOffset = 0
BasicDataCodeSizeOffset = 5
BasicDataNonceOffset = 8
BasicDataBalanceOffset = 16
)
var (
zero = uint256.NewInt(0)
verkleNodeWidthLog2 = 8
headerStorageOffset = uint256.NewInt(64)
codeOffset = uint256.NewInt(128)
verkleNodeWidth = uint256.NewInt(256)
codeStorageDelta = uint256.NewInt(0).Sub(codeOffset, headerStorageOffset)
mainStorageOffsetLshVerkleNodeWidth = new(uint256.Int).Lsh(uint256.NewInt(1), 248-uint(verkleNodeWidthLog2))
CodeOffset = uint256.NewInt(128)
VerkleNodeWidth = uint256.NewInt(256)
HeaderStorageOffset = uint256.NewInt(64)
VerkleNodeWidthLog2 = 8
index0Point *verkle.Point // pre-computed commitment of polynomial [2+256*64]
// cacheHitGauge is the metric to track how many cache hit occurred.
cacheHitGauge = metrics.NewRegisteredGauge("trie/verkle/cache/hit", nil)
// cacheMissGauge is the metric to track how many cache miss occurred.
cacheMissGauge = metrics.NewRegisteredGauge("trie/verkle/cache/miss", nil)
)
func init() {
// The byte array is the Marshalled output of the point computed as such:
//
// var (
// config = verkle.GetConfig()
// fr verkle.Fr
// )
// verkle.FromLEBytes(&fr, []byte{2, 64})
// point := config.CommitToPoly([]verkle.Fr{fr}, 1)
index0Point = new(verkle.Point)
err := index0Point.SetBytes([]byte{34, 25, 109, 242, 193, 5, 144, 224, 76, 52, 189, 92, 197, 126, 9, 145, 27, 152, 199, 130, 165, 3, 210, 27, 193, 131, 142, 28, 110, 26, 16, 191})
if err != nil {
panic(err)
}
}
// PointCache is the LRU cache for storing evaluated address commitment.
type PointCache struct {
lru lru.BasicLRU[string, *verkle.Point]
lock sync.RWMutex
}
// NewPointCache returns the cache with specified size.
func NewPointCache(maxItems int) *PointCache {
return &PointCache{
lru: lru.NewBasicLRU[string, *verkle.Point](maxItems),
}
}
// Get returns the cached commitment for the specified address, or computing
// it on the flight.
func (c *PointCache) Get(addr []byte) *verkle.Point {
c.lock.Lock()
defer c.lock.Unlock()
p, ok := c.lru.Get(string(addr))
if ok {
cacheHitGauge.Inc(1)
return p
}
cacheMissGauge.Inc(1)
p = evaluateAddressPoint(addr)
c.lru.Add(string(addr), p)
return p
}
// GetStem returns the first 31 bytes of the tree key as the tree stem. It only
// works for the account metadata whose treeIndex is 0.
func (c *PointCache) GetStem(addr []byte) []byte {
p := c.Get(addr)
return pointToHash(p, 0)[:31]
}
// GetTreeKey performs both the work of the spec's get_tree_key function, and that
// of pedersen_hash: it builds the polynomial in pedersen_hash without having to
// create a mostly zero-filled buffer and "type cast" it to a 128-long 16-byte
// array. Since at most the first 5 coefficients of the polynomial will be non-zero,
// these 5 coefficients are created directly.
func GetTreeKey(address []byte, treeIndex *uint256.Int, subIndex byte) []byte {
if len(address) < 32 {
var aligned [32]byte
address = append(aligned[:32-len(address)], address...)
}
// poly = [2+256*64, address_le_low, address_le_high, tree_index_le_low, tree_index_le_high]
var poly [5]fr.Element
// 32-byte address, interpreted as two little endian
// 16-byte numbers.
verkle.FromLEBytes(&poly[1], address[:16])
verkle.FromLEBytes(&poly[2], address[16:])
// treeIndex must be interpreted as a 32-byte aligned little-endian integer.
// e.g: if treeIndex is 0xAABBCC, we need the byte representation to be 0xCCBBAA00...00.
// poly[3] = LE({CC,BB,AA,00...0}) (16 bytes), poly[4]=LE({00,00,...}) (16 bytes).
//
// To avoid unnecessary endianness conversions for go-ipa, we do some trick:
// - poly[3]'s byte representation is the same as the *top* 16 bytes (trieIndexBytes[16:]) of
// 32-byte aligned big-endian representation (BE({00,...,AA,BB,CC})).
// - poly[4]'s byte representation is the same as the *low* 16 bytes (trieIndexBytes[:16]) of
// the 32-byte aligned big-endian representation (BE({00,00,...}).
trieIndexBytes := treeIndex.Bytes32()
verkle.FromBytes(&poly[3], trieIndexBytes[16:])
verkle.FromBytes(&poly[4], trieIndexBytes[:16])
cfg := verkle.GetConfig()
ret := cfg.CommitToPoly(poly[:], 0)
// add a constant point corresponding to poly[0]=[2+256*64].
ret.Add(ret, index0Point)
return pointToHash(ret, subIndex)
}
// GetTreeKeyWithEvaluatedAddress is basically identical to GetTreeKey, the only
// difference is a part of polynomial is already evaluated.
//
// Specifically, poly = [2+256*64, address_le_low, address_le_high] is already
// evaluated.
func GetTreeKeyWithEvaluatedAddress(evaluated *verkle.Point, treeIndex *uint256.Int, subIndex byte) []byte {
var poly [5]fr.Element
// little-endian, 32-byte aligned treeIndex
var index [32]byte
for i := 0; i < len(treeIndex); i++ {
binary.LittleEndian.PutUint64(index[i*8:(i+1)*8], treeIndex[i])
}
verkle.FromLEBytes(&poly[3], index[:16])
verkle.FromLEBytes(&poly[4], index[16:])
cfg := verkle.GetConfig()
ret := cfg.CommitToPoly(poly[:], 0)
// add the pre-evaluated address
ret.Add(ret, evaluated)
return pointToHash(ret, subIndex)
}
// BasicDataKey returns the verkle tree key of the basic data field for
// the specified account.
func BasicDataKey(address []byte) []byte {
return GetTreeKey(address, zero, BasicDataLeafKey)
}
// CodeHashKey returns the verkle tree key of the code hash field for
// the specified account.
func CodeHashKey(address []byte) []byte {
return GetTreeKey(address, zero, CodeHashLeafKey)
}
func codeChunkIndex(chunk *uint256.Int) (*uint256.Int, byte) {
var (
chunkOffset = new(uint256.Int).Add(codeOffset, chunk)
treeIndex, subIndexMod = new(uint256.Int).DivMod(chunkOffset, verkleNodeWidth, new(uint256.Int))
)
return treeIndex, byte(subIndexMod.Uint64())
}
// CodeChunkKey returns the verkle tree key of the code chunk for the
// specified account.
func CodeChunkKey(address []byte, chunk *uint256.Int) []byte {
treeIndex, subIndex := codeChunkIndex(chunk)
return GetTreeKey(address, treeIndex, subIndex)
}
func GetTreeKeyCodeChunkIndices(chunk *uint256.Int) (*uint256.Int, byte) {
chunkOffset := new(uint256.Int).Add(CodeOffset, chunk)
treeIndex := new(uint256.Int).Div(chunkOffset, VerkleNodeWidth)
subIndexMod := new(uint256.Int).Mod(chunkOffset, VerkleNodeWidth)
var subIndex byte
if len(subIndexMod) != 0 {
subIndex = byte(subIndexMod[0])
}
return treeIndex, subIndex
}
func GetTreeKeyCodeChunk(address []byte, chunk *uint256.Int) []byte {
treeIndex, subIndex := GetTreeKeyCodeChunkIndices(chunk)
return GetTreeKey(address, treeIndex, subIndex)
}
func StorageIndex(storageKey []byte) (*uint256.Int, byte) {
// If the storage slot is in the header, we need to add the header offset.
var key uint256.Int
key.SetBytes(storageKey)
if key.Cmp(codeStorageDelta) < 0 {
// This addition is always safe; it can't ever overflow since pos<codeStorageDelta.
key.Add(headerStorageOffset, &key)
// In this branch, the tree-index is zero since we're in the account header,
// and the sub-index is the LSB of the modified storage key.
return zero, byte(key[0] & 0xFF)
}
// If the storage slot is in the main storage, we need to add the main storage offset.
// The first MAIN_STORAGE_OFFSET group will see its
// first 64 slots unreachable. This is either a typo in the
// spec or intended to conserve the 256-u256
// alignment. If we decide to ever access these 64
// slots, uncomment this.
// // Get the new offset since we now know that we are above 64.
// pos.Sub(&pos, codeStorageDelta)
// suffix := byte(pos[0] & 0xFF)
suffix := storageKey[len(storageKey)-1]
// We first divide by VerkleNodeWidth to create room to avoid an overflow next.
key.Rsh(&key, uint(verkleNodeWidthLog2))
// We add mainStorageOffset/VerkleNodeWidth which can't overflow.
key.Add(&key, mainStorageOffsetLshVerkleNodeWidth)
// The sub-index is the LSB of the original storage key, since mainStorageOffset
// doesn't affect this byte, so we can avoid masks or shifts.
return &key, suffix
}
// StorageSlotKey returns the verkle tree key of the storage slot for the
// specified account.
func StorageSlotKey(address []byte, storageKey []byte) []byte {
treeIndex, subIndex := StorageIndex(storageKey)
return GetTreeKey(address, treeIndex, subIndex)
}
// BasicDataKeyWithEvaluatedAddress returns the verkle tree key of the basic data
// field for the specified account. The difference between BasicDataKey is the
// address evaluation is already computed to minimize the computational overhead.
func BasicDataKeyWithEvaluatedAddress(evaluated *verkle.Point) []byte {
return GetTreeKeyWithEvaluatedAddress(evaluated, zero, BasicDataLeafKey)
}
// CodeHashKeyWithEvaluatedAddress returns the verkle tree key of the code
// hash for the specified account. The difference between CodeHashKey is the
// address evaluation is already computed to minimize the computational overhead.
func CodeHashKeyWithEvaluatedAddress(evaluated *verkle.Point) []byte {
return GetTreeKeyWithEvaluatedAddress(evaluated, zero, CodeHashLeafKey)
}
// CodeChunkKeyWithEvaluatedAddress returns the verkle tree key of the code
// chunk for the specified account. The difference between CodeChunkKey is the
// address evaluation is already computed to minimize the computational overhead.
func CodeChunkKeyWithEvaluatedAddress(addressPoint *verkle.Point, chunk *uint256.Int) []byte {
treeIndex, subIndex := codeChunkIndex(chunk)
return GetTreeKeyWithEvaluatedAddress(addressPoint, treeIndex, subIndex)
}
// StorageSlotKeyWithEvaluatedAddress returns the verkle tree key of the storage
// slot for the specified account. The difference between StorageSlotKey is the
// address evaluation is already computed to minimize the computational overhead.
func StorageSlotKeyWithEvaluatedAddress(evaluated *verkle.Point, storageKey []byte) []byte {
treeIndex, subIndex := StorageIndex(storageKey)
return GetTreeKeyWithEvaluatedAddress(evaluated, treeIndex, subIndex)
}
func pointToHash(evaluated *verkle.Point, suffix byte) []byte {
retb := verkle.HashPointToBytes(evaluated)
retb[31] = suffix
return retb[:]
}
func evaluateAddressPoint(address []byte) *verkle.Point {
if len(address) < 32 {
var aligned [32]byte
address = append(aligned[:32-len(address)], address...)
}
var poly [3]fr.Element
// 32-byte address, interpreted as two little endian
// 16-byte numbers.
verkle.FromLEBytes(&poly[1], address[:16])
verkle.FromLEBytes(&poly[2], address[16:])
cfg := verkle.GetConfig()
ret := cfg.CommitToPoly(poly[:], 0)
// add a constant point
ret.Add(ret, index0Point)
return ret
}
// EvaluateAddressPoint commits to the given address (left-padded to 32 bytes)
// and returns the resulting curve point, offset by the constant index-0 point.
func EvaluateAddressPoint(address []byte) *verkle.Point {
	// Left-pad short addresses to a full 32 bytes.
	if len(address) < 32 {
		var padded [32]byte
		address = append(padded[:32-len(address)], address...)
	}
	var poly [3]fr.Element
	poly[0].SetZero()

	// The 32-byte address is interpreted as two little-endian 16-byte numbers.
	verkle.FromLEBytes(&poly[1], address[:16])
	verkle.FromLEBytes(&poly[2], address[16:])

	point := verkle.GetConfig().CommitToPoly(poly[:], 0)

	// add a constant point
	point.Add(point, index0Point)
	return point
}

// GetTreeKeyStorageSlotWithEvaluatedAddress returns the verkle tree key of the
// given storage slot, reusing a pre-evaluated address commitment.
func GetTreeKeyStorageSlotWithEvaluatedAddress(evaluated *verkle.Point, storageKey []byte) []byte {
	index, sub := GetTreeKeyStorageSlotTreeIndexes(storageKey)
	return GetTreeKeyWithEvaluatedAddess(evaluated, index, sub)
}
// GetTreeKeyStorageSlotTreeIndexes translates a raw storage key into the
// (treeIndex, subIndex) pair addressing its verkle leaf. Small slots land in
// the account header group (tree index zero); everything else lands in the
// main storage area.
func GetTreeKeyStorageSlotTreeIndexes(storageKey []byte) (*uint256.Int, byte) {
	var pos uint256.Int
	pos.SetBytes(storageKey)

	// If the storage slot is in the header, we need to add the header offset.
	if pos.Cmp(codeStorageDelta) < 0 {
		// This addition is always safe; it can't ever overflow since pos<codeStorageDelta.
		pos.Add(HeaderStorageOffset, &pos)

		// In this branch, the tree-index is zero since we're in the account header,
		// and the sub-index is the LSB of the modified storage key.
		return zero, byte(pos[0] & 0xFF)
	}
	// If the storage slot is in the main storage, we need to add the main storage offset.

	// The first MAIN_STORAGE_OFFSET group will see its
	// first 64 slots unreachable. This is either a typo in the
	// spec or intended to conserve the 256-u256
	// aligment. If we decide to ever access these 64
	// slots, uncomment this.
	// // Get the new offset since we now know that we are above 64.
	// pos.Sub(&pos, codeStorageDelta)
	// suffix := byte(pos[0] & 0xFF)
	suffix := storageKey[len(storageKey)-1]

	// We first divide by VerkleNodeWidth to create room to avoid an overflow next.
	pos.Rsh(&pos, uint(VerkleNodeWidthLog2))

	// We add mainStorageOffset/VerkleNodeWidth which can't overflow.
	pos.Add(&pos, mainStorageOffsetLshVerkleNodeWidth)

	// The sub-index is the LSB of the original storage key, since mainStorageOffset
	// doesn't affect this byte, so we can avoid masks or shifts.
	return &pos, suffix
}
// GetTreeKeyWithEvaluatedAddess computes the tree key for the given
// (treeIndex, subIndex) pair, reusing a pre-evaluated address commitment.
//
// NOTE(review): the misspelling ("Addess") is kept as-is since this exported
// name is referenced by callers; renaming it would be a breaking change.
func GetTreeKeyWithEvaluatedAddess(evaluated *verkle.Point, treeIndex *uint256.Int, subIndex byte) []byte {
	var poly [5]fr.Element

	// The first three coefficients are zero: the version marker and the two
	// address halves are already accounted for by the pre-evaluated point
	// added below.
	poly[0].SetZero()
	poly[1].SetZero()
	poly[2].SetZero()

	// The tree index is split into two 16-byte halves, low half into poly[3].
	trieIndexBytes := treeIndex.Bytes32()
	verkle.FromBytes(&poly[3], trieIndexBytes[16:])
	verkle.FromBytes(&poly[4], trieIndexBytes[:16])

	cfg := verkle.GetConfig()
	ret := cfg.CommitToPoly(poly[:], 0)

	// add the pre-evaluated address
	ret.Add(ret, evaluated)

	// Hash the combined commitment and stamp the sub-index as the last byte.
	return PointToHash(ret, subIndex)
}
// GetTreeKeyBasicDataEvaluatedAddress returns the verkle tree key of the basic
// data leaf for an account whose address commitment is already evaluated.
func GetTreeKeyBasicDataEvaluatedAddress(addrp *verkle.Point) []byte {
	return GetTreeKeyWithEvaluatedAddess(addrp, zero, BasicDataLeafKey)
}

// PointToHash maps a point commitment to a 32-byte tree key, overwriting the
// last byte with the given sub-index suffix.
func PointToHash(evaluated *verkle.Point, suffix byte) []byte {
	key := verkle.HashPointToBytes(evaluated)
	key[31] = suffix
	return key[:]
}

View file

@ -1,130 +0,0 @@
// Copyright 2023 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package utils
import (
"bytes"
"testing"
"github.com/ethereum/go-verkle"
"github.com/holiman/uint256"
)
// TestTreeKey checks that the pre-evaluated-address key helpers produce the
// same keys as their plain counterparts for every key kind.
func TestTreeKey(t *testing.T) {
	var (
		address      = []byte{0x01}
		addressEval  = evaluateAddressPoint(address)
		smallIndex   = uint256.NewInt(1)
		largeIndex   = uint256.NewInt(10000)
		smallStorage = []byte{0x1}
		largeStorage = bytes.Repeat([]byte{0xff}, 16)
	)
	checks := []struct {
		msg  string
		got  []byte
		want []byte
	}{
		{"Unmatched basic data key", BasicDataKey(address), BasicDataKeyWithEvaluatedAddress(addressEval)},
		{"Unmatched code hash key", CodeHashKey(address), CodeHashKeyWithEvaluatedAddress(addressEval)},
		{"Unmatched code chunk key", CodeChunkKey(address, smallIndex), CodeChunkKeyWithEvaluatedAddress(addressEval, smallIndex)},
		{"Unmatched code chunk key", CodeChunkKey(address, largeIndex), CodeChunkKeyWithEvaluatedAddress(addressEval, largeIndex)},
		{"Unmatched storage slot key", StorageSlotKey(address, smallStorage), StorageSlotKeyWithEvaluatedAddress(addressEval, smallStorage)},
		{"Unmatched storage slot key", StorageSlotKey(address, largeStorage), StorageSlotKeyWithEvaluatedAddress(addressEval, largeStorage)},
	}
	for _, c := range checks {
		if !bytes.Equal(c.got, c.want) {
			t.Fatal(c.msg)
		}
	}
}
// BenchmarkTreeKey measures deriving the basic-data key from a raw address
// (includes the per-call address point evaluation).
//
// goos: darwin
// goarch: amd64
// pkg: github.com/ethereum/go-ethereum/trie/utils
// cpu: VirtualApple @ 2.50GHz
// BenchmarkTreeKey
// BenchmarkTreeKey-8   	  398731	      2961 ns/op	      32 B/op	       1 allocs/op
func BenchmarkTreeKey(b *testing.B) {
	// Initialize the IPA settings which can be pretty expensive.
	verkle.GetConfig()

	b.ReportAllocs()
	b.ResetTimer()

	for i := 0; i < b.N; i++ {
		BasicDataKey([]byte{0x01})
	}
}

// BenchmarkTreeKeyWithEvaluation measures deriving the basic-data key when the
// address point has been evaluated up front (outside the timed loop).
//
// goos: darwin
// goarch: amd64
// pkg: github.com/ethereum/go-ethereum/trie/utils
// cpu: VirtualApple @ 2.50GHz
// BenchmarkTreeKeyWithEvaluation
// BenchmarkTreeKeyWithEvaluation-8   	  513855	      2324 ns/op	      32 B/op	       1 allocs/op
func BenchmarkTreeKeyWithEvaluation(b *testing.B) {
	// Initialize the IPA settings which can be pretty expensive.
	verkle.GetConfig()

	addr := []byte{0x01}
	eval := evaluateAddressPoint(addr)

	b.ReportAllocs()
	b.ResetTimer()

	for i := 0; i < b.N; i++ {
		BasicDataKeyWithEvaluatedAddress(eval)
	}
}

// BenchmarkStorageKey measures deriving a storage-slot key from a raw address
// (includes the per-call address point evaluation).
//
// goos: darwin
// goarch: amd64
// pkg: github.com/ethereum/go-ethereum/trie/utils
// cpu: VirtualApple @ 2.50GHz
// BenchmarkStorageKey
// BenchmarkStorageKey-8   	  230516	      4584 ns/op	      96 B/op	       3 allocs/op
func BenchmarkStorageKey(b *testing.B) {
	// Initialize the IPA settings which can be pretty expensive.
	verkle.GetConfig()

	b.ReportAllocs()
	b.ResetTimer()

	for i := 0; i < b.N; i++ {
		StorageSlotKey([]byte{0x01}, bytes.Repeat([]byte{0xff}, 32))
	}
}

// BenchmarkStorageKeyWithEvaluation measures deriving a storage-slot key when
// the address point has been evaluated up front (outside the timed loop).
//
// goos: darwin
// goarch: amd64
// pkg: github.com/ethereum/go-ethereum/trie/utils
// cpu: VirtualApple @ 2.50GHz
// BenchmarkStorageKeyWithEvaluation
// BenchmarkStorageKeyWithEvaluation-8   	  320125	      3753 ns/op	      96 B/op	       3 allocs/op
func BenchmarkStorageKeyWithEvaluation(b *testing.B) {
	// Initialize the IPA settings which can be pretty expensive.
	verkle.GetConfig()

	addr := []byte{0x01}
	eval := evaluateAddressPoint(addr)

	b.ReportAllocs()
	b.ResetTimer()

	for i := 0; i < b.N; i++ {
		StorageSlotKeyWithEvaluatedAddress(eval, bytes.Repeat([]byte{0xff}, 32))
	}
}

View file

@ -1,458 +0,0 @@
// Copyright 2023 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package trie
import (
"bytes"
"encoding/binary"
"errors"
"fmt"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/trie/trienode"
"github.com/ethereum/go-ethereum/trie/utils"
"github.com/ethereum/go-ethereum/triedb/database"
"github.com/ethereum/go-verkle"
"github.com/holiman/uint256"
)
var (
	// errInvalidRootType is returned when the trie root is not the expected
	// *verkle.InternalNode.
	errInvalidRootType = errors.New("invalid node type for root")
)

// VerkleTrie is a wrapper around VerkleNode that implements the trie.Trie
// interface so that Verkle trees can be reused verbatim.
type VerkleTrie struct {
	root   verkle.VerkleNode  // in-memory root node of the tree
	cache  *utils.PointCache  // cache of per-address point evaluations
	reader *Reader            // node reader backed by the database
	tracer *PrevalueTracer    // records previous values of resolved nodes
}
// NewVerkleTrie constructs a verkle tree based on the specified root hash.
func NewVerkleTrie(root common.Hash, db database.NodeDatabase, cache *utils.PointCache) (*VerkleTrie, error) {
	reader, err := NewReader(root, common.Hash{}, db)
	if err != nil {
		return nil, err
	}
	trie := &VerkleTrie{
		root:   verkle.New(),
		cache:  cache,
		reader: reader,
		tracer: NewPrevalueTracer(),
	}
	// An empty root needs no resolution; start from a fresh tree.
	if root == types.EmptyVerkleHash || root == types.EmptyRootHash {
		return trie, nil
	}
	// Otherwise load and parse the persisted root verkle node.
	blob, err := trie.nodeResolver(nil)
	if err != nil {
		return nil, err
	}
	node, err := verkle.ParseNode(blob, 0)
	if err != nil {
		return nil, err
	}
	trie.root = node
	return trie, nil
}
// GetKey returns the preimage of a hashed key that was previously used to
// store a value. Verkle keys are used as-is, so the input is returned
// unchanged.
func (t *VerkleTrie) GetKey(key []byte) []byte {
	return key
}
// GetAccount implements state.Trie, retrieving the account with the specified
// account address. If the specified account is not in the verkle tree, nil will
// be returned. If the tree is corrupted, an error will be returned.
func (t *VerkleTrie) GetAccount(addr common.Address) (*types.StateAccount, error) {
	var (
		acc = &types.StateAccount{}
		values [][]byte
		err error
	)
	switch n := t.root.(type) {
	case *verkle.InternalNode:
		// Fetch the whole leaf group for the account stem in one call.
		values, err = n.GetValuesAtStem(t.cache.GetStem(addr[:]), t.nodeResolver)
		if err != nil {
			return nil, fmt.Errorf("GetAccount (%x) error: %v", addr, err)
		}
	default:
		// The root is always expected to be an internal node.
		return nil, errInvalidRootType
	}
	// A nil group means the account does not exist.
	if values == nil {
		return nil, nil
	}
	// Decode nonce, balance and code hash out of the basic-data leaf.
	// NOTE(review): assumes the basic-data leaf is present and long enough
	// whenever the stem exists — confirm, otherwise the slicing below panics.
	basicData := values[utils.BasicDataLeafKey]
	acc.Nonce = binary.BigEndian.Uint64(basicData[utils.BasicDataNonceOffset:])
	acc.Balance = new(uint256.Int).SetBytes(basicData[utils.BasicDataBalanceOffset : utils.BasicDataBalanceOffset+16])
	acc.CodeHash = values[utils.CodeHashLeafKey]

	// TODO account.Root is left empty. How should we handle the legacy account?
	return acc, nil
}
// PrefetchAccount attempts to resolve specific accounts from the database
// to accelerate subsequent trie operations.
func (t *VerkleTrie) PrefetchAccount(addresses []common.Address) error {
	for _, address := range addresses {
		// Reading the account pulls its nodes into the resolver path.
		_, err := t.GetAccount(address)
		if err != nil {
			return err
		}
	}
	return nil
}
// GetStorage implements state.Trie, retrieving the storage slot with the
// specified account address and storage key. If the specified slot is not in
// the verkle tree, nil will be returned. If the tree is corrupted, an error
// will be returned.
func (t *VerkleTrie) GetStorage(addr common.Address, key []byte) ([]byte, error) {
	slotKey := utils.StorageSlotKeyWithEvaluatedAddress(t.cache.Get(addr.Bytes()), key)
	value, err := t.root.Get(slotKey, t.nodeResolver)
	if err != nil {
		return nil, err
	}
	// Strip the left zero-padding applied on write.
	return common.TrimLeftZeroes(value), nil
}
// PrefetchStorage attempts to resolve specific storage slots from the database
// to accelerate subsequent trie operations.
func (t *VerkleTrie) PrefetchStorage(addr common.Address, keys [][]byte) error {
	for _, key := range keys {
		// Reading the slot pulls its nodes into the resolver path.
		_, err := t.GetStorage(addr, key)
		if err != nil {
			return err
		}
	}
	return nil
}
// UpdateAccount implements state.Trie, writing the provided account into the tree.
// If the tree is corrupted, an error will be returned.
func (t *VerkleTrie) UpdateAccount(addr common.Address, acc *types.StateAccount, codeLen int) error {
	var (
		err error
		basicData [32]byte
		values = make([][]byte, verkle.NodeWidth)
		stem = t.cache.GetStem(addr[:])
	)

	// Code size is encoded in BasicData as a 3-byte big-endian integer. Spare bytes are present
	// before the code size to support bigger integers in the future. PutUint32(...) requires
	// 4 bytes, so we need to shift the offset 1 byte to the left.
	binary.BigEndian.PutUint32(basicData[utils.BasicDataCodeSizeOffset-1:], uint32(codeLen))
	binary.BigEndian.PutUint64(basicData[utils.BasicDataNonceOffset:], acc.Nonce)
	// The balance occupies a fixed 16-byte big-endian field; anything wider
	// cannot be represented and indicates a programming error.
	if acc.Balance.ByteLen() > 16 {
		panic("balance too large")
	}
	acc.Balance.WriteToSlice(basicData[utils.BasicDataBalanceOffset : utils.BasicDataBalanceOffset+16])
	values[utils.BasicDataLeafKey] = basicData[:]
	values[utils.CodeHashLeafKey] = acc.CodeHash[:]

	// Write both leaves with a single stem insertion.
	switch root := t.root.(type) {
	case *verkle.InternalNode:
		err = root.InsertValuesAtStem(stem, values, t.nodeResolver)
	default:
		return errInvalidRootType
	}
	if err != nil {
		return fmt.Errorf("UpdateAccount (%x) error: %v", addr, err)
	}
	return nil
}
// UpdateStorage implements state.Trie, writing the provided storage slot into
// the tree. If the tree is corrupted, an error will be returned.
func (t *VerkleTrie) UpdateStorage(address common.Address, key, value []byte) error {
	// Normalize the value to exactly 32 bytes: overlong values are truncated
	// to their first 32 bytes, short ones are left-padded with zeroes.
	var slot [32]byte
	if n := len(value); n >= 32 {
		copy(slot[:], value[:32])
	} else {
		copy(slot[32-n:], value)
	}
	k := utils.StorageSlotKeyWithEvaluatedAddress(t.cache.Get(address.Bytes()), key)
	return t.root.Insert(k, slot[:], t.nodeResolver)
}
// DeleteAccount leaves the account untouched, as no account deletion can happen
// in verkle.
// There is a special corner case, in which an account that is prefunded, CREATE2-d
// and then SELFDESTRUCT-d should see its funds drained. EIP161 says that account
// should be removed, but this is verboten by the verkle spec. This contains a
// workaround in which the method checks for this corner case, and if so, overwrites
// the balance with 0. This will be removed once the spec has been clarified.
func (t *VerkleTrie) DeleteAccount(addr common.Address) error {
	k := utils.BasicDataKeyWithEvaluatedAddress(t.cache.Get(addr.Bytes()))
	values, err := t.root.(*verkle.InternalNode).GetValuesAtStem(k, t.nodeResolver)
	if err != nil {
		return fmt.Errorf("Error getting data at %x in delete: %w", k, err)
	}
	// An account counts as "prefunded" when only basic data and an empty code
	// hash are present in its leaf group; any other populated leaf disproves it.
	var prefunded bool
	for i, v := range values {
		switch i {
		case 0:
			prefunded = len(v) == 32
		case 1:
			prefunded = len(v) == 32 && bytes.Equal(v, types.EmptyCodeHash[:])
		default:
			prefunded = v == nil
		}
		if !prefunded {
			break
		}
	}
	if prefunded {
		// Overwrite the balance leaf with zeroes. The Insert error was
		// previously dropped silently; propagate it so tree corruption
		// is not masked.
		if err := t.root.Insert(k, common.Hash{}.Bytes(), t.nodeResolver); err != nil {
			return fmt.Errorf("error zeroing prefunded account %x: %w", addr, err)
		}
	}
	return nil
}
// RollBackAccount removes the account info + code from the tree, unlike DeleteAccount
// that will overwrite it with 0s. The first 64 storage slots are also removed.
func (t *VerkleTrie) RollBackAccount(addr common.Address) error {
	var (
		evaluatedAddr = t.cache.Get(addr.Bytes())
		basicDataKey = utils.BasicDataKeyWithEvaluatedAddress(evaluatedAddr)
	)
	basicDataBytes, err := t.root.Get(basicDataKey, t.nodeResolver)
	if err != nil {
		return fmt.Errorf("rollback: error finding code size: %w", err)
	}
	if len(basicDataBytes) == 0 {
		return errors.New("rollback: basic data is not existent")
	}
	// The code size is encoded in BasicData as a 3-byte big-endian integer. Spare bytes are present
	// before the code size to support bigger integers in the future.
	// LittleEndian.Uint32(...) expects 4-bytes, so we need to shift the offset 1-byte to the left.
	codeSize := binary.BigEndian.Uint32(basicDataBytes[utils.BasicDataCodeSizeOffset-1:])

	// Delete the account header + first 64 slots + first 128 code chunks
	// (they all share the account's header stem, so one deletion suffices).
	_, err = t.root.(*verkle.InternalNode).DeleteAtStem(basicDataKey[:31], t.nodeResolver)
	if err != nil {
		return fmt.Errorf("error rolling back account header: %w", err)
	}

	// Delete all further code: beyond the header group, chunks are grouped
	// 256 per stem (31 code bytes each), hence the byte stride of 31*256 and
	// the chunk stride of 256 per iteration.
	for i, chunknr := uint64(31*128), uint64(128); i < uint64(codeSize); i, chunknr = i+31*256, chunknr+256 {
		// evaluate group key at the start of a new group
		offset := uint256.NewInt(chunknr)
		key := utils.CodeChunkKeyWithEvaluatedAddress(evaluatedAddr, offset)

		if _, err = t.root.(*verkle.InternalNode).DeleteAtStem(key[:], t.nodeResolver); err != nil {
			return fmt.Errorf("error deleting code chunk stem (addr=%x, offset=%d) error: %w", addr[:], offset, err)
		}
	}
	return nil
}
// DeleteStorage implements state.Trie. Verkle leaves are never removed, so the
// slot is overwritten with 32 zero bytes instead. Deleting an absent slot is
// not an error; a corrupted tree returns one.
func (t *VerkleTrie) DeleteStorage(addr common.Address, key []byte) error {
	var zeroes [32]byte
	slotKey := utils.StorageSlotKeyWithEvaluatedAddress(t.cache.Get(addr.Bytes()), key)
	return t.root.Insert(slotKey, zeroes[:], t.nodeResolver)
}

// Hash returns the root hash of the tree. It does not write to the database
// and can be used even if the tree doesn't have one.
func (t *VerkleTrie) Hash() common.Hash {
	return t.root.Commit().Bytes()
}
// Commit writes all nodes to the tree's memory database and returns the
// resulting root hash together with the set of dirty nodes.
func (t *VerkleTrie) Commit(_ bool) (common.Hash, *trienode.NodeSet) {
	internal := t.root.(*verkle.InternalNode)
	serialized, err := internal.BatchSerialize()
	if err != nil {
		// A failure here indicates a bug in BatchSerialize itself, which is
		// unrecoverable: fail catastrophically.
		panic(fmt.Errorf("BatchSerialize failed: %v", err))
	}
	set := trienode.NewNodeSet(common.Hash{})
	for _, n := range serialized {
		// The hash parameter is not used in pathdb.
		set.AddNode(n.Path, trienode.NewNodeWithPrev(common.Hash{}, n.SerializedBytes, t.tracer.Get(n.Path)))
	}
	return t.Hash(), set
}
// NodeIterator implements state.Trie, returning an iterator that returns
// nodes of the trie. Iteration starts at the key after the given start key.
//
// Not yet supported for verkle tries; callers always receive an error.
//
// TODO(gballet, rjl493456442) implement it.
func (t *VerkleTrie) NodeIterator(startKey []byte) (NodeIterator, error) {
	// TODO(@CPerezz): remove.
	return nil, errors.New("not implemented")
}

// Prove implements state.Trie, constructing a Merkle proof for key. The result
// contains all encoded nodes on the path to the value at key. The value itself
// is also included in the last node and can be retrieved by verifying the proof.
//
// If the trie does not contain a value for key, the returned proof contains all
// nodes of the longest existing prefix of the key (at least the root), ending
// with the node that proves the absence of the key.
//
// Not yet supported for verkle tries; see Proof for the multiproof variant.
//
// TODO(gballet, rjl493456442) implement it.
func (t *VerkleTrie) Prove(key []byte, proofDb ethdb.KeyValueWriter) error {
	panic("not implemented")
}
// Copy returns a deep-copied verkle tree. The root node and tracer are
// duplicated, while the point cache and node reader are shared with the
// original.
func (t *VerkleTrie) Copy() *VerkleTrie {
	cpy := &VerkleTrie{
		root:   t.root.Copy(),
		cache:  t.cache,
		reader: t.reader,
		tracer: t.tracer.Copy(),
	}
	return cpy
}

// IsVerkle indicates if the trie is a Verkle trie.
func (t *VerkleTrie) IsVerkle() bool {
	return true
}
// Proof builds and returns the verkle multiproof for keys, built against
// the pre tree. The post tree is passed in order to add the post values
// to that proof.
func (t *VerkleTrie) Proof(posttrie *VerkleTrie, keys [][]byte) (*verkle.VerkleProof, verkle.StateDiff, error) {
	// A nil post tree is allowed; the proof then only covers pre values.
	var postroot verkle.VerkleNode
	if posttrie != nil {
		postroot = posttrie.root
	}
	proof, _, _, _, err := verkle.MakeVerkleMultiProof(t.root, postroot, keys, t.nodeResolver)
	if err != nil {
		return nil, nil, err
	}
	serialized, diff, err := verkle.SerializeProof(proof)
	if err != nil {
		return nil, nil, err
	}
	return serialized, diff, nil
}
// ChunkedCode represents a sequence of 32-bytes chunks of code (31 bytes of which
// are actual code, and 1 byte is the pushdata offset).
type ChunkedCode []byte

// Copy the values here so as to avoid an import cycle
const (
	PUSH1  = byte(0x60)
	PUSH32 = byte(0x7f)
)

// ChunkifyCode generates the chunked version of an array representing EVM
// bytecode. Each 32-byte output chunk starts with one byte recording how many
// of its leading bytes are pushdata spilled over from the previous chunk,
// followed by 31 bytes of the code itself.
func ChunkifyCode(code []byte) ChunkedCode {
	// One chunk per 31 code bytes, rounded up.
	chunkCount := (len(code) + 30) / 31
	chunks := make([]byte, chunkCount*32)

	var (
		chunkOffset int // number of leading pushdata bytes in the current chunk
		codeOffset  int // scan position inside the original code
	)
	for i := 0; i < chunkCount; i++ {
		// Copy up to 31 bytes of code into the chunk body.
		end := 31 * (i + 1)
		if end > len(code) {
			end = len(code)
		}
		copy(chunks[i*32+1:], code[31*i:end])

		if chunkOffset > 31 {
			// The previous PUSH's data covers this entire chunk: mark it
			// fully as pushdata and carry the remainder into the next chunk.
			chunks[i*32] = 31
			chunkOffset = 1
			continue
		}
		chunks[i*32] = byte(chunkOffset)
		chunkOffset = 0

		// Walk the instructions of this chunk; when a PUSH-N's data runs
		// past the chunk boundary, record the spill size for the next chunk.
		for ; codeOffset < end; codeOffset++ {
			op := code[codeOffset]
			if op < PUSH1 || op > PUSH32 {
				continue
			}
			codeOffset += int(op - PUSH1 + 1)
			if codeOffset+1 >= 31*(i+1) {
				codeOffset++
				chunkOffset = codeOffset - 31*(i+1)
				break
			}
		}
	}
	return chunks
}
// UpdateContractCode implements state.Trie, writing the provided contract code
// into the trie.
// Note that the code-size *must* be already saved by a previous UpdateAccount call.
func (t *VerkleTrie) UpdateContractCode(addr common.Address, codeHash common.Hash, code []byte) error {
	var (
		chunks = ChunkifyCode(code)
		values [][]byte
		key []byte
		err error
	)
	// Iterate the 32-byte chunks, batching them into leaf groups of 256
	// values and flushing each full group with a single stem insertion.
	for i, chunknr := 0, uint64(0); i < len(chunks); i, chunknr = i+32, chunknr+1 {
		// Chunk 0 sits at offset 128 of the account header group, hence the
		// +128 shift when locating a chunk's position within its group.
		groupOffset := (chunknr + 128) % 256
		if groupOffset == 0 /* start of new group */ || chunknr == 0 /* first chunk in header group */ {
			values = make([][]byte, verkle.NodeWidth)
			key = utils.CodeChunkKeyWithEvaluatedAddress(t.cache.Get(addr.Bytes()), uint256.NewInt(chunknr))
		}
		values[groupOffset] = chunks[i : i+32]

		// Flush when the group is full or when this was the last chunk.
		if groupOffset == 255 || len(chunks)-i <= 32 {
			switch root := t.root.(type) {
			case *verkle.InternalNode:
				err = root.InsertValuesAtStem(key[:31], values, t.nodeResolver)
				if err != nil {
					return fmt.Errorf("UpdateContractCode (addr=%x) error: %w", addr[:], err)
				}
			default:
				return errInvalidRootType
			}
		}
	}
	return nil
}
// ToDot renders the tree in graphviz dot format, for debugging purposes.
func (t *VerkleTrie) ToDot() string {
	return verkle.ToDot(t.root)
}

// nodeResolver loads the serialized node at the given tree path from the
// backing database, recording the blob in the tracer before returning it.
func (t *VerkleTrie) nodeResolver(path []byte) ([]byte, error) {
	node, err := t.reader.Node(path, common.Hash{})
	if err != nil {
		return nil, err
	}
	t.tracer.Put(path, node)
	return node, nil
}

// Witness returns a set containing all trie nodes that have been accessed.
func (t *VerkleTrie) Witness() map[string][]byte {
	panic("not implemented")
}

View file

@ -1,173 +0,0 @@
// Copyright 2023 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package trie
import (
"bytes"
"reflect"
"testing"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/trie/utils"
"github.com/holiman/uint256"
)
var (
	// accounts is a fixture mapping addresses to pre-built state accounts
	// used by the tests in this file.
	accounts = map[common.Address]*types.StateAccount{
		{1}: {
			Nonce: 100,
			Balance: uint256.NewInt(100),
			CodeHash: common.Hash{0x1}.Bytes(),
		},
		{2}: {
			Nonce: 200,
			Balance: uint256.NewInt(200),
			CodeHash: common.Hash{0x2}.Bytes(),
		},
	}
	// storages maps each fixture address to its storage slots; the MaxHash
	// key exercises the largest possible storage key.
	storages = map[common.Address]map[common.Hash][]byte{
		{1}: {
			common.Hash{10}: []byte{10},
			common.Hash{11}: []byte{11},
			common.MaxHash: []byte{0xff},
		},
		{2}: {
			common.Hash{20}: []byte{20},
			common.Hash{21}: []byte{21},
			common.MaxHash: []byte{0xff},
		},
	}
)
// TestVerkleTreeReadWrite checks that accounts and storage slots written into
// a fresh verkle tree can be read back unchanged.
func TestVerkleTreeReadWrite(t *testing.T) {
	db := newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.PathScheme)
	trie, _ := NewVerkleTrie(types.EmptyVerkleHash, db, utils.NewPointCache(100))

	// Populate the tree with the fixture accounts and their storage.
	for addr, acct := range accounts {
		if err := trie.UpdateAccount(addr, acct, 0); err != nil {
			t.Fatalf("Failed to update account, %v", err)
		}
		for key, val := range storages[addr] {
			if err := trie.UpdateStorage(addr, key.Bytes(), val); err != nil {
				t.Fatalf("Failed to update storage, %v", err)
			}
		}
	}
	// Read everything back and verify it round-trips unchanged.
	for addr, acct := range accounts {
		got, err := trie.GetAccount(addr)
		if err != nil {
			t.Fatalf("Failed to get account, %v", err)
		}
		if !reflect.DeepEqual(got, acct) {
			t.Fatal("account is not matched")
		}
		for key, val := range storages[addr] {
			slot, err := trie.GetStorage(addr, key.Bytes())
			if err != nil {
				t.Fatalf("Failed to get storage, %v", err)
			}
			if !bytes.Equal(slot, val) {
				t.Fatal("storage is not matched")
			}
		}
	}
}
// TestVerkleRollBack checks that RollBackAccount removes the account header
// group and all code chunks beyond it, for accounts with more than 128 code
// chunks.
func TestVerkleRollBack(t *testing.T) {
	db := newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.PathScheme)
	tr, _ := NewVerkleTrie(types.EmptyVerkleHash, db, utils.NewPointCache(100))
	for addr, acct := range accounts {
		// create more than 128 chunks of code
		code := make([]byte, 129*32)
		// Fill the code with PUSH1 <i> pairs so it chunkifies non-trivially.
		for i := 0; i < len(code); i += 2 {
			code[i] = 0x60
			code[i+1] = byte(i % 256)
		}
		if err := tr.UpdateAccount(addr, acct, len(code)); err != nil {
			t.Fatalf("Failed to update account, %v", err)
		}
		for key, val := range storages[addr] {
			if err := tr.UpdateStorage(addr, key.Bytes(), val); err != nil {
				t.Fatalf("Failed to update storage, %v", err)
			}
		}
		hash := crypto.Keccak256Hash(code)
		if err := tr.UpdateContractCode(addr, hash, code); err != nil {
			t.Fatalf("Failed to update contract, %v", err)
		}
	}
	// Check that things were created
	for addr, acct := range accounts {
		stored, err := tr.GetAccount(addr)
		if err != nil {
			t.Fatalf("Failed to get account, %v", err)
		}
		if !reflect.DeepEqual(stored, acct) {
			t.Fatal("account is not matched")
		}
		for key, val := range storages[addr] {
			stored, err := tr.GetStorage(addr, key.Bytes())
			if err != nil {
				t.Fatalf("Failed to get storage, %v", err)
			}
			if !bytes.Equal(stored, val) {
				t.Fatal("storage is not matched")
			}
		}
	}
	// ensure there is some code in the 2nd group of the 1st account
	keyOf2ndGroup := utils.CodeChunkKeyWithEvaluatedAddress(tr.cache.Get(common.Address{1}.Bytes()), uint256.NewInt(128))
	chunk, err := tr.root.Get(keyOf2ndGroup, nil)
	if err != nil {
		t.Fatalf("Failed to get account, %v", err)
	}
	if len(chunk) == 0 {
		t.Fatal("account was not created ")
	}
	// Rollback first account and check that it is gone
	addr1 := common.Address{1}
	err = tr.RollBackAccount(addr1)
	if err != nil {
		t.Fatalf("error rolling back address 1: %v", err)
	}
	// ensure the account is gone
	stored, err := tr.GetAccount(addr1)
	if err != nil {
		t.Fatalf("Failed to get account, %v", err)
	}
	if stored != nil {
		t.Fatal("account was not deleted")
	}
	// ensure that the last code chunk is also gone from the tree
	chunk, err = tr.root.Get(keyOf2ndGroup, nil)
	if err != nil {
		t.Fatalf("Failed to get account, %v", err)
	}
	if len(chunk) != 0 {
		t.Fatal("account was not deleted")
	}
}

View file

@ -275,7 +275,7 @@ func (dl *diskLayer) storage(accountHash, storageHash common.Hash, depth int) ([
// If the layer is being generated, ensure the requested storage slot
// has already been covered by the generator.
key := append(accountHash[:], storageHash[:]...)
key := storageKeySlice(accountHash, storageHash)
marker := dl.genMarker()
if marker != nil && bytes.Compare(key, marker) > 0 {
return nil, errNotCoveredYet

View file

@ -116,15 +116,16 @@ func writeStates(batch ethdb.Batch, genMarker []byte, accountData map[common.Has
continue
}
slots += 1
key := storageKeySlice(addrHash, storageHash)
if len(blob) == 0 {
rawdb.DeleteStorageSnapshot(batch, addrHash, storageHash)
if clean != nil {
clean.Set(append(addrHash[:], storageHash[:]...), nil)
clean.Set(key, nil)
}
} else {
rawdb.WriteStorageSnapshot(batch, addrHash, storageHash, blob)
if clean != nil {
clean.Set(append(addrHash[:], storageHash[:]...), blob)
clean.Set(key, blob)
}
}
}

View file

@ -148,6 +148,7 @@ func (g *generator) stop() {
g.abort <- ch
<-ch
g.running = false
log.Debug("Snapshot generation has been terminated")
}
// completed returns the flag indicating if the whole generation is done.

View file

@ -163,12 +163,15 @@ type indexWriter struct {
db ethdb.KeyValueReader
}
// newIndexWriter constructs the index writer for the specified state.
func newIndexWriter(db ethdb.KeyValueReader, state stateIdent) (*indexWriter, error) {
// newIndexWriter constructs the index writer for the specified state. Additionally,
// it takes an integer as the limit and prunes all existing elements above that ID.
// It's essential as the recovery mechanism after unclean shutdown during the history
// indexing.
func newIndexWriter(db ethdb.KeyValueReader, state stateIdent, limit uint64) (*indexWriter, error) {
blob := readStateIndex(state, db)
if len(blob) == 0 {
desc := newIndexBlockDesc(0)
bw, _ := newBlockWriter(nil, desc)
bw, _ := newBlockWriter(nil, desc, 0 /* useless if the block is empty */)
return &indexWriter{
descList: []*indexBlockDesc{desc},
bw: bw,
@ -180,15 +183,27 @@ func newIndexWriter(db ethdb.KeyValueReader, state stateIdent) (*indexWriter, er
if err != nil {
return nil, err
}
// Trim trailing blocks whose elements all exceed the limit.
for i := len(descList) - 1; i > 0 && descList[i].max > limit; i-- {
// The previous block has the elements that exceed the limit,
// therefore the current block can be entirely dropped.
if descList[i-1].max >= limit {
descList = descList[:i]
}
}
// Take the last block for appending new elements
lastDesc := descList[len(descList)-1]
indexBlock := readStateIndexBlock(state, db, lastDesc.id)
bw, err := newBlockWriter(indexBlock, lastDesc)
// Construct the writer for the last block. All elements in this block
// that exceed the limit will be truncated.
bw, err := newBlockWriter(indexBlock, lastDesc, limit)
if err != nil {
return nil, err
}
return &indexWriter{
descList: descList,
lastID: lastDesc.max,
lastID: bw.last(),
bw: bw,
state: state,
db: db,
@ -221,7 +236,7 @@ func (w *indexWriter) rotate() error {
desc = newIndexBlockDesc(w.bw.desc.id + 1)
)
w.frozen = append(w.frozen, w.bw)
w.bw, err = newBlockWriter(nil, desc)
w.bw, err = newBlockWriter(nil, desc, 0 /* useless if the block is empty */)
if err != nil {
return err
}
@ -271,13 +286,13 @@ type indexDeleter struct {
}
// newIndexDeleter constructs the index deleter for the specified state.
func newIndexDeleter(db ethdb.KeyValueReader, state stateIdent) (*indexDeleter, error) {
func newIndexDeleter(db ethdb.KeyValueReader, state stateIdent, limit uint64) (*indexDeleter, error) {
blob := readStateIndex(state, db)
if len(blob) == 0 {
// TODO(rjl493456442) we can probably return an error here,
// deleter with no data is meaningless.
desc := newIndexBlockDesc(0)
bw, _ := newBlockWriter(nil, desc)
bw, _ := newBlockWriter(nil, desc, 0 /* useless if the block is empty */)
return &indexDeleter{
descList: []*indexBlockDesc{desc},
bw: bw,
@ -289,22 +304,34 @@ func newIndexDeleter(db ethdb.KeyValueReader, state stateIdent) (*indexDeleter,
if err != nil {
return nil, err
}
// Trim trailing blocks whose elements all exceed the limit.
for i := len(descList) - 1; i > 0 && descList[i].max > limit; i-- {
// The previous block has the elements that exceed the limit,
// therefore the current block can be entirely dropped.
if descList[i-1].max >= limit {
descList = descList[:i]
}
}
// Take the block for deleting element from
lastDesc := descList[len(descList)-1]
indexBlock := readStateIndexBlock(state, db, lastDesc.id)
bw, err := newBlockWriter(indexBlock, lastDesc)
// Construct the writer for the last block. All elements in this block
// that exceed the limit will be truncated.
bw, err := newBlockWriter(indexBlock, lastDesc, limit)
if err != nil {
return nil, err
}
return &indexDeleter{
descList: descList,
lastID: lastDesc.max,
lastID: bw.last(),
bw: bw,
state: state,
db: db,
}, nil
}
// empty returns an flag indicating whether the state index is empty.
// empty returns whether the state index is empty.
func (d *indexDeleter) empty() bool {
return d.bw.empty() && len(d.descList) == 1
}
@ -337,7 +364,7 @@ func (d *indexDeleter) pop(id uint64) error {
// Open the previous block writer for deleting
lastDesc := d.descList[len(d.descList)-1]
indexBlock := readStateIndexBlock(d.state, d.db, lastDesc.id)
bw, err := newBlockWriter(indexBlock, lastDesc)
bw, err := newBlockWriter(indexBlock, lastDesc, lastDesc.max)
if err != nil {
return err
}

View file

@ -21,13 +21,15 @@ import (
"errors"
"fmt"
"math"
"github.com/ethereum/go-ethereum/log"
)
// Tunables and fixed sizes for the history index block encoding. The span
// previously contained duplicated (pre- and post-change) declarations of the
// same constants, which is a compile-time redeclaration error; only the
// current values are kept.
const (
	indexBlockDescSize   = 14              // The size of index block descriptor
	indexBlockEntriesCap = 4096            // The maximum number of entries can be grouped in a block
	indexBlockRestartLen = 256             // The restart interval length of index block
	historyIndexBatch    = 8 * 1024 * 1024 // The number of state history indexes for constructing or deleting as batch
)
// indexBlockDesc represents a descriptor for an index block, which contains a
@ -180,7 +182,11 @@ type blockWriter struct {
data []byte // Aggregated encoded data slice
}
func newBlockWriter(blob []byte, desc *indexBlockDesc) (*blockWriter, error) {
// newBlockWriter constructs a block writer. In addition to the existing data
// and block description, it takes an element ID and prunes all existing elements
// above that ID. It's essential as the recovery mechanism after unclean shutdown
// during the history indexing.
func newBlockWriter(blob []byte, desc *indexBlockDesc, limit uint64) (*blockWriter, error) {
if len(blob) == 0 {
return &blockWriter{
desc: desc,
@ -191,11 +197,22 @@ func newBlockWriter(blob []byte, desc *indexBlockDesc) (*blockWriter, error) {
if err != nil {
return nil, err
}
return &blockWriter{
writer := &blockWriter{
desc: desc,
restarts: restarts,
data: data, // safe to own the slice
}, nil
}
var trimmed int
for !writer.empty() && writer.last() > limit {
if err := writer.pop(writer.last()); err != nil {
return nil, err
}
trimmed += 1
}
if trimmed > 0 {
log.Debug("Truncated extraneous elements", "count", trimmed, "limit", limit)
}
return writer, nil
}
// append adds a new element to the block. The new element must be greater than
@ -271,6 +288,7 @@ func (b *blockWriter) sectionLast(section int) uint64 {
// sectionSearch looks up the specified value in the given section,
// the position and the preceding value will be returned if found.
// It assumes that the preceding element exists in the section.
func (b *blockWriter) sectionSearch(section int, n uint64) (found bool, prev uint64, pos int) {
b.scanSection(section, func(v uint64, p int) bool {
if n == v {
@ -295,7 +313,6 @@ func (b *blockWriter) pop(id uint64) error {
}
// If there is only one entry left, the entire block should be reset
if b.desc.entries == 1 {
//b.desc.min = 0
b.desc.max = 0
b.desc.entries = 0
b.restarts = nil
@ -331,6 +348,15 @@ func (b *blockWriter) full() bool {
return b.desc.full()
}
// last returns the maximum element recorded in the block. The result is
// only meaningful for a non-empty writer; an empty writer yields zero,
// which callers must not interpret as a real element.
func (b *blockWriter) last() uint64 {
	if !b.empty() {
		return b.desc.max
	}
	return 0
}
// finish finalizes the index block encoding by appending the encoded restart points
// and the restart counter to the end of the block.
//

View file

@ -28,7 +28,7 @@ func TestBlockReaderBasic(t *testing.T) {
elements := []uint64{
1, 5, 10, 11, 20,
}
bw, _ := newBlockWriter(nil, newIndexBlockDesc(0))
bw, _ := newBlockWriter(nil, newIndexBlockDesc(0), 0)
for i := 0; i < len(elements); i++ {
bw.append(elements[i])
}
@ -66,7 +66,7 @@ func TestBlockReaderLarge(t *testing.T) {
}
slices.Sort(elements)
bw, _ := newBlockWriter(nil, newIndexBlockDesc(0))
bw, _ := newBlockWriter(nil, newIndexBlockDesc(0), 0)
for i := 0; i < len(elements); i++ {
bw.append(elements[i])
}
@ -95,7 +95,7 @@ func TestBlockReaderLarge(t *testing.T) {
}
func TestBlockWriterBasic(t *testing.T) {
bw, _ := newBlockWriter(nil, newIndexBlockDesc(0))
bw, _ := newBlockWriter(nil, newIndexBlockDesc(0), 0)
if !bw.empty() {
t.Fatal("expected empty block")
}
@ -103,11 +103,13 @@ func TestBlockWriterBasic(t *testing.T) {
if err := bw.append(1); err == nil {
t.Fatal("out-of-order insertion is not expected")
}
var maxElem uint64
for i := 0; i < 10; i++ {
bw.append(uint64(i + 3))
maxElem = uint64(i + 3)
}
bw, err := newBlockWriter(bw.finish(), newIndexBlockDesc(0))
bw, err := newBlockWriter(bw.finish(), newIndexBlockDesc(0), maxElem)
if err != nil {
t.Fatalf("Failed to construct the block writer, %v", err)
}
@ -119,8 +121,71 @@ func TestBlockWriterBasic(t *testing.T) {
bw.finish()
}
// TestBlockWriterWithLimit verifies that reopening an encoded block with a
// limit truncates every element above that limit, and that the truncated
// block keeps accepting appends afterwards.
func TestBlockWriterWithLimit(t *testing.T) {
	writer, _ := newBlockWriter(nil, newIndexBlockDesc(0), 0)

	var maxElem uint64
	for n := 1; n <= indexBlockRestartLen*2; n++ {
		writer.append(uint64(n))
		maxElem = uint64(n)
	}
	cases := []struct {
		limit  uint64
		expMax uint64
	}{
		// nothing to truncate
		{limit: maxElem, expMax: maxElem},

		// truncate the last element
		{limit: maxElem - 1, expMax: maxElem - 1},

		// truncation around the restart boundary
		{limit: uint64(indexBlockRestartLen + 1), expMax: uint64(indexBlockRestartLen + 1)},

		// truncation around the restart boundary
		{limit: uint64(indexBlockRestartLen), expMax: uint64(indexBlockRestartLen)},

		{limit: uint64(1), expMax: uint64(1)},

		// truncate the entire block, it's in theory invalid
		{limit: uint64(0), expMax: uint64(0)},
	}
	for i, tc := range cases {
		// Work on a private copy of the descriptor so each case starts
		// from the same fully-populated block.
		descCopy := *writer.desc
		truncated, err := newBlockWriter(writer.finish(), &descCopy, tc.limit)
		if err != nil {
			t.Fatalf("Failed to construct the block writer, %v", err)
		}
		if truncated.desc.max != tc.expMax {
			t.Fatalf("Test %d, unexpected max value, got %d, want %d", i, truncated.desc.max, tc.expMax)
		}
		// Re-fill the elements above the truncation point
		var refilled uint64
		for elem := tc.limit + 1; elem < indexBlockRestartLen*4; elem++ {
			if err := truncated.append(elem); err != nil {
				t.Fatalf("Failed to append value %d: %v", elem, err)
			}
			refilled = elem
		}
		if truncated.desc.max != refilled {
			t.Fatalf("Test %d, unexpected max value, got %d, want %d", i, truncated.desc.max, refilled)
		}
	}
}
func TestBlockWriterDelete(t *testing.T) {
bw, _ := newBlockWriter(nil, newIndexBlockDesc(0))
bw, _ := newBlockWriter(nil, newIndexBlockDesc(0), 0)
for i := 0; i < 10; i++ {
bw.append(uint64(i + 1))
}
@ -147,7 +212,7 @@ func TestBlcokWriterDeleteWithData(t *testing.T) {
elements := []uint64{
1, 5, 10, 11, 20,
}
bw, _ := newBlockWriter(nil, newIndexBlockDesc(0))
bw, _ := newBlockWriter(nil, newIndexBlockDesc(0), 0)
for i := 0; i < len(elements); i++ {
bw.append(elements[i])
}
@ -158,7 +223,7 @@ func TestBlcokWriterDeleteWithData(t *testing.T) {
max: 20,
entries: 5,
}
bw, err := newBlockWriter(bw.finish(), desc)
bw, err := newBlockWriter(bw.finish(), desc, elements[len(elements)-1])
if err != nil {
t.Fatalf("Failed to construct block writer %v", err)
}
@ -201,15 +266,18 @@ func TestBlcokWriterDeleteWithData(t *testing.T) {
}
func TestCorruptedIndexBlock(t *testing.T) {
bw, _ := newBlockWriter(nil, newIndexBlockDesc(0))
bw, _ := newBlockWriter(nil, newIndexBlockDesc(0), 0)
var maxElem uint64
for i := 0; i < 10; i++ {
bw.append(uint64(i + 1))
maxElem = uint64(i + 1)
}
buf := bw.finish()
// Mutate the buffer manually
buf[len(buf)-1]++
_, err := newBlockWriter(buf, newIndexBlockDesc(0))
_, err := newBlockWriter(buf, newIndexBlockDesc(0), maxElem)
if err == nil {
t.Fatal("Corrupted index block data is not detected")
}
@ -218,7 +286,7 @@ func TestCorruptedIndexBlock(t *testing.T) {
// BenchmarkParseIndexBlock benchmarks the performance of parseIndexBlock.
func BenchmarkParseIndexBlock(b *testing.B) {
// Generate a realistic index block blob
bw, _ := newBlockWriter(nil, newIndexBlockDesc(0))
bw, _ := newBlockWriter(nil, newIndexBlockDesc(0), 0)
for i := 0; i < 4096; i++ {
bw.append(uint64(i * 2))
}
@ -238,13 +306,15 @@ func BenchmarkBlockWriterAppend(b *testing.B) {
b.ReportAllocs()
b.ResetTimer()
desc := newIndexBlockDesc(0)
writer, _ := newBlockWriter(nil, desc)
var blockID uint32
desc := newIndexBlockDesc(blockID)
writer, _ := newBlockWriter(nil, desc, 0)
for i := 0; i < b.N; i++ {
if writer.full() {
desc = newIndexBlockDesc(0)
writer, _ = newBlockWriter(nil, desc)
blockID += 1
desc = newIndexBlockDesc(blockID)
writer, _ = newBlockWriter(nil, desc, 0)
}
if err := writer.append(writer.desc.max + 1); err != nil {
b.Error(err)

View file

@ -33,7 +33,7 @@ func makeTestIndexBlock(count int) ([]byte, []uint64) {
marks = make(map[uint64]bool)
elements []uint64
)
bw, _ := newBlockWriter(nil, newIndexBlockDesc(0))
bw, _ := newBlockWriter(nil, newIndexBlockDesc(0), 0)
for i := 0; i < count; i++ {
n := uint64(rand.Uint32())
if marks[n] {
@ -67,7 +67,7 @@ func makeTestIndexBlocks(db ethdb.KeyValueStore, stateIdent stateIdent, count in
}
sort.Slice(elements, func(i, j int) bool { return elements[i] < elements[j] })
iw, _ := newIndexWriter(db, stateIdent)
iw, _ := newIndexWriter(db, stateIdent, 0)
for i := 0; i < len(elements); i++ {
iw.append(elements[i])
}

View file

@ -33,7 +33,7 @@ func TestIndexReaderBasic(t *testing.T) {
1, 5, 10, 11, 20,
}
db := rawdb.NewMemoryDatabase()
bw, _ := newIndexWriter(db, newAccountIdent(common.Hash{0xa}))
bw, _ := newIndexWriter(db, newAccountIdent(common.Hash{0xa}), 0)
for i := 0; i < len(elements); i++ {
bw.append(elements[i])
}
@ -75,7 +75,7 @@ func TestIndexReaderLarge(t *testing.T) {
slices.Sort(elements)
db := rawdb.NewMemoryDatabase()
bw, _ := newIndexWriter(db, newAccountIdent(common.Hash{0xa}))
bw, _ := newIndexWriter(db, newAccountIdent(common.Hash{0xa}), 0)
for i := 0; i < len(elements); i++ {
bw.append(elements[i])
}
@ -122,19 +122,21 @@ func TestEmptyIndexReader(t *testing.T) {
func TestIndexWriterBasic(t *testing.T) {
db := rawdb.NewMemoryDatabase()
iw, _ := newIndexWriter(db, newAccountIdent(common.Hash{0xa}))
iw, _ := newIndexWriter(db, newAccountIdent(common.Hash{0xa}), 0)
iw.append(2)
if err := iw.append(1); err == nil {
t.Fatal("out-of-order insertion is not expected")
}
var maxElem uint64
for i := 0; i < 10; i++ {
iw.append(uint64(i + 3))
maxElem = uint64(i + 3)
}
batch := db.NewBatch()
iw.finish(batch)
batch.Write()
iw, err := newIndexWriter(db, newAccountIdent(common.Hash{0xa}))
iw, err := newIndexWriter(db, newAccountIdent(common.Hash{0xa}), maxElem)
if err != nil {
t.Fatalf("Failed to construct the block writer, %v", err)
}
@ -146,18 +148,87 @@ func TestIndexWriterBasic(t *testing.T) {
iw.finish(db.NewBatch())
}
func TestIndexWriterDelete(t *testing.T) {
func TestIndexWriterWithLimit(t *testing.T) {
db := rawdb.NewMemoryDatabase()
iw, _ := newIndexWriter(db, newAccountIdent(common.Hash{0xa}))
iw, _ := newIndexWriter(db, newAccountIdent(common.Hash{0xa}), 0)
var maxElem uint64
for i := 0; i < indexBlockEntriesCap*2; i++ {
iw.append(uint64(i + 1))
maxElem = uint64(i + 1)
}
batch := db.NewBatch()
iw.finish(batch)
batch.Write()
suites := []struct {
limit uint64
expMax uint64
}{
// nothing to truncate
{
maxElem, maxElem,
},
// truncate the last element
{
maxElem - 1, maxElem - 1,
},
// truncation around the block boundary
{
uint64(indexBlockEntriesCap + 1),
uint64(indexBlockEntriesCap + 1),
},
// truncation around the block boundary
{
uint64(indexBlockEntriesCap),
uint64(indexBlockEntriesCap),
},
{
uint64(1), uint64(1),
},
// truncate the entire index, it's in theory invalid
{
uint64(0), uint64(0),
},
}
for i, suite := range suites {
iw, err := newIndexWriter(db, newAccountIdent(common.Hash{0xa}), suite.limit)
if err != nil {
t.Fatalf("Failed to construct the index writer, %v", err)
}
if iw.lastID != suite.expMax {
t.Fatalf("Test %d, unexpected max value, got %d, want %d", i, iw.lastID, suite.expMax)
}
// Re-fill the elements
var maxElem uint64
for elem := suite.limit + 1; elem < indexBlockEntriesCap*4; elem++ {
if err := iw.append(elem); err != nil {
t.Fatalf("Failed to append value %d: %v", elem, err)
}
maxElem = elem
}
if iw.lastID != maxElem {
t.Fatalf("Test %d, unexpected max value, got %d, want %d", i, iw.lastID, maxElem)
}
}
}
func TestIndexDeleterBasic(t *testing.T) {
db := rawdb.NewMemoryDatabase()
iw, _ := newIndexWriter(db, newAccountIdent(common.Hash{0xa}), 0)
var maxElem uint64
for i := 0; i < indexBlockEntriesCap*4; i++ {
iw.append(uint64(i + 1))
maxElem = uint64(i + 1)
}
batch := db.NewBatch()
iw.finish(batch)
batch.Write()
// Delete unknown id, the request should be rejected
id, _ := newIndexDeleter(db, newAccountIdent(common.Hash{0xa}))
id, _ := newIndexDeleter(db, newAccountIdent(common.Hash{0xa}), maxElem)
if err := id.pop(indexBlockEntriesCap * 5); err == nil {
t.Fatal("Expect error to occur for unknown id")
}
@ -168,10 +239,66 @@ func TestIndexWriterDelete(t *testing.T) {
if id.lastID != uint64(i-1) {
t.Fatalf("Unexpected lastID, want: %d, got: %d", uint64(i-1), iw.lastID)
}
if rand.Intn(10) == 0 {
batch := db.NewBatch()
id.finish(batch)
batch.Write()
}
}
// TestIndexDeleterWithLimit checks that a deleter opened against a persisted
// index with a limit sees the index truncated to that limit, and that every
// remaining element can be popped one by one down to an empty index.
func TestIndexDeleterWithLimit(t *testing.T) {
	db := rawdb.NewMemoryDatabase()
	writer, _ := newIndexWriter(db, newAccountIdent(common.Hash{0xa}), 0)

	// Populate and persist two full blocks worth of elements.
	var maxElem uint64
	for n := 1; n <= indexBlockEntriesCap*2; n++ {
		writer.append(uint64(n))
		maxElem = uint64(n)
	}
	batch := db.NewBatch()
	writer.finish(batch)
	batch.Write()

	cases := []struct {
		limit  uint64
		expMax uint64
	}{
		// nothing to truncate
		{limit: maxElem, expMax: maxElem},

		// truncate the last element
		{limit: maxElem - 1, expMax: maxElem - 1},

		// truncation around the block boundary
		{limit: uint64(indexBlockEntriesCap + 1), expMax: uint64(indexBlockEntriesCap + 1)},

		// truncation around the block boundary
		{limit: uint64(indexBlockEntriesCap), expMax: uint64(indexBlockEntriesCap)},

		{limit: uint64(1), expMax: uint64(1)},

		// truncate the entire index, it's in theory invalid
		{limit: uint64(0), expMax: uint64(0)},
	}
	for i, tc := range cases {
		deleter, err := newIndexDeleter(db, newAccountIdent(common.Hash{0xa}), tc.limit)
		if err != nil {
			t.Fatalf("Failed to construct the index writer, %v", err)
		}
		if deleter.lastID != tc.expMax {
			t.Fatalf("Test %d, unexpected max value, got %d, want %d", i, deleter.lastID, tc.expMax)
		}
		// Drain the remaining elements from the highest id downwards
		for elem := deleter.lastID; elem > 0; elem-- {
			if err := deleter.pop(elem); err != nil {
				t.Fatalf("Failed to pop value %d: %v", elem, err)
			}
		}
	}
}

View file

@ -40,11 +40,6 @@ const (
stateHistoryIndexVersion = stateHistoryIndexV0 // the current state index version
trienodeHistoryIndexV0 = uint8(0) // initial version of trienode index structure
trienodeHistoryIndexVersion = trienodeHistoryIndexV0 // the current trienode index version
// estimations for calculating the batch size for atomic database commit
estimatedStateHistoryIndexSize = 3 // The average size of each state history index entry is approximately 23 bytes
estimatedTrienodeHistoryIndexSize = 3 // The average size of each trienode history index entry is approximately 2-3 bytes
estimatedIndexBatchSizeFactor = 32 // The factor counts for the write amplification for each entry
)
// indexVersion returns the latest index version for the given history type.
@ -155,22 +150,6 @@ func (b *batchIndexer) process(h history, id uint64) error {
return b.finish(false)
}
// makeBatch constructs a database batch based on the number of pending entries.
// The batch size is roughly estimated to minimize repeated resizing rounds,
// as accurately predicting the exact size is technically challenging.
func (b *batchIndexer) makeBatch() ethdb.Batch {
var size int
switch b.typ {
case typeStateHistory:
size = estimatedStateHistoryIndexSize
case typeTrienodeHistory:
size = estimatedTrienodeHistoryIndexSize
default:
panic(fmt.Sprintf("unknown history type %d", b.typ))
}
return b.db.NewBatchWithSize(size * estimatedIndexBatchSizeFactor * b.pending)
}
// finish writes the accumulated state indexes into the disk if either the
// memory limitation is reached or it's requested forcibly.
func (b *batchIndexer) finish(force bool) error {
@ -181,17 +160,38 @@ func (b *batchIndexer) finish(force bool) error {
return nil
}
var (
batch = b.makeBatch()
batchMu sync.RWMutex
start = time.Now()
eg errgroup.Group
start = time.Now()
eg errgroup.Group
batch = b.db.NewBatchWithSize(ethdb.IdealBatchSize)
batchSize int
batchMu sync.RWMutex
writeBatch = func(fn func(batch ethdb.Batch)) error {
batchMu.Lock()
defer batchMu.Unlock()
fn(batch)
if batch.ValueSize() >= ethdb.IdealBatchSize {
batchSize += batch.ValueSize()
if err := batch.Write(); err != nil {
return err
}
batch.Reset()
}
return nil
}
)
eg.SetLimit(runtime.NumCPU())
var indexed uint64
if metadata := loadIndexMetadata(b.db, b.typ); metadata != nil {
indexed = metadata.Last
}
for ident, list := range b.index {
eg.Go(func() error {
if !b.delete {
iw, err := newIndexWriter(b.db, ident)
iw, err := newIndexWriter(b.db, ident, indexed)
if err != nil {
return err
}
@ -200,11 +200,11 @@ func (b *batchIndexer) finish(force bool) error {
return err
}
}
batchMu.Lock()
iw.finish(batch)
batchMu.Unlock()
return writeBatch(func(batch ethdb.Batch) {
iw.finish(batch)
})
} else {
id, err := newIndexDeleter(b.db, ident)
id, err := newIndexDeleter(b.db, ident, indexed)
if err != nil {
return err
}
@ -213,11 +213,10 @@ func (b *batchIndexer) finish(force bool) error {
return err
}
}
batchMu.Lock()
id.finish(batch)
batchMu.Unlock()
return writeBatch(func(batch ethdb.Batch) {
id.finish(batch)
})
}
return nil
})
}
if err := eg.Wait(); err != nil {
@ -233,10 +232,12 @@ func (b *batchIndexer) finish(force bool) error {
storeIndexMetadata(batch, b.typ, b.lastID-1)
}
}
batchSize += batch.ValueSize()
if err := batch.Write(); err != nil {
return err
}
log.Debug("Committed batch indexer", "type", b.typ, "entries", len(b.index), "records", b.pending, "elapsed", common.PrettyDuration(time.Since(start)))
log.Debug("Committed batch indexer", "type", b.typ, "entries", len(b.index), "records", b.pending, "size", common.StorageSize(batchSize), "elapsed", common.PrettyDuration(time.Since(start)))
b.pending = 0
b.index = make(map[stateIdent][]uint64)
return nil

View file

@ -33,6 +33,13 @@ func storageKey(accountHash common.Hash, slotHash common.Hash) [64]byte {
return key
}
// storageKeySlice returns the unique identifier of the given storage slot,
// exposed as a byte slice rather than a fixed-size array.
func storageKeySlice(accountHash common.Hash, slotHash common.Hash) []byte {
	arr := storageKey(accountHash, slotHash)
	return arr[:]
}
// lookup is an internal structure used to efficiently determine the layer in
// which a state entry resides.
type lookup struct {