cmd/evm/internal/t8ntool, trie: support for verkle-at-genesis, use UBT, and move the transition tree to its own package (#32445)
Some checks are pending
/ Linux Build (push) Waiting to run
/ Linux Build (arm) (push) Waiting to run
/ Keeper Build (push) Waiting to run
/ Windows Build (push) Waiting to run
/ Docker Image (push) Waiting to run

This is broken off of #31730 to only focus on testing networks that
start with verkle at genesis.

The PR has seen a lot of work since its creation, and it now targets
creating and re-executing tests for a binary tree testnet without the
transition (so it starts at genesis). The transition tree has been moved
to its own package. It also replaces verkle with the binary tree for
this specific application.

---------

Co-authored-by: Gary Rong <garyrong0905@gmail.com>
This commit is contained in:
Guillaume Ballet 2025-11-14 15:25:30 +01:00 committed by GitHub
parent 81c5b43029
commit 2a2f106a01
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
30 changed files with 1142 additions and 1468 deletions

View file

@ -18,6 +18,7 @@ package t8ntool
import (
"fmt"
stdmath "math"
"math/big"
"github.com/ethereum/go-ethereum/common"
@ -43,8 +44,9 @@ import (
)
type Prestate struct {
Env stEnv `json:"env"`
Pre types.GenesisAlloc `json:"pre"`
Env stEnv `json:"env"`
Pre types.GenesisAlloc `json:"pre"`
TreeLeaves map[common.Hash]hexutil.Bytes `json:"vkt,omitempty"`
}
//go:generate go run github.com/fjl/gencodec -type ExecutionResult -field-override executionResultMarshaling -out gen_execresult.go
@ -142,7 +144,8 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig,
return h
}
var (
statedb = MakePreState(rawdb.NewMemoryDatabase(), pre.Pre)
isEIP4762 = chainConfig.IsVerkle(big.NewInt(int64(pre.Env.Number)), pre.Env.Timestamp)
statedb = MakePreState(rawdb.NewMemoryDatabase(), pre.Pre, isEIP4762)
signer = types.MakeSigner(chainConfig, new(big.Int).SetUint64(pre.Env.Number), pre.Env.Timestamp)
gaspool = new(core.GasPool)
blockHash = common.Hash{0x13, 0x37}
@ -301,6 +304,10 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig,
// Amount is in gwei, turn into wei
amount := new(big.Int).Mul(new(big.Int).SetUint64(w.Amount), big.NewInt(params.GWei))
statedb.AddBalance(w.Address, uint256.MustFromBig(amount), tracing.BalanceIncreaseWithdrawal)
if isEIP4762 {
statedb.AccessEvents().AddAccount(w.Address, true, stdmath.MaxUint64)
}
}
// Gather the execution-layer triggered requests.
@ -361,8 +368,7 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig,
execRs.Requests = requests
}
// Re-create statedb instance with new root upon the updated database
// for accessing latest states.
// Re-create statedb instance with new root for MPT mode
statedb, err = state.New(root, statedb.Database())
if err != nil {
return nil, nil, nil, NewError(ErrorEVM, fmt.Errorf("could not reopen state: %v", err))
@ -371,12 +377,17 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig,
return statedb, execRs, body, nil
}
func MakePreState(db ethdb.Database, accounts types.GenesisAlloc) *state.StateDB {
tdb := triedb.NewDatabase(db, &triedb.Config{Preimages: true})
func MakePreState(db ethdb.Database, accounts types.GenesisAlloc, isBintrie bool) *state.StateDB {
tdb := triedb.NewDatabase(db, &triedb.Config{Preimages: true, IsVerkle: isBintrie})
sdb := state.NewDatabase(tdb, nil)
statedb, err := state.New(types.EmptyRootHash, sdb)
root := types.EmptyRootHash
if isBintrie {
root = types.EmptyBinaryHash
}
statedb, err := state.New(root, sdb)
if err != nil {
panic(fmt.Errorf("failed to create initial state: %v", err))
panic(fmt.Errorf("failed to create initial statedb: %v", err))
}
for addr, a := range accounts {
statedb.SetCode(addr, a.Code, tracing.CodeChangeUnspecified)
@ -387,10 +398,15 @@ func MakePreState(db ethdb.Database, accounts types.GenesisAlloc) *state.StateDB
}
}
// Commit and re-open to start with a clean state.
root, err := statedb.Commit(0, false, false)
root, err = statedb.Commit(0, false, false)
if err != nil {
panic(fmt.Errorf("failed to commit initial state: %v", err))
}
// If bintrie mode started, check if conversion happened
if isBintrie {
return statedb
}
// For MPT mode, reopen the state with the committed root
statedb, err = state.New(root, sdb)
if err != nil {
panic(fmt.Errorf("failed to reopen state after commit: %v", err))
@ -398,7 +414,7 @@ func MakePreState(db ethdb.Database, accounts types.GenesisAlloc) *state.StateDB
return statedb
}
func rlpHash(x interface{}) (h common.Hash) {
func rlpHash(x any) (h common.Hash) {
hw := sha3.NewLegacyKeccak256()
rlp.Encode(hw, x)
hw.Sum(h[:0])

View file

@ -88,6 +88,14 @@ var (
"\t<file> - into the file <file> ",
Value: "block.json",
}
OutputBTFlag = &cli.StringFlag{
Name: "output.vkt",
Usage: "Determines where to put the `BT` of the post-state.\n" +
"\t`stdout` - into the stdout output\n" +
"\t`stderr` - into the stderr output\n" +
"\t<file> - into the file <file> ",
Value: "vkt.json",
}
InputAllocFlag = &cli.StringFlag{
Name: "input.alloc",
Usage: "`stdin` or file name of where to find the prestate alloc to use.",
@ -123,6 +131,11 @@ var (
Usage: "`stdin` or file name of where to find the transactions list in RLP form.",
Value: "txs.rlp",
}
// TODO(@CPerezz): rename `Name` of the file in a follow-up PR (relies on EEST -> https://github.com/ethereum/execution-spec-tests/tree/verkle/main)
InputBTFlag = &cli.StringFlag{
Name: "input.vkt",
Usage: "`stdin` or file name of where to find the prestate BT.",
}
SealCliqueFlag = &cli.StringFlag{
Name: "seal.clique",
Usage: "Seal block with Clique. `stdin` or file name of where to find the Clique sealing data.",

View file

@ -28,15 +28,22 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/consensus/misc/eip1559"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/tracing"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/eth/tracers"
"github.com/ethereum/go-ethereum/eth/tracers/logger"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/tests"
"github.com/ethereum/go-ethereum/trie/bintrie"
"github.com/ethereum/go-ethereum/triedb"
"github.com/ethereum/go-ethereum/triedb/database"
"github.com/holiman/uint256"
"github.com/urfave/cli/v2"
)
@ -75,10 +82,11 @@ var (
)
type input struct {
Alloc types.GenesisAlloc `json:"alloc,omitempty"`
Env *stEnv `json:"env,omitempty"`
Txs []*txWithKey `json:"txs,omitempty"`
TxRlp string `json:"txsRlp,omitempty"`
Alloc types.GenesisAlloc `json:"alloc,omitempty"`
Env *stEnv `json:"env,omitempty"`
BT map[common.Hash]hexutil.Bytes `json:"vkt,omitempty"`
Txs []*txWithKey `json:"txs,omitempty"`
TxRlp string `json:"txsRlp,omitempty"`
}
func Transition(ctx *cli.Context) error {
@ -90,16 +98,16 @@ func Transition(ctx *cli.Context) error {
// stdin input or in files.
// Check if anything needs to be read from stdin
var (
prestate Prestate
txIt txIterator // txs to apply
allocStr = ctx.String(InputAllocFlag.Name)
prestate Prestate
txIt txIterator // txs to apply
allocStr = ctx.String(InputAllocFlag.Name)
btStr = ctx.String(InputBTFlag.Name)
envStr = ctx.String(InputEnvFlag.Name)
txStr = ctx.String(InputTxsFlag.Name)
inputData = &input{}
)
// Figure out the prestate alloc
if allocStr == stdinSelector || envStr == stdinSelector || txStr == stdinSelector {
if allocStr == stdinSelector || btStr == stdinSelector || envStr == stdinSelector || txStr == stdinSelector {
decoder := json.NewDecoder(os.Stdin)
if err := decoder.Decode(inputData); err != nil {
return NewError(ErrorJson, fmt.Errorf("failed unmarshalling stdin: %v", err))
@ -112,6 +120,13 @@ func Transition(ctx *cli.Context) error {
}
prestate.Pre = inputData.Alloc
if btStr != stdinSelector && btStr != "" {
if err := readFile(btStr, "BT", &inputData.BT); err != nil {
return err
}
}
prestate.TreeLeaves = inputData.BT
// Set the block environment
if envStr != stdinSelector {
var env stEnv
@ -182,9 +197,21 @@ func Transition(ctx *cli.Context) error {
return err
}
// Dump the execution result
collector := make(Alloc)
s.DumpToCollector(collector, nil)
return dispatchOutput(ctx, baseDir, result, collector, body)
var (
collector = make(Alloc)
btleaves map[common.Hash]hexutil.Bytes
)
isBinary := chainConfig.IsVerkle(big.NewInt(int64(prestate.Env.Number)), prestate.Env.Timestamp)
if !isBinary {
s.DumpToCollector(collector, nil)
} else {
btleaves = make(map[common.Hash]hexutil.Bytes)
if err := s.DumpBinTrieLeaves(btleaves); err != nil {
return err
}
}
return dispatchOutput(ctx, baseDir, result, collector, body, btleaves)
}
func applyLondonChecks(env *stEnv, chainConfig *params.ChainConfig) error {
@ -306,7 +333,7 @@ func saveFile(baseDir, filename string, data interface{}) error {
// dispatchOutput writes the output data to either stderr or stdout, or to the specified
// files
func dispatchOutput(ctx *cli.Context, baseDir string, result *ExecutionResult, alloc Alloc, body hexutil.Bytes) error {
func dispatchOutput(ctx *cli.Context, baseDir string, result *ExecutionResult, alloc Alloc, body hexutil.Bytes, bt map[common.Hash]hexutil.Bytes) error {
stdOutObject := make(map[string]interface{})
stdErrObject := make(map[string]interface{})
dispatch := func(baseDir, fName, name string, obj interface{}) error {
@ -333,6 +360,13 @@ func dispatchOutput(ctx *cli.Context, baseDir string, result *ExecutionResult, a
if err := dispatch(baseDir, ctx.String(OutputBodyFlag.Name), "body", body); err != nil {
return err
}
// Only write bt output if we actually have binary trie leaves
if bt != nil {
if err := dispatch(baseDir, ctx.String(OutputBTFlag.Name), "vkt", bt); err != nil {
return err
}
}
if len(stdOutObject) > 0 {
b, err := json.MarshalIndent(stdOutObject, "", " ")
if err != nil {
@ -351,3 +385,168 @@ func dispatchOutput(ctx *cli.Context, baseDir string, result *ExecutionResult, a
}
return nil
}
// BinKey computes the tree key given an address and an optional slot number.
// With only an address, it prints the basic-data key; with an address and a
// slot, it prints the storage-slot key.
func BinKey(ctx *cli.Context) error {
	args := ctx.Args()
	if args.Len() == 0 || args.Len() > 2 {
		return errors.New("invalid number of arguments: expecting an address and an optional slot number")
	}
	addrBytes, err := hexutil.Decode(args.Get(0))
	if err != nil {
		return fmt.Errorf("error decoding address: %w", err)
	}
	addr := common.BytesToAddress(addrBytes)
	// No slot argument: emit the account's basic-data key.
	if args.Len() < 2 {
		fmt.Printf("%#x\n", bintrie.GetBinaryTreeKeyBasicData(addr))
		return nil
	}
	slot, err := hexutil.Decode(args.Get(1))
	if err != nil {
		return fmt.Errorf("error decoding slot: %w", err)
	}
	fmt.Printf("%#x\n", bintrie.GetBinaryTreeKeyStorageSlot(addr, slot))
	return nil
}
// BinKeys computes a set of tree keys given a genesis alloc.
// The alloc is read from stdin or from the file named by --input.alloc, a
// binary trie is built from it, and every leaf (key -> value) is printed as
// indented JSON on stdout.
func BinKeys(ctx *cli.Context) error {
	var (
		allocStr = ctx.String(InputAllocFlag.Name)
		alloc    core.GenesisAlloc
	)
	// Figure out the prestate alloc: either stdin or a named file.
	if allocStr == stdinSelector {
		decoder := json.NewDecoder(os.Stdin)
		if err := decoder.Decode(&alloc); err != nil {
			return NewError(ErrorJson, fmt.Errorf("failed unmarshaling stdin: %v", err))
		}
	} else {
		if err := readFile(allocStr, "alloc", &alloc); err != nil {
			return err
		}
	}
	db := triedb.NewDatabase(rawdb.NewMemoryDatabase(), triedb.VerkleDefaults)
	defer db.Close()

	tree, err := genBinTrieFromAlloc(alloc, db)
	if err != nil {
		return fmt.Errorf("error generating bt: %w", err)
	}
	// Walk the whole trie and collect the leaves.
	leaves := make(map[common.Hash]hexutil.Bytes)
	nodeIt, err := tree.NodeIterator(nil)
	if err != nil {
		panic(err)
	}
	for nodeIt.Next(true) {
		if nodeIt.Leaf() {
			leaves[common.BytesToHash(nodeIt.LeafKey())] = nodeIt.LeafBlob()
		}
	}
	output, err := json.MarshalIndent(leaves, "", "")
	if err != nil {
		return fmt.Errorf("error outputting tree: %w", err)
	}
	fmt.Println(string(output))
	return nil
}
// BinTrieRoot computes the root of a Binary Trie from a genesis alloc.
// The alloc is read from stdin or from the file named by --input.alloc and the
// resulting trie root hash is printed on stdout.
func BinTrieRoot(ctx *cli.Context) error {
	var (
		allocStr = ctx.String(InputAllocFlag.Name)
		alloc    core.GenesisAlloc
	)
	// Load the alloc, from stdin or from a named file.
	if allocStr == stdinSelector {
		decoder := json.NewDecoder(os.Stdin)
		if err := decoder.Decode(&alloc); err != nil {
			return NewError(ErrorJson, fmt.Errorf("failed unmarshaling stdin: %v", err))
		}
	} else {
		if err := readFile(allocStr, "alloc", &alloc); err != nil {
			return err
		}
	}
	db := triedb.NewDatabase(rawdb.NewMemoryDatabase(), triedb.VerkleDefaults)
	defer db.Close()

	tree, err := genBinTrieFromAlloc(alloc, db)
	if err != nil {
		return fmt.Errorf("error generating bt: %w", err)
	}
	fmt.Println(tree.Hash().Hex())
	return nil
}
// genBinTrieFromAlloc builds a fresh binary trie from a genesis alloc,
// inserting each account's storage slots, account data and contract code.
// It starts from the empty binary-tree root and returns the populated trie.
// TODO(@CPerezz): Should this go to `bintrie` module?
func genBinTrieFromAlloc(alloc core.GenesisAlloc, db database.NodeDatabase) (*bintrie.BinaryTrie, error) {
	bt, err := bintrie.NewBinaryTrie(types.EmptyBinaryHash, db)
	if err != nil {
		return nil, err
	}
	for addr, acc := range alloc {
		// Insert the storage slots first, then the account itself.
		for slot, value := range acc.Storage {
			err := bt.UpdateStorage(addr, slot.Bytes(), value.Big().Bytes())
			if err != nil {
				return nil, fmt.Errorf("error inserting storage: %w", err)
			}
		}
		// Root is left zero here; the binary tree keys storage directly,
		// so the account carries no separate storage root.
		account := &types.StateAccount{
			Balance:  uint256.MustFromBig(acc.Balance),
			Nonce:    acc.Nonce,
			CodeHash: crypto.Keccak256Hash(acc.Code).Bytes(),
			Root:     common.Hash{},
		}
		// The code length is passed alongside the account so it can be
		// recorded in the account's basic data.
		err := bt.UpdateAccount(addr, account, len(acc.Code))
		if err != nil {
			return nil, fmt.Errorf("error inserting account: %w", err)
		}
		err = bt.UpdateContractCode(addr, common.BytesToHash(account.CodeHash), acc.Code)
		if err != nil {
			return nil, fmt.Errorf("error inserting code: %w", err)
		}
	}
	return bt, nil
}
// BinaryCodeChunkKey computes the tree key of a code-chunk for a given address.
// It expects exactly two hex arguments: the address and the chunk number.
func BinaryCodeChunkKey(ctx *cli.Context) error {
	// Both arguments are mandatory. The previous guard
	// (Len() == 0 || Len() > 2) let a single argument through, which then
	// failed on Get(1) with a confusing empty-hex decode error.
	if ctx.Args().Len() != 2 {
		return errors.New("invalid number of arguments: expecting an address and a code-chunk number")
	}
	addr, err := hexutil.Decode(ctx.Args().Get(0))
	if err != nil {
		return fmt.Errorf("error decoding address: %w", err)
	}
	chunkNumberBytes, err := hexutil.Decode(ctx.Args().Get(1))
	if err != nil {
		return fmt.Errorf("error decoding chunk number: %w", err)
	}
	var chunkNumber uint256.Int
	chunkNumber.SetBytes(chunkNumberBytes)
	fmt.Printf("%#x\n", bintrie.GetBinaryTreeKeyCodeChunk(common.BytesToAddress(addr), &chunkNumber))
	return nil
}
// BinaryCodeChunkCode returns the code chunkification for a given code.
// It expects exactly one hex argument: the bytecode to chunkify.
func BinaryCodeChunkCode(ctx *cli.Context) error {
	if ctx.Args().Len() != 1 {
		return errors.New("invalid number of arguments: expecting a bytecode")
	}
	bytecode, err := hexutil.Decode(ctx.Args().Get(0))
	if err != nil {
		// The argument decoded here is the bytecode; the previous message
		// said "error decoding address" (copy-paste mistake).
		return fmt.Errorf("error decoding bytecode: %w", err)
	}
	chunkedCode := bintrie.ChunkifyCode(bytecode)
	fmt.Printf("%#x\n", chunkedCode)
	return nil
}

View file

@ -146,16 +146,63 @@ var (
t8ntool.TraceEnableCallFramesFlag,
t8ntool.OutputBasedir,
t8ntool.OutputAllocFlag,
t8ntool.OutputBTFlag,
t8ntool.OutputResultFlag,
t8ntool.OutputBodyFlag,
t8ntool.InputAllocFlag,
t8ntool.InputEnvFlag,
t8ntool.InputBTFlag,
t8ntool.InputTxsFlag,
t8ntool.ForknameFlag,
t8ntool.ChainIDFlag,
t8ntool.RewardFlag,
},
}
verkleCommand = &cli.Command{
Name: "verkle",
Aliases: []string{"vkt"},
Usage: "Binary Trie helpers",
Subcommands: []*cli.Command{
{
Name: "tree-keys",
Aliases: []string{"v"},
Usage: "compute a set of binary trie keys, given their source addresses and optional slot numbers",
Action: t8ntool.BinKeys,
Flags: []cli.Flag{
t8ntool.InputAllocFlag,
},
},
{
Name: "single-key",
Aliases: []string{"vk"},
Usage: "compute the binary trie key given an address and optional slot number",
Action: t8ntool.BinKey,
},
{
Name: "code-chunk-key",
Aliases: []string{"vck"},
Usage: "compute the binary trie key given an address and chunk number",
Action: t8ntool.BinaryCodeChunkKey,
},
{
Name: "chunkify-code",
Aliases: []string{"vcc"},
Usage: "chunkify a given bytecode for a binary trie",
Action: t8ntool.BinaryCodeChunkCode,
},
{
Name: "state-root",
Aliases: []string{"vsr"},
Usage: "compute the state-root of a binary trie for the given alloc",
Action: t8ntool.BinTrieRoot,
Flags: []cli.Flag{
t8ntool.InputAllocFlag,
},
},
},
}
transactionCommand = &cli.Command{
Name: "transaction",
Aliases: []string{"t9n"},
@ -210,6 +257,7 @@ func init() {
stateTransitionCommand,
transactionCommand,
blockBuilderCommand,
verkleCommand,
}
app.Before = func(ctx *cli.Context) error {
flags.MigrateGlobalFlags(ctx)

View file

@ -0,0 +1,237 @@
// Copyright 2024 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package core
import (
"encoding/binary"
"fmt"
"math/big"
"testing"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/consensus/beacon"
"github.com/ethereum/go-ethereum/consensus/ethash"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/tracing"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/triedb"
)
var (
	// testVerkleChainConfig activates every pre-merge fork at block 0 and
	// sets both ShanghaiTime and VerkleTime to 0 with EnableVerkleAtGenesis,
	// so chains built on it use the verkle state tree from the genesis block.
	testVerkleChainConfig = &params.ChainConfig{
		ChainID:                 big.NewInt(1),
		HomesteadBlock:          big.NewInt(0),
		EIP150Block:             big.NewInt(0),
		EIP155Block:             big.NewInt(0),
		EIP158Block:             big.NewInt(0),
		ByzantiumBlock:          big.NewInt(0),
		ConstantinopleBlock:     big.NewInt(0),
		PetersburgBlock:         big.NewInt(0),
		IstanbulBlock:           big.NewInt(0),
		MuirGlacierBlock:        big.NewInt(0),
		BerlinBlock:             big.NewInt(0),
		LondonBlock:             big.NewInt(0),
		Ethash:                  new(params.EthashConfig),
		ShanghaiTime:            u64(0),
		VerkleTime:              u64(0),
		TerminalTotalDifficulty: common.Big0,
		EnableVerkleAtGenesis:   true,
		BlobScheduleConfig: &params.BlobScheduleConfig{
			Verkle: params.DefaultPragueBlobConfig,
		},
	}
)
// TestProcessVerkle generates a two-block chain on a verkle-at-genesis chain
// config, inserts it into a fresh blockchain, and checks that each block's
// gas usage matches the hand-computed sum of intrinsic, execution and
// witness-access (chunk/branch read+write) costs.
func TestProcessVerkle(t *testing.T) {
	var (
		code                            = common.FromHex(`6060604052600a8060106000396000f360606040526008565b00`)
		intrinsicContractCreationGas, _ = IntrinsicGas(code, nil, nil, true, true, true, true)
		// A contract creation that calls EXTCODECOPY in the constructor. Used to ensure that the witness
		// will not contain that copied data.
		// Source: https://gist.github.com/gballet/a23db1e1cb4ed105616b5920feb75985
		codeWithExtCodeCopy                = common.FromHex(`0x60806040526040516100109061017b565b604051809103906000f08015801561002c573d6000803e3d6000fd5b506000806101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff16021790555034801561007857600080fd5b5060008067ffffffffffffffff8111156100955761009461024a565b5b6040519080825280601f01601f1916602001820160405280156100c75781602001600182028036833780820191505090505b50905060008060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1690506020600083833c81610101906101e3565b60405161010d90610187565b61011791906101a3565b604051809103906000f080158015610133573d6000803e3d6000fd5b50600160006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff160217905550505061029b565b60d58061046783390190565b6102068061053c83390190565b61019d816101d9565b82525050565b60006020820190506101b86000830184610194565b92915050565b6000819050602082019050919050565b600081519050919050565b6000819050919050565b60006101ee826101ce565b826101f8846101be565b905061020381610279565b925060208210156102435761023e7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff8360200360080261028e565b831692505b5050919050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b600061028582516101d9565b80915050919050565b600082821b905092915050565b6101bd806102aa6000396000f3fe608060405234801561001057600080fd5b506004361061002b5760003560e01c8063f566852414610030575b600080fd5b61003861004e565b6040516100459190610146565b60405180910390f35b6000600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff166381ca91d36040518163ffffffff1660e01b815260040160206040518083038186803b1580156100b857600080fd5b505afa1580156100cc573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906100f0919061010a565b905090565b60008151905061010481610170565b92915050565b6000602082840312156101205761011f61016b565b5b600061012e848285016100f5565b91505092915050565b61014081610161565b82525050565b600060208201905061015b6000830184610137565b92915050565b6000819050919050565b600080fd5b61017981610161565b811461018457600080fd5b5056fea2646970667358221220a6a0e11af79f176f9c421b7b12f441356b25f6489b83d38cc828a701720b41f164736f6c63430008070033608060405234801561001057600080fd5b5060b68061001f6000396000f3fe6080604052348015600f57600080fd5b506004361060285760003560e01c8063ab5ed15014602d575b600080fd5b60336047565b604051603e9190605d565b60405180910390f35b60006001905090565b6057816076565b82525050565b6000602082019050607060008301846050565b92915050565b600081905091905056fea26469706673582212203a14eb0d5cd07c277d3e24912f110ddda3e553245a99afc4eeefb2fbae5327aa64736f6c63430008070033608060405234801561001057600080fd5b5060405161020638038061020683398181016040528101906100329190610063565b60018160001c6100429190610090565b60008190555050610145565b60008151905061005d8161012e565b92915050565b60006020828403121561007957610078610129565b5b60006100878482850161004e565b91505092915050565b600061009b826100f0565b91506100a6836100f0565b9250827fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff038211156100db576100da6100fa565b5b828201905092915050565b6000819050919050565b6000819050919050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b600080fd5b610137816100e6565b811461014257600080fd5b50565b60b3806101536000396000f3fe6080604052348015600f57600080fd5b506004361060285760003560e01c806381ca91d314602d575b600080fd5b60336047565b604051603e9190605a565b60405180910390f35b60005481565b6054816073565b82525050565b6000602082019050606d6000830184604d565b92915050565b600081905091905056fea26469706673582212209bff7098a2f526de1ad499866f27d6d0d6f17b74a413036d6063ca6a0998ca4264736f6c63430008070033`)
		intrinsicCodeWithExtCodeCopyGas, _ = IntrinsicGas(codeWithExtCodeCopy, nil, nil, true, true, true, true)
		signer                             = types.LatestSigner(testVerkleChainConfig)
		testKey, _                         = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
		bcdb                               = rawdb.NewMemoryDatabase() // Database for the blockchain
		coinbase                           = common.HexToAddress("0x71562b71999873DB5b286dF957af199Ec94617F7")
		gspec                              = &Genesis{
			Config: testVerkleChainConfig,
			Alloc: GenesisAlloc{
				coinbase: {
					Balance: big.NewInt(1000000000000000000), // 1 ether
					Nonce:   0,
				},
				// Pre-deployed system contracts.
				params.BeaconRootsAddress:        {Nonce: 1, Code: params.BeaconRootsCode, Balance: common.Big0},
				params.HistoryStorageAddress:     {Nonce: 1, Code: params.HistoryStorageCode, Balance: common.Big0},
				params.WithdrawalQueueAddress:    {Nonce: 1, Code: params.WithdrawalQueueCode, Balance: common.Big0},
				params.ConsolidationQueueAddress: {Nonce: 1, Code: params.ConsolidationQueueCode, Balance: common.Big0},
			},
		}
	)
	// Verkle trees use the snapshot, which must be enabled before the
	// data is saved into the tree+database.
	// genesis := gspec.MustCommit(bcdb, triedb)
	options := DefaultConfig().WithStateScheme(rawdb.PathScheme)
	options.SnapshotLimit = 0
	blockchain, _ := NewBlockChain(bcdb, gspec, beacon.New(ethash.NewFaker()), options)
	defer blockchain.Stop()

	txCost1 := params.TxGas
	txCost2 := params.TxGas
	// Hand-computed total cost of the simple contract creation, including
	// per-chunk and per-branch witness read/write charges.
	contractCreationCost := intrinsicContractCreationGas +
		params.WitnessChunkReadCost + params.WitnessChunkWriteCost + params.WitnessBranchReadCost + params.WitnessBranchWriteCost + /* creation */
		params.WitnessChunkReadCost + params.WitnessChunkWriteCost + /* creation with value */
		739 /* execution costs */
	// Hand-computed total cost of the EXTCODECOPY contract creation, which
	// itself performs two nested CREATEs.
	codeWithExtCodeCopyGas := intrinsicCodeWithExtCodeCopyGas +
		params.WitnessChunkReadCost + params.WitnessChunkWriteCost + params.WitnessBranchReadCost + params.WitnessBranchWriteCost + /* creation (tx) */
		params.WitnessChunkReadCost + params.WitnessChunkWriteCost + params.WitnessBranchReadCost + params.WitnessBranchWriteCost + /* creation (CREATE at pc=0x20) */
		params.WitnessChunkReadCost + params.WitnessChunkWriteCost + /* write code hash */
		params.WitnessChunkReadCost + params.WitnessChunkWriteCost + /* code chunk #0 */
		params.WitnessChunkReadCost + params.WitnessChunkWriteCost + /* code chunk #1 */
		params.WitnessChunkReadCost + params.WitnessChunkWriteCost + /* code chunk #2 */
		params.WitnessChunkReadCost + params.WitnessChunkWriteCost + /* code chunk #3 */
		params.WitnessChunkReadCost + params.WitnessChunkWriteCost + /* code chunk #4 */
		params.WitnessChunkReadCost + params.WitnessChunkWriteCost + /* code chunk #5 */
		params.WitnessChunkReadCost + /* SLOAD in constructor */
		params.WitnessChunkWriteCost + /* SSTORE in constructor */
		params.WitnessChunkReadCost + params.WitnessChunkWriteCost + params.WitnessBranchReadCost + params.WitnessBranchWriteCost + /* creation (CREATE at PC=0x121) */
		params.WitnessChunkReadCost + params.WitnessChunkWriteCost + /* write code hash */
		params.WitnessChunkReadCost + params.WitnessChunkWriteCost + /* code chunk #0 */
		params.WitnessChunkReadCost + params.WitnessChunkWriteCost + /* code chunk #1 */
		params.WitnessChunkReadCost + params.WitnessChunkWriteCost + /* code chunk #2 */
		params.WitnessChunkReadCost + params.WitnessChunkWriteCost + /* code chunk #3 */
		params.WitnessChunkReadCost + params.WitnessChunkWriteCost + /* code chunk #4 */
		params.WitnessChunkReadCost + params.WitnessChunkWriteCost + /* code chunk #5 */
		params.WitnessChunkReadCost + /* SLOAD in constructor */
		params.WitnessChunkWriteCost + /* SSTORE in constructor */
		params.WitnessChunkReadCost + params.WitnessChunkWriteCost + /* write code hash for tx creation */
		15*(params.WitnessChunkReadCost+params.WitnessChunkWriteCost) + /* code chunks #0..#14 */
		uint64(4844) /* execution costs */
	// Block 1 carries three simple transfers; block 2 adds the two
	// contract-creation transactions on top of the same three transfers.
	blockGasUsagesExpected := []uint64{
		txCost1*2 + txCost2,
		txCost1*2 + txCost2 + contractCreationCost + codeWithExtCodeCopyGas,
	}
	_, chain, _ := GenerateChainWithGenesis(gspec, beacon.New(ethash.NewFaker()), 2, func(i int, gen *BlockGen) {
		gen.SetPoS()

		// TODO need to check that the tx cost provided is the exact amount used (no remaining left-over)
		tx, _ := types.SignTx(types.NewTransaction(uint64(i)*3, common.Address{byte(i), 2, 3}, big.NewInt(999), txCost1, big.NewInt(875000000), nil), signer, testKey)
		gen.AddTx(tx)
		tx, _ = types.SignTx(types.NewTransaction(uint64(i)*3+1, common.Address{}, big.NewInt(999), txCost1, big.NewInt(875000000), nil), signer, testKey)
		gen.AddTx(tx)
		tx, _ = types.SignTx(types.NewTransaction(uint64(i)*3+2, common.Address{}, big.NewInt(0), txCost2, big.NewInt(875000000), nil), signer, testKey)
		gen.AddTx(tx)

		// Add two contract creations in block #2
		if i == 1 {
			tx, _ = types.SignNewTx(testKey, signer, &types.LegacyTx{Nonce: 6,
				Value:    big.NewInt(16),
				Gas:      3000000,
				GasPrice: big.NewInt(875000000),
				Data:     code,
			})
			gen.AddTx(tx)

			tx, _ = types.SignNewTx(testKey, signer, &types.LegacyTx{Nonce: 7,
				Value:    big.NewInt(0),
				Gas:      3000000,
				GasPrice: big.NewInt(875000000),
				Data:     codeWithExtCodeCopy,
			})
			gen.AddTx(tx)
		}
	})
	// Print each generated block's state root.
	for i, b := range chain {
		fmt.Printf("%d %x\n", i, b.Root())
	}
	endnum, err := blockchain.InsertChain(chain)
	if err != nil {
		t.Fatalf("block %d imported with error: %v", endnum, err)
	}
	// Verify that both blocks are retrievable and used exactly the gas
	// amounts computed above.
	for i := range 2 {
		b := blockchain.GetBlockByNumber(uint64(i) + 1)
		if b == nil {
			t.Fatalf("expected block %d to be present in chain", i+1)
		}
		if b.Hash() != chain[i].Hash() {
			t.Fatalf("block #%d not found at expected height", b.NumberU64())
		}
		if b.GasUsed() != blockGasUsagesExpected[i] {
			t.Fatalf("expected block #%d txs to use %d, got %d\n", b.NumberU64(), blockGasUsagesExpected[i], b.GasUsed())
		}
	}
}
// TestProcessParentBlockHash checks that ProcessParentBlockHash stores each
// block's parent hash into the history contract, for both MPT and verkle
// backed state databases.
func TestProcessParentBlockHash(t *testing.T) {
	// This test uses blocks where,
	// block 1 parent hash is 0x0100....
	// block 2 parent hash is 0x0200....
	// etc
	const num = 2
	verify := func(statedb *state.StateDB, isVerkle bool) {
		statedb.SetNonce(params.HistoryStorageAddress, 1, tracing.NonceChangeUnspecified)
		statedb.SetCode(params.HistoryStorageAddress, params.HistoryStorageCode, tracing.CodeChangeUnspecified)

		// The config does not depend on the block number, so pick it once.
		chainConfig := params.MergedTestChainConfig
		if isVerkle {
			chainConfig = testVerkleChainConfig
		}
		// Process n blocks, from 1 .. num
		for i := 1; i <= num; i++ {
			header := &types.Header{ParentHash: common.Hash{byte(i)}, Number: big.NewInt(int64(i)), Difficulty: new(big.Int)}
			blockContext := NewEVMBlockContext(header, nil, new(common.Address))
			evm := vm.NewEVM(blockContext, statedb, chainConfig, vm.Config{})
			ProcessParentBlockHash(header.ParentHash, evm)
		}
		// Read block hashes for block 0 .. num-1
		for i := range num {
			have, want := getContractStoredBlockHash(statedb, uint64(i), isVerkle), common.Hash{byte(i + 1)}
			if have != want {
				t.Errorf("block %d, verkle=%v, have parent hash %v, want %v", i, isVerkle, have, want)
			}
		}
	}
	t.Run("MPT", func(t *testing.T) {
		statedb, _ := state.New(types.EmptyRootHash, state.NewDatabaseForTesting())
		verify(statedb, false)
	})
	t.Run("Verkle", func(t *testing.T) {
		db := rawdb.NewMemoryDatabase()
		cacheConfig := DefaultConfig().WithStateScheme(rawdb.PathScheme)
		cacheConfig.SnapshotLimit = 0
		triedb := triedb.NewDatabase(db, cacheConfig.triedbConfig(true))
		statedb, _ := state.New(types.EmptyVerkleHash, state.NewDatabase(triedb, nil))
		verify(statedb, true)
	})
}
// getContractStoredBlockHash is a utility method which reads the stored parent blockhash for block 'number'
// from the history contract's ring buffer.
func getContractStoredBlockHash(statedb *state.StateDB, number uint64, isVerkle bool) common.Hash {
	// Hashes live in a ring buffer of HistoryServeWindow slots; the slot
	// index is written big-endian into the low 8 bytes of the storage key.
	ringIndex := number % params.HistoryServeWindow
	var key common.Hash
	binary.BigEndian.PutUint64(key[24:], ringIndex)
	// Both branches of the previous `if isVerkle` returned the exact same
	// expression, so the lookup is identical in both modes. The isVerkle
	// parameter is kept so existing callers remain valid.
	return statedb.GetState(params.HistoryStorageAddress, key)
}

View file

@ -32,7 +32,6 @@ import (
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/triedb"
"github.com/ethereum/go-verkle"
"github.com/holiman/uint256"
)
@ -427,7 +426,11 @@ func GenerateChain(config *params.ChainConfig, parent *types.Block, engine conse
}
// Forcibly use hash-based state scheme for retaining all nodes in disk.
triedb := triedb.NewDatabase(db, triedb.HashDefaults)
var triedbConfig *triedb.Config = triedb.HashDefaults
if config.IsVerkle(config.ChainID, 0) {
triedbConfig = triedb.VerkleDefaults
}
triedb := triedb.NewDatabase(db, triedbConfig)
defer triedb.Close()
for i := 0; i < n; i++ {
@ -472,7 +475,11 @@ func GenerateChain(config *params.ChainConfig, parent *types.Block, engine conse
// then generate chain on top.
func GenerateChainWithGenesis(genesis *Genesis, engine consensus.Engine, n int, gen func(int, *BlockGen)) (ethdb.Database, []*types.Block, []types.Receipts) {
db := rawdb.NewMemoryDatabase()
triedb := triedb.NewDatabase(db, triedb.HashDefaults)
var triedbConfig *triedb.Config = triedb.HashDefaults
if genesis.Config != nil && genesis.Config.IsVerkle(genesis.Config.ChainID, 0) {
triedbConfig = triedb.VerkleDefaults
}
triedb := triedb.NewDatabase(db, triedbConfig)
defer triedb.Close()
_, err := genesis.Commit(db, triedb)
if err != nil {
@ -482,117 +489,6 @@ func GenerateChainWithGenesis(genesis *Genesis, engine consensus.Engine, n int,
return db, blocks, receipts
}
// GenerateVerkleChain creates a chain of n blocks on top of parent, invoking
// gen for each block, and additionally returns the per-block verkle proofs
// and state diffs taken from each block's execution witness.
//
// A nil config falls back to params.TestChainConfig. The state is committed
// to trdb after every block; any failure panics, as this is test-only code.
func GenerateVerkleChain(config *params.ChainConfig, parent *types.Block, engine consensus.Engine, db ethdb.Database, trdb *triedb.Database, n int, gen func(int, *BlockGen)) ([]*types.Block, []types.Receipts, []*verkle.VerkleProof, []verkle.StateDiff) {
	if config == nil {
		config = params.TestChainConfig
	}
	proofs := make([]*verkle.VerkleProof, 0, n)
	keyvals := make([]verkle.StateDiff, 0, n)
	cm := newChainMaker(parent, config, engine)
	// genblock assembles and seals a single block at index i on top of parent,
	// committing the resulting state to the trie database.
	genblock := func(i int, parent *types.Block, triedb *triedb.Database, statedb *state.StateDB) (*types.Block, types.Receipts) {
		b := &BlockGen{i: i, cm: cm, parent: parent, statedb: statedb, engine: engine}
		b.header = cm.makeHeader(parent, statedb, b.engine)
		// TODO uncomment when proof generation is merged
		// Save pre state for proof generation
		// preState := statedb.Copy()
		// EIP-2935 / 7709: record the parent block hash in the history contract.
		blockContext := NewEVMBlockContext(b.header, cm, &b.header.Coinbase)
		blockContext.Random = &common.Hash{} // enable post-merge instruction set
		evm := vm.NewEVM(blockContext, statedb, cm.config, vm.Config{})
		ProcessParentBlockHash(b.header.ParentHash, evm)
		// Execute any user modifications to the block.
		if gen != nil {
			gen(i, b)
		}
		// Collect consensus-layer requests (if any) and bind them to the header.
		requests := b.collectRequests(false)
		if requests != nil {
			reqHash := types.CalcRequestsHash(requests)
			b.header.RequestsHash = &reqHash
		}
		body := &types.Body{
			Transactions: b.txs,
			Uncles:       b.uncles,
			Withdrawals:  b.withdrawals,
		}
		block, err := b.engine.FinalizeAndAssemble(cm, b.header, statedb, body, b.receipts)
		if err != nil {
			panic(err)
		}
		// Write state changes to DB.
		root, err := statedb.Commit(b.header.Number.Uint64(), config.IsEIP158(b.header.Number), config.IsCancun(b.header.Number, b.header.Time))
		if err != nil {
			panic(fmt.Sprintf("state write error: %v", err))
		}
		if err = triedb.Commit(root, false); err != nil {
			panic(fmt.Sprintf("trie write error: %v", err))
		}
		// Record the witness data produced by this block's execution.
		proofs = append(proofs, block.ExecutionWitness().VerkleProof)
		keyvals = append(keyvals, block.ExecutionWitness().StateDiff)
		return block, b.receipts
	}
	sdb := state.NewDatabase(trdb, nil)
	for i := 0; i < n; i++ {
		// Each block starts from the state root committed by its parent.
		statedb, err := state.New(parent.Root(), sdb)
		if err != nil {
			panic(err)
		}
		block, receipts := genblock(i, parent, trdb, statedb)
		// Post-process the receipts.
		// Here we assign the final block hash and other info into the receipt.
		// In order for DeriveFields to work, the transaction and receipt lists need to be
		// of equal length. If AddUncheckedTx or AddUncheckedReceipt are used, there will be
		// extra ones, so we just trim the lists here.
		receiptsCount := len(receipts)
		txs := block.Transactions()
		if len(receipts) > len(txs) {
			receipts = receipts[:len(txs)]
		} else if len(receipts) < len(txs) {
			txs = txs[:len(receipts)]
		}
		// Blob gas pricing only applies when the block carries excess blob gas.
		var blobGasPrice *big.Int
		if block.ExcessBlobGas() != nil {
			blobGasPrice = eip4844.CalcBlobFee(cm.config, block.Header())
		}
		if err := receipts.DeriveFields(config, block.Hash(), block.NumberU64(), block.Time(), block.BaseFee(), blobGasPrice, txs); err != nil {
			panic(err)
		}
		// Re-expand to ensure all receipts are returned.
		receipts = receipts[:receiptsCount]
		// Advance the chain.
		cm.add(block, receipts)
		parent = block
	}
	return cm.chain, cm.receipts, proofs, keyvals
}
// GenerateVerkleChainWithGenesis commits the given genesis to a fresh
// in-memory database (path scheme, snapshots disabled) and then generates a
// chain of n blocks on top of it via GenerateVerkleChain. It returns the
// genesis hash, the backing database, and the generated blocks, receipts,
// verkle proofs and state diffs.
func GenerateVerkleChainWithGenesis(genesis *Genesis, engine consensus.Engine, n int, gen func(int, *BlockGen)) (common.Hash, ethdb.Database, []*types.Block, []types.Receipts, []*verkle.VerkleProof, []verkle.StateDiff) {
	memdb := rawdb.NewMemoryDatabase()

	// Force the path scheme and disable snapshots for chain generation.
	conf := DefaultConfig().WithStateScheme(rawdb.PathScheme)
	conf.SnapshotLimit = 0

	tdb := triedb.NewDatabase(memdb, conf.triedbConfig(true))
	defer tdb.Close()

	gblock, err := genesis.Commit(memdb, tdb)
	if err != nil {
		panic(err)
	}
	blocks, receipts, proofs, keyvals := GenerateVerkleChain(genesis.Config, gblock, engine, memdb, tdb, n, gen)
	return gblock.Hash(), memdb, blocks, receipts, proofs, keyvals
}
func (cm *chainMaker) makeHeader(parent *types.Block, state *state.StateDB, engine consensus.Engine) *types.Header {
time := parent.Time() + 10 // block time is fixed at 10 seconds
parentHeader := parent.Header()

View file

@ -308,7 +308,7 @@ func TestVerkleGenesisCommit(t *testing.T) {
},
}
expected := common.FromHex("018d20eebb130b5e2b796465fe36aafab650650729a92435aec071bf2386f080")
expected := common.FromHex("19056b480530799a4fdaa9fd9407043b965a3a5c37b4d2a1a9a4f3395a327561")
got := genesis.ToBlock().Root().Bytes()
if !bytes.Equal(got, expected) {
t.Fatalf("invalid genesis state root, expected %x, got %x", expected, got)

View file

@ -28,6 +28,8 @@ import (
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/trie"
"github.com/ethereum/go-ethereum/trie/bintrie"
"github.com/ethereum/go-ethereum/trie/transitiontrie"
"github.com/ethereum/go-ethereum/trie/trienode"
"github.com/ethereum/go-ethereum/trie/utils"
"github.com/ethereum/go-ethereum/triedb"
@ -239,10 +241,12 @@ func (db *CachingDB) OpenTrie(root common.Hash) (Trie, error) {
if db.triedb.IsVerkle() {
ts := overlay.LoadTransitionState(db.TrieDB().Disk(), root, db.triedb.IsVerkle())
if ts.InTransition() {
panic("transition isn't supported yet")
panic("state tree transition isn't supported yet")
}
if ts.Transitioned() {
return trie.NewVerkleTrie(root, db.triedb, db.pointCache)
// Use BinaryTrie instead of VerkleTrie when IsVerkle is set
// (IsVerkle actually means Binary Trie mode in this codebase)
return bintrie.NewBinaryTrie(root, db.triedb)
}
}
tr, err := trie.NewStateTrie(trie.StateTrieID(root), db.triedb)
@ -302,7 +306,7 @@ func mustCopyTrie(t Trie) Trie {
return t.Copy()
case *trie.VerkleTrie:
return t.Copy()
case *trie.TransitionTrie:
case *transitiontrie.TransitionTrie:
return t.Copy()
default:
panic(fmt.Errorf("unknown trie type %T", t))

View file

@ -18,6 +18,7 @@ package state
import (
"encoding/json"
"errors"
"fmt"
"time"
@ -27,6 +28,7 @@ import (
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/trie"
"github.com/ethereum/go-ethereum/trie/bintrie"
)
// DumpConfig is a set of options to control what portions of the state will be
@ -221,6 +223,28 @@ func (s *StateDB) DumpToCollector(c DumpCollector, conf *DumpConfig) (nextKey []
return nextKey
}
// DumpBinTrieLeaves collects all binary trie leaf nodes into the provided map,
// keyed by the 32-byte leaf key. It returns an error if the state trie at the
// original root is not a binary trie, or if the trie/iterator cannot be opened.
func (s *StateDB) DumpBinTrieLeaves(collector map[common.Hash]hexutil.Bytes) error {
	opened, err := s.db.OpenTrie(s.originalRoot)
	if err != nil {
		return err
	}
	bt, ok := opened.(*bintrie.BinaryTrie)
	if !ok {
		return errors.New("trie is not a binary trie")
	}
	iter, err := bt.NodeIterator(nil)
	if err != nil {
		return err
	}
	// Walk the whole tree, recording every leaf value keyed by its leaf key.
	for iter.Next(true) {
		if !iter.Leaf() {
			continue
		}
		collector[common.BytesToHash(iter.LeafKey())] = iter.LeafBlob()
	}
	return nil
}
// RawDump returns the state. If the processing is aborted e.g. due to options
// reaching Max, the `Next` key is set on the returned Dump.
func (s *StateDB) RawDump(opts *DumpConfig) Dump {

View file

@ -30,6 +30,8 @@ import (
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/trie"
"github.com/ethereum/go-ethereum/trie/bintrie"
"github.com/ethereum/go-ethereum/trie/transitiontrie"
"github.com/ethereum/go-ethereum/trie/utils"
"github.com/ethereum/go-ethereum/triedb"
"github.com/ethereum/go-ethereum/triedb/database"
@ -242,7 +244,11 @@ func newTrieReader(root common.Hash, db *triedb.Database, cache *utils.PointCach
if !db.IsVerkle() {
tr, err = trie.NewStateTrie(trie.StateTrieID(root), db)
} else {
tr, err = trie.NewVerkleTrie(root, db, cache)
// When IsVerkle() is true, create a BinaryTrie wrapped in TransitionTrie
binTrie, binErr := bintrie.NewBinaryTrie(root, db)
if binErr != nil {
return nil, binErr
}
// Based on the transition status, determine if the overlay
// tree needs to be created, or if a single, target tree is
@ -253,7 +259,22 @@ func newTrieReader(root common.Hash, db *triedb.Database, cache *utils.PointCach
if err != nil {
return nil, err
}
tr = trie.NewTransitionTrie(mpt, tr.(*trie.VerkleTrie), false)
tr = transitiontrie.NewTransitionTrie(mpt, binTrie, false)
} else {
// HACK: Use TransitionTrie with nil base as a wrapper to make BinaryTrie
// satisfy the Trie interface. This works around the import cycle between
// trie and trie/bintrie packages.
//
// TODO: In future PRs, refactor the package structure to avoid this hack:
// - Option 1: Move common interfaces (Trie, NodeIterator) to a separate
// package that both trie and trie/bintrie can import
// - Option 2: Create a factory function in the trie package that returns
// BinaryTrie as a Trie interface without direct import
// - Option 3: Move BinaryTrie to the main trie package
//
// The current approach works but adds unnecessary overhead and complexity
// by using TransitionTrie when there's no actual transition happening.
tr = transitiontrie.NewTransitionTrie(nil, binTrie, false)
}
}
if err != nil {

View file

@ -29,6 +29,7 @@ import (
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/trie"
"github.com/ethereum/go-ethereum/trie/transitiontrie"
"github.com/ethereum/go-ethereum/trie/trienode"
"github.com/holiman/uint256"
)
@ -501,7 +502,7 @@ func (s *stateObject) deepCopy(db *StateDB) *stateObject {
// Verkle uses only one tree, and the copy has already been
// made in mustCopyTrie.
obj.trie = db.trie
case *trie.TransitionTrie:
case *transitiontrie.TransitionTrie:
// Same thing for the transition tree, since the MPT is
// read-only.
obj.trie = db.trie

File diff suppressed because it is too large Load diff

View file

@ -117,19 +117,20 @@ func (t *BlockTest) Run(snapshotter bool, scheme string, witness bool, tracer *t
return UnsupportedForkError{t.json.Network}
}
// import pre accounts & construct test genesis block & state root
// Commit genesis state
var (
gspec = t.genesis(config)
db = rawdb.NewMemoryDatabase()
tconf = &triedb.Config{
Preimages: true,
IsVerkle: gspec.Config.VerkleTime != nil && *gspec.Config.VerkleTime <= gspec.Timestamp,
}
)
if scheme == rawdb.PathScheme {
if scheme == rawdb.PathScheme || tconf.IsVerkle {
tconf.PathDB = pathdb.Defaults
} else {
tconf.HashDB = hashdb.Defaults
}
// Commit genesis state
gspec := t.genesis(config)
// if ttd is not specified, set an arbitrary huge value
if gspec.Config.TerminalTotalDifficulty == nil {

View file

@ -720,6 +720,25 @@ var Forks = map[string]*params.ChainConfig{
BPO4: params.DefaultBPO4BlobConfig,
},
},
"Verkle": {
ChainID: big.NewInt(1),
HomesteadBlock: big.NewInt(0),
EIP150Block: big.NewInt(0),
EIP155Block: big.NewInt(0),
EIP158Block: big.NewInt(0),
ByzantiumBlock: big.NewInt(0),
ConstantinopleBlock: big.NewInt(0),
PetersburgBlock: big.NewInt(0),
IstanbulBlock: big.NewInt(0),
MuirGlacierBlock: big.NewInt(0),
BerlinBlock: big.NewInt(0),
LondonBlock: big.NewInt(0),
ArrowGlacierBlock: big.NewInt(0),
MergeNetsplitBlock: big.NewInt(0),
TerminalTotalDifficulty: big.NewInt(0),
ShanghaiTime: u64(0),
VerkleTime: u64(0),
},
}
var bpo1BlobConfig = &params.BlobConfig{

View file

@ -31,8 +31,11 @@ type (
var zero [32]byte
const (
NodeWidth = 256 // Number of child per leaf node
StemSize = 31 // Number of bytes to travel before reaching a group of leaves
StemNodeWidth = 256 // Number of child per leaf node
StemSize = 31 // Number of bytes to travel before reaching a group of leaves
NodeTypeBytes = 1 // Size of node type prefix in serialization
HashSize = 32 // Size of a hash in bytes
BitmapSize = 32 // Size of the bitmap in a stem node
)
const (
@ -58,25 +61,28 @@ type BinaryNode interface {
func SerializeNode(node BinaryNode) []byte {
switch n := (node).(type) {
case *InternalNode:
var serialized [65]byte
// InternalNode: 1 byte type + 32 bytes left hash + 32 bytes right hash
var serialized [NodeTypeBytes + HashSize + HashSize]byte
serialized[0] = nodeTypeInternal
copy(serialized[1:33], n.left.Hash().Bytes())
copy(serialized[33:65], n.right.Hash().Bytes())
return serialized[:]
case *StemNode:
var serialized [32 + 32 + 256*32]byte
// StemNode: 1 byte type + 31 bytes stem + 32 bytes bitmap + 256*32 bytes values
var serialized [NodeTypeBytes + StemSize + BitmapSize + StemNodeWidth*HashSize]byte
serialized[0] = nodeTypeStem
copy(serialized[1:32], node.(*StemNode).Stem)
bitmap := serialized[32:64]
offset := 64
for i, v := range node.(*StemNode).Values {
copy(serialized[NodeTypeBytes:NodeTypeBytes+StemSize], n.Stem)
bitmap := serialized[NodeTypeBytes+StemSize : NodeTypeBytes+StemSize+BitmapSize]
offset := NodeTypeBytes + StemSize + BitmapSize
for i, v := range n.Values {
if v != nil {
bitmap[i/8] |= 1 << (7 - (i % 8))
copy(serialized[offset:offset+32], v)
offset += 32
copy(serialized[offset:offset+HashSize], v)
offset += HashSize
}
}
return serialized[:]
// Only return the actual data, not the entire array
return serialized[:offset]
default:
panic("invalid node type")
}
@ -104,21 +110,21 @@ func DeserializeNode(serialized []byte, depth int) (BinaryNode, error) {
if len(serialized) < 64 {
return nil, invalidSerializedLength
}
var values [256][]byte
bitmap := serialized[32:64]
offset := 64
var values [StemNodeWidth][]byte
bitmap := serialized[NodeTypeBytes+StemSize : NodeTypeBytes+StemSize+BitmapSize]
offset := NodeTypeBytes + StemSize + BitmapSize
for i := range 256 {
for i := range StemNodeWidth {
if bitmap[i/8]>>(7-(i%8))&1 == 1 {
if len(serialized) < offset+32 {
if len(serialized) < offset+HashSize {
return nil, invalidSerializedLength
}
values[i] = serialized[offset : offset+32]
offset += 32
values[i] = serialized[offset : offset+HashSize]
offset += HashSize
}
}
return &StemNode{
Stem: serialized[1:32],
Stem: serialized[NodeTypeBytes : NodeTypeBytes+StemSize],
Values: values[:],
depth: depth,
}, nil

View file

@ -77,12 +77,12 @@ func TestSerializeDeserializeInternalNode(t *testing.T) {
// TestSerializeDeserializeStemNode tests serialization and deserialization of StemNode
func TestSerializeDeserializeStemNode(t *testing.T) {
// Create a stem node with some values
stem := make([]byte, 31)
stem := make([]byte, StemSize)
for i := range stem {
stem[i] = byte(i)
}
var values [256][]byte
var values [StemNodeWidth][]byte
// Add some values at different indices
values[0] = common.HexToHash("0x0101010101010101010101010101010101010101010101010101010101010101").Bytes()
values[10] = common.HexToHash("0x0202020202020202020202020202020202020202020202020202020202020202").Bytes()
@ -103,7 +103,7 @@ func TestSerializeDeserializeStemNode(t *testing.T) {
}
// Check the stem is correctly serialized
if !bytes.Equal(serialized[1:32], stem) {
if !bytes.Equal(serialized[1:1+StemSize], stem) {
t.Errorf("Stem mismatch in serialized data")
}
@ -136,7 +136,7 @@ func TestSerializeDeserializeStemNode(t *testing.T) {
}
// Check that other values are nil
for i := range NodeWidth {
for i := range StemNodeWidth {
if i == 0 || i == 10 || i == 255 {
continue
}
@ -218,15 +218,15 @@ func TestKeyToPath(t *testing.T) {
},
{
name: "max valid depth",
depth: 31 * 8,
key: make([]byte, 32),
expected: make([]byte, 31*8+1),
depth: StemSize * 8,
key: make([]byte, HashSize),
expected: make([]byte, StemSize*8+1),
wantErr: false,
},
{
name: "depth too large",
depth: 31*8 + 1,
key: make([]byte, 32),
depth: StemSize*8 + 1,
key: make([]byte, HashSize),
wantErr: true,
},
}

View file

@ -46,8 +46,31 @@ func (h HashedNode) GetValuesAtStem(_ []byte, _ NodeResolverFn) ([][]byte, error
return nil, errors.New("attempted to get values from an unresolved node")
}
func (h HashedNode) InsertValuesAtStem(key []byte, values [][]byte, resolver NodeResolverFn, depth int) (BinaryNode, error) {
return nil, errors.New("insertValuesAtStem not implemented for hashed node")
func (h HashedNode) InsertValuesAtStem(stem []byte, values [][]byte, resolver NodeResolverFn, depth int) (BinaryNode, error) {
// Step 1: Generate the path for this node's position in the tree
path, err := keyToPath(depth, stem)
if err != nil {
return nil, fmt.Errorf("InsertValuesAtStem path generation error: %w", err)
}
if resolver == nil {
return nil, errors.New("InsertValuesAtStem resolve error: resolver is nil")
}
// Step 2: Resolve the hashed node to get the actual node data
data, err := resolver(path, common.Hash(h))
if err != nil {
return nil, fmt.Errorf("InsertValuesAtStem resolve error: %w", err)
}
// Step 3: Deserialize the resolved data into a concrete node
node, err := DeserializeNode(data, depth)
if err != nil {
return nil, fmt.Errorf("InsertValuesAtStem node deserialization error: %w", err)
}
// Step 4: Call InsertValuesAtStem on the resolved concrete node
return node.InsertValuesAtStem(stem, values, resolver, depth)
}
func (h HashedNode) toDot(parent string, path string) string {
@ -58,7 +81,8 @@ func (h HashedNode) toDot(parent string, path string) string {
}
// CollectNodes is a no-op for hashed nodes: they are already persisted in the
// database and don't need to be collected. The original text carried a stale,
// unreachable `return errors.New(...)` above the nil return (diff residue),
// which is removed here.
func (h HashedNode) CollectNodes([]byte, NodeFlushFn) error {
	return nil
}
func (h HashedNode) GetHeight() int {

View file

@ -17,6 +17,7 @@
package bintrie
import (
"bytes"
"testing"
"github.com/ethereum/go-ethereum/common"
@ -59,8 +60,8 @@ func TestHashedNodeCopy(t *testing.T) {
func TestHashedNodeInsert(t *testing.T) {
node := HashedNode(common.HexToHash("0x1234"))
key := make([]byte, 32)
value := make([]byte, 32)
key := make([]byte, HashSize)
value := make([]byte, HashSize)
_, err := node.Insert(key, value, nil, 0)
if err == nil {
@ -76,7 +77,7 @@ func TestHashedNodeInsert(t *testing.T) {
func TestHashedNodeGetValuesAtStem(t *testing.T) {
node := HashedNode(common.HexToHash("0x1234"))
stem := make([]byte, 31)
stem := make([]byte, StemSize)
_, err := node.GetValuesAtStem(stem, nil)
if err == nil {
t.Fatal("Expected error for GetValuesAtStem on HashedNode")
@ -91,17 +92,85 @@ func TestHashedNodeGetValuesAtStem(t *testing.T) {
func TestHashedNodeInsertValuesAtStem(t *testing.T) {
node := HashedNode(common.HexToHash("0x1234"))
stem := make([]byte, 31)
values := make([][]byte, 256)
stem := make([]byte, StemSize)
values := make([][]byte, StemNodeWidth)
// Test 1: nil resolver should return an error
_, err := node.InsertValuesAtStem(stem, values, nil, 0)
if err == nil {
t.Fatal("Expected error for InsertValuesAtStem on HashedNode")
t.Fatal("Expected error for InsertValuesAtStem on HashedNode with nil resolver")
}
if err.Error() != "insertValuesAtStem not implemented for hashed node" {
if err.Error() != "InsertValuesAtStem resolve error: resolver is nil" {
t.Errorf("Unexpected error message: %v", err)
}
// Test 2: mock resolver returning invalid data should return deserialization error
mockResolver := func(path []byte, hash common.Hash) ([]byte, error) {
// Return invalid/nonsense data that cannot be deserialized
return []byte{0xff, 0xff, 0xff}, nil
}
_, err = node.InsertValuesAtStem(stem, values, mockResolver, 0)
if err == nil {
t.Fatal("Expected error for InsertValuesAtStem on HashedNode with invalid resolver data")
}
expectedPrefix := "InsertValuesAtStem node deserialization error:"
if len(err.Error()) < len(expectedPrefix) || err.Error()[:len(expectedPrefix)] != expectedPrefix {
t.Errorf("Expected deserialization error, got: %v", err)
}
// Test 3: mock resolver returning valid serialized node should succeed
stem = make([]byte, StemSize)
stem[0] = 0xaa
var originalValues [StemNodeWidth][]byte
originalValues[0] = common.HexToHash("0x1111111111111111111111111111111111111111111111111111111111111111").Bytes()
originalValues[1] = common.HexToHash("0x2222222222222222222222222222222222222222222222222222222222222222").Bytes()
originalNode := &StemNode{
Stem: stem,
Values: originalValues[:],
depth: 0,
}
// Serialize the node
serialized := SerializeNode(originalNode)
// Create a mock resolver that returns the serialized node
validResolver := func(path []byte, hash common.Hash) ([]byte, error) {
return serialized, nil
}
var newValues [StemNodeWidth][]byte
newValues[2] = common.HexToHash("0x3333333333333333333333333333333333333333333333333333333333333333").Bytes()
resolvedNode, err := node.InsertValuesAtStem(stem, newValues[:], validResolver, 0)
if err != nil {
t.Fatalf("Expected successful resolution and insertion, got error: %v", err)
}
resultStem, ok := resolvedNode.(*StemNode)
if !ok {
t.Fatalf("Expected resolved node to be *StemNode, got %T", resolvedNode)
}
if !bytes.Equal(resultStem.Stem, stem) {
t.Errorf("Stem mismatch: expected %x, got %x", stem, resultStem.Stem)
}
// Verify the original values are preserved
if !bytes.Equal(resultStem.Values[0], originalValues[0]) {
t.Errorf("Original value at index 0 not preserved: expected %x, got %x", originalValues[0], resultStem.Values[0])
}
if !bytes.Equal(resultStem.Values[1], originalValues[1]) {
t.Errorf("Original value at index 1 not preserved: expected %x, got %x", originalValues[1], resultStem.Values[1])
}
// Verify the new value was inserted
if !bytes.Equal(resultStem.Values[2], newValues[2]) {
t.Errorf("New value at index 2 not inserted correctly: expected %x, got %x", newValues[2], resultStem.Values[2])
}
}
// TestHashedNodeToDot tests the toDot method for visualization

View file

@ -49,14 +49,26 @@ func (bt *InternalNode) GetValuesAtStem(stem []byte, resolver NodeResolverFn) ([
}
bit := stem[bt.depth/8] >> (7 - (bt.depth % 8)) & 1
var child *BinaryNode
if bit == 0 {
child = &bt.left
} else {
child = &bt.right
if hn, ok := bt.left.(HashedNode); ok {
path, err := keyToPath(bt.depth, stem)
if err != nil {
return nil, fmt.Errorf("GetValuesAtStem resolve error: %w", err)
}
data, err := resolver(path, common.Hash(hn))
if err != nil {
return nil, fmt.Errorf("GetValuesAtStem resolve error: %w", err)
}
node, err := DeserializeNode(data, bt.depth+1)
if err != nil {
return nil, fmt.Errorf("GetValuesAtStem node deserialization error: %w", err)
}
bt.left = node
}
return bt.left.GetValuesAtStem(stem, resolver)
}
if hn, ok := (*child).(HashedNode); ok {
if hn, ok := bt.right.(HashedNode); ok {
path, err := keyToPath(bt.depth, stem)
if err != nil {
return nil, fmt.Errorf("GetValuesAtStem resolve error: %w", err)
@ -69,9 +81,9 @@ func (bt *InternalNode) GetValuesAtStem(stem []byte, resolver NodeResolverFn) ([
if err != nil {
return nil, fmt.Errorf("GetValuesAtStem node deserialization error: %w", err)
}
*child = node
bt.right = node
}
return (*child).GetValuesAtStem(stem, resolver)
return bt.right.GetValuesAtStem(stem, resolver)
}
// Get retrieves the value for the given key.
@ -80,6 +92,9 @@ func (bt *InternalNode) Get(key []byte, resolver NodeResolverFn) ([]byte, error)
if err != nil {
return nil, fmt.Errorf("get error: %w", err)
}
if values == nil {
return nil, nil
}
return values[key[31]], nil
}
@ -118,17 +133,54 @@ func (bt *InternalNode) Hash() common.Hash {
// InsertValuesAtStem inserts a full value group at the given stem in the internal node.
// Already-existing values will be overwritten.
func (bt *InternalNode) InsertValuesAtStem(stem []byte, values [][]byte, resolver NodeResolverFn, depth int) (BinaryNode, error) {
var (
child *BinaryNode
err error
)
var err error
bit := stem[bt.depth/8] >> (7 - (bt.depth % 8)) & 1
if bit == 0 {
child = &bt.left
} else {
child = &bt.right
if bt.left == nil {
bt.left = Empty{}
}
if hn, ok := bt.left.(HashedNode); ok {
path, err := keyToPath(bt.depth, stem)
if err != nil {
return nil, fmt.Errorf("InsertValuesAtStem resolve error: %w", err)
}
data, err := resolver(path, common.Hash(hn))
if err != nil {
return nil, fmt.Errorf("InsertValuesAtStem resolve error: %w", err)
}
node, err := DeserializeNode(data, bt.depth+1)
if err != nil {
return nil, fmt.Errorf("InsertValuesAtStem node deserialization error: %w", err)
}
bt.left = node
}
bt.left, err = bt.left.InsertValuesAtStem(stem, values, resolver, depth+1)
return bt, err
}
*child, err = (*child).InsertValuesAtStem(stem, values, resolver, depth+1)
if bt.right == nil {
bt.right = Empty{}
}
if hn, ok := bt.right.(HashedNode); ok {
path, err := keyToPath(bt.depth, stem)
if err != nil {
return nil, fmt.Errorf("InsertValuesAtStem resolve error: %w", err)
}
data, err := resolver(path, common.Hash(hn))
if err != nil {
return nil, fmt.Errorf("InsertValuesAtStem resolve error: %w", err)
}
node, err := DeserializeNode(data, bt.depth+1)
if err != nil {
return nil, fmt.Errorf("InsertValuesAtStem node deserialization error: %w", err)
}
bt.right = node
}
bt.right, err = bt.right.InsertValuesAtStem(stem, values, resolver, depth+1)
return bt, err
}

View file

@ -108,6 +108,11 @@ func (it *binaryNodeIterator) Next(descend bool) bool {
}
// go back to parent to get the next leaf
// Check if we're at the root before popping
if len(it.stack) == 1 {
it.lastErr = errIteratorEnd
return false
}
it.stack = it.stack[:len(it.stack)-1]
it.current = it.stack[len(it.stack)-1].Node
it.stack[len(it.stack)-1].Index++
@ -183,9 +188,31 @@ func (it *binaryNodeIterator) NodeBlob() []byte {
}
// Leaf returns true iff the current node is a leaf node.
// In a Binary Trie, a StemNode contains up to 256 leaf values.
// The iterator is only considered to be "at a leaf" when it's positioned
// at a specific non-nil value within the StemNode, not just at the StemNode itself.
func (it *binaryNodeIterator) Leaf() bool {
_, ok := it.current.(*StemNode)
return ok
sn, ok := it.current.(*StemNode)
if !ok {
return false
}
// Check if we have a valid stack position
if len(it.stack) == 0 {
return false
}
// The Index in the stack state points to the NEXT position after the current value.
// So if Index is 0, we haven't started iterating through the values yet.
// If Index is 5, we're currently at value[4] (the 5th value, 0-indexed).
idx := it.stack[len(it.stack)-1].Index
if idx == 0 || idx > 256 {
return false
}
// Check if there's actually a value at the current position
currentValueIndex := idx - 1
return sn.Values[currentValueIndex] != nil
}
// LeafKey returns the key of the leaf. The method panics if the iterator is not
@ -219,7 +246,7 @@ func (it *binaryNodeIterator) LeafProof() [][]byte {
panic("LeafProof() called on an binary node iterator not at a leaf location")
}
proof := make([][]byte, 0, len(it.stack)+NodeWidth)
proof := make([][]byte, 0, len(it.stack)+StemNodeWidth)
// Build proof by walking up the stack and collecting sibling hashes
for i := range it.stack[:len(it.stack)-2] {

View file

@ -1,83 +0,0 @@
// Copyright 2025 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package bintrie
import (
"testing"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/triedb"
"github.com/ethereum/go-ethereum/triedb/hashdb"
"github.com/ethereum/go-ethereum/triedb/pathdb"
"github.com/holiman/uint256"
)
// newTestDatabase creates a trie database on top of the given disk database,
// using the requested state scheme. Preimage recording is always enabled, and
// all caches are sized to zero so tests exercise the disk path.
func newTestDatabase(diskdb ethdb.Database, scheme string) *triedb.Database {
	cfg := &triedb.Config{Preimages: true}
	switch scheme {
	case rawdb.HashScheme:
		cfg.HashDB = &hashdb.Config{CleanCacheSize: 0}
	default:
		cfg.PathDB = &pathdb.Config{TrieCleanSize: 0, StateCleanSize: 0}
	}
	return triedb.NewDatabase(diskdb, cfg)
}
// TestBinaryIterator checks that iterating a small binary trie visits every
// inserted account leaf. Two accounts are inserted (one of which is chosen to
// share a leading key byte with the other), so exactly 2 leaves are expected.
func TestBinaryIterator(t *testing.T) {
	trie, err := NewBinaryTrie(types.EmptyVerkleHash, newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.PathScheme))
	if err != nil {
		t.Fatal(err)
	}
	account0 := &types.StateAccount{
		Nonce:    1,
		Balance:  uint256.NewInt(2),
		Root:     types.EmptyRootHash,
		CodeHash: nil,
	}
	// NOTE: the code size isn't written to the trie via TryUpdateAccount
	// so it will be missing from the test nodes.
	trie.UpdateAccount(common.Address{}, account0, 0)
	account1 := &types.StateAccount{
		Nonce:    1337,
		Balance:  uint256.NewInt(2000),
		Root:     types.EmptyRootHash,
		CodeHash: nil,
	}
	// This address is meant to hash to a value that has the same first byte as 0xbf
	var clash = common.HexToAddress("69fd8034cdb20934dedffa7dccb4fb3b8062a8be")
	trie.UpdateAccount(clash, account1, 0)
	// Manually go over every node to check that we get all
	// the correct nodes.
	it, err := trie.NodeIterator(nil)
	if err != nil {
		t.Fatal(err)
	}
	var leafcount int
	for it.Next(true) {
		t.Logf("Node: %x", it.Path())
		if it.Leaf() {
			leafcount++
			t.Logf("\tLeaf: %x", it.LeafKey())
		}
	}
	// The failure message previously said "!= 6" while the check is for 2;
	// the message is fixed to match the actual expectation.
	if leafcount != 2 {
		t.Fatalf("invalid leaf count: %d != 2", leafcount)
	}
}

View file

@ -47,6 +47,12 @@ func GetBinaryTreeKey(addr common.Address, key []byte) []byte {
return k
}
// GetBinaryTreeKeyBasicData returns the binary-tree key of the basic-data
// leaf for the given account address.
func GetBinaryTreeKeyBasicData(addr common.Address) []byte {
	// A 32-byte key whose last byte selects the basic-data leaf slot.
	k := [32]byte{31: BasicDataLeafKey}
	return GetBinaryTreeKey(addr, k[:])
}
func GetBinaryTreeKeyCodeHash(addr common.Address) []byte {
var k [32]byte
k[31] = CodeHashLeafKey

View file

@ -28,7 +28,7 @@ import (
// StemNode represents a group of StemNodeWidth values sharing the same stem.
type StemNode struct {
Stem []byte // Stem path to get to StemNodeWidth values
Values [][]byte // All values, indexed by the last byte of the key.
depth int // Depth of the node
}
@ -40,7 +40,7 @@ func (bt *StemNode) Get(key []byte, _ NodeResolverFn) ([]byte, error) {
// Insert inserts a new key-value pair into the node.
func (bt *StemNode) Insert(key []byte, value []byte, _ NodeResolverFn, depth int) (BinaryNode, error) {
if !bytes.Equal(bt.Stem, key[:31]) {
if !bytes.Equal(bt.Stem, key[:StemSize]) {
bitStem := bt.Stem[bt.depth/8] >> (7 - (bt.depth % 8)) & 1
n := &InternalNode{depth: bt.depth}
@ -65,26 +65,26 @@ func (bt *StemNode) Insert(key []byte, value []byte, _ NodeResolverFn, depth int
}
*other = Empty{}
} else {
var values [256][]byte
values[key[31]] = value
var values [StemNodeWidth][]byte
values[key[StemSize]] = value
*other = &StemNode{
Stem: slices.Clone(key[:31]),
Stem: slices.Clone(key[:StemSize]),
Values: values[:],
depth: depth + 1,
}
}
return n, nil
}
if len(value) != 32 {
if len(value) != HashSize {
return bt, errors.New("invalid insertion: value length")
}
bt.Values[key[31]] = value
bt.Values[key[StemSize]] = value
return bt, nil
}
// Copy creates a deep copy of the node.
func (bt *StemNode) Copy() BinaryNode {
var values [256][]byte
var values [StemNodeWidth][]byte
for i, v := range bt.Values {
values[i] = slices.Clone(v)
}
@ -102,7 +102,7 @@ func (bt *StemNode) GetHeight() int {
// Hash returns the hash of the node.
func (bt *StemNode) Hash() common.Hash {
var data [NodeWidth]common.Hash
var data [StemNodeWidth]common.Hash
for i, v := range bt.Values {
if v != nil {
h := sha256.Sum256(v)
@ -112,7 +112,7 @@ func (bt *StemNode) Hash() common.Hash {
h := sha256.New()
for level := 1; level <= 8; level++ {
for i := range NodeWidth / (1 << level) {
for i := range StemNodeWidth / (1 << level) {
h.Reset()
if data[i*2] == (common.Hash{}) && data[i*2+1] == (common.Hash{}) {
@ -141,14 +141,17 @@ func (bt *StemNode) CollectNodes(path []byte, flush NodeFlushFn) error {
}
// GetValuesAtStem retrieves the group of values located at the given stem key.
func (bt *StemNode) GetValuesAtStem(_ []byte, _ NodeResolverFn) ([][]byte, error) {
func (bt *StemNode) GetValuesAtStem(stem []byte, _ NodeResolverFn) ([][]byte, error) {
if !bytes.Equal(bt.Stem, stem) {
return nil, nil
}
return bt.Values[:], nil
}
// InsertValuesAtStem inserts a full value group at the given stem in the internal node.
// Already-existing values will be overwritten.
func (bt *StemNode) InsertValuesAtStem(key []byte, values [][]byte, _ NodeResolverFn, depth int) (BinaryNode, error) {
if !bytes.Equal(bt.Stem, key[:31]) {
if !bytes.Equal(bt.Stem, key[:StemSize]) {
bitStem := bt.Stem[bt.depth/8] >> (7 - (bt.depth % 8)) & 1
n := &InternalNode{depth: bt.depth}
@ -174,7 +177,7 @@ func (bt *StemNode) InsertValuesAtStem(key []byte, values [][]byte, _ NodeResolv
*other = Empty{}
} else {
*other = &StemNode{
Stem: slices.Clone(key[:31]),
Stem: slices.Clone(key[:StemSize]),
Values: values,
depth: n.depth + 1,
}
@ -206,7 +209,7 @@ func (bt *StemNode) toDot(parent, path string) string {
// Key returns the full key for the given index.
func (bt *StemNode) Key(i int) []byte {
var ret [32]byte
var ret [HashSize]byte
copy(ret[:], bt.Stem)
ret[StemSize] = byte(i)
return ret[:]

View file

@ -251,27 +251,23 @@ func TestStemNodeGetValuesAtStem(t *testing.T) {
}
// Check that all values match
for i := 0; i < 256; i++ {
for i := range 256 {
if !bytes.Equal(retrievedValues[i], values[i]) {
t.Errorf("Value mismatch at index %d", i)
}
}
// GetValuesAtStem with different stem also returns the same values
// (implementation ignores the stem parameter)
// GetValuesAtStem with different stem should return nil
differentStem := make([]byte, 31)
differentStem[0] = 0xFF
retrievedValues2, err := node.GetValuesAtStem(differentStem, nil)
shouldBeNil, err := node.GetValuesAtStem(differentStem, nil)
if err != nil {
t.Fatalf("Failed to get values with different stem: %v", err)
}
// Should still return the same values (stem is ignored)
for i := 0; i < 256; i++ {
if !bytes.Equal(retrievedValues2[i], values[i]) {
t.Errorf("Value mismatch at index %d with different stem", i)
}
if shouldBeNil != nil {
t.Error("Expected nil for different stem, got non-nil")
}
}

View file

@ -33,6 +33,84 @@ import (
var errInvalidRootType = errors.New("invalid root type")
// ChunkedCode represents a sequence of HashSize-byte (32-byte) code chunks.
// Within each chunk, byte 0 is a metadata byte holding the pushdata offset
// (how many leading code bytes of the chunk are immediate data of a PUSH
// instruction that started in a previous chunk), and the remaining StemSize
// (31) bytes are the actual code. See ChunkifyCode for the layout rules.
type ChunkedCode []byte
// PUSH1 and PUSH32 mirror the opcode values declared in the EVM package.
// The values are copied here so as to avoid an import cycle between this
// package and the VM. Every opcode in the inclusive range [PUSH1, PUSH32]
// is a PUSH-N instruction carrying N bytes of immediate data.
const (
	PUSH1 = byte(0x60)
	PUSH32 = byte(0x7f)
)
// ChunkifyCode generates the chunked version of an array representing EVM bytecode
// according to the EIP-7864 specification.
//
// The code is divided into HashSize-byte (32-byte) chunks, where each chunk contains:
//   - Byte 0: metadata byte indicating how many leading code bytes of the chunk
//     are PUSHDATA, in the range 0..StemSize (0..31)
//   - Bytes 1..StemSize: the actual code bytes
//
// This format enables stateless clients to validate jump destinations within a chunk
// without requiring additional context. When a PUSH instruction's data spans multiple
// chunks, the metadata byte tells us how many bytes at the start of the chunk are
// part of the previous chunk's PUSH instruction data.
//
// For example:
//   - If a chunk starts with regular code: metadata byte = 0
//   - If a PUSH32 instruction starts at byte 30 of chunk N:
//   - Chunk N: normal, contains the PUSH32 opcode + 1 byte of its data
//   - Chunk N+1: metadata = StemSize (the entire chunk is PUSH data)
//   - Chunk N+2: metadata = 1 (first byte is PUSH data, then normal code resumes)
//
// This chunking approach ensures that jump destination validity can be determined
// by examining only the chunk containing the potential JUMPDEST, making it ideal
// for stateless execution and verkle/binary tries.
//
// Reference: https://eips.ethereum.org/EIPS/eip-7864
func ChunkifyCode(code []byte) ChunkedCode {
	var (
		chunkOffset = 0 // number of PUSHDATA bytes carried into the current chunk
		chunkCount = len(code) / StemSize
		codeOffset = 0 // scan position in the input code, persists across chunks
	)
	// A partial trailing chunk still needs a full HashSize slot.
	if len(code)%StemSize != 0 {
		chunkCount++
	}
	chunks := make([]byte, chunkCount*HashSize)
	for i := 0; i < chunkCount; i++ {
		// number of bytes to copy, StemSize unless the end of the code has been reached.
		end := StemSize * (i + 1)
		if len(code) < end {
			end = len(code)
		}
		copy(chunks[i*HashSize+1:], code[StemSize*i:end]) // copy the code itself
		// chunk offset = taken from the last chunk.
		if chunkOffset > StemSize {
			// skip offset calculation if push data covers the whole chunk
			chunks[i*HashSize] = StemSize
			// The largest push (PUSH32) carries 32 bytes of immediate data, so
			// the carry can exceed a full StemSize-byte chunk by at most one
			// byte (chunkOffset <= 32 here): the residue spilling into the
			// next chunk is therefore always exactly 1.
			chunkOffset = 1
			continue
		}
		chunks[HashSize*i] = byte(chunkOffset)
		chunkOffset = 0
		// Check each instruction and update the offset; it stays 0 unless
		// a PUSH-N's immediate data overflows past this chunk's boundary.
		for ; codeOffset < end; codeOffset++ {
			if code[codeOffset] >= PUSH1 && code[codeOffset] <= PUSH32 {
				// Skip over the N bytes of immediate data of this PUSH-N.
				codeOffset += int(code[codeOffset] - PUSH1 + 1)
				if codeOffset+1 >= StemSize*(i+1) {
					// The push data crosses the chunk boundary: record how
					// many of its bytes land in the following chunk(s).
					codeOffset++
					chunkOffset = codeOffset - StemSize*(i+1)
					break
				}
			}
		}
	}
	return chunks
}
// NewBinaryNode creates a new empty binary trie
func NewBinaryNode() BinaryNode {
return Empty{}
@ -114,7 +192,7 @@ func (t *BinaryTrie) GetAccount(addr common.Address) (*types.StateAccount, error
)
switch r := t.root.(type) {
case *InternalNode:
values, err = r.GetValuesAtStem(key[:31], t.nodeResolver)
values, err = r.GetValuesAtStem(key[:StemSize], t.nodeResolver)
case *StemNode:
values = r.Values
case Empty:
@ -168,8 +246,8 @@ func (t *BinaryTrie) GetStorage(addr common.Address, key []byte) ([]byte, error)
func (t *BinaryTrie) UpdateAccount(addr common.Address, acc *types.StateAccount, codeLen int) error {
var (
err error
basicData [32]byte
values = make([][]byte, NodeWidth)
basicData [HashSize]byte
values = make([][]byte, StemNodeWidth)
stem = GetBinaryTreeKey(addr, zero[:])
)
binary.BigEndian.PutUint32(basicData[BasicDataCodeSizeOffset-1:], uint32(codeLen))
@ -177,14 +255,14 @@ func (t *BinaryTrie) UpdateAccount(addr common.Address, acc *types.StateAccount,
// Because the balance is a max of 16 bytes, truncate
// the extra values. This happens in devmode, where
// 0xff**32 is allocated to the developer account.
// 0xff**HashSize is allocated to the developer account.
balanceBytes := acc.Balance.Bytes()
// TODO: reduce the size of the allocation in devmode, then panic instead
// of truncating.
if len(balanceBytes) > 16 {
balanceBytes = balanceBytes[16:]
}
copy(basicData[32-len(balanceBytes):], balanceBytes[:])
copy(basicData[HashSize-len(balanceBytes):], balanceBytes[:])
values[BasicDataLeafKey] = basicData[:]
values[CodeHashLeafKey] = acc.CodeHash[:]
@ -205,11 +283,11 @@ func (t *BinaryTrie) UpdateStem(key []byte, values [][]byte) error {
// database, a trie.MissingNodeError is returned.
func (t *BinaryTrie) UpdateStorage(address common.Address, key, value []byte) error {
k := GetBinaryTreeKeyStorageSlot(address, key)
var v [32]byte
if len(value) >= 32 {
copy(v[:], value[:32])
var v [HashSize]byte
if len(value) >= HashSize {
copy(v[:], value[:HashSize])
} else {
copy(v[32-len(value):], value[:])
copy(v[HashSize-len(value):], value[:])
}
root, err := t.root.Insert(k, v[:], t.nodeResolver, 0)
if err != nil {
@ -228,7 +306,7 @@ func (t *BinaryTrie) DeleteAccount(addr common.Address) error {
// found in the database, a trie.MissingNodeError is returned.
func (t *BinaryTrie) DeleteStorage(addr common.Address, key []byte) error {
k := GetBinaryTreeKey(addr, key)
var zero [32]byte
var zero [HashSize]byte
root, err := t.root.Insert(k, zero[:], t.nodeResolver, 0)
if err != nil {
return fmt.Errorf("DeleteStorage (%x) error: %v", addr, err)
@ -246,12 +324,12 @@ func (t *BinaryTrie) Hash() common.Hash {
// Commit writes all nodes to the trie's memory database, tracking the internal
// and external (for account tries) references.
func (t *BinaryTrie) Commit(_ bool) (common.Hash, *trienode.NodeSet) {
root := t.root.(*InternalNode)
nodeset := trienode.NewNodeSet(common.Hash{})
err := root.CollectNodes(nil, func(path []byte, node BinaryNode) {
// The root can be any type of BinaryNode (InternalNode, StemNode, etc.)
err := t.root.CollectNodes(nil, func(path []byte, node BinaryNode) {
serialized := SerializeNode(node)
nodeset.AddNode(path, trienode.NewNodeWithPrev(common.Hash{}, serialized, t.tracer.Get(path)))
nodeset.AddNode(path, trienode.NewNodeWithPrev(node.Hash(), serialized, t.tracer.Get(path)))
})
if err != nil {
panic(fmt.Errorf("CollectNodes failed: %v", err))
@ -299,23 +377,23 @@ func (t *BinaryTrie) IsVerkle() bool {
// Note: the basic data leaf needs to have been previously created for this to work
func (t *BinaryTrie) UpdateContractCode(addr common.Address, codeHash common.Hash, code []byte) error {
var (
chunks = trie.ChunkifyCode(code)
chunks = ChunkifyCode(code)
values [][]byte
key []byte
err error
)
for i, chunknr := 0, uint64(0); i < len(chunks); i, chunknr = i+32, chunknr+1 {
groupOffset := (chunknr + 128) % 256
for i, chunknr := 0, uint64(0); i < len(chunks); i, chunknr = i+HashSize, chunknr+1 {
groupOffset := (chunknr + 128) % StemNodeWidth
if groupOffset == 0 /* start of new group */ || chunknr == 0 /* first chunk in header group */ {
values = make([][]byte, NodeWidth)
var offset [32]byte
values = make([][]byte, StemNodeWidth)
var offset [HashSize]byte
binary.LittleEndian.PutUint64(offset[24:], chunknr+128)
key = GetBinaryTreeKey(addr, offset[:])
}
values[groupOffset] = chunks[i : i+32]
values[groupOffset] = chunks[i : i+HashSize]
if groupOffset == 255 || len(chunks)-i <= 32 {
err = t.UpdateStem(key[:31], values)
if groupOffset == StemNodeWidth-1 || len(chunks)-i <= HashSize {
err = t.UpdateStem(key[:StemSize], values)
if err != nil {
return fmt.Errorf("UpdateContractCode (addr=%x) error: %w", addr[:], err)

View file

@ -25,7 +25,7 @@ import (
)
var (
zeroKey = [32]byte{}
zeroKey = [HashSize]byte{}
oneKey = common.HexToHash("0101010101010101010101010101010101010101010101010101010101010101")
twoKey = common.HexToHash("0202020202020202020202020202020202020202020202020202020202020202")
threeKey = common.HexToHash("0303030303030303030303030303030303030303030303030303030303030303")
@ -158,8 +158,8 @@ func TestInsertDuplicateKey(t *testing.T) {
func TestLargeNumberOfEntries(t *testing.T) {
var err error
tree := NewBinaryNode()
for i := range 256 {
var key [32]byte
for i := range StemNodeWidth {
var key [HashSize]byte
key[0] = byte(i)
tree, err = tree.Insert(key[:], ffKey[:], nil, 0)
if err != nil {
@ -182,7 +182,7 @@ func TestMerkleizeMultipleEntries(t *testing.T) {
common.HexToHash("8100000000000000000000000000000000000000000000000000000000000000").Bytes(),
}
for i, key := range keys {
var v [32]byte
var v [HashSize]byte
binary.LittleEndian.PutUint64(v[:8], uint64(i))
tree, err = tree.Insert(key, v[:], nil, 0)
if err != nil {

View file

@ -14,7 +14,7 @@
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package trie
package transitiontrie
import (
"fmt"
@ -22,8 +22,9 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/trie"
"github.com/ethereum/go-ethereum/trie/bintrie"
"github.com/ethereum/go-ethereum/trie/trienode"
"github.com/ethereum/go-verkle"
)
// TransitionTrie is a trie that implements a façade design pattern, presenting
@ -31,13 +32,16 @@ import (
// first from the overlay trie, and falls back to the base trie if the key isn't
// found. All writes go to the overlay trie.
type TransitionTrie struct {
overlay *VerkleTrie
base *SecureTrie
overlay *bintrie.BinaryTrie
base *trie.SecureTrie
storage bool
}
// NewTransitionTrie creates a new TransitionTrie.
func NewTransitionTrie(base *SecureTrie, overlay *VerkleTrie, st bool) *TransitionTrie {
// Note: base can be nil when using TransitionTrie as a wrapper for BinaryTrie
// to work around import cycles. This is a temporary hack that should be
// refactored in future PRs (see core/state/reader.go for details).
func NewTransitionTrie(base *trie.SecureTrie, overlay *bintrie.BinaryTrie, st bool) *TransitionTrie {
return &TransitionTrie{
overlay: overlay,
base: base,
@ -46,12 +50,12 @@ func NewTransitionTrie(base *SecureTrie, overlay *VerkleTrie, st bool) *Transiti
}
// Base returns the base trie.
func (t *TransitionTrie) Base() *SecureTrie {
func (t *TransitionTrie) Base() *trie.SecureTrie {
return t.base
}
// Overlay returns the overlay trie.
func (t *TransitionTrie) Overlay() *VerkleTrie {
func (t *TransitionTrie) Overlay() *bintrie.BinaryTrie {
return t.overlay
}
@ -61,7 +65,10 @@ func (t *TransitionTrie) GetKey(key []byte) []byte {
if key := t.overlay.GetKey(key); key != nil {
return key
}
return t.base.GetKey(key)
if t.base != nil {
return t.base.GetKey(key)
}
return nil
}
// GetStorage returns the value for key stored in the trie. The value bytes must
@ -74,8 +81,11 @@ func (t *TransitionTrie) GetStorage(addr common.Address, key []byte) ([]byte, er
if len(val) != 0 {
return val, nil
}
// TODO also insert value into overlay
return t.base.GetStorage(addr, key)
if t.base != nil {
// TODO also insert value into overlay
return t.base.GetStorage(addr, key)
}
return nil, nil
}
// PrefetchStorage attempts to resolve specific storage slots from the database
@ -102,7 +112,10 @@ func (t *TransitionTrie) GetAccount(address common.Address) (*types.StateAccount
if data != nil {
return data, nil
}
return t.base.GetAccount(address)
if t.base != nil {
return t.base.GetAccount(address)
}
return nil, nil
}
// PrefetchAccount attempts to resolve specific accounts from the database
@ -174,7 +187,7 @@ func (t *TransitionTrie) Commit(collectLeaf bool) (common.Hash, *trienode.NodeSe
// NodeIterator returns an iterator that returns nodes of the trie. Iteration
// starts at the key after the given start key.
func (t *TransitionTrie) NodeIterator(startKey []byte) (NodeIterator, error) {
func (t *TransitionTrie) NodeIterator(startKey []byte) (trie.NodeIterator, error) {
panic("not implemented") // TODO: Implement
}
@ -197,14 +210,10 @@ func (t *TransitionTrie) IsVerkle() bool {
// UpdateStem updates a group of values, given the stem they are using. If
// a value already exists, it is overwritten.
// TODO: This is Verkle-specific and requires access to private fields.
// Not currently used in the codebase.
func (t *TransitionTrie) UpdateStem(key []byte, values [][]byte) error {
trie := t.overlay
switch root := trie.root.(type) {
case *verkle.InternalNode:
return root.InsertValuesAtStem(key, values, t.overlay.nodeResolver)
default:
panic("invalid root type")
}
panic("UpdateStem is not implemented for TransitionTrie")
}
// Copy creates a deep copy of the transition trie.

View file

@ -45,6 +45,10 @@ var (
verkleNodeWidth = uint256.NewInt(256)
codeStorageDelta = uint256.NewInt(0).Sub(codeOffset, headerStorageOffset)
mainStorageOffsetLshVerkleNodeWidth = new(uint256.Int).Lsh(uint256.NewInt(1), 248-uint(verkleNodeWidthLog2))
CodeOffset = uint256.NewInt(128)
VerkleNodeWidth = uint256.NewInt(256)
HeaderStorageOffset = uint256.NewInt(64)
VerkleNodeWidthLog2 = 8
index0Point *verkle.Point // pre-computed commitment of polynomial [2+256*64]
@ -200,6 +204,22 @@ func CodeChunkKey(address []byte, chunk *uint256.Int) []byte {
return GetTreeKey(address, treeIndex, subIndex)
}
// GetTreeKeyCodeChunkIndices returns the tree index and the sub index of the
// leaf holding code chunk number `chunk`: chunks are laid out starting at
// CodeOffset, with VerkleNodeWidth leaves per tree index.
func GetTreeKeyCodeChunkIndices(chunk *uint256.Int) (*uint256.Int, byte) {
	chunkOffset := new(uint256.Int).Add(CodeOffset, chunk)
	treeIndex := new(uint256.Int).Div(chunkOffset, VerkleNodeWidth)
	subIndexMod := new(uint256.Int).Mod(chunkOffset, VerkleNodeWidth)
	// The modulus is strictly less than VerkleNodeWidth (256), so the result
	// fits entirely in the lowest 64-bit limb. Note: the previous
	// `if len(subIndexMod) != 0` guard was always true, because len of a
	// pointer-to-array is the (fixed) array length, not a population count.
	return treeIndex, byte(subIndexMod[0])
}
// GetTreeKeyCodeChunk returns the tree key of the leaf storing code chunk
// number `chunk` of the account at `address`.
func GetTreeKeyCodeChunk(address []byte, chunk *uint256.Int) []byte {
	treeIndex, subIndex := GetTreeKeyCodeChunkIndices(chunk)
	return GetTreeKey(address, treeIndex, subIndex)
}
func StorageIndex(storageKey []byte) (*uint256.Int, byte) {
// If the storage slot is in the header, we need to add the header offset.
var key uint256.Int
@ -297,3 +317,97 @@ func evaluateAddressPoint(address []byte) *verkle.Point {
ret.Add(ret, index0Point)
return ret
}
// EvaluateAddressPoint computes the per-address commitment point used when
// deriving tree keys: the address is left-padded to 32 bytes, committed to as
// two 16-byte little-endian field elements, and the pre-computed constant
// index0Point is added to the result. The returned point can be cached and
// reused by the *WithEvaluatedAddress key-derivation helpers.
func EvaluateAddressPoint(address []byte) *verkle.Point {
	if len(address) < 32 {
		var aligned [32]byte
		// Left-pad with zeroes: appending to the zeroed prefix writes into
		// `aligned`, yielding a 32-byte value with the address at the end.
		address = append(aligned[:32-len(address)], address...)
	}
	var poly [3]fr.Element
	poly[0].SetZero()
	// 32-byte address, interpreted as two little endian
	// 16-byte numbers.
	verkle.FromLEBytes(&poly[1], address[:16])
	verkle.FromLEBytes(&poly[2], address[16:])
	cfg := verkle.GetConfig()
	ret := cfg.CommitToPoly(poly[:], 0)
	// add a constant point
	ret.Add(ret, index0Point)
	return ret
}
// GetTreeKeyStorageSlotWithEvaluatedAddress returns the tree key of the given
// storage slot, reusing a previously-evaluated address commitment (see
// EvaluateAddressPoint) so the address part need not be recomputed for every
// slot of the same account.
func GetTreeKeyStorageSlotWithEvaluatedAddress(evaluated *verkle.Point, storageKey []byte) []byte {
	treeIndex, subIndex := GetTreeKeyStorageSlotTreeIndexes(storageKey)
	return GetTreeKeyWithEvaluatedAddess(evaluated, treeIndex, subIndex)
}
// GetTreeKeyStorageSlotTreeIndexes returns the tree index and sub index of the
// leaf holding the given storage slot. Slots below codeStorageDelta live in the
// account header group (tree index zero, offset by HeaderStorageOffset); all
// other slots are mapped into the main storage area.
func GetTreeKeyStorageSlotTreeIndexes(storageKey []byte) (*uint256.Int, byte) {
	var pos uint256.Int
	pos.SetBytes(storageKey)
	// If the storage slot is in the header, we need to add the header offset.
	if pos.Cmp(codeStorageDelta) < 0 {
		// This addition is always safe; it can't ever overflow since pos<codeStorageDelta.
		pos.Add(HeaderStorageOffset, &pos)
		// In this branch, the tree-index is zero since we're in the account header,
		// and the sub-index is the LSB of the modified storage key.
		return zero, byte(pos[0] & 0xFF)
	}
	// If the storage slot is in the main storage, we need to add the main storage offset.
	// The first MAIN_STORAGE_OFFSET group will see its
	// first 64 slots unreachable. This is either a typo in the
	// spec or intended to conserve the 256-u256
	// alignment. If we decide to ever access these 64
	// slots, uncomment this.
	// // Get the new offset since we now know that we are above 64.
	// pos.Sub(&pos, codeStorageDelta)
	// suffix := byte(pos[0] & 0xFF)
	suffix := storageKey[len(storageKey)-1]
	// We first divide by VerkleNodeWidth to create room to avoid an overflow next.
	pos.Rsh(&pos, uint(VerkleNodeWidthLog2))
	// We add mainStorageOffset/VerkleNodeWidth which can't overflow.
	pos.Add(&pos, mainStorageOffsetLshVerkleNodeWidth)
	// The sub-index is the LSB of the original storage key, since mainStorageOffset
	// doesn't affect this byte, so we can avoid masks or shifts.
	return &pos, suffix
}
// GetTreeKeyWithEvaluatedAddess returns the tree key for (treeIndex, subIndex),
// reusing a previously-evaluated address commitment: it commits to the 32-byte
// tree index split into two 16-byte halves, adds the evaluated address point,
// then maps the sum to a key whose last byte is subIndex (see PointToHash).
//
// NOTE(review): the name misspells "Address"; it is kept as-is because the
// function is exported and renaming would break callers.
func GetTreeKeyWithEvaluatedAddess(evaluated *verkle.Point, treeIndex *uint256.Int, subIndex byte) []byte {
	var poly [5]fr.Element
	// Slots 0-2 are unused here; they are covered by the pre-evaluated
	// address point added below.
	poly[0].SetZero()
	poly[1].SetZero()
	poly[2].SetZero()
	trieIndexBytes := treeIndex.Bytes32()
	verkle.FromBytes(&poly[3], trieIndexBytes[16:])
	verkle.FromBytes(&poly[4], trieIndexBytes[:16])
	cfg := verkle.GetConfig()
	ret := cfg.CommitToPoly(poly[:], 0)
	// add the pre-evaluated address
	ret.Add(ret, evaluated)
	return PointToHash(ret, subIndex)
}
// GetTreeKeyBasicDataEvaluatedAddress returns the tree key of the account's
// basic-data leaf (BasicDataLeafKey at tree index zero), given an
// already-evaluated address commitment.
func GetTreeKeyBasicDataEvaluatedAddress(addrp *verkle.Point) []byte {
	return GetTreeKeyWithEvaluatedAddess(addrp, zero, BasicDataLeafKey)
}
// PointToHash maps a curve point to a 32-byte tree key: the point is
// serialized via verkle.HashPointToBytes and the final byte is overwritten
// with the given suffix (the leaf sub-index within the group).
func PointToHash(evaluated *verkle.Point, suffix byte) []byte {
	hashed := verkle.HashPointToBytes(evaluated)
	hashed[len(hashed)-1] = suffix
	return hashed[:]
}

View file

@ -300,7 +300,8 @@ func (t *VerkleTrie) Commit(_ bool) (common.Hash, *trienode.NodeSet) {
//
// TODO(gballet, rjl493456442) implement it.
func (t *VerkleTrie) NodeIterator(startKey []byte) (NodeIterator, error) {
panic("not implemented")
// TODO(@CPerezz): remove.
return nil, errors.New("not implemented")
}
// Prove implements state.Trie, constructing a Merkle proof for key. The result

View file

@ -31,8 +31,8 @@ import (
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/trie/bintrie"
"github.com/ethereum/go-ethereum/trie/trienode"
"github.com/ethereum/go-verkle"
)
// layer is the interface implemented by all state layers which includes some
@ -97,16 +97,16 @@ func merkleNodeHasher(blob []byte) (common.Hash, error) {
return crypto.Keccak256Hash(blob), nil
}
// verkleNodeHasher computes the hash of the given verkle node.
func verkleNodeHasher(blob []byte) (common.Hash, error) {
// binaryNodeHasher computes the hash of the given verkle node.
func binaryNodeHasher(blob []byte) (common.Hash, error) {
if len(blob) == 0 {
return types.EmptyVerkleHash, nil
}
n, err := verkle.ParseNode(blob, 0)
n, err := bintrie.DeserializeNode(blob, 0)
if err != nil {
return common.Hash{}, err
}
return n.Commit().Bytes(), nil
return n.Hash(), nil
}
// Database is a multiple-layered structure for maintaining in-memory states
@ -163,7 +163,7 @@ func New(diskdb ethdb.Database, config *Config, isVerkle bool) *Database {
// compress the shared key prefix.
if isVerkle {
db.diskdb = rawdb.NewTable(diskdb, string(rawdb.VerklePrefix))
db.hasher = verkleNodeHasher
db.hasher = binaryNodeHasher
}
// Construct the layer tree by resolving the in-disk singleton state
// and in-memory layer journal.