// Copyright 2023 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package blobpool

import (
"bytes"
"crypto/ecdsa"
"crypto/sha256"
"errors"
"fmt"
"math"
"math/big"
"math/rand"
"os"
"path/filepath"
"reflect"
"slices"
"sync"
"testing"

"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/consensus/misc/eip1559"
"github.com/ethereum/go-ethereum/consensus/misc/eip4844"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/tracing"
"github.com/ethereum/go-ethereum/core/txpool"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/crypto/kzg4844"
"github.com/ethereum/go-ethereum/internal/testrand"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp"
"github.com/holiman/billy"
"github.com/holiman/uint256"
)
|
|
|
|
var (
|
|
testBlobs []*kzg4844.Blob
|
|
testBlobCommits []kzg4844.Commitment
|
|
testBlobProofs []kzg4844.Proof
|
|
testBlobCellProofs [][]kzg4844.Proof
|
|
testBlobVHashes [][32]byte
|
|
testBlobIndices = make(map[[32]byte]int)
|
|
)
|
|
|
|
const testMaxBlobsPerBlock = 6
|
|
|
|
func init() {
|
|
for i := 0; i < 24; i++ {
|
|
testBlob := &kzg4844.Blob{byte(i)}
|
|
testBlobs = append(testBlobs, testBlob)
|
|
|
|
testBlobCommit, _ := kzg4844.BlobToCommitment(testBlob)
|
|
testBlobCommits = append(testBlobCommits, testBlobCommit)
|
|
|
|
testBlobProof, _ := kzg4844.ComputeBlobProof(testBlob, testBlobCommit)
|
|
testBlobProofs = append(testBlobProofs, testBlobProof)
|
|
|
|
testBlobCellProof, _ := kzg4844.ComputeCellProofs(testBlob)
|
|
testBlobCellProofs = append(testBlobCellProofs, testBlobCellProof)
|
|
|
|
testBlobVHash := kzg4844.CalcBlobHashV1(sha256.New(), &testBlobCommit)
|
|
testBlobIndices[testBlobVHash] = len(testBlobVHashes)
|
|
testBlobVHashes = append(testBlobVHashes, testBlobVHash)
|
|
}
|
|
}
|
|
|
|
// testBlockChain is a mock of the live chain for testing the pool.
|
|
type testBlockChain struct {
|
|
config *params.ChainConfig
|
|
basefee *uint256.Int
|
|
blobfee *uint256.Int
|
|
statedb *state.StateDB
|
|
|
|
blocks map[uint64]*types.Block
|
|
|
|
blockTime *uint64
|
|
}
|
|
|
|
func (bc *testBlockChain) setHeadTime(time uint64) {
|
|
bc.blockTime = &time
|
|
}
|
|
|
|
func (bc *testBlockChain) Config() *params.ChainConfig {
|
|
return bc.config
|
|
}
|
|
|
|
func (bc *testBlockChain) CurrentBlock() *types.Header {
|
|
// Yolo, life is too short to invert misc.CalcBaseFee and misc.CalcBlobFee,
// just binary search them.

// A base fee of 5714 ETH makes even a 21000 gas transaction cost more than
// all the ether in existence on mainnet, so use that as a cap for the tests.
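// (Roughly, the bisection below maintains CalcBaseFee(lo) <= bc.basefee <
// CalcBaseFee(hi) and stops once hi == lo+1, so lo ends up as the largest
// parent base fee whose derived child base fee still fits under the pool's
// configured basefee.)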
|
|
var (
|
|
blockNumber = new(big.Int).Add(bc.config.LondonBlock, big.NewInt(1))
|
|
blockTime = *bc.config.CancunTime + 1
|
|
gasLimit = uint64(30_000_000)
|
|
)
|
|
if bc.blockTime != nil {
|
|
blockTime = *bc.blockTime
|
|
}
|
|
|
|
lo := new(big.Int)
|
|
hi := new(big.Int).Mul(big.NewInt(5714), new(big.Int).Exp(big.NewInt(10), big.NewInt(18), nil))
|
|
|
|
for new(big.Int).Add(lo, big.NewInt(1)).Cmp(hi) != 0 {
|
|
mid := new(big.Int).Add(lo, hi)
|
|
mid.Div(mid, big.NewInt(2))
|
|
|
|
if eip1559.CalcBaseFee(bc.config, &types.Header{
|
|
Number: blockNumber,
|
|
GasLimit: gasLimit,
|
|
GasUsed: 0,
|
|
BaseFee: mid,
|
|
}).Cmp(bc.basefee.ToBig()) > 0 {
|
|
hi = mid
|
|
} else {
|
|
lo = mid
|
|
}
|
|
}
|
|
baseFee := lo
|
|
|
|
// An excess blob gas of 2^27 translates into a blob fee larger than all the
// ether in existence on mainnet, so use that as a cap for the tests.
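// (Same bisection as for the base fee above, only over the excess blob gas
// value fed into eip4844.CalcBlobFee.)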
|
|
lo = new(big.Int)
|
|
hi = new(big.Int).Exp(big.NewInt(2), big.NewInt(27), nil)
|
|
|
|
for new(big.Int).Add(lo, big.NewInt(1)).Cmp(hi) != 0 {
|
|
mid := new(big.Int).Add(lo, hi)
|
|
mid.Div(mid, big.NewInt(2))
|
|
|
|
tmp := mid.Uint64()
|
|
if eip4844.CalcBlobFee(bc.Config(), &types.Header{
|
|
Number: blockNumber,
|
|
Time: blockTime,
|
|
ExcessBlobGas: &tmp,
|
|
}).Cmp(bc.blobfee.ToBig()) > 0 {
|
|
hi = mid
|
|
} else {
|
|
lo = mid
|
|
}
|
|
}
|
|
excessBlobGas := lo.Uint64()
|
|
|
|
return &types.Header{
|
|
Number: blockNumber,
|
|
Time: blockTime,
|
|
GasLimit: gasLimit,
|
|
BaseFee: baseFee,
|
|
ExcessBlobGas: &excessBlobGas,
|
|
Difficulty: common.Big0,
|
|
}
|
|
}
|
|
|
|
func (bc *testBlockChain) CurrentFinalBlock() *types.Header {
|
|
return &types.Header{
|
|
Number: big.NewInt(0),
|
|
}
|
|
}
|
|
|
|
func (bc *testBlockChain) GetBlock(hash common.Hash, number uint64) *types.Block {
|
|
// This is very yolo for the tests. If the number is the origin block we use
|
|
// to init the pool, return an empty block with the correct starting header.
|
|
//
|
|
// If it is something else, return some baked in mock data.
|
|
if number == bc.config.LondonBlock.Uint64()+1 {
|
|
return types.NewBlockWithHeader(bc.CurrentBlock())
|
|
}
|
|
return bc.blocks[number]
|
|
}
|
|
|
|
func (bc *testBlockChain) StateAt(common.Hash) (*state.StateDB, error) {
|
|
return bc.statedb, nil
|
|
}
|
|
|
|
// reserver is a utility struct to sanity check that accounts are
|
|
// properly reserved by the blobpool (no duplicate reserves or unreserves).
|
|
type reserver struct {
|
|
accounts map[common.Address]struct{}
|
|
lock sync.RWMutex
|
|
}
|
|
|
|
func newReserver() txpool.Reserver {
|
|
return &reserver{accounts: make(map[common.Address]struct{})}
|
|
}
|
|
|
|
func (r *reserver) Hold(addr common.Address) error {
|
|
r.lock.Lock()
|
|
defer r.lock.Unlock()
|
|
if _, exists := r.accounts[addr]; exists {
|
|
panic("already reserved")
|
|
}
|
|
r.accounts[addr] = struct{}{}
|
|
return nil
|
|
}
|
|
|
|
func (r *reserver) Release(addr common.Address) error {
|
|
r.lock.Lock()
|
|
defer r.lock.Unlock()
|
|
if _, exists := r.accounts[addr]; !exists {
|
|
panic("not reserved")
|
|
}
|
|
delete(r.accounts, addr)
|
|
return nil
|
|
}
|
|
|
|
func (r *reserver) Has(address common.Address) bool {
|
|
r.lock.RLock()
|
|
defer r.lock.RUnlock()
|
|
_, exists := r.accounts[address]
|
|
return exists
|
|
}
|
|
|
|
// makeTx is a utility method to construct a random blob transaction and sign it
|
|
// with a valid key, only setting the interesting fields from the perspective of
|
|
// the blob pool.
|
|
func makeTx(nonce uint64, gasTipCap uint64, gasFeeCap uint64, blobFeeCap uint64, key *ecdsa.PrivateKey) *types.Transaction {
|
|
blobtx := makeUnsignedTx(nonce, gasTipCap, gasFeeCap, blobFeeCap)
|
|
return types.MustSignNewTx(key, types.LatestSigner(params.MainnetChainConfig), blobtx)
|
|
}
|
|
|
|
// makeMultiBlobTx is a utility method to construct a random blob tx with a
// certain number of blobs in its sidecar.
|
|
func makeMultiBlobTx(nonce uint64, gasTipCap uint64, gasFeeCap uint64, blobFeeCap uint64, blobCount int, blobOffset int, key *ecdsa.PrivateKey, version byte) *types.Transaction {
|
|
var (
|
|
blobs []kzg4844.Blob
|
|
blobHashes []common.Hash
|
|
commitments []kzg4844.Commitment
|
|
proofs []kzg4844.Proof
|
|
)
|
|
for i := 0; i < blobCount; i++ {
|
|
blobs = append(blobs, *testBlobs[blobOffset+i])
|
|
commitments = append(commitments, testBlobCommits[blobOffset+i])
|
|
if version == types.BlobSidecarVersion0 {
|
|
proofs = append(proofs, testBlobProofs[blobOffset+i])
|
|
} else {
|
|
cellProofs, _ := kzg4844.ComputeCellProofs(testBlobs[blobOffset+i])
|
|
proofs = append(proofs, cellProofs...)
|
|
}
|
|
blobHashes = append(blobHashes, testBlobVHashes[blobOffset+i])
|
|
}
|
|
blobtx := &types.BlobTx{
|
|
ChainID: uint256.MustFromBig(params.MainnetChainConfig.ChainID),
|
|
Nonce: nonce,
|
|
GasTipCap: uint256.NewInt(gasTipCap),
|
|
GasFeeCap: uint256.NewInt(gasFeeCap),
|
|
Gas: 21000,
|
|
BlobFeeCap: uint256.NewInt(blobFeeCap),
|
|
BlobHashes: blobHashes,
|
|
Value: uint256.NewInt(100),
|
|
Sidecar: types.NewBlobTxSidecar(version, blobs, commitments, proofs),
|
|
}
|
|
return types.MustSignNewTx(key, types.LatestSigner(params.MainnetChainConfig), blobtx)
|
|
}
|
|
|
|
// makeUnsignedTx is a utility method to construct a random blob transaction
|
|
// without signing it.
|
|
func makeUnsignedTx(nonce uint64, gasTipCap uint64, gasFeeCap uint64, blobFeeCap uint64) *types.BlobTx {
return makeUnsignedTxWithTestBlob(nonce, gasTipCap, gasFeeCap, blobFeeCap, rand.Intn(len(testBlobs)))
}
|
|
|
|
// makeUnsignedTxWithTestBlob is a utility method to construct a random blob transaction
|
|
// with a specific test blob without signing it.
|
|
func makeUnsignedTxWithTestBlob(nonce uint64, gasTipCap uint64, gasFeeCap uint64, blobFeeCap uint64, blobIdx int) *types.BlobTx {
|
|
return &types.BlobTx{
|
|
ChainID: uint256.MustFromBig(params.MainnetChainConfig.ChainID),
|
|
Nonce: nonce,
|
|
GasTipCap: uint256.NewInt(gasTipCap),
|
|
GasFeeCap: uint256.NewInt(gasFeeCap),
|
|
Gas: 21000,
|
|
BlobFeeCap: uint256.NewInt(blobFeeCap),
|
|
BlobHashes: []common.Hash{testBlobVHashes[blobIdx]},
|
|
Value: uint256.NewInt(100),
|
|
Sidecar: types.NewBlobTxSidecar(types.BlobSidecarVersion0, []kzg4844.Blob{*testBlobs[blobIdx]}, []kzg4844.Commitment{testBlobCommits[blobIdx]}, []kzg4844.Proof{testBlobProofs[blobIdx]}),
|
|
}
|
|
}
|
|
|
|
// verifyPoolInternals iterates over all the transactions in the pool and checks
// that sort order, calculated fields and cumulative fields are correct.
|
|
func verifyPoolInternals(t *testing.T, pool *BlobPool) {
|
|
// Mark this method as a helper to remove from stack traces
|
|
t.Helper()
|
|
|
|
// Verify that all items in the index are present in the tx lookup and nothing more
|
|
seen := make(map[common.Hash]struct{})
|
|
for addr, txs := range pool.index {
|
|
for _, tx := range txs {
|
|
if _, ok := seen[tx.hash]; ok {
|
|
t.Errorf("duplicate hash #%x in transaction index: address %s, nonce %d", tx.hash, addr, tx.nonce)
|
|
}
|
|
seen[tx.hash] = struct{}{}
|
|
}
|
|
}
|
|
for hash, id := range pool.lookup.txIndex {
|
|
if _, ok := seen[hash]; !ok {
|
|
t.Errorf("tx lookup entry missing from transaction index: hash #%x, id %d", hash, id)
|
|
}
|
|
delete(seen, hash)
|
|
}
|
|
for hash := range seen {
|
|
t.Errorf("indexed transaction hash #%x missing from tx lookup table", hash)
|
|
}
|
|
// Verify that all blobs in the index are present in the blob lookup and nothing more
|
|
blobs := make(map[common.Hash]map[common.Hash]struct{})
|
|
for _, txs := range pool.index {
|
|
for _, tx := range txs {
|
|
for _, vhash := range tx.vhashes {
|
|
if blobs[vhash] == nil {
|
|
blobs[vhash] = make(map[common.Hash]struct{})
|
|
}
|
|
blobs[vhash][tx.hash] = struct{}{}
|
|
}
|
|
}
|
|
}
|
|
for vhash, txs := range pool.lookup.blobIndex {
|
|
for txhash := range txs {
|
|
if _, ok := blobs[vhash][txhash]; !ok {
|
|
t.Errorf("blob lookup entry missing from transaction index: blob hash #%x, tx hash #%x", vhash, txhash)
|
|
}
|
|
delete(blobs[vhash], txhash)
|
|
if len(blobs[vhash]) == 0 {
|
|
delete(blobs, vhash)
|
|
}
|
|
}
|
|
}
|
|
for vhash := range blobs {
|
|
t.Errorf("indexed transaction blob hash #%x missing from blob lookup table", vhash)
|
|
}
|
|
// Verify that transactions are sorted per account and contain no nonce gaps,
|
|
// and that the first nonce is the next expected one based on the state.
|
|
for addr, txs := range pool.index {
|
|
for i := 1; i < len(txs); i++ {
|
|
if txs[i].nonce != txs[i-1].nonce+1 {
|
|
t.Errorf("addr %v, tx %d nonce mismatch: have %d, want %d", addr, i, txs[i].nonce, txs[i-1].nonce+1)
|
|
}
|
|
}
|
|
if txs[0].nonce != pool.state.GetNonce(addr) {
|
|
t.Errorf("addr %v, first tx nonce mismatch: have %d, want %d", addr, txs[0].nonce, pool.state.GetNonce(addr))
|
|
}
|
|
}
|
|
// Verify that the calculated eviction thresholds are correct
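// (The eviction values are running minima over each account's nonce-sorted
// transactions: entry i should equal min(entry i-1, tx i's own tip or fee
// jumps), which is what the loop below re-derives and compares against.)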
|
|
for addr, txs := range pool.index {
|
|
if !txs[0].evictionExecTip.Eq(txs[0].execTipCap) {
|
|
t.Errorf("addr %v, tx %d eviction execution tip mismatch: have %d, want %d", addr, 0, txs[0].evictionExecTip, txs[0].execTipCap)
|
|
}
|
|
if math.Abs(txs[0].evictionExecFeeJumps-txs[0].basefeeJumps) > 0.001 {
|
|
t.Errorf("addr %v, tx %d eviction execution fee jumps mismatch: have %f, want %f", addr, 0, txs[0].evictionExecFeeJumps, txs[0].basefeeJumps)
|
|
}
|
|
if math.Abs(txs[0].evictionBlobFeeJumps-txs[0].blobfeeJumps) > 0.001 {
|
|
t.Errorf("addr %v, tx %d eviction blob fee jumps mismatch: have %f, want %f", addr, 0, txs[0].evictionBlobFeeJumps, txs[0].blobfeeJumps)
|
|
}
|
|
for i := 1; i < len(txs); i++ {
|
|
wantExecTip := txs[i-1].evictionExecTip
|
|
if wantExecTip.Gt(txs[i].execTipCap) {
|
|
wantExecTip = txs[i].execTipCap
|
|
}
|
|
if !txs[i].evictionExecTip.Eq(wantExecTip) {
|
|
t.Errorf("addr %v, tx %d eviction execution tip mismatch: have %d, want %d", addr, i, txs[i].evictionExecTip, wantExecTip)
|
|
}
|
|
|
|
wantExecFeeJumps := txs[i-1].evictionExecFeeJumps
|
|
if wantExecFeeJumps > txs[i].basefeeJumps {
|
|
wantExecFeeJumps = txs[i].basefeeJumps
|
|
}
|
|
if math.Abs(txs[i].evictionExecFeeJumps-wantExecFeeJumps) > 0.001 {
|
|
t.Errorf("addr %v, tx %d eviction execution fee jumps mismatch: have %f, want %f", addr, i, txs[i].evictionExecFeeJumps, wantExecFeeJumps)
|
|
}
|
|
|
|
wantBlobFeeJumps := txs[i-1].evictionBlobFeeJumps
|
|
if wantBlobFeeJumps > txs[i].blobfeeJumps {
|
|
wantBlobFeeJumps = txs[i].blobfeeJumps
|
|
}
|
|
if math.Abs(txs[i].evictionBlobFeeJumps-wantBlobFeeJumps) > 0.001 {
|
|
t.Errorf("addr %v, tx %d eviction blob fee jumps mismatch: have %f, want %f", addr, i, txs[i].evictionBlobFeeJumps, wantBlobFeeJumps)
|
|
}
|
|
}
|
|
}
|
|
// Verify that account balance accumulations are correct
|
|
for addr, txs := range pool.index {
|
|
spent := new(uint256.Int)
|
|
for _, tx := range txs {
|
|
spent.Add(spent, tx.costCap)
|
|
}
|
|
if !pool.spent[addr].Eq(spent) {
|
|
t.Errorf("addr %v expenditure mismatch: have %d, want %d", addr, pool.spent[addr], spent)
|
|
}
|
|
}
|
|
// Verify that pool storage size is correct
|
|
var stored uint64
|
|
for _, txs := range pool.index {
|
|
for _, tx := range txs {
|
|
stored += uint64(tx.storageSize)
|
|
}
|
|
}
|
|
if pool.stored != stored {
|
|
t.Errorf("pool storage mismatch: have %d, want %d", pool.stored, stored)
|
|
}
|
|
// Verify the price heap internals
|
|
verifyHeapInternals(t, pool.evict)
|
|
|
|
// Verify that all the blobs can be retrieved
|
|
verifyBlobRetrievals(t, pool)
|
|
}
|
|
|
|
// verifyBlobRetrievals attempts to retrieve all testing blobs and checks that
// whatever is in the pool can be retrieved correctly.
|
|
func verifyBlobRetrievals(t *testing.T, pool *BlobPool) {
|
|
// Collect all the blobs tracked by the pool
|
|
var (
|
|
hashes []common.Hash
|
|
known = make(map[common.Hash]struct{})
|
|
)
|
|
for _, txs := range pool.index {
|
|
for _, tx := range txs {
|
|
for _, vhash := range tx.vhashes {
|
|
known[vhash] = struct{}{}
|
|
}
|
|
hashes = append(hashes, tx.vhashes...)
|
|
}
|
|
}
|
|
blobs1, _, proofs1, err := pool.GetBlobs(hashes, types.BlobSidecarVersion0, false)
|
|
if err != nil {
|
|
t.Fatal(err)
|
|
}
|
|
blobs2, _, proofs2, err := pool.GetBlobs(hashes, types.BlobSidecarVersion1, false)
|
|
if err != nil {
|
|
t.Fatal(err)
|
|
}
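// (Per the comparisons below, version-0 lookups are expected to return a
// single blob proof per blob, whereas version-1 lookups return the blob's
// full set of cell proofs.)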
|
|
// Cross validate what we received vs what we wanted
|
|
if len(blobs1) != len(hashes) || len(proofs1) != len(hashes) {
|
|
t.Errorf("retrieved blobs/proofs size mismatch: have %d/%d, want %d", len(blobs1), len(proofs1), len(hashes))
|
|
return
|
|
}
|
|
if len(blobs2) != len(hashes) || len(proofs2) != len(hashes) {
|
|
t.Errorf("retrieved blobs/proofs size mismatch: have %d/%d, want blobs %d, want proofs: %d", len(blobs2), len(proofs2), len(hashes), len(hashes))
|
|
return
|
|
}
|
|
for i, hash := range hashes {
|
|
// If an item is missing from both, but shouldn't, error
|
|
if (blobs1[i] == nil || proofs1[i] == nil) && (blobs2[i] == nil || proofs2[i] == nil) {
|
|
t.Errorf("tracked blob retrieval failed: item %d, hash %x", i, hash)
|
|
continue
|
|
}
|
|
// Item retrieved, make sure it matches the expectation
|
|
index := testBlobIndices[hash]
|
|
if blobs1[i] != nil && (*blobs1[i] != *testBlobs[index] || proofs1[i][0] != testBlobProofs[index]) {
|
|
t.Errorf("retrieved blob or proof mismatch: item %d, hash %x", i, hash)
|
|
continue
|
|
}
|
|
if blobs2[i] != nil && (*blobs2[i] != *testBlobs[index] || !slices.Equal(proofs2[i], testBlobCellProofs[index])) {
|
|
t.Errorf("retrieved blob or proof mismatch: item %d, hash %x", i, hash)
|
|
continue
|
|
}
|
|
delete(known, hash)
|
|
}
|
|
for hash := range known {
|
|
t.Errorf("indexed blob #%x missing from retrieval", hash)
|
|
}
|
|
}
|
|
|
|
// Tests that transactions can be loaded from disk on startup and that they are
|
|
// correctly discarded if invalid.
|
|
//
|
|
// - 1. A transaction that cannot be decoded must be dropped
|
|
// - 2. A transaction that cannot be recovered (bad signature) must be dropped
|
|
// - 3. All transactions after a nonce gap must be dropped
|
|
// - 4. All transactions after an already included nonce must be dropped
|
|
// - 5. All transactions after an underpriced one (including it) must be dropped
|
|
// - 6. All transactions after an overdrafting sequence must be dropped
|
|
// - 7. All transactions exceeding the per-account limit must be dropped
|
|
//
|
|
// Furthermore, some strange corner-cases can also occur after a crash, as Billy's
// simplicity also allows it to resurrect previously deleted entities:
|
|
//
|
|
// - 8. Fully duplicate transactions (matching hash) must be dropped
|
|
// - 9. Duplicate nonces from the same account must be dropped
|
|
func TestOpenDrops(t *testing.T) {
|
|
//log.SetDefault(log.NewLogger(log.NewTerminalHandlerWithLevel(os.Stderr, log.LevelTrace, true)))
|
|
|
|
// Create a temporary folder for the persistent backend
|
|
storage := t.TempDir()
|
|
|
|
os.MkdirAll(filepath.Join(storage, pendingTransactionStore), 0700)
|
|
store, _ := billy.Open(billy.Options{Path: filepath.Join(storage, pendingTransactionStore)}, newSlotter(testMaxBlobsPerBlock), nil)
|
|
|
|
// Insert a malformed transaction to verify that decoding errors (or format
|
|
// changes) are handled gracefully (case 1)
|
|
malformed, _ := store.Put([]byte("this is a badly encoded transaction"))
|
|
|
|
// Insert a transaction with a bad signature to verify that stale junk after
|
|
// potential hard-forks can get evicted (case 2)
|
|
tx := types.NewTx(&types.BlobTx{
|
|
ChainID: uint256.MustFromBig(params.MainnetChainConfig.ChainID),
|
|
GasTipCap: new(uint256.Int),
|
|
GasFeeCap: new(uint256.Int),
|
|
Gas: 0,
|
|
Value: new(uint256.Int),
|
|
Data: nil,
|
|
BlobFeeCap: new(uint256.Int),
|
|
V: new(uint256.Int),
|
|
R: new(uint256.Int),
|
|
S: new(uint256.Int),
|
|
})
|
|
blob, _ := rlp.EncodeToBytes(tx)
|
|
badsig, _ := store.Put(blob)
|
|
|
|
// Insert a sequence of transactions with a nonce gap in between to verify
|
|
// that anything gapped will get evicted (case 3).
|
|
var (
|
|
gapper, _ = crypto.GenerateKey()
|
|
|
|
valids = make(map[uint64]struct{})
|
|
gapped = make(map[uint64]struct{})
|
|
)
|
|
for _, nonce := range []uint64{0, 1, 3, 4, 6, 7} { // first gap at #2, another at #5
|
|
tx := makeTx(nonce, 1, 1, 1, gapper)
|
|
blob, _ := rlp.EncodeToBytes(tx)
|
|
|
|
id, _ := store.Put(blob)
|
|
if nonce < 2 {
|
|
valids[id] = struct{}{}
|
|
} else {
|
|
gapped[id] = struct{}{}
|
|
}
|
|
}
|
|
// Insert a sequence of transactions with a gapped starting nonce to verify
|
|
// that the entire set will get dropped (case 3).
|
|
var (
|
|
dangler, _ = crypto.GenerateKey()
|
|
dangling = make(map[uint64]struct{})
|
|
)
|
|
for _, nonce := range []uint64{1, 2, 3} { // first gap at #0, all set dangling
|
|
tx := makeTx(nonce, 1, 1, 1, dangler)
|
|
blob, _ := rlp.EncodeToBytes(tx)
|
|
|
|
id, _ := store.Put(blob)
|
|
dangling[id] = struct{}{}
|
|
}
|
|
// Insert a sequence of transactions with already passed nonces to verify
// that the entire set will get dropped (case 4).
|
|
var (
|
|
filler, _ = crypto.GenerateKey()
|
|
filled = make(map[uint64]struct{})
|
|
)
|
|
for _, nonce := range []uint64{0, 1, 2} { // account nonce at 3, all set filled
|
|
tx := makeTx(nonce, 1, 1, 1, filler)
|
|
blob, _ := rlp.EncodeToBytes(tx)
|
|
|
|
id, _ := store.Put(blob)
|
|
filled[id] = struct{}{}
|
|
}
|
|
// Insert a sequence of transactions with partially passed nonces to verify
|
|
// that the included part of the set will get dropped (case 4).
|
|
var (
|
|
overlapper, _ = crypto.GenerateKey()
|
|
overlapped = make(map[uint64]struct{})
|
|
)
|
|
for _, nonce := range []uint64{0, 1, 2, 3} { // account nonce at 2, half filled
|
|
tx := makeTx(nonce, 1, 1, 1, overlapper)
|
|
blob, _ := rlp.EncodeToBytes(tx)
|
|
|
|
id, _ := store.Put(blob)
|
|
if nonce >= 2 {
|
|
valids[id] = struct{}{}
|
|
} else {
|
|
overlapped[id] = struct{}{}
|
|
}
|
|
}
|
|
// Insert a sequence of transactions with an underpriced first to verify that
|
|
// the entire set will get dropped (case 5).
|
|
var (
|
|
underpayer, _ = crypto.GenerateKey()
|
|
underpaid = make(map[uint64]struct{})
|
|
)
|
|
for i := 0; i < 5; i++ { // make #0 underpriced
|
|
var tx *types.Transaction
|
|
if i == 0 {
|
|
tx = makeTx(uint64(i), 0, 0, 0, underpayer)
|
|
} else {
|
|
tx = makeTx(uint64(i), 1, 1, 1, underpayer)
|
|
}
|
|
blob, _ := rlp.EncodeToBytes(tx)
|
|
|
|
id, _ := store.Put(blob)
|
|
underpaid[id] = struct{}{}
|
|
}
|
|
|
|
// Insert a sequence of transactions with an underpriced in between to verify
|
|
// that it and anything newly gapped will get evicted (case 5).
|
|
var (
|
|
outpricer, _ = crypto.GenerateKey()
|
|
outpriced = make(map[uint64]struct{})
|
|
)
|
|
for i := 0; i < 5; i++ { // make #2 underpriced
|
|
var tx *types.Transaction
|
|
if i == 2 {
|
|
tx = makeTx(uint64(i), 0, 0, 0, outpricer)
|
|
} else {
|
|
tx = makeTx(uint64(i), 1, 1, 1, outpricer)
|
|
}
|
|
blob, _ := rlp.EncodeToBytes(tx)
|
|
|
|
id, _ := store.Put(blob)
|
|
if i < 2 {
|
|
valids[id] = struct{}{}
|
|
} else {
|
|
outpriced[id] = struct{}{}
|
|
}
|
|
}
|
|
// Insert a sequence of transactions fully overdrafted to verify that the
|
|
// entire set will get invalidated (case 6).
|
|
var (
|
|
exceeder, _ = crypto.GenerateKey()
|
|
exceeded = make(map[uint64]struct{})
|
|
)
|
|
for _, nonce := range []uint64{0, 1, 2} { // nonce 0 overdrafts the account
|
|
var tx *types.Transaction
|
|
if nonce == 0 {
|
|
tx = makeTx(nonce, 1, 100, 1, exceeder)
|
|
} else {
|
|
tx = makeTx(nonce, 1, 1, 1, exceeder)
|
|
}
|
|
blob, _ := rlp.EncodeToBytes(tx)
|
|
|
|
id, _ := store.Put(blob)
|
|
exceeded[id] = struct{}{}
|
|
}
|
|
// Insert a sequence of transactions partially overdrafted to verify that part
|
|
// of the set will get invalidated (case 6).
|
|
var (
|
|
overdrafter, _ = crypto.GenerateKey()
|
|
overdrafted = make(map[uint64]struct{})
|
|
)
|
|
for _, nonce := range []uint64{0, 1, 2} { // nonce 1 overdrafts the account
|
|
var tx *types.Transaction
|
|
if nonce == 1 {
|
|
tx = makeTx(nonce, 1, 100, 1, overdrafter)
|
|
} else {
|
|
tx = makeTx(nonce, 1, 1, 1, overdrafter)
|
|
}
|
|
blob, _ := rlp.EncodeToBytes(tx)
|
|
|
|
id, _ := store.Put(blob)
|
|
if nonce < 1 {
|
|
valids[id] = struct{}{}
|
|
} else {
|
|
overdrafted[id] = struct{}{}
|
|
}
|
|
}
|
|
// Insert a sequence of transactions overflowing the account cap to verify
|
|
// that part of the set will get invalidated (case 7).
|
|
var (
|
|
overcapper, _ = crypto.GenerateKey()
|
|
overcapped = make(map[uint64]struct{})
|
|
)
|
|
for nonce := uint64(0); nonce < maxTxsPerAccount+3; nonce++ {
|
|
blob, _ := rlp.EncodeToBytes(makeTx(nonce, 1, 1, 1, overcapper))
|
|
|
|
id, _ := store.Put(blob)
|
|
if nonce < maxTxsPerAccount {
|
|
valids[id] = struct{}{}
|
|
} else {
|
|
overcapped[id] = struct{}{}
|
|
}
|
|
}
|
|
// Insert a batch of duplicated transactions to verify that only one of each
|
|
// version will remain (case 8).
|
|
var (
|
|
duplicater, _ = crypto.GenerateKey()
|
|
duplicated = make(map[uint64]struct{})
|
|
)
|
|
for _, nonce := range []uint64{0, 1, 2} {
|
|
blob, _ := rlp.EncodeToBytes(makeTx(nonce, 1, 1, 1, duplicater))
|
|
|
|
for i := 0; i < int(nonce)+1; i++ {
|
|
id, _ := store.Put(blob)
|
|
if i == 0 {
|
|
valids[id] = struct{}{}
|
|
} else {
|
|
duplicated[id] = struct{}{}
|
|
}
|
|
}
|
|
}
|
|
// Insert a batch of duplicated nonces to verify that only one of each will
|
|
// remain (case 9).
|
|
var (
|
|
repeater, _ = crypto.GenerateKey()
|
|
repeated = make(map[uint64]struct{})
|
|
)
|
|
for _, nonce := range []uint64{0, 1, 2} {
|
|
for i := 0; i < int(nonce)+1; i++ {
|
|
blob, _ := rlp.EncodeToBytes(makeTx(nonce, 1, uint64(i)+1 /* unique hashes */, 1, repeater))
|
|
|
|
id, _ := store.Put(blob)
|
|
if i == 0 {
|
|
valids[id] = struct{}{}
|
|
} else {
|
|
repeated[id] = struct{}{}
|
|
}
|
|
}
|
|
}
|
|
store.Close()
|
|
|
|
// Create a blob pool out of the pre-seeded data
|
|
statedb, _ := state.New(types.EmptyRootHash, state.NewDatabaseForTesting())
|
|
statedb.AddBalance(crypto.PubkeyToAddress(gapper.PublicKey), uint256.NewInt(1000000), tracing.BalanceChangeUnspecified)
|
|
statedb.AddBalance(crypto.PubkeyToAddress(dangler.PublicKey), uint256.NewInt(1000000), tracing.BalanceChangeUnspecified)
|
|
statedb.AddBalance(crypto.PubkeyToAddress(filler.PublicKey), uint256.NewInt(1000000), tracing.BalanceChangeUnspecified)
|
|
statedb.SetNonce(crypto.PubkeyToAddress(filler.PublicKey), 3, tracing.NonceChangeUnspecified)
|
|
statedb.AddBalance(crypto.PubkeyToAddress(overlapper.PublicKey), uint256.NewInt(1000000), tracing.BalanceChangeUnspecified)
|
|
statedb.SetNonce(crypto.PubkeyToAddress(overlapper.PublicKey), 2, tracing.NonceChangeUnspecified)
|
|
statedb.AddBalance(crypto.PubkeyToAddress(underpayer.PublicKey), uint256.NewInt(1000000), tracing.BalanceChangeUnspecified)
|
|
statedb.AddBalance(crypto.PubkeyToAddress(outpricer.PublicKey), uint256.NewInt(1000000), tracing.BalanceChangeUnspecified)
|
|
statedb.AddBalance(crypto.PubkeyToAddress(exceeder.PublicKey), uint256.NewInt(1000000), tracing.BalanceChangeUnspecified)
|
|
statedb.AddBalance(crypto.PubkeyToAddress(overdrafter.PublicKey), uint256.NewInt(1000000), tracing.BalanceChangeUnspecified)
|
|
statedb.AddBalance(crypto.PubkeyToAddress(overcapper.PublicKey), uint256.NewInt(10000000), tracing.BalanceChangeUnspecified)
|
|
statedb.AddBalance(crypto.PubkeyToAddress(duplicater.PublicKey), uint256.NewInt(1000000), tracing.BalanceChangeUnspecified)
|
|
statedb.AddBalance(crypto.PubkeyToAddress(repeater.PublicKey), uint256.NewInt(1000000), tracing.BalanceChangeUnspecified)
|
|
statedb.Commit(0, true, false)
|
|
|
|
chain := &testBlockChain{
|
|
config: params.MainnetChainConfig,
|
|
basefee: uint256.NewInt(params.InitialBaseFee),
|
|
blobfee: uint256.NewInt(params.BlobTxMinBlobGasprice),
|
|
statedb: statedb,
|
|
}
|
|
pool := New(Config{Datadir: storage}, chain, nil)
|
|
if err := pool.Init(1, chain.CurrentBlock(), newReserver()); err != nil {
|
|
t.Fatalf("failed to create blob pool: %v", err)
|
|
}
|
|
defer pool.Close()
|
|
|
|
// Verify that the malformed (case 1), badly signed (case 2) and gapped (case
|
|
// 3) txs have been deleted from the pool
|
|
alive := make(map[uint64]struct{})
|
|
for _, txs := range pool.index {
|
|
for _, tx := range txs {
|
|
switch tx.id {
|
|
case malformed:
|
|
t.Errorf("malformed RLP transaction remained in storage")
|
|
case badsig:
|
|
t.Errorf("invalidly signed transaction remained in storage")
|
|
default:
|
|
if _, ok := dangling[tx.id]; ok {
|
|
t.Errorf("dangling transaction remained in storage: %d", tx.id)
|
|
} else if _, ok := filled[tx.id]; ok {
|
|
t.Errorf("filled transaction remained in storage: %d", tx.id)
|
|
} else if _, ok := overlapped[tx.id]; ok {
|
|
t.Errorf("overlapped transaction remained in storage: %d", tx.id)
|
|
} else if _, ok := gapped[tx.id]; ok {
|
|
t.Errorf("gapped transaction remained in storage: %d", tx.id)
|
|
} else if _, ok := underpaid[tx.id]; ok {
|
|
t.Errorf("underpaid transaction remained in storage: %d", tx.id)
|
|
} else if _, ok := outpriced[tx.id]; ok {
|
|
t.Errorf("outpriced transaction remained in storage: %d", tx.id)
|
|
} else if _, ok := exceeded[tx.id]; ok {
|
|
t.Errorf("fully overdrafted transaction remained in storage: %d", tx.id)
|
|
} else if _, ok := overdrafted[tx.id]; ok {
|
|
t.Errorf("partially overdrafted transaction remained in storage: %d", tx.id)
|
|
} else if _, ok := overcapped[tx.id]; ok {
|
|
t.Errorf("overcapped transaction remained in storage: %d", tx.id)
|
|
} else if _, ok := duplicated[tx.id]; ok {
|
|
t.Errorf("duplicated transaction remained in storage: %d", tx.id)
|
|
} else if _, ok := repeated[tx.id]; ok {
|
|
t.Errorf("repeated nonce transaction remained in storage: %d", tx.id)
|
|
} else {
|
|
alive[tx.id] = struct{}{}
|
|
}
|
|
}
|
|
}
|
|
}
|
|
// Verify that the rest of the transactions remained alive
|
|
if len(alive) != len(valids) {
|
|
t.Errorf("valid transaction count mismatch: have %d, want %d", len(alive), len(valids))
|
|
}
|
|
for id := range alive {
|
|
if _, ok := valids[id]; !ok {
|
|
t.Errorf("extra transaction %d", id)
|
|
}
|
|
}
|
|
for id := range valids {
|
|
if _, ok := alive[id]; !ok {
|
|
t.Errorf("missing transaction %d", id)
|
|
}
|
|
}
|
|
// Verify all the calculated pool internals. Interestingly, this is **not**
|
|
// a duplication of the above checks, this actually validates the verifier
|
|
// using the above already hard coded checks.
|
|
//
|
|
// Do not remove this, nor alter the above to be generic.
|
|
verifyPoolInternals(t, pool)
|
|
}
|
|
|
|
// Tests that transactions loaded from disk are indexed correctly.
|
|
//
|
|
// - 1. Transactions must be grouped by sender, sorted by nonce
|
|
// - 2. Eviction thresholds are calculated correctly for the sequences
|
|
// - 3. Balance usage of an account is totaled across all transactions
|
|
func TestOpenIndex(t *testing.T) {
|
|
//log.SetDefault(log.NewLogger(log.NewTerminalHandlerWithLevel(os.Stderr, log.LevelTrace, true)))
|
|
|
|
// Create a temporary folder for the persistent backend
|
|
storage := t.TempDir()
|
|
|
|
os.MkdirAll(filepath.Join(storage, pendingTransactionStore), 0700)
|
|
store, _ := billy.Open(billy.Options{Path: filepath.Join(storage, pendingTransactionStore)}, newSlotter(testMaxBlobsPerBlock), nil)
|
|
|
|
// Insert a sequence of transactions with varying price points to check that
|
|
// the cumulative minimum will be maintained.
|
|
var (
|
|
key, _ = crypto.GenerateKey()
|
|
addr = crypto.PubkeyToAddress(key.PublicKey)
|
|
|
|
txExecTipCaps = []uint64{10, 25, 5, 7, 1, 100}
|
|
txExecFeeCaps = []uint64{100, 90, 200, 10, 80, 300}
|
|
txBlobFeeCaps = []uint64{55, 66, 77, 33, 22, 11}
|
|
|
|
//basefeeJumps = []float64{39.098, 38.204, 44.983, 19.549, 37.204, 48.426} // log 1.125 (exec fee cap)
|
|
//blobfeeJumps = []float64{34.023, 35.570, 36.879, 29.686, 26.243, 20.358} // log 1.125 (blob fee cap)
|
|
|
|
evictExecTipCaps = []uint64{10, 10, 5, 5, 1, 1}
|
|
evictExecFeeJumps = []float64{39.098, 38.204, 38.204, 19.549, 19.549, 19.549} // min(log 1.125 (exec fee cap))
|
|
evictBlobFeeJumps = []float64{34.023, 34.023, 34.023, 29.686, 26.243, 20.358} // min(log 1.125 (blob fee cap))
|
|
|
|
totalSpent = uint256.NewInt(21000*(100+90+200+10+80+300) + blobSize*(55+66+77+33+22+11) + 100*6) // 21000 gas x price + 128KB x blobprice + value
|
|
)
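// (Worked example for the values above: an exec fee cap of 100 wei equals
// log1.125(100) = ln(100)/ln(1.125) ≈ 39.098 jumps, and the eviction jump
// values are running minima over the per-tx jumps, e.g. {39.098, 38.204,
// 44.983, 19.549, ...} collapses to {39.098, 38.204, 38.204, 19.549, ...}.
// Likewise totalSpent works out to 21000*780 + 131072*264 + 6*100 =
// 50,983,608 wei, assuming blobSize is the usual 128 KiB.)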
|
|
for _, i := range []int{5, 3, 4, 2, 0, 1} { // Randomize the tx insertion order to force sorting on load
|
|
tx := makeTx(uint64(i), txExecTipCaps[i], txExecFeeCaps[i], txBlobFeeCaps[i], key)
|
|
blob, _ := rlp.EncodeToBytes(tx)
|
|
store.Put(blob)
|
|
}
|
|
store.Close()
|
|
|
|
// Create a blob pool out of the pre-seeded data
|
|
statedb, _ := state.New(types.EmptyRootHash, state.NewDatabaseForTesting())
|
|
statedb.AddBalance(addr, uint256.NewInt(1_000_000_000), tracing.BalanceChangeUnspecified)
|
|
statedb.Commit(0, true, false)
|
|
|
|
chain := &testBlockChain{
|
|
config: params.MainnetChainConfig,
|
|
basefee: uint256.NewInt(params.InitialBaseFee),
|
|
blobfee: uint256.NewInt(params.BlobTxMinBlobGasprice),
|
|
statedb: statedb,
|
|
}
|
|
pool := New(Config{Datadir: storage}, chain, nil)
|
|
if err := pool.Init(1, chain.CurrentBlock(), newReserver()); err != nil {
|
|
t.Fatalf("failed to create blob pool: %v", err)
|
|
}
|
|
defer pool.Close()
|
|
|
|
// Verify that the transactions have been sorted by nonce (case 1)
|
|
for i := 0; i < len(pool.index[addr]); i++ {
|
|
if pool.index[addr][i].nonce != uint64(i) {
|
|
t.Errorf("tx %d nonce mismatch: have %d, want %d", i, pool.index[addr][i].nonce, uint64(i))
|
|
}
|
|
}
|
|
// Verify that the cumulative fee minimums have been correctly calculated (case 2)
|
|
for i, cap := range evictExecTipCaps {
|
|
if !pool.index[addr][i].evictionExecTip.Eq(uint256.NewInt(cap)) {
|
|
t.Errorf("eviction tip cap %d mismatch: have %d, want %d", i, pool.index[addr][i].evictionExecTip, cap)
|
|
}
|
|
}
|
|
for i, jumps := range evictExecFeeJumps {
|
|
if math.Abs(pool.index[addr][i].evictionExecFeeJumps-jumps) > 0.001 {
|
|
t.Errorf("eviction fee cap jumps %d mismatch: have %f, want %f", i, pool.index[addr][i].evictionExecFeeJumps, jumps)
|
|
}
|
|
}
|
|
for i, jumps := range evictBlobFeeJumps {
|
|
if math.Abs(pool.index[addr][i].evictionBlobFeeJumps-jumps) > 0.001 {
|
|
t.Errorf("eviction blob fee cap jumps %d mismatch: have %f, want %f", i, pool.index[addr][i].evictionBlobFeeJumps, jumps)
|
|
}
|
|
}
|
|
// Verify that the balance usage has been correctly calculated (case 3)
|
|
if !pool.spent[addr].Eq(totalSpent) {
|
|
t.Errorf("expenditure mismatch: have %d, want %d", pool.spent[addr], totalSpent)
|
|
}
|
|
// Verify all the calculated pool internals. Interestingly, this is **not**
|
|
// a duplication of the above checks, this actually validates the verifier
|
|
// using the above already hard coded checks.
|
|
//
|
|
// Do not remove this, nor alter the above to be generic.
|
|
verifyPoolInternals(t, pool)
|
|
}
|
|
|
|
// Tests that after indexing all the loaded transactions from disk, a price heap
|
|
// is correctly constructed based on the head basefee and blobfee.
|
|
func TestOpenHeap(t *testing.T) {
|
|
//log.SetDefault(log.NewLogger(log.NewTerminalHandlerWithLevel(os.Stderr, log.LevelTrace, true)))
|
|
|
|
// Create a temporary folder for the persistent backend
|
|
storage := t.TempDir()
|
|
|
|
os.MkdirAll(filepath.Join(storage, pendingTransactionStore), 0700)
|
|
store, _ := billy.Open(billy.Options{Path: filepath.Join(storage, pendingTransactionStore)}, newSlotter(testMaxBlobsPerBlock), nil)
|
|
|
|
// Insert a few transactions from a few accounts. To remove randomness from
|
|
// the heap initialization, use a deterministic account/tx/priority ordering.
|
|
var (
|
|
key1, _ = crypto.GenerateKey()
|
|
key2, _ = crypto.GenerateKey()
|
|
key3, _ = crypto.GenerateKey()
|
|
|
|
addr1 = crypto.PubkeyToAddress(key1.PublicKey)
|
|
addr2 = crypto.PubkeyToAddress(key2.PublicKey)
|
|
addr3 = crypto.PubkeyToAddress(key3.PublicKey)
|
|
)
|
|
if bytes.Compare(addr1[:], addr2[:]) > 0 {
|
|
key1, addr1, key2, addr2 = key2, addr2, key1, addr1
|
|
}
|
|
if bytes.Compare(addr1[:], addr3[:]) > 0 {
|
|
key1, addr1, key3, addr3 = key3, addr3, key1, addr1
|
|
}
|
|
if bytes.Compare(addr2[:], addr3[:]) > 0 {
|
|
key2, addr2, key3, addr3 = key3, addr3, key2, addr2
|
|
}
|
|
var (
|
|
tx1 = makeTx(0, 1, 1000, 90, key1)
|
|
tx2 = makeTx(0, 1, 800, 70, key2)
|
|
tx3 = makeTx(0, 1, 1500, 110, key3)
|
|
|
|
blob1, _ = rlp.EncodeToBytes(tx1)
|
|
blob2, _ = rlp.EncodeToBytes(tx2)
|
|
blob3, _ = rlp.EncodeToBytes(tx3)
|
|
|
|
heapOrder = []common.Address{addr2, addr1, addr3}
|
|
heapIndex = map[common.Address]int{addr2: 0, addr1: 1, addr3: 2}
|
|
)
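// (With the pool basefee at 1050 and blobfee at 105 below, addr2's tx has the
// lowest fee caps (800/70) and so sits at the eviction end of the heap,
// followed by addr1 (1000/90) and addr3 (1500/110) - hence heapOrder above.)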
|
|
store.Put(blob1)
|
|
store.Put(blob2)
|
|
store.Put(blob3)
|
|
store.Close()
|
|
|
|
// Create a blob pool out of the pre-seeded data
|
|
statedb, _ := state.New(types.EmptyRootHash, state.NewDatabaseForTesting())
|
|
statedb.AddBalance(addr1, uint256.NewInt(1_000_000_000), tracing.BalanceChangeUnspecified)
|
|
statedb.AddBalance(addr2, uint256.NewInt(1_000_000_000), tracing.BalanceChangeUnspecified)
|
|
statedb.AddBalance(addr3, uint256.NewInt(1_000_000_000), tracing.BalanceChangeUnspecified)
|
|
statedb.Commit(0, true, false)
|
|
|
|
chain := &testBlockChain{
|
|
config: params.MainnetChainConfig,
|
|
basefee: uint256.NewInt(1050),
|
|
blobfee: uint256.NewInt(105),
|
|
statedb: statedb,
|
|
}
|
|
pool := New(Config{Datadir: storage}, chain, nil)
|
|
if err := pool.Init(1, chain.CurrentBlock(), newReserver()); err != nil {
|
|
t.Fatalf("failed to create blob pool: %v", err)
|
|
}
|
|
defer pool.Close()
|
|
|
|
// Verify that the heap's internal state matches the expectations
|
|
for i, addr := range pool.evict.addrs {
|
|
if addr != heapOrder[i] {
|
|
t.Errorf("slot %d mismatch: have %v, want %v", i, addr, heapOrder[i])
|
|
}
|
|
}
|
|
for addr, i := range pool.evict.index {
|
|
if i != heapIndex[addr] {
|
|
t.Errorf("index for %v mismatch: have %d, want %d", addr, i, heapIndex[addr])
|
|
}
|
|
}
|
|
// Verify all the calculated pool internals. Interestingly, this is **not**
|
|
// a duplication of the above checks, this actually validates the verifier
|
|
// using the above already hard coded checks.
|
|
//
|
|
// Do not remove this, nor alter the above to be generic.
|
|
verifyPoolInternals(t, pool)
|
|
}
|
|
|
|
// Tests that after the pool's previous state is loaded back, any transactions
|
|
// over the new storage cap will get dropped.
|
|
func TestOpenCap(t *testing.T) {
|
|
//log.SetDefault(log.NewLogger(log.NewTerminalHandlerWithLevel(os.Stderr, log.LevelTrace, true)))
|
|
|
|
// Create a temporary folder for the persistent backend
|
|
storage := t.TempDir()
|
|
|
|
os.MkdirAll(filepath.Join(storage, pendingTransactionStore), 0700)
|
|
store, _ := billy.Open(billy.Options{Path: filepath.Join(storage, pendingTransactionStore)}, newSlotterEIP7594(testMaxBlobsPerBlock), nil)
|
|
|
|
// Insert a few transactions from a few accounts
|
|
var (
|
|
key1, _ = crypto.GenerateKey()
|
|
key2, _ = crypto.GenerateKey()
|
|
key3, _ = crypto.GenerateKey()
|
|
|
|
addr1 = crypto.PubkeyToAddress(key1.PublicKey)
|
|
addr2 = crypto.PubkeyToAddress(key2.PublicKey)
|
|
addr3 = crypto.PubkeyToAddress(key3.PublicKey)
|
|
|
|
tx1 = makeTx(0, 1, 1000, 100, key1)
|
|
tx2 = makeTx(0, 1, 800, 70, key2)
|
|
tx3 = makeTx(0, 1, 1500, 110, key3)
|
|
|
|
blob1, _ = rlp.EncodeToBytes(tx1)
|
|
blob2, _ = rlp.EncodeToBytes(tx2)
|
|
blob3, _ = rlp.EncodeToBytes(tx3)
|
|
|
|
keep = []common.Address{addr1, addr3}
|
|
drop = []common.Address{addr2}
|
|
size = 2 * (txAvgSize + blobSize + uint64(txBlobOverhead))
|
|
)
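// (When the data cap only fits two transactions, the cheapest one - addr2's
// tx with fee caps 800/70 - should be evicted first, which is why keep lists
// addr1 and addr3 while drop lists addr2.)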
|
|
store.Put(blob1)
|
|
store.Put(blob2)
|
|
store.Put(blob3)
|
|
store.Close()
|
|
|
|
// Verify pool capping twice: first by reducing the data cap, then restarting
|
|
// with a high cap to ensure everything was persisted previously
|
|
for _, datacap := range []uint64{2 * (txAvgSize + blobSize + uint64(txBlobOverhead)), 1000 * (txAvgSize + blobSize + uint64(txBlobOverhead))} {
|
|
// Create a blob pool out of the pre-seeded data, but cap it to 2 blob transactions
|
|
statedb, _ := state.New(types.EmptyRootHash, state.NewDatabaseForTesting())
|
|
statedb.AddBalance(addr1, uint256.NewInt(1_000_000_000), tracing.BalanceChangeUnspecified)
|
|
statedb.AddBalance(addr2, uint256.NewInt(1_000_000_000), tracing.BalanceChangeUnspecified)
|
|
statedb.AddBalance(addr3, uint256.NewInt(1_000_000_000), tracing.BalanceChangeUnspecified)
|
|
statedb.Commit(0, true, false)
|
|
|
|
chain := &testBlockChain{
|
|
config: params.MainnetChainConfig,
|
|
basefee: uint256.NewInt(1050),
|
|
blobfee: uint256.NewInt(105),
|
|
statedb: statedb,
|
|
}
|
|
pool := New(Config{Datadir: storage, Datacap: datacap}, chain, nil)
|
|
if err := pool.Init(1, chain.CurrentBlock(), newReserver()); err != nil {
|
|
t.Fatalf("failed to create blob pool: %v", err)
|
|
}
|
|
// Verify that enough transactions have been dropped to get the pool's size
|
|
// under the requested limit
|
|
if len(pool.index) != len(keep) {
|
|
t.Errorf("tracked account count mismatch: have %d, want %d", len(pool.index), len(keep))
|
|
}
|
|
for _, addr := range keep {
|
|
if _, ok := pool.index[addr]; !ok {
|
|
t.Errorf("expected account %v missing from pool", addr)
|
|
}
|
|
}
|
|
for _, addr := range drop {
|
|
if _, ok := pool.index[addr]; ok {
|
|
t.Errorf("unexpected account %v present in pool", addr)
|
|
}
|
|
}
|
|
if pool.stored != size {
|
|
t.Errorf("pool stored size mismatch: have %v, want %v", pool.stored, size)
|
|
}
|
|
// Verify all the calculated pool internals. Interestingly, this is **not**
|
|
// a duplication of the above checks, this actually validates the verifier
|
|
// using the above already hard coded checks.
|
|
//
|
|
// Do not remove this, nor alter the above to be generic.
|
|
verifyPoolInternals(t, pool)
|
|
|
|
pool.Close()
|
|
}
|
|
}
|
|
|
|
// TestChangingSlotterSize attempts to mimic a scenario where the max blob count
// of the pool is increased. This would happen during a client release where a
// new fork is added with a max blob count higher than the previous fork. We
// want to make sure transactions are persisted across those runs.
|
|
func TestChangingSlotterSize(t *testing.T) {
|
|
//log.SetDefault(log.NewLogger(log.NewTerminalHandlerWithLevel(os.Stderr, log.LevelTrace, true)))
|
|
|
|
// Create a temporary folder for the persistent backend
|
|
storage := t.TempDir()
|
|
|
|
os.MkdirAll(filepath.Join(storage, pendingTransactionStore), 0700)
|
|
store, _ := billy.Open(billy.Options{Path: filepath.Join(storage, pendingTransactionStore)}, newSlotter(6), nil)
|
|
|
|
// Create transactions from a few accounts.
|
|
var (
|
|
key1, _ = crypto.GenerateKey()
|
|
key2, _ = crypto.GenerateKey()
|
|
key3, _ = crypto.GenerateKey()
|
|
|
|
addr1 = crypto.PubkeyToAddress(key1.PublicKey)
|
|
addr2 = crypto.PubkeyToAddress(key2.PublicKey)
|
|
addr3 = crypto.PubkeyToAddress(key3.PublicKey)
|
|
|
|
tx1 = makeMultiBlobTx(0, 1, 1000, 100, 6, 0, key1, types.BlobSidecarVersion0)
|
|
tx2 = makeMultiBlobTx(0, 1, 800, 70, 6, 0, key2, types.BlobSidecarVersion0)
|
|
tx3 = makeMultiBlobTx(0, 1, 800, 110, 24, 0, key3, types.BlobSidecarVersion0)
|
|
|
|
blob1, _ = rlp.EncodeToBytes(tx1)
|
|
blob2, _ = rlp.EncodeToBytes(tx2)
|
|
)
|
|
|
|
// Write the two safely sized txs to the store. Note: although the store is
// configured for a blob count of 6, it can also support around ~1MB of call
// data - all this to say that we aren't using the absolute largest shelf
// available.
|
|
store.Put(blob1)
|
|
store.Put(blob2)
|
|
store.Close()
|
|
|
|
// Mimic a blobpool with max blob count of 6 upgrading to a max blob count of 24.
|
|
for _, maxBlobs := range []int{6, 24} {
|
|
statedb, _ := state.New(types.EmptyRootHash, state.NewDatabaseForTesting())
|
|
statedb.AddBalance(addr1, uint256.NewInt(1_000_000_000), tracing.BalanceChangeUnspecified)
|
|
statedb.AddBalance(addr2, uint256.NewInt(1_000_000_000), tracing.BalanceChangeUnspecified)
|
|
statedb.AddBalance(addr3, uint256.NewInt(1_000_000_000), tracing.BalanceChangeUnspecified)
|
|
statedb.Commit(0, true, false)
|
|
|
|
// Make custom chain config where the max blob count changes based on the loop variable.
|
|
cancunTime := uint64(0)
|
|
config := &params.ChainConfig{
|
|
ChainID: big.NewInt(1),
|
|
LondonBlock: big.NewInt(0),
|
|
BerlinBlock: big.NewInt(0),
|
|
CancunTime: &cancunTime,
|
|
BlobScheduleConfig: &params.BlobScheduleConfig{
|
|
Cancun: &params.BlobConfig{
|
|
Target: maxBlobs / 2,
|
|
Max: maxBlobs,
|
|
UpdateFraction: params.DefaultCancunBlobConfig.UpdateFraction,
|
|
},
|
|
},
|
|
}
|
|
chain := &testBlockChain{
|
|
config: config,
|
|
basefee: uint256.NewInt(1050),
|
|
blobfee: uint256.NewInt(105),
|
|
statedb: statedb,
|
|
}
|
|
pool := New(Config{Datadir: storage}, chain, nil)
|
|
if err := pool.Init(1, chain.CurrentBlock(), newReserver()); err != nil {
|
|
t.Fatalf("failed to create blob pool: %v", err)
|
|
}
|
|
|
|
// Try to add the big blob tx. In the initial iteration it should overflow
|
|
// the pool. On the subsequent iteration it should be accepted.
|
|
errs := pool.Add([]*types.Transaction{tx3}, true)
|
|
if _, ok := pool.index[addr3]; ok && maxBlobs == 6 {
|
|
t.Errorf("expected insert of oversized blob tx to fail: blobs=24, maxBlobs=%d, err=%v", maxBlobs, errs[0])
|
|
} else if !ok && maxBlobs == 24 {
|
|
t.Errorf("expected insert of oversized blob tx to succeed: blobs=24, maxBlobs=%d, err=%v", maxBlobs, errs[0])
|
|
}
|
|
|
|
// Verify the regular two txs are always available.
|
|
if got := pool.Get(tx1.Hash()); got == nil {
|
|
t.Errorf("expected tx %s from %s in pool", tx1.Hash(), addr1)
|
|
}
|
|
if got := pool.Get(tx2.Hash()); got == nil {
|
|
t.Errorf("expected tx %s from %s in pool", tx2.Hash(), addr2)
|
|
}
|
|
|
|
// Verify all the calculated pool internals. Interestingly, this is **not**
|
|
// a duplication of the above checks, this actually validates the verifier
|
|
// using the above already hard coded checks.
|
|
//
|
|
// Do not remove this, nor alter the above to be generic.
|
|
verifyPoolInternals(t, pool)
|
|
|
|
pool.Close()
|
|
}
|
|
}
|
|
|
|
// TestBillyMigration tests the billy migration from the default slotter to
|
|
// the PeerDAS slotter. This tests both the migration of the slotter
|
|
// as well as increasing the slotter size of the new slotter.
|
|
func TestBillyMigration(t *testing.T) {
|
|
//log.SetDefault(log.NewLogger(log.NewTerminalHandlerWithLevel(os.Stderr, log.LevelTrace, true)))
|
|
|
|
// Create a temporary folder for the persistent backend
|
|
storage := t.TempDir()
|
|
|
|
os.MkdirAll(filepath.Join(storage, pendingTransactionStore), 0700)
|
|
os.MkdirAll(filepath.Join(storage, limboedTransactionStore), 0700)
|
|
// Create the billy with the old slotter
|
|
oldSlotter := newSlotterEIP7594(6)
|
|
store, _ := billy.Open(billy.Options{Path: filepath.Join(storage, pendingTransactionStore)}, oldSlotter, nil)
|
|
|
|
// Create transactions from a few accounts.
|
|
var (
|
|
key1, _ = crypto.GenerateKey()
|
|
key2, _ = crypto.GenerateKey()
|
|
key3, _ = crypto.GenerateKey()
|
|
|
|
addr1 = crypto.PubkeyToAddress(key1.PublicKey)
|
|
addr2 = crypto.PubkeyToAddress(key2.PublicKey)
|
|
addr3 = crypto.PubkeyToAddress(key3.PublicKey)
|
|
|
|
tx1 = makeMultiBlobTx(0, 1, 1000, 100, 6, 0, key1, types.BlobSidecarVersion0)
|
|
tx2 = makeMultiBlobTx(0, 1, 800, 70, 6, 0, key2, types.BlobSidecarVersion0)
|
|
tx3 = makeMultiBlobTx(0, 1, 800, 110, 24, 0, key3, types.BlobSidecarVersion0)
|
|
|
|
blob1, _ = rlp.EncodeToBytes(tx1)
|
|
blob2, _ = rlp.EncodeToBytes(tx2)
|
|
)
|
|
|
|
// Write the two safely sized txs to the store. Note: although the store is
// configured for a blob count of 6, it can also support around ~1MB of call
// data - all this to say that we aren't using the absolute largest shelf
// available.
|
|
store.Put(blob1)
|
|
store.Put(blob2)
|
|
store.Close()
|
|
|
|
// Mimic a blobpool with max blob count of 6 upgrading to a max blob count of 24.
|
|
for _, maxBlobs := range []int{6, 24} {
|
|
statedb, _ := state.New(types.EmptyRootHash, state.NewDatabaseForTesting())
|
|
statedb.AddBalance(addr1, uint256.NewInt(1_000_000_000), tracing.BalanceChangeUnspecified)
|
|
statedb.AddBalance(addr2, uint256.NewInt(1_000_000_000), tracing.BalanceChangeUnspecified)
|
|
statedb.AddBalance(addr3, uint256.NewInt(1_000_000_000), tracing.BalanceChangeUnspecified)
|
|
statedb.Commit(0, true, false)
|
|
|
|
// Make custom chain config where the max blob count changes based on the loop variable.
|
|
zero := uint64(0)
|
|
config := &params.ChainConfig{
|
|
ChainID: big.NewInt(1),
|
|
LondonBlock: big.NewInt(0),
|
|
BerlinBlock: big.NewInt(0),
|
|
CancunTime: &zero,
|
|
OsakaTime: &zero,
|
|
BlobScheduleConfig: &params.BlobScheduleConfig{
|
|
Cancun: &params.BlobConfig{
|
|
Target: maxBlobs / 2,
|
|
Max: maxBlobs,
|
|
UpdateFraction: params.DefaultCancunBlobConfig.UpdateFraction,
|
|
},
|
|
Osaka: &params.BlobConfig{
|
|
Target: maxBlobs / 2,
|
|
Max: maxBlobs,
|
|
UpdateFraction: params.DefaultCancunBlobConfig.UpdateFraction,
|
|
},
|
|
},
|
|
}
|
|
chain := &testBlockChain{
|
|
config: config,
|
|
basefee: uint256.NewInt(1050),
|
|
blobfee: uint256.NewInt(105),
|
|
statedb: statedb,
|
|
}
|
|
pool := New(Config{Datadir: storage}, chain, nil)
|
|
if err := pool.Init(1, chain.CurrentBlock(), newReserver()); err != nil {
|
|
t.Fatalf("failed to create blob pool: %v", err)
|
|
}
|
|
|
|
// Try to add the big blob tx. In the initial iteration it should overflow
|
|
// the pool. On the subsequent iteration it should be accepted.
|
|
errs := pool.Add([]*types.Transaction{tx3}, true)
|
|
if _, ok := pool.index[addr3]; ok && maxBlobs == 6 {
|
|
t.Errorf("expected insert of oversized blob tx to fail: blobs=24, maxBlobs=%d, err=%v", maxBlobs, errs[0])
|
|
} else if !ok && maxBlobs == 24 {
|
|
t.Errorf("expected insert of oversized blob tx to succeed: blobs=24, maxBlobs=%d, err=%v", maxBlobs, errs[0])
|
|
}
|
|
|
|
// Verify the regular two txs are always available.
|
|
if got := pool.Get(tx1.Hash()); got == nil {
|
|
t.Errorf("expected tx %s from %s in pool", tx1.Hash(), addr1)
|
|
}
|
|
if got := pool.Get(tx2.Hash()); got == nil {
|
|
t.Errorf("expected tx %s from %s in pool", tx2.Hash(), addr2)
|
|
}
|
|
|
|
// Verify all the calculated pool internals. Interestingly, this is **not**
|
|
// a duplication of the above checks, this actually validates the verifier
|
|
// using the above already hard coded checks.
|
|
//
|
|
// Do not remove this, nor alter the above to be generic.
|
|
verifyPoolInternals(t, pool)
|
|
|
|
pool.Close()
|
|
}
|
|
}
|
|
|
|
// TestBlobCountLimit tests the blobpool enforced limits on the max blob count.
|
|
func TestBlobCountLimit(t *testing.T) {
|
|
var (
|
|
key1, _ = crypto.GenerateKey()
|
|
key2, _ = crypto.GenerateKey()
|
|
|
|
addr1 = crypto.PubkeyToAddress(key1.PublicKey)
|
|
addr2 = crypto.PubkeyToAddress(key2.PublicKey)
|
|
)
|
|
|
|
statedb, _ := state.New(types.EmptyRootHash, state.NewDatabaseForTesting())
|
|
statedb.AddBalance(addr1, uint256.NewInt(1_000_000_000), tracing.BalanceChangeUnspecified)
|
|
statedb.AddBalance(addr2, uint256.NewInt(1_000_000_000), tracing.BalanceChangeUnspecified)
|
|
statedb.Commit(0, true, false)
|
|
|
|
// Make Prague-enabled custom chain config.
|
|
cancunTime := uint64(0)
|
|
pragueTime := uint64(0)
|
|
config := &params.ChainConfig{
|
|
ChainID: big.NewInt(1),
|
|
LondonBlock: big.NewInt(0),
|
|
BerlinBlock: big.NewInt(0),
|
|
CancunTime: &cancunTime,
|
|
PragueTime: &pragueTime,
|
|
BlobScheduleConfig: &params.BlobScheduleConfig{
|
|
Cancun: params.DefaultCancunBlobConfig,
|
|
Prague: params.DefaultPragueBlobConfig,
|
|
},
|
|
}
|
|
chain := &testBlockChain{
|
|
config: config,
|
|
basefee: uint256.NewInt(1050),
|
|
blobfee: uint256.NewInt(105),
|
|
statedb: statedb,
|
|
}
|
|
pool := New(Config{Datadir: t.TempDir()}, chain, nil)
|
|
if err := pool.Init(1, chain.CurrentBlock(), newReserver()); err != nil {
|
|
t.Fatalf("failed to create blob pool: %v", err)
|
|
}
|
|
|
|
// Attempt to add transactions.
|
|
var (
|
|
tx1 = makeMultiBlobTx(0, 1, 1000, 100, 6, 0, key1, types.BlobSidecarVersion0)
|
|
tx2 = makeMultiBlobTx(0, 1, 800, 70, 7, 0, key2, types.BlobSidecarVersion0)
|
|
)
|
|
errs := pool.Add([]*types.Transaction{tx1, tx2}, true)
|
|
|
|
// Check that the first tx succeeds and the second fails.
|
|
if errs[0] != nil {
|
|
t.Fatalf("expected tx with 7 blobs to succeed")
|
|
}
|
|
if !errors.Is(errs[1], txpool.ErrTxBlobLimitExceeded) {
|
|
t.Fatalf("expected tx with 8 blobs to fail, got: %v", errs[1])
|
|
}
|
|
|
|
verifyPoolInternals(t, pool)
|
|
pool.Close()
|
|
}
|
|
|
|
// Tests that adding transaction will correctly store it in the persistent store
|
|
// and update all the indices.
|
|
//
|
|
// Note, this test mostly checks the pool's transaction shuffling logic and
// things specific to the blob pool. It does not do an exhaustive transaction
// validity check.
|
|
func TestAdd(t *testing.T) {
|
|
//log.SetDefault(log.NewLogger(log.NewTerminalHandlerWithLevel(os.Stderr, log.LevelTrace, true)))
|
|
|
|
// seed is a helper tuple to seed an initial state db and pool
|
|
type seed struct {
|
|
balance uint64
|
|
nonce uint64
|
|
txs []*types.BlobTx
|
|
}
|
|
// addtx is a helper sender/tx tuple to represent a new tx addition
|
|
type addtx struct {
|
|
from string
|
|
tx *types.BlobTx
|
		err  error
	}

	tests := []struct {
		seeds map[string]seed
		adds  []addtx
		block []addtx
	}{
		// Transactions from new accounts should be accepted if their initial
		// nonce matches the expected one from the statedb. Higher or lower must
		// be rejected.
		{
			seeds: map[string]seed{
				"alice":  {balance: 21100 + blobSize},
				"bob":    {balance: 21100 + blobSize, nonce: 1},
				"claire": {balance: 21100 + blobSize},
				"dave":   {balance: 21100 + blobSize, nonce: 1},
			},
			adds: []addtx{
				{ // New account, no previous txs: accept nonce 0
					from: "alice",
					tx:   makeUnsignedTx(0, 1, 1, 1),
					err:  nil,
				},
				{ // Old account, 1 tx in chain, 0 pending: accept nonce 1
					from: "bob",
					tx:   makeUnsignedTx(1, 1, 1, 1),
					err:  nil,
				},
				{ // New account, no previous txs: reject nonce 1
					from: "claire",
					tx:   makeUnsignedTx(1, 1, 1, 1),
					err:  core.ErrNonceTooHigh,
				},
				{ // Old account, 1 tx in chain, 0 pending: reject nonce 0
					from: "dave",
					tx:   makeUnsignedTx(0, 1, 1, 1),
					err:  core.ErrNonceTooLow,
				},
				{ // Old account, 1 tx in chain, 0 pending: reject nonce 2
					from: "dave",
					tx:   makeUnsignedTx(2, 1, 1, 1),
					err:  core.ErrNonceTooHigh,
				},
			},
		},
		// Transactions from already pooled accounts should only be accepted if
		// the nonces are contiguous (ignore prices for now, will check later)
		{
			seeds: map[string]seed{
				"alice": {
					balance: 1000000,
					txs: []*types.BlobTx{
						makeUnsignedTxWithTestBlob(0, 1, 1, 1, 0),
					},
				},
				"bob": {
					balance: 1000000,
					nonce:   1,
					txs: []*types.BlobTx{
						makeUnsignedTxWithTestBlob(1, 1, 1, 1, 1),
					},
				},
			},
			adds: []addtx{
				{ // New account, 1 tx pending: reject duplicate nonce 0
					from: "alice",
					tx:   makeUnsignedTxWithTestBlob(0, 1, 1, 1, 0),
					err:  txpool.ErrAlreadyKnown,
				},
				{ // New account, 1 tx pending: reject replacement nonce 0 (ignore price for now)
					from: "alice",
					tx:   makeUnsignedTx(0, 1, 1, 2),
					err:  txpool.ErrReplaceUnderpriced,
				},
				{ // New account, 1 tx pending: accept nonce 1
					from: "alice",
					tx:   makeUnsignedTx(1, 1, 1, 1),
					err:  nil,
				},
				{ // New account, 2 txs pending: reject nonce 3
					from: "alice",
					tx:   makeUnsignedTx(3, 1, 1, 1),
					err:  core.ErrNonceTooHigh,
				},
				{ // New account, 2 txs pending: accept nonce 2
					from: "alice",
					tx:   makeUnsignedTx(2, 1, 1, 1),
					err:  nil,
				},
				{ // New account, 3 txs pending: accept nonce 3 now
					from: "alice",
					tx:   makeUnsignedTx(3, 1, 1, 1),
					err:  nil,
				},
				{ // Old account, 1 tx in chain, 1 tx pending: reject duplicate nonce 1
					from: "bob",
					tx:   makeUnsignedTxWithTestBlob(1, 1, 1, 1, 1),
					err:  txpool.ErrAlreadyKnown,
				},
				{ // Old account, 1 tx in chain, 1 tx pending: accept nonce 2 (ignore price for now)
					from: "bob",
					tx:   makeUnsignedTx(2, 1, 1, 1),
					err:  nil,
				},
			},
		},
		// Transactions should only be accepted into the pool if the cumulative
		// expenditure doesn't overflow the account balance
		{
			seeds: map[string]seed{
				"alice": {balance: 63299 + 3*blobSize}, // 3 tx - 1 wei
			},
			adds: []addtx{
				{ // New account, no previous txs: accept nonce 0 with 21100 wei spend
					from: "alice",
					tx:   makeUnsignedTx(0, 1, 1, 1),
					err:  nil,
				},
				{ // New account, 1 pooled tx with 21100 wei spent: accept nonce 1 with 21100 wei spend
					from: "alice",
					tx:   makeUnsignedTx(1, 1, 1, 1),
					err:  nil,
				},
				{ // New account, 2 pooled tx with 42200 wei spent: reject nonce 2 with 21100 wei spend (1 wei overflow)
					from: "alice",
					tx:   makeUnsignedTx(2, 1, 1, 1),
					err:  core.ErrInsufficientFunds,
				},
			},
		},
		// Transactions should only be accepted into the pool if the total count
		// from the same account doesn't overflow the pool limits
		{
			seeds: map[string]seed{
				"alice": {balance: 10000000},
			},
			adds: []addtx{
				{ // New account, no previous txs, 16 slots left: accept nonce 0
					from: "alice",
					tx:   makeUnsignedTx(0, 1, 1, 1),
					err:  nil,
				},
				{ // New account, 1 pooled tx, 15 slots left: accept nonce 1
					from: "alice",
					tx:   makeUnsignedTx(1, 1, 1, 1),
					err:  nil,
				},
				{ // New account, 2 pooled tx, 14 slots left: accept nonce 2
					from: "alice",
					tx:   makeUnsignedTx(2, 1, 1, 1),
					err:  nil,
				},
				{ // New account, 3 pooled tx, 13 slots left: accept nonce 3
					from: "alice",
					tx:   makeUnsignedTx(3, 1, 1, 1),
					err:  nil,
				},
				{ // New account, 4 pooled tx, 12 slots left: accept nonce 4
					from: "alice",
					tx:   makeUnsignedTx(4, 1, 1, 1),
					err:  nil,
				},
				{ // New account, 5 pooled tx, 11 slots left: accept nonce 5
					from: "alice",
					tx:   makeUnsignedTx(5, 1, 1, 1),
					err:  nil,
				},
				{ // New account, 6 pooled tx, 10 slots left: accept nonce 6
					from: "alice",
					tx:   makeUnsignedTx(6, 1, 1, 1),
					err:  nil,
				},
				{ // New account, 7 pooled tx, 9 slots left: accept nonce 7
					from: "alice",
					tx:   makeUnsignedTx(7, 1, 1, 1),
					err:  nil,
				},
				{ // New account, 8 pooled tx, 8 slots left: accept nonce 8
					from: "alice",
					tx:   makeUnsignedTx(8, 1, 1, 1),
					err:  nil,
				},
				{ // New account, 9 pooled tx, 7 slots left: accept nonce 9
					from: "alice",
					tx:   makeUnsignedTx(9, 1, 1, 1),
					err:  nil,
				},
				{ // New account, 10 pooled tx, 6 slots left: accept nonce 10
					from: "alice",
					tx:   makeUnsignedTx(10, 1, 1, 1),
					err:  nil,
				},
				{ // New account, 11 pooled tx, 5 slots left: accept nonce 11
					from: "alice",
					tx:   makeUnsignedTx(11, 1, 1, 1),
					err:  nil,
				},
				{ // New account, 12 pooled tx, 4 slots left: accept nonce 12
					from: "alice",
					tx:   makeUnsignedTx(12, 1, 1, 1),
					err:  nil,
				},
				{ // New account, 13 pooled tx, 3 slots left: accept nonce 13
					from: "alice",
					tx:   makeUnsignedTx(13, 1, 1, 1),
					err:  nil,
				},
				{ // New account, 14 pooled tx, 2 slots left: accept nonce 14
					from: "alice",
					tx:   makeUnsignedTx(14, 1, 1, 1),
					err:  nil,
				},
				{ // New account, 15 pooled tx, 1 slots left: accept nonce 15
					from: "alice",
					tx:   makeUnsignedTx(15, 1, 1, 1),
					err:  nil,
				},
				{ // New account, 16 pooled tx, 0 slots left: accept nonce 15 replacement
					from: "alice",
					tx:   makeUnsignedTx(15, 10, 10, 10),
					err:  nil,
				},
				{ // New account, 16 pooled tx, 0 slots left: reject nonce 16 with overcap
					from: "alice",
					tx:   makeUnsignedTx(16, 1, 1, 1),
					err:  txpool.ErrAccountLimitExceeded,
				},
			},
		},
		// Previously existing transactions should be allowed to be replaced iff
		// the new cumulative expenditure can be covered by the account and the
		// prices are bumped all around (no percentage check here).
		{
			seeds: map[string]seed{
				"alice": {balance: 2*100 + 5*21000 + 3*blobSize},
			},
			adds: []addtx{
				{ // New account, no previous txs: reject nonce 0 with 341172 wei spend
					from: "alice",
					tx:   makeUnsignedTx(0, 1, 20, 1),
					err:  core.ErrInsufficientFunds,
				},
				{ // New account, no previous txs: accept nonce 0 with 173172 wei spend
					from: "alice",
					tx:   makeUnsignedTx(0, 1, 2, 1),
					err:  nil,
				},
				{ // New account, 1 pooled tx with 173172 wei spent: accept nonce 1 with 152172 wei spend
					from: "alice",
					tx:   makeUnsignedTx(1, 1, 1, 1),
					err:  nil,
				},
				{ // New account, 2 pooled tx with 325344 wei spent: reject nonce 0 with 599684 wei spend (173072 extra) (would overflow balance at nonce 1)
					from: "alice",
					tx:   makeUnsignedTx(0, 2, 5, 2),
					err:  core.ErrInsufficientFunds,
				},
				{ // New account, 2 pooled tx with 325344 wei spent: reject nonce 0 with no-gastip-bump
					from: "alice",
					tx:   makeUnsignedTx(0, 1, 3, 2),
					err:  txpool.ErrReplaceUnderpriced,
				},
				{ // New account, 2 pooled tx with 325344 wei spent: reject nonce 0 with no-gascap-bump
					from: "alice",
					tx:   makeUnsignedTx(0, 2, 2, 2),
					err:  txpool.ErrReplaceUnderpriced,
				},
				{ // New account, 2 pooled tx with 325344 wei spent: reject nonce 0 with no-blobcap-bump
					from: "alice",
					tx:   makeUnsignedTx(0, 2, 4, 1),
					err:  txpool.ErrReplaceUnderpriced,
				},
				{ // New account, 2 pooled tx with 325344 wei spent: accept nonce 0 with 84100 wei spend (42000 extra)
					from: "alice",
					tx:   makeUnsignedTx(0, 2, 4, 2),
					err:  nil,
				},
			},
		},
		// Previously existing transactions should be allowed to be replaced iff
		// the new prices are bumped by a sufficient amount.
		{
			seeds: map[string]seed{
				"alice": {balance: 100 + 8*21000 + 4*blobSize},
			},
			adds: []addtx{
				{ // New account, no previous txs: accept nonce 0
					from: "alice",
					tx:   makeUnsignedTx(0, 2, 4, 2),
					err:  nil,
				},
				{ // New account, 1 pooled tx: reject nonce 0 with low-gastip-bump
					from: "alice",
					tx:   makeUnsignedTx(0, 3, 8, 4),
					err:  txpool.ErrReplaceUnderpriced,
				},
				{ // New account, 1 pooled tx: reject nonce 0 with low-gascap-bump
					from: "alice",
					tx:   makeUnsignedTx(0, 4, 6, 4),
					err:  txpool.ErrReplaceUnderpriced,
				},
				{ // New account, 1 pooled tx: reject nonce 0 with low-blobcap-bump
					from: "alice",
					tx:   makeUnsignedTx(0, 4, 8, 3),
					err:  txpool.ErrReplaceUnderpriced,
				},
				{ // New account, 1 pooled tx: accept nonce 0 with all-bumps
					from: "alice",
					tx:   makeUnsignedTx(0, 4, 8, 4),
					err:  nil,
				},
			},
		},
		// Blob transactions that don't meet the min blob gas price should be rejected
		{
			seeds: map[string]seed{
				"alice": {balance: 10000000},
			},
			adds: []addtx{
				{ // New account, no previous txs, nonce 0, but blob fee cap too low
					from: "alice",
					tx:   makeUnsignedTx(0, 1, 1, 0),
					err:  txpool.ErrTxGasPriceTooLow,
				},
				{ // Same as above but blob fee cap equals minimum, should be accepted
					from: "alice",
					tx:   makeUnsignedTx(0, 1, 1, params.BlobTxMinBlobGasprice),
					err:  nil,
				},
			},
		},
		// Tests issue #30518 where a refactor broke internal state invariants,
		// causing included transactions not to be properly accounted and thus
		// account states going out of sync with the chain.
		{
			seeds: map[string]seed{
				"alice": {
					balance: 1000000,
					txs: []*types.BlobTx{
						makeUnsignedTx(0, 1, 1, 1),
					},
				},
			},
			block: []addtx{
				{
					from: "alice",
					tx:   makeUnsignedTx(0, 1, 1, 1),
				},
			},
		},
	}
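	// Note on the balance seeds above: with makeUnsignedTx(nonce, 1, 1, 1) each
	// transaction appears to cost 21000 gas at a 1 wei gas fee cap plus a 100 wei
	// transfer value (21100 wei), plus blobSize wei for its blob at a 1 wei blob
	// fee cap. That is why a single-tx seed is 21100 + blobSize, and why
	// 63299 + 3*blobSize ("3 tx - 1 wei") is exactly one wei short of three txs.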
	for i, tt := range tests {
		// Create a temporary folder for the persistent backend
		storage := filepath.Join(t.TempDir(), fmt.Sprintf("test-%d", i))

		os.MkdirAll(filepath.Join(storage, pendingTransactionStore), 0700)
		store, _ := billy.Open(billy.Options{Path: filepath.Join(storage, pendingTransactionStore)}, newSlotter(testMaxBlobsPerBlock), nil)

		// Insert the seed transactions for the pool startup
		var (
			keys  = make(map[string]*ecdsa.PrivateKey)
			addrs = make(map[string]common.Address)
		)
		statedb, _ := state.New(types.EmptyRootHash, state.NewDatabaseForTesting())
		for acc, seed := range tt.seeds {
			// Generate a new random key/address for the seed account
			keys[acc], _ = crypto.GenerateKey()
			addrs[acc] = crypto.PubkeyToAddress(keys[acc].PublicKey)

			// Seed the state database with this account
			statedb.AddBalance(addrs[acc], new(uint256.Int).SetUint64(seed.balance), tracing.BalanceChangeUnspecified)
			statedb.SetNonce(addrs[acc], seed.nonce, tracing.NonceChangeUnspecified)

			// Sign the seed transactions and store them in the data store
			for _, tx := range seed.txs {
				signed := types.MustSignNewTx(keys[acc], types.LatestSigner(params.MainnetChainConfig), tx)
				blob, _ := rlp.EncodeToBytes(signed)
				store.Put(blob)
			}
		}
		statedb.Commit(0, true, false)
		store.Close()

		// Create a blob pool out of the pre-seeded data
		chain := &testBlockChain{
			config:  params.MainnetChainConfig,
			basefee: uint256.NewInt(1050),
			blobfee: uint256.NewInt(105),
			statedb: statedb,
		}
		pool := New(Config{Datadir: storage}, chain, nil)
		if err := pool.Init(1, chain.CurrentBlock(), newReserver()); err != nil {
			t.Fatalf("test %d: failed to create blob pool: %v", i, err)
		}
		verifyPoolInternals(t, pool)

		// Add each transaction one by one, verifying the pool internals in between
		for j, add := range tt.adds {
			signed, _ := types.SignNewTx(keys[add.from], types.LatestSigner(params.MainnetChainConfig), add.tx)
			if errs := pool.Add([]*types.Transaction{signed}, true); !errors.Is(errs[0], add.err) {
				t.Errorf("test %d, tx %d: adding transaction error mismatch: have %v, want %v", i, j, errs[0], add.err)
			}
			if add.err == nil {
				size, exist := pool.lookup.sizeOfTx(signed.Hash())
				if !exist {
					t.Errorf("test %d, tx %d: failed to lookup transaction's size", i, j)
				}
				if size != signed.Size() {
					t.Errorf("test %d, tx %d: transaction's size mismatches: have %v, want %v",
						i, j, size, signed.Size())
				}
			}
			verifyPoolInternals(t, pool)
		}
		verifyPoolInternals(t, pool)

		// If the test contains a chain head event, run that and again verify the internals
		if tt.block != nil {
			// Fake a header for the new set of transactions
			header := &types.Header{
				Number:     big.NewInt(int64(chain.CurrentBlock().Number.Uint64() + 1)),
				Difficulty: common.Big0,
				BaseFee:    chain.CurrentBlock().BaseFee, // invalid, but nothing checks it, yolo
			}
			// Inject the fake block into the chain
			txs := make([]*types.Transaction, len(tt.block))
			for j, inc := range tt.block {
				txs[j] = types.MustSignNewTx(keys[inc.from], types.LatestSigner(params.MainnetChainConfig), inc.tx)
			}
			chain.blocks = map[uint64]*types.Block{
				header.Number.Uint64(): types.NewBlockWithHeader(header).WithBody(types.Body{
					Transactions: txs,
				}),
			}
			// Apply the nonce updates to the state db
			for _, tx := range txs {
				sender, _ := types.Sender(types.LatestSigner(params.MainnetChainConfig), tx)
				chain.statedb.SetNonce(sender, tx.Nonce()+1, tracing.NonceChangeUnspecified)
			}
			pool.Reset(chain.CurrentBlock(), header)
			verifyPoolInternals(t, pool)
		}
		// Close down the test
		pool.Close()
	}
}

// Tests that transactions with legacy sidecars are accepted within the
// conversion window but rejected after it has passed.
func TestAddLegacyBlobTx(t *testing.T) {
	testAddLegacyBlobTx(t, true)  // conversion window has not yet passed
	testAddLegacyBlobTx(t, false) // conversion window passed
}

func testAddLegacyBlobTx(t *testing.T, accept bool) {
	var (
		key1, _ = crypto.GenerateKey()
		key2, _ = crypto.GenerateKey()

		addr1 = crypto.PubkeyToAddress(key1.PublicKey)
		addr2 = crypto.PubkeyToAddress(key2.PublicKey)
	)

	statedb, _ := state.New(types.EmptyRootHash, state.NewDatabaseForTesting())
	statedb.AddBalance(addr1, uint256.NewInt(1_000_000_000), tracing.BalanceChangeUnspecified)
	statedb.AddBalance(addr2, uint256.NewInt(1_000_000_000), tracing.BalanceChangeUnspecified)
	statedb.Commit(0, true, false)

	chain := &testBlockChain{
		config:  params.MergedTestChainConfig,
		basefee: uint256.NewInt(1050),
		blobfee: uint256.NewInt(105),
		statedb: statedb,
	}
	var timeDiff uint64
	if accept {
		timeDiff = uint64(conversionTimeWindow.Seconds()) - 1
	} else {
		timeDiff = uint64(conversionTimeWindow.Seconds()) + 1
	}
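	// Pinning the head timestamp just inside the conversion window (accept) or
	// just past it (reject) is assumed to be what decides below whether the
	// legacy v0-sidecar transactions are still admitted.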
	time := *params.MergedTestChainConfig.OsakaTime + timeDiff
	chain.setHeadTime(time)

	pool := New(Config{Datadir: t.TempDir()}, chain, nil)
	if err := pool.Init(1, chain.CurrentBlock(), newReserver()); err != nil {
		t.Fatalf("failed to create blob pool: %v", err)
	}

	// Attempt to add legacy blob transactions.
	var (
		tx1 = makeMultiBlobTx(0, 1, 1000, 100, 6, 0, key1, types.BlobSidecarVersion0)
		tx2 = makeMultiBlobTx(0, 1, 800, 70, 6, 6, key2, types.BlobSidecarVersion0)
		txs = []*types.Transaction{tx1, tx2}
	)
	errs := pool.Add(txs, true)
	for _, err := range errs {
		if accept && err != nil {
			t.Fatalf("expected tx add to succeed, %v", err)
		}
		if !accept && err == nil {
			t.Fatal("expected tx add to fail")
		}
	}
	verifyPoolInternals(t, pool)
	pool.Close()
}

func TestGetBlobs(t *testing.T) {
	//log.SetDefault(log.NewLogger(log.NewTerminalHandlerWithLevel(os.Stderr, log.LevelTrace, true)))

	// Create a temporary folder for the persistent backend
	storage := t.TempDir()

	os.MkdirAll(filepath.Join(storage, pendingTransactionStore), 0700)
	store, _ := billy.Open(billy.Options{Path: filepath.Join(storage, pendingTransactionStore)}, newSlotter(params.BlobTxMaxBlobs), nil)

	// Create transactions from a few accounts.
	var (
		key1, _ = crypto.GenerateKey()
		key2, _ = crypto.GenerateKey()
		key3, _ = crypto.GenerateKey()

		addr1 = crypto.PubkeyToAddress(key1.PublicKey)
		addr2 = crypto.PubkeyToAddress(key2.PublicKey)
		addr3 = crypto.PubkeyToAddress(key3.PublicKey)

		tx1 = makeMultiBlobTx(0, 1, 1000, 100, 6, 0, key1, types.BlobSidecarVersion0)  // [0, 6)
		tx2 = makeMultiBlobTx(0, 1, 800, 70, 6, 6, key2, types.BlobSidecarVersion1)   // [6, 12)
		tx3 = makeMultiBlobTx(0, 1, 800, 110, 6, 12, key3, types.BlobSidecarVersion0) // [12, 18)

		blob1, _ = rlp.EncodeToBytes(tx1)
		blob2, _ = rlp.EncodeToBytes(tx2)
		blob3, _ = rlp.EncodeToBytes(tx3)
	)

	// Write the three safely sized txs to the store. Note: although the store is
	// configured for a blob count of 6, it can also support around ~1MB of call
	// data - all this to say that we aren't using the absolute largest shelf
	// available.
	store.Put(blob1)
	store.Put(blob2)
	store.Put(blob3)
	store.Close()

	// Mimic a blobpool with max blob count of 6 upgrading to a max blob count of 24.
	statedb, _ := state.New(types.EmptyRootHash, state.NewDatabaseForTesting())
	statedb.AddBalance(addr1, uint256.NewInt(1_000_000_000), tracing.BalanceChangeUnspecified)
	statedb.AddBalance(addr2, uint256.NewInt(1_000_000_000), tracing.BalanceChangeUnspecified)
	statedb.AddBalance(addr3, uint256.NewInt(1_000_000_000), tracing.BalanceChangeUnspecified)
	statedb.Commit(0, true, false)

	// Make a custom chain config with a raised Cancun blob schedule (target 12, max 24 blobs).
	cancunTime := uint64(0)
	config := &params.ChainConfig{
		ChainID:     big.NewInt(1),
		LondonBlock: big.NewInt(0),
		BerlinBlock: big.NewInt(0),
		CancunTime:  &cancunTime,
		BlobScheduleConfig: &params.BlobScheduleConfig{
			Cancun: &params.BlobConfig{
				Target:         12,
				Max:            24,
				UpdateFraction: params.DefaultCancunBlobConfig.UpdateFraction,
			},
		},
	}
	chain := &testBlockChain{
		config:  config,
		basefee: uint256.NewInt(1050),
		blobfee: uint256.NewInt(105),
		statedb: statedb,
	}
	pool := New(Config{Datadir: storage}, chain, nil)
	if err := pool.Init(1, chain.CurrentBlock(), newReserver()); err != nil {
		t.Fatalf("failed to create blob pool: %v", err)
	}

	// Verify the regular three txs are always available.
	if got := pool.Get(tx1.Hash()); got == nil {
		t.Errorf("expected tx %s from %s in pool", tx1.Hash(), addr1)
	}
	if got := pool.Get(tx2.Hash()); got == nil {
		t.Errorf("expected tx %s from %s in pool", tx2.Hash(), addr2)
	}
	if got := pool.Get(tx3.Hash()); got == nil {
		t.Errorf("expected tx %s from %s in pool", tx3.Hash(), addr3)
	}

	cases := []struct {
		start      int
		limit      int
		fillRandom bool // Whether to randomly fill some of the requested blobs with unknowns
		version    byte // Blob sidecar version to request
		convert    bool // Whether to convert version on retrieval
	}{
		{
			start: 0, limit: 6,
			version: types.BlobSidecarVersion0,
		},
		{
			start: 0, limit: 6,
			version: types.BlobSidecarVersion1,
		},
		{
			start: 0, limit: 6, fillRandom: true,
			version: types.BlobSidecarVersion0,
		},
		{
			start: 0, limit: 6, fillRandom: true,
			version: types.BlobSidecarVersion1,
		},
		{
			start: 3, limit: 9,
			version: types.BlobSidecarVersion0,
		},
		{
			start: 3, limit: 9,
			version: types.BlobSidecarVersion1,
		},
		{
			start: 3, limit: 9, fillRandom: true,
			version: types.BlobSidecarVersion0,
		},
		{
			start: 3, limit: 9, fillRandom: true,
			version: types.BlobSidecarVersion1,
		},
		{
			start: 3, limit: 15,
			version: types.BlobSidecarVersion0,
		},
		{
			start: 3, limit: 15,
			version: types.BlobSidecarVersion1,
		},
		{
			start: 3, limit: 15, fillRandom: true,
			version: types.BlobSidecarVersion0,
		},
		{
			start: 3, limit: 15, fillRandom: true,
			version: types.BlobSidecarVersion1,
		},
		{
			start: 0, limit: 18,
			version: types.BlobSidecarVersion0,
		},
		{
			start: 0, limit: 18,
			version: types.BlobSidecarVersion1,
		},
		{
			start: 0, limit: 18, fillRandom: true,
			version: types.BlobSidecarVersion0,
		},
		{
			start: 0, limit: 18, fillRandom: true,
			version: types.BlobSidecarVersion1,
		},
		{
			start: 0, limit: 18, fillRandom: true,
			version: types.BlobSidecarVersion1,
			convert: true, // Convert some version 0 blobs to version 1 while retrieving
		},
	}
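	// The expectation below is that a requested blob is returned when conversion
	// is allowed or when the requested sidecar version matches the version the
	// owning transaction was stored with (v0 for indices [0, 6) and [12, 18),
	// v1 for [6, 12)); the randomly injected unknown hashes must come back nil.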
	for i, c := range cases {
		var (
			vhashes []common.Hash
			filled  = make(map[int]struct{})
		)
		if c.fillRandom {
			filled[len(vhashes)] = struct{}{}
			vhashes = append(vhashes, testrand.Hash())
		}
		for j := c.start; j < c.limit; j++ {
			vhashes = append(vhashes, testBlobVHashes[j])
			if c.fillRandom && rand.Intn(2) == 0 {
				filled[len(vhashes)] = struct{}{}
				vhashes = append(vhashes, testrand.Hash())
			}
		}
		if c.fillRandom {
			filled[len(vhashes)] = struct{}{}
			vhashes = append(vhashes, testrand.Hash())
		}
		blobs, _, proofs, err := pool.GetBlobs(vhashes, c.version, c.convert)
		if err != nil {
			t.Errorf("Unexpected error for case %d, %v", i, err)
		}

		// Cross validate what we received vs what we wanted
		length := c.limit - c.start
		wantLen := length + len(filled)
		if len(blobs) != wantLen || len(proofs) != wantLen {
			t.Errorf("retrieved blobs/proofs size mismatch: have %d/%d, want %d", len(blobs), len(proofs), wantLen)
			continue
		}

		var unknown int
		for j := 0; j < len(blobs); j++ {
			testBlobIndex := c.start + j - unknown
			if _, exist := filled[j]; exist {
				if blobs[j] != nil || proofs[j] != nil {
					t.Errorf("Unexpected blob and proof, item %d", j)
				}
				unknown++
				continue
			}
			// If an item is missing, but shouldn't, error
			if blobs[j] == nil || proofs[j] == nil {
				// This is only an error if there was no version mismatch
				if c.convert ||
					(c.version == types.BlobSidecarVersion1 && 6 <= testBlobIndex && testBlobIndex < 12) ||
					(c.version == types.BlobSidecarVersion0 && (testBlobIndex < 6 || 12 <= testBlobIndex)) {
					t.Errorf("tracked blob retrieval failed: item %d, hash %x", j, vhashes[j])
				}
				continue
			}
			// Item retrieved, make sure the blob matches the expectation
			if *blobs[j] != *testBlobs[testBlobIndex] {
				t.Errorf("retrieved blob mismatch: item %d, hash %x", j, vhashes[j])
				continue
			}
			// Item retrieved, make sure the proof matches the expectation
			if c.version == types.BlobSidecarVersion0 {
				if proofs[j][0] != testBlobProofs[testBlobIndex] {
					t.Errorf("retrieved proof mismatch: item %d, hash %x", j, vhashes[j])
				}
			} else {
				want, _ := kzg4844.ComputeCellProofs(blobs[j])
				if !reflect.DeepEqual(want, proofs[j]) {
					t.Errorf("retrieved proof mismatch: item %d, hash %x", j, vhashes[j])
				}
			}
		}
	}
	pool.Close()
}

// TestSidecarConversion will verify that after the Osaka fork, all legacy
// sidecars in the pool are successfully converted to v1 sidecars.
func TestSidecarConversion(t *testing.T) {
	// log.SetDefault(log.NewLogger(log.NewTerminalHandlerWithLevel(os.Stderr, log.LevelTrace, true)))

	// Create a temporary folder for the persistent backend
	storage := t.TempDir()
	os.MkdirAll(filepath.Join(storage, pendingTransactionStore), 0700)

	var (
		preOsakaTxs  = make(types.Transactions, 10)
		postOsakaTxs = make(types.Transactions, 3)
		keys         = make([]*ecdsa.PrivateKey, len(preOsakaTxs)+len(postOsakaTxs))
		addrs        = make([]common.Address, len(preOsakaTxs)+len(postOsakaTxs))
		statedb, _   = state.New(types.EmptyRootHash, state.NewDatabaseForTesting())
	)
	for i := range keys {
		keys[i], _ = crypto.GenerateKey()
		addrs[i] = crypto.PubkeyToAddress(keys[i].PublicKey)
		statedb.AddBalance(addrs[i], uint256.NewInt(1_000_000_000), tracing.BalanceChangeUnspecified)
	}
	for i := range preOsakaTxs {
		preOsakaTxs[i] = makeMultiBlobTx(0, 1, 1000, 100, 2, 0, keys[i], types.BlobSidecarVersion0)
	}
	for i := range postOsakaTxs {
		if i == 0 {
			// First has a v0 sidecar.
			postOsakaTxs[i] = makeMultiBlobTx(0, 1, 1000, 100, 1, 0, keys[len(preOsakaTxs)+i], types.BlobSidecarVersion0)
			continue
		}
		postOsakaTxs[i] = makeMultiBlobTx(0, 1, 1000, 100, 1, 0, keys[len(preOsakaTxs)+i], types.BlobSidecarVersion1)
	}
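	// postOsakaTxs[0] deliberately keeps a v0 sidecar so the pool has to convert
	// it when it is added after Osaka; the remaining post-Osaka txs already carry
	// v1 sidecars.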
	statedb.Commit(0, true, false)

	// Test plan:
	// 1) Create a bunch of v0 sidecar txs and add them to the pool before Osaka.
	// 2) Pass in a new Osaka header to activate the conversion thread.
	// 3) Continue adding both v0 and v1 transactions to the pool.
	// 4) Verify that as additional blocks come in, transactions involved in the
	//    migration are correctly discarded.

	config := &params.ChainConfig{
		ChainID:            big.NewInt(1),
		LondonBlock:        big.NewInt(0),
		BerlinBlock:        big.NewInt(0),
		CancunTime:         newUint64(0),
		PragueTime:         newUint64(0),
		OsakaTime:          newUint64(1),
		BlobScheduleConfig: params.DefaultBlobSchedule,
	}
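	// With OsakaTime set to 1, header0 below (time 0) is still pre-Osaka while
	// header1 and header2 (times 1 and 2) are post-Osaka, which is assumed to be
	// what triggers the sidecar conversion on reset.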
	chain := &testBlockChain{
		config:  config,
		basefee: uint256.NewInt(1050),
		blobfee: uint256.NewInt(105),
		statedb: statedb,
		blocks:  make(map[uint64]*types.Block),
	}

	// Create 3 blocks:
	// - the current block, before Osaka
	// - the first block after Osaka
	// - another post-Osaka block with several transactions in it
	header0 := chain.CurrentBlock()
	header0.Time = 0
	chain.blocks[0] = types.NewBlockWithHeader(header0)

	header1 := chain.CurrentBlock()
	header1.Number = big.NewInt(1)
	header1.Time = 1
	chain.blocks[1] = types.NewBlockWithHeader(header1)

	header2 := chain.CurrentBlock()
	header2.Time = 2
	header2.Number = big.NewInt(2)

	// Make a copy of one of the pre-Osaka transactions and convert it to v1 here
	// so that we can add it to the pool later and ensure a duplicate is not added
	// by the conversion queue.
	tx := preOsakaTxs[len(preOsakaTxs)-1]
	sc := *tx.BlobTxSidecar() // copy sidecar
	sc.ToV1()
	tx.WithBlobTxSidecar(&sc)

	block2 := types.NewBlockWithHeader(header2).WithBody(types.Body{Transactions: append(postOsakaTxs, tx)})
	chain.blocks[2] = block2

	pool := New(Config{Datadir: storage}, chain, nil)
	if err := pool.Init(1, header0, newReserver()); err != nil {
		t.Fatalf("failed to create blob pool: %v", err)
	}

	errs := pool.Add(preOsakaTxs, true)
	for i, err := range errs {
		if err != nil {
			t.Errorf("failed to insert blob tx from %s: %s", addrs[i], errs[i])
		}
	}

	// Kick off migration.
	pool.Reset(header0, header1)

	// Add the v0 sidecar tx, but don't block so we can keep doing other stuff
	// while it converts the sidecar.
	addDone := make(chan struct{})
	go func() {
		pool.Add(types.Transactions{postOsakaTxs[0]}, false)
		close(addDone)
	}()

	// Add the post-Osaka v1 sidecar txs.
	errs = pool.Add(postOsakaTxs[1:], false)
	for _, err := range errs {
		if err != nil {
			t.Fatalf("expected tx add to succeed: %v", err)
		}
	}

	// Wait for the first tx's conversion to complete, then check that all
	// transactions added after Osaka can be accounted for in the pool.
	<-addDone
	pending := pool.Pending(txpool.PendingFilter{BlobTxs: true, BlobVersion: types.BlobSidecarVersion1})
	for _, tx := range postOsakaTxs {
		from, _ := pool.signer.Sender(tx)
		if len(pending[from]) != 1 || pending[from][0].Hash != tx.Hash() {
			t.Fatalf("expected post-Osaka txs to be pending")
		}
	}

	// Now update the pool with the next block. This should cause the pool to
	// clear out the post-Osaka txs since they were included in block 2. Since the
	// test blockchain doesn't manage nonces, we'll just do that manually before
	// the reset is called. Don't forget about the pre-Osaka transaction we also
	// added to block 2!
	for i := range postOsakaTxs {
		statedb.SetNonce(addrs[len(preOsakaTxs)+i], 1, tracing.NonceChangeEoACall)
	}
	statedb.SetNonce(addrs[len(preOsakaTxs)-1], 1, tracing.NonceChangeEoACall)
	pool.Reset(header1, block2.Header())

	// Now verify no post-Osaka transactions are tracked by the pool.
	for i, tx := range postOsakaTxs {
		if pool.Get(tx.Hash()) != nil {
			t.Fatalf("expected txs added post-osaka to have been placed in limbo due to inclusion in a block: index %d, hash %s", i, tx.Hash())
		}
	}

	// Wait for the pool migration to complete.
	<-pool.cQueue.anyBillyConversionDone

	// Verify all transactions in the pool were converted and verify the
	// subsequent cell proofs.
	count, _ := pool.Stats()
	if count != len(preOsakaTxs)-1 {
		t.Errorf("expected pending count to match initial tx count: pending=%d, expected=%d", count, len(preOsakaTxs)-1)
	}
	for addr, acc := range pool.index {
		for _, m := range acc {
			if m.version != types.BlobSidecarVersion1 {
				t.Errorf("expected sidecar to have been converted: from %s, hash %s", addr, m.hash)
			}
			tx := pool.Get(m.hash)
			if tx == nil {
				t.Errorf("failed to get tx by hash: %s", m.hash)
			}
			sc := tx.BlobTxSidecar()
			if err := kzg4844.VerifyCellProofs(sc.Blobs, sc.Commitments, sc.Proofs); err != nil {
				t.Errorf("failed to verify cell proofs for tx %s after conversion: %s", m.hash, err)
			}
		}
	}

	verifyPoolInternals(t, pool)

	// Launch conversion a second time.
	// This is just a sanity check to ensure we can handle it.
	pool.Reset(header0, header1)

	pool.Close()
}

// fakeBilly is a billy.Database implementation which just drops data on the floor.
type fakeBilly struct {
	billy.Database
	count uint64
}

func (f *fakeBilly) Put(data []byte) (uint64, error) {
	f.count++
	return f.count, nil
}

var _ billy.Database = (*fakeBilly)(nil)

// Benchmarks the time it takes to assemble the lazy pending transaction list
// from the pool contents.
func BenchmarkPoolPending100Mb(b *testing.B) { benchmarkPoolPending(b, 100_000_000) }
func BenchmarkPoolPending1GB(b *testing.B)   { benchmarkPoolPending(b, 1_000_000_000) }
func BenchmarkPoolPending10GB(b *testing.B) {
	if testing.Short() {
		b.Skip("Skipping in short-mode")
	}
	benchmarkPoolPending(b, 10_000_000_000)
}

func benchmarkPoolPending(b *testing.B, datacap uint64) {
	// Calculate the maximum number of transactions that would fit into the pool
	// and generate a set of random accounts to seed them with.
	capacity := datacap / params.BlobTxBlobGasPerBlob
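	// As a worked example: assuming params.BlobTxBlobGasPerBlob is 2^17 (131072),
	// a 100 MB datacap works out to roughly 762 single-blob transactions, one per
	// seeded account below.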

	var (
		basefee    = uint64(1050)
		blobfee    = uint64(105)
		signer     = types.LatestSigner(params.MainnetChainConfig)
		statedb, _ = state.New(types.EmptyRootHash, state.NewDatabaseForTesting())
		chain      = &testBlockChain{
			config:  params.MainnetChainConfig,
			basefee: uint256.NewInt(basefee),
			blobfee: uint256.NewInt(blobfee),
			statedb: statedb,
		}
		pool = New(Config{Datadir: ""}, chain, nil)
	)

	if err := pool.Init(1, chain.CurrentBlock(), newReserver()); err != nil {
		b.Fatalf("failed to create blob pool: %v", err)
	}
	// Make the pool not use disk (just drop everything). This test never reads
	// back the data, it just iterates over the pool's in-memory items
	pool.store = &fakeBilly{pool.store, 0}
	// Fill the pool up with one random transaction from each account with the
	// same price and everything to maximize the worst case scenario
	for i := 0; i < int(capacity); i++ {
		blobtx := makeUnsignedTx(0, 10, basefee+10, blobfee)
		blobtx.R = uint256.NewInt(1)
		blobtx.S = uint256.NewInt(uint64(100 + i))
		blobtx.V = uint256.NewInt(0)
		tx := types.NewTx(blobtx)
		addr, err := types.Sender(signer, tx)
		if err != nil {
			b.Fatal(err)
		}
		statedb.AddBalance(addr, uint256.NewInt(1_000_000_000), tracing.BalanceChangeUnspecified)
		pool.add(tx)
	}
	statedb.Commit(0, true, false)
	defer pool.Close()

	// Benchmark assembling the pending
	b.ResetTimer()
	b.ReportAllocs()

	for i := 0; i < b.N; i++ {
		p := pool.Pending(txpool.PendingFilter{
			MinTip:  uint256.NewInt(1),
			BaseFee: chain.basefee,
			BlobFee: chain.blobfee,
		})
		if len(p) != int(capacity) {
			b.Fatalf("have %d want %d", len(p), capacity)
		}
	}
}

func newUint64(val uint64) *uint64 { return &val }