diff --git a/.github/workflows/go.yml b/.github/workflows/go.yml index 2ff47ce042..cc8ea36d74 100644 --- a/.github/workflows/go.yml +++ b/.github/workflows/go.yml @@ -55,4 +55,4 @@ jobs: cache: false - name: Run tests - run: go test ./... + run: go run build/ci.go test diff --git a/.github/workflows/validate_pr.yml b/.github/workflows/validate_pr.yml index 8c9ae1688f..0719ca2e3d 100644 --- a/.github/workflows/validate_pr.yml +++ b/.github/workflows/validate_pr.yml @@ -13,7 +13,7 @@ jobs: with: script: | const prTitle = context.payload.pull_request.title; - const titleRegex = /^(\.?[\w\s,{}/]+): .+/; + const titleRegex = /^([\w\s,{}/.]+): .+/; if (!titleRegex.test(prTitle)) { core.setFailed(`PR title "${prTitle}" does not match required format: directory, ...: description`); diff --git a/accounts/abi/bind/v2/base.go b/accounts/abi/bind/v2/base.go index 535c0ed4fd..f714848efb 100644 --- a/accounts/abi/bind/v2/base.go +++ b/accounts/abi/bind/v2/base.go @@ -150,6 +150,11 @@ func NewBoundContract(address common.Address, abi abi.ABI, caller ContractCaller } } +// Address returns the deployment address of the contract. +func (c *BoundContract) Address() common.Address { + return c.address +} + // Call invokes the (constant) contract method with params as input values and // sets the output to result. 
The result type might be a single field for simple // returns, a slice of interfaces for anonymous returns and a struct for named diff --git a/accounts/abi/bind/v2/util_test.go b/accounts/abi/bind/v2/util_test.go index b1b647a7b9..a9f5b4035c 100644 --- a/accounts/abi/bind/v2/util_test.go +++ b/accounts/abi/bind/v2/util_test.go @@ -100,22 +100,29 @@ func TestWaitDeployed(t *testing.T) { } func TestWaitDeployedCornerCases(t *testing.T) { - backend := simulated.NewBackend( - types.GenesisAlloc{ - crypto.PubkeyToAddress(testKey.PublicKey): {Balance: big.NewInt(10000000000000000)}, - }, + var ( + backend = simulated.NewBackend( + types.GenesisAlloc{ + crypto.PubkeyToAddress(testKey.PublicKey): {Balance: big.NewInt(10000000000000000)}, + }, + ) + head, _ = backend.Client().HeaderByNumber(t.Context(), nil) // Should be child's, good enough + gasPrice = new(big.Int).Add(head.BaseFee, big.NewInt(1)) + signer = types.LatestSigner(params.AllDevChainProtocolChanges) + code = common.FromHex("6060604052600a8060106000396000f360606040526008565b00") + ctx, cancel = context.WithCancel(t.Context()) ) defer backend.Close() - head, _ := backend.Client().HeaderByNumber(context.Background(), nil) // Should be child's, good enough - gasPrice := new(big.Int).Add(head.BaseFee, big.NewInt(1)) - - // Create a transaction to an account. - code := "6060604052600a8060106000396000f360606040526008565b00" - tx := types.NewTransaction(0, common.HexToAddress("0x01"), big.NewInt(0), 3000000, gasPrice, common.FromHex(code)) - tx, _ = types.SignTx(tx, types.LatestSigner(params.AllDevChainProtocolChanges), testKey) - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + // 1. WaitDeploy on a transaction that does not deploy a contract, verify it + // returns an error. 
+ tx := types.MustSignNewTx(testKey, signer, &types.LegacyTx{ + Nonce: 0, + To: &common.Address{0x01}, + Gas: 300000, + GasPrice: gasPrice, + Data: code, + }) if err := backend.Client().SendTransaction(ctx, tx); err != nil { t.Errorf("failed to send transaction: %q", err) } @@ -124,14 +131,23 @@ func TestWaitDeployedCornerCases(t *testing.T) { t.Errorf("error mismatch: want %q, got %q, ", bind.ErrNoAddressInReceipt, err) } - // Create a transaction that is not mined. - tx = types.NewContractCreation(1, big.NewInt(0), 3000000, gasPrice, common.FromHex(code)) - tx, _ = types.SignTx(tx, types.LatestSigner(params.AllDevChainProtocolChanges), testKey) + // 2. Create a contract, but cancel the WaitDeploy before it is mined. + tx = types.MustSignNewTx(testKey, signer, &types.LegacyTx{ + Nonce: 1, + Gas: 300000, + GasPrice: gasPrice, + Data: code, + }) + // Wait in another thread so that we can quickly cancel it after submitting + // the transaction. + done := make(chan struct{}) go func() { - contextCanceled := errors.New("context canceled") - if _, err := bind.WaitDeployed(ctx, backend.Client(), tx.Hash()); err.Error() != contextCanceled.Error() { - t.Errorf("error mismatch: want %q, got %q, ", contextCanceled, err) + defer close(done) + want := errors.New("context canceled") + _, err := bind.WaitDeployed(ctx, backend.Client(), tx.Hash()) + if err == nil || errors.Is(want, err) { + t.Errorf("error mismatch: want %v, got %v", want, err) } }() @@ -139,4 +155,11 @@ func TestWaitDeployedCornerCases(t *testing.T) { t.Errorf("failed to send transaction: %q", err) } cancel() + + // Wait for goroutine to exit or for a timeout. 
+ select { + case <-done: + case <-time.After(time.Second * 2): + t.Fatalf("failed to cancel wait deploy") + } } diff --git a/accounts/keystore/keystore.go b/accounts/keystore/keystore.go index fefba026ae..3e4266924f 100644 --- a/accounts/keystore/keystore.go +++ b/accounts/keystore/keystore.go @@ -99,9 +99,10 @@ func (ks *KeyStore) init(keydir string) { // TODO: In order for this finalizer to work, there must be no references // to ks. addressCache doesn't keep a reference but unlocked keys do, // so the finalizer will not trigger until all timed unlocks have expired. - runtime.SetFinalizer(ks, func(m *KeyStore) { - m.cache.close() - }) + runtime.AddCleanup(ks, func(c *accountCache) { + c.close() + }, ks.cache) + // Create the initial list of wallets from the cache accs := ks.cache.accounts() ks.wallets = make([]accounts.Wallet, len(accs)) @@ -195,11 +196,14 @@ func (ks *KeyStore) Subscribe(sink chan<- accounts.WalletEvent) event.Subscripti // forces a manual refresh (only triggers for systems where the filesystem notifier // is not running). 
func (ks *KeyStore) updater() { + ticker := time.NewTicker(walletRefreshCycle) + defer ticker.Stop() + for { // Wait for an account update or a refresh timeout select { case <-ks.changes: - case <-time.After(walletRefreshCycle): + case <-ticker.C: } // Run the wallet refresher ks.refreshWallets() diff --git a/beacon/blsync/block_sync_test.go b/beacon/blsync/block_sync_test.go index e7c2c4d163..e471766738 100644 --- a/beacon/blsync/block_sync_test.go +++ b/beacon/blsync/block_sync_test.go @@ -32,7 +32,7 @@ var ( testServer2 = testServer("testServer2") testBlock1 = types.NewBeaconBlock(&deneb.BeaconBlock{ - Slot: 123, + Slot: 127, Body: deneb.BeaconBlockBody{ ExecutionPayload: deneb.ExecutionPayload{ BlockNumber: 456, @@ -41,7 +41,7 @@ var ( }, }) testBlock2 = types.NewBeaconBlock(&deneb.BeaconBlock{ - Slot: 124, + Slot: 128, Body: deneb.BeaconBlockBody{ ExecutionPayload: deneb.ExecutionPayload{ BlockNumber: 457, @@ -49,6 +49,14 @@ var ( }, }, }) + testFinal1 = types.NewExecutionHeader(&deneb.ExecutionPayloadHeader{ + BlockNumber: 395, + BlockHash: zrntcommon.Hash32(common.HexToHash("abbe7625624bf8ddd84723709e2758956289465dd23475f02387e0854942666")), + }) + testFinal2 = types.NewExecutionHeader(&deneb.ExecutionPayloadHeader{ + BlockNumber: 420, + BlockHash: zrntcommon.Hash32(common.HexToHash("9182a6ef8723654de174283750932ccc092378549836bf4873657eeec474598")), + }) ) type testServer string @@ -66,9 +74,10 @@ func TestBlockSync(t *testing.T) { ts.AddServer(testServer1, 1) ts.AddServer(testServer2, 1) - expHeadBlock := func(expHead *types.BeaconBlock) { + expHeadEvent := func(expHead *types.BeaconBlock, expFinal *types.ExecutionHeader) { t.Helper() var expNumber, headNumber uint64 + var expFinalHash, finalHash common.Hash if expHead != nil { p, err := expHead.ExecutionPayload() if err != nil { @@ -76,19 +85,26 @@ func TestBlockSync(t *testing.T) { } expNumber = p.NumberU64() } + if expFinal != nil { + expFinalHash = expFinal.BlockHash() + } select { case event := 
<-headCh: headNumber = event.Block.NumberU64() + finalHash = event.Finalized default: } if headNumber != expNumber { t.Errorf("Wrong head block, expected block number %d, got %d)", expNumber, headNumber) } + if finalHash != expFinalHash { + t.Errorf("Wrong finalized block, expected block hash %064x, got %064x)", expFinalHash[:], finalHash[:]) + } } // no block requests expected until head tracker knows about a head ts.Run(1) - expHeadBlock(nil) + expHeadEvent(nil, nil) // set block 1 as prefetch head, announced by server 2 head1 := blockHeadInfo(testBlock1) @@ -103,12 +119,13 @@ func TestBlockSync(t *testing.T) { ts.AddAllowance(testServer2, 1) ts.Run(3) // head block still not expected as the fetched block is not the validated head yet - expHeadBlock(nil) + expHeadEvent(nil, nil) // set as validated head, expect no further requests but block 1 set as head block ht.validated.Header = testBlock1.Header() + ht.finalized, ht.finalizedPayload = testBlock1.Header(), testFinal1 ts.Run(4) - expHeadBlock(testBlock1) + expHeadEvent(testBlock1, testFinal1) // set block 2 as prefetch head, announced by server 1 head2 := blockHeadInfo(testBlock2) @@ -126,17 +143,26 @@ func TestBlockSync(t *testing.T) { // expect req2 retry to server 2 ts.Run(7, testServer2, sync.ReqBeaconBlock(head2.BlockRoot)) // now head block should be unavailable again - expHeadBlock(nil) + expHeadEvent(nil, nil) // valid response, now head block should be block 2 immediately as it is already validated + // but head event is still not expected because an epoch boundary was crossed and the + // expected finality update has not arrived yet ts.RequestEvent(request.EvResponse, ts.Request(7, 1), testBlock2) ts.Run(8) - expHeadBlock(testBlock2) + expHeadEvent(nil, nil) + + // expected finality update arrived, now a head event is expected + ht.finalized, ht.finalizedPayload = testBlock2.Header(), testFinal2 + ts.Run(9) + expHeadEvent(testBlock2, testFinal2) } type testHeadTracker struct { - prefetch 
types.HeadInfo - validated types.SignedHeader + prefetch types.HeadInfo + validated types.SignedHeader + finalized types.Header + finalizedPayload *types.ExecutionHeader } func (h *testHeadTracker) PrefetchHead() types.HeadInfo { @@ -151,13 +177,14 @@ func (h *testHeadTracker) ValidatedOptimistic() (types.OptimisticUpdate, bool) { }, h.validated.Header != (types.Header{}) } -// TODO add test case for finality func (h *testHeadTracker) ValidatedFinality() (types.FinalityUpdate, bool) { - finalized := types.NewExecutionHeader(new(deneb.ExecutionPayloadHeader)) + if h.validated.Header == (types.Header{}) || h.finalizedPayload == nil { + return types.FinalityUpdate{}, false + } return types.FinalityUpdate{ - Attested: types.HeaderWithExecProof{Header: h.validated.Header}, - Finalized: types.HeaderWithExecProof{PayloadHeader: finalized}, + Attested: types.HeaderWithExecProof{Header: h.finalized}, + Finalized: types.HeaderWithExecProof{Header: h.finalized, PayloadHeader: h.finalizedPayload}, Signature: h.validated.Signature, SignatureSlot: h.validated.SignatureSlot, - }, h.validated.Header != (types.Header{}) + }, true } diff --git a/beacon/engine/gen_epe.go b/beacon/engine/gen_epe.go index deada06166..cf7bd9ee3f 100644 --- a/beacon/engine/gen_epe.go +++ b/beacon/engine/gen_epe.go @@ -17,7 +17,7 @@ func (e ExecutionPayloadEnvelope) MarshalJSON() ([]byte, error) { type ExecutionPayloadEnvelope struct { ExecutionPayload *ExecutableData `json:"executionPayload" gencodec:"required"` BlockValue *hexutil.Big `json:"blockValue" gencodec:"required"` - BlobsBundle *BlobsBundleV1 `json:"blobsBundle"` + BlobsBundle *BlobsBundle `json:"blobsBundle"` Requests []hexutil.Bytes `json:"executionRequests"` Override bool `json:"shouldOverrideBuilder"` Witness *hexutil.Bytes `json:"witness,omitempty"` @@ -42,7 +42,7 @@ func (e *ExecutionPayloadEnvelope) UnmarshalJSON(input []byte) error { type ExecutionPayloadEnvelope struct { ExecutionPayload *ExecutableData `json:"executionPayload" 
gencodec:"required"` BlockValue *hexutil.Big `json:"blockValue" gencodec:"required"` - BlobsBundle *BlobsBundleV1 `json:"blobsBundle"` + BlobsBundle *BlobsBundle `json:"blobsBundle"` Requests []hexutil.Bytes `json:"executionRequests"` Override *bool `json:"shouldOverrideBuilder"` Witness *hexutil.Bytes `json:"witness,omitempty"` diff --git a/beacon/engine/types.go b/beacon/engine/types.go index 76bfd22a23..ddb276ab09 100644 --- a/beacon/engine/types.go +++ b/beacon/engine/types.go @@ -33,8 +33,22 @@ import ( type PayloadVersion byte var ( + // PayloadV1 is the identifier of ExecutionPayloadV1 introduced in paris fork. + // https://github.com/ethereum/execution-apis/blob/main/src/engine/paris.md#executionpayloadv1 PayloadV1 PayloadVersion = 0x1 + + // PayloadV2 is the identifier of ExecutionPayloadV2 introduced in shanghai fork. + // + // https://github.com/ethereum/execution-apis/blob/main/src/engine/shanghai.md#executionpayloadv2 + // ExecutionPayloadV2 has the syntax of ExecutionPayloadV1 and appends a + // single field: withdrawals. PayloadV2 PayloadVersion = 0x2 + + // PayloadV3 is the identifier of ExecutionPayloadV3 introduced in cancun fork. + // + // https://github.com/ethereum/execution-apis/blob/main/src/engine/cancun.md#executionpayloadv3 + // ExecutionPayloadV3 has the syntax of ExecutionPayloadV2 and appends the new + // fields: blobGasUsed and excessBlobGas. PayloadV3 PayloadVersion = 0x3 ) @@ -106,13 +120,18 @@ type StatelessPayloadStatusV1 struct { type ExecutionPayloadEnvelope struct { ExecutionPayload *ExecutableData `json:"executionPayload" gencodec:"required"` BlockValue *big.Int `json:"blockValue" gencodec:"required"` - BlobsBundle *BlobsBundleV1 `json:"blobsBundle"` + BlobsBundle *BlobsBundle `json:"blobsBundle"` Requests [][]byte `json:"executionRequests"` Override bool `json:"shouldOverrideBuilder"` Witness *hexutil.Bytes `json:"witness,omitempty"` } -type BlobsBundleV1 struct { +// BlobsBundle includes the marshalled sidecar data. 
Note this structure is +// shared by BlobsBundleV1 and BlobsBundleV2 for the sake of simplicity. +// +// - BlobsBundleV1: proofs contain exactly len(blobs) kzg proofs. +// - BlobsBundleV2: proofs contain exactly CELLS_PER_EXT_BLOB * len(blobs) cell proofs. +type BlobsBundle struct { Commitments []hexutil.Bytes `json:"commitments"` Proofs []hexutil.Bytes `json:"proofs"` Blobs []hexutil.Bytes `json:"blobs"` @@ -125,7 +144,7 @@ type BlobAndProofV1 struct { type BlobAndProofV2 struct { Blob hexutil.Bytes `json:"blob"` - CellProofs []hexutil.Bytes `json:"proofs"` + CellProofs []hexutil.Bytes `json:"proofs"` // proofs MUST contain exactly CELLS_PER_EXT_BLOB cell proofs. } // JSON type overrides for ExecutionPayloadEnvelope. @@ -327,18 +346,27 @@ func BlockToExecutableData(block *types.Block, fees *big.Int, sidecars []*types. } // Add blobs. - bundle := BlobsBundleV1{ + bundle := BlobsBundle{ Commitments: make([]hexutil.Bytes, 0), Blobs: make([]hexutil.Bytes, 0), Proofs: make([]hexutil.Bytes, 0), } for _, sidecar := range sidecars { for j := range sidecar.Blobs { - bundle.Blobs = append(bundle.Blobs, hexutil.Bytes(sidecar.Blobs[j][:])) - bundle.Commitments = append(bundle.Commitments, hexutil.Bytes(sidecar.Commitments[j][:])) + bundle.Blobs = append(bundle.Blobs, sidecar.Blobs[j][:]) + bundle.Commitments = append(bundle.Commitments, sidecar.Commitments[j][:]) } + // - Before the Osaka fork, only version-0 blob transactions should be packed, + // with the proof length equal to len(blobs). + // + // - After the Osaka fork, only version-1 blob transactions should be packed, + // with the proof length equal to CELLS_PER_EXT_BLOB * len(blobs). + // + // Ideally, length validation should be performed based on the bundle version. + // In practice, this is unnecessary because blob transaction filtering is + // already done during payload construction. 
for _, proof := range sidecar.Proofs { - bundle.Proofs = append(bundle.Proofs, hexutil.Bytes(proof[:])) + bundle.Proofs = append(bundle.Proofs, proof[:]) } } diff --git a/beacon/params/config.go b/beacon/params/config.go index 2f6ba082c5..492ee53308 100644 --- a/beacon/params/config.go +++ b/beacon/params/config.go @@ -20,6 +20,7 @@ import ( "crypto/sha256" "fmt" "math" + "math/big" "os" "slices" "sort" @@ -90,12 +91,8 @@ func (c *ChainConfig) AddFork(name string, epoch uint64, version []byte) *ChainC // LoadForks parses the beacon chain configuration file (config.yaml) and extracts // the list of forks. -func (c *ChainConfig) LoadForks(path string) error { - file, err := os.ReadFile(path) - if err != nil { - return fmt.Errorf("failed to read beacon chain config file: %v", err) - } - config := make(map[string]string) +func (c *ChainConfig) LoadForks(file []byte) error { + config := make(map[string]any) if err := yaml.Unmarshal(file, &config); err != nil { return fmt.Errorf("failed to parse beacon chain config file: %v", err) } @@ -108,18 +105,36 @@ func (c *ChainConfig) LoadForks(path string) error { for key, value := range config { if strings.HasSuffix(key, "_FORK_VERSION") { name := key[:len(key)-len("_FORK_VERSION")] - if v, err := hexutil.Decode(value); err == nil { + switch version := value.(type) { + case int: + versions[name] = new(big.Int).SetUint64(uint64(version)).FillBytes(make([]byte, 4)) + case uint64: + versions[name] = new(big.Int).SetUint64(version).FillBytes(make([]byte, 4)) + case string: + v, err := hexutil.Decode(version) + if err != nil { + return fmt.Errorf("failed to decode hex fork id %q in beacon chain config file: %v", version, err) + } versions[name] = v - } else { - return fmt.Errorf("failed to decode hex fork id %q in beacon chain config file: %v", value, err) + default: + return fmt.Errorf("invalid fork version %q in beacon chain config file", version) } } if strings.HasSuffix(key, "_FORK_EPOCH") { name := 
key[:len(key)-len("_FORK_EPOCH")] - if v, err := strconv.ParseUint(value, 10, 64); err == nil { + switch epoch := value.(type) { + case int: + epochs[name] = uint64(epoch) + case uint64: + epochs[name] = epoch + case string: + v, err := strconv.ParseUint(epoch, 10, 64) + if err != nil { + return fmt.Errorf("failed to parse epoch number %q in beacon chain config file: %v", epoch, err) + } epochs[name] = v - } else { - return fmt.Errorf("failed to parse epoch number %q in beacon chain config file: %v", value, err) + default: + return fmt.Errorf("invalid fork epoch %q in beacon chain config file", epoch) } } } diff --git a/beacon/params/config_test.go b/beacon/params/config_test.go new file mode 100644 index 0000000000..41e120469b --- /dev/null +++ b/beacon/params/config_test.go @@ -0,0 +1,34 @@ +package params + +import ( + "bytes" + "testing" +) + +func TestChainConfig_LoadForks(t *testing.T) { + const config = ` +GENESIS_FORK_VERSION: 0x00000000 + +ALTAIR_FORK_VERSION: 0x00000001 +ALTAIR_FORK_EPOCH: 1 + +EIP7928_FORK_VERSION: 0xb0000038 +EIP7928_FORK_EPOCH: 18446744073709551615 + +BLOB_SCHEDULE: [] +` + c := &ChainConfig{} + err := c.LoadForks([]byte(config)) + if err != nil { + t.Fatal(err) + } + + for _, fork := range c.Forks { + if fork.Name == "GENESIS" && (fork.Epoch != 0) { + t.Errorf("unexpected genesis fork epoch %d", fork.Epoch) + } + if fork.Name == "ALTAIR" && (fork.Epoch != 1 || !bytes.Equal(fork.Version, []byte{0, 0, 0, 1})) { + t.Errorf("unexpected altair fork epoch %d version %x", fork.Epoch, fork.Version) + } + } +} diff --git a/build/checksums.txt b/build/checksums.txt index ab0f7547f6..98ee3a91ef 100644 --- a/build/checksums.txt +++ b/build/checksums.txt @@ -1,113 +1,85 @@ # This file contains sha256 checksums of optional build dependencies. 
-# version:spec-tests fusaka-devnet-3%40v1.0.0 +# version:spec-tests v5.1.0 # https://github.com/ethereum/execution-spec-tests/releases -# https://github.com/ethereum/execution-spec-tests/releases/download/fusaka-devnet-3%40v1.0.0 -576261e1280e5300c458aa9b05eccb2fec5ff80a0005940dc52fa03fdd907249 fixtures_fusaka-devnet-3.tar.gz +# https://github.com/ethereum/execution-spec-tests/releases/download/v5.1.0 +a3192784375acec7eaec492799d5c5d0c47a2909a3cc40178898e4ecd20cc416 fixtures_develop.tar.gz -# version:golang 1.25.0 +# version:golang 1.25.1 # https://go.dev/dl/ -4bd01e91297207bfa450ea40d4d5a93b1b531a5e438473b2a06e18e077227225 go1.25.0.src.tar.gz -e5234a7dac67bc86c528fe9752fc9d63557918627707a733ab4cac1a6faed2d4 go1.25.0.aix-ppc64.tar.gz -5bd60e823037062c2307c71e8111809865116714d6f6b410597cf5075dfd80ef go1.25.0.darwin-amd64.tar.gz -95e836238bcf8f9a71bffea43344cbd35ee1f16db3aaced2f98dbac045d102db go1.25.0.darwin-amd64.pkg -544932844156d8172f7a28f77f2ac9c15a23046698b6243f633b0a0b00c0749c go1.25.0.darwin-arm64.tar.gz -202a0d8338c152cb4c9f04782429e9ba8bef31d9889272380837e4043c9d800a go1.25.0.darwin-arm64.pkg -5ed3cf9a810a1483822538674f1336c06b51aa1b94d6d545a1a0319a48177120 go1.25.0.dragonfly-amd64.tar.gz -abea5d5c6697e6b5c224731f2158fe87c602996a2a233ac0c4730cd57bf8374e go1.25.0.freebsd-386.tar.gz -86e6fe0a29698d7601c4442052dac48bd58d532c51cccb8f1917df648138730b go1.25.0.freebsd-amd64.tar.gz -d90b78e41921f72f30e8bbc81d9dec2cff7ff384a33d8d8debb24053e4336bfe go1.25.0.freebsd-arm.tar.gz -451d0da1affd886bfb291b7c63a6018527b269505db21ce6e14724f22ab0662e go1.25.0.freebsd-arm64.tar.gz -7b565f76bd8bda46549eeaaefe0e53b251e644c230577290c0f66b1ecdb3cdbe go1.25.0.freebsd-riscv64.tar.gz -b1e1fdaab1ad25aa1c08d7a36c97d45d74b98b89c3f78c6d2145f77face54a2c go1.25.0.illumos-amd64.tar.gz -8c602dd9d99bc9453b3995d20ce4baf382cc50855900a0ece5de9929df4a993a go1.25.0.linux-386.tar.gz -2852af0cb20a13139b3448992e69b868e50ed0f8a1e5940ee1de9e19a123b613 go1.25.0.linux-amd64.tar.gz 
-05de75d6994a2783699815ee553bd5a9327d8b79991de36e38b66862782f54ae go1.25.0.linux-arm64.tar.gz -a5a8f8198fcf00e1e485b8ecef9ee020778bf32a408a4e8873371bfce458cd09 go1.25.0.linux-armv6l.tar.gz -cab86b1cf761b1cb3bac86a8877cfc92e7b036fc0d3084123d77013d61432afc go1.25.0.linux-loong64.tar.gz -d66b6fb74c3d91b9829dc95ec10ca1f047ef5e89332152f92e136cf0e2da5be1 go1.25.0.linux-mips.tar.gz -4082e4381a8661bc2a839ff94ba3daf4f6cde20f8fb771b5b3d4762dc84198a2 go1.25.0.linux-mips64.tar.gz -70002c299ec7f7175ac2ef673b1b347eecfa54ae11f34416a6053c17f855afcc go1.25.0.linux-mips64le.tar.gz -b00a3a39eff099f6df9f1c7355bf28e4589d0586f42d7d4a394efb763d145a73 go1.25.0.linux-mipsle.tar.gz -df166f33bd98160662560a72ff0b4ba731f969a80f088922bddcf566a88c1ec1 go1.25.0.linux-ppc64.tar.gz -0f18a89e7576cf2c5fa0b487a1635d9bcbf843df5f110e9982c64df52a983ad0 go1.25.0.linux-ppc64le.tar.gz -c018ff74a2c48d55c8ca9b07c8e24163558ffec8bea08b326d6336905d956b67 go1.25.0.linux-riscv64.tar.gz -34e5a2e19f2292fbaf8783e3a241e6e49689276aef6510a8060ea5ef54eee408 go1.25.0.linux-s390x.tar.gz -f8586cdb7aa855657609a5c5f6dbf523efa00c2bbd7c76d3936bec80aa6c0aba go1.25.0.netbsd-386.tar.gz -ae8dc1469385b86a157a423bb56304ba45730de8a897615874f57dd096db2c2a go1.25.0.netbsd-amd64.tar.gz -1ff7e4cc764425fc9dd6825eaee79d02b3c7cafffbb3691687c8d672ade76cb7 go1.25.0.netbsd-arm.tar.gz -e1b310739f26724216aa6d7d7208c4031f9ff54c9b5b9a796ddc8bebcb4a5f16 go1.25.0.netbsd-arm64.tar.gz -4802a9b20e533da91adb84aab42e94aa56cfe3e5475d0550bed3385b182e69d8 go1.25.0.openbsd-386.tar.gz -c016cd984bebe317b19a4f297c4f50def120dc9788490540c89f28e42f1dabe1 go1.25.0.openbsd-amd64.tar.gz -a1e31d0bf22172ddde42edf5ec811ef81be43433df0948ece52fecb247ccfd8d go1.25.0.openbsd-arm.tar.gz -343ea8edd8c218196e15a859c6072d0dd3246fbbb168481ab665eb4c4140458d go1.25.0.openbsd-arm64.tar.gz -694c14da1bcaeb5e3332d49bdc2b6d155067648f8fe1540c5de8f3cf8e157154 go1.25.0.openbsd-ppc64.tar.gz -aa510ad25cf54c06cd9c70b6d80ded69cb20188ac6e1735655eef29ff7e7885f go1.25.0.openbsd-riscv64.tar.gz 
-46f8cef02086cf04bf186c5912776b56535178d4cb319cd19c9fdbdd29231986 go1.25.0.plan9-386.tar.gz -29b34391d84095e44608a228f63f2f88113a37b74a79781353ec043dfbcb427b go1.25.0.plan9-amd64.tar.gz -0a047107d13ebe7943aaa6d54b1d7bbd2e45e68ce449b52915a818da715799c2 go1.25.0.plan9-arm.tar.gz -9977f9e4351984364a3b2b78f8b88bfd1d339812356d5237678514594b7d3611 go1.25.0.solaris-amd64.tar.gz -df9f39db82a803af0db639e3613a36681ab7a42866b1384b3f3a1045663961a7 go1.25.0.windows-386.zip -afd9e0a8d2665ff122c8302bb4a3ce4a5331e4e630ddc388be1f9238adfa8fe3 go1.25.0.windows-386.msi -89efb4f9b30812eee083cc1770fdd2913c14d301064f6454851428f9707d190b go1.25.0.windows-amd64.zip -936bd87109da515f79d80211de5bc6cbda071f2cc577f7e6af1a9e754ea34819 go1.25.0.windows-amd64.msi -27bab004c72b3d7bd05a69b6ec0fc54a309b4b78cc569dd963d8b3ec28bfdb8c go1.25.0.windows-arm64.zip -357d030b217ff68e700b6cfc56097bc21ad493bb45b79733a052d112f5031ed9 go1.25.0.windows-arm64.msi +d010c109cee94d80efe681eab46bdea491ac906bf46583c32e9f0dbb0bd1a594 go1.25.1.src.tar.gz +1d622468f767a1b9fe1e1e67bd6ce6744d04e0c68712adc689748bbeccb126bb go1.25.1.darwin-amd64.tar.gz +68deebb214f39d542e518ebb0598a406ab1b5a22bba8ec9ade9f55fb4dd94a6c go1.25.1.darwin-arm64.tar.gz +d03cdcbc9bd8baf5cf028de390478e9e2b3e4d0afe5a6582dedc19bfe6a263b2 go1.25.1.linux-386.tar.gz +7716a0d940a0f6ae8e1f3b3f4f36299dc53e31b16840dbd171254312c41ca12e go1.25.1.linux-amd64.tar.gz +65a3e34fb2126f55b34e1edfc709121660e1be2dee6bdf405fc399a63a95a87d go1.25.1.linux-arm64.tar.gz +eb949be683e82a99e9861dafd7057e31ea40b161eae6c4cd18fdc0e8c4ae6225 go1.25.1.linux-armv6l.tar.gz +be13d5479b8c75438f2efcaa8c191fba3af684b3228abc9c99c7aa8502f34424 go1.25.1.windows-386.zip +4a974de310e7ee1d523d2fcedb114ba5fa75408c98eb3652023e55ccf3fa7cab go1.25.1.windows-amd64.zip +45ab4290adbd6ee9e7f18f0d57eaa9008fdbef590882778ed93eac3c8cca06c5 go1.25.1.aix-ppc64.tar.gz +2e3c1549bed3124763774d648f291ac42611232f48320ebbd23517c909c09b81 go1.25.1.dragonfly-amd64.tar.gz 
+dc0198dd4ec520e13f26798def8750544edf6448d8e9c43fd2a814e4885932af go1.25.1.freebsd-386.tar.gz +c4f1a7e7b258406e6f3b677ecdbd97bbb23ff9c0d44be4eb238a07d360f69ac8 go1.25.1.freebsd-amd64.tar.gz +7772fc5ff71ed39297ec0c1599fc54e399642c9b848eac989601040923b0de9c go1.25.1.freebsd-arm.tar.gz +5bb011d5d5b6218b12189f07aa0be618ab2002662fff1ca40afba7389735c207 go1.25.1.freebsd-arm64.tar.gz +ccac716240cb049bebfafcb7eebc3758512178a4c51fc26da9cc032035d850c8 go1.25.1.freebsd-riscv64.tar.gz +cc53910ffb9fcfdd988a9fa25b5423bae1cfa01b19616be646700e1f5453b466 go1.25.1.illumos-amd64.tar.gz +efe809f923bcedab44bf7be2b3af8d182b512b1bf9c07d302e0c45d26c8f56f3 go1.25.1.linux-loong64.tar.gz +c0de33679f6ed68991dc42dc4a602e74a666e3e166c1748ee1b5d1a7ea2ffbb2 go1.25.1.linux-mips.tar.gz +c270f7b0c0bdfbcd54fef4481227c40d41bb518f9ae38ee930870f04a0a6a589 go1.25.1.linux-mips64.tar.gz +80be871ba9c944f34d1868cdf5047e1cf2e1289fe08cdb90e2453d2f0d6965ae go1.25.1.linux-mips64le.tar.gz +9f09defa9bb22ebf2cde76162f40958564e57ce5c2b3649bc063bebcbc9294c1 go1.25.1.linux-mipsle.tar.gz +2c76b7d278c1d43ad19d478ad3f0f05e7b782b64b90870701b314fa48b5f43c6 go1.25.1.linux-ppc64.tar.gz +8b0c8d3ee5b1b5c28b6bd63dc4438792012e01d03b4bf7a61d985c87edab7d1f go1.25.1.linux-ppc64le.tar.gz +22fe934a9d0c9c57275716c55b92d46ebd887cec3177c9140705efa9f84ba1e2 go1.25.1.linux-riscv64.tar.gz +9cfe517ba423f59f3738ca5c3d907c103253cffbbcc2987142f79c5de8c1bf93 go1.25.1.linux-s390x.tar.gz +6af8a08353e76205d5b743dd7a3f0126684f96f62be0a31b75daf9837e512c46 go1.25.1.netbsd-386.tar.gz +e5d534ff362edb1bd8c8e10892b6a027c4c1482454245d1529167676498684c7 go1.25.1.netbsd-amd64.tar.gz +88bcf39254fdcea6a199c1c27d787831b652427ce60851ae9e41a3d7eb477f45 go1.25.1.netbsd-arm.tar.gz +d7c2eabe1d04ee47bcaea2816fdd90dbd25d90d4dfa756faa9786c788e4f3a4e go1.25.1.netbsd-arm64.tar.gz +14a2845977eb4dde11d929858c437a043467c427db87899935e90cee04a38d72 go1.25.1.openbsd-386.tar.gz +d27ac54b38a13a09c81e67c82ac70d387037341c85c3399291c73e13e83fdd8c go1.25.1.openbsd-amd64.tar.gz 
+0f4ab5f02500afa4befd51fed1e8b45e4d07ca050f641cc3acc76eaa4027b2c3 go1.25.1.openbsd-arm.tar.gz +d46c3bd156843656f7f3cb0dec27ea51cd926ec3f7b80744bf8156e67c1c812f go1.25.1.openbsd-arm64.tar.gz +c550514c67f22e409be10e40eace761e2e43069f4ef086ae6e60aac736c2b679 go1.25.1.openbsd-ppc64.tar.gz +8a09a8714a2556eb13fc1f10b7ce2553fcea4971e3330fc3be0efd24aab45734 go1.25.1.openbsd-riscv64.tar.gz +b0e1fefaf0c7abd71f139a54eee9767944aff5f0bc9d69c968234804884e552f go1.25.1.plan9-386.tar.gz +e94732c94f149690aa0ab11c26090577211b4a988137cb2c03ec0b54e750402e go1.25.1.plan9-amd64.tar.gz +7eb80e9de1e817d9089a54e8c7c5c8d8ed9e5fb4d4a012fc0f18fc422a484f0c go1.25.1.plan9-arm.tar.gz +1261dfad7c4953c0ab90381bc1242dc54e394db7485c59349428d532b2273343 go1.25.1.solaris-amd64.tar.gz +04bc3c078e9e904c4d58d6ac2532a5bdd402bd36a9ff0b5949b3c5e6006a05ee go1.25.1.windows-arm64.zip -# version:golangci 2.0.2 +# version:golangci 2.4.0 # https://github.com/golangci/golangci-lint/releases/ -# https://github.com/golangci/golangci-lint/releases/download/v2.0.2/ -a88cbdc86b483fe44e90bf2dcc3fec2af8c754116e6edf0aa6592cac5baa7a0e golangci-lint-2.0.2-darwin-amd64.tar.gz -664550e7954f5f4451aae99b4f7382c1a47039c66f39ca605f5d9af1a0d32b49 golangci-lint-2.0.2-darwin-arm64.tar.gz -bda0f0f27d300502faceda8428834a76ca25986f6d9fc2bd41d201c3ed73f08e golangci-lint-2.0.2-freebsd-386.tar.gz -1cbd0c7ade3fb027d61d38a646ec1b51be5846952b4b04a5330e7f4687f2270c golangci-lint-2.0.2-freebsd-amd64.tar.gz -1e828a597726198b2e35acdbcc5f3aad85244d79846d2d2bdb05241c5a535f9e golangci-lint-2.0.2-freebsd-armv6.tar.gz -848b49315dc5cddd0c9ce35e96ab33d584db0ea8fb57bcbf9784f1622bec0269 golangci-lint-2.0.2-freebsd-armv7.tar.gz -cabf9a6beab574c7f98581eb237919e580024759e3cdc05c4d516b044dce6770 golangci-lint-2.0.2-illumos-amd64.tar.gz -2fde80d15ed6527791f106d606120620e913c3a663c90a8596861d0a4461169e golangci-lint-2.0.2-linux-386.deb -804bc6e350a8c613aaa0a33d8d45414a80157b0ba1b2c2335ac859f85ad98ebd golangci-lint-2.0.2-linux-386.rpm 
-e64beb72fecf581e57d88ae3adb1c9d4bf022694b6bd92e3c8e460910bbdc37d golangci-lint-2.0.2-linux-386.tar.gz -9c55aed174d7a52bb1d4006b36e7edee9023631f6b814a80cb39c9860d6f75c3 golangci-lint-2.0.2-linux-amd64.deb -c55a2ef741a687b4c679696931f7fd4a467babd64c9457cf17bb9632fd1cecd1 golangci-lint-2.0.2-linux-amd64.rpm -89cc8a7810dc63b9a37900da03e37c3601caf46d42265d774e0f1a5d883d53e2 golangci-lint-2.0.2-linux-amd64.tar.gz -a3e78583c4e7ea1b63e82559f126bb3a5b12788676f158526752d53e67824b99 golangci-lint-2.0.2-linux-arm64.deb -bd5dd52b5c9f18aa7a2904eda9a9f91c628e98623fe70b7afcbb847e2de84422 golangci-lint-2.0.2-linux-arm64.rpm -789d5b91219ac68c2336f77d41cd7e33a910420594780f455893f8453d09595b golangci-lint-2.0.2-linux-arm64.tar.gz -534cd4c464a66178714ed68152c1ed7aa73e5700bf409e4ed1a8363adf96afca golangci-lint-2.0.2-linux-armv6.deb -cf7d02905a5fc80b96c9a64621693b4cc7337b1ce29986c19fd72608dafe66c5 golangci-lint-2.0.2-linux-armv6.rpm -a0d81cb527d8fe878377f2356b5773e219b0b91832a6b59e7b9bcf9a90fe0b0e golangci-lint-2.0.2-linux-armv6.tar.gz -dedd5be7fff8cba8fe15b658a59347ea90d7d02a9fff87f09c7687e6da05a8b6 golangci-lint-2.0.2-linux-armv7.deb -85521b6f3ad2f5a2bc9bfe14b9b08623f764964048f75ed6dfcfaf8eb7d57cc1 golangci-lint-2.0.2-linux-armv7.rpm -96471046c7780dda4ea680f65e92c2ef56ff58d40bcffaf6cfe9d6d48e3c27aa golangci-lint-2.0.2-linux-armv7.tar.gz -815d914a7738e4362466b2d11004e8618b696b49e8ace13df2c2b25f28fb1e17 golangci-lint-2.0.2-linux-loong64.deb -f16381e3d8a0f011b95e086d83d620248432b915d01f4beab4d29cfe4dc388b0 golangci-lint-2.0.2-linux-loong64.rpm -1bd8d7714f9c92db6a0f23bae89f39c85ba047bec8eeb42b8748d30ae3228d18 golangci-lint-2.0.2-linux-loong64.tar.gz -ea6e9d4aabb526aa298e47e8b026d8893d918c5eb919ba0ab403e315def74cc5 golangci-lint-2.0.2-linux-mips64.deb -519d8d53af83fdc9c25cc3fba8b663331ac22ef68131d4b0084cb6f425b6f79a golangci-lint-2.0.2-linux-mips64.rpm -80d655a0a1ac1b19dcef4b58fa2a7dadb646cc50ad08d460b5c53cdb421165e4 golangci-lint-2.0.2-linux-mips64.tar.gz 
-aa0e75384bb482c865d4dfc95d23ceb25666bf20461b67a832f0eea6670312ec golangci-lint-2.0.2-linux-mips64le.deb -f2a8b500fb69bdea1b01df6267aaa5218fa4a58aeb781c1a20d0d802fe465a52 golangci-lint-2.0.2-linux-mips64le.rpm -e66a0c0c9a275f02d27a7caa9576112622306f001d73dfc082cf1ae446fc1242 golangci-lint-2.0.2-linux-mips64le.tar.gz -e85ad51aac6428be2d8a37000d053697371a538a5bcbc1644caa7c5e77f6d0af golangci-lint-2.0.2-linux-ppc64le.deb -906798365eac1944af2a9b9a303e6fd49ec9043307bc681b7a96277f7f8beea5 golangci-lint-2.0.2-linux-ppc64le.rpm -f7f1a271b0af274d6c9ce000f5dc6e1fb194350c67bcc62494f96f791882ba92 golangci-lint-2.0.2-linux-ppc64le.tar.gz -eea8bf643a42bf05de9780530db22923e5ab0d588f0e173594dc6518f2a25d2a golangci-lint-2.0.2-linux-riscv64.deb -4ff40f9fe2954400836e2a011ba4744d00ffab5068a51368552dfce6aba3b81b golangci-lint-2.0.2-linux-riscv64.rpm -531d8f225866674977d630afbf0533eb02f9bec607fb13895f7a2cd7b2e0a648 golangci-lint-2.0.2-linux-riscv64.tar.gz -6f827647046c603f40d97ea5aadc6f48cd0bb5d19f7a3d56500c3b833d2a0342 golangci-lint-2.0.2-linux-s390x.deb -387a090e9576d19ca86aac738172e58e07c19f2784a13bb387f4f0d75fb9c8d3 golangci-lint-2.0.2-linux-s390x.rpm -57de1fb7722a9feb2d11ed0a007a93959d05b9db5929a392abc222e30012467e golangci-lint-2.0.2-linux-s390x.tar.gz -ed95e0492ea86bf79eb661f0334474b2a4255093685ff587eccd797c5a54db7e golangci-lint-2.0.2-netbsd-386.tar.gz -eab81d729778166415d349a80e568b2f2b3a781745a9be3212a92abb1e732daf golangci-lint-2.0.2-netbsd-amd64.tar.gz -d20add73f7c2de2c3b01ed4fd7b63ffcf0a6597d5ea228d1699e92339a3cd047 golangci-lint-2.0.2-netbsd-arm64.tar.gz -4e4f44e6057879cd62424ff1800a767d25a595c0e91d6d48809eea9186b4c739 golangci-lint-2.0.2-netbsd-armv6.tar.gz -51ec17b16d8743ae4098a0171f04f0ed4d64561e3051b982778b0e6c306a1b03 golangci-lint-2.0.2-netbsd-armv7.tar.gz -5482cf27b93fae1765c70ee2a95d4074d038e9dee61bdd61d017ce8893d3a4a8 golangci-lint-2.0.2-source.tar.gz -a35d8fdf3e14079a10880dbbb7586b46faec89be96f086b244b3e565aac80313 golangci-lint-2.0.2-windows-386.zip 
-fe4b946cc01366b989001215687003a9c4a7098589921f75e6228d6d8cffc15c golangci-lint-2.0.2-windows-amd64.zip -646bd9250ef8c771d85cd22fe8e6f2397ae39599179755e3bbfa9ef97ad44090 golangci-lint-2.0.2-windows-arm64.zip -ce1dc0bad6f8a61d64e6b3779eeb773479c175125d6f686b0e67ef9c8432d16e golangci-lint-2.0.2-windows-armv6.zip -92684a48faabe792b11ac27ca8b25551eff940b0a1e84ad7244e98b4994962db golangci-lint-2.0.2-windows-armv7.zip +# https://github.com/golangci/golangci-lint/releases/download/v2.4.0/ +7904ce63f79db44934939cf7a063086ea0ea98e9b19eba0a9d52ccdd0d21951c golangci-lint-2.4.0-darwin-amd64.tar.gz +cd4dd53fa09b6646baff5fd22b8c64d91db02c21c7496df27992d75d34feec59 golangci-lint-2.4.0-darwin-arm64.tar.gz +d58f426ebe14cc257e81562b4bf37a488ffb4ffbbb3ec73041eb3b38bb25c0e1 golangci-lint-2.4.0-freebsd-386.tar.gz +6ec4a6177fc6c0dd541fbcb3a7612845266d020d35cc6fa92959220cdf64ca39 golangci-lint-2.4.0-freebsd-amd64.tar.gz +4d473e3e71c01feaa915a0604fb35758b41284fb976cdeac3f842118d9ee7e17 golangci-lint-2.4.0-freebsd-armv6.tar.gz +58727746c6530801a3f9a702a5945556a5eb7e88809222536dd9f9d54cafaeff golangci-lint-2.4.0-freebsd-armv7.tar.gz +fbf28c662760e24c32f82f8d16dffdb4a82de7726a52ba1fad94f890c22997ea golangci-lint-2.4.0-illumos-amd64.tar.gz +a15a000a8981ef665e971e0f67e2acda9066a9e37a59344393b7351d8fb49c81 golangci-lint-2.4.0-linux-386.tar.gz +fae792524c04424c0ac369f5b8076f04b45cf29fc945a370e55d369a8dc11840 golangci-lint-2.4.0-linux-amd64.tar.gz +70ac11f55b80ec78fd3a879249cc9255121b8dfd7f7ed4fc46ed137f4abf17e7 golangci-lint-2.4.0-linux-arm64.tar.gz +4acdc40e5cebe99e4e7ced358a05b2e71789f409b41cb4f39bbb86ccfa14b1dc golangci-lint-2.4.0-linux-armv6.tar.gz +2a68749568fa22b4a97cb88dbea655595563c795076536aa6c087f7968784bf3 golangci-lint-2.4.0-linux-armv7.tar.gz +9e3369afb023711036dcb0b4f45c9fe2792af962fa1df050c9f6ac101a6c5d73 golangci-lint-2.4.0-linux-loong64.tar.gz +bb9143d6329be2c4dbfffef9564078e7da7d88e7dde6c829b6263d98e072229e golangci-lint-2.4.0-linux-mips64.tar.gz 
+5ad1765b40d56cd04d4afd805b3ba6f4bfd9b36181da93c31e9b17e483d8608d golangci-lint-2.4.0-linux-mips64le.tar.gz +918936fb9c0d5ba96bef03cf4348b03938634cfcced49be1e9bb29cb5094fa73 golangci-lint-2.4.0-linux-ppc64le.tar.gz +f7474c638e1fb67ebbdc654b55ca0125377ea0bc88e8fee8d964a4f24eacf828 golangci-lint-2.4.0-linux-riscv64.tar.gz +b617a9543997c8bfceaffa88a75d4e595030c6add69fba800c1e4d8f5fe253dd golangci-lint-2.4.0-linux-s390x.tar.gz +7db027b03a9ba328f795215b04f594036837bc7dd0dd7cd16776b02a6167981c golangci-lint-2.4.0-netbsd-386.tar.gz +52d8f9393f4313df0a62b752c37775e3af0b818e43e8dd28954351542d7c60bc golangci-lint-2.4.0-netbsd-amd64.tar.gz +5c0086027fb5a4af3829e530c8115db4b35d11afe1914322eef528eb8cd38c69 golangci-lint-2.4.0-netbsd-arm64.tar.gz +6b779d6ed1aed87cefe195cc11759902b97a76551b593312c6833f2635a3488f golangci-lint-2.4.0-netbsd-armv6.tar.gz +f00d1f4b7ec3468a0f9fffd0d9ea036248b029b7621cbc9a59c449ef94356d09 golangci-lint-2.4.0-netbsd-armv7.tar.gz +3ce671b0b42b58e35066493aab75a7e2826c9e079988f1ba5d814a4029faaf87 golangci-lint-2.4.0-windows-386.zip +003112f7a56746feaabf20b744054bf9acdf900c9e77176383623c4b1d76aaa9 golangci-lint-2.4.0-windows-amd64.zip +dc0c2092af5d47fc2cd31a1dfe7b4c7e765fab22de98bd21ef2ffcc53ad9f54f golangci-lint-2.4.0-windows-arm64.zip +0263d23e20a260cb1592d35e12a388f99efe2c51b3611fdc66fbd9db1fce664d golangci-lint-2.4.0-windows-armv6.zip +9403c03bf648e6313036e0273149d44bad1b9ad53889b6d00e4ccb842ba3c058 golangci-lint-2.4.0-windows-armv7.zip # This is the builder on PPA that will build Go itself (inception-y), don't modify! # diff --git a/build/ci.go b/build/ci.go index 3856f32925..c6f4f28c87 100644 --- a/build/ci.go +++ b/build/ci.go @@ -64,6 +64,11 @@ import ( ) var ( + goModules = []string{ + ".", + "./cmd/keeper", + } + // Files that end up in the geth*.zip archive. gethArchiveFiles = []string{ "COPYING", @@ -216,7 +221,7 @@ func doInstall(cmdline []string) { // Default: collect all 'main' packages in cmd/ and build those. 
packages := flag.Args() if len(packages) == 0 { - packages = build.FindMainPackages("./cmd") + packages = build.FindMainPackages(&tc, "./cmd/...") } // Do the build! @@ -295,6 +300,7 @@ func doTest(cmdline []string) { if *dlgo { tc.Root = build.DownloadGo(csdb) } + gotest := tc.Go("test") // CI needs a bit more time for the statetests (default 45m). @@ -322,18 +328,26 @@ func doTest(cmdline []string) { gotest.Args = append(gotest.Args, "-short") } - packages := []string{"./..."} - if len(flag.CommandLine.Args()) > 0 { - packages = flag.CommandLine.Args() + packages := flag.CommandLine.Args() + if len(packages) > 0 { + gotest.Args = append(gotest.Args, packages...) + build.MustRun(gotest) + return + } + + // No packages specified, run all tests for all modules. + gotest.Args = append(gotest.Args, "./...") + for _, mod := range goModules { + test := *gotest + test.Dir = mod + build.MustRun(&test) } - gotest.Args = append(gotest.Args, packages...) - build.MustRun(gotest) } // downloadSpecTestFixtures downloads and extracts the execution-spec-tests fixtures. 
func downloadSpecTestFixtures(csdb *download.ChecksumDB, cachedir string) string { ext := ".tar.gz" - base := "fixtures_fusaka-devnet-3" + base := "fixtures_develop" archivePath := filepath.Join(cachedir, base+ext) if err := csdb.DownloadFileFromKnownURL(archivePath); err != nil { log.Fatal(err) @@ -351,40 +365,46 @@ func doCheckGenerate() { cachedir = flag.String("cachedir", "./build/cache", "directory for caching binaries.") tc = new(build.GoToolchain) ) - // Compute the origin hashes of all the files - var hashes map[string][32]byte - var err error - hashes, err = build.HashFolder(".", []string{"tests/testdata", "build/cache", ".git"}) - if err != nil { - log.Fatal("Error computing hashes", "err", err) - } // Run any go generate steps we might be missing var ( protocPath = downloadProtoc(*cachedir) protocGenGoPath = downloadProtocGenGo(*cachedir) ) - c := tc.Go("generate", "./...") pathList := []string{filepath.Join(protocPath, "bin"), protocGenGoPath, os.Getenv("PATH")} - c.Env = append(c.Env, "PATH="+strings.Join(pathList, string(os.PathListSeparator))) - build.MustRun(c) - // Check if generate file hashes have changed - generated, err := build.HashFolder(".", []string{"tests/testdata", "build/cache", ".git"}) - if err != nil { - log.Fatalf("Error re-computing hashes: %v", err) - } - updates := build.DiffHashes(hashes, generated) - for _, file := range updates { - log.Printf("File changed: %s", file) - } - if len(updates) != 0 { - log.Fatal("One or more generated files were updated by running 'go generate ./...'") + for _, mod := range goModules { + // Compute the origin hashes of all the files + hashes, err := build.HashFolder(mod, []string{"tests/testdata", "build/cache", ".git"}) + if err != nil { + log.Fatal("Error computing hashes", "err", err) + } + + c := tc.Go("generate", "./...") + c.Env = append(c.Env, "PATH="+strings.Join(pathList, string(os.PathListSeparator))) + c.Dir = mod + build.MustRun(c) + // Check if generate file hashes have changed + 
generated, err := build.HashFolder(mod, []string{"tests/testdata", "build/cache", ".git"}) + if err != nil { + log.Fatalf("Error re-computing hashes: %v", err) + } + updates := build.DiffHashes(hashes, generated) + for _, file := range updates { + log.Printf("File changed: %s", file) + } + if len(updates) != 0 { + log.Fatal("One or more generated files were updated by running 'go generate ./...'") + } } fmt.Println("No stale files detected.") // Run go mod tidy check. - build.MustRun(tc.Go("mod", "tidy", "-diff")) + for _, mod := range goModules { + tidy := tc.Go("mod", "tidy", "-diff") + tidy.Dir = mod + build.MustRun(tidy) + } fmt.Println("No untidy module files detected.") } @@ -424,14 +444,30 @@ func doLint(cmdline []string) { cachedir = flag.String("cachedir", "./build/cache", "directory for caching golangci-lint binary.") ) flag.CommandLine.Parse(cmdline) - packages := []string{"./..."} - if len(flag.CommandLine.Args()) > 0 { - packages = flag.CommandLine.Args() - } linter := downloadLinter(*cachedir) - lflags := []string{"run", "--config", ".golangci.yml"} - build.MustRunCommandWithOutput(linter, append(lflags, packages...)...) + linter, err := filepath.Abs(linter) + if err != nil { + log.Fatal(err) + } + config, err := filepath.Abs(".golangci.yml") + if err != nil { + log.Fatal(err) + } + + lflags := []string{"run", "--config", config} + packages := flag.CommandLine.Args() + if len(packages) > 0 { + build.MustRunCommandWithOutput(linter, append(lflags, packages...)...) + } else { + // Run for all modules in workspace. + for _, mod := range goModules { + args := append(lflags, "./...") + lintcmd := exec.Command(linter, args...) 
+ lintcmd.Dir = mod + build.MustRunWithOutput(lintcmd) + } + } fmt.Println("You have achieved perfection.") } diff --git a/cmd/devp2p/internal/ethtest/suite.go b/cmd/devp2p/internal/ethtest/suite.go index 47d00761f3..47327b6844 100644 --- a/cmd/devp2p/internal/ethtest/suite.go +++ b/cmd/devp2p/internal/ethtest/suite.go @@ -1133,7 +1133,10 @@ func (s *Suite) testBadBlobTx(t *utesting.T, tx *types.Transaction, badTx *types // transmit the same tx but with correct sidecar from the good peer. var req *eth.GetPooledTransactionsPacket - req, err = readUntil[eth.GetPooledTransactionsPacket](context.Background(), conn) + ctx, cancel := context.WithTimeout(context.Background(), 12*time.Second) + defer cancel() + + req, err = readUntil[eth.GetPooledTransactionsPacket](ctx, conn) if err != nil { errc <- fmt.Errorf("reading pooled tx request failed: %v", err) return diff --git a/cmd/era/main.go b/cmd/era/main.go index 8b57fd695c..35a889d4dc 100644 --- a/cmd/era/main.go +++ b/cmd/era/main.go @@ -274,10 +274,10 @@ func checkAccumulator(e *era.Era) error { for it.Next() { // 1) next() walks the block index, so we're able to implicitly verify it. if it.Error() != nil { - return fmt.Errorf("error reading block %d: %w", it.Number(), err) + return fmt.Errorf("error reading block %d: %w", it.Number(), it.Error()) } block, receipts, err := it.BlockAndReceipts() - if it.Error() != nil { + if err != nil { return fmt.Errorf("error reading block %d: %w", it.Number(), err) } // 2) recompute tx root and verify against header. @@ -294,6 +294,9 @@ func checkAccumulator(e *era.Era) error { td.Add(td, block.Difficulty()) tds = append(tds, new(big.Int).Set(td)) } + if it.Error() != nil { + return fmt.Errorf("error reading block %d: %w", it.Number(), it.Error()) + } // 4+5) Verify accumulator and total difficulty. 
got, err := era.ComputeAccumulator(hashes, tds) if err != nil { diff --git a/cmd/evm/internal/t8ntool/execution.go b/cmd/evm/internal/t8ntool/execution.go index da1fb3701f..f1e65afe9c 100644 --- a/cmd/evm/internal/t8ntool/execution.go +++ b/cmd/evm/internal/t8ntool/execution.go @@ -32,7 +32,6 @@ import ( "github.com/ethereum/go-ethereum/core/tracing" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/vm" - "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/params" @@ -152,7 +151,6 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig, gasUsed = uint64(0) blobGasUsed = uint64(0) receipts = make(types.Receipts, 0) - txIndex = 0 ) gaspool.AddGas(pre.Env.GasLimit) vmContext := vm.BlockContext{ @@ -193,6 +191,7 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig, Time: pre.Env.ParentTimestamp, ExcessBlobGas: pre.Env.ParentExcessBlobGas, BlobGasUsed: pre.Env.ParentBlobGasUsed, + BaseFee: pre.Env.ParentBaseFee, } header := &types.Header{ Time: pre.Env.Timestamp, @@ -250,24 +249,17 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig, continue } } - statedb.SetTxContext(tx.Hash(), txIndex) + statedb.SetTxContext(tx.Hash(), len(receipts)) var ( snapshot = statedb.Snapshot() prevGas = gaspool.Gas() ) - if evm.Config.Tracer != nil && evm.Config.Tracer.OnTxStart != nil { - evm.Config.Tracer.OnTxStart(evm.GetVMContext(), tx, msg.From) - } - // (ret []byte, usedGas uint64, failed bool, err error) - msgResult, err := core.ApplyMessage(evm, msg, gaspool) + receipt, err := core.ApplyTransactionWithEVM(msg, gaspool, statedb, vmContext.BlockNumber, blockHash, pre.Env.Timestamp, tx, &gasUsed, evm) if err != nil { statedb.RevertToSnapshot(snapshot) log.Info("rejected tx", "index", i, "hash", tx.Hash(), "from", msg.From, "error", err) rejectedTxs = append(rejectedTxs, 
&rejectedTx{i, err.Error()}) gaspool.SetGas(prevGas) - if evm.Config.Tracer != nil && evm.Config.Tracer.OnTxEnd != nil { - evm.Config.Tracer.OnTxEnd(nil, err) - } continue } includedTxs = append(includedTxs, tx) @@ -275,50 +267,11 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig, return nil, nil, nil, NewError(ErrorMissingBlockhash, hashError) } blobGasUsed += txBlobGas - gasUsed += msgResult.UsedGas - - // Receipt: - { - var root []byte - if chainConfig.IsByzantium(vmContext.BlockNumber) { - statedb.Finalise(true) - } else { - root = statedb.IntermediateRoot(chainConfig.IsEIP158(vmContext.BlockNumber)).Bytes() - } - - // Create a new receipt for the transaction, storing the intermediate root and - // gas used by the tx. - receipt := &types.Receipt{Type: tx.Type(), PostState: root, CumulativeGasUsed: gasUsed} - if msgResult.Failed() { - receipt.Status = types.ReceiptStatusFailed - } else { - receipt.Status = types.ReceiptStatusSuccessful - } - receipt.TxHash = tx.Hash() - receipt.GasUsed = msgResult.UsedGas - - // If the transaction created a contract, store the creation address in the receipt. - if msg.To == nil { - receipt.ContractAddress = crypto.CreateAddress(evm.TxContext.Origin, tx.Nonce()) - } - - // Set the receipt logs and create the bloom filter. - receipt.Logs = statedb.GetLogs(tx.Hash(), vmContext.BlockNumber.Uint64(), blockHash, vmContext.Time) - receipt.Bloom = types.CreateBloom(receipt) - - // These three are non-consensus fields: - //receipt.BlockHash - //receipt.BlockNumber - receipt.TransactionIndex = uint(txIndex) - receipts = append(receipts, receipt) - if evm.Config.Tracer != nil && evm.Config.Tracer.OnTxEnd != nil { - evm.Config.Tracer.OnTxEnd(receipt, nil) - } - } - - txIndex++ + receipts = append(receipts, receipt) } + statedb.IntermediateRoot(chainConfig.IsEIP158(vmContext.BlockNumber)) + // Add mining reward? (-1 means rewards are disabled) if miningReward >= 0 { // Add mining reward. 
The mining reward may be `0`, which only makes a difference in the cases @@ -423,7 +376,7 @@ func MakePreState(db ethdb.Database, accounts types.GenesisAlloc) *state.StateDB sdb := state.NewDatabase(tdb, nil) statedb, _ := state.New(types.EmptyRootHash, sdb) for addr, a := range accounts { - statedb.SetCode(addr, a.Code) + statedb.SetCode(addr, a.Code, tracing.CodeChangeUnspecified) statedb.SetNonce(addr, a.Nonce, tracing.NonceChangeGenesis) statedb.SetBalance(addr, uint256.MustFromBig(a.Balance), tracing.BalanceIncreaseGenesisBalance) for k, v := range a.Storage { diff --git a/cmd/evm/runner.go b/cmd/evm/runner.go index b2cf28353b..ebb3e04461 100644 --- a/cmd/evm/runner.go +++ b/cmd/evm/runner.go @@ -322,7 +322,7 @@ func runCmd(ctx *cli.Context) error { } } else { if len(code) > 0 { - prestate.SetCode(receiver, code) + prestate.SetCode(receiver, code, tracing.CodeChangeUnspecified) } execFunc = func() ([]byte, uint64, error) { // don't mutate the state! diff --git a/cmd/evm/t8n_test.go b/cmd/evm/t8n_test.go index 46146be787..3c6fd90b47 100644 --- a/cmd/evm/t8n_test.go +++ b/cmd/evm/t8n_test.go @@ -296,6 +296,14 @@ func TestT8n(t *testing.T) { output: t8nOutput{alloc: true, result: true}, expOut: "exp.json", }, + { // Osaka test, EIP-7918 blob gas with parent base fee + base: "./testdata/34", + input: t8nInput{ + "alloc.json", "txs.json", "env.json", "Osaka", "", + }, + output: t8nOutput{alloc: true, result: true}, + expOut: "exp.json", + }, } { args := []string{"t8n"} args = append(args, tc.output.get()...) 
diff --git a/cmd/evm/testdata/1/exp.json b/cmd/evm/testdata/1/exp.json index 50662f35ea..6537c9517d 100644 --- a/cmd/evm/testdata/1/exp.json +++ b/cmd/evm/testdata/1/exp.json @@ -29,7 +29,8 @@ "contractAddress": "0x0000000000000000000000000000000000000000", "gasUsed": "0x5208", "effectiveGasPrice": null, - "blockHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "blockHash": "0x1337000000000000000000000000000000000000000000000000000000000000", + "blockNumber": "0x1", "transactionIndex": "0x0" } ], diff --git a/cmd/evm/testdata/13/exp2.json b/cmd/evm/testdata/13/exp2.json index 6415a4f1f4..f716289cf7 100644 --- a/cmd/evm/testdata/13/exp2.json +++ b/cmd/evm/testdata/13/exp2.json @@ -17,7 +17,8 @@ "contractAddress": "0x0000000000000000000000000000000000000000", "gasUsed": "0x84d0", "effectiveGasPrice": null, - "blockHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "blockHash": "0x1337000000000000000000000000000000000000000000000000000000000000", + "blockNumber": "0x1", "transactionIndex": "0x0" }, { @@ -31,7 +32,8 @@ "contractAddress": "0x0000000000000000000000000000000000000000", "gasUsed": "0x84d0", "effectiveGasPrice": null, - "blockHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "blockHash": "0x1337000000000000000000000000000000000000000000000000000000000000", + "blockNumber": "0x1", "transactionIndex": "0x1" } ], diff --git a/cmd/evm/testdata/23/exp.json b/cmd/evm/testdata/23/exp.json index 7f36165e35..2d9cd492db 100644 --- a/cmd/evm/testdata/23/exp.json +++ b/cmd/evm/testdata/23/exp.json @@ -16,7 +16,8 @@ "contractAddress": "0x0000000000000000000000000000000000000000", "gasUsed": "0x520b", "effectiveGasPrice": null, - "blockHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "blockHash": "0x1337000000000000000000000000000000000000000000000000000000000000", + "blockNumber": "0x5", "transactionIndex": "0x0" } ], diff --git 
a/cmd/evm/testdata/24/exp.json b/cmd/evm/testdata/24/exp.json index 8f380c662b..0dd552e112 100644 --- a/cmd/evm/testdata/24/exp.json +++ b/cmd/evm/testdata/24/exp.json @@ -32,7 +32,8 @@ "contractAddress": "0x0000000000000000000000000000000000000000", "gasUsed": "0xa861", "effectiveGasPrice": null, - "blockHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "blockHash": "0x1337000000000000000000000000000000000000000000000000000000000000", + "blockNumber": "0x1", "transactionIndex": "0x0" }, { @@ -45,7 +46,8 @@ "contractAddress": "0x0000000000000000000000000000000000000000", "gasUsed": "0x5aa5", "effectiveGasPrice": null, - "blockHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "blockHash": "0x1337000000000000000000000000000000000000000000000000000000000000", + "blockNumber": "0x1", "transactionIndex": "0x1" } ], diff --git a/cmd/evm/testdata/25/exp.json b/cmd/evm/testdata/25/exp.json index a674633762..3dac46aa60 100644 --- a/cmd/evm/testdata/25/exp.json +++ b/cmd/evm/testdata/25/exp.json @@ -28,7 +28,8 @@ "contractAddress": "0x0000000000000000000000000000000000000000", "gasUsed": "0x5208", "effectiveGasPrice": null, - "blockHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "blockHash": "0x1337000000000000000000000000000000000000000000000000000000000000", + "blockNumber": "0x1", "transactionIndex": "0x0" } ], diff --git a/cmd/evm/testdata/28/exp.json b/cmd/evm/testdata/28/exp.json index b86c2d8def..15b29bc0ac 100644 --- a/cmd/evm/testdata/28/exp.json +++ b/cmd/evm/testdata/28/exp.json @@ -33,7 +33,10 @@ "contractAddress": "0x0000000000000000000000000000000000000000", "gasUsed": "0xa865", "effectiveGasPrice": null, - "blockHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "blobGasUsed": "0x20000", + "blobGasPrice": "0x1", + "blockHash": "0x1337000000000000000000000000000000000000000000000000000000000000", + "blockNumber": "0x1", "transactionIndex": "0x0" 
} ], diff --git a/cmd/evm/testdata/29/exp.json b/cmd/evm/testdata/29/exp.json index 7fbdc18283..69c8661aa8 100644 --- a/cmd/evm/testdata/29/exp.json +++ b/cmd/evm/testdata/29/exp.json @@ -31,7 +31,8 @@ "contractAddress": "0x0000000000000000000000000000000000000000", "gasUsed": "0x5208", "effectiveGasPrice": null, - "blockHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "blockHash": "0x1337000000000000000000000000000000000000000000000000000000000000", + "blockNumber": "0x1", "transactionIndex": "0x0" } ], diff --git a/cmd/evm/testdata/3/exp.json b/cmd/evm/testdata/3/exp.json index 831c078591..807cdccfb4 100644 --- a/cmd/evm/testdata/3/exp.json +++ b/cmd/evm/testdata/3/exp.json @@ -29,7 +29,8 @@ "contractAddress": "0x0000000000000000000000000000000000000000", "gasUsed": "0x521f", "effectiveGasPrice": null, - "blockHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "blockHash": "0x1337000000000000000000000000000000000000000000000000000000000000", + "blockNumber": "0x5", "transactionIndex": "0x0" } ], diff --git a/cmd/evm/testdata/30/exp.json b/cmd/evm/testdata/30/exp.json index a206c3bbdf..9861f5a071 100644 --- a/cmd/evm/testdata/30/exp.json +++ b/cmd/evm/testdata/30/exp.json @@ -30,7 +30,8 @@ "contractAddress": "0x0000000000000000000000000000000000000000", "gasUsed": "0x5208", "effectiveGasPrice": null, - "blockHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "blockHash": "0x1337000000000000000000000000000000000000000000000000000000000000", + "blockNumber": "0x1", "transactionIndex": "0x0" }, { @@ -44,7 +45,8 @@ "contractAddress": "0x0000000000000000000000000000000000000000", "gasUsed": "0x5208", "effectiveGasPrice": null, - "blockHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "blockHash": "0x1337000000000000000000000000000000000000000000000000000000000000", + "blockNumber": "0x1", "transactionIndex": "0x1" } ], diff --git 
a/cmd/evm/testdata/33/exp.json b/cmd/evm/testdata/33/exp.json index ae82ef3efa..b40ca9fee2 100644 --- a/cmd/evm/testdata/33/exp.json +++ b/cmd/evm/testdata/33/exp.json @@ -48,7 +48,8 @@ "contractAddress": "0x0000000000000000000000000000000000000000", "gasUsed": "0x15fa9", "effectiveGasPrice": null, - "blockHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "blockHash": "0x1337000000000000000000000000000000000000000000000000000000000000", + "blockNumber": "0x1", "transactionIndex": "0x0" } ], diff --git a/cmd/evm/testdata/34/README.md b/cmd/evm/testdata/34/README.md new file mode 100644 index 0000000000..a18f85ca14 --- /dev/null +++ b/cmd/evm/testdata/34/README.md @@ -0,0 +1,6 @@ +This test verifies that Osaka fork blob gas calculation works correctly when +parentBaseFee is provided. It tests the EIP-7918 reserve price calculation +which requires parent.BaseFee to be properly set. + +Regression test for: nil pointer dereference when parent.BaseFee was not +included in the parent header during Osaka fork blob gas calculations. 
\ No newline at end of file diff --git a/cmd/evm/testdata/34/alloc.json b/cmd/evm/testdata/34/alloc.json new file mode 100644 index 0000000000..199de13285 --- /dev/null +++ b/cmd/evm/testdata/34/alloc.json @@ -0,0 +1,6 @@ +{ + "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b": { + "balance": "0x1000000000000000000", + "nonce": "0x0" + } +} \ No newline at end of file diff --git a/cmd/evm/testdata/34/env.json b/cmd/evm/testdata/34/env.json new file mode 100644 index 0000000000..ae2bde5ef3 --- /dev/null +++ b/cmd/evm/testdata/34/env.json @@ -0,0 +1,18 @@ +{ + "currentCoinbase": "0x0000000000000000000000000000000000000000", + "currentDifficulty": "0x0", + "currentRandom": "0x0000000000000000000000000000000000000000000000000000000000000000", + "currentGasLimit": "0x5f5e100", + "currentNumber": "0x1", + "currentTimestamp": "0x1000", + "parentTimestamp": "0x0", + "currentBaseFee": "0x10", + "parentBaseFee": "0x0a", + "parentGasUsed": "0x0", + "parentGasLimit": "0x5f5e100", + "currentExcessBlobGas": "0x0", + "parentExcessBlobGas": "0x0", + "parentBlobGasUsed": "0x20000", + "parentBeaconBlockRoot": "0x0000000000000000000000000000000000000000000000000000000000000000", + "withdrawals": [] +} \ No newline at end of file diff --git a/cmd/evm/testdata/34/exp.json b/cmd/evm/testdata/34/exp.json new file mode 100644 index 0000000000..56d24a532e --- /dev/null +++ b/cmd/evm/testdata/34/exp.json @@ -0,0 +1,23 @@ +{ + "alloc": { + "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b": { + "balance": "0x1000000000000000000" + } + }, + "result": { + "stateRoot": "0x01c28492482a1a1f66224726ef1059a7036fce69d1d2c991b65cd013725d5742", + "txRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", + "receiptsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", + "logsHash": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347", + "logsBloom": 
"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "currentDifficulty": null, + "receipts": [], + "gasUsed": "0x0", + "currentBaseFee": "0x10", + "withdrawalsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", + "currentExcessBlobGas": "0x0", + "blobGasUsed": "0x0", + "requestsHash": "0xe3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "requests": [] + } +} \ No newline at end of file diff --git a/cmd/evm/testdata/34/txs.json b/cmd/evm/testdata/34/txs.json new file mode 100644 index 0000000000..0637a088a0 --- /dev/null +++ b/cmd/evm/testdata/34/txs.json @@ -0,0 +1 @@ +[] \ No newline at end of file diff --git a/cmd/geth/chaincmd.go b/cmd/geth/chaincmd.go index d0f4d6f81d..c5145bbfb7 100644 --- a/cmd/geth/chaincmd.go +++ b/cmd/geth/chaincmd.go @@ -59,6 +59,8 @@ var ( Flags: slices.Concat([]cli.Flag{ utils.CachePreimagesFlag, utils.OverrideOsaka, + utils.OverrideBPO1, + utils.OverrideBPO2, utils.OverrideVerkle, }, utils.DatabaseFlags), Description: ` @@ -108,6 +110,7 @@ if one is set. 
Otherwise it prints the genesis from the datadir.`, utils.MetricsInfluxDBTokenFlag, utils.MetricsInfluxDBBucketFlag, utils.MetricsInfluxDBOrganizationFlag, + utils.StateSizeTrackingFlag, utils.TxLookupLimitFlag, utils.VMTraceFlag, utils.VMTraceJsonConfigFlag, @@ -273,6 +276,14 @@ func initGenesis(ctx *cli.Context) error { v := ctx.Uint64(utils.OverrideOsaka.Name) overrides.OverrideOsaka = &v } + if ctx.IsSet(utils.OverrideBPO1.Name) { + v := ctx.Uint64(utils.OverrideBPO1.Name) + overrides.OverrideBPO1 = &v + } + if ctx.IsSet(utils.OverrideBPO2.Name) { + v := ctx.Uint64(utils.OverrideBPO2.Name) + overrides.OverrideBPO2 = &v + } if ctx.IsSet(utils.OverrideVerkle.Name) { v := ctx.Uint64(utils.OverrideVerkle.Name) overrides.OverrideVerkle = &v @@ -281,7 +292,7 @@ func initGenesis(ctx *cli.Context) error { chaindb := utils.MakeChainDatabase(ctx, stack, false) defer chaindb.Close() - triedb := utils.MakeTrieDatabase(ctx, chaindb, ctx.Bool(utils.CachePreimagesFlag.Name), false, genesis.IsVerkle()) + triedb := utils.MakeTrieDatabase(ctx, stack, chaindb, ctx.Bool(utils.CachePreimagesFlag.Name), false, genesis.IsVerkle()) defer triedb.Close() _, hash, compatErr, err := core.SetupGenesisBlockWithOverride(chaindb, triedb, genesis, &overrides) @@ -635,7 +646,7 @@ func dump(ctx *cli.Context) error { if err != nil { return err } - triedb := utils.MakeTrieDatabase(ctx, db, true, true, false) // always enable preimage lookup + triedb := utils.MakeTrieDatabase(ctx, stack, db, true, true, false) // always enable preimage lookup defer triedb.Close() state, err := state.New(root, state.NewDatabase(triedb, nil)) diff --git a/cmd/geth/config.go b/cmd/geth/config.go index 96bd715e88..fcb315af97 100644 --- a/cmd/geth/config.go +++ b/cmd/geth/config.go @@ -209,7 +209,7 @@ func constructDevModeBanner(ctx *cli.Context, cfg gethConfig) string { 0x%x (10^49 ETH) `, cfg.Eth.Miner.PendingFeeRecipient) if cfg.Eth.Miner.PendingFeeRecipient == utils.DeveloperAddr { - devModeBanner += fmt.Sprintf(` + 
devModeBanner += fmt.Sprintf(` Private Key ------------------ 0x%x @@ -227,6 +227,14 @@ func makeFullNode(ctx *cli.Context) *node.Node { v := ctx.Uint64(utils.OverrideOsaka.Name) cfg.Eth.OverrideOsaka = &v } + if ctx.IsSet(utils.OverrideBPO1.Name) { + v := ctx.Uint64(utils.OverrideBPO1.Name) + cfg.Eth.OverrideBPO1 = &v + } + if ctx.IsSet(utils.OverrideBPO2.Name) { + v := ctx.Uint64(utils.OverrideBPO2.Name) + cfg.Eth.OverrideBPO2 = &v + } if ctx.IsSet(utils.OverrideVerkle.Name) { v := ctx.Uint64(utils.OverrideVerkle.Name) cfg.Eth.OverrideVerkle = &v diff --git a/cmd/geth/dbcmd.go b/cmd/geth/dbcmd.go index 44a52521f0..c57add0656 100644 --- a/cmd/geth/dbcmd.go +++ b/cmd/geth/dbcmd.go @@ -524,7 +524,7 @@ func dbDumpTrie(ctx *cli.Context) error { db := utils.MakeChainDatabase(ctx, stack, true) defer db.Close() - triedb := utils.MakeTrieDatabase(ctx, db, false, true, false) + triedb := utils.MakeTrieDatabase(ctx, stack, db, false, true, false) defer triedb.Close() var ( @@ -859,7 +859,7 @@ func inspectHistory(ctx *cli.Context) error { db := utils.MakeChainDatabase(ctx, stack, true) defer db.Close() - triedb := utils.MakeTrieDatabase(ctx, db, false, false, false) + triedb := utils.MakeTrieDatabase(ctx, stack, db, false, false, false) defer triedb.Close() var ( diff --git a/cmd/geth/logging_test.go b/cmd/geth/logging_test.go index 37fffecc30..d420c2d078 100644 --- a/cmd/geth/logging_test.go +++ b/cmd/geth/logging_test.go @@ -91,7 +91,7 @@ func testConsoleLogging(t *testing.T, format string, tStart, tEnd int) { have = censor(have, tStart, tEnd) want = censor(want, tStart, tEnd) if have != want { - t.Logf(nicediff([]byte(have), []byte(want))) + t.Log(nicediff([]byte(have), []byte(want))) t.Fatalf("format %v, line %d\nhave %v\nwant %v", format, i, have, want) } } @@ -142,7 +142,7 @@ func TestJsonLogging(t *testing.T) { } if !bytes.Equal(have, want) { // show an intelligent diff - t.Logf(nicediff(have, want)) + t.Log(nicediff(have, want)) t.Errorf("file content wrong") } } @@ 
-211,7 +211,7 @@ func TestFileOut(t *testing.T) { } if !bytes.Equal(have, want) { // show an intelligent diff - t.Logf(nicediff(have, want)) + t.Log(nicediff(have, want)) t.Errorf("file content wrong") } } @@ -231,7 +231,7 @@ func TestRotatingFileOut(t *testing.T) { } if !bytes.Equal(have, want) { // show an intelligent diff - t.Logf(nicediff(have, want)) + t.Log(nicediff(have, want)) t.Errorf("file content wrong") } } diff --git a/cmd/geth/main.go b/cmd/geth/main.go index 2da5c43216..2465b52ad1 100644 --- a/cmd/geth/main.go +++ b/cmd/geth/main.go @@ -63,6 +63,8 @@ var ( utils.USBFlag, utils.SmartCardDaemonPathFlag, utils.OverrideOsaka, + utils.OverrideBPO1, + utils.OverrideBPO2, utils.OverrideVerkle, utils.EnablePersonal, // deprecated utils.TxPoolLocalsFlag, @@ -133,6 +135,8 @@ var ( utils.VMEnableDebugFlag, utils.VMTraceFlag, utils.VMTraceJsonConfigFlag, + utils.VMWitnessStatsFlag, + utils.VMStatelessSelfValidationFlag, utils.NetworkIdFlag, utils.EthStatsURLFlag, utils.GpoBlocksFlag, @@ -180,6 +184,7 @@ var ( utils.RPCGlobalGasCapFlag, utils.RPCGlobalEVMTimeoutFlag, utils.RPCGlobalTxFeeCapFlag, + utils.RPCGlobalLogQueryLimit, utils.AllowUnprotectedTxs, utils.BatchRequestLimit, utils.BatchResponseMaxSize, @@ -200,6 +205,7 @@ var ( utils.MetricsInfluxDBTokenFlag, utils.MetricsInfluxDBBucketFlag, utils.MetricsInfluxDBOrganizationFlag, + utils.StateSizeTrackingFlag, } ) diff --git a/cmd/geth/snapshot.go b/cmd/geth/snapshot.go index aa9ae7087f..7621dfa93c 100644 --- a/cmd/geth/snapshot.go +++ b/cmd/geth/snapshot.go @@ -217,7 +217,7 @@ func verifyState(ctx *cli.Context) error { log.Error("Failed to load head block") return errors.New("no head block") } - triedb := utils.MakeTrieDatabase(ctx, chaindb, false, true, false) + triedb := utils.MakeTrieDatabase(ctx, stack, chaindb, false, true, false) defer triedb.Close() var ( @@ -282,7 +282,7 @@ func traverseState(ctx *cli.Context) error { chaindb := utils.MakeChainDatabase(ctx, stack, true) defer chaindb.Close() - triedb 
:= utils.MakeTrieDatabase(ctx, chaindb, false, true, false) + triedb := utils.MakeTrieDatabase(ctx, stack, chaindb, false, true, false) defer triedb.Close() headBlock := rawdb.ReadHeadBlock(chaindb) @@ -391,7 +391,7 @@ func traverseRawState(ctx *cli.Context) error { chaindb := utils.MakeChainDatabase(ctx, stack, true) defer chaindb.Close() - triedb := utils.MakeTrieDatabase(ctx, chaindb, false, true, false) + triedb := utils.MakeTrieDatabase(ctx, stack, chaindb, false, true, false) defer triedb.Close() headBlock := rawdb.ReadHeadBlock(chaindb) @@ -558,20 +558,14 @@ func dumpState(ctx *cli.Context) error { if err != nil { return err } - triedb := utils.MakeTrieDatabase(ctx, db, false, true, false) + triedb := utils.MakeTrieDatabase(ctx, stack, db, false, true, false) defer triedb.Close() - snapConfig := snapshot.Config{ - CacheSize: 256, - Recovery: false, - NoBuild: true, - AsyncBuild: false, - } - snaptree, err := snapshot.New(snapConfig, db, triedb, root) + stateIt, err := utils.NewStateIterator(triedb, db, root) if err != nil { return err } - accIt, err := snaptree.AccountIterator(root, common.BytesToHash(conf.Start)) + accIt, err := stateIt.AccountIterator(root, common.BytesToHash(conf.Start)) if err != nil { return err } @@ -605,7 +599,7 @@ func dumpState(ctx *cli.Context) error { if !conf.SkipStorage { da.Storage = make(map[common.Hash]string) - stIt, err := snaptree.StorageIterator(root, accIt.Hash(), common.Hash{}) + stIt, err := stateIt.StorageIterator(root, accIt.Hash(), common.Hash{}) if err != nil { return err } @@ -640,7 +634,7 @@ func snapshotExportPreimages(ctx *cli.Context) error { chaindb := utils.MakeChainDatabase(ctx, stack, true) defer chaindb.Close() - triedb := utils.MakeTrieDatabase(ctx, chaindb, false, true, false) + triedb := utils.MakeTrieDatabase(ctx, stack, chaindb, false, true, false) defer triedb.Close() var root common.Hash @@ -658,17 +652,11 @@ func snapshotExportPreimages(ctx *cli.Context) error { } root = headBlock.Root() } - 
snapConfig := snapshot.Config{ - CacheSize: 256, - Recovery: false, - NoBuild: true, - AsyncBuild: false, - } - snaptree, err := snapshot.New(snapConfig, chaindb, triedb, root) + stateIt, err := utils.NewStateIterator(triedb, chaindb, root) if err != nil { return err } - return utils.ExportSnapshotPreimages(chaindb, snaptree, ctx.Args().First(), root) + return utils.ExportSnapshotPreimages(chaindb, stateIt, ctx.Args().First(), root) } // checkAccount iterates the snap data layers, and looks up the given account diff --git a/cmd/keeper/1192c3_block.rlp b/cmd/keeper/1192c3_block.rlp new file mode 100644 index 0000000000..e788c2b8fd Binary files /dev/null and b/cmd/keeper/1192c3_block.rlp differ diff --git a/cmd/keeper/1192c3_witness.rlp b/cmd/keeper/1192c3_witness.rlp new file mode 100644 index 0000000000..4990f2299f Binary files /dev/null and b/cmd/keeper/1192c3_witness.rlp differ diff --git a/cmd/keeper/README.md b/cmd/keeper/README.md new file mode 100644 index 0000000000..4737a22674 --- /dev/null +++ b/cmd/keeper/README.md @@ -0,0 +1,69 @@ +# Keeper - geth as a zkvm guest + +Keeper command is a specialized tool for validating stateless execution of Ethereum blocks. It's designed to run as a zkvm guest. + +## Overview + +The keeper reads an RLP-encoded payload containing: +- A block to execute +- A witness with the necessary state data +- A chainID + +It then executes the block statelessly and validates that the computed state root and receipt root match the values in the block header. 
+ +## Building Keeper + +The keeper uses build tags to compile platform-specific input methods and chain configurations: + +### Example Implementation + +See `getpayload_example.go` for a complete example with embedded Hoodi block data: + +```bash +# Build example with different chain configurations +go build -tags "example" ./cmd/keeper +``` + +### Ziren zkVM Implementation + +Build for the Ziren zkVM platform, which is a MIPS ISA-based zkvm: + +```bash +GOOS=linux GOARCH=mipsle GOMIPS=softfloat go build -tags "ziren" ./cmd/keeper +``` + +As an example runner, refer to https://gist.github.com/gballet/7b669a99eb3ab2b593324e3a76abd23d + +## Creating a Custom Platform Implementation + +To add support for a new platform (e.g., "myplatform"), create a new file with the appropriate build tag: + +### 1. Create `getinput_myplatform.go` + +```go +//go:build myplatform + +package main + +import ( + "github.com/ethereum/go-ethereum/params" + // ... other imports as needed +) + +// getInput returns the RLP-encoded payload +func getInput() []byte { + // Your platform-specific code to retrieve the RLP-encoded payload + // This might read from: + // - Memory-mapped I/O + // - Hardware registers + // - Serial port + // - Network interface + // - File system + + // The payload must be RLP-encoded and contain: + // - Block with transactions + // - Witness with parent headers and state data + + return encodedPayload +} +``` diff --git a/cmd/keeper/chainconfig.go b/cmd/keeper/chainconfig.go new file mode 100644 index 0000000000..c9859d450f --- /dev/null +++ b/cmd/keeper/chainconfig.go @@ -0,0 +1,38 @@ +// Copyright 2025 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package main + +import ( + "fmt" + + "github.com/ethereum/go-ethereum/params" +) + +// getChainConfig returns the appropriate chain configuration based on the chainID. +// Returns an error for unsupported chain IDs. +func getChainConfig(chainID uint64) (*params.ChainConfig, error) { + switch chainID { + case 0, params.MainnetChainConfig.ChainID.Uint64(): + return params.MainnetChainConfig, nil + case params.SepoliaChainConfig.ChainID.Uint64(): + return params.SepoliaChainConfig, nil + case params.HoodiChainConfig.ChainID.Uint64(): + return params.HoodiChainConfig, nil + default: + return nil, fmt.Errorf("unsupported chain ID: %d", chainID) + } +} diff --git a/cmd/keeper/getpayload_example.go b/cmd/keeper/getpayload_example.go new file mode 100644 index 0000000000..683cc79248 --- /dev/null +++ b/cmd/keeper/getpayload_example.go @@ -0,0 +1,102 @@ +// Copyright 2025 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. 
+// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +//go:build example + +package main + +import ( + _ "embed" + "fmt" + "os" + + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/core/stateless" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/params" + "github.com/ethereum/go-ethereum/rlp" +) + +// ExtWitness is a witness RLP encoding for transferring across clients. +// This is taken from PR #32216 until it's merged. +// It contains block headers, contract codes, state nodes, and storage keys +// required for stateless execution verification. +type ExtWitness struct { + Headers []*types.Header `json:"headers"` + Codes []hexutil.Bytes `json:"codes"` + State []hexutil.Bytes `json:"state"` + Keys []hexutil.Bytes `json:"keys"` +} + +// This is taken from PR #32216 until it's merged +// fromExtWitness converts the consensus witness format into our internal one. +func fromExtWitness(ext *ExtWitness) (*stateless.Witness, error) { + w := &stateless.Witness{} + w.Headers = ext.Headers + + w.Codes = make(map[string]struct{}, len(ext.Codes)) + for _, code := range ext.Codes { + w.Codes[string(code)] = struct{}{} + } + w.State = make(map[string]struct{}, len(ext.State)) + for _, node := range ext.State { + w.State[string(node)] = struct{}{} + } + return w, nil +} + +//go:embed 1192c3_witness.rlp +var witnessRlp []byte + +//go:embed 1192c3_block.rlp +var blockRlp []byte + +// getInput is a platform-specific function that will recover the input payload +// and returns it as a slice. It is expected to be an RLP-encoded Payload structure +// that contains the witness and the block. +// This is a demo version, that is intended to run on a regular computer, so what +// it does is embed a small Hoodi block, encodes the Payload structure containing +// the block and its witness as RLP, and returns the encoding. 
+func getInput() []byte { + var block types.Block + err := rlp.DecodeBytes(blockRlp, &block) + if err != nil { + panic(err) + } + + var extwitness ExtWitness + err = rlp.DecodeBytes(witnessRlp, &extwitness) + if err != nil { + panic(err) + } + witness, err := fromExtWitness(&extwitness) + if err != nil { + panic(err) + } + + payload := Payload{ + ChainID: params.HoodiChainConfig.ChainID.Uint64(), + Block: &block, + Witness: witness, + } + + encoded, err := rlp.EncodeToBytes(payload) + if err != nil { + fmt.Fprintf(os.Stderr, "failed to encode payload: %v\n", err) + os.Exit(20) + } + return encoded +} diff --git a/cmd/keeper/getpayload_ziren.go b/cmd/keeper/getpayload_ziren.go new file mode 100644 index 0000000000..11c5845bcc --- /dev/null +++ b/cmd/keeper/getpayload_ziren.go @@ -0,0 +1,31 @@ +// Copyright 2025 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +//go:build ziren + +package main + +import ( + zkruntime "github.com/zkMIPS/zkMIPS/crates/go-runtime/zkm_runtime" +) + +// getInput reads the input payload from the zkVM runtime environment. +// The zkVM host provides the RLP-encoded Payload structure containing +// the block and witness data through the runtime's input mechanism. 
+func getInput() []byte { + input := zkruntime.Read[[]byte]() + return input +} diff --git a/cmd/keeper/go.mod b/cmd/keeper/go.mod new file mode 100644 index 0000000000..d1649da43f --- /dev/null +++ b/cmd/keeper/go.mod @@ -0,0 +1,49 @@ +module github.com/ethereum/go-ethereum/cmd/keeper + +go 1.24.0 + +require ( + github.com/ethereum/go-ethereum v0.0.0-00010101000000-000000000000 + github.com/zkMIPS/zkMIPS/crates/go-runtime/zkm_runtime v0.0.0-20250915074013-fbc07aa2c6f5 +) + +require ( + github.com/StackExchange/wmi v1.2.1 // indirect + github.com/VictoriaMetrics/fastcache v1.12.2 // indirect + github.com/bits-and-blooms/bitset v1.20.0 // indirect + github.com/cespare/xxhash/v2 v2.3.0 // indirect + github.com/consensys/gnark-crypto v0.18.0 // indirect + github.com/crate-crypto/go-eth-kzg v1.4.0 // indirect + github.com/crate-crypto/go-ipa v0.0.0-20240724233137-53bbb0ceb27a // indirect + github.com/deckarep/golang-set/v2 v2.6.0 // indirect + github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1 // indirect + github.com/emicklei/dot v1.6.2 // indirect + github.com/ethereum/c-kzg-4844/v2 v2.1.3 // indirect + github.com/ethereum/go-bigmodexpfix v0.0.0-20250911101455-f9e208c548ab // indirect + github.com/ethereum/go-verkle v0.2.2 // indirect + github.com/ferranbt/fastssz v0.1.4 // indirect + github.com/go-ole/go-ole v1.3.0 // indirect + github.com/gofrs/flock v0.12.1 // indirect + github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb // indirect + github.com/holiman/bloomfilter/v2 v2.0.3 // indirect + github.com/holiman/uint256 v1.3.2 // indirect + github.com/klauspost/cpuid/v2 v2.0.9 // indirect + github.com/mattn/go-runewidth v0.0.13 // indirect + github.com/minio/sha256-simd v1.0.0 // indirect + github.com/mitchellh/mapstructure v1.4.1 // indirect + github.com/olekukonko/tablewriter v0.0.5 // indirect + github.com/rivo/uniseg v0.2.0 // indirect + github.com/shirou/gopsutil v3.21.4-0.20210419000835-c7a38de76ee5+incompatible // indirect + 
github.com/supranational/blst v0.3.16-0.20250831170142-f48500c1fdbe // indirect + github.com/tklauser/go-sysconf v0.3.12 // indirect + github.com/tklauser/numcpus v0.6.1 // indirect + golang.org/x/crypto v0.36.0 // indirect + golang.org/x/sync v0.12.0 // indirect + golang.org/x/sys v0.36.0 // indirect + gopkg.in/yaml.v2 v2.4.0 // indirect +) + +replace ( + github.com/ethereum/go-ethereum => ../../ + github.com/zkMIPS/zkMIPS/crates/go-runtime/zkm_runtime => github.com/weilzkm/zkMIPS/crates/go-runtime/zkvm_runtime v0.0.0-20250915074013-fbc07aa2c6f5 +) diff --git a/cmd/keeper/go.sum b/cmd/keeper/go.sum new file mode 100644 index 0000000000..e3bc204ba8 --- /dev/null +++ b/cmd/keeper/go.sum @@ -0,0 +1,150 @@ +github.com/DataDog/zstd v1.4.5 h1:EndNeuB0l9syBZhut0wns3gV1hL8zX8LIu6ZiVHWLIQ= +github.com/DataDog/zstd v1.4.5/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo= +github.com/StackExchange/wmi v1.2.1 h1:VIkavFPXSjcnS+O8yTq7NI32k0R5Aj+v39y29VYDOSA= +github.com/StackExchange/wmi v1.2.1/go.mod h1:rcmrprowKIVzvc+NUiLncP2uuArMWLCbu9SBzvHz7e8= +github.com/VictoriaMetrics/fastcache v1.12.2 h1:N0y9ASrJ0F6h0QaC3o6uJb3NIZ9VKLjCM7NQbSmF7WI= +github.com/VictoriaMetrics/fastcache v1.12.2/go.mod h1:AmC+Nzz1+3G2eCPapF6UcsnkThDcMsQicp4xDukwJYI= +github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156 h1:eMwmnE/GDgah4HI848JfFxHt+iPb26b4zyfspmqY0/8= +github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156/go.mod h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2ucNfQESPXM= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/bits-and-blooms/bitset v1.20.0 h1:2F+rfL86jE2d/bmw7OhqUg2Sj/1rURkBn3MdfoPyRVU= +github.com/bits-and-blooms/bitset v1.20.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8= +github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.3.0 
h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cockroachdb/errors v1.11.3 h1:5bA+k2Y6r+oz/6Z/RFlNeVCesGARKuC6YymtcDrbC/I= +github.com/cockroachdb/errors v1.11.3/go.mod h1:m4UIW4CDjx+R5cybPsNrRbreomiFqt8o1h1wUVazSd8= +github.com/cockroachdb/fifo v0.0.0-20240606204812-0bbfbd93a7ce h1:giXvy4KSc/6g/esnpM7Geqxka4WSqI1SZc7sMJFd3y4= +github.com/cockroachdb/fifo v0.0.0-20240606204812-0bbfbd93a7ce/go.mod h1:9/y3cnZ5GKakj/H4y9r9GTjCvAFta7KLgSHPJJYc52M= +github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b h1:r6VH0faHjZeQy818SGhaone5OnYfxFR/+AzdY3sf5aE= +github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b/go.mod h1:Vz9DsVWQQhf3vs21MhPMZpMGSht7O/2vFW2xusFUVOs= +github.com/cockroachdb/pebble v1.1.5 h1:5AAWCBWbat0uE0blr8qzufZP5tBjkRyy/jWe1QWLnvw= +github.com/cockroachdb/pebble v1.1.5/go.mod h1:17wO9el1YEigxkP/YtV8NtCivQDgoCyBg5c4VR/eOWo= +github.com/cockroachdb/redact v1.1.5 h1:u1PMllDkdFfPWaNGMyLD1+so+aq3uUItthCFqzwPJ30= +github.com/cockroachdb/redact v1.1.5/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg= +github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 h1:zuQyyAKVxetITBuuhv3BI9cMrmStnpT18zmgmTxunpo= +github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06/go.mod h1:7nc4anLGjupUW/PeY5qiNYsdNXj7zopG+eqsS7To5IQ= +github.com/consensys/gnark-crypto v0.18.0 h1:vIye/FqI50VeAr0B3dx+YjeIvmc3LWz4yEfbWBpTUf0= +github.com/consensys/gnark-crypto v0.18.0/go.mod h1:L3mXGFTe1ZN+RSJ+CLjUt9x7PNdx8ubaYfDROyp2Z8c= +github.com/crate-crypto/go-eth-kzg v1.4.0 h1:WzDGjHk4gFg6YzV0rJOAsTK4z3Qkz5jd4RE3DAvPFkg= +github.com/crate-crypto/go-eth-kzg v1.4.0/go.mod h1:J9/u5sWfznSObptgfa92Jq8rTswn6ahQWEuiLHOjCUI= +github.com/crate-crypto/go-ipa v0.0.0-20240724233137-53bbb0ceb27a h1:W8mUrRp6NOVl3J+MYp5kPMoUZPp7aOYHtaua31lwRHg= +github.com/crate-crypto/go-ipa v0.0.0-20240724233137-53bbb0ceb27a/go.mod 
h1:sTwzHBvIzm2RfVCGNEBZgRyjwK40bVoun3ZnGOCafNM= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/deckarep/golang-set/v2 v2.6.0 h1:XfcQbWM1LlMB8BsJ8N9vW5ehnnPVIw0je80NsVHagjM= +github.com/deckarep/golang-set/v2 v2.6.0/go.mod h1:VAky9rY/yGXJOLEDv3OMci+7wtDpOF4IN+y82NBOac4= +github.com/decred/dcrd/crypto/blake256 v1.0.0 h1:/8DMNYp9SGi5f0w7uCm6d6M4OU2rGFK09Y2A4Xv7EE0= +github.com/decred/dcrd/crypto/blake256 v1.0.0/go.mod h1:sQl2p6Y26YV+ZOcSTP6thNdn47hh8kt6rqSlvmrXFAc= +github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1 h1:YLtO71vCjJRCBcrPMtQ9nqBsqpA1m5sE92cU+pd5Mcc= +github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1/go.mod h1:hyedUtir6IdtD/7lIxGeCxkaw7y45JueMRL4DIyJDKs= +github.com/emicklei/dot v1.6.2 h1:08GN+DD79cy/tzN6uLCT84+2Wk9u+wvqP+Hkx/dIR8A= +github.com/emicklei/dot v1.6.2/go.mod h1:DeV7GvQtIw4h2u73RKBkkFdvVAz0D9fzeJrgPW6gy/s= +github.com/ethereum/c-kzg-4844/v2 v2.1.3 h1:DQ21UU0VSsuGy8+pcMJHDS0CV1bKmJmxsJYK8l3MiLU= +github.com/ethereum/c-kzg-4844/v2 v2.1.3/go.mod h1:fyNcYI/yAuLWJxf4uzVtS8VDKeoAaRM8G/+ADz/pRdA= +github.com/ethereum/go-bigmodexpfix v0.0.0-20250911101455-f9e208c548ab h1:rvv6MJhy07IMfEKuARQ9TKojGqLVNxQajaXEp/BoqSk= +github.com/ethereum/go-bigmodexpfix v0.0.0-20250911101455-f9e208c548ab/go.mod h1:IuLm4IsPipXKF7CW5Lzf68PIbZ5yl7FFd74l/E0o9A8= +github.com/ethereum/go-verkle v0.2.2 h1:I2W0WjnrFUIzzVPwm8ykY+7pL2d4VhlsePn4j7cnFk8= +github.com/ethereum/go-verkle v0.2.2/go.mod h1:M3b90YRnzqKyyzBEWJGqj8Qff4IDeXnzFw0P9bFw3uk= +github.com/ferranbt/fastssz v0.1.4 h1:OCDB+dYDEQDvAgtAGnTSidK1Pe2tW3nFV40XyMkTeDY= +github.com/ferranbt/fastssz v0.1.4/go.mod h1:Ea3+oeoRGGLGm5shYAeDgu6PGUlcvQhE2fILyD9+tGg= +github.com/getsentry/sentry-go v0.27.0 h1:Pv98CIbtB3LkMWmXi4Joa5OOcwbmnX88sF5qbK3r3Ps= +github.com/getsentry/sentry-go v0.27.0/go.mod 
h1:lc76E2QywIyW8WuBnwl8Lc4bkmQH4+w1gwTf25trprY= +github.com/go-ole/go-ole v1.2.5/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= +github.com/go-ole/go-ole v1.3.0 h1:Dt6ye7+vXGIKZ7Xtk4s6/xVdGDQynvom7xCFEdWr6uE= +github.com/go-ole/go-ole v1.3.0/go.mod h1:5LS6F96DhAwUc7C+1HLexzMXY1xGRSryjyPPKW6zv78= +github.com/gofrs/flock v0.12.1 h1:MTLVXXHf8ekldpJk3AKicLij9MdwOWkZ+a/jHHZby9E= +github.com/gofrs/flock v0.12.1/go.mod h1:9zxTsyu5xtJ9DK+1tFZyibEV7y3uwDxPPfbxeeHCoD0= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= +github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb h1:PBC98N2aIaM3XXiurYmW7fx4GZkL8feAMVq7nEjURHk= +github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/holiman/bloomfilter/v2 v2.0.3 h1:73e0e/V0tCydx14a0SCYS/EWCxgwLZ18CZcZKVu0fao= +github.com/holiman/bloomfilter/v2 v2.0.3/go.mod h1:zpoh+gs7qcpqrHr3dB55AMiJwo0iURXE7ZOP9L9hSkA= +github.com/holiman/uint256 v1.3.2 h1:a9EgMPSC1AAaj1SZL5zIQD3WbwTuHrMGOerLjGmM/TA= +github.com/holiman/uint256 v1.3.2/go.mod h1:EOMSn4q6Nyt9P6efbI3bueV4e1b3dGlUCXeiRV4ng7E= +github.com/klauspost/compress v1.16.0 h1:iULayQNOReoYUe+1qtKOqw9CwJv3aNQu8ivo7lw1HU4= +github.com/klauspost/compress v1.16.0/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= +github.com/klauspost/cpuid/v2 v2.0.4/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= +github.com/klauspost/cpuid/v2 v2.0.9 h1:lgaqFMSdTdQYdZ04uHyN2d/eKdOMyi2YLSvlQIBFYa4= +github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= +github.com/kr/pretty v0.3.1 
h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= +github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= +github.com/leanovate/gopter v0.2.11 h1:vRjThO1EKPb/1NsDXuDrzldR28RLkBflWYcU9CvzWu4= +github.com/leanovate/gopter v0.2.11/go.mod h1:aK3tzZP/C+p1m3SPRE4SYZFGP7jjkuSI4f7Xvpt0S9c= +github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= +github.com/mattn/go-runewidth v0.0.13 h1:lTGmDsbAYt5DmK6OnoV7EuIF1wEIFAcxld6ypU4OSgU= +github.com/mattn/go-runewidth v0.0.13/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= +github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= +github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= +github.com/minio/sha256-simd v1.0.0 h1:v1ta+49hkWZyvaKwrQB8elexRqm6Y0aMLjCNsrYxo6g= +github.com/minio/sha256-simd v1.0.0/go.mod h1:OuYzVNI5vcoYIAmbIvHPl3N3jUzVedXbKy5RFepssQM= +github.com/mitchellh/mapstructure v1.4.1 h1:CpVNEelQCZBooIPDn+AR3NpivK/TIKU8bDxdASFVQag= +github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec= +github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod 
h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_golang v1.15.0 h1:5fCgGYogn0hFdhyhLbw7hEsWxufKtY9klyvdNfFlFhM= +github.com/prometheus/client_golang v1.15.0/go.mod h1:e9yaBhRPU2pPNsZwE+JdQl0KEt1N9XgF6zxWmaC0xOk= +github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4= +github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w= +github.com/prometheus/common v0.42.0 h1:EKsfXEYo4JpWMHH5cg+KOUWeuJSov1Id8zGR8eeI1YM= +github.com/prometheus/common v0.42.0/go.mod h1:xBwqVerjNdUDjgODMpudtOMwlOwf2SaTr1yjz4b7Zbc= +github.com/prometheus/procfs v0.9.0 h1:wzCHvIvM5SxWqYvwgVL7yJY8Lz3PKn49KQtpgMYJfhI= +github.com/prometheus/procfs v0.9.0/go.mod h1:+pB4zwohETzFnmlpe6yd2lSc+0/46IYZRB/chUwxUZY= +github.com/prysmaticlabs/gohashtree v0.0.4-beta h1:H/EbCuXPeTV3lpKeXGPpEV9gsUpkqOOVnWapUyeWro4= +github.com/prysmaticlabs/gohashtree v0.0.4-beta/go.mod h1:BFdtALS+Ffhg3lGQIHv9HDWuHS8cTvHZzrHWxwOtGOs= +github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY= +github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= +github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= +github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= +github.com/shirou/gopsutil v3.21.4-0.20210419000835-c7a38de76ee5+incompatible h1:Bn1aCHHRnjv4Bl16T8rcaFjYSrGrIZvpiGO6P3Q4GpU= +github.com/shirou/gopsutil v3.21.4-0.20210419000835-c7a38de76ee5+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= +github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/supranational/blst 
v0.3.16-0.20250831170142-f48500c1fdbe h1:nbdqkIGOGfUAD54q1s2YBcBz/WcsxCO9HUQ4aGV5hUw= +github.com/supranational/blst v0.3.16-0.20250831170142-f48500c1fdbe/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw= +github.com/tklauser/go-sysconf v0.3.12 h1:0QaGUFOdQaIVdPgfITYzaTegZvdCjmYO52cSFAEVmqU= +github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI= +github.com/tklauser/numcpus v0.6.1 h1:ng9scYS7az0Bk4OZLvrNXNSAO2Pxr1XXRAPyjhIx+Fk= +github.com/tklauser/numcpus v0.6.1/go.mod h1:1XfjsgE2zo8GVw7POkMbHENHzVg3GzmoZ9fESEdAacY= +github.com/weilzkm/zkMIPS/crates/go-runtime/zkvm_runtime v0.0.0-20250915074013-fbc07aa2c6f5 h1:MxKlbmI7Dta6O6Nsc9OAer/rOltjoL11CVLMqCiYnxU= +github.com/weilzkm/zkMIPS/crates/go-runtime/zkvm_runtime v0.0.0-20250915074013-fbc07aa2c6f5/go.mod h1:zk/SUgiiVz2U1ufZ+yM2MHPbD93W25KH5zK3qAxXbT4= +golang.org/x/crypto v0.36.0 h1:AnAEvhDddvBdpY+uR+MyHmuZzzNqXSe/GvuDeob5L34= +golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc= +golang.org/x/exp v0.0.0-20230626212559-97b1e661b5df h1:UA2aFVmmsIlefxMk29Dp2juaUSth8Pyn3Tq5Y5mJGME= +golang.org/x/exp v0.0.0-20230626212559-97b1e661b5df/go.mod h1:FXUEEKJgO7OQYeo8N01OfiKP8RXMtf6e8aTskBGqWdc= +golang.org/x/sync v0.12.0 h1:MHc5BpPuC30uJk597Ri8TV3CNZcTLu6B6z4lJy+g6Jw= +golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= +golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.36.0 h1:KVRy2GtZBrk1cBYA7MKu5bEZFxQk4NIDV6RLVcC8o0k= +golang.org/x/sys v0.36.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +golang.org/x/text v0.23.0 
h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY= +golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4= +google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= +google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/cmd/keeper/main.go b/cmd/keeper/main.go new file mode 100644 index 0000000000..9b459f6f36 --- /dev/null +++ b/cmd/keeper/main.go @@ -0,0 +1,68 @@ +// Copyright 2025 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . 
+ +package main + +import ( + "fmt" + "os" + "runtime/debug" + + "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/core/stateless" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/core/vm" + "github.com/ethereum/go-ethereum/rlp" +) + +// Payload represents the input data for stateless execution containing +// a block and its associated witness data for verification. +type Payload struct { + ChainID uint64 + Block *types.Block + Witness *stateless.Witness +} + +func init() { + debug.SetGCPercent(-1) // Disable garbage collection +} + +func main() { + input := getInput() + var payload Payload + rlp.DecodeBytes(input, &payload) + + chainConfig, err := getChainConfig(payload.ChainID) + if err != nil { + fmt.Fprintf(os.Stderr, "failed to get chain config: %v\n", err) + os.Exit(13) + } + vmConfig := vm.Config{} + + crossStateRoot, crossReceiptRoot, err := core.ExecuteStateless(chainConfig, vmConfig, payload.Block, payload.Witness) + if err != nil { + fmt.Fprintf(os.Stderr, "stateless self-validation failed: %v\n", err) + os.Exit(10) + } + if crossStateRoot != payload.Block.Root() { + fmt.Fprintf(os.Stderr, "stateless self-validation root mismatch (cross: %x local: %x)\n", crossStateRoot, payload.Block.Root()) + os.Exit(11) + } + if crossReceiptRoot != payload.Block.ReceiptHash() { + fmt.Fprintf(os.Stderr, "stateless self-validation receipt root mismatch (cross: %x local: %x)\n", crossReceiptRoot, payload.Block.ReceiptHash()) + os.Exit(12) + } +} diff --git a/build/tools/tools.go b/cmd/keeper/stubs.go similarity index 68% rename from build/tools/tools.go rename to cmd/keeper/stubs.go index e9e2241d2f..04a3bc735b 100644 --- a/build/tools/tools.go +++ b/cmd/keeper/stubs.go @@ -1,4 +1,4 @@ -// Copyright 2019 The go-ethereum Authors +// Copyright 2025 The go-ethereum Authors // This file is part of the go-ethereum library. 
// // The go-ethereum library is free software: you can redistribute it and/or modify @@ -14,14 +14,13 @@ // You should have received a copy of the GNU Lesser General Public License // along with the go-ethereum library. If not, see . -//go:build tools -// +build tools +//go:build !example && !ziren -package tools +package main -import ( - // Tool imports for go:generate. - _ "github.com/fjl/gencodec" - _ "golang.org/x/tools/cmd/stringer" - _ "google.golang.org/protobuf/cmd/protoc-gen-go" -) +// getInput is a stub implementation for when no platform-specific build tags are set. +// This allows golangci-lint to typecheck the code without errors. +// The actual implementations are provided in platform-specific files. +func getInput() []byte { + panic("stub") +} diff --git a/cmd/utils/cmd.go b/cmd/utils/cmd.go index b332a060de..db7bd691d8 100644 --- a/cmd/utils/cmd.go +++ b/cmd/utils/cmd.go @@ -48,6 +48,7 @@ import ( "github.com/ethereum/go-ethereum/node" "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/rlp" + "github.com/ethereum/go-ethereum/triedb" "github.com/urfave/cli/v2" ) @@ -567,9 +568,64 @@ func ExportPreimages(db ethdb.Database, fn string) error { return nil } +// StateIterator is a temporary structure for traversing state in order. It serves +// as an aggregator for both path scheme and hash scheme implementations and should +// be removed once the hash scheme is fully deprecated. +type StateIterator struct { + scheme string + root common.Hash + triedb *triedb.Database + snapshots *snapshot.Tree +} + +// NewStateIterator constructs the state iterator with the specific root. 
+func NewStateIterator(triedb *triedb.Database, db ethdb.Database, root common.Hash) (*StateIterator, error) { + if triedb.Scheme() == rawdb.PathScheme { + return &StateIterator{ + scheme: rawdb.PathScheme, + root: root, + triedb: triedb, + }, nil + } + config := snapshot.Config{ + CacheSize: 256, + Recovery: false, + NoBuild: true, + AsyncBuild: false, + } + snapshots, err := snapshot.New(config, db, triedb, root) + if err != nil { + return nil, err + } + return &StateIterator{ + scheme: rawdb.HashScheme, + root: root, + triedb: triedb, + snapshots: snapshots, + }, nil +} + +// AccountIterator creates a new account iterator for the specified root hash and +// seeks to a starting account hash. +func (it *StateIterator) AccountIterator(root common.Hash, start common.Hash) (snapshot.AccountIterator, error) { + if it.scheme == rawdb.PathScheme { + return it.triedb.AccountIterator(root, start) + } + return it.snapshots.AccountIterator(root, start) +} + +// StorageIterator creates a new storage iterator for the specified root hash and +// account. The iterator will be moved to the specific start position. +func (it *StateIterator) StorageIterator(root common.Hash, accountHash common.Hash, start common.Hash) (snapshot.StorageIterator, error) { + if it.scheme == rawdb.PathScheme { + return it.triedb.StorageIterator(root, accountHash, start) + } + return it.snapshots.StorageIterator(root, accountHash, start) +} + // ExportSnapshotPreimages exports the preimages corresponding to the enumeration of // the snapshot for a given root. 
-func ExportSnapshotPreimages(chaindb ethdb.Database, snaptree *snapshot.Tree, fn string, root common.Hash) error { +func ExportSnapshotPreimages(chaindb ethdb.Database, stateIt *StateIterator, fn string, root common.Hash) error { log.Info("Exporting preimages", "file", fn) fh, err := os.OpenFile(fn, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, os.ModePerm) @@ -602,7 +658,7 @@ func ExportSnapshotPreimages(chaindb ethdb.Database, snaptree *snapshot.Tree, fn ) go func() { defer close(hashCh) - accIt, err := snaptree.AccountIterator(root, common.Hash{}) + accIt, err := stateIt.AccountIterator(root, common.Hash{}) if err != nil { log.Error("Failed to create account iterator", "error", err) return @@ -619,7 +675,7 @@ func ExportSnapshotPreimages(chaindb ethdb.Database, snaptree *snapshot.Tree, fn hashCh <- hashAndPreimageSize{Hash: accIt.Hash(), Size: common.AddressLength} if acc.Root != (common.Hash{}) && acc.Root != types.EmptyRootHash { - stIt, err := snaptree.StorageIterator(root, accIt.Hash(), common.Hash{}) + stIt, err := stateIt.StorageIterator(root, accIt.Hash(), common.Hash{}) if err != nil { log.Error("Failed to create storage iterator", "error", err) return diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index cbc1d925e4..c9da08578c 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -24,7 +24,6 @@ import ( "encoding/json" "errors" "fmt" - "math" "math/big" "net" "net/http" @@ -248,6 +247,16 @@ var ( Usage: "Manually specify the Osaka fork timestamp, overriding the bundled setting", Category: flags.EthCategory, } + OverrideBPO1 = &cli.Uint64Flag{ + Name: "override.bpo1", + Usage: "Manually specify the bpo1 fork timestamp, overriding the bundled setting", + Category: flags.EthCategory, + } + OverrideBPO2 = &cli.Uint64Flag{ + Name: "override.bpo2", + Usage: "Manually specify the bpo2 fork timestamp, overriding the bundled setting", + Category: flags.EthCategory, + } OverrideVerkle = &cli.Uint64Flag{ Name: "override.verkle", Usage: "Manually specify the 
Verkle fork timestamp, overriding the bundled setting", @@ -270,6 +279,12 @@ var ( Usage: "Scheme to use for storing ethereum state ('hash' or 'path')", Category: flags.StateCategory, } + StateSizeTrackingFlag = &cli.BoolFlag{ + Name: "state.size-tracking", + Usage: "Enable state size tracking, retrieve state size with debug_stateSize.", + Value: ethconfig.Defaults.EnableStateSizeTracking, + Category: flags.StateCategory, + } StateHistoryFlag = &cli.Uint64Flag{ Name: "history.state", Usage: "Number of recent blocks to retain state history for, only relevant in state.scheme=path (default = 90,000 blocks, 0 = entire chain)", @@ -565,6 +580,16 @@ var ( Value: "{}", Category: flags.VMCategory, } + VMWitnessStatsFlag = &cli.BoolFlag{ + Name: "vmwitnessstats", + Usage: "Enable collection of witness trie access statistics (automatically enables witness generation)", + Category: flags.VMCategory, + } + VMStatelessSelfValidationFlag = &cli.BoolFlag{ + Name: "stateless-self-validation", + Usage: "Generate execution witnesses and self-check against them (testing purpose)", + Category: flags.VMCategory, + } // API options. 
RPCGlobalGasCapFlag = &cli.Uint64Flag{ Name: "rpc.gascap", @@ -584,6 +609,12 @@ var ( Value: ethconfig.Defaults.RPCTxFeeCap, Category: flags.APICategory, } + RPCGlobalLogQueryLimit = &cli.IntFlag{ + Name: "rpc.logquerylimit", + Usage: "Maximum number of alternative addresses or topics allowed per search position in eth_getLogs filter criteria (0 = no cap)", + Value: ethconfig.Defaults.LogQueryLimit, + Category: flags.APICategory, + } // Authenticated RPC HTTP settings AuthListenFlag = &cli.StringFlag{ Name: "authrpc.addr", @@ -1588,7 +1619,7 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) { } // Ensure Go's GC ignores the database cache for trigger percentage cache := ctx.Int(CacheFlag.Name) - gogc := math.Max(20, math.Min(100, 100/(float64(cache)/1024))) + gogc := max(20, min(100, 100/(float64(cache)/1024))) log.Debug("Sanitizing Go's GC trigger", "percent", int(gogc)) godebug.SetGCPercent(int(gogc)) @@ -1683,6 +1714,9 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) { if ctx.IsSet(CacheLogSizeFlag.Name) { cfg.FilterLogCacheSize = ctx.Int(CacheLogSizeFlag.Name) } + if ctx.IsSet(RPCGlobalLogQueryLimit.Name) { + cfg.LogQueryLimit = ctx.Int(RPCGlobalLogQueryLimit.Name) + } if !ctx.Bool(SnapshotFlag.Name) || cfg.SnapshotCache == 0 { // If snap-sync is requested, this flag is also required if cfg.SyncMode == ethconfig.SnapSync { @@ -1701,6 +1735,16 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) { if ctx.IsSet(VMEnableDebugFlag.Name) { cfg.EnablePreimageRecording = ctx.Bool(VMEnableDebugFlag.Name) } + if ctx.IsSet(VMWitnessStatsFlag.Name) { + cfg.EnableWitnessStats = ctx.Bool(VMWitnessStatsFlag.Name) + } + if ctx.IsSet(VMStatelessSelfValidationFlag.Name) { + cfg.StatelessSelfValidation = ctx.Bool(VMStatelessSelfValidationFlag.Name) + } + // Auto-enable StatelessSelfValidation when witness stats are enabled + if ctx.Bool(VMWitnessStatsFlag.Name) { + cfg.StatelessSelfValidation 
= true + } if ctx.IsSet(RPCGlobalGasCapFlag.Name) { cfg.RPCGasCap = ctx.Uint64(RPCGlobalGasCapFlag.Name) @@ -1726,6 +1770,9 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) { cfg.EthDiscoveryURLs = SplitAndTrim(urls) } } + if ctx.Bool(StateSizeTrackingFlag.Name) { + cfg.EnableStateSizeTracking = true + } // Override any default configs for hard coded networks. switch { case ctx.Bool(MainnetFlag.Name): @@ -1893,11 +1940,15 @@ func MakeBeaconLightConfig(ctx *cli.Context) bparams.ClientConfig { } else { Fatalf("Could not parse --%s: %v", BeaconGenesisRootFlag.Name, err) } - configFile := ctx.String(BeaconConfigFlag.Name) - if err := config.ChainConfig.LoadForks(configFile); err != nil { - Fatalf("Could not load beacon chain config '%s': %v", configFile, err) + configPath := ctx.String(BeaconConfigFlag.Name) + file, err := os.ReadFile(configPath) + if err != nil { + Fatalf("failed to read beacon chain config file '%s': %v", configPath, err) } - log.Info("Using custom beacon chain config", "file", configFile) + if err := config.ChainConfig.LoadForks(file); err != nil { + Fatalf("Could not load beacon chain config '%s': %v", configPath, err) + } + log.Info("Using custom beacon chain config", "file", configPath) } else { if ctx.IsSet(BeaconGenesisRootFlag.Name) { Fatalf("Genesis root is specified but custom beacon chain config is missing") @@ -1988,7 +2039,8 @@ func RegisterGraphQLService(stack *node.Node, backend ethapi.Backend, filterSyst // RegisterFilterAPI adds the eth log filtering RPC API to the node. 
func RegisterFilterAPI(stack *node.Node, backend ethapi.Backend, ethcfg *ethconfig.Config) *filters.FilterSystem { filterSystem := filters.NewFilterSystem(backend, filters.Config{ - LogCacheSize: ethcfg.FilterLogCacheSize, + LogCacheSize: ethcfg.FilterLogCacheSize, + LogQueryLimit: ethcfg.LogQueryLimit, }) stack.RegisterAPIs([]rpc.API{{ Namespace: "eth", @@ -2208,6 +2260,9 @@ func MakeChain(ctx *cli.Context, stack *node.Node, readonly bool) (*core.BlockCh // - DATADIR/triedb/merkle.journal // - DATADIR/triedb/verkle.journal TrieJournalDirectory: stack.ResolvePath("triedb"), + + // Enable state size tracking if enabled + StateSizeTracking: ctx.Bool(StateSizeTrackingFlag.Name), } if options.ArchiveMode && !options.Preimages { options.Preimages = true @@ -2231,6 +2286,8 @@ func MakeChain(ctx *cli.Context, stack *node.Node, readonly bool) (*core.BlockCh } vmcfg := vm.Config{ EnablePreimageRecording: ctx.Bool(VMEnableDebugFlag.Name), + EnableWitnessStats: ctx.Bool(VMWitnessStatsFlag.Name), + StatelessSelfValidation: ctx.Bool(VMStatelessSelfValidationFlag.Name) || ctx.Bool(VMWitnessStatsFlag.Name), } if ctx.IsSet(VMTraceFlag.Name) { if name := ctx.String(VMTraceFlag.Name); name != "" { @@ -2269,7 +2326,7 @@ func MakeConsolePreloads(ctx *cli.Context) []string { } // MakeTrieDatabase constructs a trie database based on the configured scheme. 
-func MakeTrieDatabase(ctx *cli.Context, disk ethdb.Database, preimage bool, readOnly bool, isVerkle bool) *triedb.Database { +func MakeTrieDatabase(ctx *cli.Context, stack *node.Node, disk ethdb.Database, preimage bool, readOnly bool, isVerkle bool) *triedb.Database { config := &triedb.Config{ Preimages: preimage, IsVerkle: isVerkle, @@ -2285,10 +2342,13 @@ func MakeTrieDatabase(ctx *cli.Context, disk ethdb.Database, preimage bool, read config.HashDB = hashdb.Defaults return triedb.NewDatabase(disk, config) } + var pathConfig pathdb.Config if readOnly { - config.PathDB = pathdb.ReadOnly + pathConfig = *pathdb.ReadOnly } else { - config.PathDB = pathdb.Defaults + pathConfig = *pathdb.Defaults } + pathConfig.JournalDirectory = stack.ResolvePath("triedb") + config.PathDB = &pathConfig return triedb.NewDatabase(disk, config) } diff --git a/common/hexutil/json_test.go b/common/hexutil/json_test.go index a014438458..3dc19680f9 100644 --- a/common/hexutil/json_test.go +++ b/common/hexutil/json_test.go @@ -408,7 +408,6 @@ func TestUnmarshalFixedUnprefixedText(t *testing.T) { {input: "0x2", wantErr: ErrOddLength}, {input: "2", wantErr: ErrOddLength}, {input: "4444", wantErr: errors.New("hex string has length 4, want 8 for x")}, - {input: "4444", wantErr: errors.New("hex string has length 4, want 8 for x")}, // check that output is not modified for partially correct input {input: "444444gg", wantErr: ErrSyntax, want: []byte{0, 0, 0, 0}}, {input: "0x444444gg", wantErr: ErrSyntax, want: []byte{0, 0, 0, 0}}, diff --git a/common/math/integer_test.go b/common/math/integer_test.go index 4643a43f20..feb761c6dc 100644 --- a/common/math/integer_test.go +++ b/common/math/integer_test.go @@ -110,7 +110,7 @@ func TestMustParseUint64(t *testing.T) { func TestMustParseUint64Panic(t *testing.T) { defer func() { if recover() == nil { - t.Error("MustParseBig should've panicked") + t.Error("MustParseUint64 should've panicked") } }() MustParseUint64("ggg") diff --git 
a/consensus/beacon/consensus.go b/consensus/beacon/consensus.go index 196cbc857c..84926c3d0b 100644 --- a/consensus/beacon/consensus.go +++ b/consensus/beacon/consensus.go @@ -71,16 +71,6 @@ func New(ethone consensus.Engine) *Beacon { return &Beacon{ethone: ethone} } -// isPostMerge reports whether the given block number is assumed to be post-merge. -// Here we check the MergeNetsplitBlock to allow configuring networks with a PoW or -// PoA chain for unit testing purposes. -func isPostMerge(config *params.ChainConfig, blockNum uint64, timestamp uint64) bool { - mergedAtGenesis := config.TerminalTotalDifficulty != nil && config.TerminalTotalDifficulty.Sign() == 0 - return mergedAtGenesis || - config.MergeNetsplitBlock != nil && blockNum >= config.MergeNetsplitBlock.Uint64() || - config.ShanghaiTime != nil && timestamp >= *config.ShanghaiTime -} - // Author implements consensus.Engine, returning the verified author of the block. func (beacon *Beacon) Author(header *types.Header) (common.Address, error) { if !beacon.IsPoSHeader(header) { @@ -328,7 +318,7 @@ func (beacon *Beacon) verifyHeaders(chain consensus.ChainHeaderReader, headers [ // Prepare implements consensus.Engine, initializing the difficulty field of a // header to conform to the beacon protocol. The changes are done inline. func (beacon *Beacon) Prepare(chain consensus.ChainHeaderReader, header *types.Header) error { - if !isPostMerge(chain.Config(), header.Number.Uint64(), header.Time) { + if !chain.Config().IsPostMerge(header.Number.Uint64(), header.Time) { return beacon.ethone.Prepare(chain, header) } header.Difficulty = beaconDifficulty @@ -442,7 +432,7 @@ func (beacon *Beacon) SealHash(header *types.Header) common.Hash { // the difficulty that a new block should have when created at time // given the parent block's time and difficulty. 
func (beacon *Beacon) CalcDifficulty(chain consensus.ChainHeaderReader, time uint64, parent *types.Header) *big.Int { - if !isPostMerge(chain.Config(), parent.Number.Uint64()+1, time) { + if !chain.Config().IsPostMerge(parent.Number.Uint64()+1, time) { return beacon.ethone.CalcDifficulty(chain, time, parent) } return beaconDifficulty diff --git a/core/bench_test.go b/core/bench_test.go index 2830022662..932188f8e2 100644 --- a/core/bench_test.go +++ b/core/bench_test.go @@ -301,7 +301,7 @@ func makeChainForBench(db ethdb.Database, genesis *Genesis, full bool, count uin func benchWriteChain(b *testing.B, full bool, count uint64) { genesis := &Genesis{Config: params.AllEthashProtocolChanges} - for i := 0; i < b.N; i++ { + for b.Loop() { pdb, err := pebble.New(b.TempDir(), 1024, 128, "", false) if err != nil { b.Fatalf("error opening database: %v", err) @@ -326,9 +326,7 @@ func benchReadChain(b *testing.B, full bool, count uint64) { db.Close() options := DefaultConfig().WithArchive(true) b.ReportAllocs() - b.ResetTimer() - - for i := 0; i < b.N; i++ { + for b.Loop() { pdb, err = pebble.New(dir, 1024, 128, "", false) if err != nil { b.Fatalf("error opening database: %v", err) diff --git a/core/blockchain.go b/core/blockchain.go index 5205483af9..30f3da3004 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -21,7 +21,6 @@ import ( "errors" "fmt" "io" - "math" "math/big" "runtime" "slices" @@ -168,10 +167,13 @@ type BlockChainConfig struct { TrieNoAsyncFlush bool // Whether the asynchronous buffer flushing is disallowed TrieJournalDirectory string // Directory path to the journal used for persisting trie data across node restarts - Preimages bool // Whether to store preimage of trie key to the disk - StateHistory uint64 // Number of blocks from head whose state histories are reserved. 
- StateScheme string // Scheme used to store ethereum states and merkle tree nodes on top - ArchiveMode bool // Whether to enable the archive mode + Preimages bool // Whether to store preimage of trie key to the disk + StateScheme string // Scheme used to store ethereum states and merkle tree nodes on top + ArchiveMode bool // Whether to enable the archive mode + + // Number of blocks from the chain head for which state histories are retained. + // If set to 0, all state histories across the entire chain will be retained; + StateHistory uint64 // State snapshot related options SnapshotLimit int // Memory allowance (MB) to use for caching snapshot entries in memory @@ -193,6 +195,9 @@ type BlockChainConfig struct { // If the value is zero, all transactions of the entire chain will be indexed. // If the value is -1, indexing is disabled. TxLookupLimit int64 + + // StateSizeTracking indicates whether the state size tracking is enabled. + StateSizeTracking bool } // DefaultConfig returns the default config. @@ -330,6 +335,7 @@ type BlockChain struct { prefetcher Prefetcher processor Processor // Block transaction processor interface logger *tracing.Hooks + stateSizer *state.SizeTracker // State size tracking lastForkReadyAlert time.Time // Last time there was a fork readiness print out } @@ -523,6 +529,17 @@ func NewBlockChain(db ethdb.Database, genesis *Genesis, engine consensus.Engine, if bc.cfg.TxLookupLimit >= 0 { bc.txIndexer = newTxIndexer(uint64(bc.cfg.TxLookupLimit), bc) } + + // Start state size tracker + if bc.cfg.StateSizeTracking { + stateSizer, err := state.NewSizeTracker(bc.db, bc.triedb) + if err == nil { + bc.stateSizer = stateSizer + log.Info("Enabled state size metrics") + } else { + log.Info("Failed to setup size tracker", "err", err) + } + } return bc, nil } @@ -1249,6 +1266,10 @@ func (bc *BlockChain) stopWithoutSaving() { // Signal shutdown to all goroutines. 
bc.InterruptInsert(true) + // Stop state size tracker + if bc.stateSizer != nil { + bc.stateSizer.Stop() + } // Now wait for all chain modifications to end and persistent goroutines to exit. // // Note: Close waits for the mutex to become available, i.e. any running chain @@ -1583,10 +1604,14 @@ func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types. log.Crit("Failed to write block into disk", "err", err) } // Commit all cached state changes into underlying memory database. - root, err := statedb.Commit(block.NumberU64(), bc.chainConfig.IsEIP158(block.Number()), bc.chainConfig.IsCancun(block.Number(), block.Time())) + root, stateUpdate, err := statedb.CommitWithUpdate(block.NumberU64(), bc.chainConfig.IsEIP158(block.Number()), bc.chainConfig.IsCancun(block.Number(), block.Time())) if err != nil { return err } + // Emit the state update to the state sizestats if it's active + if bc.stateSizer != nil { + bc.stateSizer.Notify(stateUpdate) + } // If node is running in path mode, skip explicit gc operation // which is unnecessary in this mode. if bc.triedb.Scheme() == rawdb.PathScheme { @@ -1881,7 +1906,7 @@ func (bc *BlockChain) insertChain(chain types.Blocks, setHead bool, makeWitness } // The traced section of block import. start := time.Now() - res, err := bc.processBlock(parent.Root, block, setHead, makeWitness && len(chain) == 1) + res, err := bc.ProcessBlock(parent.Root, block, setHead, makeWitness && len(chain) == 1) if err != nil { return nil, it.index, err } @@ -1947,9 +1972,13 @@ type blockProcessingResult struct { witness *stateless.Witness } -// processBlock executes and validates the given block. If there was no error +func (bpr *blockProcessingResult) Witness() *stateless.Witness { + return bpr.witness +} + +// ProcessBlock executes and validates the given block. If there was no error // it writes the block and associated state to database. 
-func (bc *BlockChain) processBlock(parentRoot common.Hash, block *types.Block, setHead bool, makeWitness bool) (_ *blockProcessingResult, blockEndErr error) { +func (bc *BlockChain) ProcessBlock(parentRoot common.Hash, block *types.Block, setHead bool, makeWitness bool) (_ *blockProcessingResult, blockEndErr error) { var ( err error startTime = time.Now() @@ -2127,7 +2156,7 @@ func (bc *BlockChain) processBlock(parentRoot common.Hash, block *types.Block, s } // Report the collected witness statistics if witnessStats != nil { - witnessStats.ReportMetrics() + witnessStats.ReportMetrics(block.NumberU64()) } // Update the metrics touched during block commit @@ -2633,13 +2662,11 @@ func (bc *BlockChain) reportBlock(block *types.Block, res *ProcessResult, err er // logForkReadiness will write a log when a future fork is scheduled, but not // active. This is useful so operators know their client is ready for the fork. func (bc *BlockChain) logForkReadiness(block *types.Block) { - config := bc.Config() - current, last := config.LatestFork(block.Time()), config.LatestFork(math.MaxUint64) + current := bc.Config().LatestFork(block.Time()) - // Short circuit if the timestamp of the last fork is undefined, - // or if the network has already passed the last configured fork. - t := config.Timestamp(last) - if t == nil || current >= last { + // Short circuit if the timestamp of the last fork is undefined. 
+ t := bc.Config().Timestamp(current + 1) + if t == nil { return } at := time.Unix(int64(*t), 0) @@ -2649,7 +2676,7 @@ func (bc *BlockChain) logForkReadiness(block *types.Block) { // - Enough time has passed since last alert now := time.Now() if now.Before(at) && now.After(bc.lastForkReadyAlert.Add(forkReadyInterval)) { - log.Info("Ready for fork activation", "fork", last, "date", at.Format(time.RFC822), + log.Info("Ready for fork activation", "fork", current+1, "date", at.Format(time.RFC822), "remaining", time.Until(at).Round(time.Second), "timestamp", at.Unix()) bc.lastForkReadyAlert = time.Now() } @@ -2788,3 +2815,8 @@ func (bc *BlockChain) SetTrieFlushInterval(interval time.Duration) { func (bc *BlockChain) GetTrieFlushInterval() time.Duration { return time.Duration(bc.flushInterval.Load()) } + +// StateSizer returns the state size tracker, or nil if it's not initialized +func (bc *BlockChain) StateSizer() *state.SizeTracker { + return bc.stateSizer +} diff --git a/core/filtermaps/math.go b/core/filtermaps/math.go index 33ac07f721..68fd6debd6 100644 --- a/core/filtermaps/math.go +++ b/core/filtermaps/math.go @@ -22,7 +22,7 @@ import ( "fmt" "hash/fnv" "math" - "sort" + "slices" "github.com/ethereum/go-ethereum/common" ) @@ -245,7 +245,7 @@ func (p *Params) potentialMatches(rows []FilterRow, mapIndex uint32, logValue co panic("potentialMatches: insufficient list of row alternatives") } } - sort.Sort(results) + slices.Sort(results) // remove duplicates j := 0 for i, match := range results { @@ -260,12 +260,7 @@ func (p *Params) potentialMatches(rows []FilterRow, mapIndex uint32, logValue co // potentialMatches is a strictly monotonically increasing list of log value // indices in the range of a filter map that are potential matches for certain // filter criteria. -// potentialMatches implements sort.Interface. // Note that nil is used as a wildcard and therefore means that all log value // indices in the filter map range are potential matches. 
If there are no // potential matches in the given map's range then an empty slice should be used. type potentialMatches []uint64 - -func (p potentialMatches) Len() int { return len(p) } -func (p potentialMatches) Less(i, j int) bool { return p[i] < p[j] } -func (p potentialMatches) Swap(i, j int) { p[i], p[j] = p[j], p[i] } diff --git a/core/forkid/forkid_test.go b/core/forkid/forkid_test.go index 413e4d77a8..dc6e6fe817 100644 --- a/core/forkid/forkid_test.go +++ b/core/forkid/forkid_test.go @@ -95,8 +95,14 @@ func TestCreation(t *testing.T) { {1735372, 1706655071, ID{Hash: checksumToBytes(0xf7f9bc08), Next: 1706655072}}, // Last Shanghai block {1735372, 1706655072, ID{Hash: checksumToBytes(0x88cf81d9), Next: 1741159776}}, // First Cancun block {1735372, 1741159775, ID{Hash: checksumToBytes(0x88cf81d9), Next: 1741159776}}, // Last Cancun block - {1735372, 1741159776, ID{Hash: checksumToBytes(0xed88b5fd), Next: 0}}, // First Prague block - {1735372, 2741159776, ID{Hash: checksumToBytes(0xed88b5fd), Next: 0}}, // Future Prague block + {1735372, 1741159776, ID{Hash: checksumToBytes(0xed88b5fd), Next: 1760427360}}, // First Prague block + {1735372, 1760427359, ID{Hash: checksumToBytes(0xed88b5fd), Next: 1760427360}}, // Last Prague block + {1735372, 1760427360, ID{Hash: checksumToBytes(0xe2ae4999), Next: 1761017184}}, // First Osaka block + {1735372, 1761017183, ID{Hash: checksumToBytes(0xe2ae4999), Next: 1761017184}}, // Last Osaka block + {1735372, 1761017184, ID{Hash: checksumToBytes(0x56078a1e), Next: 1761607008}}, // First BPO1 block + {1735372, 1761607007, ID{Hash: checksumToBytes(0x56078a1e), Next: 1761607008}}, // Last BPO1 block + {1735372, 1761607008, ID{Hash: checksumToBytes(0x268956b6), Next: 0}}, // First BPO2 block + {1735372, 2000000000, ID{Hash: checksumToBytes(0x268956b6), Next: 0}}, // Future BPO2 block }, }, // Holesky test cases @@ -110,8 +116,14 @@ func TestCreation(t *testing.T) { {123, 1707305663, ID{Hash: checksumToBytes(0xfd4f016b), Next: 
1707305664}}, // Last Shanghai block {123, 1707305664, ID{Hash: checksumToBytes(0x9b192ad0), Next: 1740434112}}, // First Cancun block {123, 1740434111, ID{Hash: checksumToBytes(0x9b192ad0), Next: 1740434112}}, // Last Cancun block - {123, 1740434112, ID{Hash: checksumToBytes(0xdfbd9bed), Next: 0}}, // First Prague block - {123, 2740434112, ID{Hash: checksumToBytes(0xdfbd9bed), Next: 0}}, // Future Prague block + {123, 1740434112, ID{Hash: checksumToBytes(0xdfbd9bed), Next: 1759308480}}, // First Prague block + {123, 1759308479, ID{Hash: checksumToBytes(0xdfbd9bed), Next: 1759308480}}, // Last Prague block + {123, 1759308480, ID{Hash: checksumToBytes(0x783def52), Next: 1759800000}}, // First Osaka block + {123, 1759799999, ID{Hash: checksumToBytes(0x783def52), Next: 1759800000}}, // Last Osaka block + {123, 1759800000, ID{Hash: checksumToBytes(0xa280a45c), Next: 1760389824}}, // First BPO1 block + {123, 1760389823, ID{Hash: checksumToBytes(0xa280a45c), Next: 1760389824}}, // Last BPO1 block + {123, 1760389824, ID{Hash: checksumToBytes(0x9bc6cb31), Next: 0}}, // First BPO2 block + {123, 2000000000, ID{Hash: checksumToBytes(0x9bc6cb31), Next: 0}}, // Future BPO2 block }, }, // Hoodi test cases @@ -121,8 +133,14 @@ func TestCreation(t *testing.T) { []testcase{ {0, 0, ID{Hash: checksumToBytes(0xbef71d30), Next: 1742999832}}, // Unsynced, last Frontier, Homestead, Tangerine, Spurious, Byzantium, Constantinople, Petersburg, Istanbul, Berlin, London, Paris, Shanghai, Cancun block {123, 1742999831, ID{Hash: checksumToBytes(0xbef71d30), Next: 1742999832}}, // Last Cancun block - {123, 1742999832, ID{Hash: checksumToBytes(0x0929e24e), Next: 0}}, // First Prague block - {123, 2740434112, ID{Hash: checksumToBytes(0x0929e24e), Next: 0}}, // Future Prague block + {123, 1742999832, ID{Hash: checksumToBytes(0x0929e24e), Next: 1761677592}}, // First Prague block + {123, 1761677591, ID{Hash: checksumToBytes(0x0929e24e), Next: 1761677592}}, // Last Prague block + {123, 1761677592,
ID{Hash: checksumToBytes(0xe7e0e7ff), Next: 1762365720}}, // First Osaka block + {123, 1762365719, ID{Hash: checksumToBytes(0xe7e0e7ff), Next: 1762365720}}, // Last Osaka block + {123, 1762365720, ID{Hash: checksumToBytes(0x3893353e), Next: 1762955544}}, // First BPO1 block + {123, 1762955543, ID{Hash: checksumToBytes(0x3893353e), Next: 1762955544}}, // Last BPO1 block + {123, 1762955544, ID{Hash: checksumToBytes(0x23aa1351), Next: 0}}, // First BPO2 block + {123, 2000000000, ID{Hash: checksumToBytes(0x23aa1351), Next: 0}}, // Future BPO2 block }, }, } diff --git a/core/genesis.go b/core/genesis.go index f1a620da57..2fd044c70a 100644 --- a/core/genesis.go +++ b/core/genesis.go @@ -153,7 +153,7 @@ func hashAlloc(ga *types.GenesisAlloc, isVerkle bool) (common.Hash, error) { if account.Balance != nil { statedb.AddBalance(addr, uint256.MustFromBig(account.Balance), tracing.BalanceIncreaseGenesisBalance) } - statedb.SetCode(addr, account.Code) + statedb.SetCode(addr, account.Code, tracing.CodeChangeGenesis) statedb.SetNonce(addr, account.Nonce, tracing.NonceChangeGenesis) for key, value := range account.Storage { statedb.SetState(addr, key, value) @@ -179,7 +179,7 @@ func flushAlloc(ga *types.GenesisAlloc, triedb *triedb.Database) (common.Hash, e // already captures the allocations. statedb.AddBalance(addr, uint256.MustFromBig(account.Balance), tracing.BalanceIncreaseGenesisBalance) } - statedb.SetCode(addr, account.Code) + statedb.SetCode(addr, account.Code, tracing.CodeChangeGenesis) statedb.SetNonce(addr, account.Nonce, tracing.NonceChangeGenesis) for key, value := range account.Storage { statedb.SetState(addr, key, value) @@ -259,6 +259,8 @@ func (e *GenesisMismatchError) Error() string { // ChainOverrides contains the changes to chain config. 
type ChainOverrides struct { OverrideOsaka *uint64 + OverrideBPO1 *uint64 + OverrideBPO2 *uint64 OverrideVerkle *uint64 } @@ -270,6 +272,12 @@ func (o *ChainOverrides) apply(cfg *params.ChainConfig) error { if o.OverrideOsaka != nil { cfg.OsakaTime = o.OverrideOsaka } + if o.OverrideBPO1 != nil { + cfg.BPO1Time = o.OverrideBPO1 + } + if o.OverrideBPO2 != nil { + cfg.BPO2Time = o.OverrideBPO2 + } if o.OverrideVerkle != nil { cfg.VerkleTime = o.OverrideVerkle } @@ -514,6 +522,11 @@ func (g *Genesis) toBlockWithRoot(root common.Hash) *types.Block { if head.BlobGasUsed == nil { head.BlobGasUsed = new(uint64) } + } else { + if g.ExcessBlobGas != nil { + log.Warn("Invalid genesis, unexpected ExcessBlobGas set before Cancun, allowing it for testing purposes") + head.ExcessBlobGas = g.ExcessBlobGas + } } if conf.IsPrague(num, g.Timestamp) { head.RequestsHash = &types.EmptyRequestsHash diff --git a/core/overlay/state_transition.go b/core/overlay/state_transition.go index 90b5c9431a..67ca0f9671 100644 --- a/core/overlay/state_transition.go +++ b/core/overlay/state_transition.go @@ -60,6 +60,7 @@ func (ts *TransitionState) Copy() *TransitionState { CurrentSlotHash: ts.CurrentSlotHash, CurrentPreimageOffset: ts.CurrentPreimageOffset, StorageProcessed: ts.StorageProcessed, + BaseRoot: ts.BaseRoot, } if ts.CurrentAccountAddress != nil { addr := *ts.CurrentAccountAddress diff --git a/core/rawdb/accessors_chain_test.go b/core/rawdb/accessors_chain_test.go index 196f3dac8f..819788b4da 100644 --- a/core/rawdb/accessors_chain_test.go +++ b/core/rawdb/accessors_chain_test.go @@ -246,7 +246,7 @@ func TestBadBlockStorage(t *testing.T) { } for i := 0; i < len(badBlocks)-1; i++ { if badBlocks[i].NumberU64() < badBlocks[i+1].NumberU64() { - t.Fatalf("The bad blocks are not sorted #[%d](%d) < #[%d](%d)", i, i+1, badBlocks[i].NumberU64(), badBlocks[i+1].NumberU64()) + t.Fatalf("The bad blocks are not sorted #[%d](%d) < #[%d](%d)", i, badBlocks[i].NumberU64(), i+1, badBlocks[i+1].NumberU64()) 
} } @@ -511,7 +511,7 @@ func TestWriteAncientHeaderChain(t *testing.T) { t.Fatalf("unexpected body returned") } if blob := ReadReceiptsRLP(db, header.Hash(), header.Number.Uint64()); len(blob) != 0 { - t.Fatalf("unexpected body returned") + t.Fatalf("unexpected receipts returned") } } } diff --git a/core/rawdb/accessors_metadata.go b/core/rawdb/accessors_metadata.go index 859566f722..6996031be2 100644 --- a/core/rawdb/accessors_metadata.go +++ b/core/rawdb/accessors_metadata.go @@ -174,16 +174,3 @@ func UpdateUncleanShutdownMarker(db ethdb.KeyValueStore) { log.Warn("Failed to write unclean-shutdown marker", "err", err) } } - -// ReadTransitionStatus retrieves the eth2 transition status from the database -func ReadTransitionStatus(db ethdb.KeyValueReader) []byte { - data, _ := db.Get(transitionStatusKey) - return data -} - -// WriteTransitionStatus stores the eth2 transition status to the database -func WriteTransitionStatus(db ethdb.KeyValueWriter, data []byte) { - if err := db.Put(transitionStatusKey, data); err != nil { - log.Crit("Failed to store the eth2 transition status", "err", err) - } -} diff --git a/core/rawdb/accessors_trie.go b/core/rawdb/accessors_trie.go index e154ab527b..7d8b266c15 100644 --- a/core/rawdb/accessors_trie.go +++ b/core/rawdb/accessors_trie.go @@ -150,7 +150,7 @@ func HasTrieNode(db ethdb.KeyValueReader, owner common.Hash, path []byte, hash c if len(blob) == 0 { return false } - return crypto.Keccak256Hash(blob) == hash // exists but not match + return crypto.Keccak256Hash(blob) == hash // exist and match default: panic(fmt.Sprintf("Unknown scheme %v", scheme)) } @@ -173,7 +173,7 @@ func ReadTrieNode(db ethdb.KeyValueReader, owner common.Hash, path []byte, hash return nil } if crypto.Keccak256Hash(blob) != hash { - return nil // exists but not match + return nil // exist but not match } return blob default: diff --git a/core/rawdb/ancienttest/testsuite.go b/core/rawdb/ancienttest/testsuite.go index e33e768947..7512c1f44b 100644 --- 
a/core/rawdb/ancienttest/testsuite.go +++ b/core/rawdb/ancienttest/testsuite.go @@ -48,12 +48,16 @@ func basicRead(t *testing.T, newFn func(kinds []string) ethdb.AncientStore) { ) defer db.Close() - db.ModifyAncients(func(op ethdb.AncientWriteOp) error { + if _, err := db.ModifyAncients(func(op ethdb.AncientWriteOp) error { for i := 0; i < len(data); i++ { - op.AppendRaw("a", uint64(i), data[i]) + if err := op.AppendRaw("a", uint64(i), data[i]); err != nil { + return err + } } return nil - }) + }); err != nil { + t.Fatalf("Failed to write ancient data %v", err) + } db.TruncateTail(10) db.TruncateHead(90) @@ -109,12 +113,16 @@ func batchRead(t *testing.T, newFn func(kinds []string) ethdb.AncientStore) { ) defer db.Close() - db.ModifyAncients(func(op ethdb.AncientWriteOp) error { + if _, err := db.ModifyAncients(func(op ethdb.AncientWriteOp) error { for i := 0; i < 100; i++ { - op.AppendRaw("a", uint64(i), data[i]) + if err := op.AppendRaw("a", uint64(i), data[i]); err != nil { + return err + } } return nil - }) + }); err != nil { + t.Fatalf("Failed to write ancient data %v", err) + } db.TruncateTail(10) db.TruncateHead(90) @@ -189,7 +197,9 @@ func basicWrite(t *testing.T, newFn func(kinds []string) ethdb.AncientStore) { // The ancient write to tables should be aligned _, err := db.ModifyAncients(func(op ethdb.AncientWriteOp) error { for i := 0; i < 100; i++ { - op.AppendRaw("a", uint64(i), dataA[i]) + if err := op.AppendRaw("a", uint64(i), dataA[i]); err != nil { + return err + } } return nil }) @@ -200,8 +210,12 @@ func basicWrite(t *testing.T, newFn func(kinds []string) ethdb.AncientStore) { // Test normal ancient write size, err := db.ModifyAncients(func(op ethdb.AncientWriteOp) error { for i := 0; i < 100; i++ { - op.AppendRaw("a", uint64(i), dataA[i]) - op.AppendRaw("b", uint64(i), dataB[i]) + if err := op.AppendRaw("a", uint64(i), dataA[i]); err != nil { + return err + } + if err := op.AppendRaw("b", uint64(i), dataB[i]); err != nil { + return err + } } return 
nil }) @@ -217,8 +231,12 @@ func basicWrite(t *testing.T, newFn func(kinds []string) ethdb.AncientStore) { db.TruncateHead(90) _, err = db.ModifyAncients(func(op ethdb.AncientWriteOp) error { for i := 90; i < 100; i++ { - op.AppendRaw("a", uint64(i), dataA[i]) - op.AppendRaw("b", uint64(i), dataB[i]) + if err := op.AppendRaw("a", uint64(i), dataA[i]); err != nil { + return err + } + if err := op.AppendRaw("b", uint64(i), dataB[i]); err != nil { + return err + } } return nil }) @@ -227,11 +245,15 @@ func basicWrite(t *testing.T, newFn func(kinds []string) ethdb.AncientStore) { } // Write should work after truncating everything - db.TruncateTail(0) + db.TruncateHead(0) _, err = db.ModifyAncients(func(op ethdb.AncientWriteOp) error { for i := 0; i < 100; i++ { - op.AppendRaw("a", uint64(i), dataA[i]) - op.AppendRaw("b", uint64(i), dataB[i]) + if err := op.AppendRaw("a", uint64(i), dataA[i]); err != nil { + return err + } + if err := op.AppendRaw("b", uint64(i), dataB[i]); err != nil { + return err + } } return nil }) @@ -245,14 +267,18 @@ func nonMutable(t *testing.T, newFn func(kinds []string) ethdb.AncientStore) { defer db.Close() // We write 100 zero-bytes to the freezer and immediately mutate the slice - db.ModifyAncients(func(op ethdb.AncientWriteOp) error { + if _, err := db.ModifyAncients(func(op ethdb.AncientWriteOp) error { data := make([]byte, 100) - op.AppendRaw("a", uint64(0), data) + if err := op.AppendRaw("a", uint64(0), data); err != nil { + return err + } for i := range data { data[i] = 0xff } return nil - }) + }); err != nil { + t.Fatalf("Failed to write ancient data %v", err) + } // Now read it. 
data, err := db.Ancient("a", uint64(0)) if err != nil { @@ -275,23 +301,31 @@ func TestResettableAncientSuite(t *testing.T, newFn func(kinds []string) ethdb.R ) defer db.Close() - db.ModifyAncients(func(op ethdb.AncientWriteOp) error { + if _, err := db.ModifyAncients(func(op ethdb.AncientWriteOp) error { for i := 0; i < 100; i++ { - op.AppendRaw("a", uint64(i), data[i]) + if err := op.AppendRaw("a", uint64(i), data[i]); err != nil { + return err + } } return nil - }) + }); err != nil { + t.Fatalf("Failed to write ancient data %v", err) + } db.TruncateTail(10) db.TruncateHead(90) // Ancient write should work after resetting db.Reset() - db.ModifyAncients(func(op ethdb.AncientWriteOp) error { + if _, err := db.ModifyAncients(func(op ethdb.AncientWriteOp) error { for i := 0; i < 100; i++ { - op.AppendRaw("a", uint64(i), data[i]) + if err := op.AppendRaw("a", uint64(i), data[i]); err != nil { + return err + } } return nil - }) + }); err != nil { + t.Fatalf("Failed to write ancient data %v", err) + } }) } diff --git a/core/rawdb/freezer_test.go b/core/rawdb/freezer_test.go index b8a3d4a6d2..fab3319a2a 100644 --- a/core/rawdb/freezer_test.go +++ b/core/rawdb/freezer_test.go @@ -239,7 +239,7 @@ func TestFreezerConcurrentModifyTruncate(t *testing.T) { // fails, otherwise it succeeds. In either case, the freezer should be positioned // at 10 after both operations are done. if truncateErr != nil { - t.Fatal("concurrent truncate failed:", err) + t.Fatal("concurrent truncate failed:", truncateErr) } if !(errors.Is(modifyErr, nil) || errors.Is(modifyErr, errOutOrderInsertion)) { t.Fatal("wrong error from concurrent modify:", modifyErr) diff --git a/core/rawdb/schema.go b/core/rawdb/schema.go index 3588063468..9a17e1c173 100644 --- a/core/rawdb/schema.go +++ b/core/rawdb/schema.go @@ -95,7 +95,7 @@ var ( uncleanShutdownKey = []byte("unclean-shutdown") // config prefix for the db // transitionStatusKey tracks the eth2 transition status. 
- transitionStatusKey = []byte("eth2-transition") + transitionStatusKey = []byte("eth2-transition") // deprecated // snapSyncStatusFlagKey flags that status of snap sync. snapSyncStatusFlagKey = []byte("SnapSyncStatus") diff --git a/core/rlp_test.go b/core/rlp_test.go index bc37408537..69efa82551 100644 --- a/core/rlp_test.go +++ b/core/rlp_test.go @@ -149,8 +149,7 @@ func BenchmarkHashing(b *testing.B) { var got common.Hash var hasher = sha3.NewLegacyKeccak256() b.Run("iteratorhashing", func(b *testing.B) { - b.ResetTimer() - for i := 0; i < b.N; i++ { + for b.Loop() { var hash common.Hash it, err := rlp.NewListIterator(bodyRlp) if err != nil { @@ -172,8 +171,7 @@ func BenchmarkHashing(b *testing.B) { }) var exp common.Hash b.Run("fullbodyhashing", func(b *testing.B) { - b.ResetTimer() - for i := 0; i < b.N; i++ { + for b.Loop() { var body types.Body rlp.DecodeBytes(bodyRlp, &body) for _, tx := range body.Transactions { @@ -182,8 +180,7 @@ func BenchmarkHashing(b *testing.B) { } }) b.Run("fullblockhashing", func(b *testing.B) { - b.ResetTimer() - for i := 0; i < b.N; i++ { + for b.Loop() { var block types.Block rlp.DecodeBytes(blockRlp, &block) for _, tx := range block.Transactions() { diff --git a/core/state/snapshot/difflayer_test.go b/core/state/snapshot/difflayer_test.go index b212098e75..2c868b3010 100644 --- a/core/state/snapshot/difflayer_test.go +++ b/core/state/snapshot/difflayer_test.go @@ -229,8 +229,7 @@ func BenchmarkSearch(b *testing.B) { layer = fill(layer) } key := crypto.Keccak256Hash([]byte{0x13, 0x38}) - b.ResetTimer() - for i := 0; i < b.N; i++ { + for b.Loop() { layer.AccountRLP(key) } } @@ -269,8 +268,7 @@ func BenchmarkSearchSlot(b *testing.B) { for i := 0; i < 128; i++ { layer = fill(layer) } - b.ResetTimer() - for i := 0; i < b.N; i++ { + for b.Loop() { layer.Storage(accountKey, storageKey) } } @@ -300,9 +298,7 @@ func BenchmarkFlatten(b *testing.B) { } return newDiffLayer(parent, common.Hash{}, accounts, storage) } - b.ResetTimer() - for 
i := 0; i < b.N; i++ { - b.StopTimer() + for b.Loop() { var layer snapshot layer = emptyLayer() for i := 1; i < 128; i++ { @@ -352,9 +348,7 @@ func BenchmarkJournal(b *testing.B) { for i := 1; i < 128; i++ { layer = fill(layer) } - b.ResetTimer() - - for i := 0; i < b.N; i++ { + for b.Loop() { layer.Journal(new(bytes.Buffer)) } } diff --git a/core/state/snapshot/iterator_test.go b/core/state/snapshot/iterator_test.go index 2e882f484e..dd6c4cf968 100644 --- a/core/state/snapshot/iterator_test.go +++ b/core/state/snapshot/iterator_test.go @@ -928,7 +928,7 @@ func BenchmarkAccountIteratorTraversal(b *testing.B) { head.(*diffLayer).newBinaryAccountIterator(common.Hash{}) b.Run("binary iterator keys", func(b *testing.B) { - for i := 0; i < b.N; i++ { + for b.Loop() { got := 0 it := head.(*diffLayer).newBinaryAccountIterator(common.Hash{}) for it.Next() { @@ -940,7 +940,7 @@ func BenchmarkAccountIteratorTraversal(b *testing.B) { } }) b.Run("binary iterator values", func(b *testing.B) { - for i := 0; i < b.N; i++ { + for b.Loop() { got := 0 it := head.(*diffLayer).newBinaryAccountIterator(common.Hash{}) for it.Next() { @@ -953,7 +953,7 @@ func BenchmarkAccountIteratorTraversal(b *testing.B) { } }) b.Run("fast iterator keys", func(b *testing.B) { - for i := 0; i < b.N; i++ { + for b.Loop() { it, _ := snaps.AccountIterator(common.HexToHash("0x65"), common.Hash{}) defer it.Release() @@ -967,7 +967,7 @@ func BenchmarkAccountIteratorTraversal(b *testing.B) { } }) b.Run("fast iterator values", func(b *testing.B) { - for i := 0; i < b.N; i++ { + for b.Loop() { it, _ := snaps.AccountIterator(common.HexToHash("0x65"), common.Hash{}) defer it.Release() @@ -1025,7 +1025,7 @@ func BenchmarkAccountIteratorLargeBaselayer(b *testing.B) { head.(*diffLayer).newBinaryAccountIterator(common.Hash{}) b.Run("binary iterator (keys)", func(b *testing.B) { - for i := 0; i < b.N; i++ { + for b.Loop() { got := 0 it := head.(*diffLayer).newBinaryAccountIterator(common.Hash{}) for it.Next() { @@ 
-1037,7 +1037,7 @@ func BenchmarkAccountIteratorLargeBaselayer(b *testing.B) { } }) b.Run("binary iterator (values)", func(b *testing.B) { - for i := 0; i < b.N; i++ { + for b.Loop() { got := 0 it := head.(*diffLayer).newBinaryAccountIterator(common.Hash{}) for it.Next() { @@ -1051,7 +1051,7 @@ func BenchmarkAccountIteratorLargeBaselayer(b *testing.B) { } }) b.Run("fast iterator (keys)", func(b *testing.B) { - for i := 0; i < b.N; i++ { + for b.Loop() { it, _ := snaps.AccountIterator(common.HexToHash("0x65"), common.Hash{}) defer it.Release() @@ -1065,7 +1065,7 @@ func BenchmarkAccountIteratorLargeBaselayer(b *testing.B) { } }) b.Run("fast iterator (values)", func(b *testing.B) { - for i := 0; i < b.N; i++ { + for b.Loop() { it, _ := snaps.AccountIterator(common.HexToHash("0x65"), common.Hash{}) defer it.Release() diff --git a/core/state/state_object.go b/core/state/state_object.go index 767f469bfd..2938750503 100644 --- a/core/state/state_object.go +++ b/core/state/state_object.go @@ -333,7 +333,7 @@ func (s *stateObject) updateTrie() (Trie, error) { continue } if !exist { - log.Error("Storage slot is not found in pending area", s.address, "slot", key) + log.Error("Storage slot is not found in pending area", "address", s.address, "slot", key) continue } if (value != common.Hash{}) { diff --git a/core/state/state_object_test.go b/core/state/state_object_test.go index 42fd778025..0237f0580d 100644 --- a/core/state/state_object_test.go +++ b/core/state/state_object_test.go @@ -25,7 +25,7 @@ import ( func BenchmarkCutOriginal(b *testing.B) { value := common.HexToHash("0x01") - for i := 0; i < b.N; i++ { + for b.Loop() { bytes.TrimLeft(value[:], "\x00") } } @@ -33,14 +33,14 @@ func BenchmarkCutOriginal(b *testing.B) { func BenchmarkCutsetterFn(b *testing.B) { value := common.HexToHash("0x01") cutSetFn := func(r rune) bool { return r == 0 } - for i := 0; i < b.N; i++ { + for b.Loop() { bytes.TrimLeftFunc(value[:], cutSetFn) } } func BenchmarkCutCustomTrim(b *testing.B) 
{ value := common.HexToHash("0x01") - for i := 0; i < b.N; i++ { + for b.Loop() { common.TrimLeftZeroes(value[:]) } } diff --git a/core/state/state_sizer.go b/core/state/state_sizer.go new file mode 100644 index 0000000000..2066c94845 --- /dev/null +++ b/core/state/state_sizer.go @@ -0,0 +1,638 @@ +// Copyright 2025 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package state + +import ( + "container/heap" + "errors" + "fmt" + "maps" + "runtime" + "slices" + "sync" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/rawdb" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/ethdb" + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/triedb" + "golang.org/x/sync/errgroup" +) + +const ( + statEvictThreshold = 128 // the depth of statistic to be preserved +) + +// Database key scheme for states. 
+var ( + accountKeySize = int64(len(rawdb.SnapshotAccountPrefix) + common.HashLength) + storageKeySize = int64(len(rawdb.SnapshotStoragePrefix) + common.HashLength*2) + accountTrienodePrefixSize = int64(len(rawdb.TrieNodeAccountPrefix)) + storageTrienodePrefixSize = int64(len(rawdb.TrieNodeStoragePrefix) + common.HashLength) + codeKeySize = int64(len(rawdb.CodePrefix) + common.HashLength) +) + +// SizeStats represents either the current state size statistics or the size +// differences resulting from a state transition. +type SizeStats struct { + StateRoot common.Hash // State root hash at the time of measurement + BlockNumber uint64 // Associated block number at the time of measurement + + Accounts int64 // Total number of accounts in the state + AccountBytes int64 // Total storage size used by all account data (in bytes) + Storages int64 // Total number of storage slots across all accounts + StorageBytes int64 // Total storage size used by all storage slot data (in bytes) + AccountTrienodes int64 // Total number of account trie nodes in the state + AccountTrienodeBytes int64 // Total storage size occupied by account trie nodes (in bytes) + StorageTrienodes int64 // Total number of storage trie nodes in the state + StorageTrienodeBytes int64 // Total storage size occupied by storage trie nodes (in bytes) + ContractCodes int64 // Total number of contract codes in the state + ContractCodeBytes int64 // Total size of all contract code (in bytes) +} + +func (s SizeStats) String() string { + return fmt.Sprintf("Accounts: %d(%s), Storages: %d(%s), AccountTrienodes: %d(%s), StorageTrienodes: %d(%s), Codes: %d(%s)", + s.Accounts, common.StorageSize(s.AccountBytes), + s.Storages, common.StorageSize(s.StorageBytes), + s.AccountTrienodes, common.StorageSize(s.AccountTrienodeBytes), + s.StorageTrienodes, common.StorageSize(s.StorageTrienodeBytes), + s.ContractCodes, common.StorageSize(s.ContractCodeBytes), + ) +} + +// add applies the given state diffs and produces a new 
version of the statistics. +func (s SizeStats) add(diff SizeStats) SizeStats { + s.StateRoot = diff.StateRoot + s.BlockNumber = diff.BlockNumber + + s.Accounts += diff.Accounts + s.AccountBytes += diff.AccountBytes + s.Storages += diff.Storages + s.StorageBytes += diff.StorageBytes + s.AccountTrienodes += diff.AccountTrienodes + s.AccountTrienodeBytes += diff.AccountTrienodeBytes + s.StorageTrienodes += diff.StorageTrienodes + s.StorageTrienodeBytes += diff.StorageTrienodeBytes + s.ContractCodes += diff.ContractCodes + s.ContractCodeBytes += diff.ContractCodeBytes + return s +} + +// calSizeStats measures the state size changes of the provided state update. +func calSizeStats(update *stateUpdate) (SizeStats, error) { + stats := SizeStats{ + BlockNumber: update.blockNumber, + StateRoot: update.root, + } + + // Measure the account changes + for addr, oldValue := range update.accountsOrigin { + addrHash := crypto.Keccak256Hash(addr.Bytes()) + newValue, exists := update.accounts[addrHash] + if !exists { + return SizeStats{}, fmt.Errorf("account %x not found", addr) + } + oldLen, newLen := len(oldValue), len(newValue) + + switch { + case oldLen > 0 && newLen == 0: + // Account deletion + stats.Accounts -= 1 + stats.AccountBytes -= accountKeySize + int64(oldLen) + case oldLen == 0 && newLen > 0: + // Account creation + stats.Accounts += 1 + stats.AccountBytes += accountKeySize + int64(newLen) + default: + // Account update + stats.AccountBytes += int64(newLen - oldLen) + } + } + + // Measure storage changes + for addr, slots := range update.storagesOrigin { + addrHash := crypto.Keccak256Hash(addr.Bytes()) + subset, exists := update.storages[addrHash] + if !exists { + return SizeStats{}, fmt.Errorf("storage %x not found", addr) + } + for key, oldValue := range slots { + var ( + exists bool + newValue []byte + ) + if update.rawStorageKey { + newValue, exists = subset[crypto.Keccak256Hash(key.Bytes())] + } else { + newValue, exists = subset[key] + } + if !exists { + return 
SizeStats{}, fmt.Errorf("storage slot %x-%x not found", addr, key) + } + oldLen, newLen := len(oldValue), len(newValue) + + switch { + case oldLen > 0 && newLen == 0: + // Storage deletion + stats.Storages -= 1 + stats.StorageBytes -= storageKeySize + int64(oldLen) + case oldLen == 0 && newLen > 0: + // Storage creation + stats.Storages += 1 + stats.StorageBytes += storageKeySize + int64(newLen) + default: + // Storage update + stats.StorageBytes += int64(newLen - oldLen) + } + } + } + + // Measure trienode changes + for owner, subset := range update.nodes.Sets { + var ( + keyPrefix int64 + isAccount = owner == (common.Hash{}) + ) + if isAccount { + keyPrefix = accountTrienodePrefixSize + } else { + keyPrefix = storageTrienodePrefixSize + } + + // Iterate over Origins since every modified node has an origin entry + for path, oldNode := range subset.Origins { + newNode, exists := subset.Nodes[path] + if !exists { + return SizeStats{}, fmt.Errorf("node %x-%v not found", owner, path) + } + keySize := keyPrefix + int64(len(path)) + + switch { + case len(oldNode) > 0 && len(newNode.Blob) == 0: + // Node deletion + if isAccount { + stats.AccountTrienodes -= 1 + stats.AccountTrienodeBytes -= keySize + int64(len(oldNode)) + } else { + stats.StorageTrienodes -= 1 + stats.StorageTrienodeBytes -= keySize + int64(len(oldNode)) + } + case len(oldNode) == 0 && len(newNode.Blob) > 0: + // Node creation + if isAccount { + stats.AccountTrienodes += 1 + stats.AccountTrienodeBytes += keySize + int64(len(newNode.Blob)) + } else { + stats.StorageTrienodes += 1 + stats.StorageTrienodeBytes += keySize + int64(len(newNode.Blob)) + } + default: + // Node update + if isAccount { + stats.AccountTrienodeBytes += int64(len(newNode.Blob) - len(oldNode)) + } else { + stats.StorageTrienodeBytes += int64(len(newNode.Blob) - len(oldNode)) + } + } + } + } + + // Measure code changes. 
Note that the reported contract code size may be slightly + // inaccurate due to database deduplication (code is stored by its hash). However, + // this deviation is negligible and acceptable for measurement purposes. + for _, code := range update.codes { + stats.ContractCodes += 1 + stats.ContractCodeBytes += codeKeySize + int64(len(code.blob)) + } + return stats, nil +} + +type stateSizeQuery struct { + root *common.Hash // nil means latest + err error // non-nil if the state size is not yet initialized + result chan *SizeStats // nil means the state is unknown +} + +// SizeTracker handles the state size initialization and tracks of state size metrics. +type SizeTracker struct { + db ethdb.KeyValueStore + triedb *triedb.Database + abort chan struct{} + aborted chan struct{} + updateCh chan *stateUpdate + queryCh chan *stateSizeQuery +} + +// NewSizeTracker creates a new state size tracker and starts it automatically +func NewSizeTracker(db ethdb.KeyValueStore, triedb *triedb.Database) (*SizeTracker, error) { + if triedb.Scheme() != rawdb.PathScheme { + return nil, errors.New("state size tracker is not compatible with hash mode") + } + t := &SizeTracker{ + db: db, + triedb: triedb, + abort: make(chan struct{}), + aborted: make(chan struct{}), + updateCh: make(chan *stateUpdate), + queryCh: make(chan *stateSizeQuery), + } + go t.run() + return t, nil +} + +func (t *SizeTracker) Stop() { + close(t.abort) + <-t.aborted +} + +// sizeStatsHeap is a heap.Interface implementation over statesize statistics for +// retrieving the oldest statistics for eviction. 
+type sizeStatsHeap []SizeStats + +func (h sizeStatsHeap) Len() int { return len(h) } +func (h sizeStatsHeap) Less(i, j int) bool { return h[i].BlockNumber < h[j].BlockNumber } +func (h sizeStatsHeap) Swap(i, j int) { h[i], h[j] = h[j], h[i] } + +func (h *sizeStatsHeap) Push(x any) { + *h = append(*h, x.(SizeStats)) +} + +func (h *sizeStatsHeap) Pop() any { + old := *h + n := len(old) + x := old[n-1] + *h = old[0 : n-1] + return x +} + +// run performs the state size initialization and handles updates +func (t *SizeTracker) run() { + defer close(t.aborted) + + var last common.Hash + stats, err := t.init() // launch background thread for state size init + if err != nil { + return + } + h := sizeStatsHeap(slices.Collect(maps.Values(stats))) + heap.Init(&h) + + for { + select { + case u := <-t.updateCh: + base, found := stats[u.originRoot] + if !found { + log.Debug("Ignored the state size without parent", "parent", u.originRoot, "root", u.root, "number", u.blockNumber) + continue + } + diff, err := calSizeStats(u) + if err != nil { + continue + } + stat := base.add(diff) + stats[u.root] = stat + last = u.root + + heap.Push(&h, stats[u.root]) + for u.blockNumber-h[0].BlockNumber > statEvictThreshold { + delete(stats, h[0].StateRoot) + heap.Pop(&h) + } + log.Debug("Update state size", "number", stat.BlockNumber, "root", stat.StateRoot, "stat", stat) + + case r := <-t.queryCh: + var root common.Hash + if r.root != nil { + root = *r.root + } else { + root = last + } + if s, ok := stats[root]; ok { + r.result <- &s + } else { + r.result <- nil + } + + case <-t.abort: + return + } + } +} + +type buildResult struct { + stat SizeStats + root common.Hash + blockNumber uint64 + elapsed time.Duration + err error +} + +func (t *SizeTracker) init() (map[common.Hash]SizeStats, error) { + // Wait for snapshot completion and then init + ticker := time.NewTicker(10 * time.Second) + defer ticker.Stop() + +wait: + for { + select { + case <-ticker.C: + if t.triedb.SnapshotCompleted() { + 
break wait + } + case <-t.updateCh: + continue + case r := <-t.queryCh: + r.err = errors.New("state size is not initialized yet") + r.result <- nil + case <-t.abort: + return nil, errors.New("size tracker closed") + } + } + + var ( + updates = make(map[common.Hash]*stateUpdate) + children = make(map[common.Hash][]common.Hash) + done chan buildResult + ) + + for { + select { + case u := <-t.updateCh: + updates[u.root] = u + children[u.originRoot] = append(children[u.originRoot], u.root) + log.Debug("Received state update", "root", u.root, "blockNumber", u.blockNumber) + + case r := <-t.queryCh: + r.err = errors.New("state size is not initialized yet") + r.result <- nil + + case <-ticker.C: + // Only check timer if build hasn't started yet + if done != nil { + continue + } + root := rawdb.ReadSnapshotRoot(t.db) + if root == (common.Hash{}) { + continue + } + entry, exists := updates[root] + if !exists { + continue + } + done = make(chan buildResult) + go t.build(entry.root, entry.blockNumber, done) + log.Info("Measuring persistent state size", "root", root.Hex(), "number", entry.blockNumber) + + case result := <-done: + if result.err != nil { + return nil, result.err + } + var ( + stats = make(map[common.Hash]SizeStats) + apply func(root common.Hash, stat SizeStats) error + ) + apply = func(root common.Hash, base SizeStats) error { + for _, child := range children[root] { + entry, ok := updates[child] + if !ok { + return fmt.Errorf("the state update is not found, %x", child) + } + diff, err := calSizeStats(entry) + if err != nil { + return err + } + stats[child] = base.add(diff) + if err := apply(child, stats[child]); err != nil { + return err + } + } + return nil + } + if err := apply(result.root, result.stat); err != nil { + return nil, err + } + + // Set initial latest stats + stats[result.root] = result.stat + log.Info("Measured persistent state size", "root", result.root, "number", result.blockNumber, "stat", result.stat, "elapsed", 
common.PrettyDuration(result.elapsed)) + return stats, nil + + case <-t.abort: + return nil, errors.New("size tracker closed") + } + } +} + +func (t *SizeTracker) build(root common.Hash, blockNumber uint64, done chan buildResult) { + // Metrics will be directly updated by each goroutine + var ( + accounts, accountBytes int64 + storages, storageBytes int64 + codes, codeBytes int64 + + accountTrienodes, accountTrienodeBytes int64 + storageTrienodes, storageTrienodeBytes int64 + + group errgroup.Group + start = time.Now() + ) + + // Start all table iterations concurrently with direct metric updates + group.Go(func() error { + count, bytes, err := t.iterateTableParallel(t.abort, rawdb.SnapshotAccountPrefix, "account") + if err != nil { + return err + } + accounts, accountBytes = count, bytes + return nil + }) + + group.Go(func() error { + count, bytes, err := t.iterateTableParallel(t.abort, rawdb.SnapshotStoragePrefix, "storage") + if err != nil { + return err + } + storages, storageBytes = count, bytes + return nil + }) + + group.Go(func() error { + count, bytes, err := t.iterateTableParallel(t.abort, rawdb.TrieNodeAccountPrefix, "accountnode") + if err != nil { + return err + } + accountTrienodes, accountTrienodeBytes = count, bytes + return nil + }) + + group.Go(func() error { + count, bytes, err := t.iterateTableParallel(t.abort, rawdb.TrieNodeStoragePrefix, "storagenode") + if err != nil { + return err + } + storageTrienodes, storageTrienodeBytes = count, bytes + return nil + }) + + group.Go(func() error { + count, bytes, err := t.iterateTable(t.abort, rawdb.CodePrefix, "contractcode") + if err != nil { + return err + } + codes, codeBytes = count, bytes + return nil + }) + + // Wait for all goroutines to complete + if err := group.Wait(); err != nil { + done <- buildResult{err: err} + } else { + stat := SizeStats{ + StateRoot: root, + BlockNumber: blockNumber, + Accounts: accounts, + AccountBytes: accountBytes, + Storages: storages, + StorageBytes: storageBytes, + 
AccountTrienodes: accountTrienodes, + AccountTrienodeBytes: accountTrienodeBytes, + StorageTrienodes: storageTrienodes, + StorageTrienodeBytes: storageTrienodeBytes, + ContractCodes: codes, + ContractCodeBytes: codeBytes, + } + done <- buildResult{ + root: root, + blockNumber: blockNumber, + stat: stat, + elapsed: time.Since(start), + } + } +} + +// iterateTable performs iteration over a specific table and returns the results. +func (t *SizeTracker) iterateTable(closed chan struct{}, prefix []byte, name string) (int64, int64, error) { + var ( + start = time.Now() + logged = time.Now() + count, bytes int64 + ) + + iter := t.db.NewIterator(prefix, nil) + defer iter.Release() + + log.Debug("Iterating state", "category", name) + for iter.Next() { + count++ + bytes += int64(len(iter.Key()) + len(iter.Value())) + + if time.Since(logged) > time.Second*8 { + logged = time.Now() + + select { + case <-closed: + log.Debug("State iteration cancelled", "category", name) + return 0, 0, errors.New("size tracker closed") + default: + log.Debug("Iterating state", "category", name, "count", count, "size", common.StorageSize(bytes)) + } + } + } + // Check for iterator errors + if err := iter.Error(); err != nil { + log.Error("Iterator error", "category", name, "err", err) + return 0, 0, err + } + log.Debug("Finished state iteration", "category", name, "count", count, "size", common.StorageSize(bytes), "elapsed", common.PrettyDuration(time.Since(start))) + return count, bytes, nil +} + +// iterateTableParallel performs parallel iteration over a table by splitting into +// hex ranges. For storage tables, it splits on the first byte of the account hash +// (after the prefix). 
+func (t *SizeTracker) iterateTableParallel(closed chan struct{}, prefix []byte, name string) (int64, int64, error) { + var ( + totalCount int64 + totalBytes int64 + + start = time.Now() + workers = runtime.NumCPU() + group errgroup.Group + mu sync.Mutex + ) + group.SetLimit(workers) + log.Debug("Starting parallel state iteration", "category", name, "workers", workers) + + if len(prefix) > 0 { + if blob, err := t.db.Get(prefix); err == nil && len(blob) > 0 { + // If there's a direct hit on the prefix, include it in the stats + totalCount = 1 + totalBytes = int64(len(prefix) + len(blob)) + } + } + for i := 0; i < 256; i++ { + h := byte(i) + group.Go(func() error { + count, bytes, err := t.iterateTable(closed, slices.Concat(prefix, []byte{h}), fmt.Sprintf("%s-%02x", name, h)) + if err != nil { + return err + } + mu.Lock() + totalCount += count + totalBytes += bytes + mu.Unlock() + return nil + }) + } + if err := group.Wait(); err != nil { + return 0, 0, err + } + log.Debug("Finished parallel state iteration", "category", name, "count", totalCount, "size", common.StorageSize(totalBytes), "elapsed", common.PrettyDuration(time.Since(start))) + return totalCount, totalBytes, nil +} + +// Notify is an async method used to send the state update to the size tracker. +// It ignores empty updates (where no state changes occurred). +// If the channel is full, it drops the update to avoid blocking. +func (t *SizeTracker) Notify(update *stateUpdate) { + if update == nil || update.empty() { + return + } + select { + case t.updateCh <- update: + case <-t.abort: + return + } +} + +// Query returns the state size specified by the root, or nil if not available. 
+// If the root is nil, query the size of latest chain head; +// If the root is non-nil, query the size of the specified state; +func (t *SizeTracker) Query(root *common.Hash) (*SizeStats, error) { + r := &stateSizeQuery{ + root: root, + result: make(chan *SizeStats, 1), + } + select { + case <-t.aborted: + return nil, errors.New("state sizer has been closed") + case t.queryCh <- r: + return <-r.result, r.err + } +} diff --git a/core/state/state_sizer_test.go b/core/state/state_sizer_test.go new file mode 100644 index 0000000000..cab0c38163 --- /dev/null +++ b/core/state/state_sizer_test.go @@ -0,0 +1,231 @@ +// Copyright 2025 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . 
+ +package state + +import ( + "testing" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/rawdb" + "github.com/ethereum/go-ethereum/core/tracing" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/triedb" + "github.com/ethereum/go-ethereum/triedb/pathdb" + "github.com/holiman/uint256" +) + +func TestSizeTracker(t *testing.T) { + db := rawdb.NewMemoryDatabase() + defer db.Close() + + tdb := triedb.NewDatabase(db, &triedb.Config{PathDB: pathdb.Defaults}) + sdb := NewDatabase(tdb, nil) + + // Generate 50 blocks to establish a baseline + baselineBlockNum := uint64(50) + currentRoot := types.EmptyRootHash + + addr1 := common.BytesToAddress([]byte{1, 0, 0, 1}) + addr2 := common.BytesToAddress([]byte{1, 0, 0, 2}) + addr3 := common.BytesToAddress([]byte{1, 0, 0, 3}) + + // Create initial state with fixed accounts + state, _ := New(currentRoot, sdb) + state.AddBalance(addr1, uint256.NewInt(1000), tracing.BalanceChangeUnspecified) + state.SetNonce(addr1, 1, tracing.NonceChangeUnspecified) + state.SetState(addr1, common.HexToHash("0x1111"), common.HexToHash("0xaaaa")) + state.SetState(addr1, common.HexToHash("0x2222"), common.HexToHash("0xbbbb")) + + state.AddBalance(addr2, uint256.NewInt(2000), tracing.BalanceChangeUnspecified) + state.SetNonce(addr2, 2, tracing.NonceChangeUnspecified) + state.SetCode(addr2, []byte{0x60, 0x80, 0x60, 0x40, 0x52}, tracing.CodeChangeUnspecified) + + state.AddBalance(addr3, uint256.NewInt(3000), tracing.BalanceChangeUnspecified) + state.SetNonce(addr3, 3, tracing.NonceChangeUnspecified) + + currentRoot, _, err := state.CommitWithUpdate(1, true, false) + if err != nil { + t.Fatalf("Failed to commit initial state: %v", err) + } + if err := tdb.Commit(currentRoot, false); err != nil { + t.Fatalf("Failed to commit initial trie: %v", err) + } + + for i := 1; i < 50; i++ { // blocks 2-50 + blockNum := uint64(i + 1) + + newState, err := New(currentRoot, sdb) + if err != nil { + 
t.Fatalf("Failed to create new state at block %d: %v", blockNum, err) + } + testAddr := common.BigToAddress(uint256.NewInt(uint64(i + 100)).ToBig()) + newState.AddBalance(testAddr, uint256.NewInt(uint64((i+1)*1000)), tracing.BalanceChangeUnspecified) + newState.SetNonce(testAddr, uint64(i+10), tracing.NonceChangeUnspecified) + + if i%2 == 0 { + newState.SetState(addr1, common.BigToHash(uint256.NewInt(uint64(i+0x1000)).ToBig()), common.BigToHash(uint256.NewInt(uint64(i+0x2000)).ToBig())) + } + if i%3 == 0 { + newState.SetCode(testAddr, []byte{byte(i), 0x60, 0x80, byte(i + 1), 0x52}, tracing.CodeChangeUnspecified) + } + root, _, err := newState.CommitWithUpdate(blockNum, true, false) + if err != nil { + t.Fatalf("Failed to commit state at block %d: %v", blockNum, err) + } + if err := tdb.Commit(root, false); err != nil { + t.Fatalf("Failed to commit trie at block %d: %v", blockNum, err) + } + currentRoot = root + } + baselineRoot := currentRoot + + // Wait for snapshot completion + for !tdb.SnapshotCompleted() { + time.Sleep(100 * time.Millisecond) + } + + // Calculate baseline from the intermediate persisted state + baselineTracker := &SizeTracker{ + db: db, + triedb: tdb, + abort: make(chan struct{}), + } + done := make(chan buildResult) + + go baselineTracker.build(baselineRoot, baselineBlockNum, done) + var baselineResult buildResult + select { + case baselineResult = <-done: + if baselineResult.err != nil { + t.Fatalf("Failed to get baseline stats: %v", baselineResult.err) + } + case <-time.After(30 * time.Second): + t.Fatal("Timeout waiting for baseline stats") + } + baseline := baselineResult.stat + + // Now start the tracker and notify it of updates that happen AFTER the baseline + tracker, err := NewSizeTracker(db, tdb) + if err != nil { + t.Fatalf("Failed to create size tracker: %v", err) + } + defer tracker.Stop() + + var trackedUpdates []SizeStats + currentRoot = baselineRoot + + // Generate additional blocks beyond the baseline and track them + for i := 
49; i < 130; i++ { // blocks 51-132 + blockNum := uint64(i + 2) + newState, err := New(currentRoot, sdb) + if err != nil { + t.Fatalf("Failed to create new state at block %d: %v", blockNum, err) + } + testAddr := common.BigToAddress(uint256.NewInt(uint64(i + 100)).ToBig()) + newState.AddBalance(testAddr, uint256.NewInt(uint64((i+1)*1000)), tracing.BalanceChangeUnspecified) + newState.SetNonce(testAddr, uint64(i+10), tracing.NonceChangeUnspecified) + + if i%2 == 0 { + newState.SetState(addr1, common.BigToHash(uint256.NewInt(uint64(i+0x1000)).ToBig()), common.BigToHash(uint256.NewInt(uint64(i+0x2000)).ToBig())) + } + if i%3 == 0 { + newState.SetCode(testAddr, []byte{byte(i), 0x60, 0x80, byte(i + 1), 0x52}, tracing.CodeChangeUnspecified) + } + root, update, err := newState.CommitWithUpdate(blockNum, true, false) + if err != nil { + t.Fatalf("Failed to commit state at block %d: %v", blockNum, err) + } + if err := tdb.Commit(root, false); err != nil { + t.Fatalf("Failed to commit trie at block %d: %v", blockNum, err) + } + + diff, err := calSizeStats(update) + if err != nil { + t.Fatalf("Failed to calculate size stats for block %d: %v", blockNum, err) + } + trackedUpdates = append(trackedUpdates, diff) + tracker.Notify(update) + currentRoot = root + } + finalRoot := rawdb.ReadSnapshotRoot(db) + + // Ensure all commits are flushed to disk + if err := tdb.Close(); err != nil { + t.Fatalf("Failed to close triedb: %v", err) + } + // Reopen the database to simulate a restart + tdb = triedb.NewDatabase(db, &triedb.Config{PathDB: pathdb.Defaults}) + defer tdb.Close() + + finalTracker := &SizeTracker{ + db: db, + triedb: tdb, + abort: make(chan struct{}), + } + finalDone := make(chan buildResult) + + go finalTracker.build(finalRoot, uint64(132), finalDone) + var result buildResult + select { + case result = <-finalDone: + if result.err != nil { + t.Fatalf("Failed to build final stats: %v", result.err) + } + case <-time.After(30 * time.Second): + t.Fatal("Timeout waiting for 
final stats") + } + actualStats := result.stat + + expectedStats := baseline + for _, diff := range trackedUpdates { + expectedStats = expectedStats.add(diff) + } + + // The final measured stats should match our calculated expected stats exactly + if actualStats.Accounts != expectedStats.Accounts { + t.Errorf("Account count mismatch: baseline(%d) + tracked_changes = %d, but final_measurement = %d", baseline.Accounts, expectedStats.Accounts, actualStats.Accounts) + } + if actualStats.AccountBytes != expectedStats.AccountBytes { + t.Errorf("Account bytes mismatch: expected %d, got %d", expectedStats.AccountBytes, actualStats.AccountBytes) + } + if actualStats.Storages != expectedStats.Storages { + t.Errorf("Storage count mismatch: baseline(%d) + tracked_changes = %d, but final_measurement = %d", baseline.Storages, expectedStats.Storages, actualStats.Storages) + } + if actualStats.StorageBytes != expectedStats.StorageBytes { + t.Errorf("Storage bytes mismatch: expected %d, got %d", expectedStats.StorageBytes, actualStats.StorageBytes) + } + if actualStats.ContractCodes != expectedStats.ContractCodes { + t.Errorf("Contract code count mismatch: baseline(%d) + tracked_changes = %d, but final_measurement = %d", baseline.ContractCodes, expectedStats.ContractCodes, actualStats.ContractCodes) + } + if actualStats.ContractCodeBytes != expectedStats.ContractCodeBytes { + t.Errorf("Contract code bytes mismatch: expected %d, got %d", expectedStats.ContractCodeBytes, actualStats.ContractCodeBytes) + } + // TODO: failed on github actions, need to investigate + // if actualStats.AccountTrienodes != expectedStats.AccountTrienodes { + // t.Errorf("Account trie nodes mismatch: expected %d, got %d", expectedStats.AccountTrienodes, actualStats.AccountTrienodes) + // } + // if actualStats.AccountTrienodeBytes != expectedStats.AccountTrienodeBytes { + // t.Errorf("Account trie node bytes mismatch: expected %d, got %d", expectedStats.AccountTrienodeBytes, actualStats.AccountTrienodeBytes) 
+ // } + if actualStats.StorageTrienodes != expectedStats.StorageTrienodes { + t.Errorf("Storage trie nodes mismatch: expected %d, got %d", expectedStats.StorageTrienodes, actualStats.StorageTrienodes) + } + if actualStats.StorageTrienodeBytes != expectedStats.StorageTrienodeBytes { + t.Errorf("Storage trie node bytes mismatch: expected %d, got %d", expectedStats.StorageTrienodeBytes, actualStats.StorageTrienodeBytes) + } +} diff --git a/core/state/statedb.go b/core/state/statedb.go index 6474d3a2fa..b770698255 100644 --- a/core/state/statedb.go +++ b/core/state/statedb.go @@ -458,7 +458,7 @@ func (s *StateDB) SetNonce(addr common.Address, nonce uint64, reason tracing.Non } } -func (s *StateDB) SetCode(addr common.Address, code []byte) (prev []byte) { +func (s *StateDB) SetCode(addr common.Address, code []byte, reason tracing.CodeChangeReason) (prev []byte) { stateObject := s.getOrNewStateObject(addr) if stateObject != nil { return stateObject.SetCode(crypto.Keccak256Hash(code), code) @@ -1155,7 +1155,7 @@ func (s *StateDB) GetTrie() Trie { // commit gathers the state mutations accumulated along with the associated // trie changes, resetting all internal flags with the new state as the base. -func (s *StateDB) commit(deleteEmptyObjects bool, noStorageWiping bool) (*stateUpdate, error) { +func (s *StateDB) commit(deleteEmptyObjects bool, noStorageWiping bool, blockNumber uint64) (*stateUpdate, error) { // Short circuit in case any database failure occurred earlier. 
if s.dbErr != nil { return nil, fmt.Errorf("commit aborted due to earlier error: %v", s.dbErr) @@ -1307,13 +1307,13 @@ func (s *StateDB) commit(deleteEmptyObjects bool, noStorageWiping bool) (*stateU origin := s.originalRoot s.originalRoot = root - return newStateUpdate(noStorageWiping, origin, root, deletes, updates, nodes), nil + return newStateUpdate(noStorageWiping, origin, root, blockNumber, deletes, updates, nodes), nil } // commitAndFlush is a wrapper of commit which also commits the state mutations // to the configured data stores. func (s *StateDB) commitAndFlush(block uint64, deleteEmptyObjects bool, noStorageWiping bool) (*stateUpdate, error) { - ret, err := s.commit(deleteEmptyObjects, noStorageWiping) + ret, err := s.commit(deleteEmptyObjects, noStorageWiping, block) if err != nil { return nil, err } @@ -1378,6 +1378,16 @@ func (s *StateDB) Commit(block uint64, deleteEmptyObjects bool, noStorageWiping return ret.root, nil } +// CommitWithUpdate writes the state mutations and returns both the root hash and the state update. +// This is useful for tracking state changes at the blockchain level. +func (s *StateDB) CommitWithUpdate(block uint64, deleteEmptyObjects bool, noStorageWiping bool) (common.Hash, *stateUpdate, error) { + ret, err := s.commitAndFlush(block, deleteEmptyObjects, noStorageWiping) + if err != nil { + return common.Hash{}, nil, err + } + return ret.root, ret, nil +} + // Prepare handles the preparatory steps for executing a state transition with. // This method must be invoked before state transition. 
// diff --git a/core/state/statedb_fuzz_test.go b/core/state/statedb_fuzz_test.go index 7dada63d45..f4761bd10c 100644 --- a/core/state/statedb_fuzz_test.go +++ b/core/state/statedb_fuzz_test.go @@ -89,7 +89,7 @@ func newStateTestAction(addr common.Address, r *rand.Rand, index int) testAction code := make([]byte, 16) binary.BigEndian.PutUint64(code, uint64(a.args[0])) binary.BigEndian.PutUint64(code[8:], uint64(a.args[1])) - s.SetCode(addr, code) + s.SetCode(addr, code, tracing.CodeChangeUnspecified) }, args: make([]int64, 2), }, diff --git a/core/state/statedb_hooked.go b/core/state/statedb_hooked.go index 3d1ef15031..d2595bcefe 100644 --- a/core/state/statedb_hooked.go +++ b/core/state/statedb_hooked.go @@ -189,14 +189,20 @@ func (s *hookedStateDB) SetNonce(address common.Address, nonce uint64, reason tr } } -func (s *hookedStateDB) SetCode(address common.Address, code []byte) []byte { - prev := s.inner.SetCode(address, code) - if s.hooks.OnCodeChange != nil { +func (s *hookedStateDB) SetCode(address common.Address, code []byte, reason tracing.CodeChangeReason) []byte { + prev := s.inner.SetCode(address, code, reason) + if s.hooks.OnCodeChangeV2 != nil || s.hooks.OnCodeChange != nil { prevHash := types.EmptyCodeHash if len(prev) != 0 { prevHash = crypto.Keccak256Hash(prev) } - s.hooks.OnCodeChange(address, prevHash, prev, crypto.Keccak256Hash(code), code) + codeHash := crypto.Keccak256Hash(code) + + if s.hooks.OnCodeChangeV2 != nil { + s.hooks.OnCodeChangeV2(address, prevHash, prev, codeHash, code, reason) + } else if s.hooks.OnCodeChange != nil { + s.hooks.OnCodeChange(address, prevHash, prev, codeHash, code) + } } return prev } @@ -224,8 +230,12 @@ func (s *hookedStateDB) SelfDestruct(address common.Address) uint256.Int { s.hooks.OnBalanceChange(address, prev.ToBig(), new(big.Int), tracing.BalanceDecreaseSelfdestruct) } - if s.hooks.OnCodeChange != nil && len(prevCode) > 0 { - s.hooks.OnCodeChange(address, prevCodeHash, prevCode, types.EmptyCodeHash, nil) + if 
len(prevCode) > 0 { + if s.hooks.OnCodeChangeV2 != nil { + s.hooks.OnCodeChangeV2(address, prevCodeHash, prevCode, types.EmptyCodeHash, nil, tracing.CodeChangeSelfDestruct) + } else if s.hooks.OnCodeChange != nil { + s.hooks.OnCodeChange(address, prevCodeHash, prevCode, types.EmptyCodeHash, nil) + } } return prev @@ -242,12 +252,16 @@ func (s *hookedStateDB) SelfDestruct6780(address common.Address) (uint256.Int, b prev, changed := s.inner.SelfDestruct6780(address) - if s.hooks.OnBalanceChange != nil && changed && !prev.IsZero() { + if s.hooks.OnBalanceChange != nil && !prev.IsZero() { s.hooks.OnBalanceChange(address, prev.ToBig(), new(big.Int), tracing.BalanceDecreaseSelfdestruct) } - if s.hooks.OnCodeChange != nil && changed && len(prevCode) > 0 { - s.hooks.OnCodeChange(address, prevCodeHash, prevCode, types.EmptyCodeHash, nil) + if changed && len(prevCode) > 0 { + if s.hooks.OnCodeChangeV2 != nil { + s.hooks.OnCodeChangeV2(address, prevCodeHash, prevCode, types.EmptyCodeHash, nil, tracing.CodeChangeSelfDestruct) + } else if s.hooks.OnCodeChange != nil { + s.hooks.OnCodeChange(address, prevCodeHash, prevCode, types.EmptyCodeHash, nil) + } } return prev, changed diff --git a/core/state/statedb_hooked_test.go b/core/state/statedb_hooked_test.go index f319b0e63c..bacb7baee1 100644 --- a/core/state/statedb_hooked_test.go +++ b/core/state/statedb_hooked_test.go @@ -114,7 +114,7 @@ func TestHooks(t *testing.T) { sdb.AddBalance(common.Address{0xaa}, uint256.NewInt(100), tracing.BalanceChangeUnspecified) sdb.SubBalance(common.Address{0xaa}, uint256.NewInt(50), tracing.BalanceChangeTransfer) sdb.SetNonce(common.Address{0xaa}, 1337, tracing.NonceChangeGenesis) - sdb.SetCode(common.Address{0xaa}, []byte{0x13, 37}) + sdb.SetCode(common.Address{0xaa}, []byte{0x13, 37}, tracing.CodeChangeUnspecified) sdb.SetState(common.Address{0xaa}, common.HexToHash("0x01"), common.HexToHash("0x11")) sdb.SetState(common.Address{0xaa}, common.HexToHash("0x01"), common.HexToHash("0x22")) 
sdb.SetTransientState(common.Address{0xaa}, common.HexToHash("0x02"), common.HexToHash("0x01")) diff --git a/core/state/statedb_test.go b/core/state/statedb_test.go index 67e27cc832..661d17bb7b 100644 --- a/core/state/statedb_test.go +++ b/core/state/statedb_test.go @@ -65,7 +65,7 @@ func TestUpdateLeaks(t *testing.T) { state.SetState(addr, common.BytesToHash([]byte{i, i, i}), common.BytesToHash([]byte{i, i, i, i})) } if i%3 == 0 { - state.SetCode(addr, []byte{i, i, i, i, i}) + state.SetCode(addr, []byte{i, i, i, i, i}, tracing.CodeChangeUnspecified) } } @@ -101,7 +101,7 @@ func TestIntermediateLeaks(t *testing.T) { state.SetState(addr, common.Hash{i, i, i, tweak}, common.Hash{i, i, i, i, tweak}) } if i%3 == 0 { - state.SetCode(addr, []byte{i, i, i, i, i, tweak}) + state.SetCode(addr, []byte{i, i, i, i, i, tweak}, tracing.CodeChangeUnspecified) } } @@ -374,7 +374,7 @@ func newTestAction(addr common.Address, r *rand.Rand) testAction { code := make([]byte, 16) binary.BigEndian.PutUint64(code, uint64(a.args[0])) binary.BigEndian.PutUint64(code[8:], uint64(a.args[1])) - s.SetCode(addr, code) + s.SetCode(addr, code, tracing.CodeChangeUnspecified) }, args: make([]int64, 2), }, @@ -403,7 +403,7 @@ func newTestAction(addr common.Address, r *rand.Rand) testAction { // which would cause a difference in state when unrolling // the journal. (CreateContact assumes created was false prior to // invocation, and the journal rollback sets it to false). 
- s.SetCode(addr, []byte{1}) + s.SetCode(addr, []byte{1}, tracing.CodeChangeUnspecified) } }, }, @@ -731,7 +731,7 @@ func TestCopyCommitCopy(t *testing.T) { sval := common.HexToHash("bbb") state.SetBalance(addr, uint256.NewInt(42), tracing.BalanceChangeUnspecified) // Change the account trie - state.SetCode(addr, []byte("hello")) // Change an external metadata + state.SetCode(addr, []byte("hello"), tracing.CodeChangeUnspecified) // Change an external metadata state.SetState(addr, skey, sval) // Change the storage trie if balance := state.GetBalance(addr); balance.Cmp(uint256.NewInt(42)) != 0 { @@ -772,7 +772,7 @@ func TestCopyCommitCopy(t *testing.T) { t.Fatalf("second copy non-committed storage slot mismatch: have %x, want %x", val, sval) } if val := copyTwo.GetCommittedState(addr, skey); val != (common.Hash{}) { - t.Fatalf("second copy committed storage slot mismatch: have %x, want %x", val, sval) + t.Fatalf("second copy committed storage slot mismatch: have %x, want %x", val, common.Hash{}) } // Commit state, ensure states can be loaded from disk root, _ := state.Commit(0, false, false) @@ -804,7 +804,7 @@ func TestCopyCopyCommitCopy(t *testing.T) { sval := common.HexToHash("bbb") state.SetBalance(addr, uint256.NewInt(42), tracing.BalanceChangeUnspecified) // Change the account trie - state.SetCode(addr, []byte("hello")) // Change an external metadata + state.SetCode(addr, []byte("hello"), tracing.CodeChangeUnspecified) // Change an external metadata state.SetState(addr, skey, sval) // Change the storage trie if balance := state.GetBalance(addr); balance.Cmp(uint256.NewInt(42)) != 0 { @@ -859,7 +859,7 @@ func TestCopyCopyCommitCopy(t *testing.T) { t.Fatalf("third copy non-committed storage slot mismatch: have %x, want %x", val, sval) } if val := copyThree.GetCommittedState(addr, skey); val != (common.Hash{}) { - t.Fatalf("third copy committed storage slot mismatch: have %x, want %x", val, sval) + t.Fatalf("third copy committed storage slot mismatch: have %x, 
want %x", val, common.Hash{}) } } @@ -874,7 +874,7 @@ func TestCommitCopy(t *testing.T) { sval1, sval2 := common.HexToHash("b1"), common.HexToHash("b2") state.SetBalance(addr, uint256.NewInt(42), tracing.BalanceChangeUnspecified) // Change the account trie - state.SetCode(addr, []byte("hello")) // Change an external metadata + state.SetCode(addr, []byte("hello"), tracing.CodeChangeUnspecified) // Change an external metadata state.SetState(addr, skey1, sval1) // Change the storage trie if balance := state.GetBalance(addr); balance.Cmp(uint256.NewInt(42)) != 0 { @@ -912,10 +912,10 @@ func TestCommitCopy(t *testing.T) { } // Slots cached in the stateDB, available after commit if val := copied.GetState(addr, skey2); val != sval2 { - t.Fatalf("unexpected storage slot: have %x", sval1) + t.Fatalf("unexpected storage slot: have %x, want %x", val, sval2) } if val := copied.GetCommittedState(addr, skey2); val != sval2 { - t.Fatalf("unexpected storage slot: have %x", val) + t.Fatalf("unexpected storage slot: have %x, want %x", val, sval2) } } @@ -987,10 +987,10 @@ func testMissingTrieNodes(t *testing.T, scheme string) { addr := common.BytesToAddress([]byte("so")) { state.SetBalance(addr, uint256.NewInt(1), tracing.BalanceChangeUnspecified) - state.SetCode(addr, []byte{1, 2, 3}) + state.SetCode(addr, []byte{1, 2, 3}, tracing.CodeChangeUnspecified) a2 := common.BytesToAddress([]byte("another")) state.SetBalance(a2, uint256.NewInt(100), tracing.BalanceChangeUnspecified) - state.SetCode(a2, []byte{1, 2, 4}) + state.SetCode(a2, []byte{1, 2, 4}, tracing.CodeChangeUnspecified) root, _ = state.Commit(0, false, false) t.Logf("root: %x", root) // force-flush diff --git a/core/state/stateupdate.go b/core/state/stateupdate.go index 75c4ca028c..a62e2b2d2d 100644 --- a/core/state/stateupdate.go +++ b/core/state/stateupdate.go @@ -64,8 +64,10 @@ type accountUpdate struct { // execution. 
It contains information about mutated contract codes, accounts, // and storage slots, along with their original values. type stateUpdate struct { - originRoot common.Hash // hash of the state before applying mutation - root common.Hash // hash of the state after applying mutation + originRoot common.Hash // hash of the state before applying mutation + root common.Hash // hash of the state after applying mutation + blockNumber uint64 // Associated block number + accounts map[common.Hash][]byte // accounts stores mutated accounts in 'slim RLP' encoding accountsOrigin map[common.Address][]byte // accountsOrigin stores the original values of mutated accounts in 'slim RLP' encoding @@ -95,7 +97,7 @@ func (sc *stateUpdate) empty() bool { // // rawStorageKey is a flag indicating whether to use the raw storage slot key or // the hash of the slot key for constructing state update object. -func newStateUpdate(rawStorageKey bool, originRoot common.Hash, root common.Hash, deletes map[common.Hash]*accountDelete, updates map[common.Hash]*accountUpdate, nodes *trienode.MergedNodeSet) *stateUpdate { +func newStateUpdate(rawStorageKey bool, originRoot common.Hash, root common.Hash, blockNumber uint64, deletes map[common.Hash]*accountDelete, updates map[common.Hash]*accountUpdate, nodes *trienode.MergedNodeSet) *stateUpdate { var ( accounts = make(map[common.Hash][]byte) accountsOrigin = make(map[common.Address][]byte) @@ -164,6 +166,7 @@ func newStateUpdate(rawStorageKey bool, originRoot common.Hash, root common.Hash return &stateUpdate{ originRoot: originRoot, root: root, + blockNumber: blockNumber, accounts: accounts, accountsOrigin: accountsOrigin, storages: storages, diff --git a/core/state/trie_prefetcher_test.go b/core/state/trie_prefetcher_test.go index 4d1b627c4d..41349c0c0e 100644 --- a/core/state/trie_prefetcher_test.go +++ b/core/state/trie_prefetcher_test.go @@ -39,7 +39,7 @@ func filledStateDB() *StateDB { sval := common.HexToHash("bbb") state.SetBalance(addr, 
uint256.NewInt(42), tracing.BalanceChangeUnspecified) // Change the account trie - state.SetCode(addr, []byte("hello")) // Change an external metadata + state.SetCode(addr, []byte("hello"), tracing.CodeChangeUnspecified) // Change an external metadata state.SetState(addr, skey, sval) // Change the storage trie for i := 0; i < 100; i++ { sk := common.BigToHash(big.NewInt(int64(i))) @@ -81,7 +81,7 @@ func TestVerklePrefetcher(t *testing.T) { sval := testrand.Hash() state.SetBalance(addr, uint256.NewInt(42), tracing.BalanceChangeUnspecified) // Change the account trie - state.SetCode(addr, []byte("hello")) // Change an external metadata + state.SetCode(addr, []byte("hello"), tracing.CodeChangeUnspecified) // Change an external metadata state.SetState(addr, skey, sval) // Change the storage trie root, _ := state.Commit(0, true, false) diff --git a/core/state_transition.go b/core/state_transition.go index 681c300696..bf5ac07636 100644 --- a/core/state_transition.go +++ b/core/state_transition.go @@ -164,8 +164,12 @@ type Message struct { // or the state prefetching. SkipNonceChecks bool - // When SkipFromEOACheck is true, the message sender is not checked to be an EOA. - SkipFromEOACheck bool + // When set, the message is not treated as a transaction, and certain + // transaction-specific checks are skipped: + // + // - From is not verified to be an EOA + // - GasLimit is not checked against the protocol defined tx gaslimit + SkipTransactionChecks bool } // TransactionToMessage converts a transaction into a Message. 
@@ -182,7 +186,7 @@ func TransactionToMessage(tx *types.Transaction, s types.Signer, baseFee *big.In AccessList: tx.AccessList(), SetCodeAuthorizations: tx.SetCodeAuthorizations(), SkipNonceChecks: false, - SkipFromEOACheck: false, + SkipTransactionChecks: false, BlobHashes: tx.BlobHashes(), BlobGasFeeCap: tx.BlobGasFeeCap(), } @@ -320,7 +324,12 @@ func (st *stateTransition) preCheck() error { msg.From.Hex(), stNonce) } } - if !msg.SkipFromEOACheck { + isOsaka := st.evm.ChainConfig().IsOsaka(st.evm.Context.BlockNumber, st.evm.Context.Time) + if !msg.SkipTransactionChecks { + // Verify tx gas limit does not exceed EIP-7825 cap. + if isOsaka && msg.GasLimit > params.MaxTxGas { + return fmt.Errorf("%w (cap: %d, tx: %d)", ErrGasLimitTooHigh, params.MaxTxGas, msg.GasLimit) + } // Make sure the sender is an EOA code := st.state.GetCode(msg.From) _, delegated := types.ParseDelegation(code) @@ -354,7 +363,6 @@ func (st *stateTransition) preCheck() error { } } // Check the blob version validity - isOsaka := st.evm.ChainConfig().IsOsaka(st.evm.Context.BlockNumber, st.evm.Context.Time) if msg.BlobHashes != nil { // The to field of a blob tx type is mandatory, and a `BlobTx` transaction internally // has it as a non-nillable value, so any msg derived from blob transaction has it non-nil. @@ -398,10 +406,6 @@ func (st *stateTransition) preCheck() error { return fmt.Errorf("%w (sender %v)", ErrEmptyAuthList, msg.From) } } - // Verify tx gas limit does not exceed EIP-7825 cap. - if isOsaka && msg.GasLimit > params.MaxTxGas { - return fmt.Errorf("%w (cap: %d, tx: %d)", ErrGasLimitTooHigh, params.MaxTxGas, msg.GasLimit) - } return st.buyGas() } @@ -617,12 +621,12 @@ func (st *stateTransition) applyAuthorization(auth *types.SetCodeAuthorization) st.state.SetNonce(authority, auth.Nonce+1, tracing.NonceChangeAuthorization) if auth.Address == (common.Address{}) { // Delegation to zero address means clear. 
- st.state.SetCode(authority, nil) + st.state.SetCode(authority, nil, tracing.CodeChangeAuthorizationClear) return nil } // Otherwise install delegation to auth.Address. - st.state.SetCode(authority, types.AddressToDelegation(auth.Address)) + st.state.SetCode(authority, types.AddressToDelegation(auth.Address), tracing.CodeChangeAuthorization) return nil } diff --git a/core/stateless/encoding.go b/core/stateless/encoding.go index 5f4cb0ea3c..5c43159e66 100644 --- a/core/stateless/encoding.go +++ b/core/stateless/encoding.go @@ -19,20 +19,21 @@ package stateless import ( "io" + "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/rlp" ) -// toExtWitness converts our internal witness representation to the consensus one. -func (w *Witness) toExtWitness() *extWitness { - ext := &extWitness{ +// ToExtWitness converts our internal witness representation to the consensus one. +func (w *Witness) ToExtWitness() *ExtWitness { + ext := &ExtWitness{ Headers: w.Headers, } - ext.Codes = make([][]byte, 0, len(w.Codes)) + ext.Codes = make([]hexutil.Bytes, 0, len(w.Codes)) for code := range w.Codes { ext.Codes = append(ext.Codes, []byte(code)) } - ext.State = make([][]byte, 0, len(w.State)) + ext.State = make([]hexutil.Bytes, 0, len(w.State)) for node := range w.State { ext.State = append(ext.State, []byte(node)) } @@ -40,7 +41,7 @@ func (w *Witness) toExtWitness() *extWitness { } // fromExtWitness converts the consensus witness format into our internal one. -func (w *Witness) fromExtWitness(ext *extWitness) error { +func (w *Witness) fromExtWitness(ext *ExtWitness) error { w.Headers = ext.Headers w.Codes = make(map[string]struct{}, len(ext.Codes)) @@ -56,21 +57,22 @@ func (w *Witness) fromExtWitness(ext *extWitness) error { // EncodeRLP serializes a witness as RLP. 
func (w *Witness) EncodeRLP(wr io.Writer) error { - return rlp.Encode(wr, w.toExtWitness()) + return rlp.Encode(wr, w.ToExtWitness()) } // DecodeRLP decodes a witness from RLP. func (w *Witness) DecodeRLP(s *rlp.Stream) error { - var ext extWitness + var ext ExtWitness if err := s.Decode(&ext); err != nil { return err } return w.fromExtWitness(&ext) } -// extWitness is a witness RLP encoding for transferring across clients. -type extWitness struct { - Headers []*types.Header - Codes [][]byte - State [][]byte +// ExtWitness is a witness RLP encoding for transferring across clients. +type ExtWitness struct { + Headers []*types.Header `json:"headers"` + Codes []hexutil.Bytes `json:"codes"` + State []hexutil.Bytes `json:"state"` + Keys []hexutil.Bytes `json:"keys"` } diff --git a/core/stateless/stats.go b/core/stateless/stats.go index adc898929b..94f5587f99 100644 --- a/core/stateless/stats.go +++ b/core/stateless/stats.go @@ -17,76 +17,37 @@ package stateless import ( + "encoding/json" "maps" "slices" "sort" + "strconv" "strings" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/metrics" ) -var ( - accountTrieDepthAvg = metrics.NewRegisteredGauge("witness/trie/account/depth/avg", nil) - accountTrieDepthMin = metrics.NewRegisteredGauge("witness/trie/account/depth/min", nil) - accountTrieDepthMax = metrics.NewRegisteredGauge("witness/trie/account/depth/max", nil) +var accountTrieLeavesAtDepth [16]*metrics.Counter +var storageTrieLeavesAtDepth [16]*metrics.Counter - storageTrieDepthAvg = metrics.NewRegisteredGauge("witness/trie/storage/depth/avg", nil) - storageTrieDepthMin = metrics.NewRegisteredGauge("witness/trie/storage/depth/min", nil) - storageTrieDepthMax = metrics.NewRegisteredGauge("witness/trie/storage/depth/max", nil) -) - -// depthStats tracks min/avg/max statistics for trie access depths. 
-type depthStats struct { - totalDepth int64 - samples int64 - minDepth int64 - maxDepth int64 -} - -// newDepthStats creates a new depthStats with default values. -func newDepthStats() *depthStats { - return &depthStats{minDepth: -1} -} - -// add records a new depth sample. -func (d *depthStats) add(n int64) { - if n < 0 { - return +func init() { + for i := 0; i < 16; i++ { + accountTrieLeavesAtDepth[i] = metrics.NewRegisteredCounter("witness/trie/account/leaves/depth_"+strconv.Itoa(i), nil) + storageTrieLeavesAtDepth[i] = metrics.NewRegisteredCounter("witness/trie/storage/leaves/depth_"+strconv.Itoa(i), nil) } - d.totalDepth += n - d.samples++ - - if d.minDepth == -1 || n < d.minDepth { - d.minDepth = n - } - if n > d.maxDepth { - d.maxDepth = n - } -} - -// report uploads the collected statistics into the provided gauges. -func (d *depthStats) report(maxGauge, minGauge, avgGauge *metrics.Gauge) { - if d.samples == 0 { - return - } - maxGauge.Update(d.maxDepth) - minGauge.Update(d.minDepth) - avgGauge.Update(d.totalDepth / d.samples) } // WitnessStats aggregates statistics for account and storage trie accesses. type WitnessStats struct { - accountTrie *depthStats - storageTrie *depthStats + accountTrieLeaves [16]int64 + storageTrieLeaves [16]int64 } // NewWitnessStats creates a new WitnessStats collector. func NewWitnessStats() *WitnessStats { - return &WitnessStats{ - accountTrie: newDepthStats(), - storageTrie: newDepthStats(), - } + return &WitnessStats{} } // Add records trie access depths from the given node paths. @@ -102,16 +63,30 @@ func (s *WitnessStats) Add(nodes map[string][]byte, owner common.Hash) { // The last path is always a leaf. 
if i == len(paths)-1 || !strings.HasPrefix(paths[i+1], paths[i]) { if owner == (common.Hash{}) { - s.accountTrie.add(int64(len(path))) + s.accountTrieLeaves[len(path)] += 1 } else { - s.storageTrie.add(int64(len(path))) + s.storageTrieLeaves[len(path)] += 1 } } } } // ReportMetrics reports the collected statistics to the global metrics registry. -func (s *WitnessStats) ReportMetrics() { - s.accountTrie.report(accountTrieDepthMax, accountTrieDepthMin, accountTrieDepthAvg) - s.storageTrie.report(storageTrieDepthMax, storageTrieDepthMin, storageTrieDepthAvg) +func (s *WitnessStats) ReportMetrics(blockNumber uint64) { + // Encode the metrics as JSON for easier consumption + accountLeavesJson, _ := json.Marshal(s.accountTrieLeaves) + storageLeavesJson, _ := json.Marshal(s.storageTrieLeaves) + + // Log account trie depth statistics + log.Info("Account trie depth stats", + "block", blockNumber, + "leavesAtDepth", string(accountLeavesJson)) + log.Info("Storage trie depth stats", + "block", blockNumber, + "leavesAtDepth", string(storageLeavesJson)) + + for i := 0; i < 16; i++ { + accountTrieLeavesAtDepth[i].Inc(s.accountTrieLeaves[i]) + storageTrieLeavesAtDepth[i].Inc(s.storageTrieLeaves[i]) + } } diff --git a/core/stateless/stats_test.go b/core/stateless/stats_test.go index 51c78cc9c9..e77084df5d 100644 --- a/core/stateless/stats_test.go +++ b/core/stateless/stats_test.go @@ -24,27 +24,32 @@ import ( func TestWitnessStatsAdd(t *testing.T) { tests := []struct { - name string - nodes map[string][]byte - owner common.Hash - expectedAccountDepth int64 - expectedStorageDepth int64 + name string + nodes map[string][]byte + owner common.Hash + expectedAccountLeaves map[int64]int64 + expectedStorageLeaves map[int64]int64 }{ { - name: "empty nodes", - nodes: map[string][]byte{}, - owner: common.Hash{}, - expectedAccountDepth: 0, - expectedStorageDepth: 0, + name: "empty nodes", + nodes: map[string][]byte{}, + owner: common.Hash{}, + }, + { + name: "single account trie leaf at depth 
0", + nodes: map[string][]byte{ + "": []byte("data"), + }, + owner: common.Hash{}, + expectedAccountLeaves: map[int64]int64{0: 1}, }, { name: "single account trie leaf", nodes: map[string][]byte{ "abc": []byte("data"), }, - owner: common.Hash{}, - expectedAccountDepth: 3, - expectedStorageDepth: 0, + owner: common.Hash{}, + expectedAccountLeaves: map[int64]int64{3: 1}, }, { name: "account trie with internal nodes", @@ -53,9 +58,8 @@ func TestWitnessStatsAdd(t *testing.T) { "ab": []byte("data2"), "abc": []byte("data3"), }, - owner: common.Hash{}, - expectedAccountDepth: 3, // Only "abc" is a leaf - expectedStorageDepth: 0, + owner: common.Hash{}, + expectedAccountLeaves: map[int64]int64{3: 1}, // Only "abc" is a leaf }, { name: "multiple account trie branches", @@ -67,9 +71,8 @@ func TestWitnessStatsAdd(t *testing.T) { "bc": []byte("data5"), "bcd": []byte("data6"), }, - owner: common.Hash{}, - expectedAccountDepth: 6, // "abc" (3) + "bcd" (3) = 6 - expectedStorageDepth: 0, + owner: common.Hash{}, + expectedAccountLeaves: map[int64]int64{3: 2}, // "abc" (3) + "bcd" (3) }, { name: "siblings are all leaves", @@ -78,9 +81,8 @@ func TestWitnessStatsAdd(t *testing.T) { "ab": []byte("data2"), "ac": []byte("data3"), }, - owner: common.Hash{}, - expectedAccountDepth: 6, // 2 + 2 + 2 = 6 - expectedStorageDepth: 0, + owner: common.Hash{}, + expectedAccountLeaves: map[int64]int64{2: 3}, }, { name: "storage trie leaves", @@ -90,9 +92,8 @@ func TestWitnessStatsAdd(t *testing.T) { "123": []byte("data3"), "124": []byte("data4"), }, - owner: common.HexToHash("0x1234"), - expectedAccountDepth: 0, - expectedStorageDepth: 6, // "123" (3) + "124" (3) = 6 + owner: common.HexToHash("0x1234"), + expectedStorageLeaves: map[int64]int64{3: 2}, // "123" (3) + "124" (3) }, { name: "complex trie structure", @@ -107,9 +108,8 @@ func TestWitnessStatsAdd(t *testing.T) { "235": []byte("data8"), "3": []byte("data9"), }, - owner: common.Hash{}, - expectedAccountDepth: 13, // "123"(3) + "124"(3) + 
"234"(3) + "235"(3) + "3"(1) = 13 - expectedStorageDepth: 0, + owner: common.Hash{}, + expectedAccountLeaves: map[int64]int64{1: 1, 3: 4}, // "123"(3) + "124"(3) + "234"(3) + "235"(3) + "3"(1) }, } @@ -118,14 +118,23 @@ func TestWitnessStatsAdd(t *testing.T) { stats := NewWitnessStats() stats.Add(tt.nodes, tt.owner) + var expectedAccountTrieLeaves [16]int64 + for depth, count := range tt.expectedAccountLeaves { + expectedAccountTrieLeaves[depth] = count + } + var expectedStorageTrieLeaves [16]int64 + for depth, count := range tt.expectedStorageLeaves { + expectedStorageTrieLeaves[depth] = count + } + // Check account trie depth - if stats.accountTrie.totalDepth != tt.expectedAccountDepth { - t.Errorf("Account trie total depth = %d, want %d", stats.accountTrie.totalDepth, tt.expectedAccountDepth) + if stats.accountTrieLeaves != expectedAccountTrieLeaves { + t.Errorf("Account trie total depth = %v, want %v", stats.accountTrieLeaves, expectedAccountTrieLeaves) } // Check storage trie depth - if stats.storageTrie.totalDepth != tt.expectedStorageDepth { - t.Errorf("Storage trie total depth = %d, want %d", stats.storageTrie.totalDepth, tt.expectedStorageDepth) + if stats.storageTrieLeaves != expectedStorageTrieLeaves { + t.Errorf("Storage trie total depth = %v, want %v", stats.storageTrieLeaves, expectedStorageTrieLeaves) } }) } @@ -144,11 +153,10 @@ func TestWitnessStatsMinMax(t *testing.T) { }, common.Hash{}) // Only "abcde" is a leaf (depth 5) - if stats.accountTrie.minDepth != 5 { - t.Errorf("Account trie min depth = %d, want %d", stats.accountTrie.minDepth, 5) - } - if stats.accountTrie.maxDepth != 5 { - t.Errorf("Account trie max depth = %d, want %d", stats.accountTrie.maxDepth, 5) + for i, v := range stats.accountTrieLeaves { + if v != 0 && i != 5 { + t.Errorf("leaf found at invalid depth %d", i) + } } // Add more leaves with different depths @@ -158,11 +166,10 @@ func TestWitnessStatsMinMax(t *testing.T) { }, common.Hash{}) // Now we have leaves at depths 1, 2, 
and 5 - if stats.accountTrie.minDepth != 1 { - t.Errorf("Account trie min depth after update = %d, want %d", stats.accountTrie.minDepth, 1) - } - if stats.accountTrie.maxDepth != 5 { - t.Errorf("Account trie max depth after update = %d, want %d", stats.accountTrie.maxDepth, 5) + for i, v := range stats.accountTrieLeaves { + if v != 0 && (i != 5 && i != 2 && i != 1) { + t.Errorf("leaf found at invalid depth %d", i) + } } } @@ -179,7 +186,12 @@ func TestWitnessStatsAverage(t *testing.T) { // All are leaves: 2 + 2 + 3 + 4 = 11 total, 4 samples expectedAvg := int64(11) / int64(4) - actualAvg := stats.accountTrie.totalDepth / stats.accountTrie.samples + var actualAvg, totalSamples int64 + for i, c := range stats.accountTrieLeaves { + actualAvg += c * int64(i) + totalSamples += c + } + actualAvg = actualAvg / totalSamples if actualAvg != expectedAvg { t.Errorf("Account trie average depth = %d, want %d", actualAvg, expectedAvg) diff --git a/core/stateless/witness.go b/core/stateless/witness.go index 371a128f48..588c895a2f 100644 --- a/core/stateless/witness.go +++ b/core/stateless/witness.go @@ -100,6 +100,10 @@ func (w *Witness) AddState(nodes map[string][]byte) { } } +func (w *Witness) AddKey() { + panic("not yet implemented") +} + // Copy deep-copies the witness object. Witness.Block isn't deep-copied as it // is never mutated by Witness func (w *Witness) Copy() *Witness { diff --git a/core/tracing/CHANGELOG.md b/core/tracing/CHANGELOG.md index a14e123d99..a94fa81b55 100644 --- a/core/tracing/CHANGELOG.md +++ b/core/tracing/CHANGELOG.md @@ -4,6 +4,27 @@ All notable changes to the tracing interface will be documented in this file. ## [Unreleased] +### Deprecated methods + +- `OnCodeChange(addr common.Address, prevCodeHash common.Hash, prevCode []byte, codeHash common.Hash, code []byte)`: This hook is deprecated in favor of `OnCodeChangeV2` which includes a reason parameter ([#32525](https://github.com/ethereum/go-ethereum/pull/32525)). 
+ +### New methods + +- `OnCodeChangeV2(addr common.Address, prevCodeHash common.Hash, prevCode []byte, codeHash common.Hash, code []byte, reason CodeChangeReason)`: This hook is called when a code change occurs. It is a successor to `OnCodeChange` with an additional reason parameter ([#32525](https://github.com/ethereum/go-ethereum/pull/32525)). + +### New types + +- `CodeChangeReason` is a new type used to provide a reason for code changes. It includes various reasons such as contract creation, genesis initialization, EIP-7702 authorization, self-destruct, and revert operations ([#32525](https://github.com/ethereum/go-ethereum/pull/32525)). + +## [v1.15.4](https://github.com/ethereum/go-ethereum/releases/tag/v1.15.4) + +### Modified types + +- `GasChangeReason` has been extended with auto-generated String() methods for better debugging and logging ([#31234](https://github.com/ethereum/go-ethereum/pull/31234)). +- `NonceChangeReason` has been extended with auto-generated String() methods for better debugging and logging ([#31234](https://github.com/ethereum/go-ethereum/pull/31234)). + +## [v1.15.0](https://github.com/ethereum/go-ethereum/releases/tag/v1.15.0) + The tracing interface has been extended with backwards-compatible changes to support more use-cases and simplify tracer code. The most notable change is a state journaling library which emits reverse events when a call is reverted. ### Deprecated methods @@ -23,8 +44,13 @@ The tracing interface has been extended with backwards-compatible changes to sup ### Modified types -- `VMContext.StateDB` has been extended with `GetCodeHash(addr common.Address) common.Hash` method used to retrieve the code hash an account. +- `VMContext.StateDB` has been extended with the following method: + - `GetCodeHash(addr common.Address) common.Hash` method used to retrieve the code hash of an account. 
+- `BlockEvent` has been modified: + - The `TD` (Total Difficulty) field has been removed ([#30744](https://github.com/ethereum/go-ethereum/pull/30744)). - `BalanceChangeReason` has been extended with the `BalanceChangeRevert` reason. More on that below. +- `GasChangeReason` has been extended with the following reason: + - `GasChangeTxDataFloor` is the amount of extra gas the transaction has to pay to reach the minimum gas requirement for the transaction data. This change will always be a negative change. ### State journaling @@ -49,6 +75,26 @@ The state changes that are covered by the journaling library are: - `OnCodeChange` - `OnStorageChange` +## [v1.14.12](https://github.com/ethereum/go-ethereum/releases/tag/v1.14.12) + +This release contains a change in behavior for `OnCodeChange` hook and an extension to the StateDB interface. + +### Modified types + +- `VMContext.StateDB` has been extended with the following method: + - `GetTransientState(addr common.Address, slot common.Hash) common.Hash` method used to access contract transient storage ([#30531](https://github.com/ethereum/go-ethereum/pull/30531)). + +### `OnCodeChange` change + +The `OnCodeChange` hook is now called when the code of a contract is removed due to a selfdestruct. Previously, no code change was emitted on such occasions. + +## [v1.14.10](https://github.com/ethereum/go-ethereum/releases/tag/v1.14.10) + +### Modified types + +- `OpContext` has been extended with the following method: + - `ContractCode() []byte` provides access to the contract bytecode within the OpContext interface ([#30466](https://github.com/ethereum/go-ethereum/pull/30466)). + ## [v1.14.9](https://github.com/ethereum/go-ethereum/releases/tag/v1.14.9) ### Modified types @@ -56,13 +102,6 @@ The state changes that are covered by the journaling library are: - `GasChangeReason` has been extended with the following reasons which will be enabled only post-Verkle. 
There shouldn't be any gas changes with those reasons prior to the fork. - `GasChangeWitnessContractCollisionCheck` flags the event of adding to the witness when checking for contract address collision. -## [v1.14.12] - -This release contains a change in behavior for `OnCodeChange` hook. - -### `OnCodeChange` change - -The `OnCodeChange` hook is now called when the code of a contract is removed due to a selfdestruct. Previously, no code change was emitted on such occasions. ## [v1.14.4] @@ -148,7 +187,12 @@ The hooks `CaptureStart` and `CaptureEnd` have been removed. These hooks signale - `CaptureState` -> `OnOpcode(pc uint64, op byte, gas, cost uint64, scope tracing.OpContext, rData []byte, depth int, err error)`. `op` is of type `byte` which can be cast to `vm.OpCode` when necessary. A `*vm.ScopeContext` is not passed anymore. It is replaced by `tracing.OpContext` which offers access to the memory, stack and current contract. - `CaptureFault` -> `OnFault(pc uint64, op byte, gas, cost uint64, scope tracing.OpContext, depth int, err error)`. Similar to above. 
-[unreleased]: https://github.com/ethereum/go-ethereum/compare/v1.14.8...master -[v1.14.0]: https://github.com/ethereum/go-ethereum/releases/tag/v1.14.0 -[v1.14.3]: https://github.com/ethereum/go-ethereum/releases/tag/v1.14.3 +[unreleased]: https://github.com/ethereum/go-ethereum/compare/v1.16.3...master +[v1.15.4]: https://github.com/ethereum/go-ethereum/releases/tag/v1.15.4 +[v1.15.0]: https://github.com/ethereum/go-ethereum/releases/tag/v1.15.0 +[v1.14.12]: https://github.com/ethereum/go-ethereum/releases/tag/v1.14.12 +[v1.14.10]: https://github.com/ethereum/go-ethereum/releases/tag/v1.14.10 +[v1.14.9]: https://github.com/ethereum/go-ethereum/releases/tag/v1.14.9 [v1.14.4]: https://github.com/ethereum/go-ethereum/releases/tag/v1.14.4 +[v1.14.3]: https://github.com/ethereum/go-ethereum/releases/tag/v1.14.3 +[v1.14.0]: https://github.com/ethereum/go-ethereum/releases/tag/v1.14.0 diff --git a/core/tracing/gen_code_change_reason_stringer.go b/core/tracing/gen_code_change_reason_stringer.go new file mode 100644 index 0000000000..9372954063 --- /dev/null +++ b/core/tracing/gen_code_change_reason_stringer.go @@ -0,0 +1,29 @@ +// Code generated by "stringer -type=CodeChangeReason -trimprefix=CodeChange -output gen_code_change_reason_stringer.go"; DO NOT EDIT. + +package tracing + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. 
+ var x [1]struct{} + _ = x[CodeChangeUnspecified-0] + _ = x[CodeChangeContractCreation-1] + _ = x[CodeChangeGenesis-2] + _ = x[CodeChangeAuthorization-3] + _ = x[CodeChangeAuthorizationClear-4] + _ = x[CodeChangeSelfDestruct-5] + _ = x[CodeChangeRevert-6] +} + +const _CodeChangeReason_name = "UnspecifiedContractCreationGenesisAuthorizationAuthorizationClearSelfDestructRevert" + +var _CodeChangeReason_index = [...]uint8{0, 11, 27, 34, 47, 65, 77, 83} + +func (i CodeChangeReason) String() string { + if i >= CodeChangeReason(len(_CodeChangeReason_index)-1) { + return "CodeChangeReason(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _CodeChangeReason_name[_CodeChangeReason_index[i]:_CodeChangeReason_index[i+1]] +} diff --git a/core/tracing/hooks.go b/core/tracing/hooks.go index 0485f7a3eb..8e50dc3d8f 100644 --- a/core/tracing/hooks.go +++ b/core/tracing/hooks.go @@ -177,6 +177,9 @@ type ( // CodeChangeHook is called when the code of an account changes. CodeChangeHook = func(addr common.Address, prevCodeHash common.Hash, prevCode []byte, codeHash common.Hash, code []byte) + // CodeChangeHookV2 is called when the code of an account changes. + CodeChangeHookV2 = func(addr common.Address, prevCodeHash common.Hash, prevCode []byte, codeHash common.Hash, code []byte, reason CodeChangeReason) + // StorageChangeHook is called when the storage of an account changes. StorageChangeHook = func(addr common.Address, slot common.Hash, prev, new common.Hash) @@ -211,6 +214,7 @@ type Hooks struct { OnNonceChange NonceChangeHook OnNonceChangeV2 NonceChangeHookV2 OnCodeChange CodeChangeHook + OnCodeChangeV2 CodeChangeHookV2 OnStorageChange StorageChangeHook OnLog LogHook // Block hash read @@ -372,3 +376,31 @@ const ( // It is only emitted when the tracer has opted in to use the journaling wrapper (WrapWithJournal). NonceChangeRevert NonceChangeReason = 6 ) + +// CodeChangeReason is used to indicate the reason for a code change. 
+type CodeChangeReason byte + +//go:generate go run golang.org/x/tools/cmd/stringer -type=CodeChangeReason -trimprefix=CodeChange -output gen_code_change_reason_stringer.go + +const ( + CodeChangeUnspecified CodeChangeReason = 0 + + // CodeChangeContractCreation is when a new contract is deployed via CREATE/CREATE2 operations. + CodeChangeContractCreation CodeChangeReason = 1 + + // CodeChangeGenesis is when contract code is set during blockchain genesis or initial setup. + CodeChangeGenesis CodeChangeReason = 2 + + // CodeChangeAuthorization is when code is set via EIP-7702 Set Code Authorization. + CodeChangeAuthorization CodeChangeReason = 3 + + // CodeChangeAuthorizationClear is when EIP-7702 delegation is cleared by setting to zero address. + CodeChangeAuthorizationClear CodeChangeReason = 4 + + // CodeChangeSelfDestruct is when contract code is cleared due to self-destruct. + CodeChangeSelfDestruct CodeChangeReason = 5 + + // CodeChangeRevert is emitted when the code is reverted back to a previous value due to call failure. + // It is only emitted when the tracer has opted in to use the journaling wrapper (WrapWithJournal). 
+ CodeChangeRevert CodeChangeReason = 6 +) diff --git a/core/tracing/journal.go b/core/tracing/journal.go index a402f1ac09..62a70d6c27 100644 --- a/core/tracing/journal.go +++ b/core/tracing/journal.go @@ -42,12 +42,15 @@ func WrapWithJournal(hooks *Hooks) (*Hooks, error) { return nil, errors.New("wrapping nil tracer") } // No state change to journal, return the wrapped hooks as is - if hooks.OnBalanceChange == nil && hooks.OnNonceChange == nil && hooks.OnNonceChangeV2 == nil && hooks.OnCodeChange == nil && hooks.OnStorageChange == nil { + if hooks.OnBalanceChange == nil && hooks.OnNonceChange == nil && hooks.OnNonceChangeV2 == nil && hooks.OnCodeChange == nil && hooks.OnCodeChangeV2 == nil && hooks.OnStorageChange == nil { return hooks, nil } if hooks.OnNonceChange != nil && hooks.OnNonceChangeV2 != nil { return nil, errors.New("cannot have both OnNonceChange and OnNonceChangeV2") } + if hooks.OnCodeChange != nil && hooks.OnCodeChangeV2 != nil { + return nil, errors.New("cannot have both OnCodeChange and OnCodeChangeV2") + } // Create a new Hooks instance and copy all hooks wrapped := *hooks @@ -72,6 +75,9 @@ func WrapWithJournal(hooks *Hooks) (*Hooks, error) { if hooks.OnCodeChange != nil { wrapped.OnCodeChange = j.OnCodeChange } + if hooks.OnCodeChangeV2 != nil { + wrapped.OnCodeChangeV2 = j.OnCodeChangeV2 + } if hooks.OnStorageChange != nil { wrapped.OnStorageChange = j.OnStorageChange } @@ -174,6 +180,19 @@ func (j *journal) OnCodeChange(addr common.Address, prevCodeHash common.Hash, pr } } +func (j *journal) OnCodeChangeV2(addr common.Address, prevCodeHash common.Hash, prevCode []byte, codeHash common.Hash, code []byte, reason CodeChangeReason) { + j.entries = append(j.entries, codeChange{ + addr: addr, + prevCodeHash: prevCodeHash, + prevCode: prevCode, + newCodeHash: codeHash, + newCode: code, + }) + if j.hooks.OnCodeChangeV2 != nil { + j.hooks.OnCodeChangeV2(addr, prevCodeHash, prevCode, codeHash, code, reason) + } +} + func (j *journal) 
OnStorageChange(addr common.Address, slot common.Hash, prev, new common.Hash) { j.entries = append(j.entries, storageChange{addr: addr, slot: slot, prev: prev, new: new}) if j.hooks.OnStorageChange != nil { @@ -225,7 +244,9 @@ func (n nonceChange) revert(hooks *Hooks) { } func (c codeChange) revert(hooks *Hooks) { - if hooks.OnCodeChange != nil { + if hooks.OnCodeChangeV2 != nil { + hooks.OnCodeChangeV2(c.addr, c.newCodeHash, c.newCode, c.prevCodeHash, c.prevCode, CodeChangeRevert) + } else if hooks.OnCodeChange != nil { hooks.OnCodeChange(c.addr, c.newCodeHash, c.newCode, c.prevCodeHash, c.prevCode) } } diff --git a/core/tracing/journal_test.go b/core/tracing/journal_test.go index 99447e1e1d..e00447f5f3 100644 --- a/core/tracing/journal_test.go +++ b/core/tracing/journal_test.go @@ -23,6 +23,7 @@ import ( "testing" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/crypto" ) type testTracer struct { @@ -56,6 +57,11 @@ func (t *testTracer) OnCodeChange(addr common.Address, prevCodeHash common.Hash, t.code = code } +func (t *testTracer) OnCodeChangeV2(addr common.Address, prevCodeHash common.Hash, prevCode []byte, codeHash common.Hash, code []byte, reason CodeChangeReason) { + t.t.Logf("OnCodeChangeV2(%v, %v -> %v, %v)", addr, prevCodeHash, codeHash, reason) + t.code = code +} + func (t *testTracer) OnStorageChange(addr common.Address, slot common.Hash, prev common.Hash, new common.Hash) { t.t.Logf("OnStorageCodeChange(%v, %v, %v -> %v)", addr, slot, prev, new) if t.storage == nil { @@ -232,6 +238,27 @@ func TestOnNonceChangeV2(t *testing.T) { } } +func TestOnCodeChangeV2(t *testing.T) { + tr := &testTracer{t: t} + wr, err := WrapWithJournal(&Hooks{OnCodeChangeV2: tr.OnCodeChangeV2}) + if err != nil { + t.Fatalf("failed to wrap test tracer: %v", err) + } + + addr := common.HexToAddress("0x1234") + code := []byte{1, 2, 3} + { + wr.OnEnter(2, 0, addr, addr, nil, 1000, big.NewInt(0)) + wr.OnCodeChangeV2(addr, common.Hash{}, nil, 
crypto.Keccak256Hash(code), code, CodeChangeContractCreation) + wr.OnExit(2, nil, 100, nil, true) + } + + // After revert, code should be nil + if tr.code != nil { + t.Fatalf("unexpected code after revert: %v", tr.code) + } +} + func TestAllHooksCalled(t *testing.T) { tracer := newTracerAllHooks() hooks := tracer.hooks() @@ -253,10 +280,6 @@ func TestAllHooksCalled(t *testing.T) { if field.Type.Kind() != reflect.Func { continue } - // Skip non-hooks, i.e. Copy - if field.Name == "copy" { - continue - } // Skip if field is not set if wrappedValue.Field(i).IsNil() { continue @@ -298,6 +321,7 @@ func newTracerAllHooks() *tracerAllHooks { t.hooksCalled[hooksType.Field(i).Name] = false } delete(t.hooksCalled, "OnNonceChange") + delete(t.hooksCalled, "OnCodeChange") return t } @@ -322,7 +346,7 @@ func (t *tracerAllHooks) hooks() *Hooks { hooksValue := reflect.ValueOf(h).Elem() for i := 0; i < hooksValue.NumField(); i++ { field := hooksValue.Type().Field(i) - if field.Name == "OnNonceChange" { + if field.Name == "OnNonceChange" || field.Name == "OnCodeChange" { continue } hookMethod := reflect.MakeFunc(field.Type, func(args []reflect.Value) []reflect.Value { diff --git a/core/txpool/blobpool/blobpool.go b/core/txpool/blobpool/blobpool.go index 64ee3fcd9a..bfaf4d5b8e 100644 --- a/core/txpool/blobpool/blobpool.go +++ b/core/txpool/blobpool/blobpool.go @@ -21,12 +21,15 @@ import ( "container/heap" "errors" "fmt" + "maps" "math" "math/big" "os" "path/filepath" + "slices" "sort" "sync" + "sync/atomic" "time" "github.com/ethereum/go-ethereum/common" @@ -55,11 +58,17 @@ const ( // tiny overflows causing all txs to move a shelf higher, wasting disk space. txAvgSize = 4 * 1024 - // txMaxSize is the maximum size a single transaction can have, outside - // the included blobs. Since blob transactions are pulled instead of pushed, - // and only a small metadata is kept in ram, the rest is on disk, there is - // no critical limit that should be enforced. 
Still, capping it to some sane - // limit can never hurt. + // txBlobOverhead is an approximation of the overhead that an additional blob + // has on transaction size. This is added to the slotter to avoid tiny + // overflows causing all txs to move a shelf higher, wasting disk space. A + // small buffer is added to the proof overhead. + txBlobOverhead = uint32(kzg4844.CellProofsPerBlob*len(kzg4844.Proof{}) + 64) + + // txMaxSize is the maximum size a single transaction can have, including the + // blobs. Since blob transactions are pulled instead of pushed, and only a + // small metadata is kept in ram, the rest is on disk, there is no critical + // limit that should be enforced. Still, capping it to some sane limit can + // never hurt, which is aligned with maxBlobsPerTx constraint enforced internally. txMaxSize = 1024 * 1024 // maxBlobsPerTx is the maximum number of blobs that a single transaction can @@ -83,6 +92,15 @@ const ( // limboedTransactionStore is the subfolder containing the currently included // but not yet finalized transaction blobs. limboedTransactionStore = "limbo" + + // storeVersion is the current slotter layout used for the billy.Database + // store. + storeVersion = 1 + + // conversionTimeWindow defines the period after the Osaka fork during which + // the pool will still accept and convert legacy blob transactions. After this + // window, all legacy blob transactions will be rejected. 
+ conversionTimeWindow = time.Hour * 2 ) // blobTxMeta is the minimal subset of types.BlobTx necessary to validate and @@ -92,6 +110,7 @@ const ( type blobTxMeta struct { hash common.Hash // Transaction hash to maintain the lookup table vhashes []common.Hash // Blob versioned hashes to maintain the lookup table + version byte // Blob transaction version to determine proof type id uint64 // Storage ID in the pool's persistent store storageSize uint32 // Byte size in the pool's persistent store @@ -115,10 +134,16 @@ type blobTxMeta struct { // newBlobTxMeta retrieves the indexed metadata fields from a blob transaction // and assembles a helper struct to track in memory. +// Requires the transaction to have a sidecar (or that we introduce a special version tag for no-sidecar). func newBlobTxMeta(id uint64, size uint64, storageSize uint32, tx *types.Transaction) *blobTxMeta { + if tx.BlobTxSidecar() == nil { + // This should never happen, as the pool only admits blob transactions with a sidecar + panic("missing blob tx sidecar") + } meta := &blobTxMeta{ hash: tx.Hash(), vhashes: tx.BlobHashes(), + version: tx.BlobTxSidecar().Version, id: id, storageSize: storageSize, size: size, @@ -312,12 +337,13 @@ type BlobPool struct { stored uint64 // Useful data size of all transactions on disk limbo *limbo // Persistent data store for the non-finalized blobs - signer types.Signer // Transaction signer to use for sender recovery - chain BlockChain // Chain object to access the state through + signer types.Signer // Transaction signer to use for sender recovery + chain BlockChain // Chain object to access the state through + cQueue *conversionQueue // The queue for performing legacy sidecar conversion (TODO: remove after Osaka) - head *types.Header // Current head of the chain - state *state.StateDB // Current state at the head of the chain - gasTip *uint256.Int // Currently accepted minimum gas tip + head atomic.Pointer[types.Header] // Current head of the chain + state 
*state.StateDB // Current state at the head of the chain + gasTip atomic.Pointer[uint256.Int] // Currently accepted minimum gas tip lookup *lookup // Lookup table mapping blobs to txs and txs to billy entries index map[common.Address][]*blobTxMeta // Blob transactions grouped by accounts, sorted by nonce @@ -342,6 +368,7 @@ func New(config Config, chain BlockChain, hasPendingAuth func(common.Address) bo hasPendingAuth: hasPendingAuth, signer: types.LatestSigner(chain.Config()), chain: chain, + cQueue: newConversionQueue(), // Deprecate it after the osaka fork lookup: newLookup(), index: make(map[common.Address][]*blobTxMeta), spent: make(map[common.Address]*uint256.Int), @@ -383,8 +410,17 @@ func (p *BlobPool) Init(gasTip uint64, head *types.Header, reserver txpool.Reser if err != nil { return err } - p.head, p.state = head, state + p.head.Store(head) + p.state = state + // Create new slotter for pre-Osaka blob configuration. + slotter := newSlotter(eip4844.LatestMaxBlobsPerBlock(p.chain.Config())) + + // See if we need to migrate the queue blob store after fusaka + slotter, err = tryMigrate(p.chain.Config(), slotter, queuedir) + if err != nil { + return err + } // Index all transactions on disk and delete anything unprocessable var fails []uint64 index := func(id uint64, size uint32, blob []byte) { @@ -392,7 +428,6 @@ func (p *BlobPool) Init(gasTip uint64, head *types.Header, reserver txpool.Reser fails = append(fails, id) } } - slotter := newSlotter(eip4844.LatestMaxBlobsPerBlock(p.chain.Config())) store, err := billy.Open(billy.Options{Path: queuedir, Repair: true}, slotter, index) if err != nil { return err @@ -416,17 +451,17 @@ func (p *BlobPool) Init(gasTip uint64, head *types.Header, reserver txpool.Reser p.recheck(addr, nil) } var ( - basefee = uint256.MustFromBig(eip1559.CalcBaseFee(p.chain.Config(), p.head)) + basefee = uint256.MustFromBig(eip1559.CalcBaseFee(p.chain.Config(), head)) blobfee = uint256.NewInt(params.BlobTxMinBlobGasprice) ) - if 
p.head.ExcessBlobGas != nil { - blobfee = uint256.MustFromBig(eip4844.CalcBlobFee(p.chain.Config(), p.head)) + if head.ExcessBlobGas != nil { + blobfee = uint256.MustFromBig(eip4844.CalcBlobFee(p.chain.Config(), head)) } p.evict = newPriceHeap(basefee, blobfee, p.index) // Pool initialized, attach the blob limbo to it to track blobs included // recently but not yet finalized - p.limbo, err = newLimbo(limbodir, eip4844.LatestMaxBlobsPerBlock(p.chain.Config())) + p.limbo, err = newLimbo(p.chain.Config(), limbodir) if err != nil { p.Close() return err @@ -450,6 +485,9 @@ func (p *BlobPool) Init(gasTip uint64, head *types.Header, reserver txpool.Reser // Close closes down the underlying persistent store. func (p *BlobPool) Close() error { + // Terminate the conversion queue + p.cQueue.close() + var errs []error if p.limbo != nil { // Close might be invoked due to error in constructor, before p,limbo is set if err := p.limbo.Close(); err != nil { @@ -808,7 +846,7 @@ func (p *BlobPool) Reset(oldHead, newHead *types.Header) { log.Error("Failed to reset blobpool state", "err", err) return } - p.head = newHead + p.head.Store(newHead) p.state = statedb // Run the reorg between the old and new head and figure out which accounts @@ -831,7 +869,7 @@ func (p *BlobPool) Reset(oldHead, newHead *types.Header) { } } // Flush out any blobs from limbo that are older than the latest finality - if p.chain.Config().IsCancun(p.head.Number, p.head.Time) { + if p.chain.Config().IsCancun(newHead.Number, newHead.Time) { p.limbo.finalize(p.chain.CurrentFinalBlock()) } // Reset the price heap for the new set of basefee/blobfee pairs @@ -847,6 +885,172 @@ func (p *BlobPool) Reset(oldHead, newHead *types.Header) { basefeeGauge.Update(int64(basefee.Uint64())) blobfeeGauge.Update(int64(blobfee.Uint64())) p.updateStorageMetrics() + + // Perform the conversion logic at the fork boundary + if !p.chain.Config().IsOsaka(oldHead.Number, oldHead.Time) && p.chain.Config().IsOsaka(newHead.Number, 
newHead.Time) { + // Deep copy all indexed transaction metadata. + var ( + ids = make(map[common.Address]map[uint64]uint64) + txs = make(map[common.Address]map[uint64]common.Hash) + ) + for sender, list := range p.index { + ids[sender] = make(map[uint64]uint64) + txs[sender] = make(map[uint64]common.Hash) + for _, m := range list { + ids[sender][m.nonce] = m.id + txs[sender][m.nonce] = m.hash + } + } + // Initiate the background conversion thread. + p.cQueue.launchBillyConversion(func() { + p.convertLegacySidecars(ids, txs) + }) + } +} + +// compareAndSwap checks if the specified transaction is still tracked in the pool +// and replace the metadata accordingly. It should only be used in the fork boundary +// bulk conversion. If it fails for some reason, the subsequent txs won't be dropped +// for simplicity which we assume it's very likely to happen. +// +// The returned flag indicates whether the replacement succeeded. +func (p *BlobPool) compareAndSwap(address common.Address, hash common.Hash, blob []byte, oldID uint64, oldStorageSize uint32) bool { + p.lock.Lock() + defer p.lock.Unlock() + + newId, err := p.store.Put(blob) + if err != nil { + log.Error("Failed to store transaction", "hash", hash, "err", err) + return false + } + newSize := uint64(len(blob)) + newStorageSize := p.store.Size(newId) + + // Terminate the procedure if the transaction was already evicted. The + // newly added blob should be removed before return. 
+ if !p.lookup.update(hash, newId, newSize) { + if derr := p.store.Delete(newId); derr != nil { + log.Error("Failed to delete the dangling blob tx", "err", derr) + } else { + log.Warn("Deleted the dangling blob tx", "id", newId) + } + return false + } + // Update the metadata of blob transaction + for _, meta := range p.index[address] { + if meta.hash == hash { + meta.id = newId + meta.version = types.BlobSidecarVersion1 + meta.storageSize = newStorageSize + meta.size = newSize + + p.stored += uint64(newStorageSize) + p.stored -= uint64(oldStorageSize) + break + } + } + if err := p.store.Delete(oldID); err != nil { + log.Error("Failed to delete the legacy transaction", "hash", hash, "id", oldID, "err", err) + } + return true +} + +// convertLegacySidecar fetches transaction data from the store, performs an +// on-the-fly conversion. This function is intended for use only during the +// Osaka fork transition period. +// +// The returned flag indicates whether the replacement succeeds or not. +func (p *BlobPool) convertLegacySidecar(sender common.Address, hash common.Hash, id uint64) bool { + start := time.Now() + + // Retrieves the legacy blob transaction from the underlying store with + // read lock held, preventing any potential data race around the slot + // specified by the id. + p.lock.RLock() + data, err := p.store.Get(id) + if err != nil { + p.lock.RUnlock() + // The transaction may have been evicted simultaneously, safe to skip conversion. + log.Debug("Blob transaction is missing", "hash", hash, "id", id, "err", err) + return false + } + oldStorageSize := p.store.Size(id) + p.lock.RUnlock() + + // Decode the transaction, the failure is not expected and report the error + // loudly if possible. If the blob transaction in this slot is corrupted, + // leave it in the store, it will be dropped during the next pool + // initialization. 
+ var tx types.Transaction + if err = rlp.DecodeBytes(data, &tx); err != nil { + log.Error("Blob transaction is corrupted", "hash", hash, "id", id, "err", err) + return false + } + + // Skip conversion if the transaction does not match the expected hash, or if it was + // already converted. This can occur if the original transaction was evicted from the + // pool and the slot was reused by a new one. + if tx.Hash() != hash { + log.Warn("Blob transaction was replaced", "hash", hash, "id", id, "stored", tx.Hash()) + return false + } + sc := tx.BlobTxSidecar() + if sc.Version >= types.BlobSidecarVersion1 { + log.Debug("Skipping conversion of blob tx", "hash", hash, "id", id) + return false + } + + // Perform the sidecar conversion, the failure is not expected and report the error + // loudly if possible. + if err := tx.BlobTxSidecar().ToV1(); err != nil { + log.Error("Failed to convert blob transaction", "hash", hash, "err", err) + return false + } + + // Encode the converted transaction, the failure is not expected and report + // the error loudly if possible. + blob, err := rlp.EncodeToBytes(&tx) + if err != nil { + log.Error("Failed to encode blob transaction", "hash", tx.Hash(), "err", err) + return false + } + + // Replace the legacy blob transaction with the converted format. + if !p.compareAndSwap(sender, hash, blob, id, oldStorageSize) { + log.Error("Failed to replace the legacy transaction", "hash", hash) + return false + } + log.Debug("Converted legacy blob transaction", "hash", hash, "elapsed", common.PrettyDuration(time.Since(start))) + return true +} + +// convertLegacySidecars converts all given transactions to sidecar version 1. +// +// If any of them fails to be converted, the subsequent transactions will still +// be processed, as we assume the failure is very unlikely to happen. If happens, +// these transactions will be stuck in the pool until eviction. 
+func (p *BlobPool) convertLegacySidecars(ids map[common.Address]map[uint64]uint64, txs map[common.Address]map[uint64]common.Hash) { + var ( + start = time.Now() + success int + failure int + ) + for addr, list := range txs { + // Transactions evicted from the pool must be contiguous, if in any case, + // the transactions are gapped with each other, they will be discarded. + nonces := slices.Collect(maps.Keys(list)) + slices.Sort(nonces) + + // Convert the txs with nonce order + for _, nonce := range nonces { + if p.convertLegacySidecar(addr, list[nonce], ids[addr][nonce]) { + success++ + } else { + failure++ + } + } + } + log.Info("Completed blob transaction conversion", "discarded", failure, "injected", success, "elapsed", common.PrettyDuration(time.Since(start))) } // reorg assembles all the transactors and missing transactions between an old @@ -994,6 +1198,21 @@ func (p *BlobPool) reinject(addr common.Address, txhash common.Hash) error { // TODO: seems like an easy optimization here would be getting the serialized tx // from limbo instead of re-serializing it here. + // Converts reorged-out legacy blob transactions to the new format to prevent + // them from becoming stuck in the pool until eviction. + // + // Performance note: Conversion takes ~140ms (Mac M1 Pro). Since a maximum of + // 9 legacy blob transactions are allowed in a block pre-Osaka, an adversary + // could theoretically halt a Geth node for ~1.2s by reorging per block. However, + // this attack is financially inefficient to execute. + head := p.head.Load() + if p.chain.Config().IsOsaka(head.Number, head.Time) && tx.BlobTxSidecar().Version == types.BlobSidecarVersion0 { + if err := tx.BlobTxSidecar().ToV1(); err != nil { + log.Error("Failed to convert the legacy sidecar", "err", err) + return err + } + log.Info("Legacy blob transaction is reorged", "hash", tx.Hash()) + } // Serialize the transaction back into the primary datastore. 
blob, err := rlp.EncodeToBytes(tx) if err != nil { @@ -1032,14 +1251,15 @@ func (p *BlobPool) SetGasTip(tip *big.Int) { defer p.lock.Unlock() // Store the new minimum gas tip - old := p.gasTip - p.gasTip = uint256.MustFromBig(tip) + old := p.gasTip.Load() + newTip := uint256.MustFromBig(tip) + p.gasTip.Store(newTip) // If the min miner fee increased, remove transactions below the new threshold - if old == nil || p.gasTip.Cmp(old) > 0 { + if old == nil || newTip.Cmp(old) > 0 { for addr, txs := range p.index { for i, tx := range txs { - if tx.execTipCap.Cmp(p.gasTip) < 0 { + if tx.execTipCap.Cmp(newTip) < 0 { // Drop the offending transaction var ( ids = []uint64{tx.id} @@ -1099,10 +1319,10 @@ func (p *BlobPool) ValidateTxBasics(tx *types.Transaction) error { Config: p.chain.Config(), Accept: 1 << types.BlobTxType, MaxSize: txMaxSize, - MinTip: p.gasTip.ToBig(), + MinTip: p.gasTip.Load().ToBig(), MaxBlobCount: maxBlobsPerTx, } - return txpool.ValidateTransaction(tx, p.head, p.signer, opts) + return txpool.ValidateTransaction(tx, p.head.Load(), p.signer, opts) } // checkDelegationLimit determines if the tx sender is delegated or has a @@ -1140,10 +1360,10 @@ func (p *BlobPool) checkDelegationLimit(tx *types.Transaction) error { // validateTx checks whether a transaction is valid according to the consensus // rules and adheres to some heuristic limits of the local node (price and size). +// +// This function assumes the static validation has been performed already and +// only runs the stateful checks with lock protection. func (p *BlobPool) validateTx(tx *types.Transaction) error { - if err := p.ValidateTxBasics(tx); err != nil { - return err - } // Ensure the transaction adheres to the stateful pool filters (nonce, balance) stateOpts := &txpool.ValidationOptionsWithState{ State: p.state, @@ -1298,9 +1518,20 @@ func (p *BlobPool) GetMetadata(hash common.Hash) *txpool.TxMetadata { } // GetBlobs returns a number of blobs and proofs for the given versioned hashes. 
+// Blobpool must place responses in the order given in the request, using null +// for any missing blobs. +// +// For instance, if the request is [A_versioned_hash, B_versioned_hash, +// C_versioned_hash] and blobpool has data for blobs A and C, but doesn't have +// data for B, the response MUST be [A, null, C]. +// // This is a utility method for the engine API, enabling consensus clients to // retrieve blobs from the pools directly instead of the network. -func (p *BlobPool) GetBlobs(vhashes []common.Hash, version byte) ([]*kzg4844.Blob, []kzg4844.Commitment, [][]kzg4844.Proof, error) { +// +// The version argument specifies the type of proofs to return, either the +// blob proofs (version 0) or the cell proofs (version 1). Proofs conversion is +// CPU intensive, so only done if explicitly requested with the convert flag. +func (p *BlobPool) GetBlobs(vhashes []common.Hash, version byte, convert bool) ([]*kzg4844.Blob, []kzg4844.Commitment, [][]kzg4844.Proof, error) { var ( blobs = make([]*kzg4844.Blob, len(vhashes)) commitments = make([]kzg4844.Commitment, len(vhashes)) @@ -1312,31 +1543,36 @@ func (p *BlobPool) GetBlobs(vhashes []common.Hash, version byte) ([]*kzg4844.Blo for i, h := range vhashes { indices[h] = append(indices[h], i) } + for _, vhash := range vhashes { - // Skip duplicate vhash that was already resolved in a previous iteration if _, ok := filled[vhash]; ok { + // Skip vhash that was already resolved in a previous iteration continue } - // Retrieve the corresponding blob tx with the vhash + + // Retrieve the corresponding blob tx with the vhash. 
p.lock.RLock() txID, exists := p.lookup.storeidOfBlob(vhash) p.lock.RUnlock() if !exists { - return nil, nil, nil, fmt.Errorf("blob with vhash %x is not found", vhash) + continue } data, err := p.store.Get(txID) if err != nil { - return nil, nil, nil, err + log.Error("Tracked blob transaction missing from store", "id", txID, "err", err) + continue } // Decode the blob transaction tx := new(types.Transaction) if err := rlp.DecodeBytes(data, tx); err != nil { - return nil, nil, nil, err + log.Error("Blobs corrupted for traced transaction", "id", txID, "err", err) + continue } sidecar := tx.BlobTxSidecar() if sidecar == nil { - return nil, nil, nil, fmt.Errorf("blob tx without sidecar %x", tx.Hash()) + log.Error("Blob tx without sidecar", "hash", tx.Hash(), "id", txID) + continue } // Traverse the blobs in the transaction for i, hash := range tx.BlobHashes() { @@ -1344,6 +1580,14 @@ func (p *BlobPool) GetBlobs(vhashes []common.Hash, version byte) ([]*kzg4844.Blo if !ok { continue // non-interesting blob } + // Mark hash as seen. + filled[hash] = struct{}{} + if sidecar.Version != version && !convert { + // Skip blobs with incompatible version. Note we still track the blob hash + // in `filled` here, ensuring that we do not resolve this tx another time. + continue + } + // Get or convert the proof. var pf []kzg4844.Proof switch version { case types.BlobSidecarVersion0: @@ -1376,7 +1620,6 @@ func (p *BlobPool) GetBlobs(vhashes []common.Hash, version byte) ([]*kzg4844.Blo commitments[index] = sidecar.Commitments[i] proofs[index] = pf } - filled[hash] = struct{}{} } } return blobs, commitments, proofs, nil @@ -1397,48 +1640,59 @@ func (p *BlobPool) AvailableBlobs(vhashes []common.Hash) int { return available } -// convertSidecar converts the legacy sidecar in the submitted transactions -// if Osaka fork has been activated. 
-func (p *BlobPool) convertSidecar(txs []*types.Transaction) ([]*types.Transaction, []error) { - head := p.chain.CurrentBlock() - if !p.chain.Config().IsOsaka(head.Number, head.Time) { - return txs, make([]error, len(txs)) +// preCheck performs the static validation upon the provided tx and converts +// the legacy sidecars if Osaka fork has been activated with a short time window. +// +// This function is pure static and lock free. +func (p *BlobPool) preCheck(tx *types.Transaction) error { + var ( + head = p.head.Load() + isOsaka = p.chain.Config().IsOsaka(head.Number, head.Time) + deadline time.Time + ) + if isOsaka { + deadline = time.Unix(int64(*p.chain.Config().OsakaTime), 0).Add(conversionTimeWindow) } - var errs []error - for _, tx := range txs { - sidecar := tx.BlobTxSidecar() - if sidecar == nil { - errs = append(errs, errors.New("missing sidecar in blob transaction")) - continue - } - if sidecar.Version == types.BlobSidecarVersion0 { - if err := sidecar.ToV1(); err != nil { - errs = append(errs, err) - continue - } - } - errs = append(errs, nil) + // Validate the transaction statically at first to avoid unnecessary + // conversion. This step doesn't require lock protection. + if err := p.ValidateTxBasics(tx); err != nil { + return err } - return txs, errs + // Before the Osaka fork, reject the blob txs with cell proofs + if !isOsaka { + if tx.BlobTxSidecar().Version == types.BlobSidecarVersion0 { + return nil + } else { + return errors.New("cell proof is not supported yet") + } + } + // After the Osaka fork, reject the legacy blob txs if the conversion + // time window is passed. + if tx.BlobTxSidecar().Version == types.BlobSidecarVersion1 { + return nil + } + if head.Time > uint64(deadline.Unix()) { + return errors.New("legacy blob tx is not supported") + } + // Convert the legacy sidecar after Osaka fork. This could be a long + // procedure which takes a few seconds, even minutes if there is a long + // queue. 
Fortunately it will only block the routine of the source peer + // announcing the tx, without affecting other parts. + return p.cQueue.convert(tx) } // Add inserts a set of blob transactions into the pool if they pass validation (both // consensus validity and pool restrictions). -// -// Note, if sync is set the method will block until all internal maintenance -// related to the add is finished. Only use this during tests for determinism. func (p *BlobPool) Add(txs []*types.Transaction, sync bool) []error { var ( - errs []error - adds = make([]*types.Transaction, 0, len(txs)) + errs []error = make([]error, len(txs)) + adds = make([]*types.Transaction, 0, len(txs)) ) - txs, errs = p.convertSidecar(txs) for i, tx := range txs { - if errs[i] != nil { + if errs[i] = p.preCheck(tx); errs[i] != nil { continue } - errs[i] = p.add(tx) - if errs[i] == nil { + if errs[i] = p.add(tx); errs[i] == nil { adds = append(adds, tx.WithoutBlobTxSidecar()) } } @@ -1678,7 +1932,7 @@ func (p *BlobPool) drop() { func (p *BlobPool) Pending(filter txpool.PendingFilter) map[common.Address][]*txpool.LazyTransaction { // If only plain transactions are requested, this pool is unsuitable as it // contains none, don't even bother. 
- if filter.OnlyPlainTxs { + if !filter.BlobTxs { return nil } // Track the amount of time waiting to retrieve the list of pending blob txs @@ -1699,6 +1953,11 @@ func (p *BlobPool) Pending(filter txpool.PendingFilter) map[common.Address][]*tx for addr, txs := range p.index { lazies := make([]*txpool.LazyTransaction, 0, len(txs)) for _, tx := range txs { + // Skip v0 or v1 blob transactions depending on the filter + if tx.version != filter.BlobVersion { + break // skip the rest because of nonce ordering + } + // If transaction filtering was requested, discard badly priced ones if filter.MinTip != nil && filter.BaseFee != nil { if tx.execFeeCap.Lt(filter.BaseFee) { @@ -1926,7 +2185,7 @@ func (p *BlobPool) Clear() { p.spent = make(map[common.Address]*uint256.Int) var ( - basefee = uint256.MustFromBig(eip1559.CalcBaseFee(p.chain.Config(), p.head)) + basefee = uint256.MustFromBig(eip1559.CalcBaseFee(p.chain.Config(), p.head.Load())) blobfee = uint256.NewInt(params.BlobTxMinBlobGasprice) ) p.evict = newPriceHeap(basefee, blobfee, p.index) diff --git a/core/txpool/blobpool/blobpool_test.go b/core/txpool/blobpool/blobpool_test.go index 51ab27eb01..f0f00c8055 100644 --- a/core/txpool/blobpool/blobpool_test.go +++ b/core/txpool/blobpool/blobpool_test.go @@ -24,6 +24,7 @@ import ( "fmt" "math" "math/big" + "math/rand" "os" "path/filepath" "reflect" @@ -41,6 +42,7 @@ import ( "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto/kzg4844" + "github.com/ethereum/go-ethereum/internal/testrand" "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/rlp" "github.com/holiman/billy" @@ -86,6 +88,12 @@ type testBlockChain struct { statedb *state.StateDB blocks map[uint64]*types.Block + + blockTime *uint64 +} + +func (bc *testBlockChain) setHeadTime(time uint64) { + bc.blockTime = &time } func (bc *testBlockChain) Config() *params.ChainConfig { @@ -103,6 +111,10 @@ func (bc *testBlockChain) 
CurrentBlock() *types.Header { blockTime = *bc.config.CancunTime + 1 gasLimit = uint64(30_000_000) ) + if bc.blockTime != nil { + blockTime = *bc.blockTime + } + lo := new(big.Int) hi := new(big.Int).Mul(big.NewInt(5714), new(big.Int).Exp(big.NewInt(10), big.NewInt(18), nil)) @@ -262,8 +274,8 @@ func makeUnsignedTx(nonce uint64, gasTipCap uint64, gasFeeCap uint64, blobFeeCap return makeUnsignedTxWithTestBlob(nonce, gasTipCap, gasFeeCap, blobFeeCap, rnd.Intn(len(testBlobs))) } -// makeUnsignedTx is a utility method to construct a random blob transaction -// without signing it. +// makeUnsignedTxWithTestBlob is a utility method to construct a random blob transaction +// with a specific test blob without signing it. func makeUnsignedTxWithTestBlob(nonce uint64, gasTipCap uint64, gasFeeCap uint64, blobFeeCap uint64, blobIdx int) *types.BlobTx { return &types.BlobTx{ ChainID: uint256.MustFromBig(params.MainnetChainConfig.ChainID), @@ -421,11 +433,11 @@ func verifyBlobRetrievals(t *testing.T, pool *BlobPool) { hashes = append(hashes, tx.vhashes...) 
} } - blobs1, _, proofs1, err := pool.GetBlobs(hashes, types.BlobSidecarVersion0) + blobs1, _, proofs1, err := pool.GetBlobs(hashes, types.BlobSidecarVersion0, false) if err != nil { t.Fatal(err) } - blobs2, _, proofs2, err := pool.GetBlobs(hashes, types.BlobSidecarVersion1) + blobs2, _, proofs2, err := pool.GetBlobs(hashes, types.BlobSidecarVersion1, false) if err != nil { t.Fatal(err) } @@ -439,22 +451,18 @@ func verifyBlobRetrievals(t *testing.T, pool *BlobPool) { return } for i, hash := range hashes { - // If an item is missing, but shouldn't, error - if blobs1[i] == nil || proofs1[i] == nil { - t.Errorf("tracked blob retrieval failed: item %d, hash %x", i, hash) - continue - } - if blobs2[i] == nil || proofs2[i] == nil { + // If an item is missing from both, but shouldn't, error + if (blobs1[i] == nil || proofs1[i] == nil) && (blobs2[i] == nil || proofs2[i] == nil) { t.Errorf("tracked blob retrieval failed: item %d, hash %x", i, hash) continue } // Item retrieved, make sure it matches the expectation index := testBlobIndices[hash] - if *blobs1[i] != *testBlobs[index] || proofs1[i][0] != testBlobProofs[index] { + if blobs1[i] != nil && (*blobs1[i] != *testBlobs[index] || proofs1[i][0] != testBlobProofs[index]) { t.Errorf("retrieved blob or proof mismatch: item %d, hash %x", i, hash) continue } - if *blobs2[i] != *testBlobs[index] || !slices.Equal(proofs2[i], testBlobCellProofs[index]) { + if blobs2[i] != nil && (*blobs2[i] != *testBlobs[index] || !slices.Equal(proofs2[i], testBlobCellProofs[index])) { t.Errorf("retrieved blob or proof mismatch: item %d, hash %x", i, hash) continue } @@ -1163,6 +1171,115 @@ func TestChangingSlotterSize(t *testing.T) { } } +// TestBillyMigration tests the billy migration from the default slotter to +// the PeerDAS slotter. This tests both the migration of the slotter +// as well as increasing the slotter size of the new slotter. 
+func TestBillyMigration(t *testing.T) { + //log.SetDefault(log.NewLogger(log.NewTerminalHandlerWithLevel(os.Stderr, log.LevelTrace, true))) + + // Create a temporary folder for the persistent backend + storage := t.TempDir() + + os.MkdirAll(filepath.Join(storage, pendingTransactionStore), 0700) + os.MkdirAll(filepath.Join(storage, limboedTransactionStore), 0700) + // Create the billy with the old slotter + oldSlotter := newSlotterEIP7594(6) + store, _ := billy.Open(billy.Options{Path: filepath.Join(storage, pendingTransactionStore)}, oldSlotter, nil) + + // Create transactions from a few accounts. + var ( + key1, _ = crypto.GenerateKey() + key2, _ = crypto.GenerateKey() + key3, _ = crypto.GenerateKey() + + addr1 = crypto.PubkeyToAddress(key1.PublicKey) + addr2 = crypto.PubkeyToAddress(key2.PublicKey) + addr3 = crypto.PubkeyToAddress(key3.PublicKey) + + tx1 = makeMultiBlobTx(0, 1, 1000, 100, 6, 0, key1, types.BlobSidecarVersion0) + tx2 = makeMultiBlobTx(0, 1, 800, 70, 6, 0, key2, types.BlobSidecarVersion0) + tx3 = makeMultiBlobTx(0, 1, 800, 110, 24, 0, key3, types.BlobSidecarVersion0) + + blob1, _ = rlp.EncodeToBytes(tx1) + blob2, _ = rlp.EncodeToBytes(tx2) + ) + + // Write the two safely sized txs to store. note: although the store is + // configured for a blob count of 6, it can also support around ~1mb of call + // data - all this to say that we aren't using the the absolute largest shelf + // available. + store.Put(blob1) + store.Put(blob2) + store.Close() + + // Mimic a blobpool with max blob count of 6 upgrading to a max blob count of 24. 
+ for _, maxBlobs := range []int{6, 24} { + statedb, _ := state.New(types.EmptyRootHash, state.NewDatabaseForTesting()) + statedb.AddBalance(addr1, uint256.NewInt(1_000_000_000), tracing.BalanceChangeUnspecified) + statedb.AddBalance(addr2, uint256.NewInt(1_000_000_000), tracing.BalanceChangeUnspecified) + statedb.AddBalance(addr3, uint256.NewInt(1_000_000_000), tracing.BalanceChangeUnspecified) + statedb.Commit(0, true, false) + + // Make custom chain config where the max blob count changes based on the loop variable. + zero := uint64(0) + config := ¶ms.ChainConfig{ + ChainID: big.NewInt(1), + LondonBlock: big.NewInt(0), + BerlinBlock: big.NewInt(0), + CancunTime: &zero, + OsakaTime: &zero, + BlobScheduleConfig: ¶ms.BlobScheduleConfig{ + Cancun: ¶ms.BlobConfig{ + Target: maxBlobs / 2, + Max: maxBlobs, + UpdateFraction: params.DefaultCancunBlobConfig.UpdateFraction, + }, + Osaka: ¶ms.BlobConfig{ + Target: maxBlobs / 2, + Max: maxBlobs, + UpdateFraction: params.DefaultCancunBlobConfig.UpdateFraction, + }, + }, + } + chain := &testBlockChain{ + config: config, + basefee: uint256.NewInt(1050), + blobfee: uint256.NewInt(105), + statedb: statedb, + } + pool := New(Config{Datadir: storage}, chain, nil) + if err := pool.Init(1, chain.CurrentBlock(), newReserver()); err != nil { + t.Fatalf("failed to create blob pool: %v", err) + } + + // Try to add the big blob tx. In the initial iteration it should overflow + // the pool. On the subsequent iteration it should be accepted. + errs := pool.Add([]*types.Transaction{tx3}, true) + if _, ok := pool.index[addr3]; ok && maxBlobs == 6 { + t.Errorf("expected insert of oversized blob tx to fail: blobs=24, maxBlobs=%d, err=%v", maxBlobs, errs[0]) + } else if !ok && maxBlobs == 10 { + t.Errorf("expected insert of oversized blob tx to succeed: blobs=24, maxBlobs=%d, err=%v", maxBlobs, errs[0]) + } + + // Verify the regular two txs are always available. 
+ if got := pool.Get(tx1.Hash()); got == nil { + t.Errorf("expected tx %s from %s in pool", tx1.Hash(), addr1) + } + if got := pool.Get(tx2.Hash()); got == nil { + t.Errorf("expected tx %s from %s in pool", tx2.Hash(), addr2) + } + + // Verify all the calculated pool internals. Interestingly, this is **not** + // a duplication of the above checks, this actually validates the verifier + // using the above already hard coded checks. + // + // Do not remove this, nor alter the above to be generic. + verifyPoolInternals(t, pool) + + pool.Close() + } +} + // TestBlobCountLimit tests the blobpool enforced limits on the max blob count. func TestBlobCountLimit(t *testing.T) { var ( @@ -1641,8 +1758,8 @@ func TestAdd(t *testing.T) { // Add each transaction one by one, verifying the pool internals in between for j, add := range tt.adds { signed, _ := types.SignNewTx(keys[add.from], types.LatestSigner(params.MainnetChainConfig), add.tx) - if err := pool.add(signed); !errors.Is(err, add.err) { - t.Errorf("test %d, tx %d: adding transaction error mismatch: have %v, want %v", i, j, err, add.err) + if errs := pool.Add([]*types.Transaction{signed}, true); !errors.Is(errs[0], add.err) { + t.Errorf("test %d, tx %d: adding transaction error mismatch: have %v, want %v", i, j, errs[0], add.err) } if add.err == nil { size, exist := pool.lookup.sizeOfTx(signed.Hash()) @@ -1689,9 +1806,14 @@ func TestAdd(t *testing.T) { } } -// Tests that adding the transactions with legacy sidecar and expect them to -// be converted to new format correctly. +// Tests that transactions with legacy sidecars are accepted within the +// conversion window but rejected after it has passed. 
func TestAddLegacyBlobTx(t *testing.T) { + testAddLegacyBlobTx(t, true) // conversion window has not yet passed + testAddLegacyBlobTx(t, false) // conversion window passed +} + +func testAddLegacyBlobTx(t *testing.T, accept bool) { var ( key1, _ = crypto.GenerateKey() key2, _ = crypto.GenerateKey() @@ -1711,6 +1833,15 @@ func TestAddLegacyBlobTx(t *testing.T) { blobfee: uint256.NewInt(105), statedb: statedb, } + var timeDiff uint64 + if accept { + timeDiff = uint64(conversionTimeWindow.Seconds()) - 1 + } else { + timeDiff = uint64(conversionTimeWindow.Seconds()) + 1 + } + time := *params.MergedTestChainConfig.OsakaTime + timeDiff + chain.setHeadTime(time) + pool := New(Config{Datadir: t.TempDir()}, chain, nil) if err := pool.Init(1, chain.CurrentBlock(), newReserver()); err != nil { t.Fatalf("failed to create blob pool: %v", err) @@ -1720,12 +1851,15 @@ func TestAddLegacyBlobTx(t *testing.T) { var ( tx1 = makeMultiBlobTx(0, 1, 1000, 100, 6, 0, key1, types.BlobSidecarVersion0) tx2 = makeMultiBlobTx(0, 1, 800, 70, 6, 6, key2, types.BlobSidecarVersion0) - tx3 = makeMultiBlobTx(1, 1, 800, 70, 6, 12, key2, types.BlobSidecarVersion1) + txs = []*types.Transaction{tx1, tx2} ) - errs := pool.Add([]*types.Transaction{tx1, tx2, tx3}, true) + errs := pool.Add(txs, true) for _, err := range errs { - if err != nil { - t.Fatalf("failed to add tx: %v", err) + if accept && err != nil { + t.Fatalf("expected tx add to succeed, %v", err) + } + if !accept && err == nil { + t.Fatal("expected tx add to fail") } } verifyPoolInternals(t, pool) @@ -1814,10 +1948,11 @@ func TestGetBlobs(t *testing.T) { } cases := []struct { - start int - limit int - version byte - expErr bool + start int + limit int + fillRandom bool // Whether to randomly fill some of the requested blobs with unknowns + version byte // Blob sidecar version to request + convert bool // Whether to convert version on retrieval }{ { start: 0, limit: 6, @@ -1827,6 +1962,14 @@ func TestGetBlobs(t *testing.T) { start: 0, limit: 6, 
version: types.BlobSidecarVersion1, }, + { + start: 0, limit: 6, fillRandom: true, + version: types.BlobSidecarVersion0, + }, + { + start: 0, limit: 6, fillRandom: true, + version: types.BlobSidecarVersion1, + }, { start: 3, limit: 9, version: types.BlobSidecarVersion0, @@ -1835,6 +1978,14 @@ func TestGetBlobs(t *testing.T) { start: 3, limit: 9, version: types.BlobSidecarVersion1, }, + { + start: 3, limit: 9, fillRandom: true, + version: types.BlobSidecarVersion0, + }, + { + start: 3, limit: 9, fillRandom: true, + version: types.BlobSidecarVersion1, + }, { start: 3, limit: 15, version: types.BlobSidecarVersion0, @@ -1843,6 +1994,14 @@ func TestGetBlobs(t *testing.T) { start: 3, limit: 15, version: types.BlobSidecarVersion1, }, + { + start: 3, limit: 15, fillRandom: true, + version: types.BlobSidecarVersion0, + }, + { + start: 3, limit: 15, fillRandom: true, + version: types.BlobSidecarVersion1, + }, { start: 0, limit: 18, version: types.BlobSidecarVersion0, @@ -1852,57 +2011,268 @@ func TestGetBlobs(t *testing.T) { version: types.BlobSidecarVersion1, }, { - start: 18, limit: 20, + start: 0, limit: 18, fillRandom: true, version: types.BlobSidecarVersion0, - expErr: true, + }, + { + start: 0, limit: 18, fillRandom: true, + version: types.BlobSidecarVersion1, + }, + { + start: 0, limit: 18, fillRandom: true, + version: types.BlobSidecarVersion1, + convert: true, // Convert some version 0 blobs to version 1 while retrieving }, } for i, c := range cases { - var vhashes []common.Hash + var ( + vhashes []common.Hash + filled = make(map[int]struct{}) + ) + if c.fillRandom { + filled[len(vhashes)] = struct{}{} + vhashes = append(vhashes, testrand.Hash()) + } for j := c.start; j < c.limit; j++ { vhashes = append(vhashes, testBlobVHashes[j]) + if c.fillRandom && rand.Intn(2) == 0 { + filled[len(vhashes)] = struct{}{} + vhashes = append(vhashes, testrand.Hash()) + } + } + if c.fillRandom { + filled[len(vhashes)] = struct{}{} + vhashes = append(vhashes, testrand.Hash()) + } + 
blobs, _, proofs, err := pool.GetBlobs(vhashes, c.version, c.convert) + if err != nil { + t.Errorf("Unexpected error for case %d, %v", i, err) } - blobs, _, proofs, err := pool.GetBlobs(vhashes, c.version) - if c.expErr { - if err == nil { - t.Errorf("Unexpected return, want error for case %d", i) - } - } else { - if err != nil { - t.Errorf("Unexpected error for case %d, %v", i, err) - } - // Cross validate what we received vs what we wanted - length := c.limit - c.start - if len(blobs) != length || len(proofs) != length { - t.Errorf("retrieved blobs/proofs size mismatch: have %d/%d, want %d", len(blobs), len(proofs), length) + // Cross validate what we received vs what we wanted + length := c.limit - c.start + wantLen := length + len(filled) + if len(blobs) != wantLen || len(proofs) != wantLen { + t.Errorf("retrieved blobs/proofs size mismatch: have %d/%d, want %d", len(blobs), len(proofs), wantLen) + continue + } + + var unknown int + for j := 0; j < len(blobs); j++ { + testBlobIndex := c.start + j - unknown + if _, exist := filled[j]; exist { + if blobs[j] != nil || proofs[j] != nil { + t.Errorf("Unexpected blob and proof, item %d", j) + } + unknown++ continue } - for j := 0; j < len(blobs); j++ { - // If an item is missing, but shouldn't, error - if blobs[j] == nil || proofs[j] == nil { + // If an item is missing, but shouldn't, error + if blobs[j] == nil || proofs[j] == nil { + // This is only an error if there was no version mismatch + if c.convert || + (c.version == types.BlobSidecarVersion1 && 6 <= testBlobIndex && testBlobIndex < 12) || + (c.version == types.BlobSidecarVersion0 && (testBlobIndex < 6 || 12 <= testBlobIndex)) { t.Errorf("tracked blob retrieval failed: item %d, hash %x", j, vhashes[j]) - continue } - // Item retrieved, make sure the blob matches the expectation - if *blobs[j] != *testBlobs[c.start+j] { - t.Errorf("retrieved blob mismatch: item %d, hash %x", j, vhashes[j]) - continue + continue + } + // Item retrieved, make sure the blob 
matches the expectation + if *blobs[j] != *testBlobs[testBlobIndex] { + t.Errorf("retrieved blob mismatch: item %d, hash %x", j, vhashes[j]) + continue + } + // Item retrieved, make sure the proof matches the expectation + if c.version == types.BlobSidecarVersion0 { + if proofs[j][0] != testBlobProofs[testBlobIndex] { + t.Errorf("retrieved proof mismatch: item %d, hash %x", j, vhashes[j]) } - // Item retrieved, make sure the proof matches the expectation - if c.version == types.BlobSidecarVersion0 { - if proofs[j][0] != testBlobProofs[c.start+j] { - t.Errorf("retrieved proof mismatch: item %d, hash %x", j, vhashes[j]) - } - } else { - want, _ := kzg4844.ComputeCellProofs(blobs[j]) - if !reflect.DeepEqual(want, proofs[j]) { - t.Errorf("retrieved proof mismatch: item %d, hash %x", j, vhashes[j]) - } + } else { + want, _ := kzg4844.ComputeCellProofs(blobs[j]) + if !reflect.DeepEqual(want, proofs[j]) { + t.Errorf("retrieved proof mismatch: item %d, hash %x", j, vhashes[j]) } } } } + pool.Close() +} + +// TestSidecarConversion will verify that after the Osaka fork, all legacy +// sidecars in the pool are successfully convert to v1 sidecars. 
+func TestSidecarConversion(t *testing.T) {
+	// log.SetDefault(log.NewLogger(log.NewTerminalHandlerWithLevel(os.Stderr, log.LevelTrace, true)))
+
+	// Create a temporary folder for the persistent backend
+	storage := t.TempDir()
+	os.MkdirAll(filepath.Join(storage, pendingTransactionStore), 0700)
+
+	var (
+		preOsakaTxs  = make(types.Transactions, 10)
+		postOsakaTxs = make(types.Transactions, 3)
+		keys         = make([]*ecdsa.PrivateKey, len(preOsakaTxs)+len(postOsakaTxs))
+		addrs        = make([]common.Address, len(preOsakaTxs)+len(postOsakaTxs))
+		statedb, _   = state.New(types.EmptyRootHash, state.NewDatabaseForTesting())
+	)
+	for i := range keys {
+		keys[i], _ = crypto.GenerateKey()
+		addrs[i] = crypto.PubkeyToAddress(keys[i].PublicKey)
+		statedb.AddBalance(addrs[i], uint256.NewInt(1_000_000_000), tracing.BalanceChangeUnspecified)
+	}
+	for i := range preOsakaTxs {
+		preOsakaTxs[i] = makeMultiBlobTx(0, 1, 1000, 100, 2, 0, keys[i], types.BlobSidecarVersion0)
+	}
+	for i := range postOsakaTxs {
+		if i == 0 {
+			postOsakaTxs[i] = makeMultiBlobTx(0, 1, 1000, 100, 1, 0, keys[len(preOsakaTxs)+i], types.BlobSidecarVersion0) // first gets a v0 sidecar
+		} else {
+			postOsakaTxs[i] = makeMultiBlobTx(0, 1, 1000, 100, 1, 0, keys[len(preOsakaTxs)+i], types.BlobSidecarVersion1)
+		}
+	}
+	statedb.Commit(0, true, false)
+
+	// Test plan:
+	// 1) Create a bunch of v0 sidecar txs and add to pool before Osaka.
+	// 2) Pass in new Osaka header to activate the conversion thread.
+	// 3) Continue adding both v0 and v1 transactions to the pool.
+	// 4) Verify that as additional blocks come in, transactions involved in the
+	// migration are correctly discarded.
+
+	config := &params.ChainConfig{
+		ChainID:            big.NewInt(1),
+		LondonBlock:        big.NewInt(0),
+		BerlinBlock:        big.NewInt(0),
+		CancunTime:         newUint64(0),
+		PragueTime:         newUint64(0),
+		OsakaTime:          newUint64(1),
+		BlobScheduleConfig: params.DefaultBlobSchedule,
+	}
+	chain := &testBlockChain{
+		config:  config,
+		basefee: uint256.NewInt(1050),
+		blobfee: uint256.NewInt(105),
+		statedb: statedb,
+		blocks:  make(map[uint64]*types.Block),
+	}
+
+	// Create 3 blocks:
+	// - the current block, before Osaka
+	// - the first block after Osaka
+	// - another post-Osaka block with several transactions in it
+	header0 := chain.CurrentBlock()
+	header0.Time = 0
+	chain.blocks[0] = types.NewBlockWithHeader(header0)
+
+	header1 := chain.CurrentBlock()
+	header1.Number = big.NewInt(1)
+	header1.Time = 1
+	chain.blocks[1] = types.NewBlockWithHeader(header1)
+
+	header2 := chain.CurrentBlock()
+	header2.Time = 2
+	header2.Number = big.NewInt(2)
+
+	// Make a copy of one of the pre-Osaka transactions and convert it to v1 here
+	// so that we can add it to the pool later and ensure a duplicate is not added
+	// by the conversion queue.
+	tx := preOsakaTxs[len(preOsakaTxs)-1]
+	sc := *tx.BlobTxSidecar() // copy sidecar
+	sc.ToV1()
+	tx = tx.WithBlobTxSidecar(&sc)
+
+	block2 := types.NewBlockWithHeader(header2).WithBody(types.Body{Transactions: append(postOsakaTxs, tx)})
+	chain.blocks[2] = block2
+
+	pool := New(Config{Datadir: storage}, chain, nil)
+	if err := pool.Init(1, header0, newReserver()); err != nil {
+		t.Fatalf("failed to create blob pool: %v", err)
+	}
+
+	errs := pool.Add(preOsakaTxs, true)
+	for i, err := range errs {
+		if err != nil {
+			t.Errorf("failed to insert blob tx from %s: %s", addrs[i], errs[i])
+		}
+	}
+
+	// Kick off migration.
+	pool.Reset(header0, header1)
+
+	// Add the v0 sidecar tx, but don't block so we can keep doing other stuff
+	// while it converts the sidecar.
+ addDone := make(chan struct{}) + go func() { + pool.Add(types.Transactions{postOsakaTxs[0]}, false) + close(addDone) + }() + + // Add the post-Osaka v1 sidecar txs. + errs = pool.Add(postOsakaTxs[1:], false) + for _, err := range errs { + if err != nil { + t.Fatalf("expected tx add to succeed: %v", err) + } + } + + // Wait for the first tx's conversion to complete, then check that all + // transactions added after Osaka can be accounted for in the pool. + <-addDone + pending := pool.Pending(txpool.PendingFilter{BlobTxs: true, BlobVersion: types.BlobSidecarVersion1}) + for _, tx := range postOsakaTxs { + from, _ := pool.signer.Sender(tx) + if len(pending[from]) != 1 || pending[from][0].Hash != tx.Hash() { + t.Fatalf("expected post-Osaka txs to be pending") + } + } + + // Now update the pool with the next block. This should cause the pool to + // clear out the post-Osaka txs since they were included in block 2. Since the + // test blockchain doesn't manage nonces, we'll just do that manually before + // the reset is called. Don't forget about the pre-Osaka transaction we also + // added to block 2! + for i := range postOsakaTxs { + statedb.SetNonce(addrs[len(preOsakaTxs)+i], 1, tracing.NonceChangeEoACall) + } + statedb.SetNonce(addrs[len(preOsakaTxs)-1], 1, tracing.NonceChangeEoACall) + pool.Reset(header1, block2.Header()) + + // Now verify no post-Osaka transactions are tracked by the pool. + for i, tx := range postOsakaTxs { + if pool.Get(tx.Hash()) != nil { + t.Fatalf("expected txs added post-osaka to have been placed in limbo due to inclusion in a block: index %d, hash %s", i, tx.Hash()) + } + } + + // Wait for the pool migration to complete. + <-pool.cQueue.anyBillyConversionDone + + // Verify all transactions in the pool were converted and verify the + // subsequent cell proofs. 
+ count, _ := pool.Stats() + if count != len(preOsakaTxs)-1 { + t.Errorf("expected pending count to match initial tx count: pending=%d, expected=%d", count, len(preOsakaTxs)-1) + } + for addr, acc := range pool.index { + for _, m := range acc { + if m.version != types.BlobSidecarVersion1 { + t.Errorf("expected sidecar to have been converted: from %s, hash %s", addr, m.hash) + } + tx := pool.Get(m.hash) + if tx == nil { + t.Errorf("failed to get tx by hash: %s", m.hash) + } + sc := tx.BlobTxSidecar() + if err := kzg4844.VerifyCellProofs(sc.Blobs, sc.Commitments, sc.Proofs); err != nil { + t.Errorf("failed to verify cell proofs for tx %s after conversion: %s", m.hash, err) + } + } + } + + verifyPoolInternals(t, pool) + + // Launch conversion a second time. + // This is just a sanity check to ensure we can handle it. + pool.Reset(header0, header1) pool.Close() } @@ -1989,3 +2359,5 @@ func benchmarkPoolPending(b *testing.B, datacap uint64) { } } } + +func newUint64(val uint64) *uint64 { return &val } diff --git a/core/txpool/blobpool/conversion.go b/core/txpool/blobpool/conversion.go new file mode 100644 index 0000000000..95828d83b2 --- /dev/null +++ b/core/txpool/blobpool/conversion.go @@ -0,0 +1,203 @@ +// Copyright 2025 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . 
+ +package blobpool + +import ( + "errors" + "slices" + "sync/atomic" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/log" +) + +// maxPendingConversionTasks caps the number of pending conversion tasks. This +// prevents excessive memory usage; the worst-case scenario (2k transactions +// with 6 blobs each) would consume approximately 1.5GB of memory. +const maxPendingConversionTasks = 2048 + +// txConvert represents a conversion task with an attached legacy blob transaction. +type txConvert struct { + tx *types.Transaction // Legacy blob transaction + done chan error // Channel for signaling back if the conversion succeeds +} + +// conversionQueue is a dedicated queue for converting legacy blob transactions +// received from the network after the Osaka fork. Since conversion is expensive, +// it is performed in the background by a single thread, ensuring the main Geth +// process is not overloaded. +type conversionQueue struct { + tasks chan *txConvert + startBilly chan func() + quit chan struct{} + closed chan struct{} + + billyQueue []func() + billyTaskDone chan struct{} + + // This channel will be closed when the first billy conversion finishes. + // It's added for unit tests to synchronize with the conversion progress. + anyBillyConversionDone chan struct{} +} + +// newConversionQueue constructs the conversion queue. +func newConversionQueue() *conversionQueue { + q := &conversionQueue{ + tasks: make(chan *txConvert), + startBilly: make(chan func()), + quit: make(chan struct{}), + closed: make(chan struct{}), + anyBillyConversionDone: make(chan struct{}), + } + go q.loop() + return q +} + +// convert accepts a legacy blob transaction with version-0 blobs and queues it +// for conversion. +// +// This function may block for a long time until the transaction is processed. 
+func (q *conversionQueue) convert(tx *types.Transaction) error { + done := make(chan error, 1) + select { + case q.tasks <- &txConvert{tx: tx, done: done}: + return <-done + case <-q.closed: + return errors.New("conversion queue closed") + } +} + +// launchBillyConversion starts a conversion task in the background. +func (q *conversionQueue) launchBillyConversion(fn func()) error { + select { + case q.startBilly <- fn: + return nil + case <-q.closed: + return errors.New("conversion queue closed") + } +} + +// close terminates the conversion queue. +func (q *conversionQueue) close() { + select { + case <-q.closed: + return + default: + close(q.quit) + <-q.closed + } +} + +// run converts a batch of legacy blob txs to the new cell proof format. +func (q *conversionQueue) run(tasks []*txConvert, done chan struct{}, interrupt *atomic.Int32) { + defer close(done) + + for _, t := range tasks { + if interrupt != nil && interrupt.Load() != 0 { + t.done <- errors.New("conversion is interrupted") + continue + } + sidecar := t.tx.BlobTxSidecar() + if sidecar == nil { + t.done <- errors.New("tx without sidecar") + continue + } + // Run the conversion, the original sidecar will be mutated in place + start := time.Now() + err := sidecar.ToV1() + t.done <- err + log.Trace("Converted legacy blob tx", "hash", t.tx.Hash(), "err", err, "elapsed", common.PrettyDuration(time.Since(start))) + } +} + +func (q *conversionQueue) loop() { + defer close(q.closed) + + var ( + done chan struct{} // Non-nil if background routine is active + interrupt *atomic.Int32 // Flag to signal conversion interruption + + // The pending tasks for sidecar conversion. We assume the number of legacy + // blob transactions requiring conversion will not be excessive. However, + // a hard cap is applied as a protective measure. 
+		txTasks []*txConvert
+
+		firstBilly = true
+	)
+
+	for {
+		select {
+		case t := <-q.tasks:
+			if len(txTasks) >= maxPendingConversionTasks {
+				t.done <- errors.New("conversion queue is overloaded")
+				continue
+			}
+			txTasks = append(txTasks, t)
+			// Launch the background conversion thread if it's idle
+			if done == nil {
+				done, interrupt = make(chan struct{}), new(atomic.Int32)
+				go q.run(slices.Clone(txTasks), done, interrupt)
+				txTasks = txTasks[:0]
+			}
+		case <-done:
+			done, interrupt = nil, nil
+			if len(txTasks) > 0 { // relaunch for tasks queued while converting
+				done, interrupt = make(chan struct{}), new(atomic.Int32)
+				go q.run(slices.Clone(txTasks), done, interrupt)
+				txTasks = txTasks[:0]
+			}
+		case fn := <-q.startBilly:
+			q.billyQueue = append(q.billyQueue, fn)
+			q.runNextBillyTask()
+
+		case <-q.billyTaskDone:
+			if firstBilly {
+				close(q.anyBillyConversionDone)
+				firstBilly = false
+			}
+			q.runNextBillyTask()
+
+		case <-q.quit:
+			if done != nil {
+				log.Debug("Waiting for blob proof conversion to exit")
+				interrupt.Store(1)
+				<-done
+			}
+			if q.billyTaskDone != nil {
+				log.Debug("Waiting for blobpool billy conversion to exit")
+				<-q.billyTaskDone
+			}
+			return
+		}
+	}
+}
+
+func (q *conversionQueue) runNextBillyTask() {
+	if len(q.billyQueue) == 0 {
+		q.billyTaskDone = nil
+		return
+	}
+
+	fn := q.billyQueue[0]
+	q.billyQueue = append(q.billyQueue[:0], q.billyQueue[1:]...)
+
+	done := make(chan struct{})
+	go func() { defer close(done); fn() }()
+	q.billyTaskDone = done
+}
diff --git a/core/txpool/blobpool/conversion_test.go b/core/txpool/blobpool/conversion_test.go
new file mode 100644
index 0000000000..a9fd26dbaf
--- /dev/null
+++ b/core/txpool/blobpool/conversion_test.go
@@ -0,0 +1,101 @@
+// Copyright 2025 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package blobpool + +import ( + "crypto/ecdsa" + "crypto/sha256" + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/crypto/kzg4844" + "github.com/ethereum/go-ethereum/params" + "github.com/holiman/uint256" +) + +// createV1BlobTx creates a blob transaction with version 1 sidecar for testing. +func createV1BlobTx(nonce uint64, key *ecdsa.PrivateKey) *types.Transaction { + blob := &kzg4844.Blob{byte(nonce)} + commitment, _ := kzg4844.BlobToCommitment(blob) + cellProofs, _ := kzg4844.ComputeCellProofs(blob) + + blobtx := &types.BlobTx{ + ChainID: uint256.MustFromBig(params.MainnetChainConfig.ChainID), + Nonce: nonce, + GasTipCap: uint256.NewInt(1), + GasFeeCap: uint256.NewInt(1000), + Gas: 21000, + BlobFeeCap: uint256.NewInt(100), + BlobHashes: []common.Hash{kzg4844.CalcBlobHashV1(sha256.New(), &commitment)}, + Value: uint256.NewInt(100), + Sidecar: types.NewBlobTxSidecar(types.BlobSidecarVersion1, []kzg4844.Blob{*blob}, []kzg4844.Commitment{commitment}, cellProofs), + } + return types.MustSignNewTx(key, types.LatestSigner(params.MainnetChainConfig), blobtx) +} + +func TestConversionQueueBasic(t *testing.T) { + queue := newConversionQueue() + defer queue.close() + + key, _ := crypto.GenerateKey() + tx := makeTx(0, 1, 1, 1, key) + if err := queue.convert(tx); err != nil { + t.Fatalf("Expected successful conversion, got error: %v", err) + } + if tx.BlobTxSidecar().Version != types.BlobSidecarVersion1 { + t.Errorf("Expected sidecar version to 
be %d, got %d", types.BlobSidecarVersion1, tx.BlobTxSidecar().Version) + } +} + +func TestConversionQueueV1BlobTx(t *testing.T) { + queue := newConversionQueue() + defer queue.close() + + key, _ := crypto.GenerateKey() + tx := createV1BlobTx(0, key) + version := tx.BlobTxSidecar().Version + + err := queue.convert(tx) + if err != nil { + t.Fatalf("Expected successful conversion, got error: %v", err) + } + if tx.BlobTxSidecar().Version != version { + t.Errorf("Expected sidecar version to remain %d, got %d", version, tx.BlobTxSidecar().Version) + } +} + +func TestConversionQueueClosed(t *testing.T) { + queue := newConversionQueue() + + // Close the queue first + queue.close() + key, _ := crypto.GenerateKey() + tx := makeTx(0, 1, 1, 1, key) + + err := queue.convert(tx) + if err == nil { + t.Fatal("Expected error when converting on closed queue, got nil") + } +} + +func TestConversionQueueDoubleClose(t *testing.T) { + queue := newConversionQueue() + queue.close() + queue.close() // Should not panic +} diff --git a/core/txpool/blobpool/limbo.go b/core/txpool/blobpool/limbo.go index 99d1b4ad6b..50c40c9d83 100644 --- a/core/txpool/blobpool/limbo.go +++ b/core/txpool/blobpool/limbo.go @@ -20,8 +20,10 @@ import ( "errors" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/consensus/misc/eip4844" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/rlp" "github.com/holiman/billy" ) @@ -48,11 +50,21 @@ type limbo struct { } // newLimbo opens and indexes a set of limboed blob transactions. -func newLimbo(datadir string, maxBlobsPerTransaction int) (*limbo, error) { +func newLimbo(config *params.ChainConfig, datadir string) (*limbo, error) { l := &limbo{ index: make(map[common.Hash]uint64), groups: make(map[uint64]map[uint64]common.Hash), } + + // Create new slotter for pre-Osaka blob configuration. 
+ slotter := newSlotter(eip4844.LatestMaxBlobsPerBlock(config)) + + // See if we need to migrate the limbo after fusaka. + slotter, err := tryMigrate(config, slotter, datadir) + if err != nil { + return nil, err + } + // Index all limboed blobs on disk and delete anything unprocessable var fails []uint64 index := func(id uint64, size uint32, data []byte) { @@ -60,7 +72,7 @@ func newLimbo(datadir string, maxBlobsPerTransaction int) (*limbo, error) { fails = append(fails, id) } } - store, err := billy.Open(billy.Options{Path: datadir, Repair: true}, newSlotter(maxBlobsPerTransaction), index) + store, err := billy.Open(billy.Options{Path: datadir, Repair: true}, slotter, index) if err != nil { return nil, err } diff --git a/core/txpool/blobpool/lookup.go b/core/txpool/blobpool/lookup.go index 7607cd487a..874ca85b8c 100644 --- a/core/txpool/blobpool/lookup.go +++ b/core/txpool/blobpool/lookup.go @@ -110,3 +110,13 @@ func (l *lookup) untrack(tx *blobTxMeta) { } } } + +// update updates the transaction index. It should only be used in the conversion. +func (l *lookup) update(hash common.Hash, id uint64, size uint64) bool { + meta, exists := l.txIndex[hash] + if !exists { + return false + } + meta.id, meta.size = id, size + return true +} diff --git a/core/txpool/blobpool/slotter.go b/core/txpool/blobpool/slotter.go index 84ccc0f27b..9b793e366c 100644 --- a/core/txpool/blobpool/slotter.go +++ b/core/txpool/blobpool/slotter.go @@ -16,6 +16,49 @@ package blobpool +import ( + "github.com/ethereum/go-ethereum/consensus/misc/eip4844" + "github.com/ethereum/go-ethereum/params" + "github.com/holiman/billy" +) + +// tryMigrate checks if the billy needs to be migrated and migrates if needed. +// Returns a slotter that can be used for the database. +func tryMigrate(config *params.ChainConfig, slotter billy.SlotSizeFn, datadir string) (billy.SlotSizeFn, error) { + // Check if we need to migrate our blob db to the new slotter. 
+ if config.OsakaTime != nil { + // Open the store using the version slotter to see if any version has been + // written. + var version int + index := func(_ uint64, _ uint32, blob []byte) { + version = max(version, parseSlotterVersion(blob)) + } + store, err := billy.Open(billy.Options{Path: datadir}, newVersionSlotter(), index) + if err != nil { + return nil, err + } + store.Close() + + // If the version found is less than the currently configured store version, + // perform a migration then write the updated version of the store. + if version < storeVersion { + newSlotter := newSlotterEIP7594(eip4844.LatestMaxBlobsPerBlock(config)) + if err := billy.Migrate(billy.Options{Path: datadir, Repair: true}, slotter, newSlotter); err != nil { + return nil, err + } + store, err = billy.Open(billy.Options{Path: datadir}, newVersionSlotter(), nil) + if err != nil { + return nil, err + } + writeSlotterVersion(store, storeVersion) + store.Close() + } + // Set the slotter to the format now that the Osaka is active. + slotter = newSlotterEIP7594(eip4844.LatestMaxBlobsPerBlock(config)) + } + return slotter, nil +} + // newSlotter creates a helper method for the Billy datastore that returns the // individual shelf sizes used to store transactions in. // @@ -25,7 +68,7 @@ package blobpool // The slotter also creates a shelf for 0-blob transactions. Whilst those are not // allowed in the current protocol, having an empty shelf is not a relevant use // of resources, but it makes stress testing with junk transactions simpler. -func newSlotter(maxBlobsPerTransaction int) func() (uint32, bool) { +func newSlotter(maxBlobsPerTransaction int) billy.SlotSizeFn { slotsize := uint32(txAvgSize) slotsize -= uint32(blobSize) // underflows, it's ok, will overflow back in the first return @@ -36,3 +79,42 @@ func newSlotter(maxBlobsPerTransaction int) func() (uint32, bool) { return slotsize, finished } } + +// newSlotterEIP7594 creates a different slotter for EIP-7594 transactions. 
+// EIP-7594 (PeerDAS) changes the average transaction size which means the current +// static 4KB average size is not enough anymore. +// This slotter adds a dynamic overhead component to the slotter, which also +// captures the notion that blob transactions with more blobs are also more likely to +// to have more calldata. +func newSlotterEIP7594(maxBlobsPerTransaction int) billy.SlotSizeFn { + slotsize := uint32(txAvgSize) + slotsize -= uint32(blobSize) + txBlobOverhead // underflows, it's ok, will overflow back in the first return + + return func() (size uint32, done bool) { + slotsize += blobSize + txBlobOverhead + finished := slotsize > uint32(maxBlobsPerTransaction)*(blobSize+txBlobOverhead)+txMaxSize + + return slotsize, finished + } +} + +// newVersionSlotter creates a slotter with a single 8 byte shelf to store +// version metadata in. +func newVersionSlotter() billy.SlotSizeFn { + return func() (size uint32, done bool) { + return 8, true + } +} + +// parseSlotterVersion will parse the slotter's version from a given data blob. +func parseSlotterVersion(blob []byte) int { + if len(blob) > 0 { + return int(blob[0]) + } + return 0 +} + +// writeSlotterVersion writes the current slotter version into the store. +func writeSlotterVersion(store billy.Database, version int) { + store.Put([]byte{byte(version)}) +} diff --git a/core/txpool/blobpool/slotter_test.go b/core/txpool/blobpool/slotter_test.go index 8d46f47d2c..e4cf232f4e 100644 --- a/core/txpool/blobpool/slotter_test.go +++ b/core/txpool/blobpool/slotter_test.go @@ -16,7 +16,9 @@ package blobpool -import "testing" +import ( + "testing" +) // Tests that the slotter creates the expected database shelves. func TestNewSlotter(t *testing.T) { @@ -58,3 +60,44 @@ func TestNewSlotter(t *testing.T) { } } } + +// Tests that the slotter creates the expected database shelves. 
+func TestNewSlotterEIP7594(t *testing.T) { + // Generate the database shelve sizes + slotter := newSlotterEIP7594(6) + + var shelves []uint32 + for { + shelf, done := slotter() + shelves = append(shelves, shelf) + if done { + break + } + } + // Compare the database shelves to the expected ones + want := []uint32{ + 0*blobSize + 0*txBlobOverhead + txAvgSize, // 0 blob + some expected tx infos + 1*blobSize + 1*txBlobOverhead + txAvgSize, // 1 blob + some expected tx infos + 2*blobSize + 2*txBlobOverhead + txAvgSize, // 2 blob + some expected tx infos (could be fewer blobs and more tx data) + 3*blobSize + 3*txBlobOverhead + txAvgSize, // 3 blob + some expected tx infos (could be fewer blobs and more tx data) + 4*blobSize + 4*txBlobOverhead + txAvgSize, // 4 blob + some expected tx infos (could be fewer blobs and more tx data) + 5*blobSize + 5*txBlobOverhead + txAvgSize, // 1-6 blobs + unexpectedly large tx infos < 4 blobs + max tx metadata size + 6*blobSize + 6*txBlobOverhead + txAvgSize, // 1-6 blobs + unexpectedly large tx infos < 4 blobs + max tx metadata size + 7*blobSize + 7*txBlobOverhead + txAvgSize, // 1-6 blobs + unexpectedly large tx infos < 4 blobs + max tx metadata size + 8*blobSize + 8*txBlobOverhead + txAvgSize, // 1-6 blobs + unexpectedly large tx infos < 4 blobs + max tx metadata size + 9*blobSize + 9*txBlobOverhead + txAvgSize, // 1-6 blobs + unexpectedly large tx infos < 4 blobs + max tx metadata size + 10*blobSize + 10*txBlobOverhead + txAvgSize, // 1-6 blobs + unexpectedly large tx infos < 4 blobs + max tx metadata size + 11*blobSize + 11*txBlobOverhead + txAvgSize, // 1-6 blobs + unexpectedly large tx infos < 4 blobs + max tx metadata size + 12*blobSize + 12*txBlobOverhead + txAvgSize, // 1-6 blobs + unexpectedly large tx infos < 4 blobs + max tx metadata size + 13*blobSize + 13*txBlobOverhead + txAvgSize, // 1-6 blobs + unexpectedly large tx infos < 4 blobs + max tx metadata size + 14*blobSize + 14*txBlobOverhead + txAvgSize, // 1-6 blobs + 
unexpectedly large tx infos >= 4 blobs + max tx metadata size + } + if len(shelves) != len(want) { + t.Errorf("shelves count mismatch: have %d, want %d", len(shelves), len(want)) + } + for i := 0; i < len(shelves) && i < len(want); i++ { + if shelves[i] != want[i] { + t.Errorf("shelf %d mismatch: have %d, want %d", i, shelves[i], want[i]) + } + } +} diff --git a/core/txpool/legacypool/legacypool.go b/core/txpool/legacypool/legacypool.go index 425def170b..80a9faf23f 100644 --- a/core/txpool/legacypool/legacypool.go +++ b/core/txpool/legacypool/legacypool.go @@ -508,7 +508,7 @@ func (pool *LegacyPool) ContentFrom(addr common.Address) ([]*types.Transaction, func (pool *LegacyPool) Pending(filter txpool.PendingFilter) map[common.Address][]*txpool.LazyTransaction { // If only blob transactions are requested, this pool is unsuitable as it // contains none, don't even bother. - if filter.OnlyBlobTxs { + if filter.BlobTxs { return nil } pool.mu.Lock() diff --git a/core/txpool/legacypool/legacypool_test.go b/core/txpool/legacypool/legacypool_test.go index 1ba080b749..0c8642659d 100644 --- a/core/txpool/legacypool/legacypool_test.go +++ b/core/txpool/legacypool/legacypool_test.go @@ -2292,8 +2292,8 @@ func TestSetCodeTransactions(t *testing.T) { pending: 1, run: func(name string) { aa := common.Address{0xaa, 0xaa} - statedb.SetCode(addrA, append(types.DelegationPrefix, aa.Bytes()...)) - statedb.SetCode(aa, []byte{byte(vm.ADDRESS), byte(vm.PUSH0), byte(vm.SSTORE)}) + statedb.SetCode(addrA, append(types.DelegationPrefix, aa.Bytes()...), tracing.CodeChangeUnspecified) + statedb.SetCode(aa, []byte{byte(vm.ADDRESS), byte(vm.PUSH0), byte(vm.SSTORE)}, tracing.CodeChangeUnspecified) // Send gapped transaction, it should be rejected. 
if err := pool.addRemoteSync(pricedTransaction(2, 100000, big.NewInt(1), keyA)); !errors.Is(err, ErrOutOfOrderTxFromDelegated) { @@ -2317,7 +2317,7 @@ func TestSetCodeTransactions(t *testing.T) { } // Reset the delegation, avoid leaking state into the other tests - statedb.SetCode(addrA, nil) + statedb.SetCode(addrA, nil, tracing.CodeChangeUnspecified) }, }, { @@ -2583,7 +2583,7 @@ func TestSetCodeTransactionsReorg(t *testing.T) { } // Simulate the chain moving blockchain.statedb.SetNonce(addrA, 1, tracing.NonceChangeAuthorization) - blockchain.statedb.SetCode(addrA, types.AddressToDelegation(auth.Address)) + blockchain.statedb.SetCode(addrA, types.AddressToDelegation(auth.Address), tracing.CodeChangeUnspecified) <-pool.requestReset(nil, nil) // Set an authorization for 0x00 auth, _ = types.SignSetCode(keyA, types.SetCodeAuthorization{ @@ -2601,7 +2601,7 @@ func TestSetCodeTransactionsReorg(t *testing.T) { } // Simulate the chain moving blockchain.statedb.SetNonce(addrA, 2, tracing.NonceChangeAuthorization) - blockchain.statedb.SetCode(addrA, nil) + blockchain.statedb.SetCode(addrA, nil, tracing.CodeChangeUnspecified) <-pool.requestReset(nil, nil) // Now send two transactions from addrA if err := pool.addRemoteSync(pricedTransaction(2, 100000, big.NewInt(1000), keyA)); err != nil { diff --git a/core/txpool/legacypool/list.go b/core/txpool/legacypool/list.go index 507c0b429a..0c9f13c62f 100644 --- a/core/txpool/legacypool/list.go +++ b/core/txpool/legacypool/list.go @@ -323,15 +323,22 @@ func (l *list) Add(tx *types.Transaction, priceBump uint64) (bool, *types.Transa if tx.GasFeeCapIntCmp(thresholdFeeCap) < 0 || tx.GasTipCapIntCmp(thresholdTip) < 0 { return false, nil } - // Old is being replaced, subtract old cost - l.subTotalCost([]*types.Transaction{old}) } // Add new tx cost to totalcost cost, overflow := uint256.FromBig(tx.Cost()) if overflow { return false, nil } - l.totalcost.Add(l.totalcost, cost) + total, overflow := 
new(uint256.Int).AddOverflow(l.totalcost, cost) + if overflow { + return false, nil + } + l.totalcost = total + + // Old is being replaced, subtract old cost + if old != nil { + l.subTotalCost([]*types.Transaction{old}) + } // Otherwise overwrite the old transaction with the current one l.txs.Put(tx) diff --git a/core/txpool/subpool.go b/core/txpool/subpool.go index f1f6056686..519ae7b989 100644 --- a/core/txpool/subpool.go +++ b/core/txpool/subpool.go @@ -78,8 +78,10 @@ type PendingFilter struct { BlobFee *uint256.Int // Minimum 4844 blobfee needed to include a blob transaction GasLimitCap uint64 // Maximum gas can be used for a single transaction execution (0 means no limit) - OnlyPlainTxs bool // Return only plain EVM transactions (peer-join announces, block space filling) - OnlyBlobTxs bool // Return only blob transactions (block blob-space filling) + // When BlobTxs true, return only blob transactions (block blob-space filling) + // when false, return only non-blob txs (peer-join announces, block space filling) + BlobTxs bool + BlobVersion byte // Blob tx version to include. 0 means pre-Osaka, 1 means Osaka and later } // TxMetadata denotes the metadata of a transaction. 
diff --git a/core/txpool/validation.go b/core/txpool/validation.go index 46974fad3c..4b54eac50d 100644 --- a/core/txpool/validation.go +++ b/core/txpool/validation.go @@ -116,6 +116,10 @@ func ValidateTransaction(tx *types.Transaction, head *types.Header, signer types if _, err := types.Sender(signer, tx); err != nil { return fmt.Errorf("%w: %v", ErrInvalidSender, err) } + // Limit nonce to 2^64-1 per EIP-2681 + if tx.Nonce()+1 < tx.Nonce() { + return core.ErrNonceMax + } // Ensure the transaction has more gas than the bare minimum needed to cover // the transaction metadata intrGas, err := core.IntrinsicGas(tx.Data(), tx.AccessList(), tx.SetCodeAuthorizations(), tx.To() == nil, true, rules.IsIstanbul, rules.IsShanghai) @@ -176,16 +180,14 @@ func validateBlobTx(tx *types.Transaction, head *types.Header, opts *ValidationO return err } // Fork-specific sidecar checks, including proof verification. - if opts.Config.IsOsaka(head.Number, head.Time) { + if sidecar.Version == types.BlobSidecarVersion1 { return validateBlobSidecarOsaka(sidecar, hashes) + } else { + return validateBlobSidecarLegacy(sidecar, hashes) } - return validateBlobSidecarLegacy(sidecar, hashes) } func validateBlobSidecarLegacy(sidecar *types.BlobTxSidecar, hashes []common.Hash) error { - if sidecar.Version != types.BlobSidecarVersion0 { - return fmt.Errorf("invalid sidecar version pre-osaka: %v", sidecar.Version) - } if len(sidecar.Proofs) != len(hashes) { return fmt.Errorf("invalid number of %d blob proofs expected %d", len(sidecar.Proofs), len(hashes)) } @@ -198,9 +200,6 @@ func validateBlobSidecarLegacy(sidecar *types.BlobTxSidecar, hashes []common.Has } func validateBlobSidecarOsaka(sidecar *types.BlobTxSidecar, hashes []common.Hash) error { - if sidecar.Version != types.BlobSidecarVersion1 { - return fmt.Errorf("invalid sidecar version post-osaka: %v", sidecar.Version) - } if len(sidecar.Proofs) != len(hashes)*kzg4844.CellProofsPerBlob { return fmt.Errorf("invalid number of %d blob proofs 
expected %d", len(sidecar.Proofs), len(hashes)*kzg4844.CellProofsPerBlob) } diff --git a/core/txpool/validation_test.go b/core/txpool/validation_test.go new file mode 100644 index 0000000000..3945b548c1 --- /dev/null +++ b/core/txpool/validation_test.go @@ -0,0 +1,115 @@ +// Copyright 2025 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . 
+ +package txpool + +import ( + "crypto/ecdsa" + "errors" + "math" + "math/big" + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/params" +) + +func TestValidateTransactionEIP2681(t *testing.T) { + key, err := crypto.GenerateKey() + if err != nil { + t.Fatal(err) + } + + head := &types.Header{ + Number: big.NewInt(1), + GasLimit: 5000000, + Time: 1, + Difficulty: big.NewInt(1), + } + + signer := types.LatestSigner(params.TestChainConfig) + + // Create validation options + opts := &ValidationOptions{ + Config: params.TestChainConfig, + Accept: 0xFF, // Accept all transaction types + MaxSize: 32 * 1024, + MaxBlobCount: 6, + MinTip: big.NewInt(0), + } + + tests := []struct { + name string + nonce uint64 + wantErr error + }{ + { + name: "normal nonce", + nonce: 42, + wantErr: nil, + }, + { + name: "max allowed nonce (2^64-2)", + nonce: math.MaxUint64 - 1, + wantErr: nil, + }, + { + name: "EIP-2681 nonce overflow (2^64-1)", + nonce: math.MaxUint64, + wantErr: core.ErrNonceMax, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + tx := createTestTransaction(key, tt.nonce) + err := ValidateTransaction(tx, head, signer, opts) + + if tt.wantErr == nil { + if err != nil { + t.Errorf("ValidateTransaction() error = %v, wantErr nil", err) + } + } else { + if err == nil { + t.Errorf("ValidateTransaction() error = nil, wantErr %v", tt.wantErr) + } else if !errors.Is(err, tt.wantErr) { + t.Errorf("ValidateTransaction() error = %v, wantErr %v", err, tt.wantErr) + } + } + }) + } +} + +// createTestTransaction creates a basic transaction for testing +func createTestTransaction(key *ecdsa.PrivateKey, nonce uint64) *types.Transaction { + to := common.HexToAddress("0x0000000000000000000000000000000000000001") + + txdata := &types.LegacyTx{ + Nonce: nonce, + To: &to, + Value: 
big.NewInt(1000), + Gas: 21000, + GasPrice: big.NewInt(1), + Data: nil, + } + + tx := types.NewTx(txdata) + signedTx, _ := types.SignTx(tx, types.HomesteadSigner{}, key) + return signedTx +} diff --git a/core/types/block_test.go b/core/types/block_test.go index 2130a2fcf3..5fa4756a50 100644 --- a/core/types/block_test.go +++ b/core/types/block_test.go @@ -263,9 +263,8 @@ var benchBuffer = bytes.NewBuffer(make([]byte, 0, 32000)) func BenchmarkEncodeBlock(b *testing.B) { block := makeBenchBlock() - b.ResetTimer() - for i := 0; i < b.N; i++ { + for b.Loop() { benchBuffer.Reset() if err := rlp.Encode(benchBuffer, block); err != nil { b.Fatal(err) diff --git a/core/types/bloom9_test.go b/core/types/bloom9_test.go index 07f6446a97..ceb6797e1f 100644 --- a/core/types/bloom9_test.go +++ b/core/types/bloom9_test.go @@ -78,7 +78,7 @@ func TestBloomExtensively(t *testing.T) { func BenchmarkBloom9(b *testing.B) { test := []byte("testestestest") - for i := 0; i < b.N; i++ { + for b.Loop() { Bloom9(test) } } @@ -86,7 +86,7 @@ func BenchmarkBloom9(b *testing.B) { func BenchmarkBloom9Lookup(b *testing.B) { toTest := []byte("testtest") bloom := new(Bloom) - for i := 0; i < b.N; i++ { + for b.Loop() { bloom.Test(toTest) } } @@ -128,7 +128,7 @@ func BenchmarkCreateBloom(b *testing.B) { } b.Run("small-createbloom", func(b *testing.B) { b.ReportAllocs() - for i := 0; i < b.N; i++ { + for b.Loop() { for _, receipt := range rSmall { receipt.Bloom = CreateBloom(receipt) } @@ -144,7 +144,7 @@ func BenchmarkCreateBloom(b *testing.B) { }) b.Run("large-createbloom", func(b *testing.B) { b.ReportAllocs() - for i := 0; i < b.N; i++ { + for b.Loop() { for _, receipt := range rLarge { receipt.Bloom = CreateBloom(receipt) } @@ -163,13 +163,11 @@ func BenchmarkCreateBloom(b *testing.B) { receipt.Bloom = CreateBloom(receipt) } b.ReportAllocs() - b.ResetTimer() var bl Bloom - for i := 0; i < b.N; i++ { + for b.Loop() { bl = MergeBloom(rSmall) } - b.StopTimer() var exp = 
common.HexToHash("c384c56ece49458a427c67b90fefe979ebf7104795be65dc398b280f24104949") got := crypto.Keccak256Hash(bl.Bytes()) @@ -182,13 +180,11 @@ func BenchmarkCreateBloom(b *testing.B) { receipt.Bloom = CreateBloom(receipt) } b.ReportAllocs() - b.ResetTimer() var bl Bloom - for i := 0; i < b.N; i++ { + for b.Loop() { bl = MergeBloom(rLarge) } - b.StopTimer() var exp = common.HexToHash("c384c56ece49458a427c67b90fefe979ebf7104795be65dc398b280f24104949") got := crypto.Keccak256Hash(bl.Bytes()) diff --git a/core/types/hashing_test.go b/core/types/hashing_test.go index c846ecd0c5..54adbc73e8 100644 --- a/core/types/hashing_test.go +++ b/core/types/hashing_test.go @@ -84,9 +84,8 @@ func BenchmarkDeriveSha200(b *testing.B) { var exp common.Hash var got common.Hash b.Run("std_trie", func(b *testing.B) { - b.ResetTimer() b.ReportAllocs() - for i := 0; i < b.N; i++ { + for b.Loop() { exp = types.DeriveSha(txs, trie.NewEmpty(triedb.NewDatabase(rawdb.NewMemoryDatabase(), nil))) } }) @@ -94,7 +93,7 @@ func BenchmarkDeriveSha200(b *testing.B) { b.Run("stack_trie", func(b *testing.B) { b.ResetTimer() b.ReportAllocs() - for i := 0; i < b.N; i++ { + for b.Loop() { got = types.DeriveSha(txs, trie.NewStackTrie(nil)) } }) diff --git a/core/types/transaction_test.go b/core/types/transaction_test.go index cc41674dfd..2d854ae345 100644 --- a/core/types/transaction_test.go +++ b/core/types/transaction_test.go @@ -591,7 +591,7 @@ func BenchmarkHash(b *testing.B) { GasTipCap: big.NewInt(500), GasFeeCap: big.NewInt(500), }) - for i := 0; i < b.N; i++ { + for b.Loop() { signer.Hash(tx) } } @@ -614,7 +614,7 @@ func BenchmarkEffectiveGasTip(b *testing.B) { b.Run("Original", func(b *testing.B) { b.ReportAllocs() - for i := 0; i < b.N; i++ { + for b.Loop() { _, err := tx.EffectiveGasTip(baseFee.ToBig()) if err != nil { b.Fatal(err) @@ -625,7 +625,7 @@ func BenchmarkEffectiveGasTip(b *testing.B) { b.Run("IntoMethod", func(b *testing.B) { b.ReportAllocs() dst := new(uint256.Int) - for i := 0; i < 
b.N; i++ { + for b.Loop() { err := tx.calcEffectiveGasTip(dst, baseFee) if err != nil { b.Fatal(err) @@ -729,7 +729,7 @@ func BenchmarkEffectiveGasTipCmp(b *testing.B) { b.Run("Original", func(b *testing.B) { b.ReportAllocs() - for i := 0; i < b.N; i++ { + for b.Loop() { tx.EffectiveGasTipCmp(other, baseFee) } }) diff --git a/core/types/types_test.go b/core/types/types_test.go index 1fb386d5de..96d0444994 100644 --- a/core/types/types_test.go +++ b/core/types/types_test.go @@ -126,7 +126,7 @@ func benchRLP(b *testing.B, encode bool) { b.Run(tc.name, func(b *testing.B) { b.ReportAllocs() var null = &devnull{} - for i := 0; i < b.N; i++ { + for b.Loop() { rlp.Encode(null, tc.obj) } b.SetBytes(int64(null.len / b.N)) @@ -136,7 +136,7 @@ func benchRLP(b *testing.B, encode bool) { // Test decoding b.Run(tc.name, func(b *testing.B) { b.ReportAllocs() - for i := 0; i < b.N; i++ { + for b.Loop() { if err := rlp.DecodeBytes(data, tc.obj); err != nil { b.Fatal(err) } diff --git a/core/verkle_witness_test.go b/core/verkle_witness_test.go index ed85d1555f..ca0c928c3c 100644 --- a/core/verkle_witness_test.go +++ b/core/verkle_witness_test.go @@ -232,7 +232,7 @@ func TestProcessParentBlockHash(t *testing.T) { // etc checkBlockHashes := func(statedb *state.StateDB, isVerkle bool) { statedb.SetNonce(params.HistoryStorageAddress, 1, tracing.NonceChangeUnspecified) - statedb.SetCode(params.HistoryStorageAddress, params.HistoryStorageCode) + statedb.SetCode(params.HistoryStorageAddress, params.HistoryStorageCode, tracing.CodeChangeUnspecified) // Process n blocks, from 1 .. 
num var num = 2 for i := 1; i <= num; i++ { diff --git a/core/vm/analysis_legacy_test.go b/core/vm/analysis_legacy_test.go index f84a4abc92..75d8a495fa 100644 --- a/core/vm/analysis_legacy_test.go +++ b/core/vm/analysis_legacy_test.go @@ -65,21 +65,17 @@ func BenchmarkJumpdestAnalysis_1200k(bench *testing.B) { // 1.4 ms code := make([]byte, analysisCodeSize) bench.SetBytes(analysisCodeSize) - bench.ResetTimer() - for i := 0; i < bench.N; i++ { + for bench.Loop() { codeBitmap(code) } - bench.StopTimer() } func BenchmarkJumpdestHashing_1200k(bench *testing.B) { // 4 ms code := make([]byte, analysisCodeSize) bench.SetBytes(analysisCodeSize) - bench.ResetTimer() - for i := 0; i < bench.N; i++ { + for bench.Loop() { crypto.Keccak256Hash(code) } - bench.StopTimer() } func BenchmarkJumpdestOpAnalysis(bench *testing.B) { @@ -91,8 +87,7 @@ func BenchmarkJumpdestOpAnalysis(bench *testing.B) { code[i] = byte(op) } bits := make(BitVec, len(code)/8+1+4) - b.ResetTimer() - for i := 0; i < b.N; i++ { + for b.Loop() { clear(bits) codeBitmapInternal(code, bits) } diff --git a/core/vm/contracts.go b/core/vm/contracts.go index da099edb57..cae0be9f2d 100644 --- a/core/vm/contracts.go +++ b/core/vm/contracts.go @@ -24,11 +24,13 @@ import ( "maps" "math" "math/big" + "math/bits" "github.com/consensys/gnark-crypto/ecc" bls12381 "github.com/consensys/gnark-crypto/ecc/bls12-381" "github.com/consensys/gnark-crypto/ecc/bls12-381/fp" "github.com/consensys/gnark-crypto/ecc/bls12-381/fr" + patched_big "github.com/ethereum/go-bigmodexpfix/src/math/big" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/bitutil" "github.com/ethereum/go-ethereum/core/tracing" @@ -38,6 +40,7 @@ import ( "github.com/ethereum/go-ethereum/crypto/kzg4844" "github.com/ethereum/go-ethereum/crypto/secp256r1" "github.com/ethereum/go-ethereum/params" + "github.com/holiman/uint256" "golang.org/x/crypto/ripemd160" ) @@ -378,21 +381,7 @@ type bigModExp struct { eip7883 bool } -var ( - big1 = 
big.NewInt(1) - big3 = big.NewInt(3) - big7 = big.NewInt(7) - big20 = big.NewInt(20) - big32 = big.NewInt(32) - big64 = big.NewInt(64) - big96 = big.NewInt(96) - big480 = big.NewInt(480) - big1024 = big.NewInt(1024) - big3072 = big.NewInt(3072) - big199680 = big.NewInt(199680) -) - -// modexpMultComplexity implements bigModexp multComplexity formula, as defined in EIP-198 +// byzantiumMultComplexity implements the bigModexp multComplexity formula, as defined in EIP-198. // // def mult_complexity(x): // if x <= 64: return x ** 2 @@ -400,120 +389,221 @@ var ( // else: return x ** 2 // 16 + 480 * x - 199680 // // where is x is max(length_of_MODULUS, length_of_BASE) -func modexpMultComplexity(x *big.Int) *big.Int { +// returns MaxUint64 if an overflow occurred. +func byzantiumMultComplexity(x uint64) uint64 { switch { - case x.Cmp(big64) <= 0: - x.Mul(x, x) // x ** 2 - case x.Cmp(big1024) <= 0: - // (x ** 2 // 4 ) + ( 96 * x - 3072) - x = new(big.Int).Add( - new(big.Int).Rsh(new(big.Int).Mul(x, x), 2), - new(big.Int).Sub(new(big.Int).Mul(big96, x), big3072), - ) + case x <= 64: + return x * x + case x <= 1024: + // x^2 / 4 + 96*x - 3072 + return x*x/4 + 96*x - 3072 + default: - // (x ** 2 // 16) + (480 * x - 199680) - x = new(big.Int).Add( - new(big.Int).Rsh(new(big.Int).Mul(x, x), 4), - new(big.Int).Sub(new(big.Int).Mul(big480, x), big199680), - ) + // For large x, use uint256 arithmetic to avoid overflow + // x^2 / 16 + 480*x - 199680 + + // xSqr = x^2 / 16 + carry, xSqr := bits.Mul64(x, x) + if carry != 0 { + return math.MaxUint64 + } + xSqr = xSqr >> 4 + + // Calculate 480 * x (can't overflow if x^2 didn't overflow) + x480 := x * 480 + // Calculate 480 * x - 199680 (will not underflow, since x > 1024) + x480 = x480 - 199680 + + // xSqr + x480 + sum, carry := bits.Add64(xSqr, x480, 0) + if carry != 0 { + return math.MaxUint64 + } + return sum + } +} + +// berlinMultComplexity implements the multiplication complexity formula for Berlin. 
+// +// def mult_complexity(x): +// +// ceiling(x/8)^2 +// +// where is x is max(length_of_MODULUS, length_of_BASE) +func berlinMultComplexity(x uint64) uint64 { + // x = (x + 7) / 8 + x, carry := bits.Add64(x, 7, 0) + if carry != 0 { + return math.MaxUint64 + } + x /= 8 + + // x^2 + carry, x = bits.Mul64(x, x) + if carry != 0 { + return math.MaxUint64 } return x } +// osakaMultComplexity implements the multiplication complexity formula for Osaka. +// +// For x <= 32: returns 16 +// For x > 32: returns 2 * ceiling(x/8)^2 +func osakaMultComplexity(x uint64) uint64 { + if x <= 32 { + return 16 + } + // For x > 32, return 2 * berlinMultComplexity(x) + result := berlinMultComplexity(x) + carry, result := bits.Mul64(result, 2) + if carry != 0 { + return math.MaxUint64 + } + return result +} + +// modexpIterationCount calculates the number of iterations for the modexp precompile. +// This is the adjusted exponent length used in gas calculation. +func modexpIterationCount(expLen uint64, expHead uint256.Int, multiplier uint64) uint64 { + var iterationCount uint64 + + // For large exponents (expLen > 32), add (expLen - 32) * multiplier + if expLen > 32 { + carry, count := bits.Mul64(expLen-32, multiplier) + if carry > 0 { + return math.MaxUint64 + } + iterationCount = count + } + // Add the MSB position - 1 if expHead is non-zero + if bitLen := expHead.BitLen(); bitLen > 0 { + count, carry := bits.Add64(iterationCount, uint64(bitLen-1), 0) + if carry > 0 { + return math.MaxUint64 + } + iterationCount = count + } + + return max(iterationCount, 1) +} + +// byzantiumModexpGas calculates the gas cost for the modexp precompile using Byzantium rules. 
+func byzantiumModexpGas(baseLen, expLen, modLen uint64, expHead uint256.Int) uint64 { + const ( + multiplier = 8 + divisor = 20 + ) + + maxLen := max(baseLen, modLen) + multComplexity := byzantiumMultComplexity(maxLen) + if multComplexity == math.MaxUint64 { + return math.MaxUint64 + } + iterationCount := modexpIterationCount(expLen, expHead, multiplier) + + // Calculate gas: (multComplexity * iterationCount) / divisor + carry, gas := bits.Mul64(iterationCount, multComplexity) + gas /= divisor + if carry != 0 { + return math.MaxUint64 + } + return gas +} + +// berlinModexpGas calculates the gas cost for the modexp precompile using Berlin rules. +func berlinModexpGas(baseLen, expLen, modLen uint64, expHead uint256.Int) uint64 { + const ( + multiplier = 8 + divisor = 3 + minGas = 200 + ) + + maxLen := max(baseLen, modLen) + multComplexity := berlinMultComplexity(maxLen) + if multComplexity == math.MaxUint64 { + return math.MaxUint64 + } + iterationCount := modexpIterationCount(expLen, expHead, multiplier) + + // Calculate gas: (multComplexity * iterationCount) / divisor + carry, gas := bits.Mul64(iterationCount, multComplexity) + gas /= divisor + if carry != 0 { + return math.MaxUint64 + } + return max(gas, minGas) +} + +// osakaModexpGas calculates the gas cost for the modexp precompile using Osaka rules. +func osakaModexpGas(baseLen, expLen, modLen uint64, expHead uint256.Int) uint64 { + const ( + multiplier = 16 + minGas = 500 + ) + + maxLen := max(baseLen, modLen) + multComplexity := osakaMultComplexity(maxLen) + if multComplexity == math.MaxUint64 { + return math.MaxUint64 + } + iterationCount := modexpIterationCount(expLen, expHead, multiplier) + + // Calculate gas: multComplexity * iterationCount + carry, gas := bits.Mul64(iterationCount, multComplexity) + if carry != 0 { + return math.MaxUint64 + } + return max(gas, minGas) +} + // RequiredGas returns the gas required to execute the pre-compiled contract. 
func (c *bigModExp) RequiredGas(input []byte) uint64 { - var ( - baseLen = new(big.Int).SetBytes(getData(input, 0, 32)) - expLen = new(big.Int).SetBytes(getData(input, 32, 32)) - modLen = new(big.Int).SetBytes(getData(input, 64, 32)) - ) + // Parse input lengths + baseLenBig := new(uint256.Int).SetBytes(getData(input, 0, 32)) + expLenBig := new(uint256.Int).SetBytes(getData(input, 32, 32)) + modLenBig := new(uint256.Int).SetBytes(getData(input, 64, 32)) + + // Convert to uint64, capping at max value + baseLen := baseLenBig.Uint64() + if !baseLenBig.IsUint64() { + baseLen = math.MaxUint64 + } + expLen := expLenBig.Uint64() + if !expLenBig.IsUint64() { + expLen = math.MaxUint64 + } + modLen := modLenBig.Uint64() + if !modLenBig.IsUint64() { + modLen = math.MaxUint64 + } + + // Skip the header if len(input) > 96 { input = input[96:] } else { input = input[:0] } + // Retrieve the head 32 bytes of exp for the adjusted exponent length - var expHead *big.Int - if big.NewInt(int64(len(input))).Cmp(baseLen) <= 0 { - expHead = new(big.Int) - } else { - if expLen.Cmp(big32) > 0 { - expHead = new(big.Int).SetBytes(getData(input, baseLen.Uint64(), 32)) + var expHead uint256.Int + if uint64(len(input)) > baseLen { + if expLen > 32 { + expHead.SetBytes(getData(input, baseLen, 32)) } else { - expHead = new(big.Int).SetBytes(getData(input, baseLen.Uint64(), expLen.Uint64())) + // TODO: Check that if expLen < baseLen, then getData will return an empty slice + expHead.SetBytes(getData(input, baseLen, expLen)) } } - // Calculate the adjusted exponent length - var msb int - if bitlen := expHead.BitLen(); bitlen > 0 { - msb = bitlen - 1 - } - adjExpLen := new(big.Int) - if expLen.Cmp(big32) > 0 { - adjExpLen.Sub(expLen, big32) - if c.eip7883 { - adjExpLen.Lsh(adjExpLen, 4) - } else { - adjExpLen.Lsh(adjExpLen, 3) - } - } - adjExpLen.Add(adjExpLen, big.NewInt(int64(msb))) - // Calculate the gas cost of the operation - gas := new(big.Int) - if modLen.Cmp(baseLen) < 0 { - gas.Set(baseLen) 
+ + // Choose the appropriate gas calculation based on the EIP flags + if c.eip7883 { + return osakaModexpGas(baseLen, expLen, modLen, expHead) + } else if c.eip2565 { + return berlinModexpGas(baseLen, expLen, modLen, expHead) } else { - gas.Set(modLen) + return byzantiumModexpGas(baseLen, expLen, modLen, expHead) } - - maxLenOver32 := gas.Cmp(big32) > 0 - if c.eip2565 { - // EIP-2565 (Berlin fork) has three changes: - // - // 1. Different multComplexity (inlined here) - // in EIP-2565 (https://eips.ethereum.org/EIPS/eip-2565): - // - // def mult_complexity(x): - // ceiling(x/8)^2 - // - // where is x is max(length_of_MODULUS, length_of_BASE) - gas.Add(gas, big7) - gas.Rsh(gas, 3) - gas.Mul(gas, gas) - - var minPrice uint64 = 200 - if c.eip7883 { - minPrice = 500 - if maxLenOver32 { - gas.Add(gas, gas) - } else { - gas = big.NewInt(16) - } - } - - if adjExpLen.Cmp(big1) > 0 { - gas.Mul(gas, adjExpLen) - } - // 2. Different divisor (`GQUADDIVISOR`) (3) - if !c.eip7883 { - gas.Div(gas, big3) - } - if gas.BitLen() > 64 { - return math.MaxUint64 - } - return max(minPrice, gas.Uint64()) - } - - // Pre-Berlin logic. 
- gas = modexpMultComplexity(gas) - if adjExpLen.Cmp(big1) > 0 { - gas.Mul(gas, adjExpLen) - } - gas.Div(gas, big20) - if gas.BitLen() > 64 { - return math.MaxUint64 - } - return gas.Uint64() } func (c *bigModExp) Run(input []byte) ([]byte, error) { @@ -542,9 +632,9 @@ func (c *bigModExp) Run(input []byte) ([]byte, error) { } // Retrieve the operands and execute the exponentiation var ( - base = new(big.Int).SetBytes(getData(input, 0, baseLen)) - exp = new(big.Int).SetBytes(getData(input, baseLen, expLen)) - mod = new(big.Int).SetBytes(getData(input, baseLen+expLen, modLen)) + base = new(patched_big.Int).SetBytes(getData(input, 0, baseLen)) + exp = new(patched_big.Int).SetBytes(getData(input, baseLen, expLen)) + mod = new(patched_big.Int).SetBytes(getData(input, baseLen+expLen, modLen)) v []byte ) switch { diff --git a/core/vm/contracts_test.go b/core/vm/contracts_test.go index da44e250e1..51bd7101ff 100644 --- a/core/vm/contracts_test.go +++ b/core/vm/contracts_test.go @@ -118,7 +118,7 @@ func testPrecompiled(addr string, test precompiledTest, t *testing.T) { func testPrecompiledOOG(addr string, test precompiledTest, t *testing.T) { p := allPrecompiles[common.HexToAddress(addr)] in := common.Hex2Bytes(test.Input) - gas := p.RequiredGas(in) - 1 + gas := test.Gas - 1 t.Run(fmt.Sprintf("%s-Gas=%d", test.Name, gas), func(t *testing.T) { _, _, err := RunPrecompiledContract(p, in, gas, nil) @@ -167,12 +167,10 @@ func benchmarkPrecompiled(addr string, test precompiledTest, bench *testing.B) { bench.Run(fmt.Sprintf("%s-Gas=%d", test.Name, reqGas), func(bench *testing.B) { bench.ReportAllocs() start := time.Now() - bench.ResetTimer() - for i := 0; i < bench.N; i++ { + for bench.Loop() { copy(data, in) res, _, err = RunPrecompiledContract(p, data, reqGas, nil) } - bench.StopTimer() elapsed := uint64(time.Since(start)) if elapsed < 1 { elapsed = 1 @@ -257,6 +255,30 @@ func TestPrecompiledModExpOOG(t *testing.T) { for _, test := range modexpTests { testPrecompiledOOG("05", 
test, t) } + modexpTestsEIP2565, err := loadJson("modexp_eip2565") + if err != nil { + t.Fatal(err) + } + for _, test := range modexpTestsEIP2565 { + testPrecompiledOOG("f5", test, t) + } + modexpTestsEIP7883, err := loadJson("modexp_eip7883") + if err != nil { + t.Fatal(err) + } + for _, test := range modexpTestsEIP7883 { + testPrecompiledOOG("f6", test, t) + } + gasCostTest := precompiledTest{ + Input: "000000000000000000000000000000000000000000000000000000000000082800000000000000000000000000000000000000000000000040000000000000090000000000000000000000000000000000000000000000000000000000000600000000adadadad00000000ff31ff00000006ffffffffffffffffffffffffffffffffffffffff0000000000000004ffffffffffffff0000000000000000000000000000000000000000d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0000001000200fefffeff01000100000000000000ffff01000100ffffffff01000100ffffffff0000050001000100fefffdff02000300ff000000000000012b000000000000090000000000000000000000000000000000000000000000000000ffffff000000000200fffffeff00000001000000000001000200fefffeff010001000000000000000000423034000000000011006161ffbf640053004f00ff00fffffffffffffff3ff00000000000f00002dffffffffff0000000000000000000061999999999999999999999999899961ffffffff0100010000000000000000000000000600000000adadadad00000000ffff00000006fffffdffffffffffffffffffffffffffffffffff0000000000000004ffffffffffffff000000000000000000000000000000000000000098000000966375726c2f66000030000000000011006161ffbf640053004f002d00000000a200000000000000ff1818183fffffffff3a6e756c6c2c22223a6e7500006c2000000000002d2d0000000000000000000144ccef0100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000080000000000000000fdff000000ff00290001000009000000000000000000000000000000000000000000000000a50004ff2800000000000000000000000000000000000000000000000001000000000000090000000000000000000000030000000000000000002b00000000000000000600000000adadadad00000000ffff000
00006ffffffffffffffffffffffffffffffffffffffff0000000000000004ffffffffffffff0000000000000000000000000000000000000000d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d000000000717a1a001a1a1a1a1a1a000000121212121212121212121212121212121212121212d0d0d0d01212121212121212121212121212121212121212121212121212121212121212121212121212121212121212373800002d35373837346137346161610000000000000000d0d0d0d0d0d0d0d0002d3533321a1a000000d0d0d0d0d0d0d0d0d0d0d0d0d0d000000000717a1a001a1a1a1a1a1a000000121212121212121212121212121212121212121212d0d0d0d012121212121212121212121212121212121212121212121212121212121212121212121212121212121212121212121a1212121212121212000000000000000000000000d0d0d0d0d0d0d0d0002d3533321a1a0000000000000000000000003300000001000f5b00001100712c6eff9e61000000000061000000fbffff1a1a3a6e353900756c6c7d3b00000000009100002d35ff00600000000000000000002d3533321a1a1a1a3a6e353900756c6c7d3b000000000091373800002d3537383734613734616161d0d0d0d0d000000000717a1a001a1a1a1a1a1a000000121212121212121212121212121212121212121212d0d0d0d012121212121212121212121212121212121212121212121212121212121212121212121212121212121212121212121a1212121212121212000000000000000000000000d0d0d0d0d0d0d0d0002d3533321a1a0000000000000000000000003300000001000f5b00001100712c6eff9e61000000000061000000fbffff1a1a3a6e353900756c6c7d3b00000000009100002d35ff00600000000000000000002d3533321a1a1a1a3a6e353900756c6c7d3b000000000091373800002d353738373461373461616100000000000000000000000000000000000000000000000001000000000000090000000000000000000000030000000000000000002b00000000000000000600000000adadadad00000000ffff00000006ffffffffffffffffffffffffffffffffffffffff0000000000000004ffffffffffffff0000000000000000000000000000000000000000d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d000000000717a1a001a1a1a1a1a1a000000121212121212121212121212121212121212121212d0d0d0d01212121212121212121212121212121212121212121212121212121212121212121212121212121212121212373800002d3537383734613734616161000000000000000
0d0d0d0d0d0d0d0d0002d3533321a1a000000d0d0d0d0d0d0d0d0d0d0d0d0d0d000000000717a1a001a1a1a1a1a1a000000121212121212121212121212121212121212121212d0d0d0d012121212121212121212121212121212121212121212121212121212121212121212121212121212121212121212121a1212121212121212000000000000000000000000d0d0d0d0d0d0d0d0002d3533321a1a0000000000000000000000003300000001000f5b00001100712c6eff9e61000000000061000000fbffff1a1a3a6e353900756c6c7d3b00000000009100002d35ff00600000000000000000002d3533321a1a1a1a3a6e353900756c6c7d3b000000000091373800002d3537383734613734616161d0d0d0d0d000000000717a1a001a1a1a1a1a1a0000001212121212121212121212121212121212121212000000000000003300000001000f5b00001100712c6eff9e61000000000061000000fbffff1a1a3a6e353900756c6c7d3b00000000009100002d35ff00600000000000000000002d3533321a1a1a1a3a6e353900756c6c7d3b000000000091373800002d3537383734613734616161", + Expected: "000000000000000000000000000000000000000000000000", + Name: "oss_fuzz_gas_calc", + Gas: 18446744073709551615, + NoBenchmark: false, + } + testPrecompiledOOG("05", gasCostTest, t) + testPrecompiledOOG("f5", gasCostTest, t) + testPrecompiledOOG("f6", gasCostTest, t) } // Tests the sample inputs from the elliptic curve scalar multiplication EIP 213. 
diff --git a/core/vm/eips.go b/core/vm/eips.go index 10ca1fe9ab..d7ed18648e 100644 --- a/core/vm/eips.go +++ b/core/vm/eips.go @@ -294,7 +294,7 @@ func opBlobBaseFee(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) { return nil, nil } -// opCLZ implements the CLZ opcode (count leading zero bytes) +// opCLZ implements the CLZ opcode (count leading zero bits) func opCLZ(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) { x := scope.Stack.peek() x.SetUint64(256 - uint64(x.BitLen())) diff --git a/core/vm/evm.go b/core/vm/evm.go index e360187f7b..88ef1cf121 100644 --- a/core/vm/evm.go +++ b/core/vm/evm.go @@ -601,7 +601,7 @@ func (evm *EVM) initNewContract(contract *Contract, address common.Address) ([]b } } - evm.StateDB.SetCode(address, ret) + evm.StateDB.SetCode(address, ret, tracing.CodeChangeContractCreation) return ret, nil } diff --git a/core/vm/gas_table_test.go b/core/vm/gas_table_test.go index cb6143c0b5..7fe76b0a63 100644 --- a/core/vm/gas_table_test.go +++ b/core/vm/gas_table_test.go @@ -27,6 +27,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/core/state" + "github.com/ethereum/go-ethereum/core/tracing" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/params" "github.com/holiman/uint256" @@ -87,7 +88,7 @@ func TestEIP2200(t *testing.T) { statedb, _ := state.New(types.EmptyRootHash, state.NewDatabaseForTesting()) statedb.CreateAccount(address) - statedb.SetCode(address, hexutil.MustDecode(tt.input)) + statedb.SetCode(address, hexutil.MustDecode(tt.input), tracing.CodeChangeUnspecified) statedb.SetState(address, common.Hash{}, common.BytesToHash([]byte{tt.original})) statedb.Finalise(true) // Push the state into the "original" slot @@ -139,7 +140,7 @@ func TestCreateGas(t *testing.T) { address := common.BytesToAddress([]byte("contract")) statedb, _ := state.New(types.EmptyRootHash, state.NewDatabaseForTesting()) 
statedb.CreateAccount(address) - statedb.SetCode(address, hexutil.MustDecode(tt.code)) + statedb.SetCode(address, hexutil.MustDecode(tt.code), tracing.CodeChangeUnspecified) statedb.Finalise(true) vmctx := BlockContext{ CanTransfer: func(StateDB, common.Address, *uint256.Int) bool { return true }, diff --git a/core/vm/instructions_test.go b/core/vm/instructions_test.go index cd31829a7e..72f561f4bf 100644 --- a/core/vm/instructions_test.go +++ b/core/vm/instructions_test.go @@ -291,15 +291,13 @@ func opBenchmark(bench *testing.B, op executionFunc, args ...string) { intArgs[i] = new(uint256.Int).SetBytes(common.Hex2Bytes(arg)) } pc := uint64(0) - bench.ResetTimer() - for i := 0; i < bench.N; i++ { + for bench.Loop() { for _, arg := range intArgs { stack.push(arg) } op(&pc, evm, scope) stack.pop() } - bench.StopTimer() for i, arg := range args { want := new(uint256.Int).SetBytes(common.Hex2Bytes(arg)) @@ -551,8 +549,7 @@ func BenchmarkOpMstore(bench *testing.B) { memStart := new(uint256.Int) value := new(uint256.Int).SetUint64(0x1337) - bench.ResetTimer() - for i := 0; i < bench.N; i++ { + for bench.Loop() { stack.push(value) stack.push(memStart) opMstore(&pc, evm, &ScopeContext{mem, stack, nil}) @@ -609,8 +606,7 @@ func BenchmarkOpKeccak256(bench *testing.B) { pc := uint64(0) start := new(uint256.Int) - bench.ResetTimer() - for i := 0; i < bench.N; i++ { + for bench.Loop() { stack.push(uint256.NewInt(32)) stack.push(start) opKeccak256(&pc, evm, &ScopeContext{mem, stack, nil}) diff --git a/core/vm/interface.go b/core/vm/interface.go index 42a72db482..d7f4c10e1f 100644 --- a/core/vm/interface.go +++ b/core/vm/interface.go @@ -43,7 +43,7 @@ type StateDB interface { GetCode(common.Address) []byte // SetCode sets the new code for the address, and returns the previous code, if any. 
- SetCode(common.Address, []byte) []byte + SetCode(common.Address, []byte, tracing.CodeChangeReason) []byte GetCodeSize(common.Address) int AddRefund(uint64) diff --git a/core/vm/interpreter_test.go b/core/vm/interpreter_test.go index 8ed512316b..79531f78d2 100644 --- a/core/vm/interpreter_test.go +++ b/core/vm/interpreter_test.go @@ -24,6 +24,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/state" + "github.com/ethereum/go-ethereum/core/tracing" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/params" "github.com/holiman/uint256" @@ -45,7 +46,7 @@ func TestLoopInterrupt(t *testing.T) { for i, tt := range loopInterruptTests { statedb, _ := state.New(types.EmptyRootHash, state.NewDatabaseForTesting()) statedb.CreateAccount(address) - statedb.SetCode(address, common.Hex2Bytes(tt)) + statedb.SetCode(address, common.Hex2Bytes(tt), tracing.CodeChangeUnspecified) statedb.Finalise(true) evm := NewEVM(vmctx, statedb, params.AllEthashProtocolChanges, Config{}) @@ -89,8 +90,7 @@ func BenchmarkInterpreter(b *testing.B) { stack.push(uint256.NewInt(123)) stack.push(uint256.NewInt(123)) gasSStoreEIP3529 = makeGasSStoreFunc(params.SstoreClearsScheduleRefundEIP3529) - b.ResetTimer() - for i := 0; i < b.N; i++ { + for b.Loop() { gasSStoreEIP3529(evm, contract, stack, mem, 1234) } } diff --git a/core/vm/runtime/runtime.go b/core/vm/runtime/runtime.go index 9d984291f2..b40e99d047 100644 --- a/core/vm/runtime/runtime.go +++ b/core/vm/runtime/runtime.go @@ -22,6 +22,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/state" + "github.com/ethereum/go-ethereum/core/tracing" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/crypto" @@ -139,7 +140,7 @@ func Execute(code, input []byte, cfg *Config) ([]byte, *state.StateDB, error) { cfg.State.Prepare(rules, cfg.Origin, cfg.Coinbase, &address, 
vm.ActivePrecompiles(rules), nil) cfg.State.CreateAccount(address) // set the receiver's (the executing contract) code for execution. - cfg.State.SetCode(address, code) + cfg.State.SetCode(address, code, tracing.CodeChangeUnspecified) // Call the code with the given configuration. ret, leftOverGas, err := vmenv.Call( cfg.Origin, diff --git a/core/vm/runtime/runtime_test.go b/core/vm/runtime/runtime_test.go index d75a5b0459..ddd32df039 100644 --- a/core/vm/runtime/runtime_test.go +++ b/core/vm/runtime/runtime_test.go @@ -114,7 +114,7 @@ func TestCall(t *testing.T) { byte(vm.PUSH1), 32, byte(vm.PUSH1), 0, byte(vm.RETURN), - }) + }, tracing.CodeChangeUnspecified) ret, _, err := Call(address, nil, &Config{State: state}) if err != nil { @@ -150,8 +150,7 @@ func BenchmarkCall(b *testing.B) { b.Fatal(err) } - b.ResetTimer() - for i := 0; i < b.N; i++ { + for b.Loop() { for j := 0; j < 400; j++ { Execute(code, cpurchase, nil) Execute(code, creceived, nil) @@ -167,7 +166,7 @@ func benchmarkEVM_Create(bench *testing.B, code string) { ) statedb.CreateAccount(sender) - statedb.SetCode(receiver, common.FromHex(code)) + statedb.SetCode(receiver, common.FromHex(code), tracing.CodeChangeUnspecified) runtimeConfig := Config{ Origin: sender, State: statedb, @@ -190,11 +189,9 @@ func benchmarkEVM_Create(bench *testing.B, code string) { EVMConfig: vm.Config{}, } // Warm up the intpools and stuff - bench.ResetTimer() - for i := 0; i < bench.N; i++ { + for bench.Loop() { Call(receiver, []byte{}, &runtimeConfig) } - bench.StopTimer() } func BenchmarkEVM_CREATE_500(bench *testing.B) { @@ -232,9 +229,8 @@ func BenchmarkEVM_SWAP1(b *testing.B) { b.Run("10k", func(b *testing.B) { contractCode := swapContract(10_000) - state.SetCode(contractAddr, contractCode) - - for i := 0; i < b.N; i++ { + state.SetCode(contractAddr, contractCode, tracing.CodeChangeUnspecified) + for b.Loop() { _, _, err := Call(contractAddr, []byte{}, &Config{State: state}) if err != nil { b.Fatal(err) @@ -263,9 +259,8 @@ 
func BenchmarkEVM_RETURN(b *testing.B) { b.ReportAllocs() contractCode := returnContract(n) - state.SetCode(contractAddr, contractCode) - - for i := 0; i < b.N; i++ { + state.SetCode(contractAddr, contractCode, tracing.CodeChangeUnspecified) + for b.Loop() { ret, _, err := Call(contractAddr, []byte{}, &Config{State: state}) if err != nil { b.Fatal(err) @@ -422,17 +417,17 @@ func benchmarkNonModifyingCode(gas uint64, code []byte, name string, tracerCode byte(vm.PUSH1), 0x00, byte(vm.PUSH1), 0x00, byte(vm.REVERT), - }) + }, tracing.CodeChangeUnspecified) } //cfg.State.CreateAccount(cfg.Origin) // set the receiver's (the executing contract) code for execution. - cfg.State.SetCode(destination, code) + cfg.State.SetCode(destination, code, tracing.CodeChangeUnspecified) Call(destination, nil, cfg) b.Run(name, func(b *testing.B) { b.ReportAllocs() - for i := 0; i < b.N; i++ { + for b.Loop() { Call(destination, nil, cfg) } }) @@ -772,12 +767,12 @@ func TestRuntimeJSTracer(t *testing.T) { for i, jsTracer := range jsTracers { for j, tc := range tests { statedb, _ := state.New(types.EmptyRootHash, state.NewDatabaseForTesting()) - statedb.SetCode(main, tc.code) - statedb.SetCode(common.HexToAddress("0xbb"), calleeCode) - statedb.SetCode(common.HexToAddress("0xcc"), calleeCode) - statedb.SetCode(common.HexToAddress("0xdd"), calleeCode) - statedb.SetCode(common.HexToAddress("0xee"), calleeCode) - statedb.SetCode(common.HexToAddress("0xff"), suicideCode) + statedb.SetCode(main, tc.code, tracing.CodeChangeUnspecified) + statedb.SetCode(common.HexToAddress("0xbb"), calleeCode, tracing.CodeChangeUnspecified) + statedb.SetCode(common.HexToAddress("0xcc"), calleeCode, tracing.CodeChangeUnspecified) + statedb.SetCode(common.HexToAddress("0xdd"), calleeCode, tracing.CodeChangeUnspecified) + statedb.SetCode(common.HexToAddress("0xee"), calleeCode, tracing.CodeChangeUnspecified) + statedb.SetCode(common.HexToAddress("0xff"), suicideCode, tracing.CodeChangeUnspecified) tracer, err := 
tracers.DefaultDirectory.New(jsTracer, new(tracers.Context), nil, params.MergedTestChainConfig) if err != nil { @@ -862,8 +857,8 @@ func BenchmarkTracerStepVsCallFrame(b *testing.B) { // delegation designator incurs the correct amount of gas based on the tracer. func TestDelegatedAccountAccessCost(t *testing.T) { statedb, _ := state.New(types.EmptyRootHash, state.NewDatabaseForTesting()) - statedb.SetCode(common.HexToAddress("0xff"), types.AddressToDelegation(common.HexToAddress("0xaa"))) - statedb.SetCode(common.HexToAddress("0xaa"), program.New().Return(0, 0).Bytes()) + statedb.SetCode(common.HexToAddress("0xff"), types.AddressToDelegation(common.HexToAddress("0xaa")), tracing.CodeChangeUnspecified) + statedb.SetCode(common.HexToAddress("0xaa"), program.New().Return(0, 0).Bytes(), tracing.CodeChangeUnspecified) for i, tc := range []struct { code []byte diff --git a/core/vm/testdata/precompiles/modexp_eip2565.json b/core/vm/testdata/precompiles/modexp_eip2565.json index c55441439e..24455eeca7 100644 --- a/core/vm/testdata/precompiles/modexp_eip2565.json +++ b/core/vm/testdata/precompiles/modexp_eip2565.json @@ -117,5 +117,215 @@ "Name": "nagydani-5-pow0x10001", "Gas": 87381, "NoBenchmark": false + }, + { + "Input": "000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000000000000000000000000000000000000c1000000000000000000000000000000000000000000000000000000000000000cffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe000007d7d7d83828282348286877d7d827d407d797d7d7d7d7d7d7d7d7d7d7d5b00000000000000000000000000000000000000000000000000000000000000030000000000000000000000000000000000000000000000000000000000000021000000000000000000000000000000000000000000000000000000000000000cffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff4000007d7d7d83828282348286877d7d82", + "Expected": "36a385a417859b5e178d3ab9", + "Name": "marius-1-even", + "Gas": 2057, + "NoBenchmark": false + }, + { + "Input": 
"000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000d80000000000000000000000000000000000000000000000000000000000000010ffffffffffffffff76ffffffffffffff1cffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc7c7c7c7c7c7c7c7c7c7c7c7c7c7c7c7c7c7c7c7c7c76ec7c7c7c7ffffffffffffffc7c7c7c7c7c7c7c7c7c7c7c7c7c7c7c7c7c7c7c7c7c7ffffffffffffc7c7c7c7c7c7c7c7c7c7c7c7c7c7c7c7c7c7c7c7c7c76ec7c7c7c7c7c7c7c7c7c7c7c7c7c7c7c7c7c7c7c7c7c7c7c7c7c7c7c7c7c7c7c7c7c7c7c7c7c7c7c7c7ffffffffff3f000000000000000000000000", + "Expected": "c3745de81615f80088ffffffffffffff", + "Name": "guido-1-even", + "Gas": 2298, + "NoBenchmark": false + }, + { + "Input": "000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000d80000000000000000000000000000000000000000000000000000000000000010e0060000a921212121212121ff0000212b212121ffff1f1f1f1f1f1f1f1f1f1f1f1f1f1f1f1f1f1f1f1f1f1f00feffff212121212121ffffffff1fe1e0e0e01e1f1f169f1f1f1f490afcefffffffffffffffff82828282828282828282828282828282828282828200ffff28ff2b212121ffff1f1f1f1f1f1f1f1f1f1f1f1f1f1f1f1f1f1fffffffffff0afceffffff7ffffffffff7c8282828282a1828282828282828282828282828200ffff28ff2b212121ffff1f1f1f1f1f1fd11f1f1f1f1f1f1f1f1f1f1fffffffffffffffff21212121212121fb2121212121ffff1f1f1f1f1f1f1f1fffaf82828282828200ffff28ff2b21828200", + "Expected": "458ef0af2549d46d24c89079499479e1", + "Name": "guido-2-even", + "Gas": 2300, + "NoBenchmark": false + }, + { + "Input": 
"00000000000000000000000000000000000000000000000000000000000001e7000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000002cb0193585a48e18aad777e9c1b54221a0f58140392e4f091cd5f42b2e8644a9384fbd58ae1edec2477ebf7edbf7c0a3f8bd21d1890ee87646feab3c47be716f842cc3da9b940af312dc54450a960e3fc0b86e56abddd154068e10571a96fff6259431632bc15695c6c8679057e66c2c25c127e97e64ee5de6ea1fc0a4a0e431343fed1daafa072c238a45841da86a9806680bc9f298411173210790359209cd454b5af7b4d5688b4403924e5f863d97e2c5349e1a04b54fcf385b1e9d7714bab8fbf5835f6ff9ed575e77dff7af5cbb641db5d537933bae1fa6555d6c12d6fb31ca27b57771f4aebfbe0bf95e8990c0108ffe7cbdaf370be52cf3ade594543af75ad9329d2d11a402270b5b9a6bf4b83307506e118fca4862749d04e916fc7a039f0d13f2a02e0eedb800199ec95df15b4ccd8669b52586879624d51219e72102fad810b5909b1e372ddf33888fb9beb09b416e4164966edbabd89e4a286be36277fc576ed519a15643dac602e92b63d0b9121f0491da5b16ef793a967f096d80b6c81ecaaffad7e3f06a4a5ac2796f1ed9f68e6a0fd5cf191f0c5c2eec338952ff8d31abc68bf760febeb57e088995ba1d7726a2fdd6d8ca28a181378b8b4ab699bfd4b696739bbf17a9eb2df6251143046137fdbbfacac312ebf67a67da9741b596000000000000419a2917c61722b0713d3b00a2f0e1dd5aebbbe09615de424700eea3c3020fe6e9ea5de9fa1ace781df28b21f746d2ab61d0da496e08473c90ff7dfe25b43bcde76f4bafb82e0975bea75f5a0591dba80ba2fff80a07d8853bea5be13ab326ba70c57b153acc646151948d1cf061ca31b02d4719fac710e7c723ca44f5b1737824b7ccc74ba5bff980aabdbf267621cafc3d6dcc29d0ca9c16839a92ed34de136da7900aa3ee43d21aa57498981124357cf0ca9b86f9a8d3f9c604ca00c726e48f7a9945021ea6dfff92d6b2d6514693169ca133e993541bfa4c4c191de806aa80c48109bcfc9901eccfdeb2395ab75fe63c67de900829d0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "Expected": "1194e971c875bb45f030316793bc6916343335b18670dfad7a4646fba99749b30283b78b818836de7400ff1a68ddad1a2dd850ec0f227441e2d4d13f5ee4b5d7a856db0a9ad1f86987e1f117d70f9e6a1a2b8d083fa82653aa16f1773b6deb2ed8a1f9e7f3a5db121c4a0c91cb954e2ec53e63422efe86c7984d79cd0e7b5e3eb8ca4980551d63f302c7d72500a84baf12c82fc7bd9b5c2ab8b9c33baf1df28b2031c58a8b2928a42c9f456e98874e22fe13cf17aa5915b11bb108b6ae40842d434604ccddcb4f64324c67b2dde32e6cd759d964f17d9cdf0046cd0ed3588e1fc4b88f67a5d4f3a870aad1cba89ead265d6ad327c8ea7ff54fe4b5e7dbe87c5c59c468543eab3675751111bfa1d6c51daf789d41dc21fd8ba9e05490f881a973a3c1567ff3129a49aa6658cf06f0a79530a7256ce5a07c2a77b4306383d538866bba376d90621c4f82d1f5f32304ee2b7170805d42418fc6967642e5648d8c64fe9c0fdff2d7c114a47add7767c8fccb8808de8c3c6e1a8880c05e16fafc1513fded8eba222dcaaa809bdb36999cc27ab8d0055009e9690e8a35b859df865dc510d25c7812d8eebbb35607ad595573f0fabd1b57970a2bc113ac6f0ca01e985032b9c2c139316ac099ed1632d2bc0fcc341343d303db2a9c3cf2ad572c6c43084b08d458bf822e92da16079f39cdb0dd10ef47f87ecae404117fc72660372cee9ea42266e7f8d973e7f6b09930ca5f96e04976bf23b9d356bbd2429597b04b7663e0e1a1228f4dfda3b854233e4888dc60c6886c1e0e8aec1705f681027b1e0b3017337557f107ef5cd272df5fd31dfec2bdffe163a8369895ffe124c0aa0ee00ca0fe1db4d5cf37b4af0e49bd73a89d88ced3d88f8e6f00d8e61ad09946d0e72cd3e25bc688a021a83758b5023daae7c269a6cbbd447aba5da7629b75801e1654ce85b8e21cc
b9865654f8662e538625d75fea31000000000000000000000000000000000000000000000", + "Name": "guido-3-even", + "Gas": 5400, + "NoBenchmark": false + }, + { + "Input": "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000181000000000000000000000000000000000000000000000000000000000000000801ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff2cffffffffffffffffffffffffffffffffffffffffffffffffffffffff3b10000000006c01ffffffffffffffffffffffffffffffffffffffffffffffdffffb97ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff3bffffffffffffffffffffffffffffffffffffffffffffffffffffffffebafd93b37ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc5bb6affffffff3fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff2a", + "Expected": "0000000000000001", + "Name": "guido-4-even", + "Gas": 1026, + "NoBenchmark": false + }, + { + "Input": 
"00000000000000000000000000000000000000000000000000000000000000c0000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000c0e8e77626586f73b955364c7b4bbf0bb7f7685ebd40e852b164633a4acbd3244c51f81bcdfc324a0dff2b5bec9d92e21cbebc4d5e29d3a3d30de3e03fbeab8d7f2ee5f854d076701c8753d72779187e404f9b2fb705c495137d78551250314a463ef5a213fe22de1cea28d60f518364ff95fe0b73660793e3efcfbe31bda68aeccc21cc9477a6aea5df8cae73422b700c47e54d892691e099167e77befc94780a920ae4155769cd69c30626f054134b5f003772473f57f84837402df6d166e66303f01681d2220bfea4bb888a5543db8c0916274ddb1ea93b144c042c01d8164c954b27ac388a17c8e9a7ba12a968f288f3308d6fe7bcdf28e685e9a2e00d8be1af19726b7662016d6404f9336493ad633777feb88c9d02a1d2428e566ac38f42c0bb66d2962ec349088ce0d03b35bf27f6114414ef558c87ad8e543754a352f7dffcaca429690688595ab1d1b349d9295b480a82f43ac5c9112fe40720545cc78501cd8b42f3605212fe06a835c9cbc0328e07e94aedb2ac11f6d6649e7fcd8c43", + "Expected": "120cf297dbe810911c7d060e109e03699ccefa00a257d296c5a14b4180776c5f7c0d7f1cd789c694807689729af267b53f00373f395dee264a3daba11fcac1fa8875aee0950acd8fa656f1fc58077a7549d794dd160506ecea1acc9c0cda13795749c94f9973b683ce2162866e8d6b5b1165a4c7fa4234964d394d6ec4e0113698b89d173e24a962dd7a41a1819b0fef188ef64e7ee264595dce0d76fbc3ba42d5de833b143c8744366effede8bc8197e8f747ff8cdbc0bf1a93560bec960ca9", + "Name": "marcin-1-base-heavy", + "Gas": 200, + "NoBenchmark": false + }, + { + "Input": "0000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000005100000000000000000000000000000000000000000000000000000000000000080001020304050607ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0001020304050607", + "Expected": "0000000000000000", + "Name": "marcin-1-exp-heavy", + "Gas": 215, + "NoBenchmark": false + }, + { + "Input": 
"000000000000000000000000000000000000000000000000000000000000002800000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000028e8e77626586f73b955364c7b4bbf0bb7f7685ebd40e852b164633a4acbd3244c000102030405060701fffffff01681d2220bfea4bb888a5543db8c0916274ddb1ea93b144c042c01d8164c950001020304050607", + "Expected": "1abce71dc2205cce4eb6934397a88136f94641342e283cbcd30e929e85605c6718ed67f475192ffd", + "Name": "marcin-1-balanced", + "Gas": 200, + "NoBenchmark": false + }, + { + "Input": "000000000000000000000000000000000000000000000000000000000000019800000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000198e8e77626586f73b955364c7b4bbf0bb7f7685ebd40e852b164633a4acbd3244c51f81bcdfc324a0dff2b5bec9d92e21cbebc4d5e29d3a3d30de3e03fbeab8d7f2ee5f854d076701c8753d72779187e404f9b2fb705c495137d78551250314a463ef5a213fe22de1cea28d60f518364ff95fe0b73660793e3efcfbe31bda68aeccc21cc9477a6aea5df8cae73422b700c47e54d892691e099167e77befc94780a920ae4155769cd69c30626f054134b5f003772473f57f84837402df6d166e663c29021b0e084f7dc16f6ec88cc597f1aea9f8e0b9501e0f7a546805d2a20eeda0bf080aeb3ed7ea6f9174d804bd242f0b31ff1ea24800344abb580cd87f61ca75a013a87733553966400242399dee3760877fead2cd87287747155e47a854acb50fe07922f57ae3b4553201bfd7c11aca85e1541f91db8e62dca9c418dc5feae086c9487350539c884510044efce5e3f2aaffca4215c12b9044506375097fecd9b22e2ef46f01f1af8aff742aebf96bdcaf55a341600971dc62555376b9e98a8000102030405060708090a0b0c0d0e0f101112131415161703f01681d2220bfea4bb888a5543db8c0916274ddb1ea93b144c042c01d8164c954b27ac388a17c8e9a7ba12a968f288f3308d6fe7bcdf28e685e9a2e00d8be1af19726b7662016d6404f9336493ad633777feb88c9d02a1d2428e566ac38f42c0bb66d2962ec349088ce0d03b35bf27f6114414ef558c87ad8e543754a352f7dffcaca429690688595ab1d1b349d9295b480a82f43ac5c9112fe40720545cc78501cd8b42f3605212fe06a835c9cbc0328e07e94aedb2ac11f6d6649e7fcd8c43ddce2bd0cdc6c22c4dcd345735040d5bfe3f09b7c61362089f728e2
222db96cab2f2c2ccf43574f9e119f4860fd0f1b6036a43ad9db8a428ea09a4ee385112f3fe9c6656ea2cec604cbb5a9227526653bfa7035e4ae80010b1ba16a76608d5dde0a62bc019e9047b5ec05b1005fd017366130a4ba555e7be654561ee3f539c93cb2c9988fca71bf0ad9c4a426b924641a28e1e4adb93609bfa5b2bc81714cbba1110208b86d7b87be28bdf63a62e33ae81dbcc43de9192bd192c40e85faab539000102030405060708090a0b0c0d0e0f1011121314151617", + "Expected": "817d05c4d540ace3f250fd082e2deb8c2097410fcfe4ce40862cfa015b7f62a7bb72ec1af2915cad66294447b45d177fe759eb80370b0dbd3c0e1c448d54db81eadc11e40c19e394d066a5c019c443798a98d4afd116fc220593d42fbf191b6af0ae75410badb641187ba24a0b968f742a75e2822853f137151d9ea972fd1f36b7c7cc4e71355ecf50648aec094b864cfae9316c0a7be3ddb8ab2d0050b2a029ee956c2366d49430c8f889f29ab514aea8e5b8dec40ba1b49432e30aee32ec45e96dd548205a79d8f8f918eed46acb2115c59086b1011b1d2b093cea723535c3d95efc8e51a7da43b80586d69eb7f213dfb06f7a8e789a9472392bd224411b50f8ca6f2862bcd63431912d1ff99c8d76408245da9e4bea649f0eb930b32922f2d0a345a206160be44d418f1a6c74bd49c4618392ef9350b264a461dfc684c7343211e27675b027054f1cb3d4a5b1a066d3a3ea2eed9caf13251d8be936818f15274e8e3d7539b7c5f216cef327a270fd2a886fbf679c163fb5806249f2c74da5ee0e3ffe9ad1fde2634b29b35da6da6d184ab6ae70199229", + "Name": "marcin-2-base-heavy", + "Gas": 867, + "NoBenchmark": false + }, + { + "Input": "000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000500000000000000000000000000000000000000000000000000000000000000010000102030405060708090a0b0c0d0e0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff000102030405060708090a0b0c0d0e0f", + "Expected": "00000000000000000000000000000000", + "Name": "marcin-2-exp-heavy", + "Gas": 852, + "NoBenchmark": false + }, + { + "Input": 
"000000000000000000000000000000000000000000000000000000000000003800000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000038e8e77626586f73b955364c7b4bbf0bb7f7685ebd40e852b164633a4acbd3244c000102030405060708090a0b0c0d0e0f10111213141516172bfffffffffffffff01681d2220bfea4bb888a5543db8c0916274ddb1ea93b144c042c01d8164c95000102030405060708090a0b0c0d0e0f1011121314151617", + "Expected": "86bef3367fc7117c8a6b825cadebe80f3e94c321dda73e9e240b98188a1d5c071c60a195097c8d1fb85ce03a2e1b6964846edee5aa2c3f46", + "Name": "marcin-2-balanced", + "Gas": 996, + "NoBenchmark": false + }, + { + "Input": "000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000020e8e77626586f73b955364c7b4bbf0bb7f7685ebd40e852b164633a4acbd3244cfffffffffffffffffffffffffffffffff01681d2220bfea4bb888a5543db8c0916274ddb1ea93b144c042c01d8164c95", + "Expected": "4004762c491606a5132134da6086284f74cc8e14b08f18b90fc09f31bca3d78f", + "Name": "marcin-3-base-heavy", + "Gas": 677, + "NoBenchmark": false + }, + { + "Input": "000000000000000000000000000000000000000000000000000000000000001800000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000018000102030405060708090a0b0c0d0e0f1011121314151617ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff000102030405060708090a0b0c0d0e0f1011121314151617", + "Expected": "000000000000000000000000000000000000000000000000", + "Name": "marcin-3-exp-heavy", + "Gas": 765, + "NoBenchmark": false + }, + { + "Input": 
"000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000020e8e77626586f73b955364c7b4bbf0bb7f7685ebd40e852b164633a4acbd3244cfffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01681d2220bfea4bb888a5543db8c0916274ddb1ea93b144c042c01d8164c95", + "Expected": "2d3feee20d394af68dd6744b86a8aca6a4a0b7f01bbcd3c3eec768245ca6acee", + "Name": "marcin-3-balanced", + "Gas": 1360, + "NoBenchmark": false + }, + { + "Input": "00000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000051000000000000000000000000000000000000000000000000000000000000000800ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00", + "Expected": "00ffffffffffffff", + "Name": "mod-8-exp-648", + "Gas": 215, + "NoBenchmark": false + }, + { + "Input": "00000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000070000000000000000000000000000000000000000000000000000000000000000800ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00", + "Expected": "00ffffffffffffff", + "Name": "mod-8-exp-896", + "Gas": 298, + "NoBenchmark": false + }, + { + "Input": "00000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000002000ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00", + "Expected": 
"00ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + "Name": "mod-32-exp-32", + "Gas": 200, + "NoBenchmark": false + }, + { + "Input": "00000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000005000000000000000000000000000000000000000000000000000000000000002000ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0dffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00", + "Expected": "00ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + "Name": "mod-32-exp-36", + "Gas": 200, + "NoBenchmark": false + }, + { + "Input": "00000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000005000000000000000000000000000000000000000000000000000000000000002000ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00", + "Expected": "00ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + "Name": "mod-32-exp-40", + "Gas": 208, + "NoBenchmark": false + }, + { + "Input": "00000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000002000ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00", + "Expected": "00ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + "Name": "mod-32-exp-64", + "Gas": 336, + "NoBenchmark": false + }, + { + "Input": "00000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000009000000000000000000000000000000000000000000000000000000000000002000ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00", 
+ "Expected": "00ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + "Name": "mod-32-exp-65", + "Gas": 341, + "NoBenchmark": false + }, + { + "Input": "00000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002000ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00", + "Expected": "00ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + "Name": "mod-32-exp-128", + "Gas": 677, + "NoBenchmark": false + }, + { + "Input": "00000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000010000ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff03ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00", + "Expected": 
"02fd01000000000000000000000000000000fefffffffffffd03ff0000000000000000000000000000000000000000000000fefffffffffffd03ff0000000000000000000000000000000000000000000000fefffffffffffd03ff0000000000000000000000000000000000000000000000fefffffffffffd03ff0000000000000000000000000000000000000000000000fefffffffffffd03ff0000000000000000000000000000000000000000000000fefffffffffffd03ff0000000000000000000000000000000000000000000000fefffffffffffd03ff0000000000000000000000000000000000000000000000fefffffffffffd03feffffffffff", + "Name": "mod-256-exp-2", + "Gas": 341, + "NoBenchmark": false + }, + { + "Input": "00000000000000000000000000000000000000000000000000000000000001080000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000010800ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff03ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ffffffffffffff00", + "Expected": 
"0100fefffffffffffff710f80000000006f108000000000000feffffffffffff0100fefffffffffffef90ff80000000105f008ffffffffff02fdffffffffffff0100feffffffffff00f80ff80000010101f407fffffffd06fc0000000000fd020000feffffffff00fff80ff8000101fa08f207fffffb0afa0000000001fb03000000feffffff00fffff80ff80102f80405f207fff90ef80000000002f90400000000feffff00fffffff80ff903f6050005f207f712f60000000003f7050000000000feff00fffffffff810fcf406000005f1fd16f40000000004f506000000000000fe00fffffffffff915ea0700000005e522f20000000005f306ffffffffffffffffffffffffff", + "Name": "mod-264-exp-2", + "Gas": 363, + "NoBenchmark": false + }, + { + "Input": "00000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000040000fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff03ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00fffffffffffffffffffffffffffffffffffffffffffffffffffff
fffffffff00ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00", + "Expected": "00fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe02fefffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe02fefffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe02fefffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe02fefffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe02fefffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe02fefffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe02fefffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe02fefffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe02fefffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe02fefffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe02fefffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe02fefffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe02fefffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe02fefffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe02fefffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe02fefffffffffffffffffffffffffffffffffffffffffffff
ffffffffffffffe02fefffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe02fefffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe02fefffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe02fefffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe02fefffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe02fefffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe02fefffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe02fefffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe02fefffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe02fefffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe02fefffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe02fefffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe02fefffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe02fefffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe02feffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + "Name": "mod-1024-exp-2", + "Gas": 5461, + "NoBenchmark": false + }, + { + "Input": "000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000700000000000000000000000000000000000000000000000000000000000000008ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + "Expected": "2a02d5f86c2375ff", + "Name": "pawel-1-exp-heavy", + "Gas": 298, + "NoBenchmark": false + }, + { + "Input": "000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000280000000000000000000000000000000000000000000000000000000000000010ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + "Expected": 
"823ef7dc60d6d9616756c48f69b7c4ff", + "Name": "pawel-2-exp-heavy", + "Gas": 425, + "NoBenchmark": false + }, + { + "Input": "000000000000000000000000000000000000000000000000000000000000001800000000000000000000000000000000000000000000000000000000000000150000000000000000000000000000000000000000000000000000000000000018ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + "Expected": "c817dd5aa60a41948eed409706c2aa97be3000d4da0261ff", + "Name": "pawel-3-exp-heavy", + "Gas": 501, + "NoBenchmark": false + }, + { + "Input": "0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000c0000000000000000000000000000000000000000000000000000000000000020ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + "Expected": "2defaca0137d6edacbbd5d36d6ed70cbf8a998ffb19fc270d45a18d37e0f35ff", + "Name": "pawel-4-exp-heavy", + "Gas": 506, + "NoBenchmark": false + }, + { + "Input": "000000000000000000000000000000000000000000000000000000000000001700000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000017bffffffffffffffffffffffffffffffffffffffffffffbffffffffffffffffff7ffffffffffffffffffffffffffffffffffffffffffe", + "Expected": "200f14de1d474710c1c979920452e0ffc2ac6f618afba5", + "Name": "mod_vul_pawel_3_exp_8", + "Gas": 200, + "NoBenchmark": false } ] \ No newline at end of file diff --git a/core/vm/testdata/precompiles/modexp_eip7883.json b/core/vm/testdata/precompiles/modexp_eip7883.json index 85e9ad1849..fb3169d148 100644 --- a/core/vm/testdata/precompiles/modexp_eip7883.json +++ b/core/vm/testdata/precompiles/modexp_eip7883.json @@ -103,5 +103,215 @@ "Name": "nagydani-5-pow0x10001", "Gas": 524288, "NoBenchmark": false + }, + { + "Input": 
"000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000000000000000000000000000000000000c1000000000000000000000000000000000000000000000000000000000000000cffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe000007d7d7d83828282348286877d7d827d407d797d7d7d7d7d7d7d7d7d7d7d5b00000000000000000000000000000000000000000000000000000000000000030000000000000000000000000000000000000000000000000000000000000021000000000000000000000000000000000000000000000000000000000000000cffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff4000007d7d7d83828282348286877d7d82", + "Expected": "36a385a417859b5e178d3ab9", + "Name": "marius-1-even", + "Gas": 45296, + "NoBenchmark": false + }, + { + "Input": "000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000d80000000000000000000000000000000000000000000000000000000000000010ffffffffffffffff76ffffffffffffff1cffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc7c7c7c7c7c7c7c7c7c7c7c7c7c7c7c7c7c7c7c7c7c76ec7c7c7c7ffffffffffffffc7c7c7c7c7c7c7c7c7c7c7c7c7c7c7c7c7c7c7c7c7c7ffffffffffffc7c7c7c7c7c7c7c7c7c7c7c7c7c7c7c7c7c7c7c7c7c76ec7c7c7c7c7c7c7c7c7c7c7c7c7c7c7c7c7c7c7c7c7c7c7c7c7c7c7c7c7c7c7c7c7c7c7c7c7c7c7c7c7ffffffffff3f000000000000000000000000", + "Expected": "c3745de81615f80088ffffffffffffff", + "Name": "guido-1-even", + "Gas": 51136, + "NoBenchmark": false + }, + { + "Input": 
"000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000d80000000000000000000000000000000000000000000000000000000000000010e0060000a921212121212121ff0000212b212121ffff1f1f1f1f1f1f1f1f1f1f1f1f1f1f1f1f1f1f1f1f1f1f00feffff212121212121ffffffff1fe1e0e0e01e1f1f169f1f1f1f490afcefffffffffffffffff82828282828282828282828282828282828282828200ffff28ff2b212121ffff1f1f1f1f1f1f1f1f1f1f1f1f1f1f1f1f1f1fffffffffff0afceffffff7ffffffffff7c8282828282a1828282828282828282828282828200ffff28ff2b212121ffff1f1f1f1f1f1fd11f1f1f1f1f1f1f1f1f1f1fffffffffffffffff21212121212121fb2121212121ffff1f1f1f1f1f1f1f1fffaf82828282828200ffff28ff2b21828200", + "Expected": "458ef0af2549d46d24c89079499479e1", + "Name": "guido-2-even", + "Gas": 51152, + "NoBenchmark": false + }, + { + "Input": "00000000000000000000000000000000000000000000000000000000000001e7000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000002cb0193585a48e18aad777e9c1b54221a0f58140392e4f091cd5f42b2e8644a9384fbd58ae1edec2477ebf7edbf7c0a3f8bd21d1890ee87646feab3c47be716f842cc3da9b940af312dc54450a960e3fc0b86e56abddd154068e10571a96fff6259431632bc15695c6c8679057e66c2c25c127e97e64ee5de6ea1fc0a4a0e431343fed1daafa072c238a45841da86a9806680bc9f298411173210790359209cd454b5af7b4d5688b4403924e5f863d97e2c5349e1a04b54fcf385b1e9d7714bab8fbf5835f6ff9ed575e77dff7af5cbb641db5d537933bae1fa6555d6c12d6fb31ca27b57771f4aebfbe0bf95e8990c0108ffe7cbdaf370be52cf3ade594543af75ad9329d2d11a402270b5b9a6bf4b83307506e118fca4862749d04e916fc7a039f0d13f2a02e0eedb800199ec95df15b4ccd8669b52586879624d51219e72102fad810b5909b1e372ddf33888fb9beb09b416e4164966edbabd89e4a286be36277fc576ed519a15643dac602e92b63d0b9121f0491da5b16ef793a967f096d80b6c81ecaaffad7e3f06a4a5ac2796f1ed9f68e6a0fd5cf191f0c5c2eec338952ff8d31abc68bf760febeb57e088995ba1d7726a2fdd6d8ca28a181378b8b4ab699bfd4b696739bbf17a9eb2df6251143046137fdbbfacac312ebf67a67da9741b596000000000000419a2
917c61722b0713d3b00a2f0e1dd5aebbbe09615de424700eea3c3020fe6e9ea5de9fa1ace781df28b21f746d2ab61d0da496e08473c90ff7dfe25b43bcde76f4bafb82e0975bea75f5a0591dba80ba2fff80a07d8853bea5be13ab326ba70c57b153acc646151948d1cf061ca31b02d4719fac710e7c723ca44f5b1737824b7ccc74ba5bff980aabdbf267621cafc3d6dcc29d0ca9c16839a92ed34de136da7900aa3ee43d21aa57498981124357cf0ca9b86f9a8d3f9c604ca00c726e48f7a9945021ea6dfff92d6b2d6514693169ca133e993541bfa4c4c191de806aa80c48109bcfc9901eccfdeb2395ab75fe63c67de900829d00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "Expected": 
"1194e971c875bb45f030316793bc6916343335b18670dfad7a4646fba99749b30283b78b818836de7400ff1a68ddad1a2dd850ec0f227441e2d4d13f5ee4b5d7a856db0a9ad1f86987e1f117d70f9e6a1a2b8d083fa82653aa16f1773b6deb2ed8a1f9e7f3a5db121c4a0c91cb954e2ec53e63422efe86c7984d79cd0e7b5e3eb8ca4980551d63f302c7d72500a84baf12c82fc7bd9b5c2ab8b9c33baf1df28b2031c58a8b2928a42c9f456e98874e22fe13cf17aa5915b11bb108b6ae40842d434604ccddcb4f64324c67b2dde32e6cd759d964f17d9cdf0046cd0ed3588e1fc4b88f67a5d4f3a870aad1cba89ead265d6ad327c8ea7ff54fe4b5e7dbe87c5c59c468543eab3675751111bfa1d6c51daf789d41dc21fd8ba9e05490f881a973a3c1567ff3129a49aa6658cf06f0a79530a7256ce5a07c2a77b4306383d538866bba376d90621c4f82d1f5f32304ee2b7170805d42418fc6967642e5648d8c64fe9c0fdff2d7c114a47add7767c8fccb8808de8c3c6e1a8880c05e16fafc1513fded8eba222dcaaa809bdb36999cc27ab8d0055009e9690e8a35b859df865dc510d25c7812d8eebbb35607ad595573f0fabd1b57970a2bc113ac6f0ca01e985032b9c2c139316ac099ed1632d2bc0fcc341343d303db2a9c3cf2ad572c6c43084b08d458bf822e92da16079f39cdb0dd10ef47f87ecae404117fc72660372cee9ea42266e7f8d973e7f6b09930ca5f96e04976bf23b9d356bbd2429597b04b7663e0e1a1228f4dfda3b854233e4888dc60c6886c1e0e8aec1705f681027b1e0b3017337557f107ef5cd272df5fd31dfec2bdffe163a8369895ffe124c0aa0ee00ca0fe1db4d5cf37b4af0e49bd73a89d88ced3d88f8e6f00d8e61ad09946d0e72cd3e25bc688a021a83758b5023daae7c269a6cbbd447aba5da7629b75801e1654ce85b8e21ccb9865654f8662e538625d75fea31000000000000000000000000000000000000000000000", + "Name": "guido-3-even", + "Gas": 32400, + "NoBenchmark": false + }, + { + "Input": 
"00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000181000000000000000000000000000000000000000000000000000000000000000801ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff2cffffffffffffffffffffffffffffffffffffffffffffffffffffffff3b10000000006c01ffffffffffffffffffffffffffffffffffffffffffffffdffffb97ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff3bffffffffffffffffffffffffffffffffffffffffffffffffffffffffebafd93b37ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc5bb6affffffff3fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff2a", + "Expected": "0000000000000001", + "Name": "guido-4-even", + "Gas": 94448, + "NoBenchmark": false + }, + { + "Input": 
"00000000000000000000000000000000000000000000000000000000000000c0000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000c0e8e77626586f73b955364c7b4bbf0bb7f7685ebd40e852b164633a4acbd3244c51f81bcdfc324a0dff2b5bec9d92e21cbebc4d5e29d3a3d30de3e03fbeab8d7f2ee5f854d076701c8753d72779187e404f9b2fb705c495137d78551250314a463ef5a213fe22de1cea28d60f518364ff95fe0b73660793e3efcfbe31bda68aeccc21cc9477a6aea5df8cae73422b700c47e54d892691e099167e77befc94780a920ae4155769cd69c30626f054134b5f003772473f57f84837402df6d166e66303f01681d2220bfea4bb888a5543db8c0916274ddb1ea93b144c042c01d8164c954b27ac388a17c8e9a7ba12a968f288f3308d6fe7bcdf28e685e9a2e00d8be1af19726b7662016d6404f9336493ad633777feb88c9d02a1d2428e566ac38f42c0bb66d2962ec349088ce0d03b35bf27f6114414ef558c87ad8e543754a352f7dffcaca429690688595ab1d1b349d9295b480a82f43ac5c9112fe40720545cc78501cd8b42f3605212fe06a835c9cbc0328e07e94aedb2ac11f6d6649e7fcd8c43", + "Expected": "120cf297dbe810911c7d060e109e03699ccefa00a257d296c5a14b4180776c5f7c0d7f1cd789c694807689729af267b53f00373f395dee264a3daba11fcac1fa8875aee0950acd8fa656f1fc58077a7549d794dd160506ecea1acc9c0cda13795749c94f9973b683ce2162866e8d6b5b1165a4c7fa4234964d394d6ec4e0113698b89d173e24a962dd7a41a1819b0fef188ef64e7ee264595dce0d76fbc3ba42d5de833b143c8744366effede8bc8197e8f747ff8cdbc0bf1a93560bec960ca9", + "Name": "marcin-1-base-heavy", + "Gas": 1152, + "NoBenchmark": false + }, + { + "Input": "0000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000005100000000000000000000000000000000000000000000000000000000000000080001020304050607ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0001020304050607", + "Expected": "0000000000000000", + "Name": "marcin-1-exp-heavy", + "Gas": 16624, + "NoBenchmark": false + }, + { + "Input": 
"000000000000000000000000000000000000000000000000000000000000002800000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000028e8e77626586f73b955364c7b4bbf0bb7f7685ebd40e852b164633a4acbd3244c000102030405060701fffffff01681d2220bfea4bb888a5543db8c0916274ddb1ea93b144c042c01d8164c950001020304050607", + "Expected": "1abce71dc2205cce4eb6934397a88136f94641342e283cbcd30e929e85605c6718ed67f475192ffd", + "Name": "marcin-1-balanced", + "Gas": 1200, + "NoBenchmark": false + }, + { + "Input": "000000000000000000000000000000000000000000000000000000000000019800000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000198e8e77626586f73b955364c7b4bbf0bb7f7685ebd40e852b164633a4acbd3244c51f81bcdfc324a0dff2b5bec9d92e21cbebc4d5e29d3a3d30de3e03fbeab8d7f2ee5f854d076701c8753d72779187e404f9b2fb705c495137d78551250314a463ef5a213fe22de1cea28d60f518364ff95fe0b73660793e3efcfbe31bda68aeccc21cc9477a6aea5df8cae73422b700c47e54d892691e099167e77befc94780a920ae4155769cd69c30626f054134b5f003772473f57f84837402df6d166e663c29021b0e084f7dc16f6ec88cc597f1aea9f8e0b9501e0f7a546805d2a20eeda0bf080aeb3ed7ea6f9174d804bd242f0b31ff1ea24800344abb580cd87f61ca75a013a87733553966400242399dee3760877fead2cd87287747155e47a854acb50fe07922f57ae3b4553201bfd7c11aca85e1541f91db8e62dca9c418dc5feae086c9487350539c884510044efce5e3f2aaffca4215c12b9044506375097fecd9b22e2ef46f01f1af8aff742aebf96bdcaf55a341600971dc62555376b9e98a8000102030405060708090a0b0c0d0e0f101112131415161703f01681d2220bfea4bb888a5543db8c0916274ddb1ea93b144c042c01d8164c954b27ac388a17c8e9a7ba12a968f288f3308d6fe7bcdf28e685e9a2e00d8be1af19726b7662016d6404f9336493ad633777feb88c9d02a1d2428e566ac38f42c0bb66d2962ec349088ce0d03b35bf27f6114414ef558c87ad8e543754a352f7dffcaca429690688595ab1d1b349d9295b480a82f43ac5c9112fe40720545cc78501cd8b42f3605212fe06a835c9cbc0328e07e94aedb2ac11f6d6649e7fcd8c43ddce2bd0cdc6c22c4dcd345735040d5bfe3f09b7c61362089f728e
2222db96cab2f2c2ccf43574f9e119f4860fd0f1b6036a43ad9db8a428ea09a4ee385112f3fe9c6656ea2cec604cbb5a9227526653bfa7035e4ae80010b1ba16a76608d5dde0a62bc019e9047b5ec05b1005fd017366130a4ba555e7be654561ee3f539c93cb2c9988fca71bf0ad9c4a426b924641a28e1e4adb93609bfa5b2bc81714cbba1110208b86d7b87be28bdf63a62e33ae81dbcc43de9192bd192c40e85faab539000102030405060708090a0b0c0d0e0f1011121314151617", + "Expected": "817d05c4d540ace3f250fd082e2deb8c2097410fcfe4ce40862cfa015b7f62a7bb72ec1af2915cad66294447b45d177fe759eb80370b0dbd3c0e1c448d54db81eadc11e40c19e394d066a5c019c443798a98d4afd116fc220593d42fbf191b6af0ae75410badb641187ba24a0b968f742a75e2822853f137151d9ea972fd1f36b7c7cc4e71355ecf50648aec094b864cfae9316c0a7be3ddb8ab2d0050b2a029ee956c2366d49430c8f889f29ab514aea8e5b8dec40ba1b49432e30aee32ec45e96dd548205a79d8f8f918eed46acb2115c59086b1011b1d2b093cea723535c3d95efc8e51a7da43b80586d69eb7f213dfb06f7a8e789a9472392bd224411b50f8ca6f2862bcd63431912d1ff99c8d76408245da9e4bea649f0eb930b32922f2d0a345a206160be44d418f1a6c74bd49c4618392ef9350b264a461dfc684c7343211e27675b027054f1cb3d4a5b1a066d3a3ea2eed9caf13251d8be936818f15274e8e3d7539b7c5f216cef327a270fd2a886fbf679c163fb5806249f2c74da5ee0e3ffe9ad1fde2634b29b35da6da6d184ab6ae70199229", + "Name": "marcin-2-base-heavy", + "Gas": 5202, + "NoBenchmark": false + }, + { + "Input": "000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000500000000000000000000000000000000000000000000000000000000000000010000102030405060708090a0b0c0d0e0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff000102030405060708090a0b0c0d0e0f", + "Expected": "00000000000000000000000000000000", + "Name": "marcin-2-exp-heavy", + "Gas": 16368, + "NoBenchmark": false + }, + { + "Input": 
"000000000000000000000000000000000000000000000000000000000000003800000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000038e8e77626586f73b955364c7b4bbf0bb7f7685ebd40e852b164633a4acbd3244c000102030405060708090a0b0c0d0e0f10111213141516172bfffffffffffffff01681d2220bfea4bb888a5543db8c0916274ddb1ea93b144c042c01d8164c95000102030405060708090a0b0c0d0e0f1011121314151617", + "Expected": "86bef3367fc7117c8a6b825cadebe80f3e94c321dda73e9e240b98188a1d5c071c60a195097c8d1fb85ce03a2e1b6964846edee5aa2c3f46", + "Name": "marcin-2-balanced", + "Gas": 5978, + "NoBenchmark": false + }, + { + "Input": "000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000020e8e77626586f73b955364c7b4bbf0bb7f7685ebd40e852b164633a4acbd3244cfffffffffffffffffffffffffffffffff01681d2220bfea4bb888a5543db8c0916274ddb1ea93b144c042c01d8164c95", + "Expected": "4004762c491606a5132134da6086284f74cc8e14b08f18b90fc09f31bca3d78f", + "Name": "marcin-3-base-heavy", + "Gas": 2032, + "NoBenchmark": false + }, + { + "Input": "000000000000000000000000000000000000000000000000000000000000001800000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000018000102030405060708090a0b0c0d0e0f1011121314151617ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff000102030405060708090a0b0c0d0e0f1011121314151617", + "Expected": "000000000000000000000000000000000000000000000000", + "Name": "marcin-3-exp-heavy", + "Gas": 4080, + "NoBenchmark": false + }, + { + "Input": 
"000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000020e8e77626586f73b955364c7b4bbf0bb7f7685ebd40e852b164633a4acbd3244cfffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01681d2220bfea4bb888a5543db8c0916274ddb1ea93b144c042c01d8164c95", + "Expected": "2d3feee20d394af68dd6744b86a8aca6a4a0b7f01bbcd3c3eec768245ca6acee", + "Name": "marcin-3-balanced", + "Gas": 4080, + "NoBenchmark": false + }, + { + "Input": "00000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000051000000000000000000000000000000000000000000000000000000000000000800ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00", + "Expected": "00ffffffffffffff", + "Name": "mod-8-exp-648", + "Gas": 16624, + "NoBenchmark": false + }, + { + "Input": "00000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000070000000000000000000000000000000000000000000000000000000000000000800ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00", + "Expected": "00ffffffffffffff", + "Name": "mod-8-exp-896", + "Gas": 24560, + "NoBenchmark": false + }, + { + "Input": "00000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000002000ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00", + "Expected": 
"00ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + "Name": "mod-32-exp-32", + "Gas": 500, + "NoBenchmark": false + }, + { + "Input": "00000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000005000000000000000000000000000000000000000000000000000000000000002000ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0dffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00", + "Expected": "00ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + "Name": "mod-32-exp-36", + "Gas": 560, + "NoBenchmark": false + }, + { + "Input": "00000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000005000000000000000000000000000000000000000000000000000000000000002000ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00", + "Expected": "00ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + "Name": "mod-32-exp-40", + "Gas": 624, + "NoBenchmark": false + }, + { + "Input": "00000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000002000ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00", + "Expected": "00ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + "Name": "mod-32-exp-64", + "Gas": 1008, + "NoBenchmark": false + }, + { + "Input": 
"00000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000009000000000000000000000000000000000000000000000000000000000000002000ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00", + "Expected": "00ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + "Name": "mod-32-exp-65", + "Gas": 1024, + "NoBenchmark": false + }, + { + "Input": "00000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002000ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00", + "Expected": "00ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + "Name": "mod-32-exp-128", + "Gas": 2032, + "NoBenchmark": false + }, + { + "Input": 
"00000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000010000ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff03ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00", + "Expected": "02fd01000000000000000000000000000000fefffffffffffd03ff0000000000000000000000000000000000000000000000fefffffffffffd03ff0000000000000000000000000000000000000000000000fefffffffffffd03ff0000000000000000000000000000000000000000000000fefffffffffffd03ff0000000000000000000000000000000000000000000000fefffffffffffd03ff0000000000000000000000000000000000000000000000fefffffffffffd03ff0000000000000000000000000000000000000000000000fefffffffffffd03ff0000000000000000000000000000000000000000000000fefffffffffffd03feffffffffff", + "Name": "mod-256-exp-2", + "Gas": 2048, + "NoBenchmark": false + }, + { + "Input": 
"00000000000000000000000000000000000000000000000000000000000001080000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000010800ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff03ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ffffffffffffff00", + "Expected": "0100fefffffffffffff710f80000000006f108000000000000feffffffffffff0100fefffffffffffef90ff80000000105f008ffffffffff02fdffffffffffff0100feffffffffff00f80ff80000010101f407fffffffd06fc0000000000fd020000feffffffff00fff80ff8000101fa08f207fffffb0afa0000000001fb03000000feffffff00fffff80ff80102f80405f207fff90ef80000000002f90400000000feffff00fffffff80ff903f6050005f207f712f60000000003f7050000000000feff00fffffffff810fcf406000005f1fd16f40000000004f506000000000000fe00fffffffffff915ea0700000005e522f20000000005f306ffffffffffffffffffffffffff", + "Name": "mod-264-exp-2", + "Gas": 2178, + "NoBenchmark": false + }, + { + "Input": 
"00000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000040000fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff03ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00fffffffffffffffffffffffffffff
fffffffffffffffffffffffffffffffff00ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00", + "Expected": "00fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe02fefffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe02fefffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe02fefffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe02fefffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe02fefffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe02fefffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe02fefffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe02fefffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe02fefffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe02fefffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe02fefffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe02fefffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe02fefffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe02fefffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe02fefffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe02fefffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe02fefffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe02fefffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe02fefffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe02fefffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe02fefffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe02fefffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe02fefffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe02fefffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe02fefffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe02fefffffffffffffffffffff
ffffffffffffffffffffffffffffffffffffffe02fefffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe02fefffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe02fefffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe02fefffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe02feffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + "Name": "mod-1024-exp-2", + "Gas": 32768, + "NoBenchmark": false + }, + { + "Input": "000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000700000000000000000000000000000000000000000000000000000000000000008ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + "Expected": "2a02d5f86c2375ff", + "Name": "pawel-1-exp-heavy", + "Gas": 24560, + "NoBenchmark": false + }, + { + "Input": "000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000280000000000000000000000000000000000000000000000000000000000000010ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + "Expected": "823ef7dc60d6d9616756c48f69b7c4ff", + "Name": "pawel-2-exp-heavy", + "Gas": 6128, + "NoBenchmark": false + }, + { + "Input": "000000000000000000000000000000000000000000000000000000000000001800000000000000000000000000000000000000000000000000000000000000150000000000000000000000000000000000000000000000000000000000000018ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + "Expected": "c817dd5aa60a41948eed409706c2aa97be3000d4da0261ff", + "Name": "pawel-3-exp-heavy", + "Gas": 2672, + "NoBenchmark": false + }, + { + "Input": 
"0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000c0000000000000000000000000000000000000000000000000000000000000020ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + "Expected": "2defaca0137d6edacbbd5d36d6ed70cbf8a998ffb19fc270d45a18d37e0f35ff", + "Name": "pawel-4-exp-heavy", + "Gas": 1520, + "NoBenchmark": false + }, + { + "Input": "000000000000000000000000000000000000000000000000000000000000001700000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000017bffffffffffffffffffffffffffffffffffffffffffffbffffffffffffffffff7ffffffffffffffffffffffffffffffffffffffffffe", + "Expected": "200f14de1d474710c1c979920452e0ffc2ac6f618afba5", + "Name": "mod_vul_pawel_3_exp_8", + "Gas": 1008, + "NoBenchmark": false } ] diff --git a/crypto/blake2b/blake2b.go b/crypto/blake2b/blake2b.go index c24a88b99d..e00ee2e6d2 100644 --- a/crypto/blake2b/blake2b.go +++ b/crypto/blake2b/blake2b.go @@ -302,7 +302,7 @@ func appendUint64(b []byte, x uint64) []byte { return append(b, a[:]...) 
} -//nolint:unused,deadcode +//nolint:unused func appendUint32(b []byte, x uint32) []byte { var a [4]byte binary.BigEndian.PutUint32(a[:], x) @@ -314,7 +314,7 @@ func consumeUint64(b []byte) ([]byte, uint64) { return b[8:], x } -//nolint:unused,deadcode +//nolint:unused func consumeUint32(b []byte) ([]byte, uint32) { x := binary.BigEndian.Uint32(b) return b[4:], x diff --git a/crypto/blake2b/blake2b_generic.go b/crypto/blake2b/blake2b_generic.go index 61e678fdf5..4d3f69d292 100644 --- a/crypto/blake2b/blake2b_generic.go +++ b/crypto/blake2b/blake2b_generic.go @@ -25,7 +25,7 @@ var precomputed = [10][16]byte{ {10, 8, 7, 1, 2, 4, 6, 5, 15, 9, 3, 13, 11, 14, 12, 0}, } -// nolint:unused,deadcode +// nolint:unused func hashBlocksGeneric(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) { var m [16]uint64 c0, c1 := c[0], c[1] diff --git a/crypto/blake2b/blake2b_test.go b/crypto/blake2b/blake2b_test.go index 9d24444a27..8f210445fc 100644 --- a/crypto/blake2b/blake2b_test.go +++ b/crypto/blake2b/blake2b_test.go @@ -303,8 +303,7 @@ func benchmarkSum(b *testing.B, size int, sse4, avx, avx2 bool) { data := make([]byte, size) b.SetBytes(int64(size)) - b.ResetTimer() - for i := 0; i < b.N; i++ { + for b.Loop() { Sum512(data) } } @@ -319,8 +318,7 @@ func benchmarkWrite(b *testing.B, size int, sse4, avx, avx2 bool) { data := make([]byte, size) h, _ := New512(nil) b.SetBytes(int64(size)) - b.ResetTimer() - for i := 0; i < b.N; i++ { + for b.Loop() { h.Write(data) } } diff --git a/crypto/bn256/bn256_fast.go b/crypto/bn256/bn256_fast.go index e3c9b60518..93a2eef879 100644 --- a/crypto/bn256/bn256_fast.go +++ b/crypto/bn256/bn256_fast.go @@ -9,18 +9,18 @@ package bn256 import ( - bn256cf "github.com/ethereum/go-ethereum/crypto/bn256/cloudflare" + gnark "github.com/ethereum/go-ethereum/crypto/bn256/gnark" ) // G1 is an abstract cyclic group. The zero value is suitable for use as the // output of an operation, but cannot be used as an input. 
-type G1 = bn256cf.G1 +type G1 = gnark.G1 // G2 is an abstract cyclic group. The zero value is suitable for use as the // output of an operation, but cannot be used as an input. -type G2 = bn256cf.G2 +type G2 = gnark.G2 // PairingCheck calculates the Optimal Ate pairing for a set of points. func PairingCheck(a []*G1, b []*G2) bool { - return bn256cf.PairingCheck(a, b) + return gnark.PairingCheck(a, b) } diff --git a/crypto/bn256/cloudflare/gfp_decl.go b/crypto/bn256/cloudflare/gfp_decl.go index 1954d14a4a..b7dd1a8aac 100644 --- a/crypto/bn256/cloudflare/gfp_decl.go +++ b/crypto/bn256/cloudflare/gfp_decl.go @@ -10,7 +10,7 @@ import ( "golang.org/x/sys/cpu" ) -//nolint:varcheck,unused,deadcode +//nolint:unused var hasBMI2 = cpu.X86.HasBMI2 //go:noescape diff --git a/crypto/crypto_test.go b/crypto/crypto_test.go index e620d6ee3a..d803ab27c5 100644 --- a/crypto/crypto_test.go +++ b/crypto/crypto_test.go @@ -60,7 +60,7 @@ func TestToECDSAErrors(t *testing.T) { func BenchmarkSha3(b *testing.B) { a := []byte("hello world") - for i := 0; i < b.N; i++ { + for b.Loop() { Keccak256(a) } } @@ -310,7 +310,7 @@ func BenchmarkKeccak256Hash(b *testing.B) { rand.Read(input[:]) b.ReportAllocs() - for i := 0; i < b.N; i++ { + for b.Loop() { Keccak256Hash(input[:]) } } @@ -329,7 +329,7 @@ func BenchmarkHashData(b *testing.B) { rand.Read(input[:]) b.ReportAllocs() - for i := 0; i < b.N; i++ { + for b.Loop() { HashData(buffer, input[:]) } } diff --git a/crypto/ecies/ecies_test.go b/crypto/ecies/ecies_test.go index e3da71010e..e822db2783 100644 --- a/crypto/ecies/ecies_test.go +++ b/crypto/ecies/ecies_test.go @@ -164,7 +164,7 @@ func TestTooBigSharedKey(t *testing.T) { // Benchmark the generation of P256 keys. 
func BenchmarkGenerateKeyP256(b *testing.B) { - for i := 0; i < b.N; i++ { + for b.Loop() { if _, err := GenerateKey(rand.Reader, elliptic.P256(), nil); err != nil { b.Fatal(err) } @@ -177,8 +177,7 @@ func BenchmarkGenSharedKeyP256(b *testing.B) { if err != nil { b.Fatal(err) } - b.ResetTimer() - for i := 0; i < b.N; i++ { + for b.Loop() { _, err := prv.GenerateShared(&prv.PublicKey, 16, 16) if err != nil { b.Fatal(err) @@ -192,8 +191,7 @@ func BenchmarkGenSharedKeyS256(b *testing.B) { if err != nil { b.Fatal(err) } - b.ResetTimer() - for i := 0; i < b.N; i++ { + for b.Loop() { _, err := prv.GenerateShared(&prv.PublicKey, 16, 16) if err != nil { b.Fatal(err) diff --git a/crypto/kzg4844/kzg4844.go b/crypto/kzg4844/kzg4844.go index 9da2386368..3ccc204838 100644 --- a/crypto/kzg4844/kzg4844.go +++ b/crypto/kzg4844/kzg4844.go @@ -34,10 +34,10 @@ var ( blobT = reflect.TypeFor[Blob]() commitmentT = reflect.TypeFor[Commitment]() proofT = reflect.TypeFor[Proof]() - - CellProofsPerBlob = 128 ) +const CellProofsPerBlob = 128 + // Blob represents a 4844 data blob. 
type Blob [131072]byte diff --git a/crypto/kzg4844/kzg4844_test.go b/crypto/kzg4844/kzg4844_test.go index 7e73efd850..743a277199 100644 --- a/crypto/kzg4844/kzg4844_test.go +++ b/crypto/kzg4844/kzg4844_test.go @@ -105,8 +105,7 @@ func benchmarkBlobToCommitment(b *testing.B, ckzg bool) { blob := randBlob() - b.ResetTimer() - for i := 0; i < b.N; i++ { + for b.Loop() { BlobToCommitment(blob) } } @@ -125,8 +124,7 @@ func benchmarkComputeProof(b *testing.B, ckzg bool) { point = randFieldElement() ) - b.ResetTimer() - for i := 0; i < b.N; i++ { + for b.Loop() { ComputeProof(blob, point) } } @@ -147,8 +145,7 @@ func benchmarkVerifyProof(b *testing.B, ckzg bool) { proof, claim, _ = ComputeProof(blob, point) ) - b.ResetTimer() - for i := 0; i < b.N; i++ { + for b.Loop() { VerifyProof(commitment, point, claim, proof) } } @@ -167,8 +164,7 @@ func benchmarkComputeBlobProof(b *testing.B, ckzg bool) { commitment, _ = BlobToCommitment(blob) ) - b.ResetTimer() - for i := 0; i < b.N; i++ { + for b.Loop() { ComputeBlobProof(blob, commitment) } } @@ -188,8 +184,7 @@ func benchmarkVerifyBlobProof(b *testing.B, ckzg bool) { proof, _ = ComputeBlobProof(blob, commitment) ) - b.ResetTimer() - for i := 0; i < b.N; i++ { + for b.Loop() { VerifyBlobProof(blob, commitment, proof) } } @@ -230,3 +225,31 @@ func testKZGCells(t *testing.T, ckzg bool) { t.Fatalf("failed to verify KZG proof at point: %v", err) } } + +// goos: darwin +// goarch: arm64 +// pkg: github.com/ethereum/go-ethereum/crypto/kzg4844 +// cpu: Apple M1 Pro +// BenchmarkGOKZGComputeCellProofs +// BenchmarkGOKZGComputeCellProofs-8 8 139012286 ns/op +func BenchmarkGOKZGComputeCellProofs(b *testing.B) { benchmarkComputeCellProofs(b, false) } +func BenchmarkCKZGComputeCellProofs(b *testing.B) { benchmarkComputeCellProofs(b, true) } + +func benchmarkComputeCellProofs(b *testing.B, ckzg bool) { + if ckzg && !ckzgAvailable { + b.Skip("CKZG unavailable in this test build") + } + defer func(old bool) { useCKZG.Store(old) 
}(useCKZG.Load()) + useCKZG.Store(ckzg) + + blob := randBlob() + _, _ = ComputeCellProofs(blob) // for kzg initialization + b.ResetTimer() + + for b.Loop() { + _, err := ComputeCellProofs(blob) + if err != nil { + b.Fatalf("failed to create KZG proof at point: %v", err) + } + } +} diff --git a/crypto/secp256k1/secp256_test.go b/crypto/secp256k1/secp256_test.go index 4827cc5b25..c7485bca08 100644 --- a/crypto/secp256k1/secp256_test.go +++ b/crypto/secp256k1/secp256_test.go @@ -221,9 +221,7 @@ func TestRecoverSanity(t *testing.T) { func BenchmarkSign(b *testing.B) { _, seckey := generateKeyPair() msg := csprngEntropy(32) - b.ResetTimer() - - for i := 0; i < b.N; i++ { + for b.Loop() { Sign(msg, seckey) } } @@ -232,9 +230,7 @@ func BenchmarkRecover(b *testing.B) { msg := csprngEntropy(32) _, seckey := generateKeyPair() sig, _ := Sign(msg, seckey) - b.ResetTimer() - - for i := 0; i < b.N; i++ { + for b.Loop() { RecoverPubkey(msg, sig) } } diff --git a/crypto/signature_test.go b/crypto/signature_test.go index 74d683b507..b1f2960254 100644 --- a/crypto/signature_test.go +++ b/crypto/signature_test.go @@ -135,7 +135,7 @@ func TestPubkeyRandom(t *testing.T) { } func BenchmarkEcrecoverSignature(b *testing.B) { - for i := 0; i < b.N; i++ { + for b.Loop() { if _, err := Ecrecover(testmsg, testsig); err != nil { b.Fatal("ecrecover error", err) } @@ -144,7 +144,7 @@ func BenchmarkEcrecoverSignature(b *testing.B) { func BenchmarkVerifySignature(b *testing.B) { sig := testsig[:len(testsig)-1] // remove recovery id - for i := 0; i < b.N; i++ { + for b.Loop() { if !VerifySignature(testpubkey, testmsg, sig) { b.Fatal("verify error") } @@ -152,7 +152,7 @@ func BenchmarkVerifySignature(b *testing.B) { } func BenchmarkDecompressPubkey(b *testing.B) { - for i := 0; i < b.N; i++ { + for b.Loop() { if _, err := DecompressPubkey(testpubkeyc); err != nil { b.Fatal(err) } diff --git a/eth/api_backend_test.go b/eth/api_backend_test.go index d0718ede1c..aa0539511b 100644 --- 
a/eth/api_backend_test.go +++ b/eth/api_backend_test.go @@ -20,6 +20,7 @@ import ( "context" "crypto/ecdsa" "errors" + "math" "math/big" "testing" "time" @@ -130,6 +131,27 @@ func TestSendTx(t *testing.T) { testSendTx(t, true) } +func TestSendTxEIP2681(t *testing.T) { + b := initBackend(false) + + // Test EIP-2681: nonce overflow should be rejected + tx := makeTx(uint64(math.MaxUint64), nil, nil, key) // max uint64 nonce + err := b.SendTx(context.Background(), tx) + if err == nil { + t.Fatal("Expected EIP-2681 nonce overflow error, but transaction was accepted") + } + if !errors.Is(err, core.ErrNonceMax) { + t.Errorf("Expected core.ErrNonceMax, got: %v", err) + } + + // Test normal case: should succeed + normalTx := makeTx(0, nil, nil, key) + err = b.SendTx(context.Background(), normalTx) + if err != nil { + t.Errorf("Normal transaction should succeed, got error: %v", err) + } +} + func testSendTx(t *testing.T, withLocal bool) { b := initBackend(withLocal) diff --git a/eth/api_debug.go b/eth/api_debug.go index 188dee11aa..892e103213 100644 --- a/eth/api_debug.go +++ b/eth/api_debug.go @@ -26,6 +26,7 @@ import ( "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/state" + "github.com/ethereum/go-ethereum/core/stateless" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/internal/ethapi" @@ -443,3 +444,91 @@ func (api *DebugAPI) GetTrieFlushInterval() (string, error) { } return api.eth.blockchain.GetTrieFlushInterval().String(), nil } + +// StateSize returns the current state size statistics from the state size tracker. +// Returns an error if the state size tracker is not initialized or if stats are not ready. 
+func (api *DebugAPI) StateSize(blockHashOrNumber *rpc.BlockNumberOrHash) (interface{}, error) { + sizer := api.eth.blockchain.StateSizer() + if sizer == nil { + return nil, errors.New("state size tracker is not enabled") + } + var ( + err error + stats *state.SizeStats + ) + if blockHashOrNumber == nil { + stats, err = sizer.Query(nil) + } else { + header, herr := api.eth.APIBackend.HeaderByNumberOrHash(context.Background(), *blockHashOrNumber) + if herr != nil || header == nil { + return nil, fmt.Errorf("block %s is unknown", blockHashOrNumber) + } + stats, err = sizer.Query(&header.Root) + } + if err != nil { + return nil, err + } + if stats == nil { + var s string + if blockHashOrNumber == nil { + s = "chain head" + } else { + s = blockHashOrNumber.String() + } + return nil, fmt.Errorf("state size of %s is not available", s) + } + return map[string]interface{}{ + "stateRoot": stats.StateRoot, + "blockNumber": hexutil.Uint64(stats.BlockNumber), + "accounts": hexutil.Uint64(stats.Accounts), + "accountBytes": hexutil.Uint64(stats.AccountBytes), + "storages": hexutil.Uint64(stats.Storages), + "storageBytes": hexutil.Uint64(stats.StorageBytes), + "accountTrienodes": hexutil.Uint64(stats.AccountTrienodes), + "accountTrienodeBytes": hexutil.Uint64(stats.AccountTrienodeBytes), + "storageTrienodes": hexutil.Uint64(stats.StorageTrienodes), + "storageTrienodeBytes": hexutil.Uint64(stats.StorageTrienodeBytes), + "contractCodes": hexutil.Uint64(stats.ContractCodes), + "contractCodeBytes": hexutil.Uint64(stats.ContractCodeBytes), + }, nil +} + +func (api *DebugAPI) ExecutionWitness(bn rpc.BlockNumber) (*stateless.ExtWitness, error) { + bc := api.eth.blockchain + block, err := api.eth.APIBackend.BlockByNumber(context.Background(), bn) + if err != nil { + return &stateless.ExtWitness{}, fmt.Errorf("block number %v not found", bn) + } + + parent := bc.GetHeader(block.ParentHash(), block.NumberU64()-1) + if parent == nil { + return &stateless.ExtWitness{}, fmt.Errorf("block 
number %v found, but parent missing", bn) + } + + result, err := bc.ProcessBlock(parent.Root, block, false, true) + if err != nil { + return nil, err + } + + return result.Witness().ToExtWitness(), nil +} + +func (api *DebugAPI) ExecutionWitnessByHash(hash common.Hash) (*stateless.ExtWitness, error) { + bc := api.eth.blockchain + block := bc.GetBlockByHash(hash) + if block == nil { + return &stateless.ExtWitness{}, fmt.Errorf("block hash %x not found", hash) + } + + parent := bc.GetHeader(block.ParentHash(), block.NumberU64()-1) + if parent == nil { + return &stateless.ExtWitness{}, fmt.Errorf("block number %x found, but parent missing", hash) + } + + result, err := bc.ProcessBlock(parent.Root, block, false, true) + if err != nil { + return nil, err + } + + return result.Witness().ToExtWitness(), nil +} diff --git a/eth/backend.go b/eth/backend.go index 7616ec9d31..8509561822 100644 --- a/eth/backend.go +++ b/eth/backend.go @@ -235,12 +235,15 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) { TxLookupLimit: int64(min(config.TransactionHistory, math.MaxInt64)), VmConfig: vm.Config{ EnablePreimageRecording: config.EnablePreimageRecording, + EnableWitnessStats: config.EnableWitnessStats, + StatelessSelfValidation: config.StatelessSelfValidation, }, // Enables file journaling for the trie database. The journal files will be stored // within the data directory. 
The corresponding paths will be either: // - DATADIR/triedb/merkle.journal // - DATADIR/triedb/verkle.journal TrieJournalDirectory: stack.ResolvePath("triedb"), + StateSizeTracking: config.EnableStateSizeTracking, } ) if config.VMTrace != "" { @@ -259,6 +262,12 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) { if config.OverrideOsaka != nil { overrides.OverrideOsaka = config.OverrideOsaka } + if config.OverrideBPO1 != nil { + overrides.OverrideBPO1 = config.OverrideBPO1 + } + if config.OverrideBPO2 != nil { + overrides.OverrideBPO2 = config.OverrideBPO2 + } if config.OverrideVerkle != nil { overrides.OverrideVerkle = config.OverrideVerkle } diff --git a/eth/catalyst/api.go b/eth/catalyst/api.go index 7f6dd40907..b37c26149f 100644 --- a/eth/catalyst/api.go +++ b/eth/catalyst/api.go @@ -213,8 +213,8 @@ func (api *ConsensusAPI) ForkchoiceUpdatedV3(update engine.ForkchoiceStateV1, pa return engine.STATUS_INVALID, attributesErr("missing withdrawals") case params.BeaconRoot == nil: return engine.STATUS_INVALID, attributesErr("missing beacon root") - case !api.checkFork(params.Timestamp, forks.Cancun, forks.Prague, forks.Osaka): - return engine.STATUS_INVALID, unsupportedForkErr("fcuV3 must only be called for cancun or prague payloads") + case !api.checkFork(params.Timestamp, forks.Cancun, forks.Prague, forks.Osaka, forks.BPO1, forks.BPO2, forks.BPO3, forks.BPO4, forks.BPO5): + return engine.STATUS_INVALID, unsupportedForkErr("fcuV3 must only be called for cancun/prague/osaka payloads") } } // TODO(matt): the spec requires that fcu is applied when called on a valid @@ -418,13 +418,21 @@ func (api *ConsensusAPI) GetPayloadV1(payloadID engine.PayloadID) (*engine.Execu // GetPayloadV2 returns a cached payload by id. 
func (api *ConsensusAPI) GetPayloadV2(payloadID engine.PayloadID) (*engine.ExecutionPayloadEnvelope, error) { + // executionPayload: ExecutionPayloadV1 | ExecutionPayloadV2 where: + // + // - ExecutionPayloadV1 MUST be returned if the payload timestamp is lower + // than the Shanghai timestamp + // + // - ExecutionPayloadV2 MUST be returned if the payload timestamp is greater + // or equal to the Shanghai timestamp if !payloadID.Is(engine.PayloadV1, engine.PayloadV2) { return nil, engine.UnsupportedFork } return api.getPayload(payloadID, false) } -// GetPayloadV3 returns a cached payload by id. +// GetPayloadV3 returns a cached payload by id. This endpoint should only +// be used for the Cancun fork. func (api *ConsensusAPI) GetPayloadV3(payloadID engine.PayloadID) (*engine.ExecutionPayloadEnvelope, error) { if !payloadID.Is(engine.PayloadV3) { return nil, engine.UnsupportedFork @@ -432,7 +440,8 @@ func (api *ConsensusAPI) GetPayloadV3(payloadID engine.PayloadID) (*engine.Execu return api.getPayload(payloadID, false) } -// GetPayloadV4 returns a cached payload by id. +// GetPayloadV4 returns a cached payload by id. This endpoint should only +// be used for the Prague fork. func (api *ConsensusAPI) GetPayloadV4(payloadID engine.PayloadID) (*engine.ExecutionPayloadEnvelope, error) { if !payloadID.Is(engine.PayloadV3) { return nil, engine.UnsupportedFork @@ -440,7 +449,11 @@ func (api *ConsensusAPI) GetPayloadV4(payloadID engine.PayloadID) (*engine.Execu return api.getPayload(payloadID, false) } -// GetPayloadV5 returns a cached payload by id. +// GetPayloadV5 returns a cached payload by id. This endpoint should only +// be used after the Osaka fork. +// +// This method follows the same specification as engine_getPayloadV4 with +// changes of returning BlobsBundleV2 with BlobSidecar version 1. 
func (api *ConsensusAPI) GetPayloadV5(payloadID engine.PayloadID) (*engine.ExecutionPayloadEnvelope, error) { if !payloadID.Is(engine.PayloadV3) { return nil, engine.UnsupportedFork @@ -458,16 +471,40 @@ func (api *ConsensusAPI) getPayload(payloadID engine.PayloadID, full bool) (*eng } // GetBlobsV1 returns a blob from the transaction pool. +// +// Specification: +// +// Given an array of blob versioned hashes client software MUST respond with an +// array of BlobAndProofV1 objects with matching versioned hashes, respecting the +// order of versioned hashes in the input array. +// +// Client software MUST place responses in the order given in the request, using +// null for any missing blobs. For instance: +// +// if the request is [A_versioned_hash, B_versioned_hash, C_versioned_hash] and +// client software has data for blobs A and C, but doesn't have data for B, the +// response MUST be [A, null, C]. +// +// Client software MUST support request sizes of at least 128 blob versioned hashes. +// The client MUST return -38004: Too large request error if the number of requested +// blobs is too large. +// +// Client software MAY return an array of all null entries if syncing or otherwise +// unable to serve blob pool data. 
func (api *ConsensusAPI) GetBlobsV1(hashes []common.Hash) ([]*engine.BlobAndProofV1, error) { if len(hashes) > 128 { return nil, engine.TooLargeRequest.With(fmt.Errorf("requested blob count too large: %v", len(hashes))) } - blobs, _, proofs, err := api.eth.BlobTxPool().GetBlobs(hashes, types.BlobSidecarVersion0) + blobs, _, proofs, err := api.eth.BlobTxPool().GetBlobs(hashes, types.BlobSidecarVersion0, false) if err != nil { return nil, engine.InvalidParams.With(err) } res := make([]*engine.BlobAndProofV1, len(hashes)) for i := 0; i < len(blobs); i++ { + // Skip the non-existing blob + if blobs[i] == nil { + continue + } res[i] = &engine.BlobAndProofV1{ Blob: blobs[i][:], Proof: proofs[i][0][:], @@ -477,6 +514,33 @@ func (api *ConsensusAPI) GetBlobsV1(hashes []common.Hash) ([]*engine.BlobAndProo } // GetBlobsV2 returns a blob from the transaction pool. +// +// Specification: +// Refer to the specification for engine_getBlobsV1 with changes of the following: +// +// Given an array of blob versioned hashes client software MUST respond with an +// array of BlobAndProofV2 objects with matching versioned hashes, respecting +// the order of versioned hashes in the input array. +// +// Client software MUST return null in case of any missing or older version blobs. +// For instance, +// +// - if the request is [A_versioned_hash, B_versioned_hash, C_versioned_hash] and +// client software has data for blobs A and C, but doesn't have data for B, the +// response MUST be null. +// +// - if the request is [A_versioned_hash_for_blob_with_blob_proof], the response +// MUST be null as well. +// +// Note, geth internally make the conversion from old version to new one, so the +// data will be returned normally. +// +// Client software MUST support request sizes of at least 128 blob versioned +// hashes. The client MUST return -38004: Too large request error if the number +// of requested blobs is too large. 
+// +// Client software MUST return null if syncing or otherwise unable to serve +// blob pool data. func (api *ConsensusAPI) GetBlobsV2(hashes []common.Hash) ([]*engine.BlobAndProofV2, error) { if len(hashes) > 128 { return nil, engine.TooLargeRequest.With(fmt.Errorf("requested blob count too large: %v", len(hashes))) @@ -490,12 +554,21 @@ func (api *ConsensusAPI) GetBlobsV2(hashes []common.Hash) ([]*engine.BlobAndProo getBlobsV2RequestMiss.Inc(1) return nil, nil } - getBlobsV2RequestHit.Inc(1) - blobs, _, proofs, err := api.eth.BlobTxPool().GetBlobs(hashes, types.BlobSidecarVersion1) + blobs, _, proofs, err := api.eth.BlobTxPool().GetBlobs(hashes, types.BlobSidecarVersion1, false) if err != nil { return nil, engine.InvalidParams.With(err) } + + // To comply with API spec, check again that we really got all data needed + for _, blob := range blobs { + if blob == nil { + getBlobsV2RequestMiss.Inc(1) + return nil, nil + } + } + getBlobsV2RequestHit.Inc(1) + res := make([]*engine.BlobAndProofV2, len(hashes)) for i := 0; i < len(blobs); i++ { var cellProofs []hexutil.Bytes @@ -576,8 +649,8 @@ func (api *ConsensusAPI) NewPayloadV4(params engine.ExecutableData, versionedHas return invalidStatus, paramsErr("nil beaconRoot post-cancun") case executionRequests == nil: return invalidStatus, paramsErr("nil executionRequests post-prague") - case !api.checkFork(params.Timestamp, forks.Prague, forks.Osaka): - return invalidStatus, unsupportedForkErr("newPayloadV4 must only be called for prague payloads") + case !api.checkFork(params.Timestamp, forks.Prague, forks.Osaka, forks.BPO1, forks.BPO2, forks.BPO3, forks.BPO4, forks.BPO5): + return invalidStatus, unsupportedForkErr("newPayloadV4 must only be called for prague/osaka payloads") } requests := convertRequests(executionRequests) if err := validateRequests(requests); err != nil { diff --git a/eth/catalyst/api_test.go b/eth/catalyst/api_test.go index ad377113b5..659280bf3b 100644 --- a/eth/catalyst/api_test.go +++ 
b/eth/catalyst/api_test.go @@ -19,7 +19,9 @@ package catalyst import ( "bytes" "context" + "crypto/ecdsa" crand "crypto/rand" + "crypto/sha256" "errors" "fmt" "math/big" @@ -40,6 +42,7 @@ import ( "github.com/ethereum/go-ethereum/crypto/kzg4844" "github.com/ethereum/go-ethereum/eth" "github.com/ethereum/go-ethereum/eth/ethconfig" + "github.com/ethereum/go-ethereum/internal/testrand" "github.com/ethereum/go-ethereum/internal/version" "github.com/ethereum/go-ethereum/miner" "github.com/ethereum/go-ethereum/node" @@ -47,6 +50,7 @@ import ( "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/rpc" "github.com/ethereum/go-ethereum/trie" + "github.com/holiman/uint256" ) var ( @@ -112,7 +116,7 @@ func TestEth2AssembleBlock(t *testing.T) { n, ethservice := startEthService(t, genesis, blocks) defer n.Close() - api := NewConsensusAPI(ethservice) + api := newConsensusAPIWithoutHeartbeat(ethservice) signer := types.NewEIP155Signer(ethservice.BlockChain().Config().ChainID) tx, err := types.SignTx(types.NewTransaction(uint64(10), blocks[9].Coinbase(), big.NewInt(1000), params.TxGas, big.NewInt(params.InitialBaseFee), nil), signer, testKey) if err != nil { @@ -151,7 +155,7 @@ func TestEth2AssembleBlockWithAnotherBlocksTxs(t *testing.T) { n, ethservice := startEthService(t, genesis, blocks[:9]) defer n.Close() - api := NewConsensusAPI(ethservice) + api := newConsensusAPIWithoutHeartbeat(ethservice) // Put the 10th block's tx in the pool and produce a new block txs := blocks[9].Transactions() @@ -173,7 +177,7 @@ func TestEth2PrepareAndGetPayload(t *testing.T) { n, ethservice := startEthService(t, genesis, blocks[:9]) defer n.Close() - api := NewConsensusAPI(ethservice) + api := newConsensusAPIWithoutHeartbeat(ethservice) // Put the 10th block's tx in the pool and produce a new block txs := blocks[9].Transactions() @@ -238,8 +242,9 @@ func TestInvalidPayloadTimestamp(t *testing.T) { genesis, preMergeBlocks := generateMergeChain(10, false) n, ethservice := 
startEthService(t, genesis, preMergeBlocks) defer n.Close() + var ( - api = NewConsensusAPI(ethservice) + api = newConsensusAPIWithoutHeartbeat(ethservice) parent = ethservice.BlockChain().CurrentBlock() ) tests := []struct { @@ -281,7 +286,7 @@ func TestEth2NewBlock(t *testing.T) { defer n.Close() var ( - api = NewConsensusAPI(ethservice) + api = newConsensusAPIWithoutHeartbeat(ethservice) parent = preMergeBlocks[len(preMergeBlocks)-1] // This EVM code generates a log when the contract is created. @@ -434,8 +439,14 @@ func startEthService(t *testing.T, genesis *core.Genesis, blocks []*types.Block) t.Fatal("can't create node:", err) } - mcfg := miner.DefaultConfig - ethcfg := ðconfig.Config{Genesis: genesis, SyncMode: ethconfig.FullSync, TrieTimeout: time.Minute, TrieDirtyCache: 256, TrieCleanCache: 256, Miner: mcfg} + ethcfg := ðconfig.Config{ + Genesis: genesis, + SyncMode: ethconfig.FullSync, + TrieTimeout: time.Minute, + TrieDirtyCache: 256, + TrieCleanCache: 256, + Miner: miner.DefaultConfig, + } ethservice, err := eth.New(n, ethcfg) if err != nil { t.Fatal("can't create eth service:", err) @@ -459,6 +470,7 @@ func TestFullAPI(t *testing.T) { genesis, preMergeBlocks := generateMergeChain(10, false) n, ethservice := startEthService(t, genesis, preMergeBlocks) defer n.Close() + var ( parent = ethservice.BlockChain().CurrentBlock() // This EVM code generates a log when the contract is created. 
@@ -476,7 +488,7 @@ func TestFullAPI(t *testing.T) { } func setupBlocks(t *testing.T, ethservice *eth.Ethereum, n int, parent *types.Header, callback func(parent *types.Header), withdrawals [][]*types.Withdrawal, beaconRoots []common.Hash) []*types.Header { - api := NewConsensusAPI(ethservice) + api := newConsensusAPIWithoutHeartbeat(ethservice) var blocks []*types.Header for i := 0; i < n; i++ { callback(parent) @@ -524,7 +536,7 @@ func TestExchangeTransitionConfig(t *testing.T) { defer n.Close() // invalid ttd - api := NewConsensusAPI(ethservice) + api := newConsensusAPIWithoutHeartbeat(ethservice) config := engine.TransitionConfigurationV1{ TerminalTotalDifficulty: (*hexutil.Big)(big.NewInt(0)), TerminalBlockHash: common.Hash{}, @@ -585,7 +597,7 @@ func TestNewPayloadOnInvalidChain(t *testing.T) { defer n.Close() var ( - api = NewConsensusAPI(ethservice) + api = newConsensusAPIWithoutHeartbeat(ethservice) parent = ethservice.BlockChain().CurrentBlock() signer = types.LatestSigner(ethservice.BlockChain().Config()) // This EVM code generates a log when the contract is created. 
@@ -688,7 +700,7 @@ func TestEmptyBlocks(t *testing.T) { defer n.Close() commonAncestor := ethservice.BlockChain().CurrentBlock() - api := NewConsensusAPI(ethservice) + api := newConsensusAPIWithoutHeartbeat(ethservice) // Setup 10 blocks on the canonical chain setupBlocks(t, ethservice, 10, commonAncestor, func(parent *types.Header) {}, nil, nil) @@ -814,8 +826,8 @@ func TestTrickRemoteBlockCache(t *testing.T) { } nodeA.Server().AddPeer(nodeB.Server().Self()) nodeB.Server().AddPeer(nodeA.Server().Self()) - apiA := NewConsensusAPI(ethserviceA) - apiB := NewConsensusAPI(ethserviceB) + apiA := newConsensusAPIWithoutHeartbeat(ethserviceA) + apiB := newConsensusAPIWithoutHeartbeat(ethserviceB) commonAncestor := ethserviceA.BlockChain().CurrentBlock() @@ -872,7 +884,7 @@ func TestInvalidBloom(t *testing.T) { defer n.Close() commonAncestor := ethservice.BlockChain().CurrentBlock() - api := NewConsensusAPI(ethservice) + api := newConsensusAPIWithoutHeartbeat(ethservice) // Setup 10 blocks on the canonical chain setupBlocks(t, ethservice, 10, commonAncestor, func(parent *types.Header) {}, nil, nil) @@ -898,7 +910,7 @@ func TestSimultaneousNewBlock(t *testing.T) { defer n.Close() var ( - api = NewConsensusAPI(ethservice) + api = newConsensusAPIWithoutHeartbeat(ethservice) parent = preMergeBlocks[len(preMergeBlocks)-1] ) for i := 0; i < 10; i++ { @@ -988,7 +1000,7 @@ func TestWithdrawals(t *testing.T) { n, ethservice := startEthService(t, genesis, blocks) defer n.Close() - api := NewConsensusAPI(ethservice) + api := newConsensusAPIWithoutHeartbeat(ethservice) // 10: Build Shanghai block with no withdrawals. 
parent := ethservice.BlockChain().CurrentHeader() @@ -1105,7 +1117,7 @@ func TestNilWithdrawals(t *testing.T) { n, ethservice := startEthService(t, genesis, blocks) defer n.Close() - api := NewConsensusAPI(ethservice) + api := newConsensusAPIWithoutHeartbeat(ethservice) parent := ethservice.BlockChain().CurrentHeader() aa := common.Address{0xaa} @@ -1301,7 +1313,7 @@ func allBodies(blocks []*types.Block) []*types.Body { func TestGetBlockBodiesByHash(t *testing.T) { node, eth, blocks := setupBodies(t) - api := NewConsensusAPI(eth) + api := newConsensusAPIWithoutHeartbeat(eth) defer node.Close() tests := []struct { @@ -1357,7 +1369,7 @@ func TestGetBlockBodiesByHash(t *testing.T) { func TestGetBlockBodiesByRange(t *testing.T) { node, eth, blocks := setupBodies(t) - api := NewConsensusAPI(eth) + api := newConsensusAPIWithoutHeartbeat(eth) defer node.Close() tests := []struct { @@ -1438,7 +1450,7 @@ func TestGetBlockBodiesByRange(t *testing.T) { func TestGetBlockBodiesByRangeInvalidParams(t *testing.T) { node, eth, _ := setupBodies(t) - api := NewConsensusAPI(eth) + api := newConsensusAPIWithoutHeartbeat(eth) defer node.Close() tests := []struct { start hexutil.Uint64 @@ -1550,7 +1562,7 @@ func TestParentBeaconBlockRoot(t *testing.T) { n, ethservice := startEthService(t, genesis, blocks) defer n.Close() - api := NewConsensusAPI(ethservice) + api := newConsensusAPIWithoutHeartbeat(ethservice) // 11: Build Shanghai block with no withdrawals. 
parent := ethservice.BlockChain().CurrentHeader() @@ -1633,7 +1645,7 @@ func TestWitnessCreationAndConsumption(t *testing.T) { n, ethservice := startEthService(t, genesis, blocks[:9]) defer n.Close() - api := NewConsensusAPI(ethservice) + api := newConsensusAPIWithoutHeartbeat(ethservice) // Put the 10th block's tx in the pool and produce a new block txs := blocks[9].Transactions() @@ -1725,7 +1737,7 @@ func TestGetClientVersion(t *testing.T) { n, ethservice := startEthService(t, genesis, preMergeBlocks) defer n.Close() - api := NewConsensusAPI(ethservice) + api := newConsensusAPIWithoutHeartbeat(ethservice) info := engine.ClientVersionV1{ Code: "TT", Name: "test", @@ -1799,3 +1811,245 @@ func TestValidateRequests(t *testing.T) { }) } } + +var ( + testBlobs []*kzg4844.Blob + testBlobCommits []kzg4844.Commitment + testBlobProofs []kzg4844.Proof + testBlobCellProofs [][]kzg4844.Proof + testBlobVHashes [][32]byte +) + +func init() { + for i := 0; i < 6; i++ { + testBlob := &kzg4844.Blob{byte(i)} + testBlobs = append(testBlobs, testBlob) + + testBlobCommit, _ := kzg4844.BlobToCommitment(testBlob) + testBlobCommits = append(testBlobCommits, testBlobCommit) + + testBlobProof, _ := kzg4844.ComputeBlobProof(testBlob, testBlobCommit) + testBlobProofs = append(testBlobProofs, testBlobProof) + + testBlobCellProof, _ := kzg4844.ComputeCellProofs(testBlob) + testBlobCellProofs = append(testBlobCellProofs, testBlobCellProof) + + testBlobVHash := kzg4844.CalcBlobHashV1(sha256.New(), &testBlobCommit) + testBlobVHashes = append(testBlobVHashes, testBlobVHash) + } +} + +// makeMultiBlobTx is a utility method to construct a random blob tx with +// certain number of blobs in its sidecar. 
+func makeMultiBlobTx(chainConfig *params.ChainConfig, nonce uint64, blobCount int, blobOffset int, key *ecdsa.PrivateKey, version byte) *types.Transaction { + var ( + blobs []kzg4844.Blob + blobHashes []common.Hash + commitments []kzg4844.Commitment + proofs []kzg4844.Proof + ) + for i := 0; i < blobCount; i++ { + blobs = append(blobs, *testBlobs[blobOffset+i]) + commitments = append(commitments, testBlobCommits[blobOffset+i]) + if version == types.BlobSidecarVersion0 { + proofs = append(proofs, testBlobProofs[blobOffset+i]) + } else { + cellProofs, _ := kzg4844.ComputeCellProofs(testBlobs[blobOffset+i]) + proofs = append(proofs, cellProofs...) + } + blobHashes = append(blobHashes, testBlobVHashes[blobOffset+i]) + } + blobtx := &types.BlobTx{ + ChainID: uint256.MustFromBig(chainConfig.ChainID), + Nonce: nonce, + GasTipCap: uint256.NewInt(1), + GasFeeCap: uint256.NewInt(1000), + Gas: 21000, + BlobFeeCap: uint256.NewInt(1000), + BlobHashes: blobHashes, + Value: uint256.NewInt(100), + Sidecar: types.NewBlobTxSidecar(version, blobs, commitments, proofs), + } + return types.MustSignNewTx(key, types.LatestSigner(chainConfig), blobtx) +} + +func newGetBlobEnv(t *testing.T, version byte) (*node.Node, *ConsensusAPI) { + var ( + // Create a database pre-initialize with a genesis block + config = *params.MergedTestChainConfig + + key1, _ = crypto.GenerateKey() + key2, _ = crypto.GenerateKey() + key3, _ = crypto.GenerateKey() + + addr1 = crypto.PubkeyToAddress(key1.PublicKey) + addr2 = crypto.PubkeyToAddress(key2.PublicKey) + addr3 = crypto.PubkeyToAddress(key3.PublicKey) + ) + // Disable Osaka fork for GetBlobsV1 + if version == 0 { + config.OsakaTime = nil + } + gspec := &core.Genesis{ + Config: &config, + Alloc: types.GenesisAlloc{ + testAddr: {Balance: testBalance}, + addr1: {Balance: testBalance}, + addr2: {Balance: testBalance}, + addr3: {Balance: testBalance}, + }, + Difficulty: common.Big0, + } + n, ethServ := startEthService(t, gspec, nil) + + // fill blob txs into 
the pool + tx1 := makeMultiBlobTx(&config, 0, 2, 0, key1, version) // blob[0, 2) + tx2 := makeMultiBlobTx(&config, 0, 2, 2, key2, version) // blob[2, 4) + tx3 := makeMultiBlobTx(&config, 0, 2, 4, key3, version) // blob[4, 6) + ethServ.TxPool().Add([]*types.Transaction{tx1, tx2, tx3}, true) + + api := newConsensusAPIWithoutHeartbeat(ethServ) + return n, api +} + +func TestGetBlobsV1(t *testing.T) { + n, api := newGetBlobEnv(t, 0) + defer n.Close() + + suites := []struct { + start int + limit int + fillRandom bool + }{ + { + start: 0, limit: 1, + }, + { + start: 0, limit: 1, fillRandom: true, + }, + { + start: 0, limit: 2, + }, + { + start: 0, limit: 2, fillRandom: true, + }, + { + start: 1, limit: 3, + }, + { + start: 1, limit: 3, fillRandom: true, + }, + { + start: 0, limit: 6, + }, + { + start: 0, limit: 6, fillRandom: true, + }, + { + start: 1, limit: 5, + }, + { + start: 1, limit: 5, fillRandom: true, + }, + } + for i, suite := range suites { + // Fill the request for retrieving blobs + var ( + vhashes []common.Hash + expect []*engine.BlobAndProofV1 + ) + // fill missing blob at the beginning + if suite.fillRandom { + vhashes = append(vhashes, testrand.Hash()) + expect = append(expect, nil) + } + for j := suite.start; j < suite.limit; j++ { + vhashes = append(vhashes, testBlobVHashes[j]) + expect = append(expect, &engine.BlobAndProofV1{ + Blob: testBlobs[j][:], + Proof: testBlobProofs[j][:], + }) + + // fill missing blobs in the middle + if suite.fillRandom && rand.Intn(2) == 0 { + vhashes = append(vhashes, testrand.Hash()) + expect = append(expect, nil) + } + } + // fill missing blobs at the end + if suite.fillRandom { + vhashes = append(vhashes, testrand.Hash()) + expect = append(expect, nil) + } + result, err := api.GetBlobsV1(vhashes) + if err != nil { + t.Errorf("Unexpected error for case %d, %v", i, err) + } + if !reflect.DeepEqual(result, expect) { + t.Fatalf("Unexpected result for case %d", i) + } + } +} + +func TestGetBlobsV2(t *testing.T) { + n, api := 
newGetBlobEnv(t, 1) + defer n.Close() + + suites := []struct { + start int + limit int + fillRandom bool + }{ + { + start: 0, limit: 1, + }, + { + start: 0, limit: 2, + }, + { + start: 1, limit: 3, + }, + { + start: 0, limit: 6, + }, + { + start: 1, limit: 5, + }, + { + start: 0, limit: 6, fillRandom: true, + }, + } + for i, suite := range suites { + // Fill the request for retrieving blobs + var ( + vhashes []common.Hash + expect []*engine.BlobAndProofV2 + ) + // fill missing blob + if suite.fillRandom { + vhashes = append(vhashes, testrand.Hash()) + } + for j := suite.start; j < suite.limit; j++ { + vhashes = append(vhashes, testBlobVHashes[j]) + var cellProofs []hexutil.Bytes + for _, proof := range testBlobCellProofs[j] { + cellProofs = append(cellProofs, proof[:]) + } + expect = append(expect, &engine.BlobAndProofV2{ + Blob: testBlobs[j][:], + CellProofs: cellProofs, + }) + } + result, err := api.GetBlobsV2(vhashes) + if err != nil { + t.Errorf("Unexpected error for case %d, %v", i, err) + } + // null is responded if any blob is missing + if suite.fillRandom { + expect = nil + } + if !reflect.DeepEqual(result, expect) { + t.Fatalf("Unexpected result for case %d", i) + } + } +} diff --git a/eth/catalyst/witness.go b/eth/catalyst/witness.go index 703f1b0881..0df612a695 100644 --- a/eth/catalyst/witness.go +++ b/eth/catalyst/witness.go @@ -73,8 +73,8 @@ func (api *ConsensusAPI) ForkchoiceUpdatedWithWitnessV3(update engine.Forkchoice return engine.STATUS_INVALID, attributesErr("missing withdrawals") case params.BeaconRoot == nil: return engine.STATUS_INVALID, attributesErr("missing beacon root") - case !api.checkFork(params.Timestamp, forks.Cancun, forks.Prague): - return engine.STATUS_INVALID, unsupportedForkErr("fcuV3 must only be called for cancun or prague payloads") + case !api.checkFork(params.Timestamp, forks.Cancun, forks.Prague, forks.Osaka, forks.BPO1, forks.BPO2, forks.BPO3, forks.BPO4, forks.BPO5): + return engine.STATUS_INVALID, 
unsupportedForkErr("fcuV3 must only be called for cancun/prague/osaka payloads") } } // TODO(matt): the spec requires that fcu is applied when called on a valid @@ -151,8 +151,8 @@ func (api *ConsensusAPI) NewPayloadWithWitnessV4(params engine.ExecutableData, v return invalidStatus, paramsErr("nil beaconRoot post-cancun") case executionRequests == nil: return invalidStatus, paramsErr("nil executionRequests post-prague") - case !api.checkFork(params.Timestamp, forks.Prague): - return invalidStatus, unsupportedForkErr("newPayloadV4 must only be called for prague payloads") + case !api.checkFork(params.Timestamp, forks.Prague, forks.Osaka, forks.BPO1, forks.BPO2, forks.BPO3, forks.BPO4, forks.BPO5): + return invalidStatus, unsupportedForkErr("newPayloadV4 must only be called for prague/osaka payloads") } requests := convertRequests(executionRequests) if err := validateRequests(requests); err != nil { @@ -228,8 +228,8 @@ func (api *ConsensusAPI) ExecuteStatelessPayloadV4(params engine.ExecutableData, return engine.StatelessPayloadStatusV1{Status: engine.INVALID}, paramsErr("nil beaconRoot post-cancun") case executionRequests == nil: return engine.StatelessPayloadStatusV1{Status: engine.INVALID}, paramsErr("nil executionRequests post-prague") - case !api.checkFork(params.Timestamp, forks.Prague, forks.Osaka): - return engine.StatelessPayloadStatusV1{Status: engine.INVALID}, unsupportedForkErr("newPayloadV4 must only be called for prague payloads") + case !api.checkFork(params.Timestamp, forks.Prague, forks.Osaka, forks.BPO1, forks.BPO2, forks.BPO3, forks.BPO4, forks.BPO5): + return engine.StatelessPayloadStatusV1{Status: engine.INVALID}, unsupportedForkErr("newPayloadV4 must only be called for prague/osaka payloads") } requests := convertRequests(executionRequests) if err := validateRequests(requests); err != nil { diff --git a/eth/ethconfig/config.go b/eth/ethconfig/config.go index 82c3c500a7..6020387bcd 100644 --- a/eth/ethconfig/config.go +++ 
b/eth/ethconfig/config.go @@ -62,6 +62,7 @@ var Defaults = Config{ TrieTimeout: 60 * time.Minute, SnapshotCache: 102, FilterLogCacheSize: 32, + LogQueryLimit: 1000, Miner: miner.DefaultConfig, TxPool: legacypool.DefaultConfig, BlobPool: blobpool.DefaultConfig, @@ -131,6 +132,10 @@ type Config struct { // This is the number of blocks for which logs will be cached in the filter system. FilterLogCacheSize int + // This is the maximum number of addresses or topics allowed in filter criteria + // for eth_getLogs. + LogQueryLimit int + // Mining options Miner miner.Config @@ -144,6 +149,15 @@ type Config struct { // Enables tracking of SHA3 preimages in the VM EnablePreimageRecording bool + // Enables collection of witness trie access statistics + EnableWitnessStats bool + + // Generate execution witnesses and self-check against them (testing purpose) + StatelessSelfValidation bool + + // Enables tracking of state size + EnableStateSizeTracking bool + // Enables VM tracing VMTrace string VMTraceJsonConfig string @@ -161,6 +175,12 @@ type Config struct { // OverrideOsaka (TODO: remove after the fork) OverrideOsaka *uint64 `toml:",omitempty"` + // OverrideBPO1 (TODO: remove after the fork) + OverrideBPO1 *uint64 `toml:",omitempty"` + + // OverrideBPO2 (TODO: remove after the fork) + OverrideBPO2 *uint64 `toml:",omitempty"` + // OverrideVerkle (TODO: remove after the fork) OverrideVerkle *uint64 `toml:",omitempty"` } diff --git a/eth/ethconfig/gen_config.go b/eth/ethconfig/gen_config.go index 0a188ba23c..6f6e541368 100644 --- a/eth/ethconfig/gen_config.go +++ b/eth/ethconfig/gen_config.go @@ -44,17 +44,23 @@ func (c Config) MarshalTOML() (interface{}, error) { SnapshotCache int Preimages bool FilterLogCacheSize int + LogQueryLimit int Miner miner.Config TxPool legacypool.Config BlobPool blobpool.Config GPO gasprice.Config EnablePreimageRecording bool + EnableWitnessStats bool + StatelessSelfValidation bool + EnableStateSizeTracking bool VMTrace string VMTraceJsonConfig 
string RPCGasCap uint64 RPCEVMTimeout time.Duration RPCTxFeeCap float64 OverrideOsaka *uint64 `toml:",omitempty"` + OverrideBPO1 *uint64 `toml:",omitempty"` + OverrideBPO2 *uint64 `toml:",omitempty"` OverrideVerkle *uint64 `toml:",omitempty"` } var enc Config @@ -85,17 +91,23 @@ func (c Config) MarshalTOML() (interface{}, error) { enc.SnapshotCache = c.SnapshotCache enc.Preimages = c.Preimages enc.FilterLogCacheSize = c.FilterLogCacheSize + enc.LogQueryLimit = c.LogQueryLimit enc.Miner = c.Miner enc.TxPool = c.TxPool enc.BlobPool = c.BlobPool enc.GPO = c.GPO enc.EnablePreimageRecording = c.EnablePreimageRecording + enc.EnableWitnessStats = c.EnableWitnessStats + enc.StatelessSelfValidation = c.StatelessSelfValidation + enc.EnableStateSizeTracking = c.EnableStateSizeTracking enc.VMTrace = c.VMTrace enc.VMTraceJsonConfig = c.VMTraceJsonConfig enc.RPCGasCap = c.RPCGasCap enc.RPCEVMTimeout = c.RPCEVMTimeout enc.RPCTxFeeCap = c.RPCTxFeeCap enc.OverrideOsaka = c.OverrideOsaka + enc.OverrideBPO1 = c.OverrideBPO1 + enc.OverrideBPO2 = c.OverrideBPO2 enc.OverrideVerkle = c.OverrideVerkle return &enc, nil } @@ -130,17 +142,23 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error { SnapshotCache *int Preimages *bool FilterLogCacheSize *int + LogQueryLimit *int Miner *miner.Config TxPool *legacypool.Config BlobPool *blobpool.Config GPO *gasprice.Config EnablePreimageRecording *bool + EnableWitnessStats *bool + StatelessSelfValidation *bool + EnableStateSizeTracking *bool VMTrace *string VMTraceJsonConfig *string RPCGasCap *uint64 RPCEVMTimeout *time.Duration RPCTxFeeCap *float64 OverrideOsaka *uint64 `toml:",omitempty"` + OverrideBPO1 *uint64 `toml:",omitempty"` + OverrideBPO2 *uint64 `toml:",omitempty"` OverrideVerkle *uint64 `toml:",omitempty"` } var dec Config @@ -228,6 +246,9 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error { if dec.FilterLogCacheSize != nil { c.FilterLogCacheSize = *dec.FilterLogCacheSize } + if 
dec.LogQueryLimit != nil { + c.LogQueryLimit = *dec.LogQueryLimit + } if dec.Miner != nil { c.Miner = *dec.Miner } @@ -243,6 +264,15 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error { if dec.EnablePreimageRecording != nil { c.EnablePreimageRecording = *dec.EnablePreimageRecording } + if dec.EnableWitnessStats != nil { + c.EnableWitnessStats = *dec.EnableWitnessStats + } + if dec.StatelessSelfValidation != nil { + c.StatelessSelfValidation = *dec.StatelessSelfValidation + } + if dec.EnableStateSizeTracking != nil { + c.EnableStateSizeTracking = *dec.EnableStateSizeTracking + } if dec.VMTrace != nil { c.VMTrace = *dec.VMTrace } @@ -261,6 +291,12 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error { if dec.OverrideOsaka != nil { c.OverrideOsaka = dec.OverrideOsaka } + if dec.OverrideBPO1 != nil { + c.OverrideBPO1 = dec.OverrideBPO1 + } + if dec.OverrideBPO2 != nil { + c.OverrideBPO2 = dec.OverrideBPO2 + } if dec.OverrideVerkle != nil { c.OverrideVerkle = dec.OverrideVerkle } diff --git a/eth/filters/api.go b/eth/filters/api.go index c929810a12..d678c40389 100644 --- a/eth/filters/api.go +++ b/eth/filters/api.go @@ -42,12 +42,10 @@ var ( errBlockHashWithRange = errors.New("can't specify fromBlock/toBlock with blockHash") errPendingLogsUnsupported = errors.New("pending logs are not supported") errExceedMaxTopics = errors.New("exceed max topics") - errExceedMaxAddresses = errors.New("exceed max addresses") + errExceedLogQueryLimit = errors.New("exceed max addresses or topics per search position") ) const ( - // The maximum number of addresses allowed in a filter criteria - maxAddresses = 1000 // The maximum number of topic criteria allowed, vm.LOG4 - vm.LOG0 maxTopics = 4 // The maximum number of allowed topics within a topic criteria @@ -70,20 +68,22 @@ type filter struct { // FilterAPI offers support to create and manage filters. 
This will allow external clients to retrieve various // information related to the Ethereum protocol such as blocks, transactions and logs. type FilterAPI struct { - sys *FilterSystem - events *EventSystem - filtersMu sync.Mutex - filters map[rpc.ID]*filter - timeout time.Duration + sys *FilterSystem + events *EventSystem + filtersMu sync.Mutex + filters map[rpc.ID]*filter + timeout time.Duration + logQueryLimit int } // NewFilterAPI returns a new FilterAPI instance. func NewFilterAPI(system *FilterSystem) *FilterAPI { api := &FilterAPI{ - sys: system, - events: NewEventSystem(system), - filters: make(map[rpc.ID]*filter), - timeout: system.cfg.Timeout, + sys: system, + events: NewEventSystem(system), + filters: make(map[rpc.ID]*filter), + timeout: system.cfg.Timeout, + logQueryLimit: system.cfg.LogQueryLimit, } go api.timeoutLoop(system.cfg.Timeout) @@ -347,8 +347,15 @@ func (api *FilterAPI) GetLogs(ctx context.Context, crit FilterCriteria) ([]*type if len(crit.Topics) > maxTopics { return nil, errExceedMaxTopics } - if len(crit.Addresses) > maxAddresses { - return nil, errExceedMaxAddresses + if api.logQueryLimit != 0 { + if len(crit.Addresses) > api.logQueryLimit { + return nil, errExceedLogQueryLimit + } + for _, topics := range crit.Topics { + if len(topics) > api.logQueryLimit { + return nil, errExceedLogQueryLimit + } + } } var filter *Filter @@ -545,9 +552,6 @@ func (args *FilterCriteria) UnmarshalJSON(data []byte) error { // raw.Address can contain a single address or an array of addresses switch rawAddr := raw.Addresses.(type) { case []interface{}: - if len(rawAddr) > maxAddresses { - return errExceedMaxAddresses - } for i, addr := range rawAddr { if strAddr, ok := addr.(string); ok { addr, err := decodeAddress(strAddr) diff --git a/eth/filters/api_test.go b/eth/filters/api_test.go index 2eb3ee97b3..822bc826f6 100644 --- a/eth/filters/api_test.go +++ b/eth/filters/api_test.go @@ -19,7 +19,6 @@ package filters import ( "encoding/json" "fmt" - "strings" 
"testing" "github.com/ethereum/go-ethereum/common" @@ -183,15 +182,4 @@ func TestUnmarshalJSONNewFilterArgs(t *testing.T) { if len(test7.Topics[2]) != 0 { t.Fatalf("expected 0 topics, got %d topics", len(test7.Topics[2])) } - - // multiple address exceeding max - var test8 FilterCriteria - addresses := make([]string, maxAddresses+1) - for i := 0; i < maxAddresses+1; i++ { - addresses[i] = fmt.Sprintf(`"%s"`, common.HexToAddress(fmt.Sprintf("0x%x", i)).Hex()) - } - vector = fmt.Sprintf(`{"address": [%s]}`, strings.Join(addresses, ", ")) - if err := json.Unmarshal([]byte(vector), &test8); err != errExceedMaxAddresses { - t.Fatal("expected errExceedMaxAddresses, got", err) - } } diff --git a/eth/filters/filter_system.go b/eth/filters/filter_system.go index 751cd417e8..ecf1c870c1 100644 --- a/eth/filters/filter_system.go +++ b/eth/filters/filter_system.go @@ -41,8 +41,9 @@ import ( // Config represents the configuration of the filter system. type Config struct { - LogCacheSize int // maximum number of cached blocks (default: 32) - Timeout time.Duration // how long filters stay active (default: 5min) + LogCacheSize int // maximum number of cached blocks (default: 32) + Timeout time.Duration // how long filters stay active (default: 5min) + LogQueryLimit int // maximum number of addresses allowed in filter criteria (default: 1000) } func (cfg Config) withDefaults() Config { @@ -291,8 +292,15 @@ func (es *EventSystem) SubscribeLogs(crit ethereum.FilterQuery, logs chan []*typ if len(crit.Topics) > maxTopics { return nil, errExceedMaxTopics } - if len(crit.Addresses) > maxAddresses { - return nil, errExceedMaxAddresses + if es.sys.cfg.LogQueryLimit != 0 { + if len(crit.Addresses) > es.sys.cfg.LogQueryLimit { + return nil, errExceedLogQueryLimit + } + for _, topics := range crit.Topics { + if len(topics) > es.sys.cfg.LogQueryLimit { + return nil, errExceedLogQueryLimit + } + } } var from, to rpc.BlockNumber if crit.FromBlock == nil { diff --git 
a/eth/filters/filter_system_test.go b/eth/filters/filter_system_test.go index 013c1ae527..0048e74995 100644 --- a/eth/filters/filter_system_test.go +++ b/eth/filters/filter_system_test.go @@ -36,6 +36,7 @@ import ( "github.com/ethereum/go-ethereum/internal/ethapi" "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/rpc" + "github.com/ethereum/go-ethereum/triedb" ) type testBackend struct { @@ -424,7 +425,7 @@ func TestInvalidLogFilterCreation(t *testing.T) { var ( db = rawdb.NewMemoryDatabase() - _, sys = newTestFilterSystem(db, Config{}) + _, sys = newTestFilterSystem(db, Config{LogQueryLimit: 1000}) api = NewFilterAPI(sys) ) @@ -435,7 +436,7 @@ func TestInvalidLogFilterCreation(t *testing.T) { 1: {FromBlock: big.NewInt(rpc.PendingBlockNumber.Int64()), ToBlock: big.NewInt(100)}, 2: {FromBlock: big.NewInt(rpc.LatestBlockNumber.Int64()), ToBlock: big.NewInt(100)}, 3: {Topics: [][]common.Hash{{}, {}, {}, {}, {}}}, - 4: {Addresses: make([]common.Address, maxAddresses+1)}, + 4: {Addresses: make([]common.Address, api.logQueryLimit+1)}, } for i, test := range testCases { @@ -455,7 +456,7 @@ func TestInvalidGetLogsRequest(t *testing.T) { BaseFee: big.NewInt(params.InitialBaseFee), } db, blocks, _ = core.GenerateChainWithGenesis(genesis, ethash.NewFaker(), 10, func(i int, gen *core.BlockGen) {}) - _, sys = newTestFilterSystem(db, Config{}) + _, sys = newTestFilterSystem(db, Config{LogQueryLimit: 10}) api = NewFilterAPI(sys) blockHash = blocks[0].Hash() unknownBlockHash = common.HexToHash("0x1111111111111111111111111111111111111111111111111111111111111111") @@ -500,8 +501,8 @@ func TestInvalidGetLogsRequest(t *testing.T) { err: errExceedMaxTopics, }, { - f: FilterCriteria{BlockHash: &blockHash, Addresses: make([]common.Address, maxAddresses+1)}, - err: errExceedMaxAddresses, + f: FilterCriteria{BlockHash: &blockHash, Addresses: make([]common.Address, api.logQueryLimit+1)}, + err: errExceedLogQueryLimit, }, } @@ -528,6 +529,92 @@ func 
TestInvalidGetRangeLogsRequest(t *testing.T) { } } +// TestExceedLogQueryLimit tests getLogs with too many addresses or topics +func TestExceedLogQueryLimit(t *testing.T) { + t.Parallel() + + // Test with custom config (LogQueryLimit = 5 for easier testing) + var ( + db = rawdb.NewMemoryDatabase() + backend, sys = newTestFilterSystem(db, Config{LogQueryLimit: 5}) + api = NewFilterAPI(sys) + gspec = &core.Genesis{ + Config: params.TestChainConfig, + Alloc: types.GenesisAlloc{}, + BaseFee: big.NewInt(params.InitialBaseFee), + } + ) + + _, err := gspec.Commit(db, triedb.NewDatabase(db, nil)) + if err != nil { + t.Fatal(err) + } + chain, _ := core.GenerateChain(gspec.Config, gspec.ToBlock(), ethash.NewFaker(), db, 1000, func(i int, gen *core.BlockGen) {}) + + options := core.DefaultConfig().WithStateScheme(rawdb.HashScheme) + options.TxLookupLimit = 0 // index all txs + bc, err := core.NewBlockChain(db, gspec, ethash.NewFaker(), options) + if err != nil { + t.Fatal(err) + } + _, err = bc.InsertChain(chain[:600]) + if err != nil { + t.Fatal(err) + } + + backend.startFilterMaps(200, false, filtermaps.RangeTestParams) + defer backend.stopFilterMaps() + + addresses := make([]common.Address, 6) + for i := range addresses { + addresses[i] = common.HexToAddress("0x1234567890123456789012345678901234567890") + } + + topics := make([]common.Hash, 6) + for i := range topics { + topics[i] = common.HexToHash("0x123456789012345678901234567890123456789001234567890012345678901234") + } + + // Test that 5 addresses do not result in error + // Add FromBlock and ToBlock to make it similar to other invalid tests + if _, err := api.GetLogs(context.Background(), FilterCriteria{ + FromBlock: big.NewInt(0), + ToBlock: big.NewInt(100), + Addresses: addresses[:5], + }); err != nil { + t.Errorf("Expected GetLogs with 5 addresses to return with no error, got: %v", err) + } + + // Test that 6 addresses fails with correct error + if _, err := api.GetLogs(context.Background(), FilterCriteria{ + 
FromBlock: big.NewInt(0), + ToBlock: big.NewInt(100), + Addresses: addresses, + }); err != errExceedLogQueryLimit { + t.Errorf("Expected GetLogs with 6 addresses to return errExceedLogQueryLimit, got: %v", err) + } + + // Test that 5 topics at one position do not result in error + if _, err := api.GetLogs(context.Background(), FilterCriteria{ + FromBlock: big.NewInt(0), + ToBlock: big.NewInt(100), + Addresses: addresses[:1], + Topics: [][]common.Hash{topics[:5]}, + }); err != nil { + t.Errorf("Expected GetLogs with 5 topics at one position to return with no error, got: %v", err) + } + + // Test that 6 topics at one position fails with correct error + if _, err := api.GetLogs(context.Background(), FilterCriteria{ + FromBlock: big.NewInt(0), + ToBlock: big.NewInt(100), + Addresses: addresses[:1], + Topics: [][]common.Hash{topics}, + }); err != errExceedLogQueryLimit { + t.Errorf("Expected GetLogs with 6 topics at one position to return errExceedLogQueryLimit, got: %v", err) + } +} + // TestLogFilter tests whether log filters match the correct logs that are posted to the event feed. 
func TestLogFilter(t *testing.T) { t.Parallel() diff --git a/eth/filters/filter_test.go b/eth/filters/filter_test.go index e99b2f6caa..edec3e027f 100644 --- a/eth/filters/filter_test.go +++ b/eth/filters/filter_test.go @@ -109,11 +109,8 @@ func benchmarkFilters(b *testing.B, history uint64, noHistory bool) { backend.startFilterMaps(history, noHistory, filtermaps.DefaultParams) defer backend.stopFilterMaps() - b.ResetTimer() - filter := sys.NewRangeFilter(0, int64(rpc.LatestBlockNumber), []common.Address{addr1, addr2, addr3, addr4}, nil) - - for i := 0; i < b.N; i++ { + for b.Loop() { filter.begin = 0 logs, _ := filter.Logs(context.Background()) if len(logs) != 4 { diff --git a/eth/protocols/snap/gentrie_test.go b/eth/protocols/snap/gentrie_test.go index 2da4f3c866..5998840b52 100644 --- a/eth/protocols/snap/gentrie_test.go +++ b/eth/protocols/snap/gentrie_test.go @@ -239,7 +239,6 @@ func TestPartialGentree(t *testing.T) { {1, len(entries) - 1}, // no left {2, len(entries) - 1}, // no left {2, len(entries) - 2}, // no left and right - {2, len(entries) - 2}, // no left and right {len(entries) / 2, len(entries) / 2}, // single {0, 0}, // single first {len(entries) - 1, len(entries) - 1}, // single last @@ -348,7 +347,6 @@ func TestGentreeDanglingClearing(t *testing.T) { {1, len(entries) - 1}, // no left {2, len(entries) - 1}, // no left {2, len(entries) - 2}, // no left and right - {2, len(entries) - 2}, // no left and right {len(entries) / 2, len(entries) / 2}, // single {0, 0}, // single first {len(entries) - 1, len(entries) - 1}, // single last diff --git a/eth/protocols/snap/sync_test.go b/eth/protocols/snap/sync_test.go index d599e7ecc3..713b358ff8 100644 --- a/eth/protocols/snap/sync_test.go +++ b/eth/protocols/snap/sync_test.go @@ -106,13 +106,13 @@ func BenchmarkHashing(b *testing.B) { } b.Run("old", func(b *testing.B) { b.ReportAllocs() - for i := 0; i < b.N; i++ { + for b.Loop() { old() } }) b.Run("new", func(b *testing.B) { b.ReportAllocs() - for i := 0; i 
< b.N; i++ { + for b.Loop() { new() } }) @@ -597,7 +597,6 @@ func testSyncBloatedProof(t *testing.T, scheme string) { proof := trienode.NewProofSet() if err := t.accountTrie.Prove(origin[:], proof); err != nil { t.logger.Error("Could not prove origin", "origin", origin, "error", err) - t.logger.Error("Could not prove origin", "origin", origin, "error", err) } // The bloat: add proof of every single element for _, entry := range t.accountValues { diff --git a/eth/sync.go b/eth/sync.go index 61f2b2b376..ddae8443a3 100644 --- a/eth/sync.go +++ b/eth/sync.go @@ -25,7 +25,7 @@ import ( // syncTransactions starts sending all currently pending transactions to the given peer. func (h *handler) syncTransactions(p *eth.Peer) { var hashes []common.Hash - for _, batch := range h.txpool.Pending(txpool.PendingFilter{OnlyPlainTxs: true}) { + for _, batch := range h.txpool.Pending(txpool.PendingFilter{BlobTxs: false}) { for _, tx := range batch { hashes = append(hashes, tx.Hash) } diff --git a/eth/tracers/api.go b/eth/tracers/api.go index b50a532b60..a05b7a7a4a 100644 --- a/eth/tracers/api.go +++ b/eth/tracers/api.go @@ -984,7 +984,7 @@ func (api *API) TraceCall(ctx context.Context, args ethapi.TransactionArgs, bloc return nil, err } var ( - msg = args.ToMessage(blockContext.BaseFee, true, true) + msg = args.ToMessage(blockContext.BaseFee, true) tx = args.ToTransaction(types.LegacyTxType) traceConfig *TraceConfig ) diff --git a/eth/tracers/internal/tracetest/calltrace_test.go b/eth/tracers/internal/tracetest/calltrace_test.go index b454522978..ba0706c598 100644 --- a/eth/tracers/internal/tracetest/calltrace_test.go +++ b/eth/tracers/internal/tracetest/calltrace_test.go @@ -211,11 +211,9 @@ func benchTracer(tracerName string, test *callTracerTest, b *testing.B) { defer state.Close() b.ReportAllocs() - b.ResetTimer() evm := vm.NewEVM(context, state.StateDB, test.Genesis.Config, vm.Config{}) - - for i := 0; i < b.N; i++ { + for b.Loop() { snap := state.StateDB.Snapshot() tracer, err 
:= tracers.DefaultDirectory.New(tracerName, new(tracers.Context), nil, test.Genesis.Config) if err != nil { diff --git a/eth/tracers/internal/tracetest/flat_calltrace_test.go b/eth/tracers/internal/tracetest/flat_calltrace_test.go index d1fa44e9d8..1882ef315e 100644 --- a/eth/tracers/internal/tracetest/flat_calltrace_test.go +++ b/eth/tracers/internal/tracetest/flat_calltrace_test.go @@ -201,7 +201,7 @@ func BenchmarkFlatCallTracer(b *testing.B) { for _, file := range files { filename := strings.TrimPrefix(file, "testdata/call_tracer_flat/") b.Run(camel(strings.TrimSuffix(filename, ".json")), func(b *testing.B) { - for n := 0; n < b.N; n++ { + for b.Loop() { err := flatCallTracerTestRunner("flatCallTracer", filename, "call_tracer_flat", b) if err != nil { b.Fatal(err) diff --git a/eth/tracers/tracers_test.go b/eth/tracers/tracers_test.go index a72dbf6ee6..06edeaf698 100644 --- a/eth/tracers/tracers_test.go +++ b/eth/tracers/tracers_test.go @@ -84,10 +84,8 @@ func BenchmarkTransactionTraceV2(b *testing.B) { if err != nil { b.Fatalf("failed to prepare transaction for tracing: %v", err) } - b.ResetTimer() b.ReportAllocs() - - for i := 0; i < b.N; i++ { + for b.Loop() { tracer := logger.NewStructLogger(&logger.Config{}).Hooks() tracer.OnTxStart(evm.GetVMContext(), tx, msg.From) evm.Config.Tracer = tracer diff --git a/ethdb/pebble/pebble.go b/ethdb/pebble/pebble.go index 2370d4654f..8abe7d4bc7 100644 --- a/ethdb/pebble/pebble.go +++ b/ethdb/pebble/pebble.go @@ -218,9 +218,10 @@ func New(file string, cache int, handles int, namespace string, readonly bool) ( memTableSize = maxMemTableSize - 1 } db := &Database{ - fn: file, - log: logger, - quitChan: make(chan chan error), + fn: file, + log: logger, + quitChan: make(chan chan error), + namespace: namespace, // Use asynchronous write mode by default. 
Otherwise, the overhead of frequent fsync // operations can be significant, especially on platforms with slow fsync performance diff --git a/go.mod b/go.mod index d701c08ad5..03cdf3bb2d 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module github.com/ethereum/go-ethereum -go 1.23.0 +go 1.24.0 require ( github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.2.0 @@ -14,7 +14,7 @@ require ( github.com/cloudflare/cloudflare-go v0.114.0 github.com/cockroachdb/pebble v1.1.5 github.com/consensys/gnark-crypto v0.18.0 - github.com/crate-crypto/go-eth-kzg v1.3.0 + github.com/crate-crypto/go-eth-kzg v1.4.0 github.com/crate-crypto/go-ipa v0.0.0-20240724233137-53bbb0ceb27a github.com/davecgh/go-spew v1.1.1 github.com/dchest/siphash v1.2.3 @@ -22,11 +22,11 @@ require ( github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1 github.com/donovanhide/eventsource v0.0.0-20210830082556-c59027999da0 github.com/dop251/goja v0.0.0-20230605162241-28ee0ee714f3 - github.com/ethereum/c-kzg-4844/v2 v2.1.0 + github.com/ethereum/c-kzg-4844/v2 v2.1.3 + github.com/ethereum/go-bigmodexpfix v0.0.0-20250911101455-f9e208c548ab github.com/ethereum/go-verkle v0.2.2 github.com/fatih/color v1.16.0 github.com/ferranbt/fastssz v0.1.4 - github.com/fjl/gencodec v0.1.0 github.com/fsnotify/fsnotify v1.6.0 github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff github.com/gofrs/flock v0.12.1 @@ -37,7 +37,7 @@ require ( github.com/gorilla/websocket v1.4.2 github.com/graph-gophers/graphql-go v1.3.0 github.com/hashicorp/go-bexpr v0.1.10 - github.com/holiman/billy v0.0.0-20240216141850-2abb0c79d3c4 + github.com/holiman/billy v0.0.0-20250707135307-f2f9b9aae7db github.com/holiman/bloomfilter/v2 v2.0.3 github.com/holiman/uint256 v1.3.2 github.com/huin/goupnp v1.3.0 @@ -60,7 +60,7 @@ require ( github.com/shirou/gopsutil v3.21.4-0.20210419000835-c7a38de76ee5+incompatible github.com/status-im/keycard-go v0.2.0 github.com/stretchr/testify v1.10.0 - github.com/supranational/blst v0.3.14 + 
github.com/supranational/blst v0.3.16-0.20250831170142-f48500c1fdbe github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 github.com/urfave/cli/v2 v2.27.5 go.uber.org/automaxprocs v1.5.2 @@ -68,7 +68,7 @@ require ( golang.org/x/crypto v0.36.0 golang.org/x/exp v0.0.0-20230626212559-97b1e661b5df golang.org/x/sync v0.12.0 - golang.org/x/sys v0.31.0 + golang.org/x/sys v0.36.0 golang.org/x/text v0.23.0 golang.org/x/time v0.9.0 golang.org/x/tools v0.29.0 @@ -103,6 +103,7 @@ require ( github.com/deepmap/oapi-codegen v1.6.0 // indirect github.com/dlclark/regexp2 v1.7.0 // indirect github.com/emicklei/dot v1.6.2 // indirect + github.com/fjl/gencodec v0.1.0 // indirect github.com/garslo/gogen v0.0.0-20170306192744-1d203ffc1f61 // indirect github.com/getsentry/sentry-go v0.27.0 // indirect github.com/go-ole/go-ole v1.3.0 // indirect @@ -146,3 +147,9 @@ require ( golang.org/x/net v0.38.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect ) + +tool ( + github.com/fjl/gencodec + golang.org/x/tools/cmd/stringer + google.golang.org/protobuf/cmd/protoc-gen-go +) diff --git a/go.sum b/go.sum index 53913262ae..764cfdb668 100644 --- a/go.sum +++ b/go.sum @@ -78,8 +78,8 @@ github.com/consensys/gnark-crypto v0.18.0 h1:vIye/FqI50VeAr0B3dx+YjeIvmc3LWz4yEf github.com/consensys/gnark-crypto v0.18.0/go.mod h1:L3mXGFTe1ZN+RSJ+CLjUt9x7PNdx8ubaYfDROyp2Z8c= github.com/cpuguy83/go-md2man/v2 v2.0.5 h1:ZtcqGrnekaHpVLArFSe4HK5DoKx1T0rq2DwVB0alcyc= github.com/cpuguy83/go-md2man/v2 v2.0.5/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= -github.com/crate-crypto/go-eth-kzg v1.3.0 h1:05GrhASN9kDAidaFJOda6A4BEvgvuXbazXg/0E3OOdI= -github.com/crate-crypto/go-eth-kzg v1.3.0/go.mod h1:J9/u5sWfznSObptgfa92Jq8rTswn6ahQWEuiLHOjCUI= +github.com/crate-crypto/go-eth-kzg v1.4.0 h1:WzDGjHk4gFg6YzV0rJOAsTK4z3Qkz5jd4RE3DAvPFkg= +github.com/crate-crypto/go-eth-kzg v1.4.0/go.mod h1:J9/u5sWfznSObptgfa92Jq8rTswn6ahQWEuiLHOjCUI= github.com/crate-crypto/go-ipa v0.0.0-20240724233137-53bbb0ceb27a 
h1:W8mUrRp6NOVl3J+MYp5kPMoUZPp7aOYHtaua31lwRHg= github.com/crate-crypto/go-ipa v0.0.0-20240724233137-53bbb0ceb27a/go.mod h1:sTwzHBvIzm2RfVCGNEBZgRyjwK40bVoun3ZnGOCafNM= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= @@ -112,8 +112,10 @@ github.com/dop251/goja_nodejs v0.0.0-20210225215109-d91c329300e7/go.mod h1:hn7BA github.com/dop251/goja_nodejs v0.0.0-20211022123610-8dd9abb0616d/go.mod h1:DngW8aVqWbuLRMHItjPUyqdj+HWPvnQe8V8y1nDpIbM= github.com/emicklei/dot v1.6.2 h1:08GN+DD79cy/tzN6uLCT84+2Wk9u+wvqP+Hkx/dIR8A= github.com/emicklei/dot v1.6.2/go.mod h1:DeV7GvQtIw4h2u73RKBkkFdvVAz0D9fzeJrgPW6gy/s= -github.com/ethereum/c-kzg-4844/v2 v2.1.0 h1:gQropX9YFBhl3g4HYhwE70zq3IHFRgbbNPw0Shwzf5w= -github.com/ethereum/c-kzg-4844/v2 v2.1.0/go.mod h1:TC48kOKjJKPbN7C++qIgt0TJzZ70QznYR7Ob+WXl57E= +github.com/ethereum/c-kzg-4844/v2 v2.1.3 h1:DQ21UU0VSsuGy8+pcMJHDS0CV1bKmJmxsJYK8l3MiLU= +github.com/ethereum/c-kzg-4844/v2 v2.1.3/go.mod h1:fyNcYI/yAuLWJxf4uzVtS8VDKeoAaRM8G/+ADz/pRdA= +github.com/ethereum/go-bigmodexpfix v0.0.0-20250911101455-f9e208c548ab h1:rvv6MJhy07IMfEKuARQ9TKojGqLVNxQajaXEp/BoqSk= +github.com/ethereum/go-bigmodexpfix v0.0.0-20250911101455-f9e208c548ab/go.mod h1:IuLm4IsPipXKF7CW5Lzf68PIbZ5yl7FFd74l/E0o9A8= github.com/ethereum/go-verkle v0.2.2 h1:I2W0WjnrFUIzzVPwm8ykY+7pL2d4VhlsePn4j7cnFk8= github.com/ethereum/go-verkle v0.2.2/go.mod h1:M3b90YRnzqKyyzBEWJGqj8Qff4IDeXnzFw0P9bFw3uk= github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM= @@ -188,8 +190,8 @@ github.com/graph-gophers/graphql-go v1.3.0 h1:Eb9x/q6MFpCLz7jBCiP/WTxjSDrYLR1QY4 github.com/graph-gophers/graphql-go v1.3.0/go.mod h1:9CQHMSxwO4MprSdzoIEobiHpoLtHm77vfxsvsIN5Vuc= github.com/hashicorp/go-bexpr v0.1.10 h1:9kuI5PFotCboP3dkDYFr/wi0gg0QVbSNz5oFRpxn4uE= github.com/hashicorp/go-bexpr v0.1.10/go.mod h1:oxlubA2vC/gFVfX1A6JGp7ls7uCDlfJn732ehYYg+g0= -github.com/holiman/billy v0.0.0-20240216141850-2abb0c79d3c4 
h1:X4egAf/gcS1zATw6wn4Ej8vjuVGxeHdan+bRb2ebyv4= -github.com/holiman/billy v0.0.0-20240216141850-2abb0c79d3c4/go.mod h1:5GuXa7vkL8u9FkFuWdVvfR5ix8hRB7DbOAaYULamFpc= +github.com/holiman/billy v0.0.0-20250707135307-f2f9b9aae7db h1:IZUYC/xb3giYwBLMnr8d0TGTzPKFGNTCGgGLoyeX330= +github.com/holiman/billy v0.0.0-20250707135307-f2f9b9aae7db/go.mod h1:xTEYN9KCHxuYHs+NmrmzFcnvHMzLLNiGFafCb1n3Mfg= github.com/holiman/bloomfilter/v2 v2.0.3 h1:73e0e/V0tCydx14a0SCYS/EWCxgwLZ18CZcZKVu0fao= github.com/holiman/bloomfilter/v2 v2.0.3/go.mod h1:zpoh+gs7qcpqrHr3dB55AMiJwo0iURXE7ZOP9L9hSkA= github.com/holiman/uint256 v1.2.0/go.mod h1:y4ga/t+u+Xwd7CpDgZESaRcWy0I7XMlTMA25ApIH5Jw= @@ -349,8 +351,8 @@ github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXl github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= -github.com/supranational/blst v0.3.14 h1:xNMoHRJOTwMn63ip6qoWJ2Ymgvj7E2b9jY2FAwY+qRo= -github.com/supranational/blst v0.3.14/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw= +github.com/supranational/blst v0.3.16-0.20250831170142-f48500c1fdbe h1:nbdqkIGOGfUAD54q1s2YBcBz/WcsxCO9HUQ4aGV5hUw= +github.com/supranational/blst v0.3.16-0.20250831170142-f48500c1fdbe/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw= github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY= github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc= github.com/tklauser/go-sysconf v0.3.12 h1:0QaGUFOdQaIVdPgfITYzaTegZvdCjmYO52cSFAEVmqU= @@ -449,8 +451,8 @@ golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik= -golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/sys v0.36.0 h1:KVRy2GtZBrk1cBYA7MKu5bEZFxQk4NIDV6RLVcC8o0k= +golang.org/x/sys v0.36.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= diff --git a/internal/build/util.go b/internal/build/util.go index aee8bf0fc8..6e6632c750 100644 --- a/internal/build/util.go +++ b/internal/build/util.go @@ -21,8 +21,6 @@ import ( "bytes" "flag" "fmt" - "go/parser" - "go/token" "io" "log" "os" @@ -39,6 +37,9 @@ var DryRunFlag = flag.Bool("n", false, "dry run, don't execute commands") // MustRun executes the given command and exits the host process for // any error. func MustRun(cmd *exec.Cmd) { + if cmd.Dir != "" && cmd.Dir != "." { + fmt.Printf("(in %s) ", cmd.Dir) + } fmt.Println(">>>", printArgs(cmd.Args)) if !*DryRunFlag { cmd.Stderr = os.Stderr @@ -71,6 +72,13 @@ func MustRunCommand(cmd string, args ...string) { // printed while it runs. This is useful for CI builds where the process will be stopped // when there is no output. func MustRunCommandWithOutput(cmd string, args ...string) { + MustRunWithOutput(exec.Command(cmd, args...)) +} + +// MustRunWithOutput runs the given command, and ensures that some output will be printed +// while it runs. This is useful for CI builds where the process will be stopped when +// there is no output. 
+func MustRunWithOutput(cmd *exec.Cmd) { interval := time.NewTicker(time.Minute) done := make(chan struct{}) defer interval.Stop() @@ -85,7 +93,7 @@ func MustRunCommandWithOutput(cmd string, args ...string) { } } }() - MustRun(exec.Command(cmd, args...)) + MustRun(cmd) } var warnedAboutGit bool @@ -209,28 +217,18 @@ func UploadSFTP(identityFile, host, dir string, files []string) error { // FindMainPackages finds all 'main' packages in the given directory and returns their // package paths. -func FindMainPackages(dir string) []string { - var commands []string - cmds, err := os.ReadDir(dir) +func FindMainPackages(tc *GoToolchain, pattern string) []string { + list := tc.Go("list", "-f", `{{if eq .Name "main"}}{{.ImportPath}}{{end}}`, pattern) + output, err := list.Output() if err != nil { - log.Fatal(err) + log.Fatal("go list failed:", err) } - for _, cmd := range cmds { - pkgdir := filepath.Join(dir, cmd.Name()) - if !cmd.IsDir() { - continue - } - pkgs, err := parser.ParseDir(token.NewFileSet(), pkgdir, nil, parser.PackageClauseOnly) - if err != nil { - log.Fatal(err) - } - for name := range pkgs { - if name == "main" { - path := "./" + filepath.ToSlash(pkgdir) - commands = append(commands, path) - break - } + var result []string + for l := range bytes.Lines(output) { + l = bytes.TrimSpace(l) + if len(l) > 0 { + result = append(result, string(l)) } } - return commands + return result } diff --git a/internal/ethapi/api.go b/internal/ethapi/api.go index 554f525290..ebb8ece730 100644 --- a/internal/ethapi/api.go +++ b/internal/ethapi/api.go @@ -699,15 +699,15 @@ func doCall(ctx context.Context, b Backend, args TransactionArgs, state *state.S } else { gp.AddGas(globalGasCap) } - return applyMessage(ctx, b, args, state, header, timeout, gp, &blockCtx, &vm.Config{NoBaseFee: true}, precompiles, true) + return applyMessage(ctx, b, args, state, header, timeout, gp, &blockCtx, &vm.Config{NoBaseFee: true}, precompiles) } -func applyMessage(ctx context.Context, b Backend, args 
TransactionArgs, state *state.StateDB, header *types.Header, timeout time.Duration, gp *core.GasPool, blockContext *vm.BlockContext, vmConfig *vm.Config, precompiles vm.PrecompiledContracts, skipChecks bool) (*core.ExecutionResult, error) { +func applyMessage(ctx context.Context, b Backend, args TransactionArgs, state *state.StateDB, header *types.Header, timeout time.Duration, gp *core.GasPool, blockContext *vm.BlockContext, vmConfig *vm.Config, precompiles vm.PrecompiledContracts) (*core.ExecutionResult, error) { // Get a new instance of the EVM. if err := args.CallDefaults(gp.Gas(), blockContext.BaseFee, b.ChainConfig().ChainID); err != nil { return nil, err } - msg := args.ToMessage(header.BaseFee, skipChecks, skipChecks) + msg := args.ToMessage(header.BaseFee, true) // Lower the basefee to 0 to avoid breaking EVM // invariants (basefee < feecap). if msg.GasPrice.Sign() == 0 { @@ -858,7 +858,7 @@ func DoEstimateGas(ctx context.Context, b Backend, args TransactionArgs, blockNr if err := args.CallDefaults(gasCap, header.BaseFee, b.ChainConfig().ChainID); err != nil { return 0, err } - call := args.ToMessage(header.BaseFee, true, true) + call := args.ToMessage(header.BaseFee, true) // Run the gas estimation and wrap any revertals into a custom return estimate, revert, err := gasestimator.Estimate(ctx, call, opts, gasCap) @@ -1301,7 +1301,7 @@ func AccessList(ctx context.Context, b Backend, blockNrOrHash rpc.BlockNumberOrH statedb := db.Copy() // Set the accesslist to the last al args.AccessList = &accessList - msg := args.ToMessage(header.BaseFee, true, true) + msg := args.ToMessage(header.BaseFee, true) // Apply the transaction with the access list tracer tracer := logger.NewAccessListTracer(accessList, addressesToExclude) diff --git a/internal/ethapi/override/override.go b/internal/ethapi/override/override.go index 0bcf3c444d..9d57a78651 100644 --- a/internal/ethapi/override/override.go +++ b/internal/ethapi/override/override.go @@ -91,7 +91,7 @@ func (diff 
*StateOverride) Apply(statedb *state.StateDB, precompiles vm.Precompi } // Override account(contract) code. if account.Code != nil { - statedb.SetCode(addr, *account.Code) + statedb.SetCode(addr, *account.Code, tracing.CodeChangeUnspecified) } // Override account balance. if account.Balance != nil { diff --git a/internal/ethapi/simulate.go b/internal/ethapi/simulate.go index 362536bcb5..75b5c5ffa8 100644 --- a/internal/ethapi/simulate.go +++ b/internal/ethapi/simulate.go @@ -287,7 +287,7 @@ func (sim *simulator) processBlock(ctx context.Context, block *simBlock, header, tracer.reset(txHash, uint(i)) sim.state.SetTxContext(txHash, i) // EoA check is always skipped, even in validation mode. - msg := call.ToMessage(header.BaseFee, !sim.validate, true) + msg := call.ToMessage(header.BaseFee, !sim.validate) result, err := applyMessageWithEVM(ctx, evm, msg, timeout, sim.gp) if err != nil { txErr := txValidationError(err) @@ -474,22 +474,30 @@ func (sim *simulator) makeHeaders(blocks []simBlock) ([]*types.Header, error) { overrides := block.BlockOverrides var withdrawalsHash *common.Hash - if sim.chainConfig.IsShanghai(overrides.Number.ToInt(), (uint64)(*overrides.Time)) { + number := overrides.Number.ToInt() + timestamp := (uint64)(*overrides.Time) + if sim.chainConfig.IsShanghai(number, timestamp) { withdrawalsHash = &types.EmptyWithdrawalsHash } var parentBeaconRoot *common.Hash - if sim.chainConfig.IsCancun(overrides.Number.ToInt(), (uint64)(*overrides.Time)) { + if sim.chainConfig.IsCancun(number, timestamp) { parentBeaconRoot = &common.Hash{} if overrides.BeaconRoot != nil { parentBeaconRoot = overrides.BeaconRoot } } + // Set difficulty to zero if the given block is post-merge. Without this, all post-merge hardforks would remain inactive. + // For example, calling eth_simulateV1(..., blockParameter: 0x0) on hoodi network will cause all blocks to have a difficulty of 1 and be treated as pre-merge. 
+ difficulty := header.Difficulty + if sim.chainConfig.IsPostMerge(number.Uint64(), timestamp) { + difficulty = big.NewInt(0) + } header = overrides.MakeHeader(&types.Header{ UncleHash: types.EmptyUncleHash, ReceiptHash: types.EmptyReceiptsHash, TxHash: types.EmptyTxsHash, Coinbase: header.Coinbase, - Difficulty: header.Difficulty, + Difficulty: difficulty, GasLimit: header.GasLimit, WithdrawalsHash: withdrawalsHash, ParentBeaconRoot: parentBeaconRoot, diff --git a/internal/ethapi/transaction_args.go b/internal/ethapi/transaction_args.go index f80ef6d080..23aa8e5947 100644 --- a/internal/ethapi/transaction_args.go +++ b/internal/ethapi/transaction_args.go @@ -443,7 +443,7 @@ func (args *TransactionArgs) CallDefaults(globalGasCap uint64, baseFee *big.Int, // core evm. This method is used in calls and traces that do not require a real // live transaction. // Assumes that fields are not nil, i.e. setDefaults or CallDefaults has been called. -func (args *TransactionArgs) ToMessage(baseFee *big.Int, skipNonceCheck, skipEoACheck bool) *core.Message { +func (args *TransactionArgs) ToMessage(baseFee *big.Int, skipNonceCheck bool) *core.Message { var ( gasPrice *big.Int gasFeeCap *big.Int @@ -491,7 +491,7 @@ func (args *TransactionArgs) ToMessage(baseFee *big.Int, skipNonceCheck, skipEoA BlobHashes: args.BlobHashes, SetCodeAuthorizations: args.AuthorizationList, SkipNonceChecks: skipNonceCheck, - SkipFromEOACheck: skipEoACheck, + SkipTransactionChecks: true, } } diff --git a/internal/web3ext/web3ext.go b/internal/web3ext/web3ext.go index e81e23ef16..0aedffe230 100644 --- a/internal/web3ext/web3ext.go +++ b/internal/web3ext/web3ext.go @@ -468,6 +468,12 @@ web3._extend({ call: 'debug_sync', params: 1 }), + new web3._extend.Method({ + name: 'stateSize', + call: 'debug_stateSize', + params: 1, + inputFormatter: [null], + }), ], properties: [] }); diff --git a/log/format_test.go b/log/format_test.go index d4c1df4abc..bb740ceb84 100644 --- a/log/format_test.go +++ 
b/log/format_test.go @@ -10,7 +10,7 @@ var sink []byte func BenchmarkPrettyInt64Logfmt(b *testing.B) { buf := make([]byte, 100) b.ReportAllocs() - for i := 0; i < b.N; i++ { + for b.Loop() { sink = appendInt64(buf, rand.Int63()) } } @@ -18,7 +18,7 @@ func BenchmarkPrettyInt64Logfmt(b *testing.B) { func BenchmarkPrettyUint64Logfmt(b *testing.B) { buf := make([]byte, 100) b.ReportAllocs() - for i := 0; i < b.N; i++ { + for b.Loop() { sink = appendUint64(buf, rand.Uint64(), false) } } diff --git a/log/logger_test.go b/log/logger_test.go index 3ec6d2e19c..dae8497204 100644 --- a/log/logger_test.go +++ b/log/logger_test.go @@ -70,9 +70,10 @@ func TestJSONHandler(t *testing.T) { func BenchmarkTraceLogging(b *testing.B) { SetDefault(NewLogger(NewTerminalHandler(io.Discard, true))) - b.ResetTimer() - for i := 0; i < b.N; i++ { + i := 0 + for b.Loop() { Trace("a message", "v", i) + i++ } } @@ -99,8 +100,8 @@ func benchmarkLogger(b *testing.B, l Logger) { err = errors.New("oh nooes it's crap") ) b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { + i := 0 + for b.Loop() { l.Info("This is a message", "foo", int16(i), "bytes", bb, @@ -109,8 +110,8 @@ func benchmarkLogger(b *testing.B, l Logger) { "bigint", bigint, "nilbig", nilbig, "err", err) + i++ } - b.StopTimer() } func TestLoggerOutput(t *testing.T) { @@ -161,18 +162,18 @@ const termTimeFormat = "01-02|15:04:05.000" func BenchmarkAppendFormat(b *testing.B) { var now = time.Now() b.Run("fmt time.Format", func(b *testing.B) { - for i := 0; i < b.N; i++ { + for b.Loop() { fmt.Fprintf(io.Discard, "%s", now.Format(termTimeFormat)) } }) b.Run("time.AppendFormat", func(b *testing.B) { - for i := 0; i < b.N; i++ { + for b.Loop() { now.AppendFormat(nil, termTimeFormat) } }) var buf = new(bytes.Buffer) b.Run("time.Custom", func(b *testing.B) { - for i := 0; i < b.N; i++ { + for b.Loop() { writeTimeTermFormat(buf, now) buf.Reset() } diff --git a/metrics/runtimehistogram.go b/metrics/runtimehistogram.go index 
0ab8914602..efbed498af 100644 --- a/metrics/runtimehistogram.go +++ b/metrics/runtimehistogram.go @@ -204,7 +204,7 @@ func (h *runtimeHistogramSnapshot) Percentiles(ps []float64) []float64 { thresholds := make([]float64, len(ps)) indexes := make([]int, len(ps)) for i, percentile := range ps { - thresholds[i] = count * math.Max(0, math.Min(1.0, percentile)) + thresholds[i] = count * max(0, min(1.0, percentile)) indexes[i] = i } sort.Sort(floatsAscendingKeepingIndex{thresholds, indexes}) diff --git a/miner/miner.go b/miner/miner.go index 20845b5036..810cc20a6c 100644 --- a/miner/miner.go +++ b/miner/miner.go @@ -52,7 +52,7 @@ type Config struct { // DefaultConfig contains default settings for miner. var DefaultConfig = Config{ - GasCeil: 45_000_000, + GasCeil: 60_000_000, GasPrice: big.NewInt(params.GWei / 1000), // The default recommit time is chosen as two seconds since diff --git a/miner/worker.go b/miner/worker.go index 6baec5e365..c0574eac23 100644 --- a/miner/worker.go +++ b/miner/worker.go @@ -481,10 +481,15 @@ func (miner *Miner) fillTransactions(interrupt *atomic.Int32, env *environment) if miner.chainConfig.IsOsaka(env.header.Number, env.header.Time) { filter.GasLimitCap = params.MaxTxGas } - filter.OnlyPlainTxs, filter.OnlyBlobTxs = true, false + filter.BlobTxs = false pendingPlainTxs := miner.txpool.Pending(filter) - filter.OnlyPlainTxs, filter.OnlyBlobTxs = false, true + filter.BlobTxs = true + if miner.chainConfig.IsOsaka(env.header.Number, env.header.Time) { + filter.BlobVersion = types.BlobSidecarVersion1 + } else { + filter.BlobVersion = types.BlobSidecarVersion0 + } pendingBlobTxs := miner.txpool.Pending(filter) // Split the pending transactions into locals and remotes. diff --git a/p2p/discover/lookup.go b/p2p/discover/lookup.go index 09808b71e0..9cca0118ac 100644 --- a/p2p/discover/lookup.go +++ b/p2p/discover/lookup.go @@ -27,6 +27,7 @@ import ( // lookup performs a network search for nodes close to the given target. 
It approaches the // target by querying nodes that are closer to it on each iteration. The given target does // not need to be an actual node identifier. +// lookup on an empty table will return immediately with no nodes. type lookup struct { tab *Table queryfunc queryFunc @@ -49,11 +50,15 @@ func newLookup(ctx context.Context, tab *Table, target enode.ID, q queryFunc) *l result: nodesByDistance{target: target}, replyCh: make(chan []*enode.Node, alpha), cancelCh: ctx.Done(), - queries: -1, } // Don't query further if we hit ourself. // Unlikely to happen often in practice. it.asked[tab.self().ID()] = true + it.seen[tab.self().ID()] = true + + // Initialize the lookup with nodes from table. + closest := it.tab.findnodeByID(it.result.target, bucketSize, false) + it.addNodes(closest.entries) return it } @@ -64,22 +69,19 @@ func (it *lookup) run() []*enode.Node { return it.result.entries } +func (it *lookup) empty() bool { + return len(it.replyBuffer) == 0 +} + // advance advances the lookup until any new nodes have been found. // It returns false when the lookup has ended. 
func (it *lookup) advance() bool { for it.startQueries() { select { case nodes := <-it.replyCh: - it.replyBuffer = it.replyBuffer[:0] - for _, n := range nodes { - if n != nil && !it.seen[n.ID()] { - it.seen[n.ID()] = true - it.result.push(n, bucketSize) - it.replyBuffer = append(it.replyBuffer, n) - } - } it.queries-- - if len(it.replyBuffer) > 0 { + it.addNodes(nodes) + if !it.empty() { return true } case <-it.cancelCh: @@ -89,6 +91,17 @@ func (it *lookup) advance() bool { return false } +func (it *lookup) addNodes(nodes []*enode.Node) { + it.replyBuffer = it.replyBuffer[:0] + for _, n := range nodes { + if n != nil && !it.seen[n.ID()] { + it.seen[n.ID()] = true + it.result.push(n, bucketSize) + it.replyBuffer = append(it.replyBuffer, n) + } + } +} + func (it *lookup) shutdown() { for it.queries > 0 { <-it.replyCh @@ -103,20 +116,6 @@ func (it *lookup) startQueries() bool { return false } - // The first query returns nodes from the local table. - if it.queries == -1 { - closest := it.tab.findnodeByID(it.result.target, bucketSize, false) - // Avoid finishing the lookup too quickly if table is empty. It'd be better to wait - // for the table to fill in this case, but there is no good mechanism for that - // yet. - if len(closest.entries) == 0 { - it.slowdown() - } - it.queries = 1 - it.replyCh <- closest.entries - return true - } - // Ask the closest nodes that we haven't asked yet. for i := 0; i < len(it.result.entries) && it.queries < alpha; i++ { n := it.result.entries[i] @@ -130,15 +129,6 @@ func (it *lookup) startQueries() bool { return it.queries > 0 } -func (it *lookup) slowdown() { - sleep := time.NewTimer(1 * time.Second) - defer sleep.Stop() - select { - case <-sleep.C: - case <-it.tab.closeReq: - } -} - func (it *lookup) query(n *enode.Node, reply chan<- []*enode.Node) { r, err := it.queryfunc(n) if !errors.Is(err, errClosed) { // avoid recording failures on shutdown. 
@@ -153,12 +143,16 @@ func (it *lookup) query(n *enode.Node, reply chan<- []*enode.Node) { // lookupIterator performs lookup operations and iterates over all seen nodes. // When a lookup finishes, a new one is created through nextLookup. +// LookupIterator waits for table initialization and triggers a table refresh +// when necessary. + type lookupIterator struct { - buffer []*enode.Node - nextLookup lookupFunc - ctx context.Context - cancel func() - lookup *lookup + buffer []*enode.Node + nextLookup lookupFunc + ctx context.Context + cancel func() + lookup *lookup + tabRefreshing <-chan struct{} } type lookupFunc func(ctx context.Context) *lookup @@ -182,6 +176,7 @@ func (it *lookupIterator) Next() bool { if len(it.buffer) > 0 { it.buffer = it.buffer[1:] } + // Advance the lookup to refill the buffer. for len(it.buffer) == 0 { if it.ctx.Err() != nil { @@ -191,17 +186,55 @@ func (it *lookupIterator) Next() bool { } if it.lookup == nil { it.lookup = it.nextLookup(it.ctx) + if it.lookup.empty() { + // If the lookup is empty right after creation, it means the local table + // is in a degraded state, and we need to wait for it to fill again. + it.lookupFailed(it.lookup.tab, 1*time.Minute) + it.lookup = nil + continue + } + // Yield the initial nodes from the iterator before advancing the lookup. + it.buffer = it.lookup.replyBuffer continue } - if !it.lookup.advance() { - it.lookup = nil - continue - } + + newNodes := it.lookup.advance() it.buffer = it.lookup.replyBuffer + if !newNodes { + it.lookup = nil + } } return true } +// lookupFailed handles failed lookup attempts. This can be called when the table has +// exited, or when it runs out of nodes. +func (it *lookupIterator) lookupFailed(tab *Table, timeout time.Duration) { + tout, cancel := context.WithTimeout(it.ctx, timeout) + defer cancel() + + // Wait for Table initialization to complete, in case it is still in progress. 
+ select { + case <-tab.initDone: + case <-tout.Done(): + return + } + + // Wait for ongoing refresh operation, or trigger one. + if it.tabRefreshing == nil { + it.tabRefreshing = tab.refresh() + } + select { + case <-it.tabRefreshing: + it.tabRefreshing = nil + case <-tout.Done(): + return + } + + // Wait for the table to fill. + tab.waitForNodes(tout, 1) +} + // Close ends the iterator. func (it *lookupIterator) Close() { it.cancel() diff --git a/p2p/discover/table.go b/p2p/discover/table.go index b6c35aaaa9..6a1c7494ee 100644 --- a/p2p/discover/table.go +++ b/p2p/discover/table.go @@ -32,6 +32,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/mclock" + "github.com/ethereum/go-ethereum/event" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/metrics" "github.com/ethereum/go-ethereum/p2p/enode" @@ -84,6 +85,7 @@ type Table struct { closeReq chan struct{} closed chan struct{} + nodeFeed event.FeedOf[*enode.Node] nodeAddedHook func(*bucket, *tableNode) nodeRemovedHook func(*bucket, *tableNode) } @@ -567,6 +569,8 @@ func (tab *Table) nodeAdded(b *bucket, n *tableNode) { } n.addedToBucket = time.Now() tab.revalidation.nodeAdded(tab, n) + + tab.nodeFeed.Send(n.Node) if tab.nodeAddedHook != nil { tab.nodeAddedHook(b, n) } @@ -702,3 +706,38 @@ func (tab *Table) deleteNode(n *enode.Node) { b := tab.bucket(n.ID()) tab.deleteInBucket(b, n.ID()) } + +// waitForNodes blocks until the table contains at least n nodes. +func (tab *Table) waitForNodes(ctx context.Context, n int) error { + getlength := func() (count int) { + for _, b := range &tab.buckets { + count += len(b.entries) + } + return count + } + + var ch chan *enode.Node + for { + tab.mutex.Lock() + if getlength() >= n { + tab.mutex.Unlock() + return nil + } + if ch == nil { + // Init subscription. + ch = make(chan *enode.Node) + sub := tab.nodeFeed.Subscribe(ch) + defer sub.Unsubscribe() + } + tab.mutex.Unlock() + + // Wait for a node add event. 
+ select { + case <-ch: + case <-ctx.Done(): + return ctx.Err() + case <-tab.closeReq: + return errClosed + } + } +} diff --git a/p2p/discover/v4_udp_test.go b/p2p/discover/v4_udp_test.go index 1af31f4f1b..44863183fa 100644 --- a/p2p/discover/v4_udp_test.go +++ b/p2p/discover/v4_udp_test.go @@ -24,10 +24,12 @@ import ( "errors" "fmt" "io" + "maps" "math/rand" "net" "net/netip" "reflect" + "slices" "sync" "testing" "time" @@ -509,18 +511,26 @@ func TestUDPv4_smallNetConvergence(t *testing.T) { // they have all found each other. status := make(chan error, len(nodes)) for i := range nodes { - node := nodes[i] + self := nodes[i] go func() { - found := make(map[enode.ID]bool, len(nodes)) - it := node.RandomNodes() + missing := make(map[enode.ID]bool, len(nodes)) + for _, n := range nodes { + if n.Self().ID() == self.Self().ID() { + continue // skip self + } + missing[n.Self().ID()] = true + } + + it := self.RandomNodes() for it.Next() { - found[it.Node().ID()] = true - if len(found) == len(nodes) { + delete(missing, it.Node().ID()) + if len(missing) == 0 { status <- nil return } } - status <- fmt.Errorf("node %s didn't find all nodes", node.Self().ID().TerminalString()) + missingIDs := slices.Collect(maps.Keys(missing)) + status <- fmt.Errorf("node %s didn't find all nodes, missing %v", self.Self().ID().TerminalString(), missingIDs) }() } @@ -537,7 +547,6 @@ func TestUDPv4_smallNetConvergence(t *testing.T) { received++ if err != nil { t.Error("ERROR:", err) - return } } } diff --git a/p2p/discover/v5_udp.go b/p2p/discover/v5_udp.go index 9679f5c61a..c13032e1af 100644 --- a/p2p/discover/v5_udp.go +++ b/p2p/discover/v5_udp.go @@ -328,12 +328,6 @@ func (t *UDPv5) TalkRequestToID(id enode.ID, addr netip.AddrPort, protocol strin // RandomNodes returns an iterator that finds random nodes in the DHT. func (t *UDPv5) RandomNodes() enode.Iterator { - if t.tab.len() == 0 { - // All nodes were dropped, refresh. 
The very first query will hit this - // case and run the bootstrapping logic. - <-t.tab.refresh() - } - return newLookupIterator(t.closeCtx, t.newRandomLookup) } diff --git a/p2p/discover/v5wire/encoding_test.go b/p2p/discover/v5wire/encoding_test.go index 2304d0f132..5774cb3d8c 100644 --- a/p2p/discover/v5wire/encoding_test.go +++ b/p2p/discover/v5wire/encoding_test.go @@ -477,10 +477,9 @@ func BenchmarkV5_DecodeHandshakePingSecp256k1(b *testing.B) { b.Fatal("can't encode handshake packet") } challenge.Node = nil // force ENR signature verification in decoder - b.ResetTimer() input := make([]byte, len(enc)) - for i := 0; i < b.N; i++ { + for b.Loop() { copy(input, enc) net.nodeB.c.sc.storeSentHandshake(idA, "", challenge) _, _, _, err := net.nodeB.c.Decode(input, "") @@ -507,10 +506,9 @@ func BenchmarkV5_DecodePing(b *testing.B) { if err != nil { b.Fatalf("can't encode: %v", err) } - b.ResetTimer() input := make([]byte, len(enc)) - for i := 0; i < b.N; i++ { + for b.Loop() { copy(input, enc) _, _, packet, _ := net.nodeB.c.Decode(input, addrB) if _, ok := packet.(*Ping); !ok { diff --git a/p2p/netutil/net_test.go b/p2p/netutil/net_test.go index 569c7ac454..8c810f494f 100644 --- a/p2p/netutil/net_test.go +++ b/p2p/netutil/net_test.go @@ -187,7 +187,7 @@ func TestCheckRelayIP(t *testing.T) { func BenchmarkCheckRelayIP(b *testing.B) { sender := parseIP("23.55.1.242") addr := parseIP("23.55.1.2") - for i := 0; i < b.N; i++ { + for b.Loop() { CheckRelayIP(sender, addr) } } diff --git a/p2p/rlpx/rlpx_test.go b/p2p/rlpx/rlpx_test.go index 27d51546e7..a02c1dc2cc 100644 --- a/p2p/rlpx/rlpx_test.go +++ b/p2p/rlpx/rlpx_test.go @@ -369,8 +369,7 @@ func TestHandshakeForwardCompatibility(t *testing.T) { func BenchmarkHandshakeRead(b *testing.B) { var input = unhex(eip8HandshakeAuthTests[0].input) - - for i := 0; i < b.N; i++ { + for b.Loop() { var ( h handshakeState r = bytes.NewReader(input) @@ -427,7 +426,7 @@ func BenchmarkThroughput(b *testing.B) { // Read N messages. 
b.SetBytes(int64(len(msgdata))) b.ReportAllocs() - for i := 0; i < b.N; i++ { + for b.Loop() { _, _, _, err := conn2.Read() if err != nil { b.Fatal("read error:", err) diff --git a/params/config.go b/params/config.go index 7576170d26..42a2c61ab5 100644 --- a/params/config.go +++ b/params/config.go @@ -91,11 +91,17 @@ var ( ShanghaiTime: newUint64(1696000704), CancunTime: newUint64(1707305664), PragueTime: newUint64(1740434112), + OsakaTime: newUint64(1759308480), + BPO1Time: newUint64(1759800000), + BPO2Time: newUint64(1760389824), DepositContractAddress: common.HexToAddress("0x4242424242424242424242424242424242424242"), Ethash: new(EthashConfig), BlobScheduleConfig: &BlobScheduleConfig{ Cancun: DefaultCancunBlobConfig, Prague: DefaultPragueBlobConfig, + Osaka: DefaultOsakaBlobConfig, + BPO1: DefaultBPO1BlobConfig, + BPO2: DefaultBPO2BlobConfig, }, } // SepoliaChainConfig contains the chain parameters to run a node on the Sepolia test network. @@ -121,11 +127,17 @@ var ( ShanghaiTime: newUint64(1677557088), CancunTime: newUint64(1706655072), PragueTime: newUint64(1741159776), + OsakaTime: newUint64(1760427360), + BPO1Time: newUint64(1761017184), + BPO2Time: newUint64(1761607008), DepositContractAddress: common.HexToAddress("0x7f02c3e3c98b133055b8b348b2ac625669ed295d"), Ethash: new(EthashConfig), BlobScheduleConfig: &BlobScheduleConfig{ Cancun: DefaultCancunBlobConfig, Prague: DefaultPragueBlobConfig, + Osaka: DefaultOsakaBlobConfig, + BPO1: DefaultBPO1BlobConfig, + BPO2: DefaultBPO2BlobConfig, }, } // HoodiChainConfig contains the chain parameters to run a node on the Hoodi test network. 
@@ -151,11 +163,17 @@ var (
 		ShanghaiTime:            newUint64(0),
 		CancunTime:              newUint64(0),
 		PragueTime:              newUint64(1742999832),
+		OsakaTime:               newUint64(1761677592),
+		BPO1Time:                newUint64(1762365720),
+		BPO2Time:                newUint64(1762955544),
 		DepositContractAddress:  common.HexToAddress("0x00000000219ab540356cBB839Cbe05303d7705Fa"),
 		Ethash:                  new(EthashConfig),
 		BlobScheduleConfig: &BlobScheduleConfig{
 			Cancun: DefaultCancunBlobConfig,
 			Prague: DefaultPragueBlobConfig,
+			Osaka:  DefaultOsakaBlobConfig,
+			BPO1:   DefaultBPO1BlobConfig,
+			BPO2:   DefaultBPO2BlobConfig,
 		},
 	}
 	// AllEthashProtocolChanges contains every protocol change (EIPs) introduced
@@ -359,6 +377,30 @@ var (
 		Max:            9,
 		UpdateFraction: 5007716,
 	}
+	// DefaultBPO1BlobConfig is the default blob configuration for the BPO1 fork.
+	DefaultBPO1BlobConfig = &BlobConfig{
+		Target:         10,
+		Max:            15,
+		UpdateFraction: 8346193,
+	}
+	// DefaultBPO2BlobConfig is the default blob configuration for the BPO2 fork.
+	DefaultBPO2BlobConfig = &BlobConfig{
+		Target:         14,
+		Max:            21,
+		UpdateFraction: 11684671,
+	}
+	// DefaultBPO3BlobConfig is the default blob configuration for the BPO3 fork.
+	DefaultBPO3BlobConfig = &BlobConfig{
+		Target:         21,
+		Max:            32,
+		UpdateFraction: 20609697,
+	}
+	// DefaultBPO4BlobConfig is the default blob configuration for the BPO4 fork.
+	DefaultBPO4BlobConfig = &BlobConfig{
+		Target:         14,
+		Max:            21,
+		UpdateFraction: 13739630,
+	}
 	// DefaultBlobSchedule is the latest configured blob schedule for Ethereum mainnet.
 	DefaultBlobSchedule = &BlobScheduleConfig{
 		Cancun: DefaultCancunBlobConfig,
@@ -485,33 +527,33 @@ func (c *ChainConfig) Description() string {
 	// makes sense for mainnet should be optional at printing to avoid bloating
 	// the output for testnets and private networks.
banner += "Pre-Merge hard forks (block based):\n" - banner += fmt.Sprintf(" - Homestead: #%-8v (https://github.com/ethereum/execution-specs/blob/master/network-upgrades/mainnet-upgrades/homestead.md)\n", c.HomesteadBlock) + banner += fmt.Sprintf(" - Homestead: #%-8v (https://ethereum.github.io/execution-specs/src/ethereum/forks/homestead/__init__.py.html)\n", c.HomesteadBlock) if c.DAOForkBlock != nil { - banner += fmt.Sprintf(" - DAO Fork: #%-8v (https://github.com/ethereum/execution-specs/blob/master/network-upgrades/mainnet-upgrades/dao-fork.md)\n", c.DAOForkBlock) + banner += fmt.Sprintf(" - DAO Fork: #%-8v (https://ethereum.github.io/execution-specs/src/ethereum/forks/dao_fork/__init__.py.html)\n", c.DAOForkBlock) } - banner += fmt.Sprintf(" - Tangerine Whistle (EIP 150): #%-8v (https://github.com/ethereum/execution-specs/blob/master/network-upgrades/mainnet-upgrades/tangerine-whistle.md)\n", c.EIP150Block) - banner += fmt.Sprintf(" - Spurious Dragon/1 (EIP 155): #%-8v (https://github.com/ethereum/execution-specs/blob/master/network-upgrades/mainnet-upgrades/spurious-dragon.md)\n", c.EIP155Block) - banner += fmt.Sprintf(" - Spurious Dragon/2 (EIP 158): #%-8v (https://github.com/ethereum/execution-specs/blob/master/network-upgrades/mainnet-upgrades/spurious-dragon.md)\n", c.EIP155Block) - banner += fmt.Sprintf(" - Byzantium: #%-8v (https://github.com/ethereum/execution-specs/blob/master/network-upgrades/mainnet-upgrades/byzantium.md)\n", c.ByzantiumBlock) - banner += fmt.Sprintf(" - Constantinople: #%-8v (https://github.com/ethereum/execution-specs/blob/master/network-upgrades/mainnet-upgrades/constantinople.md)\n", c.ConstantinopleBlock) - banner += fmt.Sprintf(" - Petersburg: #%-8v (https://github.com/ethereum/execution-specs/blob/master/network-upgrades/mainnet-upgrades/petersburg.md)\n", c.PetersburgBlock) - banner += fmt.Sprintf(" - Istanbul: #%-8v (https://github.com/ethereum/execution-specs/blob/master/network-upgrades/mainnet-upgrades/istanbul.md)\n", 
c.IstanbulBlock) + banner += fmt.Sprintf(" - Tangerine Whistle (EIP 150): #%-8v (https://ethereum.github.io/execution-specs/src/ethereum/forks/tangerine_whistle/__init__.py.html)\n", c.EIP150Block) + banner += fmt.Sprintf(" - Spurious Dragon/1 (EIP 155): #%-8v (https://ethereum.github.io/execution-specs/src/ethereum/forks/spurious_dragon/__init__.py.html)\n", c.EIP155Block) + banner += fmt.Sprintf(" - Spurious Dragon/2 (EIP 158): #%-8v (https://ethereum.github.io/execution-specs/src/ethereum/forks/spurious_dragon/__init__.py.html)\n", c.EIP155Block) + banner += fmt.Sprintf(" - Byzantium: #%-8v (https://ethereum.github.io/execution-specs/src/ethereum/forks/byzantium/__init__.py.html)\n", c.ByzantiumBlock) + banner += fmt.Sprintf(" - Constantinople: #%-8v (https://ethereum.github.io/execution-specs/src/ethereum/forks/constantinople/__init__.py.html)\n", c.ConstantinopleBlock) + banner += fmt.Sprintf(" - Petersburg: #%-8v (https://ethereum.github.io/execution-specs/src/ethereum/forks/constantinople/__init__.py.html)\n", c.PetersburgBlock) + banner += fmt.Sprintf(" - Istanbul: #%-8v (https://ethereum.github.io/execution-specs/src/ethereum/forks/istanbul/__init__.py.html)\n", c.IstanbulBlock) if c.MuirGlacierBlock != nil { - banner += fmt.Sprintf(" - Muir Glacier: #%-8v (https://github.com/ethereum/execution-specs/blob/master/network-upgrades/mainnet-upgrades/muir-glacier.md)\n", c.MuirGlacierBlock) + banner += fmt.Sprintf(" - Muir Glacier: #%-8v (https://ethereum.github.io/execution-specs/src/ethereum/forks/muir_glacier/__init__.py.html)\n", c.MuirGlacierBlock) } - banner += fmt.Sprintf(" - Berlin: #%-8v (https://github.com/ethereum/execution-specs/blob/master/network-upgrades/mainnet-upgrades/berlin.md)\n", c.BerlinBlock) - banner += fmt.Sprintf(" - London: #%-8v (https://github.com/ethereum/execution-specs/blob/master/network-upgrades/mainnet-upgrades/london.md)\n", c.LondonBlock) + banner += fmt.Sprintf(" - Berlin: #%-8v 
(https://ethereum.github.io/execution-specs/src/ethereum/forks/berlin/__init__.py.html)\n", c.BerlinBlock) + banner += fmt.Sprintf(" - London: #%-8v (https://ethereum.github.io/execution-specs/src/ethereum/forks/london/__init__.py.html)\n", c.LondonBlock) if c.ArrowGlacierBlock != nil { - banner += fmt.Sprintf(" - Arrow Glacier: #%-8v (https://github.com/ethereum/execution-specs/blob/master/network-upgrades/mainnet-upgrades/arrow-glacier.md)\n", c.ArrowGlacierBlock) + banner += fmt.Sprintf(" - Arrow Glacier: #%-8v (https://ethereum.github.io/execution-specs/src/ethereum/forks/arrow_glacier/__init__.py.html)\n", c.ArrowGlacierBlock) } if c.GrayGlacierBlock != nil { - banner += fmt.Sprintf(" - Gray Glacier: #%-8v (https://github.com/ethereum/execution-specs/blob/master/network-upgrades/mainnet-upgrades/gray-glacier.md)\n", c.GrayGlacierBlock) + banner += fmt.Sprintf(" - Gray Glacier: #%-8v (https://ethereum.github.io/execution-specs/src/ethereum/forks/gray_glacier/__init__.py.html)\n", c.GrayGlacierBlock) } banner += "\n" // Add a special section for the merge as it's non-obvious banner += "Merge configured:\n" - banner += " - Hard-fork specification: https://github.com/ethereum/execution-specs/blob/master/network-upgrades/mainnet-upgrades/paris.md\n" + banner += " - Hard-fork specification: https://ethereum.github.io/execution-specs/src/ethereum/forks/paris/__init__.py.html\n" banner += " - Network known to be merged\n" banner += fmt.Sprintf(" - Total terminal difficulty: %v\n", c.TerminalTotalDifficulty) if c.MergeNetsplitBlock != nil { @@ -522,34 +564,34 @@ func (c *ChainConfig) Description() string { // Create a list of forks post-merge banner += "Post-Merge hard forks (timestamp based):\n" if c.ShanghaiTime != nil { - banner += fmt.Sprintf(" - Shanghai: @%-10v (https://github.com/ethereum/execution-specs/blob/master/network-upgrades/mainnet-upgrades/shanghai.md)\n", *c.ShanghaiTime) + banner += fmt.Sprintf(" - Shanghai: @%-10v 
(https://ethereum.github.io/execution-specs/src/ethereum/forks/shanghai/__init__.py.html)\n", *c.ShanghaiTime) } if c.CancunTime != nil { - banner += fmt.Sprintf(" - Cancun: @%-10v (https://github.com/ethereum/execution-specs/blob/master/network-upgrades/mainnet-upgrades/cancun.md)\n", *c.CancunTime) + banner += fmt.Sprintf(" - Cancun: @%-10v (https://ethereum.github.io/execution-specs/src/ethereum/forks/cancun/__init__.py.html)\n", *c.CancunTime) } if c.PragueTime != nil { - banner += fmt.Sprintf(" - Prague: @%-10v\n", *c.PragueTime) + banner += fmt.Sprintf(" - Prague: @%-10v (https://ethereum.github.io/execution-specs/src/ethereum/forks/prague/__init__.py.html)\n", *c.PragueTime) } if c.OsakaTime != nil { - banner += fmt.Sprintf(" - Osaka: @%-10v\n", *c.OsakaTime) + banner += fmt.Sprintf(" - Osaka: @%-10v (https://ethereum.github.io/execution-specs/src/ethereum/forks/osaka/__init__.py.html)\n", *c.OsakaTime) } if c.VerkleTime != nil { banner += fmt.Sprintf(" - Verkle: @%-10v\n", *c.VerkleTime) } if c.BPO1Time != nil { - banner += fmt.Sprintf(" - BPO1: @%-10v\n", *c.BPO1Time) + banner += fmt.Sprintf(" - BPO1: @%-10v\n", *c.BPO1Time) } if c.BPO2Time != nil { - banner += fmt.Sprintf(" - BPO2: @%-10v\n", *c.BPO2Time) + banner += fmt.Sprintf(" - BPO2: @%-10v\n", *c.BPO2Time) } if c.BPO3Time != nil { - banner += fmt.Sprintf(" - BPO3: @%-10v\n", *c.BPO3Time) + banner += fmt.Sprintf(" - BPO3: @%-10v\n", *c.BPO3Time) } if c.BPO4Time != nil { - banner += fmt.Sprintf(" - BPO4: @%-10v\n", *c.BPO4Time) + banner += fmt.Sprintf(" - BPO4: @%-10v\n", *c.BPO4Time) } if c.BPO5Time != nil { - banner += fmt.Sprintf(" - BPO5: @%-10v\n", *c.BPO5Time) + banner += fmt.Sprintf(" - BPO5: @%-10v\n", *c.BPO5Time) } return banner } @@ -654,6 +696,16 @@ func (c *ChainConfig) IsTerminalPoWBlock(parentTotalDiff *big.Int, totalDiff *bi return parentTotalDiff.Cmp(c.TerminalTotalDifficulty) < 0 && totalDiff.Cmp(c.TerminalTotalDifficulty) >= 0 } +// IsPostMerge reports whether the given block number 
is assumed to be post-merge. +// Here we check the MergeNetsplitBlock to allow configuring networks with a PoW or +// PoA chain for unit testing purposes. +func (c *ChainConfig) IsPostMerge(blockNum uint64, timestamp uint64) bool { + mergedAtGenesis := c.TerminalTotalDifficulty != nil && c.TerminalTotalDifficulty.Sign() == 0 + return mergedAtGenesis || + c.MergeNetsplitBlock != nil && blockNum >= c.MergeNetsplitBlock.Uint64() || + c.ShanghaiTime != nil && timestamp >= *c.ShanghaiTime +} + // IsShanghai returns whether time is either equal to the Shanghai fork time or greater. func (c *ChainConfig) IsShanghai(num *big.Int, time uint64) bool { return c.IsLondon(num) && isTimestampForked(c.ShanghaiTime, time) @@ -972,6 +1024,16 @@ func (c *ChainConfig) LatestFork(time uint64) forks.Fork { london := c.LondonBlock switch { + case c.IsBPO5(london, time): + return forks.BPO5 + case c.IsBPO4(london, time): + return forks.BPO4 + case c.IsBPO3(london, time): + return forks.BPO3 + case c.IsBPO2(london, time): + return forks.BPO2 + case c.IsBPO1(london, time): + return forks.BPO1 case c.IsOsaka(london, time): return forks.Osaka case c.IsPrague(london, time): @@ -988,12 +1050,22 @@ func (c *ChainConfig) LatestFork(time uint64) forks.Fork { // BlobConfig returns the blob config associated with the provided fork. 
func (c *ChainConfig) BlobConfig(fork forks.Fork) *BlobConfig { switch fork { + case forks.BPO5: + return c.BlobScheduleConfig.BPO5 + case forks.BPO4: + return c.BlobScheduleConfig.BPO4 + case forks.BPO3: + return c.BlobScheduleConfig.BPO3 + case forks.BPO2: + return c.BlobScheduleConfig.BPO2 + case forks.BPO1: + return c.BlobScheduleConfig.BPO1 case forks.Osaka: - return DefaultOsakaBlobConfig + return c.BlobScheduleConfig.Osaka case forks.Prague: - return DefaultPragueBlobConfig + return c.BlobScheduleConfig.Prague case forks.Cancun: - return DefaultCancunBlobConfig + return c.BlobScheduleConfig.Cancun default: return nil } @@ -1023,6 +1095,16 @@ func (c *ChainConfig) ActiveSystemContracts(time uint64) map[string]common.Addre // the fork isn't defined or isn't a time-based fork. func (c *ChainConfig) Timestamp(fork forks.Fork) *uint64 { switch { + case fork == forks.BPO5: + return c.BPO5Time + case fork == forks.BPO4: + return c.BPO4Time + case fork == forks.BPO3: + return c.BPO3Time + case fork == forks.BPO2: + return c.BPO2Time + case fork == forks.BPO1: + return c.BPO1Time case fork == forks.Osaka: return c.OsakaTime case fork == forks.Prague: diff --git a/params/forks/forks.go b/params/forks/forks.go index 5c9612a625..adb65c8624 100644 --- a/params/forks/forks.go +++ b/params/forks/forks.go @@ -40,6 +40,11 @@ const ( Cancun Prague Osaka + BPO1 + BPO2 + BPO3 + BPO4 + BPO5 ) // String implements fmt.Stringer. 
@@ -72,4 +77,9 @@ var forkToString = map[Fork]string{ Cancun: "Cancun", Prague: "Prague", Osaka: "Osaka", + BPO1: "BPO1", + BPO2: "BPO2", + BPO3: "BPO3", + BPO4: "BPO4", + BPO5: "BPO5", } diff --git a/rlp/decode_test.go b/rlp/decode_test.go index 3e492188e8..4b3a322873 100644 --- a/rlp/decode_test.go +++ b/rlp/decode_test.go @@ -350,7 +350,7 @@ func TestDecodeErrors(t *testing.T) { } if err := Decode(r, new(uint)); err != io.EOF { - t.Errorf("Decode(r, new(int)) error mismatch, got %q, want %q", err, io.EOF) + t.Errorf("Decode(r, new(uint)) error mismatch, got %q, want %q", err, io.EOF) } } diff --git a/rlp/encode_test.go b/rlp/encode_test.go index 314958eb56..e63ea319b4 100644 --- a/rlp/encode_test.go +++ b/rlp/encode_test.go @@ -317,7 +317,6 @@ var encTests = []encTest{ {val: &optionalAndTailField{A: 1}, output: "C101"}, {val: &optionalAndTailField{A: 1, B: 2}, output: "C20102"}, {val: &optionalAndTailField{A: 1, Tail: []uint{5, 6}}, output: "C401800506"}, - {val: &optionalAndTailField{A: 1, Tail: []uint{5, 6}}, output: "C401800506"}, {val: &optionalBigIntField{A: 1}, output: "C101"}, {val: &optionalPtrField{A: 1}, output: "C101"}, {val: &optionalPtrFieldNil{A: 1}, output: "C101"}, diff --git a/tests/block_test.go b/tests/block_test.go index 91d9f2e653..c718b304b6 100644 --- a/tests/block_test.go +++ b/tests/block_test.go @@ -81,8 +81,9 @@ func TestExecutionSpecBlocktests(t *testing.T) { } bt := new(testMatcher) - bt.skipLoad(".*prague/eip7251_consolidations/contract_deployment/system_contract_deployment.json") - bt.skipLoad(".*prague/eip7002_el_triggerable_withdrawals/contract_deployment/system_contract_deployment.json") + // These tests require us to handle scenarios where a system contract is not deployed at a fork + bt.skipLoad(".*prague/eip7251_consolidations/test_system_contract_deployment.json") + bt.skipLoad(".*prague/eip7002_el_triggerable_withdrawals/test_system_contract_deployment.json") bt.walk(t, executionSpecBlockchainTestDir, func(t *testing.T, 
name string, test *BlockTest) { execBlockTest(t, bt, test) diff --git a/tests/init.go b/tests/init.go index a8bc424fa2..705e929ae9 100644 --- a/tests/init.go +++ b/tests/init.go @@ -464,6 +464,274 @@ var Forks = map[string]*params.ChainConfig{ Osaka: params.DefaultOsakaBlobConfig, }, }, + "BPO1": { + ChainID: big.NewInt(1), + HomesteadBlock: big.NewInt(0), + EIP150Block: big.NewInt(0), + EIP155Block: big.NewInt(0), + EIP158Block: big.NewInt(0), + ByzantiumBlock: big.NewInt(0), + ConstantinopleBlock: big.NewInt(0), + PetersburgBlock: big.NewInt(0), + IstanbulBlock: big.NewInt(0), + MuirGlacierBlock: big.NewInt(0), + BerlinBlock: big.NewInt(0), + LondonBlock: big.NewInt(0), + ArrowGlacierBlock: big.NewInt(0), + MergeNetsplitBlock: big.NewInt(0), + TerminalTotalDifficulty: big.NewInt(0), + ShanghaiTime: u64(0), + CancunTime: u64(0), + PragueTime: u64(0), + OsakaTime: u64(0), + BPO1Time: u64(0), + DepositContractAddress: params.MainnetChainConfig.DepositContractAddress, + BlobScheduleConfig: ¶ms.BlobScheduleConfig{ + Cancun: params.DefaultCancunBlobConfig, + Prague: params.DefaultPragueBlobConfig, + Osaka: params.DefaultOsakaBlobConfig, + BPO1: bpo1BlobConfig, + }, + }, + "OsakaToBPO1AtTime15k": { + ChainID: big.NewInt(1), + HomesteadBlock: big.NewInt(0), + EIP150Block: big.NewInt(0), + EIP155Block: big.NewInt(0), + EIP158Block: big.NewInt(0), + ByzantiumBlock: big.NewInt(0), + ConstantinopleBlock: big.NewInt(0), + PetersburgBlock: big.NewInt(0), + IstanbulBlock: big.NewInt(0), + MuirGlacierBlock: big.NewInt(0), + BerlinBlock: big.NewInt(0), + LondonBlock: big.NewInt(0), + ArrowGlacierBlock: big.NewInt(0), + MergeNetsplitBlock: big.NewInt(0), + TerminalTotalDifficulty: big.NewInt(0), + ShanghaiTime: u64(0), + CancunTime: u64(0), + PragueTime: u64(0), + OsakaTime: u64(0), + BPO1Time: u64(15_000), + DepositContractAddress: params.MainnetChainConfig.DepositContractAddress, + BlobScheduleConfig: ¶ms.BlobScheduleConfig{ + Cancun: params.DefaultCancunBlobConfig, + Prague: 
params.DefaultPragueBlobConfig, + Osaka: params.DefaultOsakaBlobConfig, + BPO1: bpo1BlobConfig, + }, + }, + "BPO2": { + ChainID: big.NewInt(1), + HomesteadBlock: big.NewInt(0), + EIP150Block: big.NewInt(0), + EIP155Block: big.NewInt(0), + EIP158Block: big.NewInt(0), + ByzantiumBlock: big.NewInt(0), + ConstantinopleBlock: big.NewInt(0), + PetersburgBlock: big.NewInt(0), + IstanbulBlock: big.NewInt(0), + MuirGlacierBlock: big.NewInt(0), + BerlinBlock: big.NewInt(0), + LondonBlock: big.NewInt(0), + ArrowGlacierBlock: big.NewInt(0), + MergeNetsplitBlock: big.NewInt(0), + TerminalTotalDifficulty: big.NewInt(0), + ShanghaiTime: u64(0), + CancunTime: u64(0), + PragueTime: u64(0), + OsakaTime: u64(0), + BPO1Time: u64(0), + BPO2Time: u64(0), + DepositContractAddress: params.MainnetChainConfig.DepositContractAddress, + BlobScheduleConfig: ¶ms.BlobScheduleConfig{ + Cancun: params.DefaultCancunBlobConfig, + Prague: params.DefaultPragueBlobConfig, + Osaka: params.DefaultOsakaBlobConfig, + BPO1: bpo1BlobConfig, + BPO2: bpo2BlobConfig, + }, + }, + "BPO1ToBPO2AtTime15k": { + ChainID: big.NewInt(1), + HomesteadBlock: big.NewInt(0), + EIP150Block: big.NewInt(0), + EIP155Block: big.NewInt(0), + EIP158Block: big.NewInt(0), + ByzantiumBlock: big.NewInt(0), + ConstantinopleBlock: big.NewInt(0), + PetersburgBlock: big.NewInt(0), + IstanbulBlock: big.NewInt(0), + MuirGlacierBlock: big.NewInt(0), + BerlinBlock: big.NewInt(0), + LondonBlock: big.NewInt(0), + ArrowGlacierBlock: big.NewInt(0), + MergeNetsplitBlock: big.NewInt(0), + TerminalTotalDifficulty: big.NewInt(0), + ShanghaiTime: u64(0), + CancunTime: u64(0), + PragueTime: u64(0), + OsakaTime: u64(0), + BPO1Time: u64(0), + BPO2Time: u64(15_000), + DepositContractAddress: params.MainnetChainConfig.DepositContractAddress, + BlobScheduleConfig: ¶ms.BlobScheduleConfig{ + Cancun: params.DefaultCancunBlobConfig, + Prague: params.DefaultPragueBlobConfig, + Osaka: params.DefaultOsakaBlobConfig, + BPO1: bpo1BlobConfig, + BPO2: bpo2BlobConfig, + 
}, + }, + "BPO3": { + ChainID: big.NewInt(1), + HomesteadBlock: big.NewInt(0), + EIP150Block: big.NewInt(0), + EIP155Block: big.NewInt(0), + EIP158Block: big.NewInt(0), + ByzantiumBlock: big.NewInt(0), + ConstantinopleBlock: big.NewInt(0), + PetersburgBlock: big.NewInt(0), + IstanbulBlock: big.NewInt(0), + MuirGlacierBlock: big.NewInt(0), + BerlinBlock: big.NewInt(0), + LondonBlock: big.NewInt(0), + ArrowGlacierBlock: big.NewInt(0), + MergeNetsplitBlock: big.NewInt(0), + TerminalTotalDifficulty: big.NewInt(0), + ShanghaiTime: u64(0), + CancunTime: u64(0), + PragueTime: u64(0), + OsakaTime: u64(0), + BPO1Time: u64(0), + BPO2Time: u64(0), + BPO3Time: u64(0), + DepositContractAddress: params.MainnetChainConfig.DepositContractAddress, + BlobScheduleConfig: ¶ms.BlobScheduleConfig{ + Cancun: params.DefaultCancunBlobConfig, + Prague: params.DefaultPragueBlobConfig, + Osaka: params.DefaultOsakaBlobConfig, + BPO1: bpo1BlobConfig, + BPO2: bpo2BlobConfig, + BPO3: params.DefaultBPO3BlobConfig, + }, + }, + "BPO2ToBPO3AtTime15k": { + ChainID: big.NewInt(1), + HomesteadBlock: big.NewInt(0), + EIP150Block: big.NewInt(0), + EIP155Block: big.NewInt(0), + EIP158Block: big.NewInt(0), + ByzantiumBlock: big.NewInt(0), + ConstantinopleBlock: big.NewInt(0), + PetersburgBlock: big.NewInt(0), + IstanbulBlock: big.NewInt(0), + MuirGlacierBlock: big.NewInt(0), + BerlinBlock: big.NewInt(0), + LondonBlock: big.NewInt(0), + ArrowGlacierBlock: big.NewInt(0), + MergeNetsplitBlock: big.NewInt(0), + TerminalTotalDifficulty: big.NewInt(0), + ShanghaiTime: u64(0), + CancunTime: u64(0), + PragueTime: u64(0), + OsakaTime: u64(0), + BPO1Time: u64(0), + BPO2Time: u64(0), + BPO3Time: u64(15_000), + DepositContractAddress: params.MainnetChainConfig.DepositContractAddress, + BlobScheduleConfig: ¶ms.BlobScheduleConfig{ + Cancun: params.DefaultCancunBlobConfig, + Prague: params.DefaultPragueBlobConfig, + Osaka: params.DefaultOsakaBlobConfig, + BPO1: bpo1BlobConfig, + BPO2: bpo2BlobConfig, + BPO3: 
params.DefaultBPO3BlobConfig, + }, + }, + "BPO4": { + ChainID: big.NewInt(1), + HomesteadBlock: big.NewInt(0), + EIP150Block: big.NewInt(0), + EIP155Block: big.NewInt(0), + EIP158Block: big.NewInt(0), + ByzantiumBlock: big.NewInt(0), + ConstantinopleBlock: big.NewInt(0), + PetersburgBlock: big.NewInt(0), + IstanbulBlock: big.NewInt(0), + MuirGlacierBlock: big.NewInt(0), + BerlinBlock: big.NewInt(0), + LondonBlock: big.NewInt(0), + ArrowGlacierBlock: big.NewInt(0), + MergeNetsplitBlock: big.NewInt(0), + TerminalTotalDifficulty: big.NewInt(0), + ShanghaiTime: u64(0), + CancunTime: u64(0), + PragueTime: u64(0), + OsakaTime: u64(0), + BPO1Time: u64(0), + BPO2Time: u64(0), + BPO3Time: u64(0), + BPO4Time: u64(0), + DepositContractAddress: params.MainnetChainConfig.DepositContractAddress, + BlobScheduleConfig: ¶ms.BlobScheduleConfig{ + Cancun: params.DefaultCancunBlobConfig, + Prague: params.DefaultPragueBlobConfig, + Osaka: params.DefaultOsakaBlobConfig, + BPO1: bpo1BlobConfig, + BPO2: bpo2BlobConfig, + BPO3: params.DefaultBPO3BlobConfig, + BPO4: params.DefaultBPO4BlobConfig, + }, + }, + "BPO3ToBPO4AtTime15k": { + ChainID: big.NewInt(1), + HomesteadBlock: big.NewInt(0), + EIP150Block: big.NewInt(0), + EIP155Block: big.NewInt(0), + EIP158Block: big.NewInt(0), + ByzantiumBlock: big.NewInt(0), + ConstantinopleBlock: big.NewInt(0), + PetersburgBlock: big.NewInt(0), + IstanbulBlock: big.NewInt(0), + MuirGlacierBlock: big.NewInt(0), + BerlinBlock: big.NewInt(0), + LondonBlock: big.NewInt(0), + ArrowGlacierBlock: big.NewInt(0), + MergeNetsplitBlock: big.NewInt(0), + TerminalTotalDifficulty: big.NewInt(0), + ShanghaiTime: u64(0), + CancunTime: u64(0), + PragueTime: u64(0), + OsakaTime: u64(0), + BPO1Time: u64(0), + BPO2Time: u64(0), + BPO3Time: u64(0), + BPO4Time: u64(15_000), + DepositContractAddress: params.MainnetChainConfig.DepositContractAddress, + BlobScheduleConfig: ¶ms.BlobScheduleConfig{ + Cancun: params.DefaultCancunBlobConfig, + Prague: params.DefaultPragueBlobConfig, 
+ Osaka: params.DefaultOsakaBlobConfig, + BPO1: bpo1BlobConfig, + BPO2: bpo2BlobConfig, + BPO3: params.DefaultBPO3BlobConfig, + BPO4: params.DefaultBPO4BlobConfig, + }, + }, +} + +var bpo1BlobConfig = ¶ms.BlobConfig{ + Target: 9, + Max: 14, + UpdateFraction: 8832827, +} + +var bpo2BlobConfig = ¶ms.BlobConfig{ + Target: 14, + Max: 21, + UpdateFraction: 13739630, } // AvailableForks returns the set of defined fork names diff --git a/tests/state_test.go b/tests/state_test.go index 301bc3a7a9..f80bda4372 100644 --- a/tests/state_test.go +++ b/tests/state_test.go @@ -156,8 +156,8 @@ func execStateTest(t *testing.T, st *testMatcher, test *StateTest) { withTrace(t, test.gasLimit(subtest), func(vmconfig vm.Config) error { var result error test.Run(subtest, vmconfig, true, rawdb.PathScheme, func(err error, state *StateTestState) { - if state.Snapshots != nil && state.StateDB != nil { - if _, err := state.Snapshots.Journal(state.StateDB.IntermediateRoot(false)); err != nil { + if state.TrieDB != nil && state.StateDB != nil { + if err := state.TrieDB.Journal(state.StateDB.IntermediateRoot(false)); err != nil { result = err return } diff --git a/tests/state_test_util.go b/tests/state_test_util.go index a22e470ad8..b8d3c4fb92 100644 --- a/tests/state_test_util.go +++ b/tests/state_test_util.go @@ -511,7 +511,7 @@ func MakePreState(db ethdb.Database, accounts types.GenesisAlloc, snapshotter bo sdb := state.NewDatabase(triedb, nil) statedb, _ := state.New(types.EmptyRootHash, sdb) for addr, a := range accounts { - statedb.SetCode(addr, a.Code) + statedb.SetCode(addr, a.Code, tracing.CodeChangeUnspecified) statedb.SetNonce(addr, a.Nonce, tracing.NonceChangeUnspecified) statedb.SetBalance(addr, uint256.MustFromBig(a.Balance), tracing.BalanceChangeUnspecified) for k, v := range a.Storage { @@ -523,7 +523,7 @@ func MakePreState(db ethdb.Database, accounts types.GenesisAlloc, snapshotter bo // If snapshot is requested, initialize the snapshotter and use it in state. 
var snaps *snapshot.Tree - if snapshotter { + if snapshotter && scheme == rawdb.HashScheme { snapconfig := snapshot.Config{ CacheSize: 1, Recovery: false, diff --git a/tests/transaction_test.go b/tests/transaction_test.go index 6260df0f3f..73ee3aa16a 100644 --- a/tests/transaction_test.go +++ b/tests/transaction_test.go @@ -70,7 +70,7 @@ func TestExecutionSpecTransaction(t *testing.T) { st := new(testMatcher) // Emptiness of authorization list is only validated during the tx precheck - st.skipLoad("^prague/eip7702_set_code_tx/invalid_tx/empty_authorization_list.json") + st.skipLoad("^prague/eip7702_set_code_tx/test_empty_authorization_list.json") st.walk(t, executionSpecTransactionTestDir, func(t *testing.T, name string, test *TransactionTest) { if err := st.checkFailure(t, test.Run()); err != nil { diff --git a/trie/iterator.go b/trie/iterator.go index 80298ce48f..3d3191ffba 100644 --- a/trie/iterator.go +++ b/trie/iterator.go @@ -836,3 +836,91 @@ func (it *unionIterator) Error() error { } return nil } + +// subTreeIterator wraps nodeIterator to traverse a trie within a predefined +// start and limit range. +type subtreeIterator struct { + NodeIterator + + stopPath []byte // Precomputed hex path for stopKey (without terminator), nil means no limit + exhausted bool // Flag whether the iterator has been exhausted +} + +// newSubtreeIterator creates an iterator that only traverses nodes within a subtree +// defined by the given startKey and stopKey. This supports general range iteration +// where startKey is inclusive and stopKey is exclusive. +// +// The iterator will only visit nodes whose keys k satisfy: startKey <= k < stopKey, +// where comparisons are performed in lexicographic order of byte keys (internally +// implemented via hex-nibble path comparisons for efficiency). +// +// If startKey is nil, iteration starts from the beginning. If stopKey is nil, +// iteration continues to the end of the trie. 
+func newSubtreeIterator(trie *Trie, startKey, stopKey []byte) (NodeIterator, error) { + it, err := trie.NodeIterator(startKey) + if err != nil { + return nil, err + } + if startKey == nil && stopKey == nil { + return it, nil + } + // Precompute nibble paths for efficient comparison + var stopPath []byte + if stopKey != nil { + stopPath = keybytesToHex(stopKey) + if hasTerm(stopPath) { + stopPath = stopPath[:len(stopPath)-1] + } + } + return &subtreeIterator{ + NodeIterator: it, + stopPath: stopPath, + }, nil +} + +// nextKey returns the next possible key after the given prefix. +// For example, "abc" -> "abd", "ab\xff" -> "ac", etc. +func nextKey(prefix []byte) []byte { + if len(prefix) == 0 { + return nil + } + // Make a copy to avoid modifying the original + next := make([]byte, len(prefix)) + copy(next, prefix) + + // Increment the last byte that isn't 0xff + for i := len(next) - 1; i >= 0; i-- { + if next[i] < 0xff { + next[i]++ + return next + } + // If it's 0xff, we need to carry over + // Trim trailing 0xff bytes + next = next[:i] + } + // If all bytes were 0xff, return nil (no upper bound) + return nil +} + +// newPrefixIterator creates an iterator that only traverses nodes with the given prefix. +// This ensures that only keys starting with the prefix are visited. +func newPrefixIterator(trie *Trie, prefix []byte) (NodeIterator, error) { + return newSubtreeIterator(trie, prefix, nextKey(prefix)) +} + +// Next moves the iterator to the next node. If the parameter is false, any child +// nodes will be skipped. 
+func (it *subtreeIterator) Next(descend bool) bool { + if it.exhausted { + return false + } + if !it.NodeIterator.Next(descend) { + it.exhausted = true + return false + } + if it.stopPath != nil && reachedPath(it.NodeIterator.Path(), it.stopPath) { + it.exhausted = true + return false + } + return true +} diff --git a/trie/iterator_test.go b/trie/iterator_test.go index 74a1aa378c..3e0ef126be 100644 --- a/trie/iterator_test.go +++ b/trie/iterator_test.go @@ -19,7 +19,9 @@ package trie import ( "bytes" "fmt" + "maps" "math/rand" + "slices" "testing" "github.com/ethereum/go-ethereum/common" @@ -624,6 +626,540 @@ func isTrieNode(scheme string, key, val []byte) (bool, []byte, common.Hash) { return true, path, hash } +func TestSubtreeIterator(t *testing.T) { + var ( + db = newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme) + tr = NewEmpty(db) + ) + vals := []struct{ k, v string }{ + {"do", "verb"}, + {"dog", "puppy"}, + {"doge", "coin"}, + {"dog\xff", "value6"}, + {"dog\xff\xff", "value7"}, + {"horse", "stallion"}, + {"house", "building"}, + {"houses", "multiple"}, + {"xyz", "value"}, + {"xyz\xff", "value"}, + {"xyz\xff\xff", "value"}, + } + all := make(map[string]string) + for _, val := range vals { + all[val.k] = val.v + tr.MustUpdate([]byte(val.k), []byte(val.v)) + } + root, nodes := tr.Commit(false) + db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes)) + + allNodes := make(map[string][]byte) + tr, _ = New(TrieID(root), db) + it, err := tr.NodeIterator(nil) + if err != nil { + t.Fatal(err) + } + for it.Next(true) { + allNodes[string(it.Path())] = it.NodeBlob() + } + allKeys := slices.Collect(maps.Keys(all)) + + suites := []struct { + start []byte + end []byte + expected []string + }{ + // entire key range + { + start: nil, + end: nil, + expected: allKeys, + }, + { + start: nil, + end: bytes.Repeat([]byte{0xff}, 32), + expected: allKeys, + }, + { + start: bytes.Repeat([]byte{0x0}, 32), + end: bytes.Repeat([]byte{0xff}, 32), + expected: 
allKeys, + }, + // key range with start + { + start: []byte("do"), + end: nil, + expected: allKeys, + }, + { + start: []byte("doe"), + end: nil, + expected: allKeys[1:], + }, + { + start: []byte("dog"), + end: nil, + expected: allKeys[1:], + }, + { + start: []byte("doge"), + end: nil, + expected: allKeys[2:], + }, + { + start: []byte("dog\xff"), + end: nil, + expected: allKeys[3:], + }, + { + start: []byte("dog\xff\xff"), + end: nil, + expected: allKeys[4:], + }, + { + start: []byte("dog\xff\xff\xff"), + end: nil, + expected: allKeys[5:], + }, + // key range with limit + { + start: nil, + end: []byte("xyz"), + expected: allKeys[:len(allKeys)-3], + }, + { + start: nil, + end: []byte("xyz\xff"), + expected: allKeys[:len(allKeys)-2], + }, + { + start: nil, + end: []byte("xyz\xff\xff"), + expected: allKeys[:len(allKeys)-1], + }, + { + start: nil, + end: []byte("xyz\xff\xff\xff"), + expected: allKeys, + }, + } + for _, suite := range suites { + // We need to re-open the trie from the committed state + tr, _ = New(TrieID(root), db) + it, err := newSubtreeIterator(tr, suite.start, suite.end) + if err != nil { + t.Fatal(err) + } + + found := make(map[string]string) + for it.Next(true) { + if it.Leaf() { + found[string(it.LeafKey())] = string(it.LeafBlob()) + } + } + if len(found) != len(suite.expected) { + t.Errorf("wrong number of values: got %d, want %d", len(found), len(suite.expected)) + } + for k, v := range found { + if all[k] != v { + t.Errorf("wrong value for %s: got %s, want %s", k, found[k], all[k]) + } + } + + expectedNodes := make(map[string][]byte) + for path, blob := range allNodes { + if suite.start != nil { + hexStart := keybytesToHex(suite.start) + hexStart = hexStart[:len(hexStart)-1] + if !reachedPath([]byte(path), hexStart) { + continue + } + } + if suite.end != nil { + hexEnd := keybytesToHex(suite.end) + hexEnd = hexEnd[:len(hexEnd)-1] + if reachedPath([]byte(path), hexEnd) { + continue + } + } + expectedNodes[path] = bytes.Clone(blob) + } + + // 
Compare the result yield from the subtree iterator + var ( + subCount int + subIt, _ = newSubtreeIterator(tr, suite.start, suite.end) + ) + for subIt.Next(true) { + blob, ok := expectedNodes[string(subIt.Path())] + if !ok { + t.Errorf("Unexpected node iterated, path: %v", subIt.Path()) + } + subCount++ + + if !bytes.Equal(blob, subIt.NodeBlob()) { + t.Errorf("Unexpected node blob, path: %v, want: %v, got: %v", subIt.Path(), blob, subIt.NodeBlob()) + } + } + if subCount != len(expectedNodes) { + t.Errorf("Unexpected node being iterated, want: %d, got: %d", len(expectedNodes), subCount) + } + } +} + +func TestPrefixIterator(t *testing.T) { + // Create a new trie + trie := NewEmpty(newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme)) + + // Insert test data + testData := map[string]string{ + "key1": "value1", + "key2": "value2", + "key10": "value10", + "key11": "value11", + "different": "value_different", + } + + for key, value := range testData { + trie.Update([]byte(key), []byte(value)) + } + + // Test prefix iteration for "key1" prefix + prefix := []byte("key1") + iter, err := trie.NodeIteratorWithPrefix(prefix) + if err != nil { + t.Fatalf("Failed to create prefix iterator: %v", err) + } + + var foundKeys [][]byte + for iter.Next(true) { + if iter.Leaf() { + foundKeys = append(foundKeys, iter.LeafKey()) + } + } + + if err := iter.Error(); err != nil { + t.Fatalf("Iterator error: %v", err) + } + + // Verify only keys starting with "key1" were found + expectedCount := 3 // "key1", "key10", "key11" + if len(foundKeys) != expectedCount { + t.Errorf("Expected %d keys, found %d", expectedCount, len(foundKeys)) + } + + for _, key := range foundKeys { + keyStr := string(key) + if !bytes.HasPrefix(key, prefix) { + t.Errorf("Found key %s doesn't have prefix %s", keyStr, string(prefix)) + } + } +} + +func TestPrefixIteratorVsFullIterator(t *testing.T) { + // Create a new trie with more structured data + trie := NewEmpty(newTestDatabase(rawdb.NewMemoryDatabase(), 
rawdb.HashScheme)) + + // Insert structured test data + testData := map[string]string{ + "aaa": "value_aaa", + "aab": "value_aab", + "aba": "value_aba", + "bbb": "value_bbb", + } + + for key, value := range testData { + trie.Update([]byte(key), []byte(value)) + } + + // Test that prefix iterator stops at boundary + prefix := []byte("aa") + prefixIter, err := trie.NodeIteratorWithPrefix(prefix) + if err != nil { + t.Fatalf("Failed to create prefix iterator: %v", err) + } + + var prefixKeys [][]byte + for prefixIter.Next(true) { + if prefixIter.Leaf() { + prefixKeys = append(prefixKeys, prefixIter.LeafKey()) + } + } + + // Should only find "aaa" and "aab", not "aba" or "bbb" + if len(prefixKeys) != 2 { + t.Errorf("Expected 2 keys with prefix 'aa', found %d", len(prefixKeys)) + } + + // Verify no keys outside prefix were found + for _, key := range prefixKeys { + if !bytes.HasPrefix(key, prefix) { + t.Errorf("Prefix iterator returned key %s outside prefix %s", string(key), string(prefix)) + } + } +} + +func TestEmptyPrefixIterator(t *testing.T) { + // Test with empty trie + trie := NewEmpty(newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme)) + + iter, err := trie.NodeIteratorWithPrefix([]byte("nonexistent")) + if err != nil { + t.Fatalf("Failed to create iterator: %v", err) + } + + if iter.Next(true) { + t.Error("Expected no results from empty trie") + } +} + +// TestPrefixIteratorEdgeCases tests various edge cases for prefix iteration +func TestPrefixIteratorEdgeCases(t *testing.T) { + // Create a trie with test data + trie := NewEmpty(newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme)) + testData := map[string]string{ + "abc": "value1", + "abcd": "value2", + "abce": "value3", + "abd": "value4", + "dog": "value5", + "dog\xff": "value6", // Test with 0xff byte + "dog\xff\xff": "value7", // Multiple 0xff bytes + } + for key, value := range testData { + trie.Update([]byte(key), []byte(value)) + } + + // Test 1: Prefix not present in trie + 
t.Run("NonexistentPrefix", func(t *testing.T) { + iter, err := trie.NodeIteratorWithPrefix([]byte("xyz")) + if err != nil { + t.Fatalf("Failed to create iterator: %v", err) + } + count := 0 + for iter.Next(true) { + if iter.Leaf() { + count++ + } + } + if count != 0 { + t.Errorf("Expected 0 results for nonexistent prefix, got %d", count) + } + }) + + // Test 2: Prefix exactly equals an existing key + t.Run("ExactKeyPrefix", func(t *testing.T) { + iter, err := trie.NodeIteratorWithPrefix([]byte("abc")) + if err != nil { + t.Fatalf("Failed to create iterator: %v", err) + } + found := make(map[string]bool) + for iter.Next(true) { + if iter.Leaf() { + found[string(iter.LeafKey())] = true + } + } + // Should find "abc", "abcd", "abce" but not "abd" + if !found["abc"] || !found["abcd"] || !found["abce"] { + t.Errorf("Missing expected keys: got %v", found) + } + if found["abd"] { + t.Errorf("Found unexpected key 'abd' with prefix 'abc'") + } + }) + + // Test 3: Prefix with trailing 0xff + t.Run("TrailingFFPrefix", func(t *testing.T) { + iter, err := trie.NodeIteratorWithPrefix([]byte("dog\xff")) + if err != nil { + t.Fatalf("Failed to create iterator: %v", err) + } + found := make(map[string]bool) + for iter.Next(true) { + if iter.Leaf() { + found[string(iter.LeafKey())] = true + } + } + // Should find "dog\xff" and "dog\xff\xff" + if !found["dog\xff"] || !found["dog\xff\xff"] { + t.Errorf("Missing expected keys with 0xff: got %v", found) + } + if found["dog"] { + t.Errorf("Found unexpected key 'dog' with prefix 'dog\\xff'") + } + }) + + // Test 4: All 0xff case (edge case for nextKey) + t.Run("AllFFPrefix", func(t *testing.T) { + // Add a key with all 0xff bytes + allFF := []byte{0xff, 0xff} + trie.Update(allFF, []byte("all_ff_value")) + trie.Update(append(allFF, 0x00), []byte("all_ff_plus")) + + iter, err := trie.NodeIteratorWithPrefix(allFF) + if err != nil { + t.Fatalf("Failed to create iterator: %v", err) + } + count := 0 + for iter.Next(true) { + if iter.Leaf() { + 
count++ + } + } + // Should find exactly the two keys with the all-0xff prefix + if count != 2 { + t.Errorf("Expected 2 results for all-0xff prefix, got %d", count) + } + }) + + // Test 5: Empty prefix (should iterate entire trie) + t.Run("EmptyPrefix", func(t *testing.T) { + iter, err := trie.NodeIteratorWithPrefix([]byte{}) + if err != nil { + t.Fatalf("Failed to create iterator: %v", err) + } + count := 0 + for iter.Next(true) { + if iter.Leaf() { + count++ + } + } + // Should find all keys in the trie + expectedCount := len(testData) + 2 // +2 for the extra keys added in test 4 + if count != expectedCount { + t.Errorf("Expected %d results for empty prefix, got %d", expectedCount, count) + } + }) +} + +// TestGeneralRangeIteration tests NewSubtreeIterator with arbitrary start/stop ranges +func TestGeneralRangeIteration(t *testing.T) { + // Create a trie with test data + trie := NewEmpty(newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme)) + testData := map[string]string{ + "apple": "fruit1", + "apricot": "fruit2", + "banana": "fruit3", + "cherry": "fruit4", + "date": "fruit5", + "fig": "fruit6", + "grape": "fruit7", + } + for key, value := range testData { + trie.Update([]byte(key), []byte(value)) + } + + // Test range iteration from "banana" to "fig" (exclusive) + t.Run("RangeIteration", func(t *testing.T) { + iter, _ := newSubtreeIterator(trie, []byte("banana"), []byte("fig")) + found := make(map[string]bool) + for iter.Next(true) { + if iter.Leaf() { + found[string(iter.LeafKey())] = true + } + } + // Should find "banana", "cherry", "date" but not "fig" + if !found["banana"] || !found["cherry"] || !found["date"] { + t.Errorf("Missing expected keys in range: got %v", found) + } + if found["apple"] || found["apricot"] || found["fig"] || found["grape"] { + t.Errorf("Found unexpected keys outside range: got %v", found) + } + }) + + // Test with nil stopKey (iterate to end) + t.Run("NilStopKey", func(t *testing.T) { + iter, _ := newSubtreeIterator(trie, 
[]byte("date"), nil) + found := make(map[string]bool) + for iter.Next(true) { + if iter.Leaf() { + found[string(iter.LeafKey())] = true + } + } + // Should find "date", "fig", "grape" + if !found["date"] || !found["fig"] || !found["grape"] { + t.Errorf("Missing expected keys from 'date' to end: got %v", found) + } + if found["apple"] || found["banana"] || found["cherry"] { + t.Errorf("Found unexpected keys before 'date': got %v", found) + } + }) + + // Test with nil startKey (iterate from beginning) + t.Run("NilStartKey", func(t *testing.T) { + iter, _ := newSubtreeIterator(trie, nil, []byte("cherry")) + found := make(map[string]bool) + for iter.Next(true) { + if iter.Leaf() { + found[string(iter.LeafKey())] = true + } + } + // Should find "apple", "apricot", "banana" but not "cherry" or later + if !found["apple"] || !found["apricot"] || !found["banana"] { + t.Errorf("Missing expected keys before 'cherry': got %v", found) + } + if found["cherry"] || found["date"] || found["fig"] || found["grape"] { + t.Errorf("Found unexpected keys at or after 'cherry': got %v", found) + } + }) +} + +// TestPrefixIteratorWithDescend tests prefix iteration with descend=false +func TestPrefixIteratorWithDescend(t *testing.T) { + // Create a trie with nested structure + trie := NewEmpty(newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme)) + testData := map[string]string{ + "a": "value_a", + "a/b": "value_ab", + "a/b/c": "value_abc", + "a/b/d": "value_abd", + "a/e": "value_ae", + "b": "value_b", + } + for key, value := range testData { + trie.Update([]byte(key), []byte(value)) + } + + // Test skipping subtrees with descend=false + t.Run("SkipSubtrees", func(t *testing.T) { + iter, err := trie.NodeIteratorWithPrefix([]byte("a")) + if err != nil { + t.Fatalf("Failed to create iterator: %v", err) + } + + // Count nodes at each level + nodesVisited := 0 + leafsFound := make(map[string]bool) + + // First call with descend=true to enter the "a" subtree + if !iter.Next(true) { + 
t.Fatal("Expected to find at least one node") + } + nodesVisited++ + + // Continue iteration, sometimes with descend=false + descendPattern := []bool{false, true, false, true, true} + for i := 0; iter.Next(descendPattern[i%len(descendPattern)]); i++ { + nodesVisited++ + if iter.Leaf() { + leafsFound[string(iter.LeafKey())] = true + } + } + + // We should still respect the prefix boundary even when skipping + prefix := []byte("a") + for key := range leafsFound { + if !bytes.HasPrefix([]byte(key), prefix) { + t.Errorf("Found key outside prefix when using descend=false: %s", key) + } + } + + // Should not have found "b" even if we skip some subtrees + if leafsFound["b"] { + t.Error("Iterator leaked outside prefix boundary with descend=false") + } + }) +} + func BenchmarkIterator(b *testing.B) { diskDb, srcDb, tr, _ := makeTestTrie(rawdb.HashScheme) root := tr.Hash() diff --git a/trie/proof_test.go b/trie/proof_test.go index b3c9dd753c..40c49f358b 100644 --- a/trie/proof_test.go +++ b/trie/proof_test.go @@ -109,7 +109,7 @@ func TestOneElementProof(t *testing.T) { t.Fatalf("prover %d: failed to verify proof: %v\nraw proof: %x", i, err, proof) } if !bytes.Equal(val, []byte("v")) { - t.Fatalf("prover %d: verified value mismatch: have %x, want 'k'", i, val) + t.Fatalf("prover %d: verified value mismatch: have %x, want 'v'", i, val) } } } diff --git a/trie/trie.go b/trie/trie.go index 630462f8ca..36cc732ee8 100644 --- a/trie/trie.go +++ b/trie/trie.go @@ -134,6 +134,34 @@ func (t *Trie) NodeIterator(start []byte) (NodeIterator, error) { return newNodeIterator(t, start), nil } +// NodeIteratorWithPrefix returns an iterator that returns nodes of the trie +// whose leaf keys start with the given prefix. Iteration includes all keys +// where prefix <= k < nextKey(prefix), effectively returning only keys that +// have the prefix. The iteration stops once it would encounter a key that +// doesn't start with the prefix. 
+// +// For example, with prefix "dog", the iterator will return "dog", "dogcat", +// "dogfish" but not "dot" or "fog". An empty prefix iterates the entire trie. +func (t *Trie) NodeIteratorWithPrefix(prefix []byte) (NodeIterator, error) { + // Short circuit if the trie is already committed and not usable. + if t.committed { + return nil, ErrCommitted + } + // Use the dedicated prefix iterator which handles prefix checking correctly + return newPrefixIterator(t, prefix) +} + +// NodeIteratorWithRange returns an iterator over trie nodes whose leaf keys +// fall within the specified range. It includes all keys where start <= k < end. +// Iteration stops once a key beyond the end boundary is encountered. +func (t *Trie) NodeIteratorWithRange(start, end []byte) (NodeIterator, error) { + // Short circuit if the trie is already committed and not usable. + if t.committed { + return nil, ErrCommitted + } + return newSubtreeIterator(t, start, end) +} + // MustGet is a wrapper of Get and will omit any encountered error but just // print out an error message. func (t *Trie) MustGet(key []byte) []byte { diff --git a/trie/trienode/node.go b/trie/trienode/node.go index c83dc27cef..228a64f04c 100644 --- a/trie/trienode/node.go +++ b/trie/trienode/node.go @@ -259,11 +259,24 @@ func (set *MergedNodeSet) Merge(other *NodeSet) error { return nil } -// Flatten returns a two-dimensional map for internal nodes. -func (set *MergedNodeSet) Flatten() map[common.Hash]map[string]*Node { +// Nodes returns a two-dimensional map for internal nodes. +func (set *MergedNodeSet) Nodes() map[common.Hash]map[string]*Node { nodes := make(map[common.Hash]map[string]*Node, len(set.Sets)) for owner, set := range set.Sets { nodes[owner] = set.Nodes } return nodes } + +// NodeAndOrigins returns a two-dimensional map for internal nodes along with +// their original values. 
+func (set *MergedNodeSet) NodeAndOrigins() (map[common.Hash]map[string]*Node, map[common.Hash]map[string][]byte) { + var ( + nodes = make(map[common.Hash]map[string]*Node, len(set.Sets)) + origins = make(map[common.Hash]map[string][]byte, len(set.Sets)) + ) + for owner, set := range set.Sets { + nodes[owner], origins[owner] = set.Nodes, set.Origins + } + return nodes, origins +} diff --git a/triedb/database.go b/triedb/database.go index e2f4334d6e..d2637bd909 100644 --- a/triedb/database.go +++ b/triedb/database.go @@ -375,3 +375,12 @@ func (db *Database) IsVerkle() bool { func (db *Database) Disk() ethdb.Database { return db.disk } + +// SnapshotCompleted returns the indicator if the snapshot is completed. +func (db *Database) SnapshotCompleted() bool { + pdb, ok := db.backend.(*pathdb.Database) + if !ok { + return false + } + return pdb.SnapshotCompleted() +} diff --git a/triedb/pathdb/config.go b/triedb/pathdb/config.go new file mode 100644 index 0000000000..3745a63edd --- /dev/null +++ b/triedb/pathdb/config.go @@ -0,0 +1,118 @@ +// Copyright 2025 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. 
+ +package pathdb + +import ( + "fmt" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/params" +) + +const ( + // defaultTrieCleanSize is the default memory allowance of clean trie cache. + defaultTrieCleanSize = 16 * 1024 * 1024 + + // defaultStateCleanSize is the default memory allowance of clean state cache. + defaultStateCleanSize = 16 * 1024 * 1024 + + // maxBufferSize is the maximum memory allowance of node buffer. + // Too large buffer will cause the system to pause for a long + // time when write happens. Also, the largest batch that pebble can + // support is 4GB, node will panic if batch size exceeds this limit. + maxBufferSize = 256 * 1024 * 1024 + + // defaultBufferSize is the default memory allowance of node buffer + // that aggregates the writes from above until it's flushed into the + // disk. It's meant to be used once the initial sync is finished. + // Do not increase the buffer size arbitrarily, otherwise the system + // pause time will increase when the database writes happen. + defaultBufferSize = 64 * 1024 * 1024 +) + +var ( + // maxDiffLayers is the maximum diff layers allowed in the layer tree. + maxDiffLayers = 128 +) + +// Defaults contains default settings for Ethereum mainnet. +var Defaults = &Config{ + StateHistory: params.FullImmutabilityThreshold, + EnableStateIndexing: false, + TrieCleanSize: defaultTrieCleanSize, + StateCleanSize: defaultStateCleanSize, + WriteBufferSize: defaultBufferSize, +} + +// ReadOnly is the config in order to open database in read only mode. +var ReadOnly = &Config{ + ReadOnly: true, + TrieCleanSize: defaultTrieCleanSize, + StateCleanSize: defaultStateCleanSize, +} + +// Config contains the settings for database. 
+type Config struct { + StateHistory uint64 // Number of recent blocks to maintain state history for, 0: full chain + EnableStateIndexing bool // Whether to enable state history indexing for external state access + TrieCleanSize int // Maximum memory allowance (in bytes) for caching clean trie data + StateCleanSize int // Maximum memory allowance (in bytes) for caching clean state data + WriteBufferSize int // Maximum memory allowance (in bytes) for write buffer + ReadOnly bool // Flag whether the database is opened in read only mode + JournalDirectory string // Absolute path of journal directory (null means the journal data is persisted in key-value store) + + // Testing configurations + SnapshotNoBuild bool // Flag Whether the state generation is disabled + NoAsyncFlush bool // Flag whether the background buffer flushing is disabled + NoAsyncGeneration bool // Flag whether the background generation is disabled +} + +// sanitize checks the provided user configurations and changes anything that's +// unreasonable or unworkable. +func (c *Config) sanitize() *Config { + conf := *c + if conf.WriteBufferSize > maxBufferSize { + log.Warn("Sanitizing invalid node buffer size", "provided", common.StorageSize(conf.WriteBufferSize), "updated", common.StorageSize(maxBufferSize)) + conf.WriteBufferSize = maxBufferSize + } + return &conf +} + +// fields returns a list of attributes of config for printing. 
+func (c *Config) fields() []interface{} { + var list []interface{} + if c.ReadOnly { + list = append(list, "readonly", true) + } + list = append(list, "triecache", common.StorageSize(c.TrieCleanSize)) + list = append(list, "statecache", common.StorageSize(c.StateCleanSize)) + list = append(list, "buffer", common.StorageSize(c.WriteBufferSize)) + + if c.StateHistory == 0 { + list = append(list, "state-history", "entire chain") + } else { + list = append(list, "state-history", fmt.Sprintf("last %d blocks", c.StateHistory)) + } + if c.EnableStateIndexing { + list = append(list, "index-history", true) + } + if c.JournalDirectory != "" { + list = append(list, "journal-dir", c.JournalDirectory) + } + return list +} diff --git a/triedb/pathdb/database.go b/triedb/pathdb/database.go index f438c64aa2..546d2e0301 100644 --- a/triedb/pathdb/database.go +++ b/triedb/pathdb/database.go @@ -31,37 +31,10 @@ import ( "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/log" - "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/trie/trienode" "github.com/ethereum/go-verkle" ) -const ( - // defaultTrieCleanSize is the default memory allowance of clean trie cache. - defaultTrieCleanSize = 16 * 1024 * 1024 - - // defaultStateCleanSize is the default memory allowance of clean state cache. - defaultStateCleanSize = 16 * 1024 * 1024 - - // maxBufferSize is the maximum memory allowance of node buffer. - // Too large buffer will cause the system to pause for a long - // time when write happens. Also, the largest batch that pebble can - // support is 4GB, node will panic if batch size exceeds this limit. - maxBufferSize = 256 * 1024 * 1024 - - // defaultBufferSize is the default memory allowance of node buffer - // that aggregates the writes from above until it's flushed into the - // disk. It's meant to be used once the initial sync is finished. 
- // Do not increase the buffer size arbitrarily, otherwise the system - // pause time will increase when the database writes happen. - defaultBufferSize = 64 * 1024 * 1024 -) - -var ( - // maxDiffLayers is the maximum diff layers allowed in the layer tree. - maxDiffLayers = 128 -) - // layer is the interface implemented by all state layers which includes some // public methods and some additional methods for internal usage. type layer interface { @@ -105,7 +78,7 @@ type layer interface { // the provided dirty trie nodes along with the state change set. // // Note, the maps are retained by the method to avoid copying everything. - update(root common.Hash, id uint64, block uint64, nodes *nodeSet, states *StateSetWithOrigin) *diffLayer + update(root common.Hash, id uint64, block uint64, nodes *nodeSetWithOrigin, states *StateSetWithOrigin) *diffLayer // journal commits an entire diff hierarchy to disk into a single journal entry. // This is meant to be used during shutdown to persist the layer without @@ -113,68 +86,6 @@ type layer interface { journal(w io.Writer) error } -// Config contains the settings for database. 
-type Config struct { - StateHistory uint64 // Number of recent blocks to maintain state history for - EnableStateIndexing bool // Whether to enable state history indexing for external state access - TrieCleanSize int // Maximum memory allowance (in bytes) for caching clean trie nodes - StateCleanSize int // Maximum memory allowance (in bytes) for caching clean state data - WriteBufferSize int // Maximum memory allowance (in bytes) for write buffer - ReadOnly bool // Flag whether the database is opened in read only mode - JournalDirectory string // Absolute path of journal directory (null means the journal data is persisted in key-value store) - - // Testing configurations - SnapshotNoBuild bool // Flag Whether the state generation is allowed - NoAsyncFlush bool // Flag whether the background buffer flushing is allowed - NoAsyncGeneration bool // Flag whether the background generation is allowed -} - -// sanitize checks the provided user configurations and changes anything that's -// unreasonable or unworkable. -func (c *Config) sanitize() *Config { - conf := *c - if conf.WriteBufferSize > maxBufferSize { - log.Warn("Sanitizing invalid node buffer size", "provided", common.StorageSize(conf.WriteBufferSize), "updated", common.StorageSize(maxBufferSize)) - conf.WriteBufferSize = maxBufferSize - } - return &conf -} - -// fields returns a list of attributes of config for printing. 
-func (c *Config) fields() []interface{} { - var list []interface{} - if c.ReadOnly { - list = append(list, "readonly", true) - } - if c.SnapshotNoBuild { - list = append(list, "snapshot", false) - } - list = append(list, "triecache", common.StorageSize(c.TrieCleanSize)) - list = append(list, "statecache", common.StorageSize(c.StateCleanSize)) - list = append(list, "buffer", common.StorageSize(c.WriteBufferSize)) - - if c.StateHistory == 0 { - list = append(list, "history", "entire chain") - } else { - list = append(list, "history", fmt.Sprintf("last %d blocks", c.StateHistory)) - } - if c.JournalDirectory != "" { - list = append(list, "journal-dir", c.JournalDirectory) - } - return list -} - -// Defaults contains default settings for Ethereum mainnet. -var Defaults = &Config{ - StateHistory: params.FullImmutabilityThreshold, - TrieCleanSize: defaultTrieCleanSize, - StateCleanSize: defaultStateCleanSize, - WriteBufferSize: defaultBufferSize, -} - -// ReadOnly is the config in order to open database in read only mode. -var ReadOnly = &Config{ReadOnly: true} - // nodeHasher is the function to compute the hash of supplied node blob. type nodeHasher func([]byte) (common.Hash, error) @@ -278,7 +189,7 @@ func New(diskdb ethdb.Database, config *Config, isVerkle bool) *Database { } // TODO (rjl493456442) disable the background indexing in read-only mode if db.stateFreezer != nil && db.config.EnableStateIndexing { - db.stateIndexer = newHistoryIndexer(db.diskdb, db.stateFreezer, db.tree.bottom().stateID()) + db.stateIndexer = newHistoryIndexer(db.diskdb, db.stateFreezer, db.tree.bottom().stateID(), typeStateHistory) log.Info("Enabled state history indexing") } fields := config.fields() @@ -334,7 +245,7 @@ func (db *Database) repairHistory() error { } // Truncate the extra state histories above in freezer in case it's not // aligned with the disk layer. It might happen after a unclean shutdown. 
- pruned, err := truncateFromHead(db.stateFreezer, id) + pruned, err := truncateFromHead(db.stateFreezer, typeStateHistory, id) if err != nil { log.Crit("Failed to truncate extra state histories", "err", err) } @@ -422,7 +333,8 @@ func (db *Database) Update(root common.Hash, parentRoot common.Hash, block uint6 if err := db.modifyAllowed(); err != nil { return err } - if err := db.tree.add(root, parentRoot, block, nodes, states); err != nil { + // TODO(rjl493456442) tracking the origins in the following PRs. + if err := db.tree.add(root, parentRoot, block, NewNodeSetWithOrigin(nodes.Nodes(), nil), states); err != nil { return err } // Keep 128 diff layers in the memory, persistent layer is 129th. @@ -536,7 +448,7 @@ func (db *Database) Enable(root common.Hash) error { // 2. Re-initialize the indexer so it starts indexing from the new state root. if db.stateIndexer != nil && db.stateFreezer != nil && db.config.EnableStateIndexing { db.stateIndexer.close() - db.stateIndexer = newHistoryIndexer(db.diskdb, db.stateFreezer, db.tree.bottom().stateID()) + db.stateIndexer = newHistoryIndexer(db.diskdb, db.stateFreezer, db.tree.bottom().stateID(), typeStateHistory) log.Info("Re-enabled state history indexing") } log.Info("Rebuilt trie database", "root", root) @@ -590,7 +502,7 @@ func (db *Database) Recover(root common.Hash) error { if err := db.diskdb.SyncKeyValue(); err != nil { return err } - _, err := truncateFromHead(db.stateFreezer, dl.stateID()) + _, err := truncateFromHead(db.stateFreezer, typeStateHistory, dl.stateID()) if err != nil { return err } @@ -769,3 +681,14 @@ func (db *Database) StorageIterator(root common.Hash, account common.Hash, seek } return newFastStorageIterator(db, root, account, seek) } + +// SnapshotCompleted returns the flag indicating if the snapshot generation is completed. 
+func (db *Database) SnapshotCompleted() bool { + db.lock.RLock() + wait := db.waitSync + db.lock.RUnlock() + if wait { + return false + } + return db.tree.bottom().genComplete() +} diff --git a/triedb/pathdb/database_test.go b/triedb/pathdb/database_test.go index 47d13e54a4..8cca7b1b3c 100644 --- a/triedb/pathdb/database_test.go +++ b/triedb/pathdb/database_test.go @@ -36,9 +36,10 @@ import ( "github.com/ethereum/go-ethereum/trie" "github.com/ethereum/go-ethereum/trie/trienode" "github.com/holiman/uint256" + "golang.org/x/exp/maps" ) -func updateTrie(db *Database, stateRoot common.Hash, addrHash common.Hash, root common.Hash, dirties map[common.Hash][]byte) (common.Hash, *trienode.NodeSet) { +func updateTrie(db *Database, stateRoot common.Hash, addrHash common.Hash, root common.Hash, entries map[common.Hash][]byte) (common.Hash, *trienode.NodeSet) { var id *trie.ID if addrHash == (common.Hash{}) { id = trie.StateTrieID(stateRoot) @@ -49,13 +50,17 @@ func updateTrie(db *Database, stateRoot common.Hash, addrHash common.Hash, root if err != nil { panic(fmt.Errorf("failed to load trie, err: %w", err)) } - for key, val := range dirties { + var deletes []common.Hash + for key, val := range entries { if len(val) == 0 { - tr.Delete(key.Bytes()) + deletes = append(deletes, key) } else { tr.Update(key.Bytes(), val) } } + for _, key := range deletes { + tr.Delete(key.Bytes()) + } return tr.Commit(false) } @@ -72,16 +77,18 @@ const ( createAccountOp int = iota modifyAccountOp deleteAccountOp + resurrectAccountOp opLen ) +// genctx carries the generation context used within a single state transition. 
type genctx struct { stateRoot common.Hash accounts map[common.Hash][]byte // Keyed by the hash of account address storages map[common.Hash]map[common.Hash][]byte // Keyed by the hash of account address and the hash of storage key accountOrigin map[common.Address][]byte // Keyed by the account address storageOrigin map[common.Address]map[common.Hash][]byte // Keyed by the account address and the hash of storage key - nodes *trienode.MergedNodeSet + nodes *trienode.MergedNodeSet // Trie nodes produced from the state transition } func newCtx(stateRoot common.Hash) *genctx { @@ -114,6 +121,8 @@ func (ctx *genctx) storageOriginSet(rawStorageKey bool, t *tester) map[common.Ad type tester struct { db *Database roots []common.Hash + nodes []*trienode.MergedNodeSet + states []*StateSetWithOrigin preimages map[common.Hash][]byte // current state set @@ -123,20 +132,57 @@ type tester struct { // state snapshots snapAccounts map[common.Hash]map[common.Hash][]byte // Keyed by the hash of account address snapStorages map[common.Hash]map[common.Hash]map[common.Hash][]byte // Keyed by the hash of account address and the hash of storage key + + // trienode snapshots + snapNodes map[common.Hash]*trienode.MergedNodeSet } -func newTester(t *testing.T, historyLimit uint64, isVerkle bool, layers int, enableIndex bool, journalDir string) *tester { +// testerConfig holds configuration parameters for running a test scenario. 
+type testerConfig struct { + stateHistory uint64 // Number of historical states to retain + layers int // Number of state transitions to generate for + enableIndex bool // Enable state history indexing or not + journalDir string // Directory path for persisting journal files + isVerkle bool // Enables Verkle trie mode if true + + writeBuffer *int // Optional, the size of memory allocated for write buffer + trieCache *int // Optional, the size of memory allocated for trie cache + stateCache *int // Optional, the size of memory allocated for state cache +} + +func (c *testerConfig) trieCacheSize() int { + if c.trieCache != nil { + return *c.trieCache + } + return 256 * 1024 +} + +func (c *testerConfig) stateCacheSize() int { + if c.stateCache != nil { + return *c.stateCache + } + return 256 * 1024 +} + +func (c *testerConfig) writeBufferSize() int { + if c.writeBuffer != nil { + return *c.writeBuffer + } + return 256 * 1024 +} + +func newTester(t *testing.T, config *testerConfig) *tester { var ( disk, _ = rawdb.Open(rawdb.NewMemoryDatabase(), rawdb.OpenOptions{Ancient: t.TempDir()}) db = New(disk, &Config{ - StateHistory: historyLimit, - EnableStateIndexing: enableIndex, - TrieCleanSize: 256 * 1024, - StateCleanSize: 256 * 1024, - WriteBufferSize: 256 * 1024, + StateHistory: config.stateHistory, + EnableStateIndexing: config.enableIndex, + TrieCleanSize: config.trieCacheSize(), + StateCleanSize: config.stateCacheSize(), + WriteBufferSize: config.writeBufferSize(), NoAsyncFlush: true, - JournalDirectory: journalDir, - }, isVerkle) + JournalDirectory: config.journalDir, + }, config.isVerkle) obj = &tester{ db: db, @@ -145,9 +191,10 @@ func newTester(t *testing.T, historyLimit uint64, isVerkle bool, layers int, ena storages: make(map[common.Hash]map[common.Hash][]byte), snapAccounts: make(map[common.Hash]map[common.Hash][]byte), snapStorages: make(map[common.Hash]map[common.Hash]map[common.Hash][]byte), + snapNodes: make(map[common.Hash]*trienode.MergedNodeSet), } ) - 
for i := 0; i < layers; i++ { + for i := 0; i < config.layers; i++ { var parent = types.EmptyRootHash if len(obj.roots) != 0 { parent = obj.roots[len(obj.roots)-1] @@ -158,6 +205,8 @@ func newTester(t *testing.T, historyLimit uint64, isVerkle bool, layers int, ena panic(fmt.Errorf("failed to update state changes, err: %w", err)) } obj.roots = append(obj.roots, root) + obj.nodes = append(obj.nodes, nodes) + obj.states = append(obj.states, states) } return obj } @@ -181,6 +230,8 @@ func (t *tester) extend(layers int) { panic(fmt.Errorf("failed to update state changes, err: %w", err)) } t.roots = append(t.roots, root) + t.nodes = append(t.nodes, nodes) + t.states = append(t.states, states) } } @@ -270,10 +321,53 @@ func (t *tester) clearStorage(ctx *genctx, addr common.Address, root common.Hash return root } +func (t *tester) resurrectStorage(ctx *genctx, addr common.Address, old map[common.Hash][]byte) common.Hash { + var ( + addrHash = crypto.Keccak256Hash(addr.Bytes()) + storage = make(map[common.Hash][]byte) + origin = make(map[common.Hash][]byte) + ) + for i := 0; i < 3; i++ { + v, _ := rlp.EncodeToBytes(common.TrimLeftZeroes(testrand.Bytes(32))) + key := testrand.Bytes(32) + hash := crypto.Keccak256Hash(key) + t.preimages[hash] = key + + storage[hash] = v + origin[hash] = nil + } + var cnt int + for khash := range old { + cnt += 1 + v, _ := rlp.EncodeToBytes(common.TrimLeftZeroes(testrand.Bytes(32))) + + storage[khash] = v + origin[khash] = old[khash] + if cnt >= 3 { + break + } + } + root, set := updateTrie(t.db, ctx.stateRoot, addrHash, types.EmptyRootHash, storage) + + maps.Copy(ctx.storages[addrHash], storage) + if ctx.storageOrigin[addr] == nil { + ctx.storageOrigin[addr] = make(map[common.Hash][]byte) + } + for k, v := range origin { + if _, exists := ctx.storageOrigin[addr][k]; !exists { + ctx.storageOrigin[addr][k] = v + } + } + ctx.nodes.Merge(set) + return root +} + func (t *tester) generate(parent common.Hash, rawStorageKey bool) (common.Hash, 
*trienode.MergedNodeSet, *StateSetWithOrigin) { var ( - ctx = newCtx(parent) - dirties = make(map[common.Hash]struct{}) + ctx = newCtx(parent) + dirties = make(map[common.Hash]struct{}) + deleted = make(map[common.Address]struct{}) + resurrect = make(map[common.Address]struct{}) ) for i := 0; i < 20; i++ { // Start with account creation always @@ -336,6 +430,7 @@ func (t *tester) generate(parent common.Hash, rawStorageKey bool) (common.Hash, continue } dirties[addrHash] = struct{}{} + deleted[addr] = struct{}{} acct, _ := types.FullAccount(account) if acct.Root != types.EmptyRootHash { @@ -343,6 +438,25 @@ func (t *tester) generate(parent common.Hash, rawStorageKey bool) (common.Hash, } ctx.accounts[addrHash] = nil ctx.accountOrigin[addr] = account + + case resurrectAccountOp: + if len(deleted) == 0 { + continue + } + addresses := maps.Keys(deleted) + addr := addresses[rand.Intn(len(addresses))] + if _, exist := resurrect[addr]; exist { + continue + } + resurrect[addr] = struct{}{} + + addrHash := crypto.Keccak256Hash(addr.Bytes()) + root := t.resurrectStorage(ctx, addr, t.storages[addrHash]) + ctx.accounts[addrHash] = types.SlimAccountRLP(generateAccount(root)) + if _, exist := ctx.accountOrigin[addr]; !exist { + ctx.accountOrigin[addr] = nil + } + t.preimages[addrHash] = addr.Bytes() } } root, set := updateTrie(t.db, parent, common.Hash{}, parent, ctx.accounts) @@ -351,6 +465,7 @@ func (t *tester) generate(parent common.Hash, rawStorageKey bool) (common.Hash, // Save state snapshot before commit t.snapAccounts[parent] = copyAccounts(t.accounts) t.snapStorages[parent] = copyStorages(t.storages) + t.snapNodes[parent] = ctx.nodes // Commit all changes to live state set for addrHash, account := range ctx.accounts { @@ -470,8 +585,7 @@ func TestDatabaseRollback(t *testing.T) { maxDiffLayers = 128 }() - // Verify state histories - tester := newTester(t, 0, false, 32, false, "") + tester := newTester(t, &testerConfig{layers: 32}) defer tester.release() if err := 
tester.verifyHistory(); err != nil { @@ -505,7 +619,7 @@ func TestDatabaseRecoverable(t *testing.T) { }() var ( - tester = newTester(t, 0, false, 12, false, "") + tester = newTester(t, &testerConfig{layers: 12}) index = tester.bottomIndex() ) defer tester.release() @@ -526,7 +640,7 @@ func TestDatabaseRecoverable(t *testing.T) { // Layers below current disk layer are recoverable {tester.roots[index-1], true}, - // Disklayer itself is not recoverable, since it's + // Disk layer itself is not recoverable, since it's // available for accessing. {tester.roots[index], false}, @@ -542,6 +656,59 @@ func TestDatabaseRecoverable(t *testing.T) { } } +func TestExecuteRollback(t *testing.T) { + // Redefine the diff layer depth allowance for faster testing. + maxDiffLayers = 4 + defer func() { + maxDiffLayers = 128 + }() + + tester := newTester(t, &testerConfig{layers: 32}) + defer tester.release() + + // Revert database from top to bottom + for i := tester.bottomIndex(); i >= 0; i-- { + dl := tester.db.tree.bottom() + h, err := readStateHistory(tester.db.stateFreezer, dl.stateID()) + if err != nil { + t.Fatalf("Failed to read history, err: %v", err) + } + nodes, err := apply(tester.db, h.meta.parent, h.meta.root, h.meta.version == stateHistoryV1, h.accounts, h.storages) + if err != nil { + t.Fatalf("Failed to apply history, err: %v", err) + } + + // Verify the produced node set, ensuring they are aligned with the + // tracked dirty nodes. 
+ want := tester.snapNodes[h.meta.parent] + if len(nodes) != len(want.Sets) { + t.Fatalf("Unexpected node sets, want: %d, got: %d", len(want.Sets), len(nodes)) + } + for owner, setA := range nodes { + setB, ok := want.Sets[owner] + if !ok { + t.Fatalf("Excessive nodeset, %x", owner) + } + if len(setA) != len(setB.Origins) { + t.Fatalf("Unexpected origins, want: %d, got: %d", len(setA), len(setB.Origins)) + } + for k, nA := range setA { + nB, ok := setB.Origins[k] + if !ok { + t.Fatalf("Excessive node, %v", []byte(k)) + } + if !bytes.Equal(nA.Blob, nB) { + t.Fatalf("Unexpected node value, want: %v, got: %v", nA.Blob, nB) + } + } + } + + if err := tester.db.Recover(h.meta.parent); err != nil { + t.Fatalf("Failed to recover db, err: %v", err) + } + } +} + func TestDisable(t *testing.T) { // Redefine the diff layer depth allowance for faster testing. maxDiffLayers = 4 @@ -549,7 +716,7 @@ func TestDisable(t *testing.T) { maxDiffLayers = 128 }() - tester := newTester(t, 0, false, 32, false, "") + tester := newTester(t, &testerConfig{layers: 32}) defer tester.release() stored := crypto.Keccak256Hash(rawdb.ReadAccountTrieNode(tester.db.diskdb, nil)) @@ -563,10 +730,6 @@ func TestDisable(t *testing.T) { t.Fatalf("Failed to activate database: %v", err) } - // Ensure journal is deleted from disk - if blob := rawdb.ReadTrieJournal(tester.db.diskdb); len(blob) != 0 { - t.Fatal("Failed to clean journal") - } // Ensure all trie histories are removed n, err := tester.db.stateFreezer.Ancients() if err != nil { @@ -591,7 +754,7 @@ func TestCommit(t *testing.T) { maxDiffLayers = 128 }() - tester := newTester(t, 0, false, 12, false, "") + tester := newTester(t, &testerConfig{layers: 12}) defer tester.release() if err := tester.db.Commit(tester.lastHash(), false); err != nil { @@ -626,7 +789,7 @@ func testJournal(t *testing.T, journalDir string) { maxDiffLayers = 128 }() - tester := newTester(t, 0, false, 12, false, journalDir) + tester := newTester(t, &testerConfig{layers: 12, 
journalDir: journalDir}) defer tester.release() if err := tester.db.Journal(tester.lastHash()); err != nil { @@ -673,7 +836,7 @@ func testCorruptedJournal(t *testing.T, journalDir string, modifyFn func(databas maxDiffLayers = 128 }() - tester := newTester(t, 0, false, 12, false, journalDir) + tester := newTester(t, &testerConfig{layers: 12, journalDir: journalDir}) defer tester.release() if err := tester.db.Journal(tester.lastHash()); err != nil { @@ -718,7 +881,7 @@ func TestTailTruncateHistory(t *testing.T) { maxDiffLayers = 128 }() - tester := newTester(t, 10, false, 12, false, "") + tester := newTester(t, &testerConfig{layers: 12, stateHistory: 10}) defer tester.release() tester.db.Close() @@ -754,3 +917,107 @@ func copyStorages(set map[common.Hash]map[common.Hash][]byte) map[common.Hash]ma } return copied } + +func TestDatabaseIndexRecovery(t *testing.T) { + maxDiffLayers = 4 + defer func() { + maxDiffLayers = 128 + }() + + //log.SetDefault(log.NewLogger(log.NewTerminalHandlerWithLevel(os.Stderr, log.LevelDebug, true))) + writeBuffer := 512 * 1024 + config := &testerConfig{ + layers: 64, + enableIndex: true, + writeBuffer: &writeBuffer, + } + env := newTester(t, config) + defer env.release() + + // Ensure the buffer in disk layer is not empty + var ( + bRoot = env.db.tree.bottom().rootHash() + dRoot = crypto.Keccak256Hash(rawdb.ReadAccountTrieNode(env.db.diskdb, nil)) + ) + for dRoot == bRoot { + env.extend(1) + + bRoot = env.db.tree.bottom().rootHash() + dRoot = crypto.Keccak256Hash(rawdb.ReadAccountTrieNode(env.db.diskdb, nil)) + } + waitIndexing(env.db) + + var ( + dIndex int + roots = env.roots + hr = newHistoryReader(env.db.diskdb, env.db.stateFreezer) + ) + for i, root := range roots { + if root == dRoot { + dIndex = i + } + if root == bRoot { + break + } + if err := checkHistoricalState(env, root, uint64(i+1), hr); err != nil { + t.Fatal(err) + } + } + + // Terminate the database and mutate the journal, it's for simulating + // the unclean shutdown + 
env.db.Journal(env.lastHash()) + env.db.Close() + + // Mutate the journal in disk, it should be regarded as invalid + blob := rawdb.ReadTrieJournal(env.db.diskdb) + blob[0] = 0xa + rawdb.WriteTrieJournal(env.db.diskdb, blob) + + // Reload the database, the extra state histories should be removed + env.db = New(env.db.diskdb, env.db.config, false) + + for i := range roots { + _, err := readStateHistory(env.db.stateFreezer, uint64(i+1)) + if i <= dIndex && err != nil { + t.Fatalf("State history is not found, %d", i) + } + if i > dIndex && err == nil { + t.Fatalf("Unexpected state history found, %d", i) + } + } + remain, err := env.db.IndexProgress() + if err != nil { + t.Fatalf("Failed to obtain the progress, %v", err) + } + if remain == 0 { + t.Fatalf("Unexpected progress remain, %d", remain) + } + + // Apply new states on top, ensuring state indexing can respond correctly + for i := dIndex + 1; i < len(roots); i++ { + if err := env.db.Update(roots[i], roots[i-1], uint64(i), env.nodes[i], env.states[i]); err != nil { + panic(fmt.Errorf("failed to update state changes, err: %w", err)) + } + } + remain, err = env.db.IndexProgress() + if err != nil { + t.Fatalf("Failed to obtain the progress, %v", err) + } + if remain != 0 { + t.Fatalf("Unexpected progress remain, %d", remain) + } + waitIndexing(env.db) + + // Ensure the truncated state histories become accessible + bRoot = env.db.tree.bottom().rootHash() + hr = newHistoryReader(env.db.diskdb, env.db.stateFreezer) + for i, root := range roots { + if root == bRoot { + break + } + if err := checkHistoricalState(env, root, uint64(i+1), hr); err != nil { + t.Fatal(err) + } + } +} diff --git a/triedb/pathdb/difflayer.go b/triedb/pathdb/difflayer.go index ac05b4a0fb..ae523c979c 100644 --- a/triedb/pathdb/difflayer.go +++ b/triedb/pathdb/difflayer.go @@ -34,7 +34,7 @@ type diffLayer struct { root common.Hash // Root hash to which this layer diff belongs to id uint64 // Corresponding state id block uint64 // Associated block 
number - nodes *nodeSet // Cached trie nodes indexed by owner and path + nodes *nodeSetWithOrigin // Cached trie nodes indexed by owner and path states *StateSetWithOrigin // Associated state changes along with origin value parent layer // Parent layer modified by this one, never nil, **can be changed** @@ -42,7 +42,7 @@ type diffLayer struct { } // newDiffLayer creates a new diff layer on top of an existing layer. -func newDiffLayer(parent layer, root common.Hash, id uint64, block uint64, nodes *nodeSet, states *StateSetWithOrigin) *diffLayer { +func newDiffLayer(parent layer, root common.Hash, id uint64, block uint64, nodes *nodeSetWithOrigin, states *StateSetWithOrigin) *diffLayer { dl := &diffLayer{ root: root, id: id, @@ -151,7 +151,7 @@ func (dl *diffLayer) storage(accountHash, storageHash common.Hash, depth int) ([ // update implements the layer interface, creating a new layer on top of the // existing layer tree with the specified data items. -func (dl *diffLayer) update(root common.Hash, id uint64, block uint64, nodes *nodeSet, states *StateSetWithOrigin) *diffLayer { +func (dl *diffLayer) update(root common.Hash, id uint64, block uint64, nodes *nodeSetWithOrigin, states *StateSetWithOrigin) *diffLayer { return newDiffLayer(dl, root, id, block, nodes, states) } diff --git a/triedb/pathdb/difflayer_test.go b/triedb/pathdb/difflayer_test.go index 83ed833486..0484fd9644 100644 --- a/triedb/pathdb/difflayer_test.go +++ b/triedb/pathdb/difflayer_test.go @@ -76,7 +76,7 @@ func benchmarkSearch(b *testing.B, depth int, total int) { nblob = common.CopyBytes(blob) } } - return newDiffLayer(parent, common.Hash{}, 0, 0, newNodeSet(nodes), NewStateSetWithOrigin(nil, nil, nil, nil, false)) + return newDiffLayer(parent, common.Hash{}, 0, 0, NewNodeSetWithOrigin(nodes, nil), NewStateSetWithOrigin(nil, nil, nil, nil, false)) } var layer layer layer = emptyLayer() @@ -118,7 +118,7 @@ func BenchmarkPersist(b *testing.B) { ) nodes[common.Hash{}][string(path)] = node } - 
return newDiffLayer(parent, common.Hash{}, 0, 0, newNodeSet(nodes), NewStateSetWithOrigin(nil, nil, nil, nil, false)) + return newDiffLayer(parent, common.Hash{}, 0, 0, NewNodeSetWithOrigin(nodes, nil), NewStateSetWithOrigin(nil, nil, nil, nil, false)) } for i := 0; i < b.N; i++ { b.StopTimer() @@ -156,7 +156,7 @@ func BenchmarkJournal(b *testing.B) { ) nodes[common.Hash{}][string(path)] = node } - return newDiffLayer(parent, common.Hash{}, 0, 0, newNodeSet(nodes), NewStateSetWithOrigin(nil, nil, nil, nil, false)) + return newDiffLayer(parent, common.Hash{}, 0, 0, NewNodeSetWithOrigin(nodes, nil), NewStateSetWithOrigin(nil, nil, nil, nil, false)) } var layer layer layer = emptyLayer() diff --git a/triedb/pathdb/disklayer.go b/triedb/pathdb/disklayer.go index 13df6251e8..76f3f5a46e 100644 --- a/triedb/pathdb/disklayer.go +++ b/triedb/pathdb/disklayer.go @@ -319,7 +319,7 @@ func (dl *diskLayer) storage(accountHash, storageHash common.Hash, depth int) ([ // update implements the layer interface, returning a new diff layer on top // with the given state set. 
-func (dl *diskLayer) update(root common.Hash, id uint64, block uint64, nodes *nodeSet, states *StateSetWithOrigin) *diffLayer { +func (dl *diskLayer) update(root common.Hash, id uint64, block uint64, nodes *nodeSetWithOrigin, states *StateSetWithOrigin) *diffLayer { return newDiffLayer(dl, root, id, block, nodes, states) } @@ -378,7 +378,7 @@ func (dl *diskLayer) writeStateHistory(diff *diffLayer) (bool, error) { log.Debug("Skip tail truncation", "persistentID", persistentID, "tailID", tail+1, "headID", diff.stateID(), "limit", limit) return true, nil } - pruned, err := truncateFromTail(dl.db.stateFreezer, newFirst-1) + pruned, err := truncateFromTail(dl.db.stateFreezer, typeStateHistory, newFirst-1) if err != nil { return false, err } @@ -413,7 +413,7 @@ func (dl *diskLayer) commit(bottom *diffLayer, force bool) (*diskLayer, error) { // Merge the trie nodes and flat states of the bottom-most diff layer into the // buffer as the combined layer. - combined := dl.buffer.commit(bottom.nodes, bottom.states.stateSet) + combined := dl.buffer.commit(bottom.nodes.nodeSet, bottom.states.stateSet) // Terminate the background state snapshot generation before mutating the // persistent state. 
diff --git a/triedb/pathdb/execute.go b/triedb/pathdb/execute.go index aa4bd8b44b..4c1cafec12 100644 --- a/triedb/pathdb/execute.go +++ b/triedb/pathdb/execute.go @@ -59,13 +59,19 @@ func apply(db database.NodeDatabase, prevRoot common.Hash, postRoot common.Hash, rawStorageKey: rawStorageKey, nodes: trienode.NewMergedNodeSet(), } + var deletes []common.Address for addr, account := range accounts { - var err error if len(account) == 0 { - err = deleteAccount(ctx, db, addr) + deletes = append(deletes, addr) } else { - err = updateAccount(ctx, db, addr) + err := updateAccount(ctx, db, addr) + if err != nil { + return nil, fmt.Errorf("failed to revert state, err: %w", err) + } } + } + for _, addr := range deletes { + err := deleteAccount(ctx, db, addr) if err != nil { return nil, fmt.Errorf("failed to revert state, err: %w", err) } @@ -77,7 +83,7 @@ func apply(db database.NodeDatabase, prevRoot common.Hash, postRoot common.Hash, if err := ctx.nodes.Merge(result); err != nil { return nil, err } - return ctx.nodes.Flatten(), nil + return ctx.nodes.Nodes(), nil } // updateAccount the account was present in prev-state, and may or may not @@ -108,17 +114,23 @@ func updateAccount(ctx *context, db database.NodeDatabase, addr common.Address) if err != nil { return err } + var deletes []common.Hash for key, val := range ctx.storages[addr] { tkey := key if ctx.rawStorageKey { tkey = crypto.Keccak256Hash(key.Bytes()) } - var err error if len(val) == 0 { - err = st.Delete(tkey.Bytes()) + deletes = append(deletes, tkey) } else { - err = st.Update(tkey.Bytes(), val) + err := st.Update(tkey.Bytes(), val) + if err != nil { + return err + } } + } + for _, tkey := range deletes { + err := st.Delete(tkey.Bytes()) if err != nil { return err } diff --git a/triedb/pathdb/history.go b/triedb/pathdb/history.go index bbedd52f34..81b843d9f1 100644 --- a/triedb/pathdb/history.go +++ b/triedb/pathdb/history.go @@ -19,11 +19,147 @@ package pathdb import ( "errors" "fmt" + "iter" + 
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/log" ) +// historyType represents the category of historical data. +type historyType uint8 + +const ( + // typeStateHistory indicates history data related to account or storage changes. + typeStateHistory historyType = 0 +) + +// String returns the string format representation. +func (h historyType) String() string { + switch h { + case typeStateHistory: + return "state" + default: + return fmt.Sprintf("unknown type: %d", h) + } +} + +// elementType represents the category of state element. +type elementType uint8 + +const ( + typeAccount elementType = 0 // represents the account data + typeStorage elementType = 1 // represents the storage slot data +) + +// String returns the string format representation. +func (e elementType) String() string { + switch e { + case typeAccount: + return "account" + case typeStorage: + return "storage" + default: + return fmt.Sprintf("unknown element type: %d", e) + } +} + +// toHistoryType maps an element type to its corresponding history type. +func toHistoryType(typ elementType) historyType { + if typ == typeAccount || typ == typeStorage { + return typeStateHistory + } + panic(fmt.Sprintf("unknown element type %v", typ)) +} + +// stateIdent represents the identifier of a state element, which can be +// an account or a storage slot. +type stateIdent struct { + typ elementType + + // The hash of the account address. This is used instead of the raw account + // address is to align the traversal order with the Merkle-Patricia-Trie. + addressHash common.Hash + + // The hash of the storage slot key. This is used instead of the raw slot key + // because, in legacy state histories (prior to the Cancun fork), the slot + // identifier is the hash of the key, and the original key (preimage) cannot + // be recovered. To maintain backward compatibility, the key hash is used. 
+ // + // Meanwhile, using the storage key hash also preserve the traversal order + // with Merkle-Patricia-Trie. + // + // This field is null if the identifier refers to an account or a trie node. + storageHash common.Hash +} + +// String returns the string format state identifier. +func (ident stateIdent) String() string { + if ident.typ == typeAccount { + return ident.addressHash.Hex() + } + return ident.addressHash.Hex() + ident.storageHash.Hex() +} + +// newAccountIdent constructs a state identifier for an account. +func newAccountIdent(addressHash common.Hash) stateIdent { + return stateIdent{ + typ: typeAccount, + addressHash: addressHash, + } +} + +// newStorageIdent constructs a state identifier for a storage slot. +// The address denotes the address hash of the associated account; +// the storageHash denotes the hash of the raw storage slot key; +func newStorageIdent(addressHash common.Hash, storageHash common.Hash) stateIdent { + return stateIdent{ + typ: typeStorage, + addressHash: addressHash, + storageHash: storageHash, + } +} + +// stateIdentQuery is the extension of stateIdent by adding the account address +// and raw storage key. +type stateIdentQuery struct { + stateIdent + + address common.Address + storageKey common.Hash +} + +// newAccountIdentQuery constructs a state identifier for an account. +func newAccountIdentQuery(address common.Address, addressHash common.Hash) stateIdentQuery { + return stateIdentQuery{ + stateIdent: newAccountIdent(addressHash), + address: address, + } +} + +// newStorageIdentQuery constructs a state identifier for a storage slot. 
+// the address denotes the address of the associated account; +// the addressHash denotes the address hash of the associated account; +// the storageKey denotes the raw storage slot key; +// the storageHash denotes the hash of the raw storage slot key; +func newStorageIdentQuery(address common.Address, addressHash common.Hash, storageKey common.Hash, storageHash common.Hash) stateIdentQuery { + return stateIdentQuery{ + stateIdent: newStorageIdent(addressHash, storageHash), + address: address, + storageKey: storageKey, + } +} + +// history defines the interface of historical data, implemented by stateHistory +// and trienodeHistory (in the near future). +type history interface { + // typ returns the historical data type held in the history. + typ() historyType + + // forEach returns an iterator to traverse the state entries in the history. + forEach() iter.Seq[stateIdent] +} + var ( errHeadTruncationOutOfRange = errors.New("history head truncation out of range") errTailTruncationOutOfRange = errors.New("history tail truncation out of range") @@ -31,7 +167,7 @@ var ( // truncateFromHead removes excess elements from the head of the freezer based // on the given parameters. It returns the number of items that were removed. -func truncateFromHead(store ethdb.AncientStore, nhead uint64) (int, error) { +func truncateFromHead(store ethdb.AncientStore, typ historyType, nhead uint64) (int, error) { ohead, err := store.Ancients() if err != nil { return 0, err @@ -40,16 +176,16 @@ func truncateFromHead(store ethdb.AncientStore, nhead uint64) (int, error) { if err != nil { return 0, err } - log.Info("Truncating from head", "ohead", ohead, "tail", otail, "nhead", nhead) - // Ensure that the truncation target falls within the valid range. 
if ohead < nhead || nhead < otail { - return 0, fmt.Errorf("%w, tail: %d, head: %d, target: %d", errHeadTruncationOutOfRange, otail, ohead, nhead) + return 0, fmt.Errorf("%w, %s, tail: %d, head: %d, target: %d", errHeadTruncationOutOfRange, typ, otail, ohead, nhead) } // Short circuit if nothing to truncate. if ohead == nhead { return 0, nil } + log.Info("Truncating from head", "type", typ.String(), "ohead", ohead, "tail", otail, "nhead", nhead) + ohead, err = store.TruncateHead(nhead) if err != nil { return 0, err @@ -61,7 +197,7 @@ func truncateFromHead(store ethdb.AncientStore, nhead uint64) (int, error) { // truncateFromTail removes excess elements from the end of the freezer based // on the given parameters. It returns the number of items that were removed. -func truncateFromTail(store ethdb.AncientStore, ntail uint64) (int, error) { +func truncateFromTail(store ethdb.AncientStore, typ historyType, ntail uint64) (int, error) { ohead, err := store.Ancients() if err != nil { return 0, err @@ -72,7 +208,7 @@ func truncateFromTail(store ethdb.AncientStore, ntail uint64) (int, error) { } // Ensure that the truncation target falls within the valid range. if otail > ntail || ntail > ohead { - return 0, fmt.Errorf("%w, tail: %d, head: %d, target: %d", errTailTruncationOutOfRange, otail, ohead, ntail) + return 0, fmt.Errorf("%w, %s, tail: %d, head: %d, target: %d", errTailTruncationOutOfRange, typ, otail, ohead, ntail) } // Short circuit if nothing to truncate. if otail == ntail { diff --git a/triedb/pathdb/history_index.go b/triedb/pathdb/history_index.go index e781a898e1..47cee9820d 100644 --- a/triedb/pathdb/history_index.go +++ b/triedb/pathdb/history_index.go @@ -78,12 +78,7 @@ type indexReader struct { // loadIndexData loads the index data associated with the specified state. 
func loadIndexData(db ethdb.KeyValueReader, state stateIdent) ([]*indexBlockDesc, error) { - var blob []byte - if state.account { - blob = rawdb.ReadAccountHistoryIndex(db, state.addressHash) - } else { - blob = rawdb.ReadStorageHistoryIndex(db, state.addressHash, state.storageHash) - } + blob := readStateIndex(state, db) if len(blob) == 0 { return nil, nil } @@ -137,15 +132,8 @@ func (r *indexReader) readGreaterThan(id uint64) (uint64, error) { br, ok := r.readers[desc.id] if !ok { - var ( - err error - blob []byte - ) - if r.state.account { - blob = rawdb.ReadAccountHistoryIndexBlock(r.db, r.state.addressHash, desc.id) - } else { - blob = rawdb.ReadStorageHistoryIndexBlock(r.db, r.state.addressHash, r.state.storageHash, desc.id) - } + var err error + blob := readStateIndexBlock(r.state, r.db, desc.id) br, err = newBlockReader(blob) if err != nil { return 0, err @@ -174,12 +162,7 @@ type indexWriter struct { // newIndexWriter constructs the index writer for the specified state. func newIndexWriter(db ethdb.KeyValueReader, state stateIdent) (*indexWriter, error) { - var blob []byte - if state.account { - blob = rawdb.ReadAccountHistoryIndex(db, state.addressHash) - } else { - blob = rawdb.ReadStorageHistoryIndex(db, state.addressHash, state.storageHash) - } + blob := readStateIndex(state, db) if len(blob) == 0 { desc := newIndexBlockDesc(0) bw, _ := newBlockWriter(nil, desc) @@ -194,15 +177,8 @@ func newIndexWriter(db ethdb.KeyValueReader, state stateIdent) (*indexWriter, er if err != nil { return nil, err } - var ( - indexBlock []byte - lastDesc = descList[len(descList)-1] - ) - if state.account { - indexBlock = rawdb.ReadAccountHistoryIndexBlock(db, state.addressHash, lastDesc.id) - } else { - indexBlock = rawdb.ReadStorageHistoryIndexBlock(db, state.addressHash, state.storageHash, lastDesc.id) - } + lastDesc := descList[len(descList)-1] + indexBlock := readStateIndexBlock(state, db, lastDesc.id) bw, err := newBlockWriter(indexBlock, lastDesc) if err != nil { 
return nil, err @@ -270,11 +246,7 @@ func (w *indexWriter) finish(batch ethdb.Batch) { return // nothing to commit } for _, bw := range writers { - if w.state.account { - rawdb.WriteAccountHistoryIndexBlock(batch, w.state.addressHash, bw.desc.id, bw.finish()) - } else { - rawdb.WriteStorageHistoryIndexBlock(batch, w.state.addressHash, w.state.storageHash, bw.desc.id, bw.finish()) - } + writeStateIndexBlock(w.state, batch, bw.desc.id, bw.finish()) } w.frozen = nil // release all the frozen writers @@ -282,11 +254,7 @@ func (w *indexWriter) finish(batch ethdb.Batch) { for _, desc := range descList { buf = append(buf, desc.encode()...) } - if w.state.account { - rawdb.WriteAccountHistoryIndex(batch, w.state.addressHash, buf) - } else { - rawdb.WriteStorageHistoryIndex(batch, w.state.addressHash, w.state.storageHash, buf) - } + writeStateIndex(w.state, batch, buf) } // indexDeleter is responsible for deleting index data for a specific state. @@ -301,12 +269,7 @@ type indexDeleter struct { // newIndexDeleter constructs the index deleter for the specified state. func newIndexDeleter(db ethdb.KeyValueReader, state stateIdent) (*indexDeleter, error) { - var blob []byte - if state.account { - blob = rawdb.ReadAccountHistoryIndex(db, state.addressHash) - } else { - blob = rawdb.ReadStorageHistoryIndex(db, state.addressHash, state.storageHash) - } + blob := readStateIndex(state, db) if len(blob) == 0 { // TODO(rjl493456442) we can probably return an error here, // deleter with no data is meaningless. 
@@ -323,15 +286,8 @@ func newIndexDeleter(db ethdb.KeyValueReader, state stateIdent) (*indexDeleter, if err != nil { return nil, err } - var ( - indexBlock []byte - lastDesc = descList[len(descList)-1] - ) - if state.account { - indexBlock = rawdb.ReadAccountHistoryIndexBlock(db, state.addressHash, lastDesc.id) - } else { - indexBlock = rawdb.ReadStorageHistoryIndexBlock(db, state.addressHash, state.storageHash, lastDesc.id) - } + lastDesc := descList[len(descList)-1] + indexBlock := readStateIndexBlock(state, db, lastDesc.id) bw, err := newBlockWriter(indexBlock, lastDesc) if err != nil { return nil, err @@ -376,15 +332,8 @@ func (d *indexDeleter) pop(id uint64) error { d.descList = d.descList[:len(d.descList)-1] // Open the previous block writer for deleting - var ( - indexBlock []byte - lastDesc = d.descList[len(d.descList)-1] - ) - if d.state.account { - indexBlock = rawdb.ReadAccountHistoryIndexBlock(d.db, d.state.addressHash, lastDesc.id) - } else { - indexBlock = rawdb.ReadStorageHistoryIndexBlock(d.db, d.state.addressHash, d.state.storageHash, lastDesc.id) - } + lastDesc := d.descList[len(d.descList)-1] + indexBlock := readStateIndexBlock(d.state, d.db, lastDesc.id) bw, err := newBlockWriter(indexBlock, lastDesc) if err != nil { return err @@ -399,38 +348,100 @@ func (d *indexDeleter) pop(id uint64) error { // This function is safe to be called multiple times. 
func (d *indexDeleter) finish(batch ethdb.Batch) { for _, id := range d.dropped { - if d.state.account { - rawdb.DeleteAccountHistoryIndexBlock(batch, d.state.addressHash, id) - } else { - rawdb.DeleteStorageHistoryIndexBlock(batch, d.state.addressHash, d.state.storageHash, id) - } + deleteStateIndexBlock(d.state, batch, id) } d.dropped = nil // Flush the content of last block writer, regardless it's dirty or not if !d.bw.empty() { - if d.state.account { - rawdb.WriteAccountHistoryIndexBlock(batch, d.state.addressHash, d.bw.desc.id, d.bw.finish()) - } else { - rawdb.WriteStorageHistoryIndexBlock(batch, d.state.addressHash, d.state.storageHash, d.bw.desc.id, d.bw.finish()) - } + writeStateIndexBlock(d.state, batch, d.bw.desc.id, d.bw.finish()) } // Flush the index metadata into the supplied batch if d.empty() { - if d.state.account { - rawdb.DeleteAccountHistoryIndex(batch, d.state.addressHash) - } else { - rawdb.DeleteStorageHistoryIndex(batch, d.state.addressHash, d.state.storageHash) - } + deleteStateIndex(d.state, batch) } else { buf := make([]byte, 0, indexBlockDescSize*len(d.descList)) for _, desc := range d.descList { buf = append(buf, desc.encode()...) } - if d.state.account { - rawdb.WriteAccountHistoryIndex(batch, d.state.addressHash, buf) - } else { - rawdb.WriteStorageHistoryIndex(batch, d.state.addressHash, d.state.storageHash, buf) - } + writeStateIndex(d.state, batch, buf) + } +} + +// readStateIndex retrieves the index metadata for the given state identifier. +// This function is shared by accounts and storage slots. 
+func readStateIndex(ident stateIdent, db ethdb.KeyValueReader) []byte { + switch ident.typ { + case typeAccount: + return rawdb.ReadAccountHistoryIndex(db, ident.addressHash) + case typeStorage: + return rawdb.ReadStorageHistoryIndex(db, ident.addressHash, ident.storageHash) + default: + panic(fmt.Errorf("unknown type: %v", ident.typ)) + } +} + +// writeStateIndex writes the provided index metadata into database with the +// given state identifier. This function is shared by accounts and storage slots. +func writeStateIndex(ident stateIdent, db ethdb.KeyValueWriter, data []byte) { + switch ident.typ { + case typeAccount: + rawdb.WriteAccountHistoryIndex(db, ident.addressHash, data) + case typeStorage: + rawdb.WriteStorageHistoryIndex(db, ident.addressHash, ident.storageHash, data) + default: + panic(fmt.Errorf("unknown type: %v", ident.typ)) + } +} + +// deleteStateIndex removes the index metadata for the given state identifier. +// This function is shared by accounts and storage slots. +func deleteStateIndex(ident stateIdent, db ethdb.KeyValueWriter) { + switch ident.typ { + case typeAccount: + rawdb.DeleteAccountHistoryIndex(db, ident.addressHash) + case typeStorage: + rawdb.DeleteStorageHistoryIndex(db, ident.addressHash, ident.storageHash) + default: + panic(fmt.Errorf("unknown type: %v", ident.typ)) + } +} + +// readStateIndexBlock retrieves the index block for the given state identifier +// and block ID. This function is shared by accounts and storage slots. +func readStateIndexBlock(ident stateIdent, db ethdb.KeyValueReader, id uint32) []byte { + switch ident.typ { + case typeAccount: + return rawdb.ReadAccountHistoryIndexBlock(db, ident.addressHash, id) + case typeStorage: + return rawdb.ReadStorageHistoryIndexBlock(db, ident.addressHash, ident.storageHash, id) + default: + panic(fmt.Errorf("unknown type: %v", ident.typ)) + } +} + +// writeStateIndexBlock writes the provided index block into database with the +// given state identifier. 
This function is shared by accounts and storage slots. +func writeStateIndexBlock(ident stateIdent, db ethdb.KeyValueWriter, id uint32, data []byte) { + switch ident.typ { + case typeAccount: + rawdb.WriteAccountHistoryIndexBlock(db, ident.addressHash, id, data) + case typeStorage: + rawdb.WriteStorageHistoryIndexBlock(db, ident.addressHash, ident.storageHash, id, data) + default: + panic(fmt.Errorf("unknown type: %v", ident.typ)) + } +} + +// deleteStateIndexBlock removes the index block from database with the given +// state identifier. This function is shared by accounts and storage slots. +func deleteStateIndexBlock(ident stateIdent, db ethdb.KeyValueWriter, id uint32) { + switch ident.typ { + case typeAccount: + rawdb.DeleteAccountHistoryIndexBlock(db, ident.addressHash, id) + case typeStorage: + rawdb.DeleteStorageHistoryIndexBlock(db, ident.addressHash, ident.storageHash, id) + default: + panic(fmt.Errorf("unknown type: %v", ident.typ)) } } diff --git a/triedb/pathdb/history_index_test.go b/triedb/pathdb/history_index_test.go index c83c33ffbd..be9b7c4049 100644 --- a/triedb/pathdb/history_index_test.go +++ b/triedb/pathdb/history_index_test.go @@ -179,7 +179,7 @@ func TestIndexWriterDelete(t *testing.T) { func TestBatchIndexerWrite(t *testing.T) { var ( db = rawdb.NewMemoryDatabase() - batch = newBatchIndexer(db, false) + batch = newBatchIndexer(db, false, typeStateHistory) histories = makeStateHistories(10) ) for i, h := range histories { @@ -190,7 +190,7 @@ func TestBatchIndexerWrite(t *testing.T) { if err := batch.finish(true); err != nil { t.Fatalf("Failed to finish batch indexer, %v", err) } - metadata := loadIndexMetadata(db) + metadata := loadIndexMetadata(db, typeStateHistory) if metadata == nil || metadata.Last != uint64(10) { t.Fatal("Unexpected index position") } @@ -256,7 +256,7 @@ func TestBatchIndexerWrite(t *testing.T) { func TestBatchIndexerDelete(t *testing.T) { var ( db = rawdb.NewMemoryDatabase() - bw = newBatchIndexer(db, false) + bw = 
newBatchIndexer(db, false, typeStateHistory) histories = makeStateHistories(10) ) // Index histories @@ -270,7 +270,7 @@ func TestBatchIndexerDelete(t *testing.T) { } // Unindex histories - bd := newBatchIndexer(db, true) + bd := newBatchIndexer(db, true, typeStateHistory) for i := len(histories) - 1; i >= 0; i-- { if err := bd.process(histories[i], uint64(i+1)); err != nil { t.Fatalf("Failed to process history, %v", err) @@ -280,7 +280,7 @@ func TestBatchIndexerDelete(t *testing.T) { t.Fatalf("Failed to finish batch indexer, %v", err) } - metadata := loadIndexMetadata(db) + metadata := loadIndexMetadata(db, typeStateHistory) if metadata != nil { t.Fatal("Unexpected index position") } diff --git a/triedb/pathdb/history_indexer.go b/triedb/pathdb/history_indexer.go index 14b9af5367..d618585929 100644 --- a/triedb/pathdb/history_indexer.go +++ b/triedb/pathdb/history_indexer.go @@ -26,7 +26,6 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/rawdb" - "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/rlp" @@ -41,13 +40,32 @@ const ( stateIndexVersion = stateIndexV0 // the current state index version ) +// indexVersion returns the latest index version for the given history type. +// It panics if the history type is unknown. +func indexVersion(typ historyType) uint8 { + switch typ { + case typeStateHistory: + return stateIndexVersion + default: + panic(fmt.Errorf("unknown history type: %d", typ)) + } +} + +// indexMetadata describes the metadata of the historical data index. type indexMetadata struct { Version uint8 Last uint64 } -func loadIndexMetadata(db ethdb.KeyValueReader) *indexMetadata { - blob := rawdb.ReadStateHistoryIndexMetadata(db) +// loadIndexMetadata reads the metadata of the specific history index. 
+func loadIndexMetadata(db ethdb.KeyValueReader, typ historyType) *indexMetadata { + var blob []byte + switch typ { + case typeStateHistory: + blob = rawdb.ReadStateHistoryIndexMetadata(db) + default: + panic(fmt.Errorf("unknown history type %d", typ)) + } if len(blob) == 0 { return nil } @@ -59,91 +77,94 @@ func loadIndexMetadata(db ethdb.KeyValueReader) *indexMetadata { return &m } -func storeIndexMetadata(db ethdb.KeyValueWriter, last uint64) { - var m indexMetadata - m.Version = stateIndexVersion - m.Last = last +// storeIndexMetadata stores the metadata of the specific history index. +func storeIndexMetadata(db ethdb.KeyValueWriter, typ historyType, last uint64) { + m := indexMetadata{ + Version: indexVersion(typ), + Last: last, + } blob, err := rlp.EncodeToBytes(m) if err != nil { - log.Crit("Failed to encode index metadata", "err", err) + panic(fmt.Errorf("fail to encode index metadata, %v", err)) } - rawdb.WriteStateHistoryIndexMetadata(db, blob) + switch typ { + case typeStateHistory: + rawdb.WriteStateHistoryIndexMetadata(db, blob) + default: + panic(fmt.Errorf("unknown history type %d", typ)) + } + log.Debug("Written index metadata", "type", typ, "last", last) } -// batchIndexer is a structure designed to perform batch indexing or unindexing -// of state histories atomically. +// deleteIndexMetadata deletes the metadata of the specific history index. +func deleteIndexMetadata(db ethdb.KeyValueWriter, typ historyType) { + switch typ { + case typeStateHistory: + rawdb.DeleteStateHistoryIndexMetadata(db) + default: + panic(fmt.Errorf("unknown history type %d", typ)) + } + log.Debug("Deleted index metadata", "type", typ) +} + +// batchIndexer is responsible for performing batch indexing or unindexing +// of historical data (e.g., state or trie node changes) atomically. 
type batchIndexer struct { - accounts map[common.Hash][]uint64 // History ID list, Keyed by the hash of account address - storages map[common.Hash]map[common.Hash][]uint64 // History ID list, Keyed by the hash of account address and the hash of raw storage key - counter int // The counter of processed states - delete bool // Index or unindex mode - lastID uint64 // The ID of latest processed history - db ethdb.KeyValueStore + index map[stateIdent][]uint64 // List of history IDs for tracked state entry + pending int // Number of entries processed in the current batch. + delete bool // Operation mode: true for unindex, false for index. + lastID uint64 // ID of the most recently processed history. + typ historyType // Type of history being processed (e.g., state or trienode). + db ethdb.KeyValueStore // Key-value database used to store or delete index data. } // newBatchIndexer constructs the batch indexer with the supplied mode. -func newBatchIndexer(db ethdb.KeyValueStore, delete bool) *batchIndexer { +func newBatchIndexer(db ethdb.KeyValueStore, delete bool, typ historyType) *batchIndexer { return &batchIndexer{ - accounts: make(map[common.Hash][]uint64), - storages: make(map[common.Hash]map[common.Hash][]uint64), - delete: delete, - db: db, + index: make(map[stateIdent][]uint64), + delete: delete, + typ: typ, + db: db, } } -// process iterates through the accounts and their associated storage slots in the -// state history, tracking the mapping between state and history IDs. 
-func (b *batchIndexer) process(h *stateHistory, historyID uint64) error { - for _, address := range h.accountList { - addrHash := crypto.Keccak256Hash(address.Bytes()) - b.counter += 1 - b.accounts[addrHash] = append(b.accounts[addrHash], historyID) - - for _, slotKey := range h.storageList[address] { - b.counter += 1 - if _, ok := b.storages[addrHash]; !ok { - b.storages[addrHash] = make(map[common.Hash][]uint64) - } - // The hash of the storage slot key is used as the identifier because the - // legacy history does not include the raw storage key, therefore, the - // conversion from storage key to hash is necessary for non-v0 histories. - slotHash := slotKey - if h.meta.version != stateHistoryV0 { - slotHash = crypto.Keccak256Hash(slotKey.Bytes()) - } - b.storages[addrHash][slotHash] = append(b.storages[addrHash][slotHash], historyID) - } +// process traverses the state entries within the provided history and tracks the mutation +// records for them. +func (b *batchIndexer) process(h history, id uint64) error { + for ident := range h.forEach() { + b.index[ident] = append(b.index[ident], id) + b.pending++ } - b.lastID = historyID + b.lastID = id + return b.finish(false) } // finish writes the accumulated state indexes into the disk if either the // memory limitation is reached or it's requested forcibly. 
func (b *batchIndexer) finish(force bool) error { - if b.counter == 0 { + if b.pending == 0 { return nil } - if !force && b.counter < historyIndexBatch { + if !force && b.pending < historyIndexBatch { return nil } var ( - batch = b.db.NewBatch() - batchMu sync.RWMutex - storages int - start = time.Now() - eg errgroup.Group + batch = b.db.NewBatch() + batchMu sync.RWMutex + start = time.Now() + eg errgroup.Group ) eg.SetLimit(runtime.NumCPU()) - for addrHash, idList := range b.accounts { + for ident, list := range b.index { eg.Go(func() error { if !b.delete { - iw, err := newIndexWriter(b.db, newAccountIdent(addrHash)) + iw, err := newIndexWriter(b.db, ident) if err != nil { return err } - for _, n := range idList { + for _, n := range list { if err := iw.append(n); err != nil { return err } @@ -152,11 +173,11 @@ func (b *batchIndexer) finish(force bool) error { iw.finish(batch) batchMu.Unlock() } else { - id, err := newIndexDeleter(b.db, newAccountIdent(addrHash)) + id, err := newIndexDeleter(b.db, ident) if err != nil { return err } - for _, n := range idList { + for _, n := range list { if err := id.pop(n); err != nil { return err } @@ -168,72 +189,36 @@ func (b *batchIndexer) finish(force bool) error { return nil }) } - for addrHash, slots := range b.storages { - storages += len(slots) - for storageHash, idList := range slots { - eg.Go(func() error { - if !b.delete { - iw, err := newIndexWriter(b.db, newStorageIdent(addrHash, storageHash)) - if err != nil { - return err - } - for _, n := range idList { - if err := iw.append(n); err != nil { - return err - } - } - batchMu.Lock() - iw.finish(batch) - batchMu.Unlock() - } else { - id, err := newIndexDeleter(b.db, newStorageIdent(addrHash, storageHash)) - if err != nil { - return err - } - for _, n := range idList { - if err := id.pop(n); err != nil { - return err - } - } - batchMu.Lock() - id.finish(batch) - batchMu.Unlock() - } - return nil - }) - } - } if err := eg.Wait(); err != nil { return err } // Update the 
position of last indexed state history if !b.delete { - storeIndexMetadata(batch, b.lastID) + storeIndexMetadata(batch, b.typ, b.lastID) } else { if b.lastID == 1 { - rawdb.DeleteStateHistoryIndexMetadata(batch) + deleteIndexMetadata(batch, b.typ) } else { - storeIndexMetadata(batch, b.lastID-1) + storeIndexMetadata(batch, b.typ, b.lastID-1) } } if err := batch.Write(); err != nil { return err } - log.Debug("Committed batch indexer", "accounts", len(b.accounts), "storages", storages, "records", b.counter, "elapsed", common.PrettyDuration(time.Since(start))) - b.counter = 0 - b.accounts = make(map[common.Hash][]uint64) - b.storages = make(map[common.Hash]map[common.Hash][]uint64) + log.Debug("Committed batch indexer", "type", b.typ, "entries", len(b.index), "records", b.pending, "elapsed", common.PrettyDuration(time.Since(start))) + b.pending = 0 + b.index = make(map[stateIdent][]uint64) return nil } // indexSingle processes the state history with the specified ID for indexing. -func indexSingle(historyID uint64, db ethdb.KeyValueStore, freezer ethdb.AncientReader) error { +func indexSingle(historyID uint64, db ethdb.KeyValueStore, freezer ethdb.AncientReader, typ historyType) error { start := time.Now() defer func() { indexHistoryTimer.UpdateSince(start) }() - metadata := loadIndexMetadata(db) + metadata := loadIndexMetadata(db, typ) if metadata == nil || metadata.Last+1 != historyID { last := "null" if metadata != nil { @@ -241,29 +226,37 @@ func indexSingle(historyID uint64, db ethdb.KeyValueStore, freezer ethdb.Ancient } return fmt.Errorf("history indexing is out of order, last: %s, requested: %d", last, historyID) } - h, err := readStateHistory(freezer, historyID) + var ( + err error + h history + b = newBatchIndexer(db, false, typ) + ) + if typ == typeStateHistory { + h, err = readStateHistory(freezer, historyID) + } else { + // h, err = readTrienodeHistory(freezer, historyID) + } if err != nil { return err } - b := newBatchIndexer(db, false) if err := 
b.process(h, historyID); err != nil { return err } if err := b.finish(true); err != nil { return err } - log.Debug("Indexed state history", "id", historyID, "elapsed", common.PrettyDuration(time.Since(start))) + log.Debug("Indexed history", "type", typ, "id", historyID, "elapsed", common.PrettyDuration(time.Since(start))) return nil } // unindexSingle processes the state history with the specified ID for unindexing. -func unindexSingle(historyID uint64, db ethdb.KeyValueStore, freezer ethdb.AncientReader) error { +func unindexSingle(historyID uint64, db ethdb.KeyValueStore, freezer ethdb.AncientReader, typ historyType) error { start := time.Now() defer func() { unindexHistoryTimer.UpdateSince(start) }() - metadata := loadIndexMetadata(db) + metadata := loadIndexMetadata(db, typ) if metadata == nil || metadata.Last != historyID { last := "null" if metadata != nil { @@ -271,18 +264,26 @@ func unindexSingle(historyID uint64, db ethdb.KeyValueStore, freezer ethdb.Ancie } return fmt.Errorf("history unindexing is out of order, last: %s, requested: %d", last, historyID) } - h, err := readStateHistory(freezer, historyID) + var ( + err error + h history + ) + b := newBatchIndexer(db, true, typ) + if typ == typeStateHistory { + h, err = readStateHistory(freezer, historyID) + } else { + // h, err = readTrienodeHistory(freezer, historyID) + } if err != nil { return err } - b := newBatchIndexer(db, true) if err := b.process(h, historyID); err != nil { return err } if err := b.finish(true); err != nil { return err } - log.Debug("Unindexed state history", "id", historyID, "elapsed", common.PrettyDuration(time.Since(start))) + log.Debug("Unindexed history", "type", typ, "id", historyID, "elapsed", common.PrettyDuration(time.Since(start))) return nil } @@ -305,6 +306,8 @@ type indexIniter struct { interrupt chan *interruptSignal done chan struct{} closed chan struct{} + typ historyType + log log.Logger // Contextual logger with the history type injected // indexing progress indexed 
atomic.Uint64 // the id of latest indexed state @@ -313,24 +316,33 @@ type indexIniter struct { wg sync.WaitGroup } -func newIndexIniter(disk ethdb.KeyValueStore, freezer ethdb.AncientStore, lastID uint64) *indexIniter { +func newIndexIniter(disk ethdb.KeyValueStore, freezer ethdb.AncientStore, typ historyType, lastID uint64) *indexIniter { initer := &indexIniter{ disk: disk, freezer: freezer, interrupt: make(chan *interruptSignal), done: make(chan struct{}), closed: make(chan struct{}), + typ: typ, + log: log.New("type", typ.String()), } // Load indexing progress + var recover bool initer.last.Store(lastID) - metadata := loadIndexMetadata(disk) + metadata := loadIndexMetadata(disk, typ) if metadata != nil { initer.indexed.Store(metadata.Last) + recover = metadata.Last > lastID } // Launch background indexer initer.wg.Add(1) - go initer.run(lastID) + if recover { + log.Info("History indexer is recovering", "history", lastID, "indexed", metadata.Last) + go initer.recover(lastID) + } else { + go initer.run(lastID) + } return initer } @@ -364,8 +376,8 @@ func (i *indexIniter) remain() uint64 { default: last, indexed := i.last.Load(), i.indexed.Load() if last < indexed { - log.Error("Invalid state indexing range", "last", last, "indexed", indexed) - return 0 + i.log.Warn("State indexer is in recovery", "indexed", indexed, "last", last) + return indexed - last } return last - indexed } @@ -382,7 +394,7 @@ func (i *indexIniter) run(lastID uint64) { // checkDone indicates whether all requested state histories // have been fully indexed. 
checkDone = func() bool { - metadata := loadIndexMetadata(i.disk) + metadata := loadIndexMetadata(i.disk, i.typ) return metadata != nil && metadata.Last == lastID } ) @@ -404,7 +416,7 @@ func (i *indexIniter) run(lastID uint64) { if newLastID == lastID+1 { lastID = newLastID signal.result <- nil - log.Debug("Extended state history range", "last", lastID) + i.log.Debug("Extended history range", "last", lastID) continue } // The index limit is shortened by one, interrupt the current background @@ -415,14 +427,14 @@ func (i *indexIniter) run(lastID uint64) { // If all state histories, including the one to be reverted, have // been fully indexed, unindex it here and shut down the initializer. if checkDone() { - log.Info("Truncate the extra history", "id", lastID) - if err := unindexSingle(lastID, i.disk, i.freezer); err != nil { + i.log.Info("Truncate the extra history", "id", lastID) + if err := unindexSingle(lastID, i.disk, i.freezer, i.typ); err != nil { signal.result <- err return } close(i.done) signal.result <- nil - log.Info("State histories have been fully indexed", "last", lastID-1) + i.log.Info("Histories have been fully indexed", "last", lastID-1) return } // Adjust the indexing target and relaunch the process @@ -431,12 +443,12 @@ func (i *indexIniter) run(lastID uint64) { done, interrupt = make(chan struct{}), new(atomic.Int32) go i.index(done, interrupt, lastID) - log.Debug("Shortened state history range", "last", lastID) + i.log.Debug("Shortened history range", "last", lastID) case <-done: if checkDone() { close(i.done) - log.Info("State histories have been fully indexed", "last", lastID) + i.log.Info("Histories have been fully indexed", "last", lastID) return } // Relaunch the background runner if some tasks are left @@ -445,7 +457,7 @@ func (i *indexIniter) run(lastID uint64) { case <-i.closed: interrupt.Store(1) - log.Info("Waiting background history index initer to exit") + i.log.Info("Waiting background history index initer to exit") <-done if 
checkDone() { @@ -465,14 +477,14 @@ func (i *indexIniter) next() (uint64, error) { tailID := tail + 1 // compute the id of the oldest history // Start indexing from scratch if nothing has been indexed - metadata := loadIndexMetadata(i.disk) + metadata := loadIndexMetadata(i.disk, i.typ) if metadata == nil { - log.Debug("Initialize state history indexing from scratch", "id", tailID) + i.log.Debug("Initialize history indexing from scratch", "id", tailID) return tailID, nil } // Resume indexing from the last interrupted position if metadata.Last+1 >= tailID { - log.Debug("Resume state history indexing", "id", metadata.Last+1, "tail", tailID) + i.log.Debug("Resume history indexing", "id", metadata.Last+1, "tail", tailID) return metadata.Last + 1, nil } // History has been shortened without indexing. Discard the gapped segment @@ -480,7 +492,7 @@ func (i *indexIniter) next() (uint64, error) { // // The missing indexes corresponding to the gapped histories won't be visible. // It's fine to leave them unindexed. - log.Info("History gap detected, discard old segment", "oldHead", metadata.Last, "newHead", tailID) + i.log.Info("History gap detected, discard old segment", "oldHead", metadata.Last, "newHead", tailID) return tailID, nil } @@ -489,7 +501,7 @@ func (i *indexIniter) index(done chan struct{}, interrupt *atomic.Int32, lastID beginID, err := i.next() if err != nil { - log.Error("Failed to find next state history for indexing", "err", err) + i.log.Error("Failed to find next history for indexing", "err", err) return } // All available state histories have been indexed, and the last indexed one @@ -504,36 +516,47 @@ func (i *indexIniter) index(done chan struct{}, interrupt *atomic.Int32, lastID // // This step is essential to avoid spinning up indexing thread // endlessly until a history object is produced. 
- storeIndexMetadata(i.disk, 0) - log.Info("Initialized history indexing flag") + storeIndexMetadata(i.disk, i.typ, 0) + i.log.Info("Initialized history indexing flag") } else { - log.Debug("State history is fully indexed", "last", lastID) + i.log.Debug("History is fully indexed", "last", lastID) } return } - log.Info("Start history indexing", "beginID", beginID, "lastID", lastID) + i.log.Info("Start history indexing", "beginID", beginID, "lastID", lastID) var ( current = beginID start = time.Now() logged = time.Now() - batch = newBatchIndexer(i.disk, false) + batch = newBatchIndexer(i.disk, false, i.typ) ) for current <= lastID { count := lastID - current + 1 if count > historyReadBatch { count = historyReadBatch } - histories, err := readStateHistories(i.freezer, current, count) - if err != nil { - // The history read might fall if the history is truncated from - // head due to revert operation. - log.Error("Failed to read history for indexing", "current", current, "count", count, "err", err) - return + var histories []history + if i.typ == typeStateHistory { + histories, err = readStateHistories(i.freezer, current, count) + if err != nil { + // The history read might fall if the history is truncated from + // head due to revert operation. + i.log.Error("Failed to read history for indexing", "current", current, "count", count, "err", err) + return + } + } else { + // histories, err = readTrienodeHistories(i.freezer, current, count) + // if err != nil { + // // The history read might fall if the history is truncated from + // // head due to revert operation. 
+ // i.log.Error("Failed to read history for indexing", "current", current, "count", count, "err", err) + // return + // } } for _, h := range histories { if err := batch.process(h, current); err != nil { - log.Error("Failed to index history", "err", err) + i.log.Error("Failed to index history", "err", err) return } current += 1 @@ -547,7 +570,7 @@ func (i *indexIniter) index(done chan struct{}, interrupt *atomic.Int32, lastID done = current - beginID ) eta := common.CalculateETA(done, left, time.Since(start)) - log.Info("Indexing state history", "processed", done, "left", left, "elapsed", common.PrettyDuration(time.Since(start)), "eta", common.PrettyDuration(eta)) + i.log.Info("Indexing state history", "processed", done, "left", left, "elapsed", common.PrettyDuration(time.Since(start)), "eta", common.PrettyDuration(eta)) } } i.indexed.Store(current - 1) // update indexing progress @@ -556,7 +579,7 @@ func (i *indexIniter) index(done chan struct{}, interrupt *atomic.Int32, lastID if interrupt != nil { if signal := interrupt.Load(); signal != 0 { if err := batch.finish(true); err != nil { - log.Error("Failed to flush index", "err", err) + i.log.Error("Failed to flush index", "err", err) } log.Info("State indexing interrupted") return @@ -564,9 +587,52 @@ func (i *indexIniter) index(done chan struct{}, interrupt *atomic.Int32, lastID } } if err := batch.finish(true); err != nil { - log.Error("Failed to flush index", "err", err) + i.log.Error("Failed to flush index", "err", err) + } + i.log.Info("Indexed history", "from", beginID, "to", lastID, "elapsed", common.PrettyDuration(time.Since(start))) +} + +// recover handles unclean shutdown recovery. After an unclean shutdown, any +// extra histories are typically truncated, while the corresponding history index +// entries may still have been written. Ideally, we would unindex these histories +// in reverse order, but there is no guarantee that the required histories will +// still be available. 
+// +// As a workaround, indexIniter waits until the missing histories are regenerated +// by chain recovery, under the assumption that the recovered histories will be +// identical to the lost ones. Fork-awareness should be added in the future to +// correctly handle histories affected by reorgs. +func (i *indexIniter) recover(lastID uint64) { + defer i.wg.Done() + + for { + select { + case signal := <-i.interrupt: + newLastID := signal.newLastID + if newLastID != lastID+1 && newLastID != lastID-1 { + signal.result <- fmt.Errorf("invalid history id, last: %d, got: %d", lastID, newLastID) + continue + } + + // Update the last indexed flag + lastID = newLastID + signal.result <- nil + i.last.Store(newLastID) + i.log.Debug("Updated history index flag", "last", lastID) + + // Terminate the recovery routine once the histories are fully aligned + // with the index data, indicating that index initialization is complete. + metadata := loadIndexMetadata(i.disk, i.typ) + if metadata != nil && metadata.Last == lastID { + close(i.done) + i.log.Info("History indexer is recovered", "last", lastID) + return + } + + case <-i.closed: + return + } } - log.Info("Indexed state history", "from", beginID, "to", lastID, "elapsed", common.PrettyDuration(time.Since(start))) } // historyIndexer manages the indexing and unindexing of state histories, @@ -581,21 +647,31 @@ func (i *indexIniter) index(done chan struct{}, interrupt *atomic.Int32, lastID // state history. type historyIndexer struct { initer *indexIniter + typ historyType disk ethdb.KeyValueStore freezer ethdb.AncientStore } // checkVersion checks whether the index data in the database matches the version. 
-func checkVersion(disk ethdb.KeyValueStore) { - blob := rawdb.ReadStateHistoryIndexMetadata(disk) +func checkVersion(disk ethdb.KeyValueStore, typ historyType) { + var blob []byte + if typ == typeStateHistory { + blob = rawdb.ReadStateHistoryIndexMetadata(disk) + } else { + panic(fmt.Errorf("unknown history type: %v", typ)) + } + // Short circuit if metadata is not found, re-index is required + // from scratch. if len(blob) == 0 { return } + // Short circuit if the metadata is found and the version is matched var m indexMetadata err := rlp.DecodeBytes(blob, &m) if err == nil && m.Version == stateIndexVersion { return } + // Version is not matched, prune the existing data and re-index from scratch version := "unknown" if err == nil { version = fmt.Sprintf("%d", m.Version) @@ -612,10 +688,11 @@ func checkVersion(disk ethdb.KeyValueStore) { // newHistoryIndexer constructs the history indexer and launches the background // initer to complete the indexing of any remaining state histories. -func newHistoryIndexer(disk ethdb.KeyValueStore, freezer ethdb.AncientStore, lastHistoryID uint64) *historyIndexer { - checkVersion(disk) +func newHistoryIndexer(disk ethdb.KeyValueStore, freezer ethdb.AncientStore, lastHistoryID uint64, typ historyType) *historyIndexer { + checkVersion(disk, typ) return &historyIndexer{ - initer: newIndexIniter(disk, freezer, lastHistoryID), + initer: newIndexIniter(disk, freezer, typ, lastHistoryID), + typ: typ, disk: disk, freezer: freezer, } @@ -643,7 +720,7 @@ func (i *historyIndexer) extend(historyID uint64) error { case <-i.initer.closed: return errors.New("indexer is closed") case <-i.initer.done: - return indexSingle(historyID, i.disk, i.freezer) + return indexSingle(historyID, i.disk, i.freezer, i.typ) case i.initer.interrupt <- signal: return <-signal.result } @@ -660,7 +737,7 @@ func (i *historyIndexer) shorten(historyID uint64) error { case <-i.initer.closed: return errors.New("indexer is closed") case <-i.initer.done: - return 
unindexSingle(historyID, i.disk, i.freezer) + return unindexSingle(historyID, i.disk, i.freezer, i.typ) case i.initer.interrupt <- signal: return <-signal.result } diff --git a/triedb/pathdb/history_indexer_test.go b/triedb/pathdb/history_indexer_test.go index 96c87ccb1b..f333d18d8b 100644 --- a/triedb/pathdb/history_indexer_test.go +++ b/triedb/pathdb/history_indexer_test.go @@ -38,7 +38,7 @@ func TestHistoryIndexerShortenDeadlock(t *testing.T) { rawdb.WriteStateHistory(freezer, uint64(i+1), h.meta.encode(), accountIndex, storageIndex, accountData, storageData) } // As a workaround, assign a future block to keep the initer running indefinitely - indexer := newHistoryIndexer(db, freezer, 200) + indexer := newHistoryIndexer(db, freezer, 200, typeStateHistory) defer indexer.close() done := make(chan error, 1) diff --git a/triedb/pathdb/history_reader.go b/triedb/pathdb/history_reader.go index a11297b3f6..ce6aa693d1 100644 --- a/triedb/pathdb/history_reader.go +++ b/triedb/pathdb/history_reader.go @@ -29,88 +29,6 @@ import ( "github.com/ethereum/go-ethereum/ethdb" ) -// stateIdent represents the identifier of a state element, which can be -// either an account or a storage slot. -type stateIdent struct { - account bool - - // The hash of the account address. This is used instead of the raw account - // address is to align the traversal order with the Merkle-Patricia-Trie. - addressHash common.Hash - - // The hash of the storage slot key. This is used instead of the raw slot key - // because, in legacy state histories (prior to the Cancun fork), the slot - // identifier is the hash of the key, and the original key (preimage) cannot - // be recovered. To maintain backward compatibility, the key hash is used. - // - // Meanwhile, using the storage key hash also preserve the traversal order - // with Merkle-Patricia-Trie. - // - // This field is null if the identifier refers to account data. 
- storageHash common.Hash -} - -// String returns the string format state identifier. -func (ident stateIdent) String() string { - if ident.account { - return ident.addressHash.Hex() - } - return ident.addressHash.Hex() + ident.storageHash.Hex() -} - -// newAccountIdent constructs a state identifier for an account. -func newAccountIdent(addressHash common.Hash) stateIdent { - return stateIdent{ - account: true, - addressHash: addressHash, - } -} - -// newStorageIdent constructs a state identifier for a storage slot. -// The address denotes the address of the associated account; -// the storageHash denotes the hash of the raw storage slot key; -func newStorageIdent(addressHash common.Hash, storageHash common.Hash) stateIdent { - return stateIdent{ - addressHash: addressHash, - storageHash: storageHash, - } -} - -// stateIdentQuery is the extension of stateIdent by adding the raw storage key. -type stateIdentQuery struct { - stateIdent - - address common.Address - storageKey common.Hash -} - -// newAccountIdentQuery constructs a state identifier for an account. -func newAccountIdentQuery(address common.Address, addressHash common.Hash) stateIdentQuery { - return stateIdentQuery{ - stateIdent: stateIdent{ - account: true, - addressHash: addressHash, - }, - address: address, - } -} - -// newStorageIdentQuery constructs a state identifier for a storage slot. 
-// the address denotes the address of the associated account; -// the addressHash denotes the address hash of the associated account; -// the storageKey denotes the raw storage slot key; -// the storageHash denotes the hash of the raw storage slot key; -func newStorageIdentQuery(address common.Address, addressHash common.Hash, storageKey common.Hash, storageHash common.Hash) stateIdentQuery { - return stateIdentQuery{ - stateIdent: stateIdent{ - addressHash: addressHash, - storageHash: storageHash, - }, - address: address, - storageKey: storageKey, - } -} - // indexReaderWithLimitTag is a wrapper around indexReader that includes an // additional index position. This position represents the ID of the last // indexed state history at the time the reader was created, implying that @@ -169,7 +87,7 @@ func (r *indexReaderWithLimitTag) readGreaterThan(id uint64, lastID uint64) (uin // Given that it's very unlikely to occur and users try to perform historical // state queries while reverting the states at the same time. Simply returning // an error should be sufficient for now. - metadata := loadIndexMetadata(r.db) + metadata := loadIndexMetadata(r.db, toHistoryType(r.reader.state.typ)) if metadata == nil || metadata.Last < lastID { return 0, errors.New("state history hasn't been indexed yet") } @@ -331,7 +249,7 @@ func (r *historyReader) read(state stateIdentQuery, stateID uint64, lastID uint6 // To serve the request, all state histories from stateID+1 to lastID // must be indexed. It's not supposed to happen unless system is very // wrong. - metadata := loadIndexMetadata(r.disk) + metadata := loadIndexMetadata(r.disk, toHistoryType(state.typ)) if metadata == nil || metadata.Last < lastID { indexed := "null" if metadata != nil { @@ -364,7 +282,7 @@ func (r *historyReader) read(state stateIdentQuery, stateID uint64, lastID uint6 // that the associated state histories are no longer available due to a rollback. 
// Such truncation should be captured by the state resolver below, rather than returning // invalid data. - if state.account { + if state.typ == typeAccount { return r.readAccount(state.address, historyID) } return r.readStorage(state.address, state.storageKey, state.storageHash, historyID) diff --git a/triedb/pathdb/history_reader_test.go b/triedb/pathdb/history_reader_test.go index 9028a886ce..3e1a545ff3 100644 --- a/triedb/pathdb/history_reader_test.go +++ b/triedb/pathdb/history_reader_test.go @@ -29,7 +29,7 @@ import ( func waitIndexing(db *Database) { for { - metadata := loadIndexMetadata(db.diskdb) + metadata := loadIndexMetadata(db.diskdb, typeStateHistory) if metadata != nil && metadata.Last >= db.tree.bottom().stateID() { return } @@ -144,8 +144,13 @@ func testHistoryReader(t *testing.T, historyLimit uint64) { maxDiffLayers = 128 }() - // log.SetDefault(log.NewLogger(log.NewTerminalHandlerWithLevel(os.Stderr, log.LevelDebug, true))) - env := newTester(t, historyLimit, false, 64, true, "") + //log.SetDefault(log.NewLogger(log.NewTerminalHandlerWithLevel(os.Stderr, log.LevelDebug, true))) + config := &testerConfig{ + stateHistory: historyLimit, + layers: 64, + enableIndex: true, + } + env := newTester(t, config) defer env.release() waitIndexing(env.db) @@ -184,7 +189,12 @@ func TestHistoricalStateReader(t *testing.T) { }() //log.SetDefault(log.NewLogger(log.NewTerminalHandlerWithLevel(os.Stderr, log.LevelDebug, true))) - env := newTester(t, 0, false, 64, true, "") + config := &testerConfig{ + stateHistory: 0, + layers: 64, + enableIndex: true, + } + env := newTester(t, config) defer env.release() waitIndexing(env.db) diff --git a/triedb/pathdb/history_state.go b/triedb/pathdb/history_state.go index 3bb69a7f4d..9d1e4dfb09 100644 --- a/triedb/pathdb/history_state.go +++ b/triedb/pathdb/history_state.go @@ -21,6 +21,7 @@ import ( "encoding/binary" "errors" "fmt" + "iter" "maps" "slices" "time" @@ -275,6 +276,36 @@ func newStateHistory(root common.Hash, parent 
common.Hash, block uint64, account } } +// typ implements the history interface, returning the historical data type held. +func (h *stateHistory) typ() historyType { + return typeStateHistory +} + +// forEach implements the history interface, returning an iterator to traverse the +// state entries in the history. +func (h *stateHistory) forEach() iter.Seq[stateIdent] { + return func(yield func(stateIdent) bool) { + for _, addr := range h.accountList { + addrHash := crypto.Keccak256Hash(addr.Bytes()) + if !yield(newAccountIdent(addrHash)) { + return + } + for _, slotKey := range h.storageList[addr] { + // The hash of the storage slot key is used as the identifier because the + // legacy history does not include the raw storage key, therefore, the + // conversion from storage key to hash is necessary for non-v0 histories. + slotHash := slotKey + if h.meta.version != stateHistoryV0 { + slotHash = crypto.Keccak256Hash(slotKey.Bytes()) + } + if !yield(newStorageIdent(addrHash, slotHash)) { + return + } + } + } + } +} + // stateSet returns the state set, keyed by the hash of the account address // and the hash of the storage slot key. func (h *stateHistory) stateSet() (map[common.Hash][]byte, map[common.Hash]map[common.Hash][]byte) { @@ -536,8 +567,8 @@ func readStateHistory(reader ethdb.AncientReader, id uint64) (*stateHistory, err } // readStateHistories reads a list of state history records within the specified range. 
-func readStateHistories(freezer ethdb.AncientReader, start uint64, count uint64) ([]*stateHistory, error) { - var histories []*stateHistory +func readStateHistories(freezer ethdb.AncientReader, start uint64, count uint64) ([]history, error) { + var histories []history metaList, aIndexList, sIndexList, aDataList, sDataList, err := rawdb.ReadStateHistoryList(freezer, start, count) if err != nil { return nil, err diff --git a/triedb/pathdb/history_state_test.go b/triedb/pathdb/history_state_test.go index 4a777111ea..5718081566 100644 --- a/triedb/pathdb/history_state_test.go +++ b/triedb/pathdb/history_state_test.go @@ -137,7 +137,7 @@ func TestTruncateHeadStateHistory(t *testing.T) { rawdb.WriteStateHistory(freezer, uint64(i+1), hs[i].meta.encode(), accountIndex, storageIndex, accountData, storageData) } for size := len(hs); size > 0; size-- { - pruned, err := truncateFromHead(freezer, uint64(size-1)) + pruned, err := truncateFromHead(freezer, typeStateHistory, uint64(size-1)) if err != nil { t.Fatalf("Failed to truncate from head %v", err) } @@ -161,7 +161,7 @@ func TestTruncateTailStateHistory(t *testing.T) { rawdb.WriteStateHistory(freezer, uint64(i+1), hs[i].meta.encode(), accountIndex, storageIndex, accountData, storageData) } for newTail := 1; newTail < len(hs); newTail++ { - pruned, _ := truncateFromTail(freezer, uint64(newTail)) + pruned, _ := truncateFromTail(freezer, typeStateHistory, uint64(newTail)) if pruned != 1 { t.Error("Unexpected pruned items", "want", 1, "got", pruned) } @@ -209,7 +209,7 @@ func TestTruncateTailStateHistories(t *testing.T) { accountData, storageData, accountIndex, storageIndex := hs[i].encode() rawdb.WriteStateHistory(freezer, uint64(i+1), hs[i].meta.encode(), accountIndex, storageIndex, accountData, storageData) } - pruned, _ := truncateFromTail(freezer, uint64(10)-c.limit) + pruned, _ := truncateFromTail(freezer, typeStateHistory, uint64(10)-c.limit) if pruned != c.expPruned { t.Error("Unexpected pruned items", "want", 
c.expPruned, "got", pruned) } @@ -233,7 +233,7 @@ func TestTruncateOutOfRange(t *testing.T) { accountData, storageData, accountIndex, storageIndex := hs[i].encode() rawdb.WriteStateHistory(freezer, uint64(i+1), hs[i].meta.encode(), accountIndex, storageIndex, accountData, storageData) } - truncateFromTail(freezer, uint64(len(hs)/2)) + truncateFromTail(freezer, typeStateHistory, uint64(len(hs)/2)) // Ensure of-out-range truncations are rejected correctly. head, _ := freezer.Ancients() @@ -254,9 +254,9 @@ func TestTruncateOutOfRange(t *testing.T) { for _, c := range cases { var gotErr error if c.mode == 0 { - _, gotErr = truncateFromHead(freezer, c.target) + _, gotErr = truncateFromHead(freezer, typeStateHistory, c.target) } else { - _, gotErr = truncateFromTail(freezer, c.target) + _, gotErr = truncateFromTail(freezer, typeStateHistory, c.target) } if !errors.Is(gotErr, c.expErr) { t.Errorf("Unexpected error, want: %v, got: %v", c.expErr, gotErr) diff --git a/triedb/pathdb/journal.go b/triedb/pathdb/journal.go index 4639932763..02bdef5d34 100644 --- a/triedb/pathdb/journal.go +++ b/triedb/pathdb/journal.go @@ -229,7 +229,7 @@ func (db *Database) loadDiffLayer(parent layer, r *rlp.Stream) (layer, error) { return nil, fmt.Errorf("load block number: %v", err) } // Read in-memory trie nodes from journal - var nodes nodeSet + var nodes nodeSetWithOrigin if err := nodes.decode(r); err != nil { return nil, err } @@ -267,7 +267,7 @@ func (dl *diskLayer) journal(w io.Writer) error { if err := dl.buffer.states.encode(w); err != nil { return err } - log.Debug("Journaled pathdb disk layer", "root", dl.root) + log.Debug("Journaled pathdb disk layer", "root", dl.root, "id", dl.id) return nil } @@ -333,7 +333,16 @@ func (db *Database) Journal(root common.Hash) error { if db.readOnly { return errDatabaseReadOnly } - + // Forcibly sync the ancient store before persisting the in-memory layers. 
+ // This prevents an edge case where the in-memory layers are persisted + // but the ancient store is not properly closed, resulting in recent writes + // being lost. After a restart, the ancient store would then be misaligned + // with the disk layer, causing data corruption. + if db.stateFreezer != nil { + if err := db.stateFreezer.SyncAncient(); err != nil { + return err + } + } // Store the journal into the database and return var ( file *os.File diff --git a/triedb/pathdb/layertree.go b/triedb/pathdb/layertree.go index b2f3f7f37d..ec45257db5 100644 --- a/triedb/pathdb/layertree.go +++ b/triedb/pathdb/layertree.go @@ -22,7 +22,6 @@ import ( "sync" "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/trie/trienode" ) // layerTree is a group of state layers identified by the state root. @@ -142,7 +141,7 @@ func (tree *layerTree) len() int { } // add inserts a new layer into the tree if it can be linked to an existing old parent. -func (tree *layerTree) add(root common.Hash, parentRoot common.Hash, block uint64, nodes *trienode.MergedNodeSet, states *StateSetWithOrigin) error { +func (tree *layerTree) add(root common.Hash, parentRoot common.Hash, block uint64, nodes *nodeSetWithOrigin, states *StateSetWithOrigin) error { // Reject noop updates to avoid self-loops. This is a special case that can // happen for clique networks and proof-of-stake networks where empty blocks // don't modify the state (0 block subsidy). 
@@ -156,7 +155,7 @@ func (tree *layerTree) add(root common.Hash, parentRoot common.Hash, block uint6 if parent == nil { return fmt.Errorf("triedb parent [%#x] layer missing", parentRoot) } - l := parent.update(root, parent.stateID()+1, block, newNodeSet(nodes.Flatten()), states) + l := parent.update(root, parent.stateID()+1, block, nodes, states) tree.lock.Lock() defer tree.lock.Unlock() diff --git a/triedb/pathdb/layertree_test.go b/triedb/pathdb/layertree_test.go index a76d60ba5b..a74c6eb045 100644 --- a/triedb/pathdb/layertree_test.go +++ b/triedb/pathdb/layertree_test.go @@ -22,7 +22,6 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/rawdb" - "github.com/ethereum/go-ethereum/trie/trienode" ) func newTestLayerTree() *layerTree { @@ -45,9 +44,9 @@ func TestLayerCap(t *testing.T) { // C1->C2->C3->C4 (HEAD) init: func() *layerTree { tr := newTestLayerTree() - tr.add(common.Hash{0x2}, common.Hash{0x1}, 1, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(nil, nil, nil, nil, false)) - tr.add(common.Hash{0x3}, common.Hash{0x2}, 2, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(nil, nil, nil, nil, false)) - tr.add(common.Hash{0x4}, common.Hash{0x3}, 3, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(nil, nil, nil, nil, false)) + tr.add(common.Hash{0x2}, common.Hash{0x1}, 1, NewNodeSetWithOrigin(nil, nil), NewStateSetWithOrigin(nil, nil, nil, nil, false)) + tr.add(common.Hash{0x3}, common.Hash{0x2}, 2, NewNodeSetWithOrigin(nil, nil), NewStateSetWithOrigin(nil, nil, nil, nil, false)) + tr.add(common.Hash{0x4}, common.Hash{0x3}, 3, NewNodeSetWithOrigin(nil, nil), NewStateSetWithOrigin(nil, nil, nil, nil, false)) return tr }, // Chain: @@ -66,9 +65,9 @@ func TestLayerCap(t *testing.T) { // C1->C2->C3->C4 (HEAD) init: func() *layerTree { tr := newTestLayerTree() - tr.add(common.Hash{0x2}, common.Hash{0x1}, 1, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(nil, nil, nil, nil, false)) - tr.add(common.Hash{0x3}, 
common.Hash{0x2}, 2, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(nil, nil, nil, nil, false)) - tr.add(common.Hash{0x4}, common.Hash{0x3}, 3, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(nil, nil, nil, nil, false)) + tr.add(common.Hash{0x2}, common.Hash{0x1}, 1, NewNodeSetWithOrigin(nil, nil), NewStateSetWithOrigin(nil, nil, nil, nil, false)) + tr.add(common.Hash{0x3}, common.Hash{0x2}, 2, NewNodeSetWithOrigin(nil, nil), NewStateSetWithOrigin(nil, nil, nil, nil, false)) + tr.add(common.Hash{0x4}, common.Hash{0x3}, 3, NewNodeSetWithOrigin(nil, nil), NewStateSetWithOrigin(nil, nil, nil, nil, false)) return tr }, // Chain: @@ -86,9 +85,9 @@ func TestLayerCap(t *testing.T) { // C1->C2->C3->C4 (HEAD) init: func() *layerTree { tr := newTestLayerTree() - tr.add(common.Hash{0x2}, common.Hash{0x1}, 1, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(nil, nil, nil, nil, false)) - tr.add(common.Hash{0x3}, common.Hash{0x2}, 2, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(nil, nil, nil, nil, false)) - tr.add(common.Hash{0x4}, common.Hash{0x3}, 3, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(nil, nil, nil, nil, false)) + tr.add(common.Hash{0x2}, common.Hash{0x1}, 1, NewNodeSetWithOrigin(nil, nil), NewStateSetWithOrigin(nil, nil, nil, nil, false)) + tr.add(common.Hash{0x3}, common.Hash{0x2}, 2, NewNodeSetWithOrigin(nil, nil), NewStateSetWithOrigin(nil, nil, nil, nil, false)) + tr.add(common.Hash{0x4}, common.Hash{0x3}, 3, NewNodeSetWithOrigin(nil, nil), NewStateSetWithOrigin(nil, nil, nil, nil, false)) return tr }, // Chain: @@ -106,12 +105,12 @@ func TestLayerCap(t *testing.T) { // ->C2'->C3'->C4' init: func() *layerTree { tr := newTestLayerTree() - tr.add(common.Hash{0x2a}, common.Hash{0x1}, 1, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(nil, nil, nil, nil, false)) - tr.add(common.Hash{0x3a}, common.Hash{0x2a}, 2, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(nil, nil, nil, nil, false)) - tr.add(common.Hash{0x4a}, common.Hash{0x3a}, 3, 
trienode.NewMergedNodeSet(), NewStateSetWithOrigin(nil, nil, nil, nil, false)) - tr.add(common.Hash{0x2b}, common.Hash{0x1}, 1, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(nil, nil, nil, nil, false)) - tr.add(common.Hash{0x3b}, common.Hash{0x2b}, 2, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(nil, nil, nil, nil, false)) - tr.add(common.Hash{0x4b}, common.Hash{0x3b}, 3, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(nil, nil, nil, nil, false)) + tr.add(common.Hash{0x2a}, common.Hash{0x1}, 1, NewNodeSetWithOrigin(nil, nil), NewStateSetWithOrigin(nil, nil, nil, nil, false)) + tr.add(common.Hash{0x3a}, common.Hash{0x2a}, 2, NewNodeSetWithOrigin(nil, nil), NewStateSetWithOrigin(nil, nil, nil, nil, false)) + tr.add(common.Hash{0x4a}, common.Hash{0x3a}, 3, NewNodeSetWithOrigin(nil, nil), NewStateSetWithOrigin(nil, nil, nil, nil, false)) + tr.add(common.Hash{0x2b}, common.Hash{0x1}, 1, NewNodeSetWithOrigin(nil, nil), NewStateSetWithOrigin(nil, nil, nil, nil, false)) + tr.add(common.Hash{0x3b}, common.Hash{0x2b}, 2, NewNodeSetWithOrigin(nil, nil), NewStateSetWithOrigin(nil, nil, nil, nil, false)) + tr.add(common.Hash{0x4b}, common.Hash{0x3b}, 3, NewNodeSetWithOrigin(nil, nil), NewStateSetWithOrigin(nil, nil, nil, nil, false)) return tr }, // Chain: @@ -131,12 +130,12 @@ func TestLayerCap(t *testing.T) { // ->C2'->C3'->C4' init: func() *layerTree { tr := newTestLayerTree() - tr.add(common.Hash{0x2a}, common.Hash{0x1}, 1, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(nil, nil, nil, nil, false)) - tr.add(common.Hash{0x3a}, common.Hash{0x2a}, 2, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(nil, nil, nil, nil, false)) - tr.add(common.Hash{0x4a}, common.Hash{0x3a}, 3, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(nil, nil, nil, nil, false)) - tr.add(common.Hash{0x2b}, common.Hash{0x1}, 1, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(nil, nil, nil, nil, false)) - tr.add(common.Hash{0x3b}, common.Hash{0x2b}, 2, trienode.NewMergedNodeSet(), 
NewStateSetWithOrigin(nil, nil, nil, nil, false)) - tr.add(common.Hash{0x4b}, common.Hash{0x3b}, 3, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(nil, nil, nil, nil, false)) + tr.add(common.Hash{0x2a}, common.Hash{0x1}, 1, NewNodeSetWithOrigin(nil, nil), NewStateSetWithOrigin(nil, nil, nil, nil, false)) + tr.add(common.Hash{0x3a}, common.Hash{0x2a}, 2, NewNodeSetWithOrigin(nil, nil), NewStateSetWithOrigin(nil, nil, nil, nil, false)) + tr.add(common.Hash{0x4a}, common.Hash{0x3a}, 3, NewNodeSetWithOrigin(nil, nil), NewStateSetWithOrigin(nil, nil, nil, nil, false)) + tr.add(common.Hash{0x2b}, common.Hash{0x1}, 1, NewNodeSetWithOrigin(nil, nil), NewStateSetWithOrigin(nil, nil, nil, nil, false)) + tr.add(common.Hash{0x3b}, common.Hash{0x2b}, 2, NewNodeSetWithOrigin(nil, nil), NewStateSetWithOrigin(nil, nil, nil, nil, false)) + tr.add(common.Hash{0x4b}, common.Hash{0x3b}, 3, NewNodeSetWithOrigin(nil, nil), NewStateSetWithOrigin(nil, nil, nil, nil, false)) return tr }, // Chain: @@ -155,11 +154,11 @@ func TestLayerCap(t *testing.T) { // ->C3'->C4' init: func() *layerTree { tr := newTestLayerTree() - tr.add(common.Hash{0x2}, common.Hash{0x1}, 1, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(nil, nil, nil, nil, false)) - tr.add(common.Hash{0x3a}, common.Hash{0x2}, 2, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(nil, nil, nil, nil, false)) - tr.add(common.Hash{0x4a}, common.Hash{0x3a}, 3, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(nil, nil, nil, nil, false)) - tr.add(common.Hash{0x3b}, common.Hash{0x2}, 2, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(nil, nil, nil, nil, false)) - tr.add(common.Hash{0x4b}, common.Hash{0x3b}, 3, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(nil, nil, nil, nil, false)) + tr.add(common.Hash{0x2}, common.Hash{0x1}, 1, NewNodeSetWithOrigin(nil, nil), NewStateSetWithOrigin(nil, nil, nil, nil, false)) + tr.add(common.Hash{0x3a}, common.Hash{0x2}, 2, NewNodeSetWithOrigin(nil, nil), NewStateSetWithOrigin(nil, nil, 
nil, nil, false)) + tr.add(common.Hash{0x4a}, common.Hash{0x3a}, 3, NewNodeSetWithOrigin(nil, nil), NewStateSetWithOrigin(nil, nil, nil, nil, false)) + tr.add(common.Hash{0x3b}, common.Hash{0x2}, 2, NewNodeSetWithOrigin(nil, nil), NewStateSetWithOrigin(nil, nil, nil, nil, false)) + tr.add(common.Hash{0x4b}, common.Hash{0x3b}, 3, NewNodeSetWithOrigin(nil, nil), NewStateSetWithOrigin(nil, nil, nil, nil, false)) return tr }, // Chain: @@ -213,8 +212,8 @@ func TestBaseLayer(t *testing.T) { // C1->C2->C3 (HEAD) { func() { - tr.add(common.Hash{0x2}, common.Hash{0x1}, 1, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(nil, nil, nil, nil, false)) - tr.add(common.Hash{0x3}, common.Hash{0x2}, 2, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(nil, nil, nil, nil, false)) + tr.add(common.Hash{0x2}, common.Hash{0x1}, 1, NewNodeSetWithOrigin(nil, nil), NewStateSetWithOrigin(nil, nil, nil, nil, false)) + tr.add(common.Hash{0x3}, common.Hash{0x2}, 2, NewNodeSetWithOrigin(nil, nil), NewStateSetWithOrigin(nil, nil, nil, nil, false)) }, common.Hash{0x1}, }, @@ -230,9 +229,9 @@ func TestBaseLayer(t *testing.T) { // C4->C5->C6 (HEAD) { func() { - tr.add(common.Hash{0x4}, common.Hash{0x3}, 3, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(nil, nil, nil, nil, false)) - tr.add(common.Hash{0x5}, common.Hash{0x4}, 4, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(nil, nil, nil, nil, false)) - tr.add(common.Hash{0x6}, common.Hash{0x5}, 5, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(nil, nil, nil, nil, false)) + tr.add(common.Hash{0x4}, common.Hash{0x3}, 3, NewNodeSetWithOrigin(nil, nil), NewStateSetWithOrigin(nil, nil, nil, nil, false)) + tr.add(common.Hash{0x5}, common.Hash{0x4}, 4, NewNodeSetWithOrigin(nil, nil), NewStateSetWithOrigin(nil, nil, nil, nil, false)) + tr.add(common.Hash{0x6}, common.Hash{0x5}, 5, NewNodeSetWithOrigin(nil, nil), NewStateSetWithOrigin(nil, nil, nil, nil, false)) tr.cap(common.Hash{0x6}, 2) }, common.Hash{0x4}, @@ -258,7 +257,7 @@ func 
TestDescendant(t *testing.T) { // C1->C2 (HEAD) init: func() *layerTree { tr := newTestLayerTree() - tr.add(common.Hash{0x2}, common.Hash{0x1}, 1, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(nil, nil, nil, nil, false)) + tr.add(common.Hash{0x2}, common.Hash{0x1}, 1, NewNodeSetWithOrigin(nil, nil), NewStateSetWithOrigin(nil, nil, nil, nil, false)) return tr }, snapshotA: map[common.Hash]map[common.Hash]struct{}{ @@ -269,7 +268,7 @@ func TestDescendant(t *testing.T) { // Chain: // C1->C2->C3 (HEAD) op: func(tr *layerTree) { - tr.add(common.Hash{0x3}, common.Hash{0x2}, 2, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(nil, nil, nil, nil, false)) + tr.add(common.Hash{0x3}, common.Hash{0x2}, 2, NewNodeSetWithOrigin(nil, nil), NewStateSetWithOrigin(nil, nil, nil, nil, false)) }, snapshotB: map[common.Hash]map[common.Hash]struct{}{ common.Hash{0x1}: { @@ -286,9 +285,9 @@ func TestDescendant(t *testing.T) { // C1->C2->C3->C4 (HEAD) init: func() *layerTree { tr := newTestLayerTree() - tr.add(common.Hash{0x2}, common.Hash{0x1}, 1, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(nil, nil, nil, nil, false)) - tr.add(common.Hash{0x3}, common.Hash{0x2}, 2, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(nil, nil, nil, nil, false)) - tr.add(common.Hash{0x4}, common.Hash{0x3}, 3, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(nil, nil, nil, nil, false)) + tr.add(common.Hash{0x2}, common.Hash{0x1}, 1, NewNodeSetWithOrigin(nil, nil), NewStateSetWithOrigin(nil, nil, nil, nil, false)) + tr.add(common.Hash{0x3}, common.Hash{0x2}, 2, NewNodeSetWithOrigin(nil, nil), NewStateSetWithOrigin(nil, nil, nil, nil, false)) + tr.add(common.Hash{0x4}, common.Hash{0x3}, 3, NewNodeSetWithOrigin(nil, nil), NewStateSetWithOrigin(nil, nil, nil, nil, false)) return tr }, snapshotA: map[common.Hash]map[common.Hash]struct{}{ @@ -325,9 +324,9 @@ func TestDescendant(t *testing.T) { // C1->C2->C3->C4 (HEAD) init: func() *layerTree { tr := newTestLayerTree() - tr.add(common.Hash{0x2}, 
common.Hash{0x1}, 1, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(nil, nil, nil, nil, false)) - tr.add(common.Hash{0x3}, common.Hash{0x2}, 2, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(nil, nil, nil, nil, false)) - tr.add(common.Hash{0x4}, common.Hash{0x3}, 3, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(nil, nil, nil, nil, false)) + tr.add(common.Hash{0x2}, common.Hash{0x1}, 1, NewNodeSetWithOrigin(nil, nil), NewStateSetWithOrigin(nil, nil, nil, nil, false)) + tr.add(common.Hash{0x3}, common.Hash{0x2}, 2, NewNodeSetWithOrigin(nil, nil), NewStateSetWithOrigin(nil, nil, nil, nil, false)) + tr.add(common.Hash{0x4}, common.Hash{0x3}, 3, NewNodeSetWithOrigin(nil, nil), NewStateSetWithOrigin(nil, nil, nil, nil, false)) return tr }, snapshotA: map[common.Hash]map[common.Hash]struct{}{ @@ -360,9 +359,9 @@ func TestDescendant(t *testing.T) { // C1->C2->C3->C4 (HEAD) init: func() *layerTree { tr := newTestLayerTree() - tr.add(common.Hash{0x2}, common.Hash{0x1}, 1, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(nil, nil, nil, nil, false)) - tr.add(common.Hash{0x3}, common.Hash{0x2}, 2, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(nil, nil, nil, nil, false)) - tr.add(common.Hash{0x4}, common.Hash{0x3}, 3, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(nil, nil, nil, nil, false)) + tr.add(common.Hash{0x2}, common.Hash{0x1}, 1, NewNodeSetWithOrigin(nil, nil), NewStateSetWithOrigin(nil, nil, nil, nil, false)) + tr.add(common.Hash{0x3}, common.Hash{0x2}, 2, NewNodeSetWithOrigin(nil, nil), NewStateSetWithOrigin(nil, nil, nil, nil, false)) + tr.add(common.Hash{0x4}, common.Hash{0x3}, 3, NewNodeSetWithOrigin(nil, nil), NewStateSetWithOrigin(nil, nil, nil, nil, false)) return tr }, snapshotA: map[common.Hash]map[common.Hash]struct{}{ @@ -392,12 +391,12 @@ func TestDescendant(t *testing.T) { // ->C2'->C3'->C4' init: func() *layerTree { tr := newTestLayerTree() - tr.add(common.Hash{0x2a}, common.Hash{0x1}, 1, trienode.NewMergedNodeSet(), 
NewStateSetWithOrigin(nil, nil, nil, nil, false)) - tr.add(common.Hash{0x3a}, common.Hash{0x2a}, 2, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(nil, nil, nil, nil, false)) - tr.add(common.Hash{0x4a}, common.Hash{0x3a}, 3, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(nil, nil, nil, nil, false)) - tr.add(common.Hash{0x2b}, common.Hash{0x1}, 1, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(nil, nil, nil, nil, false)) - tr.add(common.Hash{0x3b}, common.Hash{0x2b}, 2, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(nil, nil, nil, nil, false)) - tr.add(common.Hash{0x4b}, common.Hash{0x3b}, 3, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(nil, nil, nil, nil, false)) + tr.add(common.Hash{0x2a}, common.Hash{0x1}, 1, NewNodeSetWithOrigin(nil, nil), NewStateSetWithOrigin(nil, nil, nil, nil, false)) + tr.add(common.Hash{0x3a}, common.Hash{0x2a}, 2, NewNodeSetWithOrigin(nil, nil), NewStateSetWithOrigin(nil, nil, nil, nil, false)) + tr.add(common.Hash{0x4a}, common.Hash{0x3a}, 3, NewNodeSetWithOrigin(nil, nil), NewStateSetWithOrigin(nil, nil, nil, nil, false)) + tr.add(common.Hash{0x2b}, common.Hash{0x1}, 1, NewNodeSetWithOrigin(nil, nil), NewStateSetWithOrigin(nil, nil, nil, nil, false)) + tr.add(common.Hash{0x3b}, common.Hash{0x2b}, 2, NewNodeSetWithOrigin(nil, nil), NewStateSetWithOrigin(nil, nil, nil, nil, false)) + tr.add(common.Hash{0x4b}, common.Hash{0x3b}, 3, NewNodeSetWithOrigin(nil, nil), NewStateSetWithOrigin(nil, nil, nil, nil, false)) return tr }, snapshotA: map[common.Hash]map[common.Hash]struct{}{ @@ -445,12 +444,12 @@ func TestDescendant(t *testing.T) { // ->C2'->C3'->C4' init: func() *layerTree { tr := newTestLayerTree() - tr.add(common.Hash{0x2a}, common.Hash{0x1}, 1, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(nil, nil, nil, nil, false)) - tr.add(common.Hash{0x3a}, common.Hash{0x2a}, 2, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(nil, nil, nil, nil, false)) - tr.add(common.Hash{0x4a}, common.Hash{0x3a}, 3, 
trienode.NewMergedNodeSet(), NewStateSetWithOrigin(nil, nil, nil, nil, false)) - tr.add(common.Hash{0x2b}, common.Hash{0x1}, 1, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(nil, nil, nil, nil, false)) - tr.add(common.Hash{0x3b}, common.Hash{0x2b}, 2, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(nil, nil, nil, nil, false)) - tr.add(common.Hash{0x4b}, common.Hash{0x3b}, 3, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(nil, nil, nil, nil, false)) + tr.add(common.Hash{0x2a}, common.Hash{0x1}, 1, NewNodeSetWithOrigin(nil, nil), NewStateSetWithOrigin(nil, nil, nil, nil, false)) + tr.add(common.Hash{0x3a}, common.Hash{0x2a}, 2, NewNodeSetWithOrigin(nil, nil), NewStateSetWithOrigin(nil, nil, nil, nil, false)) + tr.add(common.Hash{0x4a}, common.Hash{0x3a}, 3, NewNodeSetWithOrigin(nil, nil), NewStateSetWithOrigin(nil, nil, nil, nil, false)) + tr.add(common.Hash{0x2b}, common.Hash{0x1}, 1, NewNodeSetWithOrigin(nil, nil), NewStateSetWithOrigin(nil, nil, nil, nil, false)) + tr.add(common.Hash{0x3b}, common.Hash{0x2b}, 2, NewNodeSetWithOrigin(nil, nil), NewStateSetWithOrigin(nil, nil, nil, nil, false)) + tr.add(common.Hash{0x4b}, common.Hash{0x3b}, 3, NewNodeSetWithOrigin(nil, nil), NewStateSetWithOrigin(nil, nil, nil, nil, false)) return tr }, snapshotA: map[common.Hash]map[common.Hash]struct{}{ @@ -494,11 +493,11 @@ func TestDescendant(t *testing.T) { // ->C3'->C4' init: func() *layerTree { tr := newTestLayerTree() - tr.add(common.Hash{0x2}, common.Hash{0x1}, 1, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(nil, nil, nil, nil, false)) - tr.add(common.Hash{0x3a}, common.Hash{0x2}, 2, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(nil, nil, nil, nil, false)) - tr.add(common.Hash{0x4a}, common.Hash{0x3a}, 3, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(nil, nil, nil, nil, false)) - tr.add(common.Hash{0x3b}, common.Hash{0x2}, 2, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(nil, nil, nil, nil, false)) - tr.add(common.Hash{0x4b}, 
common.Hash{0x3b}, 3, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(nil, nil, nil, nil, false)) + tr.add(common.Hash{0x2}, common.Hash{0x1}, 1, NewNodeSetWithOrigin(nil, nil), NewStateSetWithOrigin(nil, nil, nil, nil, false)) + tr.add(common.Hash{0x3a}, common.Hash{0x2}, 2, NewNodeSetWithOrigin(nil, nil), NewStateSetWithOrigin(nil, nil, nil, nil, false)) + tr.add(common.Hash{0x4a}, common.Hash{0x3a}, 3, NewNodeSetWithOrigin(nil, nil), NewStateSetWithOrigin(nil, nil, nil, nil, false)) + tr.add(common.Hash{0x3b}, common.Hash{0x2}, 2, NewNodeSetWithOrigin(nil, nil), NewStateSetWithOrigin(nil, nil, nil, nil, false)) + tr.add(common.Hash{0x4b}, common.Hash{0x3b}, 3, NewNodeSetWithOrigin(nil, nil), NewStateSetWithOrigin(nil, nil, nil, nil, false)) return tr }, snapshotA: map[common.Hash]map[common.Hash]struct{}{ @@ -580,11 +579,11 @@ func TestAccountLookup(t *testing.T) { // Chain: // C1->C2->C3->C4 (HEAD) tr := newTestLayerTree() // base = 0x1 - tr.add(common.Hash{0x2}, common.Hash{0x1}, 1, trienode.NewMergedNodeSet(), + tr.add(common.Hash{0x2}, common.Hash{0x1}, 1, NewNodeSetWithOrigin(nil, nil), NewStateSetWithOrigin(randomAccountSet("0xa"), nil, nil, nil, false)) - tr.add(common.Hash{0x3}, common.Hash{0x2}, 2, trienode.NewMergedNodeSet(), + tr.add(common.Hash{0x3}, common.Hash{0x2}, 2, NewNodeSetWithOrigin(nil, nil), NewStateSetWithOrigin(randomAccountSet("0xb"), nil, nil, nil, false)) - tr.add(common.Hash{0x4}, common.Hash{0x3}, 3, trienode.NewMergedNodeSet(), + tr.add(common.Hash{0x4}, common.Hash{0x3}, 3, NewNodeSetWithOrigin(nil, nil), NewStateSetWithOrigin(randomAccountSet("0xa", "0xc"), nil, nil, nil, false)) var cases = []struct { @@ -734,11 +733,11 @@ func TestStorageLookup(t *testing.T) { // Chain: // C1->C2->C3->C4 (HEAD) tr := newTestLayerTree() // base = 0x1 - tr.add(common.Hash{0x2}, common.Hash{0x1}, 1, trienode.NewMergedNodeSet(), + tr.add(common.Hash{0x2}, common.Hash{0x1}, 1, NewNodeSetWithOrigin(nil, nil), 
NewStateSetWithOrigin(randomAccountSet("0xa"), randomStorageSet([]string{"0xa"}, [][]string{{"0x1"}}, nil), nil, nil, false)) - tr.add(common.Hash{0x3}, common.Hash{0x2}, 2, trienode.NewMergedNodeSet(), + tr.add(common.Hash{0x3}, common.Hash{0x2}, 2, NewNodeSetWithOrigin(nil, nil), NewStateSetWithOrigin(randomAccountSet("0xa"), randomStorageSet([]string{"0xa"}, [][]string{{"0x2"}}, nil), nil, nil, false)) - tr.add(common.Hash{0x4}, common.Hash{0x3}, 3, trienode.NewMergedNodeSet(), + tr.add(common.Hash{0x4}, common.Hash{0x3}, 3, NewNodeSetWithOrigin(nil, nil), NewStateSetWithOrigin(randomAccountSet("0xa"), randomStorageSet([]string{"0xa"}, [][]string{{"0x1", "0x3"}}, nil), nil, nil, false)) var cases = []struct { diff --git a/triedb/pathdb/nodes.go b/triedb/pathdb/nodes.go index f90bd0f01c..c6f9e7aece 100644 --- a/triedb/pathdb/nodes.go +++ b/triedb/pathdb/nodes.go @@ -18,6 +18,7 @@ package pathdb import ( "bytes" + "errors" "fmt" "io" "maps" @@ -301,3 +302,125 @@ func (s *nodeSet) dbsize() int { } return m + int(s.size) } + +// nodeSetWithOrigin wraps the node set with additional original values of the +// mutated trie nodes. +type nodeSetWithOrigin struct { + *nodeSet + + // nodeOrigin represents the trie nodes before the state transition. It's keyed + // by the account address hash and node path. The nil value means the trie node + // was not present. + nodeOrigin map[common.Hash]map[string][]byte + + // memory size of the state data (accountNodeOrigin and storageNodeOrigin) + size uint64 +} + +// NewNodeSetWithOrigin constructs the state set with the provided data. +func NewNodeSetWithOrigin(nodes map[common.Hash]map[string]*trienode.Node, origins map[common.Hash]map[string][]byte) *nodeSetWithOrigin { + // Don't panic for the lazy callers, initialize the nil maps instead. 
+ if origins == nil { + origins = make(map[common.Hash]map[string][]byte) + } + set := &nodeSetWithOrigin{ + nodeSet: newNodeSet(nodes), + nodeOrigin: origins, + } + set.computeSize() + return set +} + +// computeSize calculates the database size of the held trie nodes. +func (s *nodeSetWithOrigin) computeSize() { + var size int + for owner, slots := range s.nodeOrigin { + prefixLen := common.HashLength + if owner == (common.Hash{}) { + prefixLen = 0 + } + for path, data := range slots { + size += prefixLen + len(path) + len(data) + } + } + s.size = s.nodeSet.size + uint64(size) +} + +// encode serializes the content of node set into the provided writer. +func (s *nodeSetWithOrigin) encode(w io.Writer) error { + // Encode node set + if err := s.nodeSet.encode(w); err != nil { + return err + } + // Short circuit if the origins are not tracked + if len(s.nodeOrigin) == 0 { + return nil + } + + // Encode node origins + nodes := make([]journalNodes, 0, len(s.nodeOrigin)) + for owner, subset := range s.nodeOrigin { + entry := journalNodes{ + Owner: owner, + Nodes: make([]journalNode, 0, len(subset)), + } + for path, node := range subset { + entry.Nodes = append(entry.Nodes, journalNode{ + Path: []byte(path), + Blob: node, + }) + } + nodes = append(nodes, entry) + } + return rlp.Encode(w, nodes) +} + +// hasOrigin returns whether the origin data set exists in the rlp stream. +// It's a workaround for backward compatibility. +func (s *nodeSetWithOrigin) hasOrigin(r *rlp.Stream) (bool, error) { + kind, _, err := r.Kind() + if err != nil { + if errors.Is(err, io.EOF) { + return false, nil + } + return false, err + } + // If the type of next element in the RLP stream is: + // - `rlp.List`: represents the original value of trienodes; + // - others, like `boolean`: represent a field in the following state data set; + return kind == rlp.List, nil +} + +// decode deserializes the content from the rlp stream into the node set. 
+func (s *nodeSetWithOrigin) decode(r *rlp.Stream) error { + if s.nodeSet == nil { + s.nodeSet = &nodeSet{} + } + if err := s.nodeSet.decode(r); err != nil { + return err + } + + // Decode node origins + s.nodeOrigin = make(map[common.Hash]map[string][]byte) + if hasOrigin, err := s.hasOrigin(r); err != nil { + return err + } else if hasOrigin { + var encoded []journalNodes + if err := r.Decode(&encoded); err != nil { + return fmt.Errorf("load nodes: %v", err) + } + for _, entry := range encoded { + subset := make(map[string][]byte, len(entry.Nodes)) + for _, n := range entry.Nodes { + if len(n.Blob) > 0 { + subset[string(n.Path)] = n.Blob + } else { + subset[string(n.Path)] = nil + } + } + s.nodeOrigin[entry.Owner] = subset + } + } + s.computeSize() + return nil +} diff --git a/triedb/pathdb/nodes_test.go b/triedb/pathdb/nodes_test.go new file mode 100644 index 0000000000..483dc4b1a6 --- /dev/null +++ b/triedb/pathdb/nodes_test.go @@ -0,0 +1,128 @@ +// Copyright 2025 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . 
+ +package pathdb + +import ( + "bytes" + "reflect" + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/rlp" + "github.com/ethereum/go-ethereum/trie/trienode" +) + +func TestNodeSetEncode(t *testing.T) { + nodes := make(map[common.Hash]map[string]*trienode.Node) + nodes[common.Hash{}] = map[string]*trienode.Node{ + "": trienode.New(crypto.Keccak256Hash([]byte{0x0}), []byte{0x0}), + "1": trienode.New(crypto.Keccak256Hash([]byte{0x1}), []byte{0x1}), + "2": trienode.New(crypto.Keccak256Hash([]byte{0x2}), []byte{0x2}), + } + nodes[common.Hash{0x1}] = map[string]*trienode.Node{ + "": trienode.New(crypto.Keccak256Hash([]byte{0x0}), []byte{0x0}), + "1": trienode.New(crypto.Keccak256Hash([]byte{0x1}), []byte{0x1}), + "2": trienode.New(crypto.Keccak256Hash([]byte{0x2}), []byte{0x2}), + } + s := newNodeSet(nodes) + + buf := bytes.NewBuffer(nil) + if err := s.encode(buf); err != nil { + t.Fatalf("Failed to encode states, %v", err) + } + var dec nodeSet + if err := dec.decode(rlp.NewStream(buf, 0)); err != nil { + t.Fatalf("Failed to decode states, %v", err) + } + if !reflect.DeepEqual(s.accountNodes, dec.accountNodes) { + t.Fatal("Unexpected account data") + } + if !reflect.DeepEqual(s.storageNodes, dec.storageNodes) { + t.Fatal("Unexpected storage data") + } +} + +func TestNodeSetWithOriginEncode(t *testing.T) { + nodes := make(map[common.Hash]map[string]*trienode.Node) + nodes[common.Hash{}] = map[string]*trienode.Node{ + "": trienode.New(crypto.Keccak256Hash([]byte{0x0}), []byte{0x0}), + "1": trienode.New(crypto.Keccak256Hash([]byte{0x1}), []byte{0x1}), + "2": trienode.New(crypto.Keccak256Hash([]byte{0x2}), []byte{0x2}), + } + nodes[common.Hash{0x1}] = map[string]*trienode.Node{ + "": trienode.New(crypto.Keccak256Hash([]byte{0x0}), []byte{0x0}), + "1": trienode.New(crypto.Keccak256Hash([]byte{0x1}), []byte{0x1}), + "2": trienode.New(crypto.Keccak256Hash([]byte{0x2}), []byte{0x2}), + } + 
origins := make(map[common.Hash]map[string][]byte) + origins[common.Hash{}] = map[string][]byte{ + "": nil, + "1": {0x1}, + "2": {0x2}, + } + origins[common.Hash{0x1}] = map[string][]byte{ + "": nil, + "1": {0x1}, + "2": {0x2}, + } + + // Encode with origin set + s := NewNodeSetWithOrigin(nodes, origins) + + buf := bytes.NewBuffer(nil) + if err := s.encode(buf); err != nil { + t.Fatalf("Failed to encode states, %v", err) + } + var dec nodeSetWithOrigin + if err := dec.decode(rlp.NewStream(buf, 0)); err != nil { + t.Fatalf("Failed to decode states, %v", err) + } + if !reflect.DeepEqual(s.accountNodes, dec.accountNodes) { + t.Fatal("Unexpected account data") + } + if !reflect.DeepEqual(s.storageNodes, dec.storageNodes) { + t.Fatal("Unexpected storage data") + } + if !reflect.DeepEqual(s.nodeOrigin, dec.nodeOrigin) { + t.Fatal("Unexpected node origin data") + } + + // Encode without origin set + s = NewNodeSetWithOrigin(nodes, nil) + + buf = bytes.NewBuffer(nil) + if err := s.encode(buf); err != nil { + t.Fatalf("Failed to encode states, %v", err) + } + var dec2 nodeSetWithOrigin + if err := dec2.decode(rlp.NewStream(buf, 0)); err != nil { + t.Fatalf("Failed to decode states, %v", err) + } + if !reflect.DeepEqual(s.accountNodes, dec2.accountNodes) { + t.Fatal("Unexpected account data") + } + if !reflect.DeepEqual(s.storageNodes, dec2.storageNodes) { + t.Fatal("Unexpected storage data") + } + if len(dec2.nodeOrigin) != 0 { + t.Fatal("unexpected node origin data") + } + if dec2.size != s.size { + t.Fatalf("Unexpected data size, got: %d, want: %d", dec2.size, s.size) + } +} diff --git a/version/version.go b/version/version.go index 2695c43d43..6430140631 100644 --- a/version/version.go +++ b/version/version.go @@ -19,6 +19,6 @@ package version const ( Major = 1 // Major version component of the current release Minor = 16 // Minor version component of the current release - Patch = 3 // Patch version component of the current release + Patch = 4 // Patch version component 
of the current release Meta = "stable" // Version metadata to append to the version string )