From 0ad0966cecf0602bc554fd368d2a02ea762baf8c Mon Sep 17 00:00:00 2001 From: rjl493456442 Date: Tue, 4 Feb 2025 18:45:45 +0800 Subject: [PATCH 01/17] core/rawdb: introduce flush offset in freezer (#30392) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This is a follow-up PR to #29792 to get rid of the data file sync. **This is a non-backward compatible change, which increments the database version from 8 to 9**. We introduce a flushOffset for each freezer table, which tracks the position of the most recently fsync’d item in the index file. When this offset moves forward, it indicates that all index entries below it, along with their corresponding data items, have been properly persisted to disk. The offset can also be moved backward when truncating from either the head or tail of the file. Previously, the data file required an explicit fsync after every mutation, which was highly inefficient. With the introduction of the flush offset, the synchronization strategy becomes more flexible, allowing the freezer to sync every 30 seconds instead. The data items above the flush offset are regarded as volatile and callers must ensure they are recoverable after an unclean shutdown, or explicitly sync the freezer before proceeding with any further operations. 
--------- Co-authored-by: Felix Lange --- core/blockchain.go | 10 +- core/rawdb/accessors_chain_test.go | 3 +- core/rawdb/ancient_scheme.go | 1 + core/rawdb/freezer_batch.go | 10 +- core/rawdb/freezer_meta.go | 192 +++++++++++----- core/rawdb/freezer_meta_test.go | 88 ++++++- core/rawdb/freezer_table.go | 264 ++++++++++++++------- core/rawdb/freezer_table_test.go | 356 ++++++++++++++++++++--------- 8 files changed, 664 insertions(+), 260 deletions(-) diff --git a/core/blockchain.go b/core/blockchain.go index ab88f4b68e..f097dc9781 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -113,23 +113,29 @@ const ( // * the `BlockNumber`, `TxHash`, `TxIndex`, `BlockHash` and `Index` fields of log are deleted // * the `Bloom` field of receipt is deleted // * the `BlockIndex` and `TxIndex` fields of txlookup are deleted + // // - Version 5 // The following incompatible database changes were added: // * the `TxHash`, `GasCost`, and `ContractAddress` fields are no longer stored for a receipt // * the `TxHash`, `GasCost`, and `ContractAddress` fields are computed by looking up the // receipts' corresponding block + // // - Version 6 // The following incompatible database changes were added: // * Transaction lookup information stores the corresponding block number instead of block hash + // // - Version 7 // The following incompatible database changes were added: // * Use freezer as the ancient database to maintain all ancient data + // // - Version 8 // The following incompatible database changes were added: // * New scheme for contract code in order to separate the codes and trie nodes + // // - Version 9 - // Total difficulty has been removed from both the key-value store and the - // ancient store, the td freezer table has been deprecated since that. + // The following incompatible database changes were added: + // * Total difficulty has been removed from both the key-value store and the ancient store. 
+ // * The metadata structure of freezer is changed by adding 'flushOffset' BlockChainVersion uint64 = 9 ) diff --git a/core/rawdb/accessors_chain_test.go b/core/rawdb/accessors_chain_test.go index 5533c60ab9..b9684f8e17 100644 --- a/core/rawdb/accessors_chain_test.go +++ b/core/rawdb/accessors_chain_test.go @@ -849,6 +849,7 @@ func TestHeadersRLPStorage(t *testing.T) { t.Fatalf("failed to create database with ancient backend") } defer db.Close() + // Create blocks var chain []*types.Block var pHash common.Hash @@ -864,7 +865,7 @@ func TestHeadersRLPStorage(t *testing.T) { chain = append(chain, block) pHash = block.Hash() } - var receipts []types.Receipts = make([]types.Receipts, 100) + receipts := make([]types.Receipts, 100) // Write first half to ancients WriteAncientBlocks(db, chain[:50], receipts[:50]) // Write second half to db diff --git a/core/rawdb/ancient_scheme.go b/core/rawdb/ancient_scheme.go index 54a3be391f..67bfa37ecc 100644 --- a/core/rawdb/ancient_scheme.go +++ b/core/rawdb/ancient_scheme.go @@ -58,6 +58,7 @@ const ( stateHistoryStorageData = "storage.data" ) +// stateFreezerNoSnappy configures whether compression is disabled for the state freezer. var stateFreezerNoSnappy = map[string]bool{ stateHistoryMeta: true, stateHistoryAccountIndex: false, diff --git a/core/rawdb/freezer_batch.go b/core/rawdb/freezer_batch.go index 013d0b9d13..801d30f73f 100644 --- a/core/rawdb/freezer_batch.go +++ b/core/rawdb/freezer_batch.go @@ -19,6 +19,7 @@ package rawdb import ( "fmt" "math" + "time" "github.com/ethereum/go-ethereum/rlp" "github.com/golang/snappy" @@ -188,9 +189,6 @@ func (batch *freezerTableBatch) commit() error { if err != nil { return err } - if err := batch.t.head.Sync(); err != nil { - return err - } dataSize := int64(len(batch.dataBuffer)) batch.dataBuffer = batch.dataBuffer[:0] @@ -208,6 +206,12 @@ func (batch *freezerTableBatch) commit() error { // Update metrics. 
batch.t.sizeGauge.Inc(dataSize + indexSize) batch.t.writeMeter.Mark(dataSize + indexSize) + + // Periodically sync the table, todo (rjl493456442) make it configurable? + if time.Since(batch.t.lastSync) > 30*time.Second { + batch.t.lastSync = time.Now() + return batch.t.Sync() + } return nil } diff --git a/core/rawdb/freezer_meta.go b/core/rawdb/freezer_meta.go index 9eef9df351..3cda9ae45c 100644 --- a/core/rawdb/freezer_meta.go +++ b/core/rawdb/freezer_meta.go @@ -17,93 +17,173 @@ package rawdb import ( + "errors" "io" + "math" "os" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/rlp" ) -const freezerVersion = 1 // The initial version tag of freezer table metadata +const ( + freezerTableV1 = 1 // Initial version of metadata struct + freezerTableV2 = 2 // Add field: 'flushOffset' + freezerVersion = freezerTableV2 // The current used version +) -// freezerTableMeta wraps all the metadata of the freezer table. +// freezerTableMeta is a collection of additional properties that describe the +// freezer table. These properties are designed with error resilience, allowing +// them to be automatically corrected after an error occurs without significantly +// impacting overall correctness. type freezerTableMeta struct { - // Version is the versioning descriptor of the freezer table. - Version uint16 + file *os.File // file handler of metadata + version uint16 // version descriptor of the freezer table - // VirtualTail indicates how many items have been marked as deleted. - // Its value is equal to the number of items removed from the table - // plus the number of items hidden in the table, so it should never - // be lower than the "actual tail". - VirtualTail uint64 + // virtualTail represents the number of items marked as deleted. It is + // calculated as the sum of items removed from the table and the items + // hidden within the table, and should never be less than the "actual + // tail". 
+ // + // If lost due to a crash or other reasons, it will be reset to the number + // of items deleted from the table, causing the previously hidden items + // to become visible, which is an acceptable consequence. + virtualTail uint64 + + // flushOffset represents the offset in the index file up to which the index + // items along with the corresponding data items in data files have been flushed + // (fsync’d) to disk. Beyond this offset, data integrity is not guaranteed, + // the extra index items along with the associated data items should be removed + // during the startup. + // + // The principle is that all data items above the flush offset are considered + // volatile and should be recoverable if they are discarded after an unclean + // shutdown. If data integrity is required, manually force a sync of the + // freezer before proceeding with further operations (e.g. do freezer.Sync() + // first and then write data to key value store in some circumstances). + // + // The offset could be moved forward by applying sync operation, or be moved + // backward in cases of head/tail truncation, etc. + flushOffset int64 } -// newMetadata initializes the metadata object with the given virtual tail. -func newMetadata(tail uint64) *freezerTableMeta { +// decodeV1 attempts to decode the metadata structure in v1 format. If it fails or +// the result is incompatible, nil is returned. +func decodeV1(file *os.File) *freezerTableMeta { + _, err := file.Seek(0, io.SeekStart) + if err != nil { + return nil + } + type obj struct { + Version uint16 + Tail uint64 + } + var o obj + if err := rlp.Decode(file, &o); err != nil { + return nil + } + if o.Version != freezerTableV1 { + return nil + } return &freezerTableMeta{ - Version: freezerVersion, - VirtualTail: tail, + file: file, + version: o.Version, + virtualTail: o.Tail, } } -// readMetadata reads the metadata of the freezer table from the -// given metadata file. 
-func readMetadata(file *os.File) (*freezerTableMeta, error) { +// decodeV2 attempts to decode the metadata structure in v2 format. If it fails or +// the result is incompatible, nil is returned. +func decodeV2(file *os.File) *freezerTableMeta { _, err := file.Seek(0, io.SeekStart) if err != nil { - return nil, err + return nil } - var meta freezerTableMeta - if err := rlp.Decode(file, &meta); err != nil { - return nil, err + type obj struct { + Version uint16 + Tail uint64 + Offset uint64 + } + var o obj + if err := rlp.Decode(file, &o); err != nil { + return nil + } + if o.Version != freezerTableV2 { + return nil + } + if o.Offset > math.MaxInt64 { + log.Error("Invalid flushOffset in freezer metadata", "offset", o.Offset, "file", file.Name()) + return nil + } + return &freezerTableMeta{ + file: file, + version: freezerTableV2, + virtualTail: o.Tail, + flushOffset: int64(o.Offset), } - return &meta, nil } -// writeMetadata writes the metadata of the freezer table into the -// given metadata file. -func writeMetadata(file *os.File, meta *freezerTableMeta) error { - _, err := file.Seek(0, io.SeekStart) - if err != nil { - return err - } - return rlp.Encode(file, meta) -} - -// loadMetadata loads the metadata from the given metadata file. -// Initializes the metadata file with the given "actual tail" if -// it's empty. -func loadMetadata(file *os.File, tail uint64) (*freezerTableMeta, error) { +// newMetadata initializes the metadata object, either by loading it from the file +// or by constructing a new one from scratch. +func newMetadata(file *os.File) (*freezerTableMeta, error) { stat, err := file.Stat() if err != nil { return nil, err } - // Write the metadata with the given actual tail into metadata file - // if it's non-existent. There are two possible scenarios here: - // - the freezer table is empty - // - the freezer table is legacy - // In both cases, write the meta into the file with the actual tail - // as the virtual tail. 
if stat.Size() == 0 { - m := newMetadata(tail) - if err := writeMetadata(file, m); err != nil { + m := &freezerTableMeta{ + file: file, + version: freezerTableV2, + virtualTail: 0, + flushOffset: 0, + } + if err := m.write(true); err != nil { return nil, err } return m, nil } - m, err := readMetadata(file) - if err != nil { - return nil, err + if m := decodeV2(file); m != nil { + return m, nil } - // Update the virtual tail with the given actual tail if it's even - // lower than it. Theoretically it shouldn't happen at all, print - // a warning here. - if m.VirtualTail < tail { - log.Warn("Updated virtual tail", "have", m.VirtualTail, "now", tail) - m.VirtualTail = tail - if err := writeMetadata(file, m); err != nil { - return nil, err - } + if m := decodeV1(file); m != nil { + return m, nil // legacy metadata } - return m, nil + return nil, errors.New("failed to decode metadata") +} + +// setVirtualTail sets the virtual tail and flushes the metadata if sync is true. +func (m *freezerTableMeta) setVirtualTail(tail uint64, sync bool) error { + m.virtualTail = tail + return m.write(sync) +} + +// setFlushOffset sets the flush offset and flushes the metadata if sync is true. +func (m *freezerTableMeta) setFlushOffset(offset int64, sync bool) error { + m.flushOffset = offset + return m.write(sync) +} + +// write flushes the content of metadata into file and performs a fsync if required. 
+func (m *freezerTableMeta) write(sync bool) error { + type obj struct { + Version uint16 + Tail uint64 + Offset uint64 + } + var o obj + o.Version = freezerVersion // forcibly use the current version + o.Tail = m.virtualTail + o.Offset = uint64(m.flushOffset) + + _, err := m.file.Seek(0, io.SeekStart) + if err != nil { + return err + } + if err := rlp.Encode(m.file, &o); err != nil { + return err + } + if !sync { + return nil + } + return m.file.Sync() } diff --git a/core/rawdb/freezer_meta_test.go b/core/rawdb/freezer_meta_test.go index 409e811026..31f8c519c8 100644 --- a/core/rawdb/freezer_meta_test.go +++ b/core/rawdb/freezer_meta_test.go @@ -19,6 +19,8 @@ package rawdb import ( "os" "testing" + + "github.com/ethereum/go-ethereum/rlp" ) func TestReadWriteFreezerTableMeta(t *testing.T) { @@ -27,36 +29,98 @@ func TestReadWriteFreezerTableMeta(t *testing.T) { t.Fatalf("Failed to create file %v", err) } defer f.Close() - err = writeMetadata(f, newMetadata(100)) + + meta, err := newMetadata(f) if err != nil { - t.Fatalf("Failed to write metadata %v", err) + t.Fatalf("Failed to new metadata %v", err) } - meta, err := readMetadata(f) + meta.setVirtualTail(100, false) + + meta, err = newMetadata(f) if err != nil { - t.Fatalf("Failed to read metadata %v", err) + t.Fatalf("Failed to reload metadata %v", err) } - if meta.Version != freezerVersion { + if meta.version != freezerTableV2 { t.Fatalf("Unexpected version field") } - if meta.VirtualTail != uint64(100) { + if meta.virtualTail != uint64(100) { t.Fatalf("Unexpected virtual tail field") } } -func TestInitializeFreezerTableMeta(t *testing.T) { +func TestUpgradeMetadata(t *testing.T) { f, err := os.CreateTemp(t.TempDir(), "*") if err != nil { t.Fatalf("Failed to create file %v", err) } defer f.Close() - meta, err := loadMetadata(f, uint64(100)) + + // Write legacy metadata into file + type obj struct { + Version uint16 + Tail uint64 + } + var o obj + o.Version = freezerTableV1 + o.Tail = 100 + + if err := rlp.Encode(f, 
&o); err != nil { + t.Fatalf("Failed to encode %v", err) + } + + // Reload the metadata, a silent upgrade is expected + meta, err := newMetadata(f) if err != nil { t.Fatalf("Failed to read metadata %v", err) } - if meta.Version != freezerVersion { - t.Fatalf("Unexpected version field") + if meta.version != freezerTableV1 { + t.Fatal("Unexpected version field") } - if meta.VirtualTail != uint64(100) { - t.Fatalf("Unexpected virtual tail field") + if meta.virtualTail != uint64(100) { + t.Fatal("Unexpected virtual tail field") + } + if meta.flushOffset != 0 { + t.Fatal("Unexpected flush offset field") + } + + meta.setFlushOffset(100, true) + + meta, err = newMetadata(f) + if err != nil { + t.Fatalf("Failed to read metadata %v", err) + } + if meta.version != freezerTableV2 { + t.Fatal("Unexpected version field") + } + if meta.virtualTail != uint64(100) { + t.Fatal("Unexpected virtual tail field") + } + if meta.flushOffset != 100 { + t.Fatal("Unexpected flush offset field") + } +} + +func TestInvalidMetadata(t *testing.T) { + f, err := os.CreateTemp(t.TempDir(), "*") + if err != nil { + t.Fatalf("Failed to create file %v", err) + } + defer f.Close() + + // Write invalid legacy metadata into file + type obj struct { + Version uint16 + Tail uint64 + } + var o obj + o.Version = freezerTableV2 // -> invalid version tag + o.Tail = 100 + + if err := rlp.Encode(f, &o); err != nil { + t.Fatalf("Failed to encode %v", err) + } + _, err = newMetadata(f) + if err == nil { + t.Fatal("Unexpected success") } } diff --git a/core/rawdb/freezer_table.go b/core/rawdb/freezer_table.go index 38c47dc223..1ba8cf639f 100644 --- a/core/rawdb/freezer_table.go +++ b/core/rawdb/freezer_table.go @@ -108,11 +108,13 @@ type freezerTable struct { head *os.File // File descriptor for the data head of the table index *os.File // File descriptor for the indexEntry file of the table - meta *os.File // File descriptor for metadata of the table files map[uint32]*os.File // open files headId uint32 // number 
of the currently active head file tailId uint32 // number of the earliest file + metadata *freezerTableMeta // metadata of the table + lastSync time.Time // Timestamp when the last sync was performed + headBytes int64 // Number of bytes written to the head file readMeter *metrics.Meter // Meter for measuring the effective amount of data read writeMeter *metrics.Meter // Meter for measuring the effective amount of data written @@ -166,10 +168,17 @@ func newTable(path string, name string, readMeter, writeMeter *metrics.Meter, si return nil, err } } + // Load metadata from the file. The tag will be true if legacy metadata + // is detected. + metadata, err := newMetadata(meta) + if err != nil { + return nil, err + } // Create the table and repair any past inconsistency tab := &freezerTable{ index: index, - meta: meta, + metadata: metadata, + lastSync: time.Now(), files: make(map[uint32]*os.File), readMeter: readMeter, writeMeter: writeMeter, @@ -221,13 +230,11 @@ func (t *freezerTable) repair() error { return err } // New file can't trigger this path } - // Validate the index file as it might contain some garbage data after the - // power failures. if err := t.repairIndex(); err != nil { return err } // Retrieve the file sizes and prepare for truncation. Note the file size - // might be changed after index validation. + // might be changed after index repair. if stat, err = t.index.Stat(); err != nil { return err } @@ -253,12 +260,14 @@ func (t *freezerTable) repair() error { t.tailId = firstIndex.filenum t.itemOffset.Store(uint64(firstIndex.offset)) - // Load metadata from the file - meta, err := loadMetadata(t.meta, t.itemOffset.Load()) - if err != nil { - return err + // Adjust the number of hidden items if it is less than the number of items + // being removed. 
+ if t.itemOffset.Load() > t.metadata.virtualTail { + if err := t.metadata.setVirtualTail(t.itemOffset.Load(), true); err != nil { + return err + } } - t.itemHidden.Store(meta.VirtualTail) + t.itemHidden.Store(t.metadata.virtualTail) // Read the last index, use the default value in case the freezer is empty if offsetsSize == indexEntrySize { @@ -267,12 +276,6 @@ func (t *freezerTable) repair() error { t.index.ReadAt(buffer, offsetsSize-indexEntrySize) lastIndex.unmarshalBinary(buffer) } - // Print an error log if the index is corrupted due to an incorrect - // last index item. While it is theoretically possible to have a zero offset - // by storing all zero-size items, it is highly unlikely to occur in practice. - if lastIndex.offset == 0 && offsetsSize/indexEntrySize > 1 { - log.Error("Corrupted index file detected", "lastOffset", lastIndex.offset, "indexes", offsetsSize/indexEntrySize) - } if t.readonly { t.head, err = t.openFile(lastIndex.filenum, openFreezerFileForReadOnly) } else { @@ -293,6 +296,7 @@ func (t *freezerTable) repair() error { return fmt.Errorf("freezer table(path: %s, name: %s, num: %d) is corrupted", t.path, t.name, lastIndex.filenum) } verbose = true + // Truncate the head file to the last offset pointer if contentExp < contentSize { t.logger.Warn("Truncating dangling head", "indexed", contentExp, "stored", contentSize) @@ -304,11 +308,23 @@ func (t *freezerTable) repair() error { // Truncate the index to point within the head file if contentExp > contentSize { t.logger.Warn("Truncating dangling indexes", "indexes", offsetsSize/indexEntrySize, "indexed", contentExp, "stored", contentSize) - if err := truncateFreezerFile(t.index, offsetsSize-indexEntrySize); err != nil { + + newOffset := offsetsSize - indexEntrySize + if err := truncateFreezerFile(t.index, newOffset); err != nil { return err } offsetsSize -= indexEntrySize + // If the index file is truncated beyond the flush offset, move the flush + // offset back to the new end of the file. 
A crash may occur before the + // offset is updated, leaving a dangling reference that points to a position + // outside the file. If so, the offset will be reset to the new end of the + // file during the next run. + if t.metadata.flushOffset > newOffset { + if err := t.metadata.setFlushOffset(newOffset, true); err != nil { + return err + } + } // Read the new head index, use the default value in case // the freezer is already empty. var newLastIndex indexEntry @@ -345,7 +361,7 @@ func (t *freezerTable) repair() error { if err := t.head.Sync(); err != nil { return err } - if err := t.meta.Sync(); err != nil { + if err := t.metadata.file.Sync(); err != nil { return err } } @@ -372,7 +388,65 @@ func (t *freezerTable) repair() error { return nil } -// repairIndex validates the integrity of the index file. According to the design, +func (t *freezerTable) repairIndex() error { + stat, err := t.index.Stat() + if err != nil { + return err + } + size := stat.Size() + + // Validate the items in the index file to ensure the data integrity. + // It's possible some garbage data is retained in the index file after + // the power failures and should be truncated first. + size, err = t.checkIndex(size) + if err != nil { + return err + } + // If legacy metadata is detected, attempt to recover the offset from the + // index file to avoid clearing the entire table. + if t.metadata.version == freezerTableV1 { + t.logger.Info("Recovering freezer flushOffset for legacy table", "offset", size) + return t.metadata.setFlushOffset(size, true) + } + + switch { + case size == indexEntrySize && t.metadata.flushOffset == 0: + // It's a new freezer table with no content. + // Move the flush offset to the end of the file. + return t.metadata.setFlushOffset(size, true) + + case size == t.metadata.flushOffset: + // flushOffset is aligned with the index file, all is well. + return nil + + case size > t.metadata.flushOffset: + // Extra index items have been detected beyond the flush offset. 
Since these + // entries correspond to data that has not been fully flushed to disk in the + // last run (because of unclean shutdown), their integrity cannot be guaranteed. + // To ensure consistency, these index items will be truncated, as there is no + // reliable way to validate or recover their associated data. + extraSize := size - t.metadata.flushOffset + if t.readonly { + return fmt.Errorf("index file(path: %s, name: %s) contains %d garbage data bytes", t.path, t.name, extraSize) + } + t.logger.Warn("Truncating freezer items after flushOffset", "size", extraSize) + return truncateFreezerFile(t.index, t.metadata.flushOffset) + + default: // size < flushOffset + // Flush offset refers to a position larger than index file. The only + // possible scenario for this is: a power failure or system crash has occurred after + // truncating the segment in index file from head or tail, but without updating + // the flush offset. In this case, automatically reset the flush offset with + // the file size which implies the entire index file is complete. + if t.readonly { + return nil // do nothing in read only mode + } + t.logger.Warn("Rewinding freezer flushOffset", "old", t.metadata.flushOffset, "new", size) + return t.metadata.setFlushOffset(size, true) + } +} + +// checkIndex validates the integrity of the index file. According to the design, // the initial entry in the file denotes the earliest data file along with the // count of deleted items. Following this, all subsequent entries in the file must // be in order. This function identifies any corrupted entries and truncates items @@ -392,18 +466,11 @@ func (t *freezerTable) repair() error { // leftover garbage or if all items in the table have zero size is impossible. // In such instances, the file will remain unchanged to prevent potential data // loss or misinterpretation. 
-func (t *freezerTable) repairIndex() error { - // Retrieve the file sizes and prepare for validation - stat, err := t.index.Stat() - if err != nil { - return err - } - size := stat.Size() - +func (t *freezerTable) checkIndex(size int64) (int64, error) { // Move the read cursor to the beginning of the file - _, err = t.index.Seek(0, io.SeekStart) + _, err := t.index.Seek(0, io.SeekStart) if err != nil { - return err + return 0, err } fr := bufio.NewReader(t.index) @@ -425,21 +492,21 @@ func (t *freezerTable) repairIndex() error { entry.unmarshalBinary(buff) return entry, nil } - truncate = func(offset int64) error { + truncate = func(offset int64) (int64, error) { if t.readonly { - return fmt.Errorf("index file is corrupted at %d, size: %d", offset, size) + return 0, fmt.Errorf("index file is corrupted at %d, size: %d", offset, size) } if err := truncateFreezerFile(t.index, offset); err != nil { - return err + return 0, err } log.Warn("Truncated index file", "offset", offset, "truncated", size-offset) - return nil + return offset, nil } ) for offset := int64(0); offset < size; offset += indexEntrySize { entry, err := read() if err != nil { - return err + return 0, err } if offset == 0 { head = entry @@ -468,10 +535,10 @@ func (t *freezerTable) repairIndex() error { // the seek operation anyway as a precaution. _, err = t.index.Seek(0, io.SeekEnd) if err != nil { - return err + return 0, err } log.Debug("Verified index file", "items", size/indexEntrySize, "elapsed", common.PrettyDuration(time.Since(start))) - return nil + return size, nil } // checkIndexItems validates the correctness of two consecutive index items based @@ -550,12 +617,23 @@ func (t *freezerTable) truncateHead(items uint64) error { // Truncate the index file first, the tail position is also considered // when calculating the new freezer table length. 
length := items - t.itemOffset.Load() - if err := truncateFreezerFile(t.index, int64(length+1)*indexEntrySize); err != nil { + newOffset := (length + 1) * indexEntrySize + if err := truncateFreezerFile(t.index, int64(newOffset)); err != nil { return err } if err := t.index.Sync(); err != nil { return err } + // If the index file is truncated beyond the flush offset, move the flush + // offset back to the new end of the file. A crash may occur before the + // offset is updated, leaving a dangling reference that points to a position + // outside the file. If so, the offset will be reset to the new end of the + // file during the next run. + if t.metadata.flushOffset > int64(newOffset) { + if err := t.metadata.setFlushOffset(int64(newOffset), true); err != nil { + return err + } + } // Calculate the new expected size of the data file and truncate it var expected indexEntry if length == 0 { @@ -652,7 +730,10 @@ func (t *freezerTable) truncateTail(items uint64) error { } // Update the virtual tail marker and hidden these entries in table. t.itemHidden.Store(items) - if err := writeMetadata(t.meta, newMetadata(items)); err != nil { + + // Update the virtual tail without fsync, otherwise it will significantly + // impact the overall performance. + if err := t.metadata.setVirtualTail(items, false); err != nil { return err } // Hidden items still fall in the current tail file, no data file @@ -664,6 +745,18 @@ func (t *freezerTable) truncateTail(items uint64) error { if t.tailId > newTailId { return fmt.Errorf("invalid index, tail-file %d, item-file %d", t.tailId, newTailId) } + // Sync the table before performing the index tail truncation. A crash may + // occur after truncating the index file without updating the flush offset, + // leaving a dangling offset that points to a position outside the file. + // The offset will be rewound to the end of file during the next run + // automatically and implicitly assumes all the items within the file are + // complete. 
+ // + // Therefore, forcibly flush everything above the offset to ensure this + // assumption is satisfied! + if err := t.doSync(); err != nil { + return err + } // Count how many items can be deleted from the file. var ( newDeleted = items @@ -681,11 +774,6 @@ func (t *freezerTable) truncateTail(items uint64) error { } newDeleted = current } - // Commit the changes of metadata file first before manipulating - // the indexes file. - if err := t.meta.Sync(); err != nil { - return err - } // Close the index file before shorten it. if err := t.index.Close(); err != nil { return err @@ -716,6 +804,21 @@ func (t *freezerTable) truncateTail(items uint64) error { t.itemOffset.Store(newDeleted) t.releaseFilesBefore(t.tailId, true) + // Move the index flush offset backward due to the deletion of an index segment. + // A crash may occur before the offset is updated, leaving a dangling reference + // that points to a position outside the file. If so, the offset will be reset + // to the new end of the file during the next run. + // + // Note, both the index and head data file has been persisted before performing + // tail truncation and all the items in these files are regarded as complete. + shorten := indexEntrySize * int64(newDeleted-deleted) + if t.metadata.flushOffset <= shorten { + return fmt.Errorf("invalid index flush offset: %d, shorten: %d", t.metadata.flushOffset, shorten) + } else { + if err := t.metadata.setFlushOffset(t.metadata.flushOffset-shorten, true); err != nil { + return err + } + } // Retrieve the new size and update the total size counter newSize, err := t.sizeNolock() if err != nil { @@ -725,40 +828,30 @@ func (t *freezerTable) truncateTail(items uint64) error { return nil } -// Close closes all opened files. +// Close closes all opened files and finalizes the freezer table for use. +// This operation must be completed before shutdown to prevent the loss of +// recent writes. 
func (t *freezerTable) Close() error { t.lock.Lock() defer t.lock.Unlock() + if err := t.doSync(); err != nil { + return err + } var errs []error - doClose := func(f *os.File, sync bool, close bool) { - if sync && !t.readonly { - if err := f.Sync(); err != nil { - errs = append(errs, err) - } - } - if close { - if err := f.Close(); err != nil { - errs = append(errs, err) - } + doClose := func(f *os.File) { + if err := f.Close(); err != nil { + errs = append(errs, err) } } - // Trying to fsync a file opened in rdonly causes "Access denied" - // error on Windows. - doClose(t.index, true, true) - doClose(t.meta, true, true) - - // The preopened non-head data-files are all opened in readonly. - // The head is opened in rw-mode, so we sync it here - but since it's also - // part of t.files, it will be closed in the loop below. - doClose(t.head, true, false) // sync but do not close - + doClose(t.index) + doClose(t.metadata.file) for _, f := range t.files { - doClose(f, false, true) // close but do not sync + doClose(f) } t.index = nil - t.meta = nil t.head = nil + t.metadata.file = nil if errs != nil { return fmt.Errorf("%v", errs) @@ -917,7 +1010,7 @@ func (t *freezerTable) retrieveItems(start, count, maxBytes uint64) ([]byte, []i defer t.lock.RUnlock() // Ensure the table and the item are accessible - if t.index == nil || t.head == nil || t.meta == nil { + if t.index == nil || t.head == nil || t.metadata.file == nil { return nil, nil, errClosed } var ( @@ -1042,6 +1135,9 @@ func (t *freezerTable) advanceHead() error { t.lock.Lock() defer t.lock.Unlock() + if err := t.doSync(); err != nil { + return err + } // We open the next file in truncated mode -- if this file already // exists, we need to start over from scratch on it. 
nextID := t.headId + 1 @@ -1069,7 +1165,18 @@ func (t *freezerTable) advanceHead() error { func (t *freezerTable) Sync() error { t.lock.Lock() defer t.lock.Unlock() - if t.index == nil || t.head == nil || t.meta == nil { + + return t.doSync() +} + +// doSync is the internal version of Sync which assumes the lock is already held. +func (t *freezerTable) doSync() error { + // Trying to fsync a file opened in rdonly causes "Access denied" + // error on Windows. + if t.readonly { + return nil + } + if t.index == nil || t.head == nil || t.metadata.file == nil { return errClosed } var err error @@ -1078,10 +1185,18 @@ func (t *freezerTable) Sync() error { err = e } } - trackError(t.index.Sync()) - trackError(t.meta.Sync()) trackError(t.head.Sync()) + + // A crash may occur before the offset is updated, leaving the offset + // pointing to an old position. If so, the extra items above the offset + // will be truncated during the next run. + stat, err := t.index.Stat() + if err != nil { + return err + } + offset := stat.Size() + trackError(t.metadata.setFlushOffset(offset, true)) return err } @@ -1097,13 +1212,8 @@ func (t *freezerTable) dumpIndexString(start, stop int64) string { } func (t *freezerTable) dumpIndex(w io.Writer, start, stop int64) { - meta, err := readMetadata(t.meta) - if err != nil { - fmt.Fprintf(w, "Failed to decode freezer table %v\n", err) - return - } - fmt.Fprintf(w, "Version %d count %d, deleted %d, hidden %d\n", meta.Version, - t.items.Load(), t.itemOffset.Load(), t.itemHidden.Load()) + fmt.Fprintf(w, "Version %d count %d, deleted %d, hidden %d\n", + t.metadata.version, t.items.Load(), t.itemOffset.Load(), t.itemHidden.Load()) buf := make([]byte, indexEntrySize) diff --git a/core/rawdb/freezer_table_test.go b/core/rawdb/freezer_table_test.go index fd6e3cf199..9a72af6ccc 100644 --- a/core/rawdb/freezer_table_test.go +++ b/core/rawdb/freezer_table_test.go @@ -262,18 +262,6 @@ func TestSnappyDetection(t *testing.T) { f.Close() } - // Open without snappy
- { - f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, false, false) - if err != nil { - t.Fatal(err) - } - if _, err = f.Retrieve(0); err == nil { - f.Close() - t.Fatalf("expected empty table") - } - } - // Open with snappy { f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false) @@ -286,6 +274,18 @@ func TestSnappyDetection(t *testing.T) { t.Fatalf("expected no error, got %v", err) } } + + // Open without snappy + { + f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, false, false) + if err != nil { + t.Fatal(err) + } + if _, err = f.Retrieve(0); err == nil { + f.Close() + t.Fatalf("expected empty table") + } + } } func assertFileSize(f string, size int64) error { @@ -521,93 +521,53 @@ func TestFreezerOffset(t *testing.T) { fname := fmt.Sprintf("offset-%d", rand.Uint64()) // Fill table - { - f, err := newTable(os.TempDir(), fname, rm, wm, sg, 40, true, false) - if err != nil { - t.Fatal(err) - } - - // Write 6 x 20 bytes, splitting out into three files - batch := f.newBatch() - require.NoError(t, batch.AppendRaw(0, getChunk(20, 0xFF))) - require.NoError(t, batch.AppendRaw(1, getChunk(20, 0xEE))) - - require.NoError(t, batch.AppendRaw(2, getChunk(20, 0xdd))) - require.NoError(t, batch.AppendRaw(3, getChunk(20, 0xcc))) - - require.NoError(t, batch.AppendRaw(4, getChunk(20, 0xbb))) - require.NoError(t, batch.AppendRaw(5, getChunk(20, 0xaa))) - require.NoError(t, batch.commit()) - - t.Log(f.dumpIndexString(0, 100)) - f.Close() + f, err := newTable(os.TempDir(), fname, rm, wm, sg, 40, true, false) + if err != nil { + t.Fatal(err) } + // Write 6 x 20 bytes, splitting out into three files + batch := f.newBatch() + require.NoError(t, batch.AppendRaw(0, getChunk(20, 0xFF))) + require.NoError(t, batch.AppendRaw(1, getChunk(20, 0xEE))) + + require.NoError(t, batch.AppendRaw(2, getChunk(20, 0xdd))) + require.NoError(t, batch.AppendRaw(3, getChunk(20, 0xcc))) + + require.NoError(t, batch.AppendRaw(4, getChunk(20, 0xbb))) + require.NoError(t, 
batch.AppendRaw(5, getChunk(20, 0xaa))) + require.NoError(t, batch.commit()) + + t.Log(f.dumpIndexString(0, 100)) + // Now crop it. - { - // delete files 0 and 1 - for i := 0; i < 2; i++ { - p := filepath.Join(os.TempDir(), fmt.Sprintf("%v.%04d.rdat", fname, i)) - if err := os.Remove(p); err != nil { - t.Fatal(err) - } - } - // Read the index file - p := filepath.Join(os.TempDir(), fmt.Sprintf("%v.ridx", fname)) - indexFile, err := os.OpenFile(p, os.O_RDWR, 0644) - if err != nil { - t.Fatal(err) - } - indexBuf := make([]byte, 7*indexEntrySize) - indexFile.Read(indexBuf) - - // Update the index file, so that we store - // [ file = 2, offset = 4 ] at index zero - - zeroIndex := indexEntry{ - filenum: uint32(2), // First file is 2 - offset: uint32(4), // We have removed four items - } - buf := zeroIndex.append(nil) - - // Overwrite index zero - copy(indexBuf, buf) - - // Remove the four next indices by overwriting - copy(indexBuf[indexEntrySize:], indexBuf[indexEntrySize*5:]) - indexFile.WriteAt(indexBuf, 0) - - // Need to truncate the moved index items - indexFile.Truncate(indexEntrySize * (1 + 2)) - indexFile.Close() - } + f.truncateTail(4) + f.Close() // Now open again - { - f, err := newTable(os.TempDir(), fname, rm, wm, sg, 40, true, false) - if err != nil { - t.Fatal(err) - } - defer f.Close() - t.Log(f.dumpIndexString(0, 100)) - - // It should allow writing item 6. - batch := f.newBatch() - require.NoError(t, batch.AppendRaw(6, getChunk(20, 0x99))) - require.NoError(t, batch.commit()) - - checkRetrieveError(t, f, map[uint64]error{ - 0: errOutOfBounds, - 1: errOutOfBounds, - 2: errOutOfBounds, - 3: errOutOfBounds, - }) - checkRetrieve(t, f, map[uint64][]byte{ - 4: getChunk(20, 0xbb), - 5: getChunk(20, 0xaa), - 6: getChunk(20, 0x99), - }) + f, err = newTable(os.TempDir(), fname, rm, wm, sg, 40, true, false) + if err != nil { + t.Fatal(err) } + t.Log(f.dumpIndexString(0, 100)) + + // It should allow writing item 6. 
+ batch = f.newBatch() + require.NoError(t, batch.AppendRaw(6, getChunk(20, 0x99))) + require.NoError(t, batch.commit()) + + checkRetrieveError(t, f, map[uint64]error{ + 0: errOutOfBounds, + 1: errOutOfBounds, + 2: errOutOfBounds, + 3: errOutOfBounds, + }) + checkRetrieve(t, f, map[uint64][]byte{ + 4: getChunk(20, 0xbb), + 5: getChunk(20, 0xaa), + 6: getChunk(20, 0x99), + }) + f.Close() // Edit the index again, with a much larger initial offset of 1M. { @@ -1369,45 +1329,63 @@ func TestRandom(t *testing.T) { } func TestIndexValidation(t *testing.T) { - const ( - items = 30 - dataSize = 10 - ) + const dataSize = 10 + garbage := indexEntry{ filenum: 100, offset: 200, } var cases = []struct { - offset int64 - data []byte - expItems int + write int + offset int64 + data []byte + expItems int + hasCorruption bool }{ // extend index file with zero bytes at the end { - offset: (items + 1) * indexEntrySize, + write: 5, + offset: (5 + 1) * indexEntrySize, data: make([]byte, indexEntrySize), - expItems: 30, + expItems: 5, + }, + // extend index file with unaligned zero bytes at the end + { + write: 5, + offset: (5 + 1) * indexEntrySize, + data: make([]byte, indexEntrySize*1.5), + expItems: 5, }, // write garbage in the first non-head item { + write: 5, offset: indexEntrySize, data: garbage.append(nil), expItems: 0, }, - // write garbage in the first non-head item + // write garbage in the middle { - offset: (items/2 + 1) * indexEntrySize, + write: 5, + offset: 3 * indexEntrySize, data: garbage.append(nil), - expItems: items / 2, + expItems: 2, + }, + // fulfill the first data file (but not yet advanced), the zero bytes + // at tail should be truncated. 
+ { + write: 10, + offset: 11 * indexEntrySize, + data: garbage.append(nil), + expItems: 10, + }, } for _, c := range cases { fn := fmt.Sprintf("t-%d", rand.Uint64()) - f, err := newTable(os.TempDir(), fn, metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge(), 100, true, false) + f, err := newTable(os.TempDir(), fn, metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge(), 10*dataSize, true, false) if err != nil { t.Fatal(err) } - writeChunks(t, f, items, dataSize) + writeChunks(t, f, c.write, dataSize) // write corrupted data f.index.WriteAt(c.data, c.offset) @@ -1421,10 +1399,10 @@ func TestIndexValidation(t *testing.T) { for i := 0; i < c.expItems; i++ { exp := getChunk(10, i) got, err := f.Retrieve(uint64(i)) - if err != nil { + if err != nil && !c.hasCorruption { t.Fatalf("Failed to read from table, %v", err) } - if !bytes.Equal(exp, got) { + if !bytes.Equal(exp, got) && !c.hasCorruption { t.Fatalf("Unexpected item data, want: %v, got: %v", exp, got) } } @@ -1433,3 +1411,163 @@ } } } + +// TestFlushOffsetTracking tests the flush offset tracking. The offset moving +// in the test is mostly triggered by the advanceHead (new data file) and +// head/tail truncation.
+func TestFlushOffsetTracking(t *testing.T) { + const ( + items = 35 + dataSize = 10 + fileSize = 100 + ) + fn := fmt.Sprintf("t-%d", rand.Uint64()) + f, err := newTable(os.TempDir(), fn, metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge(), fileSize, true, false) + if err != nil { + t.Fatal(err) + } + // Data files: + // F1(10 items) -> F2(10 items) -> F3(10 items) -> F4(5 items, non-full) + writeChunks(t, f, items, dataSize) + + var cases = []struct { + op func(*freezerTable) + offset int64 + }{ + { + // Data files: + // F1(10 items) -> F2(10 items) -> F3(10 items) -> F4(5 items, non-full) + func(f *freezerTable) {}, // no-op + 31 * indexEntrySize, + }, + { + // Write more items to fulfill the newest data file, but the file advance + // is not triggered. + + // Data files: + // F1(10 items) -> F2(10 items) -> F3(10 items) -> F4(10 items, full) + func(f *freezerTable) { + batch := f.newBatch() + for i := 0; i < 5; i++ { + batch.AppendRaw(items+uint64(i), make([]byte, dataSize)) + } + batch.commit() + }, + 31 * indexEntrySize, + }, + { + // Write more items to trigger the data file advance + + // Data files: + // F1(10 items) -> F2(10 items) -> F3(10 items) -> F4(10 items) -> F5(1 item) + func(f *freezerTable) { + batch := f.newBatch() + batch.AppendRaw(items+5, make([]byte, dataSize)) + batch.commit() + }, + 41 * indexEntrySize, + }, + { + // Head truncate + + // Data files: + // F1(10 items) -> F2(10 items) -> F3(10 items) -> F4(10 items) -> F5(0 item) + func(f *freezerTable) { + f.truncateHead(items + 5) + }, + 41 * indexEntrySize, + }, + { + // Tail truncate + + // Data files: + // F1(1 hidden, 9 visible) -> F2(10 items) -> F3(10 items) -> F4(10 items) -> F5(0 item) + func(f *freezerTable) { + f.truncateTail(1) + }, + 41 * indexEntrySize, + }, + { + // Tail truncate + + // Data files: + // F2(10 items) -> F3(10 items) -> F4(10 items) -> F5(0 item) + func(f *freezerTable) { + f.truncateTail(10) + }, + 31 * indexEntrySize, + }, + { + // Tail truncate + + // 
Data files: + // F4(10 items) -> F5(0 item) + func(f *freezerTable) { + f.truncateTail(30) + }, + 11 * indexEntrySize, + }, + { + // Head truncate + + // Data files: + // F4(9 items) + func(f *freezerTable) { + f.truncateHead(items + 4) + }, + 10 * indexEntrySize, + }, + } + for _, c := range cases { + c.op(f) + if f.metadata.flushOffset != c.offset { + t.Fatalf("Unexpected index flush offset, want: %d, got: %d", c.offset, f.metadata.flushOffset) + } + } +} + +func TestTailTruncationCrash(t *testing.T) { + const ( + items = 35 + dataSize = 10 + fileSize = 100 + ) + fn := fmt.Sprintf("t-%d", rand.Uint64()) + f, err := newTable(os.TempDir(), fn, metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge(), fileSize, true, false) + if err != nil { + t.Fatal(err) + } + // Data files: + // F1(10 items) -> F2(10 items) -> F3(10 items) -> F4(5 items, non-full) + writeChunks(t, f, items, dataSize) + + // The latest 5 items are not persisted yet + if f.metadata.flushOffset != 31*indexEntrySize { + t.Fatalf("Unexpected index flush offset, want: %d, got: %d", 31*indexEntrySize, f.metadata.flushOffset) + } + + f.truncateTail(5) + if f.metadata.flushOffset != 31*indexEntrySize { + t.Fatalf("Unexpected index flush offset, want: %d, got: %d", 31*indexEntrySize, f.metadata.flushOffset) + } + + // Truncate the first 10 items which results in the first data file + // being removed. The offset should be moved to 26*indexEntrySize. 
+ f.truncateTail(10) + if f.metadata.flushOffset != 26*indexEntrySize { + t.Fatalf("Unexpected index flush offset, want: %d, got: %d", 26*indexEntrySize, f.metadata.flushOffset) + } + + // Write the offset back to 31*indexEntrySize to simulate a crash + // which occurs after truncating the index file without updating + // the offset + f.metadata.setFlushOffset(31*indexEntrySize, true) + + f, err = newTable(os.TempDir(), fn, metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge(), fileSize, true, false) + if err != nil { + t.Fatal(err) + } + if f.metadata.flushOffset != 26*indexEntrySize { + t.Fatalf("Unexpected index flush offset, want: %d, got: %d", 26*indexEntrySize, f.metadata.flushOffset) + } +} From 665c8512f388bd79053dce81b3920f65eb617289 Mon Sep 17 00:00:00 2001 From: Marius van der Wijden Date: Tue, 4 Feb 2025 14:22:30 +0100 Subject: [PATCH 02/17] core: copy genesis before modifying (#31097) This PR fixes a data race in SetupGenesisWithOverride. --- core/genesis.go | 45 +++++++++++++++++++++++++-------------------- 1 file changed, 25 insertions(+), 20 deletions(-) diff --git a/core/genesis.go b/core/genesis.go index b579e33c77..d1ee1b6e92 100644 --- a/core/genesis.go +++ b/core/genesis.go @@ -75,6 +75,19 @@ type Genesis struct { BlobGasUsed *uint64 `json:"blobGasUsed"` // EIP-4844 } +// copy copies the genesis. +func (g *Genesis) copy() *Genesis { + if g != nil { + cpy := *g + if g.Config != nil { + conf := *g.Config + cpy.Config = &conf + } + return &cpy + } + return nil +} + func ReadGenesis(db ethdb.Database) (*Genesis, error) { var genesis Genesis stored := rawdb.ReadCanonicalHash(db, 0) @@ -248,21 +261,17 @@ type ChainOverrides struct { } // apply applies the chain overrides on the supplied chain config. 
-func (o *ChainOverrides) apply(cfg *params.ChainConfig) (*params.ChainConfig, error) { +func (o *ChainOverrides) apply(cfg *params.ChainConfig) error { if o == nil || cfg == nil { - return cfg, nil + return nil } - cpy := *cfg if o.OverrideCancun != nil { - cpy.CancunTime = o.OverrideCancun + cfg.CancunTime = o.OverrideCancun } if o.OverrideVerkle != nil { - cpy.VerkleTime = o.OverrideVerkle + cfg.VerkleTime = o.OverrideVerkle } - if err := cpy.CheckConfigForkOrder(); err != nil { - return nil, err - } - return &cpy, nil + return cfg.CheckConfigForkOrder() } // SetupGenesisBlock writes or updates the genesis block in db. @@ -281,6 +290,8 @@ func SetupGenesisBlock(db ethdb.Database, triedb *triedb.Database, genesis *Gene } func SetupGenesisBlockWithOverride(db ethdb.Database, triedb *triedb.Database, genesis *Genesis, overrides *ChainOverrides) (*params.ChainConfig, common.Hash, *params.ConfigCompatError, error) { + // Copy the genesis, so we can operate on a copy. + genesis = genesis.copy() // Sanitize the supplied genesis, ensuring it has the associated chain // config attached. 
if genesis != nil && genesis.Config == nil { @@ -295,17 +306,15 @@ func SetupGenesisBlockWithOverride(db ethdb.Database, triedb *triedb.Database, g } else { log.Info("Writing custom genesis block") } - chainCfg, err := overrides.apply(genesis.Config) - if err != nil { + if err := overrides.apply(genesis.Config); err != nil { return nil, common.Hash{}, nil, err } - genesis.Config = chainCfg block, err := genesis.Commit(db, triedb) if err != nil { return nil, common.Hash{}, nil, err } - return chainCfg, block.Hash(), nil, nil + return genesis.Config, block.Hash(), nil, nil } // Commit the genesis if the genesis block exists in the ancient database // but the key-value database is empty without initializing the genesis @@ -322,11 +331,9 @@ func SetupGenesisBlockWithOverride(db ethdb.Database, triedb *triedb.Database, g } else { log.Info("Writing custom genesis block") } - chainCfg, err := overrides.apply(genesis.Config) - if err != nil { + if err := overrides.apply(genesis.Config); err != nil { return nil, common.Hash{}, nil, err } - genesis.Config = chainCfg if hash := genesis.ToBlock().Hash(); hash != ghash { return nil, common.Hash{}, nil, &GenesisMismatchError{ghash, hash} @@ -335,17 +342,15 @@ func SetupGenesisBlockWithOverride(db ethdb.Database, triedb *triedb.Database, g if err != nil { return nil, common.Hash{}, nil, err } - return chainCfg, block.Hash(), nil, nil + return genesis.Config, block.Hash(), nil, nil } // The genesis block has already been committed previously. Verify that the // provided genesis with chain overrides matches the existing one, and update // the stored chain config if necessary. 
if genesis != nil { - chainCfg, err := overrides.apply(genesis.Config) - if err != nil { + if err := overrides.apply(genesis.Config); err != nil { return nil, common.Hash{}, nil, err } - genesis.Config = chainCfg if hash := genesis.ToBlock().Hash(); hash != ghash { return nil, common.Hash{}, nil, &GenesisMismatchError{ghash, hash} From eee868226a3d4504994fa176ea7c866b5d8f9d18 Mon Sep 17 00:00:00 2001 From: Marius van der Wijden Date: Tue, 4 Feb 2025 15:29:51 +0100 Subject: [PATCH 03/17] params: start osaka fork (#31125) This PR defines the Osaka fork. An easy first step to start our work on the next hardfork (This is needed for EOF testing as well) --------- Co-authored-by: lightclient <14004106+lightclient@users.noreply.github.com> --- core/genesis_test.go | 1 + core/vm/jump_table_export.go | 4 +++- eth/tracers/api.go | 4 ++++ params/config.go | 23 ++++++++++++++++++- params/forks/forks.go | 1 + tests/init.go | 44 ++++++++++++++++++++++++++++++++++++ 6 files changed, 75 insertions(+), 2 deletions(-) diff --git a/core/genesis_test.go b/core/genesis_test.go index 4e7662d14c..714dc4d6b3 100644 --- a/core/genesis_test.go +++ b/core/genesis_test.go @@ -272,6 +272,7 @@ func TestVerkleGenesisCommit(t *testing.T) { ShanghaiTime: &verkleTime, CancunTime: &verkleTime, PragueTime: &verkleTime, + OsakaTime: &verkleTime, VerkleTime: &verkleTime, TerminalTotalDifficulty: big.NewInt(0), EnableVerkleAtGenesis: true, diff --git a/core/vm/jump_table_export.go b/core/vm/jump_table_export.go index b74109da0a..b8fa6049bb 100644 --- a/core/vm/jump_table_export.go +++ b/core/vm/jump_table_export.go @@ -28,8 +28,10 @@ func LookupInstructionSet(rules params.Rules) (JumpTable, error) { switch { case rules.IsVerkle: return newCancunInstructionSet(), errors.New("verkle-fork not defined yet") + case rules.IsOsaka: + return newPragueInstructionSet(), errors.New("osaka-fork not defined yet") case rules.IsPrague: - return newCancunInstructionSet(), errors.New("prague-fork not defined yet") + 
return newPragueInstructionSet(), nil case rules.IsCancun: return newCancunInstructionSet(), nil case rules.IsShanghai: diff --git a/eth/tracers/api.go b/eth/tracers/api.go index 22163030de..d13aee555f 100644 --- a/eth/tracers/api.go +++ b/eth/tracers/api.go @@ -1088,6 +1088,10 @@ func overrideConfig(original *params.ChainConfig, override *params.ChainConfig) copy.PragueTime = timestamp canon = false } + if timestamp := override.OsakaTime; timestamp != nil { + copy.OsakaTime = timestamp + canon = false + } if timestamp := override.VerkleTime; timestamp != nil { copy.VerkleTime = timestamp canon = false diff --git a/params/config.go b/params/config.go index f1e139608c..9269cb2310 100644 --- a/params/config.go +++ b/params/config.go @@ -133,6 +133,7 @@ var ( ShanghaiTime: nil, CancunTime: nil, PragueTime: nil, + OsakaTime: nil, VerkleTime: nil, Ethash: new(EthashConfig), Clique: nil, @@ -182,6 +183,7 @@ var ( ShanghaiTime: nil, CancunTime: nil, PragueTime: nil, + OsakaTime: nil, VerkleTime: nil, TerminalTotalDifficulty: big.NewInt(math.MaxInt64), Ethash: nil, @@ -211,6 +213,7 @@ var ( ShanghaiTime: nil, CancunTime: nil, PragueTime: nil, + OsakaTime: nil, VerkleTime: nil, TerminalTotalDifficulty: big.NewInt(math.MaxInt64), Ethash: new(EthashConfig), @@ -240,6 +243,7 @@ var ( ShanghaiTime: newUint64(0), CancunTime: newUint64(0), PragueTime: newUint64(0), + OsakaTime: nil, VerkleTime: nil, TerminalTotalDifficulty: big.NewInt(0), Ethash: new(EthashConfig), @@ -269,6 +273,7 @@ var ( ShanghaiTime: nil, CancunTime: nil, PragueTime: nil, + OsakaTime: nil, VerkleTime: nil, TerminalTotalDifficulty: big.NewInt(math.MaxInt64), Ethash: new(EthashConfig), @@ -318,6 +323,7 @@ type ChainConfig struct { ShanghaiTime *uint64 `json:"shanghaiTime,omitempty"` // Shanghai switch time (nil = no fork, 0 = already on shanghai) CancunTime *uint64 `json:"cancunTime,omitempty"` // Cancun switch time (nil = no fork, 0 = already on cancun) PragueTime *uint64 `json:"pragueTime,omitempty"` // 
Prague switch time (nil = no fork, 0 = already on prague) + OsakaTime *uint64 `json:"osakaTime,omitempty"` // Osaka switch time (nil = no fork, 0 = already on osaka) VerkleTime *uint64 `json:"verkleTime,omitempty"` // Verkle switch time (nil = no fork, 0 = already on verkle) // TerminalTotalDifficulty is the amount of total difficulty reached by @@ -432,6 +438,9 @@ func (c *ChainConfig) Description() string { if c.PragueTime != nil { banner += fmt.Sprintf(" - Prague: @%-10v\n", *c.PragueTime) } + if c.OsakaTime != nil { + banner += fmt.Sprintf(" - Osaka: @%-10v\n", *c.OsakaTime) + } if c.VerkleTime != nil { banner += fmt.Sprintf(" - Verkle: @%-10v\n", *c.VerkleTime) } @@ -533,6 +542,11 @@ func (c *ChainConfig) IsPrague(num *big.Int, time uint64) bool { return c.IsLondon(num) && isTimestampForked(c.PragueTime, time) } +// IsOsaka returns whether time is either equal to the Osaka fork time or greater. +func (c *ChainConfig) IsOsaka(num *big.Int, time uint64) bool { + return c.IsLondon(num) && isTimestampForked(c.OsakaTime, time) +} + // IsVerkle returns whether time is either equal to the Verkle fork time or greater. 
func (c *ChainConfig) IsVerkle(num *big.Int, time uint64) bool { return c.IsLondon(num) && isTimestampForked(c.VerkleTime, time) @@ -611,6 +625,7 @@ func (c *ChainConfig) CheckConfigForkOrder() error { {name: "shanghaiTime", timestamp: c.ShanghaiTime}, {name: "cancunTime", timestamp: c.CancunTime, optional: true}, {name: "pragueTime", timestamp: c.PragueTime, optional: true}, + {name: "osakaTime", timestamp: c.OsakaTime, optional: true}, {name: "verkleTime", timestamp: c.VerkleTime, optional: true}, } { if lastFork.name != "" { @@ -715,6 +730,9 @@ func (c *ChainConfig) checkCompatible(newcfg *ChainConfig, headNumber *big.Int, if isForkTimestampIncompatible(c.PragueTime, newcfg.PragueTime, headTimestamp) { return newTimestampCompatError("Prague fork timestamp", c.PragueTime, newcfg.PragueTime) } + if isForkTimestampIncompatible(c.OsakaTime, newcfg.OsakaTime, headTimestamp) { + return newTimestampCompatError("Osaka fork timestamp", c.OsakaTime, newcfg.OsakaTime) + } if isForkTimestampIncompatible(c.VerkleTime, newcfg.VerkleTime, headTimestamp) { return newTimestampCompatError("Verkle fork timestamp", c.VerkleTime, newcfg.VerkleTime) } @@ -737,6 +755,8 @@ func (c *ChainConfig) LatestFork(time uint64) forks.Fork { london := c.LondonBlock switch { + case c.IsOsaka(london, time): + return forks.Osaka case c.IsPrague(london, time): return forks.Prague case c.IsCancun(london, time): @@ -888,7 +908,7 @@ type Rules struct { IsEIP2929, IsEIP4762 bool IsByzantium, IsConstantinople, IsPetersburg, IsIstanbul bool IsBerlin, IsLondon bool - IsMerge, IsShanghai, IsCancun, IsPrague bool + IsMerge, IsShanghai, IsCancun, IsPrague, IsOsaka bool IsVerkle bool } @@ -918,6 +938,7 @@ func (c *ChainConfig) Rules(num *big.Int, isMerge bool, timestamp uint64) Rules IsShanghai: isMerge && c.IsShanghai(num, timestamp), IsCancun: isMerge && c.IsCancun(num, timestamp), IsPrague: isMerge && c.IsPrague(num, timestamp), + IsOsaka: isMerge && c.IsOsaka(num, timestamp), IsVerkle: isVerkle, IsEIP4762: 
isVerkle, } diff --git a/params/forks/forks.go b/params/forks/forks.go index 4f50ff5aed..2d44e13b04 100644 --- a/params/forks/forks.go +++ b/params/forks/forks.go @@ -39,4 +39,5 @@ const ( Shanghai Cancun Prague + Osaka ) diff --git a/tests/init.go b/tests/init.go index 4bb83f9300..8429f38e44 100644 --- a/tests/init.go +++ b/tests/init.go @@ -396,6 +396,50 @@ var Forks = map[string]*params.ChainConfig{ PragueTime: u64(15_000), DepositContractAddress: params.MainnetChainConfig.DepositContractAddress, }, + "Osaka": { + ChainID: big.NewInt(1), + HomesteadBlock: big.NewInt(0), + EIP150Block: big.NewInt(0), + EIP155Block: big.NewInt(0), + EIP158Block: big.NewInt(0), + ByzantiumBlock: big.NewInt(0), + ConstantinopleBlock: big.NewInt(0), + PetersburgBlock: big.NewInt(0), + IstanbulBlock: big.NewInt(0), + MuirGlacierBlock: big.NewInt(0), + BerlinBlock: big.NewInt(0), + LondonBlock: big.NewInt(0), + ArrowGlacierBlock: big.NewInt(0), + MergeNetsplitBlock: big.NewInt(0), + TerminalTotalDifficulty: big.NewInt(0), + ShanghaiTime: u64(0), + CancunTime: u64(0), + PragueTime: u64(0), + OsakaTime: u64(0), + DepositContractAddress: params.MainnetChainConfig.DepositContractAddress, + }, + "PragueToOsakaAtTime15k": { + ChainID: big.NewInt(1), + HomesteadBlock: big.NewInt(0), + EIP150Block: big.NewInt(0), + EIP155Block: big.NewInt(0), + EIP158Block: big.NewInt(0), + ByzantiumBlock: big.NewInt(0), + ConstantinopleBlock: big.NewInt(0), + PetersburgBlock: big.NewInt(0), + IstanbulBlock: big.NewInt(0), + MuirGlacierBlock: big.NewInt(0), + BerlinBlock: big.NewInt(0), + LondonBlock: big.NewInt(0), + ArrowGlacierBlock: big.NewInt(0), + MergeNetsplitBlock: big.NewInt(0), + TerminalTotalDifficulty: big.NewInt(0), + ShanghaiTime: u64(0), + CancunTime: u64(0), + PragueTime: u64(0), + OsakaTime: u64(15_000), + DepositContractAddress: params.MainnetChainConfig.DepositContractAddress, + }, } // AvailableForks returns the set of defined fork names From e6f3ce7b168b8f346de621a8f60d2fa57c2ebfb0 Mon Sep 
17 00:00:00 2001 From: lightclient <14004106+lightclient@users.noreply.github.com> Date: Tue, 4 Feb 2025 07:43:18 -0700 Subject: [PATCH 04/17] params,core: add max and target value to chain config (#31002) Implements [EIP-7840](https://github.com/ethereum/EIPs/pull/9129) and [EIP-7691](https://github.com/ethereum/EIPs/blob/d96625a4dcbbe2572fa006f062bd02b4582eefd5/EIPS/eip-7691.md). --------- Co-authored-by: Marius van der Wijden Co-authored-by: Felix Lange --- cmd/devp2p/internal/ethtest/suite.go | 2 +- .../internal/ethtest/testdata/genesis.json | 11 +- cmd/evm/internal/t8ntool/execution.go | 22 ++- consensus/beacon/consensus.go | 2 +- consensus/misc/eip4844/eip4844.go | 110 +++++++++--- consensus/misc/eip4844/eip4844_test.go | 38 +++-- core/blockchain.go | 5 +- core/chain_makers.go | 18 +- core/chain_makers_test.go | 6 +- core/evm.go | 6 +- core/genesis_test.go | 5 + core/rawdb/accessors_chain.go | 2 +- core/state_processor_test.go | 27 +-- core/txpool/blobpool/blobpool.go | 22 +-- core/txpool/blobpool/blobpool_test.go | 158 +++++++++++++++++- core/txpool/blobpool/evictheap_test.go | 34 ++-- core/txpool/blobpool/limbo.go | 4 +- core/txpool/blobpool/priority_test.go | 6 +- core/txpool/blobpool/slotter.go | 4 +- core/txpool/blobpool/slotter_test.go | 2 +- core/txpool/validation.go | 6 +- core/verkle_witness_test.go | 8 +- core/vm/runtime/runtime_test.go | 4 + eth/api_backend.go | 2 +- eth/catalyst/api_test.go | 3 + eth/fetcher/tx_fetcher_test.go | 6 +- eth/gasprice/feehistory.go | 10 +- eth/gasprice/gasprice_test.go | 1 + eth/tracers/internal/tracetest/supply_test.go | 1 + .../testdata/call_tracer/blob_tx.json | 9 +- .../testdata/prestate_tracer/blob_tx.json | 13 +- .../testdata/prestate_tracer/setcode_tx.json | 14 +- eth/tracers/internal/tracetest/util.go | 6 +- eth/tracers/live/supply.go | 26 +-- ethclient/ethclient_test.go | 9 +- internal/ethapi/api.go | 5 + internal/ethapi/simulate.go | 8 +- internal/ethapi/transaction_args.go | 22 +-- 
internal/ethapi/transaction_args_test.go | 1 + miner/worker.go | 29 ++-- params/config.go | 96 ++++++++++- params/protocol_params.go | 4 - tests/init.go | 14 ++ tests/state_test.go | 2 +- tests/state_test_util.go | 34 +++- 45 files changed, 613 insertions(+), 204 deletions(-) diff --git a/cmd/devp2p/internal/ethtest/suite.go b/cmd/devp2p/internal/ethtest/suite.go index 12dc5711ac..2f1731b60c 100644 --- a/cmd/devp2p/internal/ethtest/suite.go +++ b/cmd/devp2p/internal/ethtest/suite.go @@ -743,7 +743,7 @@ func (s *Suite) makeBlobTxs(count, blobs int, discriminator byte) (txs types.Tra GasTipCap: uint256.NewInt(1), GasFeeCap: uint256.MustFromBig(s.chain.Head().BaseFee()), Gas: 100000, - BlobFeeCap: uint256.MustFromBig(eip4844.CalcBlobFee(*s.chain.Head().ExcessBlobGas())), + BlobFeeCap: uint256.MustFromBig(eip4844.CalcBlobFee(s.chain.config, s.chain.Head().Header())), BlobHashes: makeSidecar(blobdata...).BlobHashes(), Sidecar: makeSidecar(blobdata...), } diff --git a/cmd/devp2p/internal/ethtest/testdata/genesis.json b/cmd/devp2p/internal/ethtest/testdata/genesis.json index 4cfebdcac1..ace2f78815 100644 --- a/cmd/devp2p/internal/ethtest/testdata/genesis.json +++ b/cmd/devp2p/internal/ethtest/testdata/genesis.json @@ -18,7 +18,14 @@ "shanghaiTime": 780, "cancunTime": 840, "terminalTotalDifficulty": 9454784, - "ethash": {} + "ethash": {}, + "blobSchedule": { + "cancun": { + "target": 3, + "max": 6, + "baseFeeUpdateFraction": 3338477 + } + } }, "nonce": "0x0", "timestamp": "0x0", @@ -108,4 +115,4 @@ "baseFeePerGas": null, "excessBlobGas": null, "blobGasUsed": null -} \ No newline at end of file +} diff --git a/cmd/evm/internal/t8ntool/execution.go b/cmd/evm/internal/t8ntool/execution.go index 9ff5a05290..9332f4901b 100644 --- a/cmd/evm/internal/t8ntool/execution.go +++ b/cmd/evm/internal/t8ntool/execution.go @@ -178,15 +178,28 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig, var excessBlobGas uint64 if pre.Env.ExcessBlobGas != nil { 
excessBlobGas = *pre.Env.ExcessBlobGas - vmContext.BlobBaseFee = eip4844.CalcBlobFee(excessBlobGas) + header := &types.Header{ + Time: pre.Env.Timestamp, + ExcessBlobGas: pre.Env.ExcessBlobGas, + } + vmContext.BlobBaseFee = eip4844.CalcBlobFee(chainConfig, header) } else { // If it is not explicitly defined, but we have the parent values, we try // to calculate it ourselves. parentExcessBlobGas := pre.Env.ParentExcessBlobGas parentBlobGasUsed := pre.Env.ParentBlobGasUsed if parentExcessBlobGas != nil && parentBlobGasUsed != nil { - excessBlobGas = eip4844.CalcExcessBlobGas(*parentExcessBlobGas, *parentBlobGasUsed) - vmContext.BlobBaseFee = eip4844.CalcBlobFee(excessBlobGas) + parent := &types.Header{ + Time: pre.Env.ParentTimestamp, + ExcessBlobGas: pre.Env.ParentExcessBlobGas, + BlobGasUsed: pre.Env.ParentBlobGasUsed, + } + excessBlobGas = eip4844.CalcExcessBlobGas(chainConfig, parent) + header := &types.Header{ + Time: pre.Env.Timestamp, + ExcessBlobGas: &excessBlobGas, + } + vmContext.BlobBaseFee = eip4844.CalcBlobFee(chainConfig, header) } } // If DAO is supported/enabled, we need to handle it here. 
In geth 'proper', it's @@ -229,7 +242,8 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig, txBlobGas := uint64(0) if tx.Type() == types.BlobTxType { txBlobGas = uint64(params.BlobTxBlobGasPerBlob * len(tx.BlobHashes())) - if used, max := blobGasUsed+txBlobGas, uint64(params.MaxBlobGasPerBlock); used > max { + max := eip4844.MaxBlobGasPerBlock(chainConfig, pre.Env.Timestamp) + if used := blobGasUsed + txBlobGas; used > max { err := fmt.Errorf("blob gas (%d) would exceed maximum allowance %d", used, max) log.Warn("rejected tx", "index", i, "err", err) rejectedTxs = append(rejectedTxs, &rejectedTx{i, err.Error()}) diff --git a/consensus/beacon/consensus.go b/consensus/beacon/consensus.go index 20df6d0091..bdfddfff06 100644 --- a/consensus/beacon/consensus.go +++ b/consensus/beacon/consensus.go @@ -282,7 +282,7 @@ func (beacon *Beacon) verifyHeader(chain consensus.ChainHeaderReader, header, pa if header.ParentBeaconRoot == nil { return errors.New("header is missing beaconRoot") } - if err := eip4844.VerifyEIP4844Header(parent, header); err != nil { + if err := eip4844.VerifyEIP4844Header(chain.Config(), parent, header); err != nil { return err } } diff --git a/consensus/misc/eip4844/eip4844.go b/consensus/misc/eip4844/eip4844.go index 2dad9a0cd3..1d76b21b30 100644 --- a/consensus/misc/eip4844/eip4844.go +++ b/consensus/misc/eip4844/eip4844.go @@ -23,17 +23,17 @@ import ( "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/params" + "github.com/ethereum/go-ethereum/params/forks" ) var ( - minBlobGasPrice = big.NewInt(params.BlobTxMinBlobGasprice) - blobGaspriceUpdateFraction = big.NewInt(params.BlobTxBlobGaspriceUpdateFraction) + minBlobGasPrice = big.NewInt(params.BlobTxMinBlobGasprice) ) // VerifyEIP4844Header verifies the presence of the excessBlobGas field and that // if the current block contains no transactions, the excessBlobGas is updated // accordingly. 
-func VerifyEIP4844Header(parent, header *types.Header) error { +func VerifyEIP4844Header(config *params.ChainConfig, parent, header *types.Header) error { // Verify the header is not malformed if header.ExcessBlobGas == nil { return errors.New("header is missing excessBlobGas") @@ -42,14 +42,26 @@ func VerifyEIP4844Header(parent, header *types.Header) error { return errors.New("header is missing blobGasUsed") } // Verify that the blob gas used remains within reasonable limits. - if *header.BlobGasUsed > params.MaxBlobGasPerBlock { - return fmt.Errorf("blob gas used %d exceeds maximum allowance %d", *header.BlobGasUsed, params.MaxBlobGasPerBlock) + maxBlobGas := MaxBlobGasPerBlock(config, header.Time) + if *header.BlobGasUsed > maxBlobGas { + return fmt.Errorf("blob gas used %d exceeds maximum allowance %d", *header.BlobGasUsed, maxBlobGas) } if *header.BlobGasUsed%params.BlobTxBlobGasPerBlob != 0 { return fmt.Errorf("blob gas used %d not a multiple of blob gas per blob %d", header.BlobGasUsed, params.BlobTxBlobGasPerBlob) } // Verify the excessBlobGas is correct based on the parent header + expectedExcessBlobGas := CalcExcessBlobGas(config, parent) + if *header.ExcessBlobGas != expectedExcessBlobGas { + return fmt.Errorf("invalid excessBlobGas: have %d, want %d", *header.ExcessBlobGas, expectedExcessBlobGas) + } + return nil +} + +// CalcExcessBlobGas calculates the excess blob gas after applying the set of +// blobs on top of the excess blob gas. 
+func CalcExcessBlobGas(config *params.ChainConfig, parent *types.Header) uint64 { var ( + targetGas = uint64(targetBlobsPerBlock(config, parent.Time)) * params.BlobTxBlobGasPerBlob parentExcessBlobGas uint64 parentBlobGasUsed uint64 ) @@ -57,27 +69,85 @@ func VerifyEIP4844Header(parent, header *types.Header) error { parentExcessBlobGas = *parent.ExcessBlobGas parentBlobGasUsed = *parent.BlobGasUsed } - expectedExcessBlobGas := CalcExcessBlobGas(parentExcessBlobGas, parentBlobGasUsed) - if *header.ExcessBlobGas != expectedExcessBlobGas { - return fmt.Errorf("invalid excessBlobGas: have %d, want %d, parent excessBlobGas %d, parent blobDataUsed %d", - *header.ExcessBlobGas, expectedExcessBlobGas, parentExcessBlobGas, parentBlobGasUsed) - } - return nil -} - -// CalcExcessBlobGas calculates the excess blob gas after applying the set of -// blobs on top of the excess blob gas. -func CalcExcessBlobGas(parentExcessBlobGas uint64, parentBlobGasUsed uint64) uint64 { excessBlobGas := parentExcessBlobGas + parentBlobGasUsed - if excessBlobGas < params.BlobTxTargetBlobGasPerBlock { + if excessBlobGas < targetGas { return 0 } - return excessBlobGas - params.BlobTxTargetBlobGasPerBlock + return excessBlobGas - targetGas } // CalcBlobFee calculates the blobfee from the header's excess blob gas field. 
-func CalcBlobFee(excessBlobGas uint64) *big.Int {
-	return fakeExponential(minBlobGasPrice, new(big.Int).SetUint64(excessBlobGas), blobGaspriceUpdateFraction)
+func CalcBlobFee(config *params.ChainConfig, header *types.Header) *big.Int {
+	var frac uint64
+	switch config.LatestFork(header.Time) {
+	case forks.Prague:
+		frac = config.BlobScheduleConfig.Prague.UpdateFraction
+	case forks.Cancun:
+		frac = config.BlobScheduleConfig.Cancun.UpdateFraction
+	default:
+		panic("calculating blob fee on unsupported fork")
+	}
+	return fakeExponential(minBlobGasPrice, new(big.Int).SetUint64(*header.ExcessBlobGas), new(big.Int).SetUint64(frac))
+}
+
+// MaxBlobsPerBlock returns the max blobs per block for a block at the given timestamp.
+func MaxBlobsPerBlock(cfg *params.ChainConfig, time uint64) int {
+	if cfg.BlobScheduleConfig == nil {
+		return 0
+	}
+	var (
+		london = cfg.LondonBlock
+		s      = cfg.BlobScheduleConfig
+	)
+	switch {
+	case cfg.IsPrague(london, time) && s.Prague != nil:
+		return s.Prague.Max
+	case cfg.IsCancun(london, time) && s.Cancun != nil:
+		return s.Cancun.Max
+	default:
+		return 0
+	}
+}
+
+// MaxBlobGasPerBlock returns the maximum blob gas that can be spent in a block at the given timestamp.
+func MaxBlobGasPerBlock(cfg *params.ChainConfig, time uint64) uint64 {
+	return uint64(MaxBlobsPerBlock(cfg, time)) * params.BlobTxBlobGasPerBlob
+}
+
+// LatestMaxBlobsPerBlock returns the latest max blobs per block defined by the
+// configuration, regardless of the currently active fork.
+func LatestMaxBlobsPerBlock(cfg *params.ChainConfig) int {
+	s := cfg.BlobScheduleConfig
+	if s == nil {
+		return 0
+	}
+	switch {
+	case s.Prague != nil:
+		return s.Prague.Max
+	case s.Cancun != nil:
+		return s.Cancun.Max
+	default:
+		return 0
+	}
+}
+
+// targetBlobsPerBlock returns the target number of blobs in a block at the given timestamp.
+func targetBlobsPerBlock(cfg *params.ChainConfig, time uint64) int { + if cfg.BlobScheduleConfig == nil { + return 0 + } + var ( + london = cfg.LondonBlock + s = cfg.BlobScheduleConfig + ) + switch { + case cfg.IsPrague(london, time) && s.Prague != nil: + return s.Prague.Target + case cfg.IsCancun(london, time) && s.Cancun != nil: + return s.Cancun.Target + default: + return 0 + } } // fakeExponential approximates factor * e ** (numerator / denominator) using diff --git a/consensus/misc/eip4844/eip4844_test.go b/consensus/misc/eip4844/eip4844_test.go index ec417380fc..839ea8df22 100644 --- a/consensus/misc/eip4844/eip4844_test.go +++ b/consensus/misc/eip4844/eip4844_test.go @@ -21,36 +21,48 @@ import ( "math/big" "testing" + "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/params" ) func TestCalcExcessBlobGas(t *testing.T) { + var ( + config = params.MainnetChainConfig + targetBlobs = targetBlobsPerBlock(config, *config.CancunTime) + targetBlobGas = uint64(targetBlobs) * params.BlobTxBlobGasPerBlob + ) var tests = []struct { excess uint64 - blobs uint64 + blobs int want uint64 }{ // The excess blob gas should not increase from zero if the used blob // slots are below - or equal - to the target. 
{0, 0, 0}, {0, 1, 0}, - {0, params.BlobTxTargetBlobGasPerBlock / params.BlobTxBlobGasPerBlob, 0}, + {0, targetBlobs, 0}, // If the target blob gas is exceeded, the excessBlobGas should increase // by however much it was overshot - {0, (params.BlobTxTargetBlobGasPerBlock / params.BlobTxBlobGasPerBlob) + 1, params.BlobTxBlobGasPerBlob}, - {1, (params.BlobTxTargetBlobGasPerBlock / params.BlobTxBlobGasPerBlob) + 1, params.BlobTxBlobGasPerBlob + 1}, - {1, (params.BlobTxTargetBlobGasPerBlock / params.BlobTxBlobGasPerBlob) + 2, 2*params.BlobTxBlobGasPerBlob + 1}, + {0, targetBlobs + 1, params.BlobTxBlobGasPerBlob}, + {1, targetBlobs + 1, params.BlobTxBlobGasPerBlob + 1}, + {1, targetBlobs + 2, 2*params.BlobTxBlobGasPerBlob + 1}, // The excess blob gas should decrease by however much the target was // under-shot, capped at zero. - {params.BlobTxTargetBlobGasPerBlock, params.BlobTxTargetBlobGasPerBlock / params.BlobTxBlobGasPerBlob, params.BlobTxTargetBlobGasPerBlock}, - {params.BlobTxTargetBlobGasPerBlock, (params.BlobTxTargetBlobGasPerBlock / params.BlobTxBlobGasPerBlob) - 1, params.BlobTxTargetBlobGasPerBlock - params.BlobTxBlobGasPerBlob}, - {params.BlobTxTargetBlobGasPerBlock, (params.BlobTxTargetBlobGasPerBlock / params.BlobTxBlobGasPerBlob) - 2, params.BlobTxTargetBlobGasPerBlock - (2 * params.BlobTxBlobGasPerBlob)}, - {params.BlobTxBlobGasPerBlob - 1, (params.BlobTxTargetBlobGasPerBlock / params.BlobTxBlobGasPerBlob) - 1, 0}, + {targetBlobGas, targetBlobs, targetBlobGas}, + {targetBlobGas, targetBlobs - 1, targetBlobGas - params.BlobTxBlobGasPerBlob}, + {targetBlobGas, targetBlobs - 2, targetBlobGas - (2 * params.BlobTxBlobGasPerBlob)}, + {params.BlobTxBlobGasPerBlob - 1, targetBlobs - 1, 0}, } for i, tt := range tests { - result := CalcExcessBlobGas(tt.excess, tt.blobs*params.BlobTxBlobGasPerBlob) + blobGasUsed := uint64(tt.blobs) * params.BlobTxBlobGasPerBlob + parent := &types.Header{ + Time: *config.CancunTime, + ExcessBlobGas: &tt.excess, + BlobGasUsed: 
&blobGasUsed,
+		}
+		result := CalcExcessBlobGas(config, parent)
 		if result != tt.want {
 			t.Errorf("test %d: excess blob gas mismatch: have %v, want %v", i, result, tt.want)
 		}
@@ -58,6 +70,8 @@ func TestCalcExcessBlobGas(t *testing.T) {
 }
 
 func TestCalcBlobFee(t *testing.T) {
+	zero := uint64(0)
+
 	tests := []struct {
 		excessBlobGas uint64
 		blobfee       int64
@@ -68,7 +82,9 @@ func TestCalcBlobFee(t *testing.T) {
 		{10 * 1024 * 1024, 23},
 	}
 	for i, tt := range tests {
-		have := CalcBlobFee(tt.excessBlobGas)
+		config := &params.ChainConfig{LondonBlock: big.NewInt(0), CancunTime: &zero, BlobScheduleConfig: params.DefaultBlobSchedule}
+		header := &types.Header{ExcessBlobGas: &tt.excessBlobGas}
+		have := CalcBlobFee(config, header)
 		if have.Int64() != tt.blobfee {
 			t.Errorf("test %d: blobfee mismatch: have %v want %v", i, have, tt.blobfee)
 		}
diff --git a/core/blockchain.go b/core/blockchain.go
index f097dc9781..b98c2d43aa 100644
--- a/core/blockchain.go
+++ b/core/blockchain.go
@@ -2139,9 +2139,8 @@ func (bc *BlockChain) recoverAncestors(block *types.Block, makeWitness bool) (co
 // processing of a block. These logs are later announced as deleted or reborn.
func (bc *BlockChain) collectLogs(b *types.Block, removed bool) []*types.Log { var blobGasPrice *big.Int - excessBlobGas := b.ExcessBlobGas() - if excessBlobGas != nil { - blobGasPrice = eip4844.CalcBlobFee(*excessBlobGas) + if b.ExcessBlobGas() != nil { + blobGasPrice = eip4844.CalcBlobFee(bc.chainConfig, b.Header()) } receipts := rawdb.ReadRawReceipts(bc.db, b.Hash(), b.NumberU64()) if err := receipts.DeriveFields(bc.chainConfig, b.Hash(), b.NumberU64(), b.Time(), b.BaseFee(), blobGasPrice, b.Transactions()); err != nil { diff --git a/core/chain_makers.go b/core/chain_makers.go index 1f48f9d128..19f433d7b0 100644 --- a/core/chain_makers.go +++ b/core/chain_makers.go @@ -143,7 +143,9 @@ func (b *BlockGen) addTx(bc *BlockChain, vmConfig vm.Config, tx *types.Transacti // instruction will panic during execution if it attempts to access a block number outside // of the range created by GenerateChain. func (b *BlockGen) AddTx(tx *types.Transaction) { - b.addTx(nil, vm.Config{}, tx) + // Wrap the chain config in an empty BlockChain object to satisfy ChainContext. + bc := &BlockChain{chainConfig: b.cm.config} + b.addTx(bc, vm.Config{}, tx) } // AddTxWithChain adds a transaction to the generated block. 
If no coinbase has @@ -445,7 +447,7 @@ func GenerateChain(config *params.ChainConfig, parent *types.Block, engine conse } var blobGasPrice *big.Int if block.ExcessBlobGas() != nil { - blobGasPrice = eip4844.CalcBlobFee(*block.ExcessBlobGas()) + blobGasPrice = eip4844.CalcBlobFee(cm.config, block.Header()) } if err := receipts.DeriveFields(config, block.Hash(), block.NumberU64(), block.Time(), block.BaseFee(), blobGasPrice, txs); err != nil { panic(err) @@ -548,7 +550,7 @@ func GenerateVerkleChain(config *params.ChainConfig, parent *types.Block, engine } var blobGasPrice *big.Int if block.ExcessBlobGas() != nil { - blobGasPrice = eip4844.CalcBlobFee(*block.ExcessBlobGas()) + blobGasPrice = eip4844.CalcBlobFee(cm.config, block.Header()) } if err := receipts.DeriveFields(config, block.Hash(), block.NumberU64(), block.Time(), block.BaseFee(), blobGasPrice, txs); err != nil { panic(err) @@ -598,15 +600,7 @@ func (cm *chainMaker) makeHeader(parent *types.Block, state *state.StateDB, engi } } if cm.config.IsCancun(header.Number, header.Time) { - var ( - parentExcessBlobGas uint64 - parentBlobGasUsed uint64 - ) - if parent.ExcessBlobGas() != nil { - parentExcessBlobGas = *parent.ExcessBlobGas() - parentBlobGasUsed = *parent.BlobGasUsed() - } - excessBlobGas := eip4844.CalcExcessBlobGas(parentExcessBlobGas, parentBlobGasUsed) + excessBlobGas := eip4844.CalcExcessBlobGas(cm.config, parent.Header()) header.ExcessBlobGas = &excessBlobGas header.BlobGasUsed = new(uint64) header.ParentBeaconRoot = new(common.Hash) diff --git a/core/chain_makers_test.go b/core/chain_makers_test.go index cb1087b4b3..d81a52e915 100644 --- a/core/chain_makers_test.go +++ b/core/chain_makers_test.go @@ -42,7 +42,7 @@ func TestGeneratePOSChain(t *testing.T) { aa = common.Address{0xaa} bb = common.Address{0xbb} funds = big.NewInt(0).Mul(big.NewInt(1337), big.NewInt(params.Ether)) - config = *params.AllEthashProtocolChanges + config = *params.MergedTestChainConfig gspec = &Genesis{ Config: &config, 
Alloc: types.GenesisAlloc{ @@ -57,10 +57,6 @@ func TestGeneratePOSChain(t *testing.T) { db = rawdb.NewMemoryDatabase() ) - config.TerminalTotalDifficulty = common.Big0 - config.ShanghaiTime = u64(0) - config.CancunTime = u64(0) - // init 0xaa with some storage elements storage := make(map[common.Hash]common.Hash) storage[common.Hash{0x00}] = common.Hash{0x00} diff --git a/core/evm.go b/core/evm.go index 5d3c454d7c..41b4e6ac58 100644 --- a/core/evm.go +++ b/core/evm.go @@ -25,6 +25,7 @@ import ( "github.com/ethereum/go-ethereum/core/tracing" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/vm" + "github.com/ethereum/go-ethereum/params" "github.com/holiman/uint256" ) @@ -36,6 +37,9 @@ type ChainContext interface { // GetHeader returns the header corresponding to the hash/number argument pair. GetHeader(common.Hash, uint64) *types.Header + + // Config returns the chain's configuration. + Config() *params.ChainConfig } // NewEVMBlockContext creates a new context for use in the EVM. 
@@ -57,7 +61,7 @@ func NewEVMBlockContext(header *types.Header, chain ChainContext, author *common
 		baseFee = new(big.Int).Set(header.BaseFee)
 	}
 	if header.ExcessBlobGas != nil {
-		blobBaseFee = eip4844.CalcBlobFee(*header.ExcessBlobGas)
+		blobBaseFee = eip4844.CalcBlobFee(chain.Config(), header)
 	}
 	if header.Difficulty.Sign() == 0 {
 		random = &header.MixDigest
diff --git a/core/genesis_test.go b/core/genesis_test.go
index 714dc4d6b3..3e120a9307 100644
--- a/core/genesis_test.go
+++ b/core/genesis_test.go
@@ -278,6 +278,11 @@ func TestVerkleGenesisCommit(t *testing.T) {
 		EnableVerkleAtGenesis: true,
 		Ethash:                nil,
 		Clique:                nil,
+		BlobScheduleConfig: &params.BlobScheduleConfig{
+			Cancun: params.DefaultCancunBlobConfig,
+			Prague: params.DefaultPragueBlobConfig,
+			Verkle: params.DefaultPragueBlobConfig,
+		},
 	}
 
 	genesis := &Genesis{
diff --git a/core/rawdb/accessors_chain.go b/core/rawdb/accessors_chain.go
index a586046d5f..1a2b31b360 100644
--- a/core/rawdb/accessors_chain.go
+++ b/core/rawdb/accessors_chain.go
@@ -587,7 +587,7 @@ func ReadReceipts(db ethdb.Reader, hash common.Hash, number uint64, time uint64,
 	// Compute effective blob gas price.
var blobGasPrice *big.Int
 	if header != nil && header.ExcessBlobGas != nil {
-		blobGasPrice = eip4844.CalcBlobFee(*header.ExcessBlobGas)
+		blobGasPrice = eip4844.CalcBlobFee(config, header)
 	}
 	if err := receipts.DeriveFields(config, hash, number, time, baseFee, blobGasPrice, body.Transactions); err != nil {
 		log.Error("Failed to derive block receipts fields", "hash", hash, "number", number, "err", err)
diff --git a/core/state_processor_test.go b/core/state_processor_test.go
index 750f3708dc..7d62e90ad4 100644
--- a/core/state_processor_test.go
+++ b/core/state_processor_test.go
@@ -46,25 +46,7 @@ func u64(val uint64) *uint64 { return &val }
 // contain invalid transactions
 func TestStateProcessorErrors(t *testing.T) {
 	var (
-		config = &params.ChainConfig{
-			ChainID:                 big.NewInt(1),
-			HomesteadBlock:          big.NewInt(0),
-			EIP150Block:             big.NewInt(0),
-			EIP155Block:             big.NewInt(0),
-			EIP158Block:             big.NewInt(0),
-			ByzantiumBlock:          big.NewInt(0),
-			ConstantinopleBlock:     big.NewInt(0),
-			PetersburgBlock:         big.NewInt(0),
-			IstanbulBlock:           big.NewInt(0),
-			MuirGlacierBlock:        big.NewInt(0),
-			BerlinBlock:             big.NewInt(0),
-			LondonBlock:             big.NewInt(0),
-			Ethash:                  new(params.EthashConfig),
-			TerminalTotalDifficulty: big.NewInt(0),
-			ShanghaiTime:            new(uint64),
-			CancunTime:              new(uint64),
-			PragueTime:              new(uint64),
-		}
+		config = params.MergedTestChainConfig
 		signer  = types.LatestSigner(config)
 		key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
 		key2, _ = crypto.HexToECDSA("0202020202020202020202020202020202020202020202020202002020202020")
@@ -425,12 +407,7 @@ func GenerateBadBlock(parent *types.Block, engine consensus.Engine, txs types.Tr
 	}
 	header.Root = common.BytesToHash(hasher.Sum(nil))
 	if config.IsCancun(header.Number, header.Time) {
-		var pExcess, pUsed = uint64(0), uint64(0)
-		if parent.ExcessBlobGas() != nil {
-			pExcess = *parent.ExcessBlobGas()
-			pUsed = *parent.BlobGasUsed()
-		}
-		excess := eip4844.CalcExcessBlobGas(pExcess, pUsed)
+		excess :=
eip4844.CalcExcessBlobGas(config, parent.Header()) used := uint64(nBlobs * params.BlobTxBlobGasPerBlob) header.ExcessBlobGas = &excess header.BlobGasUsed = &used diff --git a/core/txpool/blobpool/blobpool.go b/core/txpool/blobpool/blobpool.go index 4ab14bbcc0..78cf2c05af 100644 --- a/core/txpool/blobpool/blobpool.go +++ b/core/txpool/blobpool/blobpool.go @@ -51,11 +51,6 @@ const ( // transaction. There can be multiple of these embedded into a single tx. blobSize = params.BlobTxFieldElementsPerBlob * params.BlobTxBytesPerFieldElement - // maxBlobsPerTransaction is the maximum number of blobs a single transaction - // is allowed to contain. Whilst the spec states it's unlimited, the block - // data slots are protocol bound, which implicitly also limit this. - maxBlobsPerTransaction = params.MaxBlobGasPerBlock / params.BlobTxBlobGasPerBlob - // txAvgSize is an approximate byte size of a transaction metadata to avoid // tiny overflows causing all txs to move a shelf higher, wasting disk space. txAvgSize = 4 * 1024 @@ -223,6 +218,11 @@ func newBlobTxMeta(id uint64, size uint32, tx *types.Transaction) *blobTxMeta { // very relaxed ones can be included even if the fees go up, when the closer // ones could already be invalid. // +// - Because the maximum number of blobs allowed in a block can change per +// fork, the pool is designed to handle the maximum number of blobs allowed +// in the chain's latest defined fork -- even if it isn't active. This +// avoids needing to upgrade the database around the fork boundary. +// // When the pool eventually reaches saturation, some old transactions - that may // never execute - will need to be evicted in favor of newer ones. 
The eviction // strategy is quite complex: @@ -387,7 +387,8 @@ func (p *BlobPool) Init(gasTip uint64, head *types.Header, reserve txpool.Addres fails = append(fails, id) } } - store, err := billy.Open(billy.Options{Path: queuedir, Repair: true}, newSlotter(), index) + slotter := newSlotter(eip4844.LatestMaxBlobsPerBlock(p.chain.Config())) + store, err := billy.Open(billy.Options{Path: queuedir, Repair: true}, slotter, index) if err != nil { return err } @@ -414,13 +415,13 @@ func (p *BlobPool) Init(gasTip uint64, head *types.Header, reserve txpool.Addres blobfee = uint256.NewInt(params.BlobTxMinBlobGasprice) ) if p.head.ExcessBlobGas != nil { - blobfee = uint256.MustFromBig(eip4844.CalcBlobFee(*p.head.ExcessBlobGas)) + blobfee = uint256.MustFromBig(eip4844.CalcBlobFee(p.chain.Config(), p.head)) } p.evict = newPriceHeap(basefee, blobfee, p.index) // Pool initialized, attach the blob limbo to it to track blobs included // recently but not yet finalized - p.limbo, err = newLimbo(limbodir) + p.limbo, err = newLimbo(limbodir, eip4844.LatestMaxBlobsPerBlock(p.chain.Config())) if err != nil { p.Close() return err @@ -834,7 +835,7 @@ func (p *BlobPool) Reset(oldHead, newHead *types.Header) { blobfee = uint256.MustFromBig(big.NewInt(params.BlobTxMinBlobGasprice)) ) if newHead.ExcessBlobGas != nil { - blobfee = uint256.MustFromBig(eip4844.CalcBlobFee(*newHead.ExcessBlobGas)) + blobfee = uint256.MustFromBig(eip4844.CalcBlobFee(p.chain.Config(), newHead)) } p.evict.reinit(basefee, blobfee, false) @@ -1598,7 +1599,8 @@ func (p *BlobPool) updateStorageMetrics() { metrics.GetOrRegisterGauge(fmt.Sprintf(shelfSlotusedGaugeName, shelf.SlotSize/blobSize), nil).Update(int64(shelf.FilledSlots)) metrics.GetOrRegisterGauge(fmt.Sprintf(shelfSlotgapsGaugeName, shelf.SlotSize/blobSize), nil).Update(int64(shelf.GappedSlots)) - if shelf.SlotSize/blobSize > maxBlobsPerTransaction { + maxBlobs := eip4844.LatestMaxBlobsPerBlock(p.chain.Config()) + if shelf.SlotSize/blobSize > uint32(maxBlobs) { 
oversizedDataused += slotDataused
 			oversizedDatagaps += slotDatagaps
 			oversizedSlotused += shelf.FilledSlots
diff --git a/core/txpool/blobpool/blobpool_test.go b/core/txpool/blobpool/blobpool_test.go
index 3d90ec4412..0187e8d9a0 100644
--- a/core/txpool/blobpool/blobpool_test.go
+++ b/core/txpool/blobpool/blobpool_test.go
@@ -51,8 +51,10 @@ var (
 	testBlobVHashes [][32]byte
 )
 
+const testMaxBlobsPerBlock = 6
+
 func init() {
-	for i := 0; i < 10; i++ {
+	for i := 0; i < 24; i++ {
 		testBlob := &kzg4844.Blob{byte(i)}
 		testBlobs = append(testBlobs, testBlob)
 
@@ -121,7 +123,12 @@ func (bc *testBlockChain) CurrentBlock() *types.Header {
 		mid := new(big.Int).Add(lo, hi)
 		mid.Div(mid, big.NewInt(2))
 
-		if eip4844.CalcBlobFee(mid.Uint64()).Cmp(bc.blobfee.ToBig()) > 0 {
+		tmp := mid.Uint64()
+		if eip4844.CalcBlobFee(bc.Config(), &types.Header{
+			Number:        blockNumber,
+			Time:          blockTime,
+			ExcessBlobGas: &tmp,
+		}).Cmp(bc.blobfee.ToBig()) > 0 {
 			hi = mid
 		} else {
 			lo = mid
@@ -194,10 +201,43 @@ func makeTx(nonce uint64, gasTipCap uint64, gasFeeCap uint64, blobFeeCap uint64,
 	return types.MustSignNewTx(key, types.LatestSigner(params.MainnetChainConfig), blobtx)
 }
 
+// makeMultiBlobTx is a utility method to construct a random blob tx with
+func makeMultiBlobTx(nonce uint64, gasTipCap uint64, gasFeeCap uint64, blobFeeCap uint64, blobCount int, key *ecdsa.PrivateKey) *types.Transaction { + var ( + blobs []kzg4844.Blob + blobHashes []common.Hash + commitments []kzg4844.Commitment + proofs []kzg4844.Proof + ) + for i := 0; i < blobCount; i++ { + blobs = append(blobs, *testBlobs[i]) + commitments = append(commitments, testBlobCommits[i]) + proofs = append(proofs, testBlobProofs[i]) + blobHashes = append(blobHashes, testBlobVHashes[i]) + } + blobtx := &types.BlobTx{ + ChainID: uint256.MustFromBig(params.MainnetChainConfig.ChainID), + Nonce: nonce, + GasTipCap: uint256.NewInt(gasTipCap), + GasFeeCap: uint256.NewInt(gasFeeCap), + Gas: 21000, + BlobFeeCap: uint256.NewInt(blobFeeCap), + BlobHashes: blobHashes, + Value: uint256.NewInt(100), + Sidecar: &types.BlobTxSidecar{ + Blobs: blobs, + Commitments: commitments, + Proofs: proofs, + }, + } + return types.MustSignNewTx(key, types.LatestSigner(params.MainnetChainConfig), blobtx) +} + // makeUnsignedTx is a utility method to construct a random blob transaction // without signing it. 
func makeUnsignedTx(nonce uint64, gasTipCap uint64, gasFeeCap uint64, blobFeeCap uint64) *types.BlobTx { - return makeUnsignedTxWithTestBlob(nonce, gasTipCap, gasFeeCap, blobFeeCap, rand.Intn(len(testBlobs))) + return makeUnsignedTxWithTestBlob(nonce, gasTipCap, gasFeeCap, blobFeeCap, rnd.Intn(len(testBlobs))) } // makeUnsignedTx is a utility method to construct a random blob transaction @@ -415,7 +455,7 @@ func TestOpenDrops(t *testing.T) { defer os.RemoveAll(storage) os.MkdirAll(filepath.Join(storage, pendingTransactionStore), 0700) - store, _ := billy.Open(billy.Options{Path: filepath.Join(storage, pendingTransactionStore)}, newSlotter(), nil) + store, _ := billy.Open(billy.Options{Path: filepath.Join(storage, pendingTransactionStore)}, newSlotter(testMaxBlobsPerBlock), nil) // Insert a malformed transaction to verify that decoding errors (or format // changes) are handled gracefully (case 1) @@ -738,7 +778,7 @@ func TestOpenIndex(t *testing.T) { defer os.RemoveAll(storage) os.MkdirAll(filepath.Join(storage, pendingTransactionStore), 0700) - store, _ := billy.Open(billy.Options{Path: filepath.Join(storage, pendingTransactionStore)}, newSlotter(), nil) + store, _ := billy.Open(billy.Options{Path: filepath.Join(storage, pendingTransactionStore)}, newSlotter(testMaxBlobsPerBlock), nil) // Insert a sequence of transactions with varying price points to check that // the cumulative minimum will be maintained. @@ -827,7 +867,7 @@ func TestOpenHeap(t *testing.T) { defer os.RemoveAll(storage) os.MkdirAll(filepath.Join(storage, pendingTransactionStore), 0700) - store, _ := billy.Open(billy.Options{Path: filepath.Join(storage, pendingTransactionStore)}, newSlotter(), nil) + store, _ := billy.Open(billy.Options{Path: filepath.Join(storage, pendingTransactionStore)}, newSlotter(testMaxBlobsPerBlock), nil) // Insert a few transactions from a few accounts. To remove randomness from // the heap initialization, use a deterministic account/tx/priority ordering. 
@@ -914,7 +954,7 @@ func TestOpenCap(t *testing.T) {
 	defer os.RemoveAll(storage)
 
 	os.MkdirAll(filepath.Join(storage, pendingTransactionStore), 0700)
-	store, _ := billy.Open(billy.Options{Path: filepath.Join(storage, pendingTransactionStore)}, newSlotter(), nil)
+	store, _ := billy.Open(billy.Options{Path: filepath.Join(storage, pendingTransactionStore)}, newSlotter(testMaxBlobsPerBlock), nil)
 
 	// Insert a few transactions from a few accounts
 	var (
@@ -992,6 +1032,108 @@ func TestOpenCap(t *testing.T) {
 	}
 }
 
+// TestChangingSlotterSize attempts to mimic a scenario where the max blob count
+// of the pool is increased. This would happen during a client release where a
+// new fork is added with a max blob count higher than the previous fork. We
+// want to make sure transactions are persisted between those runs.
+func TestChangingSlotterSize(t *testing.T) {
+	//log.SetDefault(log.NewLogger(log.NewTerminalHandlerWithLevel(os.Stderr, log.LevelTrace, true)))
+
+	// Create a temporary folder for the persistent backend
+	storage, _ := os.MkdirTemp("", "blobpool-")
+	defer os.RemoveAll(storage)
+
+	os.MkdirAll(filepath.Join(storage, pendingTransactionStore), 0700)
+	store, _ := billy.Open(billy.Options{Path: filepath.Join(storage, pendingTransactionStore)}, newSlotter(6), nil)
+
+	// Create transactions from a few accounts.
+	var (
+		key1, _ = crypto.GenerateKey()
+		key2, _ = crypto.GenerateKey()
+		key3, _ = crypto.GenerateKey()
+
+		addr1 = crypto.PubkeyToAddress(key1.PublicKey)
+		addr2 = crypto.PubkeyToAddress(key2.PublicKey)
+		addr3 = crypto.PubkeyToAddress(key3.PublicKey)
+
+		tx1 = makeMultiBlobTx(0, 1, 1000, 100, 6, key1)
+		tx2 = makeMultiBlobTx(0, 1, 800, 70, 6, key2)
+		tx3 = makeMultiBlobTx(0, 1, 800, 110, 24, key3)
+
+		blob1, _ = rlp.EncodeToBytes(tx1)
+		blob2, _ = rlp.EncodeToBytes(tx2)
+	)
+
+	// Write the two safely sized txs to store.
note: although the store is
+	// configured for a blob count of 6, it can also support around ~1mb of call
+	// data - all this to say that we aren't using the absolute largest shelf
+	// available.
+	store.Put(blob1)
+	store.Put(blob2)
+	store.Close()
+
+	// Mimic a blobpool with max blob count of 6 upgrading to a max blob count of 24.
+	for _, maxBlobs := range []int{6, 24} {
+		statedb, _ := state.New(types.EmptyRootHash, state.NewDatabaseForTesting())
+		statedb.AddBalance(addr1, uint256.NewInt(1_000_000_000), tracing.BalanceChangeUnspecified)
+		statedb.AddBalance(addr2, uint256.NewInt(1_000_000_000), tracing.BalanceChangeUnspecified)
+		statedb.AddBalance(addr3, uint256.NewInt(1_000_000_000), tracing.BalanceChangeUnspecified)
+		statedb.Commit(0, true, false)
+
+		// Make custom chain config where the max blob count changes based on the loop variable.
+		cancunTime := uint64(0)
+		config := &params.ChainConfig{
+			ChainID:     big.NewInt(1),
+			LondonBlock: big.NewInt(0),
+			BerlinBlock: big.NewInt(0),
+			CancunTime:  &cancunTime,
+			BlobScheduleConfig: &params.BlobScheduleConfig{
+				Cancun: &params.BlobConfig{
+					Target:         maxBlobs / 2,
+					Max:            maxBlobs,
+					UpdateFraction: params.DefaultCancunBlobConfig.UpdateFraction,
+				},
+			},
+		}
+		chain := &testBlockChain{
+			config:  config,
+			basefee: uint256.NewInt(1050),
+			blobfee: uint256.NewInt(105),
+			statedb: statedb,
+		}
+		pool := New(Config{Datadir: storage}, chain)
+		if err := pool.Init(1, chain.CurrentBlock(), makeAddressReserver()); err != nil {
+			t.Fatalf("failed to create blob pool: %v", err)
+		}
+
+		// Try to add the big blob tx. In the initial iteration it should overflow
+		// the pool. On the subsequent iteration it should be accepted.
+		errs := pool.Add([]*types.Transaction{tx3}, false, true)
+		if _, ok := pool.index[addr3]; ok && maxBlobs == 6 {
+			t.Errorf("expected insert of oversized blob tx to fail: blobs=24, maxBlobs=%d, err=%v", maxBlobs, errs[0])
+		} else if !ok && maxBlobs == 24 {
+			t.Errorf("expected insert of oversized blob tx to succeed: blobs=24, maxBlobs=%d, err=%v", maxBlobs, errs[0])
+		}
+
+		// Verify the regular two txs are always available.
+		if got := pool.Get(tx1.Hash()); got == nil {
+			t.Errorf("expected tx %s from %s in pool", tx1.Hash(), addr1)
+		}
+		if got := pool.Get(tx2.Hash()); got == nil {
+			t.Errorf("expected tx %s from %s in pool", tx2.Hash(), addr2)
+		}
+
+		// Verify all the calculated pool internals. Interestingly, this is **not**
+		// a duplication of the above checks, this actually validates the verifier
+		// using the above already hard coded checks.
+		//
+		// Do not remove this, nor alter the above to be generic.
+		verifyPoolInternals(t, pool)
+
+		pool.Close()
+	}
+}
+
 // Tests that adding transaction will correctly store it in the persistent store
 // and update all the indices.
// @@ -1369,7 +1511,7 @@ func TestAdd(t *testing.T) { defer os.RemoveAll(storage) // late defer, still ok os.MkdirAll(filepath.Join(storage, pendingTransactionStore), 0700) - store, _ := billy.Open(billy.Options{Path: filepath.Join(storage, pendingTransactionStore)}, newSlotter(), nil) + store, _ := billy.Open(billy.Options{Path: filepath.Join(storage, pendingTransactionStore)}, newSlotter(testMaxBlobsPerBlock), nil) // Insert the seed transactions for the pool startup var ( diff --git a/core/txpool/blobpool/evictheap_test.go b/core/txpool/blobpool/evictheap_test.go index b03dd83d69..e392932401 100644 --- a/core/txpool/blobpool/evictheap_test.go +++ b/core/txpool/blobpool/evictheap_test.go @@ -26,7 +26,7 @@ import ( "github.com/holiman/uint256" ) -var rand = mrand.New(mrand.NewSource(1)) +var rnd = mrand.New(mrand.NewSource(1)) // verifyHeapInternals verifies that all accounts present in the index are also // present in the heap and internals are consistent across various indices. @@ -193,12 +193,12 @@ func benchmarkPriceHeapReinit(b *testing.B, datacap uint64) { index := make(map[common.Address][]*blobTxMeta) for i := 0; i < int(blobs); i++ { var addr common.Address - rand.Read(addr[:]) + rnd.Read(addr[:]) var ( - execTip = uint256.NewInt(rand.Uint64()) - execFee = uint256.NewInt(rand.Uint64()) - blobFee = uint256.NewInt(rand.Uint64()) + execTip = uint256.NewInt(rnd.Uint64()) + execFee = uint256.NewInt(rnd.Uint64()) + blobFee = uint256.NewInt(rnd.Uint64()) basefeeJumps = dynamicFeeJumps(execFee) blobfeeJumps = dynamicFeeJumps(blobFee) @@ -218,13 +218,13 @@ func benchmarkPriceHeapReinit(b *testing.B, datacap uint64) { }} } // Create a price heap and reinit it over and over - heap := newPriceHeap(uint256.NewInt(rand.Uint64()), uint256.NewInt(rand.Uint64()), index) + heap := newPriceHeap(uint256.NewInt(rnd.Uint64()), uint256.NewInt(rnd.Uint64()), index) basefees := make([]*uint256.Int, b.N) blobfees := make([]*uint256.Int, b.N) for i := 0; i < b.N; i++ { - 
basefees[i] = uint256.NewInt(rand.Uint64()) - blobfees[i] = uint256.NewInt(rand.Uint64()) + basefees[i] = uint256.NewInt(rnd.Uint64()) + blobfees[i] = uint256.NewInt(rnd.Uint64()) } b.ResetTimer() b.ReportAllocs() @@ -269,12 +269,12 @@ func benchmarkPriceHeapOverflow(b *testing.B, datacap uint64) { index := make(map[common.Address][]*blobTxMeta) for i := 0; i < int(blobs); i++ { var addr common.Address - rand.Read(addr[:]) + rnd.Read(addr[:]) var ( - execTip = uint256.NewInt(rand.Uint64()) - execFee = uint256.NewInt(rand.Uint64()) - blobFee = uint256.NewInt(rand.Uint64()) + execTip = uint256.NewInt(rnd.Uint64()) + execFee = uint256.NewInt(rnd.Uint64()) + blobFee = uint256.NewInt(rnd.Uint64()) basefeeJumps = dynamicFeeJumps(execFee) blobfeeJumps = dynamicFeeJumps(blobFee) @@ -294,18 +294,18 @@ func benchmarkPriceHeapOverflow(b *testing.B, datacap uint64) { }} } // Create a price heap and overflow it over and over - evict := newPriceHeap(uint256.NewInt(rand.Uint64()), uint256.NewInt(rand.Uint64()), index) + evict := newPriceHeap(uint256.NewInt(rnd.Uint64()), uint256.NewInt(rnd.Uint64()), index) var ( addrs = make([]common.Address, b.N) metas = make([]*blobTxMeta, b.N) ) for i := 0; i < b.N; i++ { - rand.Read(addrs[i][:]) + rnd.Read(addrs[i][:]) var ( - execTip = uint256.NewInt(rand.Uint64()) - execFee = uint256.NewInt(rand.Uint64()) - blobFee = uint256.NewInt(rand.Uint64()) + execTip = uint256.NewInt(rnd.Uint64()) + execFee = uint256.NewInt(rnd.Uint64()) + blobFee = uint256.NewInt(rnd.Uint64()) basefeeJumps = dynamicFeeJumps(execFee) blobfeeJumps = dynamicFeeJumps(blobFee) diff --git a/core/txpool/blobpool/limbo.go b/core/txpool/blobpool/limbo.go index 32381a3936..d5992f2906 100644 --- a/core/txpool/blobpool/limbo.go +++ b/core/txpool/blobpool/limbo.go @@ -48,7 +48,7 @@ type limbo struct { } // newLimbo opens and indexes a set of limboed blob transactions. 
-func newLimbo(datadir string) (*limbo, error) { +func newLimbo(datadir string, maxBlobsPerTransaction int) (*limbo, error) { l := &limbo{ index: make(map[common.Hash]uint64), groups: make(map[uint64]map[uint64]common.Hash), @@ -60,7 +60,7 @@ func newLimbo(datadir string) (*limbo, error) { fails = append(fails, id) } } - store, err := billy.Open(billy.Options{Path: datadir, Repair: true}, newSlotter(), index) + store, err := billy.Open(billy.Options{Path: datadir, Repair: true}, newSlotter(maxBlobsPerTransaction), index) if err != nil { return nil, err } diff --git a/core/txpool/blobpool/priority_test.go b/core/txpool/blobpool/priority_test.go index cf0e0454a0..1eaee6d7df 100644 --- a/core/txpool/blobpool/priority_test.go +++ b/core/txpool/blobpool/priority_test.go @@ -52,7 +52,7 @@ func TestPriorityCalculation(t *testing.T) { func BenchmarkDynamicFeeJumpCalculation(b *testing.B) { fees := make([]*uint256.Int, b.N) for i := 0; i < b.N; i++ { - fees[i] = uint256.NewInt(rand.Uint64()) + fees[i] = uint256.NewInt(rnd.Uint64()) } b.ResetTimer() b.ReportAllocs() @@ -76,8 +76,8 @@ func BenchmarkPriorityCalculation(b *testing.B) { txBasefeeJumps := make([]float64, b.N) txBlobfeeJumps := make([]float64, b.N) for i := 0; i < b.N; i++ { - txBasefeeJumps[i] = dynamicFeeJumps(uint256.NewInt(rand.Uint64())) - txBlobfeeJumps[i] = dynamicFeeJumps(uint256.NewInt(rand.Uint64())) + txBasefeeJumps[i] = dynamicFeeJumps(uint256.NewInt(rnd.Uint64())) + txBlobfeeJumps[i] = dynamicFeeJumps(uint256.NewInt(rnd.Uint64())) } b.ResetTimer() b.ReportAllocs() diff --git a/core/txpool/blobpool/slotter.go b/core/txpool/blobpool/slotter.go index 35349c3445..84ccc0f27b 100644 --- a/core/txpool/blobpool/slotter.go +++ b/core/txpool/blobpool/slotter.go @@ -25,13 +25,13 @@ package blobpool // The slotter also creates a shelf for 0-blob transactions. 
Whilst those are not // allowed in the current protocol, having an empty shelf is not a relevant use // of resources, but it makes stress testing with junk transactions simpler. -func newSlotter() func() (uint32, bool) { +func newSlotter(maxBlobsPerTransaction int) func() (uint32, bool) { slotsize := uint32(txAvgSize) slotsize -= uint32(blobSize) // underflows, it's ok, will overflow back in the first return return func() (size uint32, done bool) { slotsize += blobSize - finished := slotsize > maxBlobsPerTransaction*blobSize+txMaxSize + finished := slotsize > uint32(maxBlobsPerTransaction)*blobSize+txMaxSize return slotsize, finished } diff --git a/core/txpool/blobpool/slotter_test.go b/core/txpool/blobpool/slotter_test.go index a7b43b4d22..8d46f47d2c 100644 --- a/core/txpool/blobpool/slotter_test.go +++ b/core/txpool/blobpool/slotter_test.go @@ -21,7 +21,7 @@ import "testing" // Tests that the slotter creates the expected database shelves. func TestNewSlotter(t *testing.T) { // Generate the database shelve sizes - slotter := newSlotter() + slotter := newSlotter(6) var shelves []uint32 for { diff --git a/core/txpool/validation.go b/core/txpool/validation.go index dec711b173..8cd13776a6 100644 --- a/core/txpool/validation.go +++ b/core/txpool/validation.go @@ -23,6 +23,7 @@ import ( "math/big" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/consensus/misc/eip4844" "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/types" @@ -144,8 +145,9 @@ func ValidateTransaction(tx *types.Transaction, head *types.Header, signer types if len(hashes) == 0 { return errors.New("blobless blob transaction") } - if len(hashes) > params.MaxBlobGasPerBlock/params.BlobTxBlobGasPerBlob { - return fmt.Errorf("too many blobs in transaction: have %d, permitted %d", len(hashes), params.MaxBlobGasPerBlock/params.BlobTxBlobGasPerBlob) + maxBlobs := eip4844.MaxBlobsPerBlock(opts.Config, head.Time) + 
if len(hashes) > maxBlobs { + return fmt.Errorf("too many blobs in transaction: have %d, permitted %d", len(hashes), maxBlobs) } // Ensure commitments, proofs and hashes are valid if err := validateBlobSidecar(hashes, sidecar); err != nil { diff --git a/core/verkle_witness_test.go b/core/verkle_witness_test.go index 500fb37156..4e873eedff 100644 --- a/core/verkle_witness_test.go +++ b/core/verkle_witness_test.go @@ -58,6 +58,9 @@ var ( VerkleTime: u64(0), TerminalTotalDifficulty: common.Big0, EnableVerkleAtGenesis: true, + BlobScheduleConfig: ¶ms.BlobScheduleConfig{ + Verkle: params.DefaultPragueBlobConfig, + }, // TODO uncomment when proof generation is merged // ProofInBlocks: true, } @@ -79,6 +82,9 @@ var ( VerkleTime: u64(0), TerminalTotalDifficulty: common.Big0, EnableVerkleAtGenesis: true, + BlobScheduleConfig: ¶ms.BlobScheduleConfig{ + Verkle: params.DefaultPragueBlobConfig, + }, } ) @@ -226,11 +232,11 @@ func TestProcessParentBlockHash(t *testing.T) { var num = 2 for i := 1; i <= num; i++ { header := &types.Header{ParentHash: common.Hash{byte(i)}, Number: big.NewInt(int64(i)), Difficulty: new(big.Int)} - vmContext := NewEVMBlockContext(header, nil, new(common.Address)) chainConfig := params.MergedTestChainConfig if isVerkle { chainConfig = testVerkleChainConfig } + vmContext := NewEVMBlockContext(header, nil, new(common.Address)) evm := vm.NewEVM(vmContext, statedb, chainConfig, vm.Config{}) ProcessParentBlockHash(header.ParentHash, evm) } diff --git a/core/vm/runtime/runtime_test.go b/core/vm/runtime/runtime_test.go index 6074e9a096..44f5bc8273 100644 --- a/core/vm/runtime/runtime_test.go +++ b/core/vm/runtime/runtime_test.go @@ -314,6 +314,10 @@ func (d *dummyChain) GetHeader(h common.Hash, n uint64) *types.Header { return fakeHeader(n, parentHash) } +func (d *dummyChain) Config() *params.ChainConfig { + return nil +} + // TestBlockhash tests the blockhash operation. It's a bit special, since it internally // requires access to a chain reader. 
func TestBlockhash(t *testing.T) { diff --git a/eth/api_backend.go b/eth/api_backend.go index 90439e8eab..52ecd91789 100644 --- a/eth/api_backend.go +++ b/eth/api_backend.go @@ -356,7 +356,7 @@ func (b *EthAPIBackend) FeeHistory(ctx context.Context, blockCount uint64, lastB func (b *EthAPIBackend) BlobBaseFee(ctx context.Context) *big.Int { if excess := b.CurrentHeader().ExcessBlobGas; excess != nil { - return eip4844.CalcBlobFee(*excess) + return eip4844.CalcBlobFee(b.ChainConfig(), b.CurrentHeader()) } return nil } diff --git a/eth/catalyst/api_test.go b/eth/catalyst/api_test.go index 407f0db3eb..9840d9c6ad 100644 --- a/eth/catalyst/api_test.go +++ b/eth/catalyst/api_test.go @@ -1227,6 +1227,7 @@ func setupBodies(t *testing.T) (*node.Node, *eth.Ethereum, []*types.Block) { genesis.Config.ShanghaiTime = &time genesis.Config.CancunTime = &time genesis.Config.PragueTime = &time + genesis.Config.BlobScheduleConfig = params.DefaultBlobSchedule n, ethservice := startEthService(t, genesis, blocks) @@ -1543,6 +1544,7 @@ func TestParentBeaconBlockRoot(t *testing.T) { time := blocks[len(blocks)-1].Time() + 5 genesis.Config.ShanghaiTime = &time genesis.Config.CancunTime = &time + genesis.Config.BlobScheduleConfig = params.DefaultBlobSchedule n, ethservice := startEthService(t, genesis, blocks) defer n.Close() @@ -1625,6 +1627,7 @@ func TestWitnessCreationAndConsumption(t *testing.T) { timestamp := blocks[len(blocks)-2].Time() + 5 genesis.Config.ShanghaiTime = ×tamp genesis.Config.CancunTime = ×tamp + genesis.Config.BlobScheduleConfig = params.DefaultBlobSchedule n, ethservice := startEthService(t, genesis, blocks[:9]) defer n.Close() diff --git a/eth/fetcher/tx_fetcher_test.go b/eth/fetcher/tx_fetcher_test.go index f80b1d6096..52b3591086 100644 --- a/eth/fetcher/tx_fetcher_test.go +++ b/eth/fetcher/tx_fetcher_test.go @@ -1108,7 +1108,7 @@ func TestTransactionFetcherBandwidthLimiting(t *testing.T) { doTxNotify{peer: "C", hashes: []common.Hash{{0x07}, {0x08}}, types: 
[]byte{types.BlobTxType, types.BlobTxType}, - sizes: []uint32{params.MaxBlobGasPerBlock, params.MaxBlobGasPerBlock}, + sizes: []uint32{params.BlobTxBlobGasPerBlob * 10, params.BlobTxBlobGasPerBlob * 10}, }, doWait{time: txArriveTimeout, step: true}, isWaiting(nil), @@ -1125,8 +1125,8 @@ func TestTransactionFetcherBandwidthLimiting(t *testing.T) { {common.Hash{0x06}, types.LegacyTxType, maxTxRetrievalSize}, }, "C": { - {common.Hash{0x07}, types.BlobTxType, params.MaxBlobGasPerBlock}, - {common.Hash{0x08}, types.BlobTxType, params.MaxBlobGasPerBlock}, + {common.Hash{0x07}, types.BlobTxType, params.BlobTxBlobGasPerBlob * 10}, + {common.Hash{0x08}, types.BlobTxType, params.BlobTxBlobGasPerBlob * 10}, }, }, fetching: map[string][]common.Hash{ diff --git a/eth/gasprice/feehistory.go b/eth/gasprice/feehistory.go index 1e625e21c0..697263f20b 100644 --- a/eth/gasprice/feehistory.go +++ b/eth/gasprice/feehistory.go @@ -31,7 +31,6 @@ import ( "github.com/ethereum/go-ethereum/consensus/misc/eip4844" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/log" - "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/rpc" ) @@ -97,8 +96,10 @@ func (oracle *Oracle) processBlock(bf *blockFees, percentiles []float64) { } // Fill in blob base fee and next blob base fee. 
if excessBlobGas := bf.header.ExcessBlobGas; excessBlobGas != nil { - bf.results.blobBaseFee = eip4844.CalcBlobFee(*excessBlobGas) - bf.results.nextBlobBaseFee = eip4844.CalcBlobFee(eip4844.CalcExcessBlobGas(*excessBlobGas, *bf.header.BlobGasUsed)) + bf.results.blobBaseFee = eip4844.CalcBlobFee(config, bf.header) + excess := eip4844.CalcExcessBlobGas(config, bf.header) + next := &types.Header{Number: bf.header.Number, Time: bf.header.Time, ExcessBlobGas: &excess} + bf.results.nextBlobBaseFee = eip4844.CalcBlobFee(config, next) } else { bf.results.blobBaseFee = new(big.Int) bf.results.nextBlobBaseFee = new(big.Int) @@ -106,7 +107,8 @@ func (oracle *Oracle) processBlock(bf *blockFees, percentiles []float64) { // Compute gas used ratio for normal and blob gas. bf.results.gasUsedRatio = float64(bf.header.GasUsed) / float64(bf.header.GasLimit) if blobGasUsed := bf.header.BlobGasUsed; blobGasUsed != nil { - bf.results.blobGasUsedRatio = float64(*blobGasUsed) / params.MaxBlobGasPerBlock + maxBlobs := eip4844.MaxBlobsPerBlock(config, bf.header.Time) + bf.results.blobGasUsedRatio = float64(*blobGasUsed) / float64(maxBlobs) } if len(percentiles) == 0 { diff --git a/eth/gasprice/gasprice_test.go b/eth/gasprice/gasprice_test.go index 185224d834..0a32c278cb 100644 --- a/eth/gasprice/gasprice_test.go +++ b/eth/gasprice/gasprice_test.go @@ -157,6 +157,7 @@ func newTestBackend(t *testing.T, londonBlock *big.Int, cancunBlock *big.Int, pe ts := gspec.Timestamp + cancunBlock.Uint64()*10 // fixed 10 sec block time in blockgen config.ShanghaiTime = &ts config.CancunTime = &ts + config.BlobScheduleConfig = params.DefaultBlobSchedule signer = types.LatestSigner(gspec.Config) } diff --git a/eth/tracers/internal/tracetest/supply_test.go b/eth/tracers/internal/tracetest/supply_test.go index d918f5aca4..dc46b6d5ef 100644 --- a/eth/tracers/internal/tracetest/supply_test.go +++ b/eth/tracers/internal/tracetest/supply_test.go @@ -360,6 +360,7 @@ func TestSupplySelfdestruct(t *testing.T) { 
cancunTime := uint64(0) gspec.Config.ShanghaiTime = &cancunTime gspec.Config.CancunTime = &cancunTime + gspec.Config.BlobScheduleConfig = params.DefaultBlobSchedule postCancunOutput, postCancunChain, err := testSupplyTracer(t, gspec, testBlockGenerationFunc) if err != nil { diff --git a/eth/tracers/internal/tracetest/testdata/call_tracer/blob_tx.json b/eth/tracers/internal/tracetest/testdata/call_tracer/blob_tx.json index 05da3b42e1..f8e08532a4 100644 --- a/eth/tracers/internal/tracetest/testdata/call_tracer/blob_tx.json +++ b/eth/tracers/internal/tracetest/testdata/call_tracer/blob_tx.json @@ -41,7 +41,14 @@ "grayGlacierBlock": 0, "shanghaiTime": 0, "cancunTime": 0, - "terminalTotalDifficulty": 0 + "terminalTotalDifficulty": 0, + "blobSchedule": { + "cancun": { + "target": 3, + "max": 6, + "baseFeeUpdateFraction": 3338477 + } + } } }, "context": { diff --git a/eth/tracers/internal/tracetest/testdata/prestate_tracer/blob_tx.json b/eth/tracers/internal/tracetest/testdata/prestate_tracer/blob_tx.json index f8adbabf63..9f452ca5bd 100644 --- a/eth/tracers/internal/tracetest/testdata/prestate_tracer/blob_tx.json +++ b/eth/tracers/internal/tracetest/testdata/prestate_tracer/blob_tx.json @@ -41,7 +41,14 @@ "grayGlacierBlock": 0, "shanghaiTime": 0, "cancunTime": 0, - "terminalTotalDifficulty": 0 + "terminalTotalDifficulty": 0, + "blobSchedule": { + "cancun": { + "target": 3, + "max": 6, + "baseFeeUpdateFraction": 3338477 + } + } } }, "context": { @@ -54,7 +61,9 @@ }, "input": "0x03f8b1820539806485174876e800825208940c2c51a0990aee1d73c1228de1586883415575088080c083020000f842a00100c9fbdf97f747e85847b4f3fff408f89c26842f77c882858bf2c89923849aa00138e3896f3c27f2389147507f8bcec52028b0efca6ee842ed83c9158873943880a0dbac3f97a532c9b00e6239b29036245a5bfbb96940b9d848634661abee98b945a03eec8525f261c2e79798f7b45a5d6ccaefa24576d53ba5023e919b86841c0675", "result": { - "0x0000000000000000000000000000000000000000": { "balance": "0x272e0528" }, + "0x0000000000000000000000000000000000000000": { + 
"balance": "0x272e0528" + }, "0x0c2c51a0990aee1d73c1228de158688341557508": { "balance": "0xde0b6b3a7640000" } diff --git a/eth/tracers/internal/tracetest/testdata/prestate_tracer/setcode_tx.json b/eth/tracers/internal/tracetest/testdata/prestate_tracer/setcode_tx.json index b7d5ee1c54..043130a072 100644 --- a/eth/tracers/internal/tracetest/testdata/prestate_tracer/setcode_tx.json +++ b/eth/tracers/internal/tracetest/testdata/prestate_tracer/setcode_tx.json @@ -53,7 +53,19 @@ "shanghaiTime": 0, "cancunTime": 0, "pragueTime": 0, - "terminalTotalDifficulty": 0 + "terminalTotalDifficulty": 0, + "blobSchedule": { + "cancun": { + "target": 3, + "max": 6, + "baseFeeUpdateFraction": 3338477 + }, + "prague": { + "target": 3, + "max": 6, + "baseFeeUpdateFraction": 5007716 + } + } } }, "context": { diff --git a/eth/tracers/internal/tracetest/util.go b/eth/tracers/internal/tracetest/util.go index abc2699498..e29144e04e 100644 --- a/eth/tracers/internal/tracetest/util.go +++ b/eth/tracers/internal/tracetest/util.go @@ -9,6 +9,7 @@ import ( "github.com/ethereum/go-ethereum/common/math" "github.com/ethereum/go-ethereum/consensus/misc/eip4844" "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/vm" // Force-load native and js packages, to trigger registration @@ -53,8 +54,9 @@ func (c *callContext) toBlockContext(genesis *core.Genesis) vm.BlockContext { } if genesis.ExcessBlobGas != nil && genesis.BlobGasUsed != nil { - excessBlobGas := eip4844.CalcExcessBlobGas(*genesis.ExcessBlobGas, *genesis.BlobGasUsed) - context.BlobBaseFee = eip4844.CalcBlobFee(excessBlobGas) + excess := eip4844.CalcExcessBlobGas(genesis.Config, genesis.ToBlock().Header()) + header := &types.Header{ExcessBlobGas: &excess, Number: genesis.Config.LondonBlock, Time: *genesis.Config.CancunTime} + context.BlobBaseFee = eip4844.CalcBlobFee(genesis.Config, header) } return context } diff --git a/eth/tracers/live/supply.go 
b/eth/tracers/live/supply.go index fa4e5b1904..bae7445cb4 100644 --- a/eth/tracers/live/supply.go +++ b/eth/tracers/live/supply.go @@ -31,6 +31,7 @@ import ( "github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/eth/tracers" "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/params" "gopkg.in/natefinch/lumberjack.v2" ) @@ -83,6 +84,7 @@ type supplyTracer struct { delta supplyInfo txCallstack []supplyTxCallstack // Callstack for current transaction logger *lumberjack.Logger + chainConfig *params.ChainConfig } type supplyTracerConfig struct { @@ -112,14 +114,15 @@ func newSupplyTracer(cfg json.RawMessage) (*tracing.Hooks, error) { logger: logger, } return &tracing.Hooks{ - OnBlockStart: t.onBlockStart, - OnBlockEnd: t.onBlockEnd, - OnGenesisBlock: t.onGenesisBlock, - OnTxStart: t.onTxStart, - OnBalanceChange: t.onBalanceChange, - OnEnter: t.onEnter, - OnExit: t.onExit, - OnClose: t.onClose, + OnBlockchainInit: t.onBlockchainInit, + OnBlockStart: t.onBlockStart, + OnBlockEnd: t.onBlockEnd, + OnGenesisBlock: t.onGenesisBlock, + OnTxStart: t.onTxStart, + OnBalanceChange: t.onBalanceChange, + OnEnter: t.onEnter, + OnExit: t.onExit, + OnClose: t.onClose, }, nil } @@ -146,6 +149,10 @@ func (s *supplyTracer) resetDelta() { s.delta = newSupplyInfo() } +func (s *supplyTracer) onBlockchainInit(chainConfig *params.ChainConfig) { + s.chainConfig = chainConfig +} + func (s *supplyTracer) onBlockStart(ev tracing.BlockEvent) { s.resetDelta() @@ -161,8 +168,7 @@ func (s *supplyTracer) onBlockStart(ev tracing.BlockEvent) { // Blob burnt gas if blobGas := ev.Block.BlobGasUsed(); blobGas != nil && *blobGas > 0 && ev.Block.ExcessBlobGas() != nil { var ( - excess = *ev.Block.ExcessBlobGas() - baseFee = eip4844.CalcBlobFee(excess) + baseFee = eip4844.CalcBlobFee(s.chainConfig, ev.Block.Header()) burn = new(big.Int).Mul(new(big.Int).SetUint64(*blobGas), baseFee) ) s.delta.Burn.Blob = burn diff --git a/ethclient/ethclient_test.go 
b/ethclient/ethclient_test.go index 4ad8a552d2..5fc7ed68f9 100644 --- a/ethclient/ethclient_test.go +++ b/ethclient/ethclient_test.go @@ -29,6 +29,7 @@ import ( "github.com/ethereum/go-ethereum" "github.com/ethereum/go-ethereum/accounts/abi" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/consensus/beacon" "github.com/ethereum/go-ethereum/consensus/ethash" "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/types" @@ -65,7 +66,7 @@ var ( ) var genesis = &core.Genesis{ - Config: params.AllEthashProtocolChanges, + Config: params.AllDevChainProtocolChanges, Alloc: types.GenesisAlloc{ testAddr: {Balance: testBalance}, revertContractAddr: {Code: revertCode}, @@ -136,7 +137,7 @@ func generateTestChain() []*types.Block { g.AddTx(testTx2) } } - _, blocks, _ := core.GenerateChainWithGenesis(genesis, ethash.NewFaker(), 2, generate) + _, blocks, _ := core.GenerateChainWithGenesis(genesis, beacon.New(ethash.NewFaker()), 2, generate) return append([]*types.Block{genesis.ToBlock()}, blocks...) 
} @@ -223,7 +224,7 @@ func testHeader(t *testing.T, chain []*types.Block, client *rpc.Client) { if got != nil && got.Number != nil && got.Number.Sign() == 0 { got.Number = big.NewInt(0) // hack to make DeepEqual work } - if !reflect.DeepEqual(got, tt.want) { + if got.Hash() != tt.want.Hash() { t.Fatalf("HeaderByNumber(%v) got = %v, want %v", tt.block, got, tt.want) } }) @@ -314,7 +315,7 @@ func testChainID(t *testing.T, client *rpc.Client) { if err != nil { t.Fatalf("unexpected error: %v", err) } - if id == nil || id.Cmp(params.AllEthashProtocolChanges.ChainID) != 0 { + if id == nil || id.Cmp(params.AllDevChainProtocolChanges.ChainID) != 0 { t.Fatalf("ChainID returned wrong number: %+v", id) } } diff --git a/internal/ethapi/api.go b/internal/ethapi/api.go index d9cec560ea..c0b37c516b 100644 --- a/internal/ethapi/api.go +++ b/internal/ethapi/api.go @@ -624,6 +624,7 @@ func (api *BlockChainAPI) GetBlockReceipts(ctx context.Context, blockNrOrHash rp type ChainContextBackend interface { Engine() consensus.Engine HeaderByNumber(context.Context, rpc.BlockNumber) (*types.Header, error) + ChainConfig() *params.ChainConfig } // ChainContext is an implementation of core.ChainContext. 
It's main use-case @@ -652,6 +653,10 @@ func (context *ChainContext) GetHeader(hash common.Hash, number uint64) *types.H return header } +func (context *ChainContext) Config() *params.ChainConfig { + return context.b.ChainConfig() +} + func doCall(ctx context.Context, b Backend, args TransactionArgs, state *state.StateDB, header *types.Header, overrides *override.StateOverride, blockOverrides *override.BlockOverrides, timeout time.Duration, globalGasCap uint64) (*core.ExecutionResult, error) { blockCtx := core.NewEVMBlockContext(header, NewChainContext(ctx, b), nil) if blockOverrides != nil { diff --git a/internal/ethapi/simulate.go b/internal/ethapi/simulate.go index 130eaa9724..d364b80485 100644 --- a/internal/ethapi/simulate.go +++ b/internal/ethapi/simulate.go @@ -159,9 +159,7 @@ func (sim *simulator) processBlock(ctx context.Context, block *simBlock, header, if sim.chainConfig.IsCancun(header.Number, header.Time) { var excess uint64 if sim.chainConfig.IsCancun(parent.Number, parent.Time) { - excess = eip4844.CalcExcessBlobGas(*parent.ExcessBlobGas, *parent.BlobGasUsed) - } else { - excess = eip4844.CalcExcessBlobGas(0, 0) + excess = eip4844.CalcExcessBlobGas(sim.chainConfig, parent) } header.ExcessBlobGas = &excess } @@ -415,3 +413,7 @@ func (b *simBackend) HeaderByNumber(ctx context.Context, number rpc.BlockNumber) } return nil, errors.New("header not found") } + +func (b *simBackend) ChainConfig() *params.ChainConfig { + return b.b.ChainConfig() +} diff --git a/internal/ethapi/transaction_args.go b/internal/ethapi/transaction_args.go index 175ac13a0f..7a7d63c535 100644 --- a/internal/ethapi/transaction_args.go +++ b/internal/ethapi/transaction_args.go @@ -37,10 +37,6 @@ import ( "github.com/holiman/uint256" ) -var ( - maxBlobsPerTransaction = params.MaxBlobGasPerBlock / params.BlobTxBlobGasPerBlob -) - // TransactionArgs represents the arguments to construct a new transaction // or a message call. 
type TransactionArgs struct { @@ -125,8 +121,9 @@ func (args *TransactionArgs) setDefaults(ctx context.Context, b Backend, skipGas if args.BlobHashes != nil && len(args.BlobHashes) == 0 { return errors.New(`need at least 1 blob for a blob transaction`) } - if args.BlobHashes != nil && len(args.BlobHashes) > maxBlobsPerTransaction { - return fmt.Errorf(`too many blobs in transaction (have=%d, max=%d)`, len(args.BlobHashes), maxBlobsPerTransaction) + maxBlobs := eip4844.MaxBlobsPerBlock(b.ChainConfig(), b.CurrentHeader().Time) + if args.BlobHashes != nil && len(args.BlobHashes) > maxBlobs { + return fmt.Errorf(`too many blobs in transaction (have=%d, max=%d)`, len(args.BlobHashes), maxBlobs) } // create check @@ -191,7 +188,9 @@ func (args *TransactionArgs) setFeeDefaults(ctx context.Context, b Backend, head if args.BlobFeeCap != nil && args.BlobFeeCap.ToInt().Sign() == 0 { return errors.New("maxFeePerBlobGas, if specified, must be non-zero") } - args.setCancunFeeDefaults(head) + if b.ChainConfig().IsCancun(head.Number, head.Time) { + args.setCancunFeeDefaults(b.ChainConfig(), head) + } // If both gasPrice and at least one of the EIP-1559 fee parameters are specified, error. if args.GasPrice != nil && (args.MaxFeePerGas != nil || args.MaxPriorityFeePerGas != nil) { return errors.New("both gasPrice and (maxFeePerGas or maxPriorityFeePerGas) specified") @@ -243,15 +242,10 @@ func (args *TransactionArgs) setFeeDefaults(ctx context.Context, b Backend, head } // setCancunFeeDefaults fills in reasonable default fee values for unspecified fields. -func (args *TransactionArgs) setCancunFeeDefaults(head *types.Header) { +func (args *TransactionArgs) setCancunFeeDefaults(config *params.ChainConfig, head *types.Header) { // Set maxFeePerBlobGas if it is missing. if args.BlobHashes != nil && args.BlobFeeCap == nil { - var excessBlobGas uint64 - if head.ExcessBlobGas != nil { - excessBlobGas = *head.ExcessBlobGas - } - // ExcessBlobGas must be set for a Cancun block. 
- blobBaseFee := eip4844.CalcBlobFee(excessBlobGas) + blobBaseFee := eip4844.CalcBlobFee(config, head) // Set the max fee to be 2 times larger than the previous block's blob base fee. // The additional slack allows the tx to not become invalidated if the base // fee is rising. diff --git a/internal/ethapi/transaction_args_test.go b/internal/ethapi/transaction_args_test.go index 7355c2463c..e017804861 100644 --- a/internal/ethapi/transaction_args_test.go +++ b/internal/ethapi/transaction_args_test.go @@ -279,6 +279,7 @@ func newBackendMock() *backendMock { BerlinBlock: big.NewInt(0), LondonBlock: big.NewInt(1000), CancunTime: &cancunTime, + BlobScheduleConfig: params.DefaultBlobSchedule, } return &backendMock{ current: &types.Header{ diff --git a/miner/worker.go b/miner/worker.go index b5aa080025..f8f4bae833 100644 --- a/miner/worker.go +++ b/miner/worker.go @@ -210,10 +210,7 @@ func (miner *Miner) prepareWork(genParams *generateParams, witness bool) (*envir if miner.chainConfig.IsCancun(header.Number, header.Time) { var excessBlobGas uint64 if miner.chainConfig.IsCancun(parent.Number, parent.Time) { - excessBlobGas = eip4844.CalcExcessBlobGas(*parent.ExcessBlobGas, *parent.BlobGasUsed) - } else { - // For the first post-fork block, both parent.data_gas_used and parent.excess_data_gas are evaluated as 0 - excessBlobGas = eip4844.CalcExcessBlobGas(0, 0) + excessBlobGas = eip4844.CalcExcessBlobGas(miner.chainConfig, parent) } header.BlobGasUsed = new(uint64) header.ExcessBlobGas = &excessBlobGas @@ -284,7 +281,8 @@ func (miner *Miner) commitBlobTransaction(env *environment, tx *types.Transactio // isn't really a better place right now. The blob gas limit is checked at block validation time // and not during execution. This means core.ApplyTransaction will not return an error if the // tx has too many blobs. So we have to explicitly check it here. 
- if (env.blobs+len(sc.Blobs))*params.BlobTxBlobGasPerBlob > params.MaxBlobGasPerBlock { + maxBlobs := eip4844.MaxBlobsPerBlock(miner.chainConfig, env.header.Time) + if env.blobs+len(sc.Blobs) > maxBlobs { return errors.New("max data blobs reached") } receipt, err := miner.applyTransaction(env, tx) @@ -333,7 +331,7 @@ func (miner *Miner) commitTransactions(env *environment, plainTxs, blobTxs *tran } // If we don't have enough blob space for any further blob transactions, // skip that list altogether - if !blobTxs.Empty() && env.blobs*params.BlobTxBlobGasPerBlob >= params.MaxBlobGasPerBlock { + if !blobTxs.Empty() && env.blobs >= eip4844.MaxBlobsPerBlock(miner.chainConfig, env.header.Time) { log.Trace("Not enough blob space for further blob transactions") blobTxs.Clear() // Fall though to pick up any plain txs @@ -367,11 +365,19 @@ func (miner *Miner) commitTransactions(env *environment, plainTxs, blobTxs *tran txs.Pop() continue } - if left := uint64(params.MaxBlobGasPerBlock - env.blobs*params.BlobTxBlobGasPerBlob); left < ltx.BlobGas { - log.Trace("Not enough blob gas left for transaction", "hash", ltx.Hash, "left", left, "needed", ltx.BlobGas) - txs.Pop() - continue + + // Most of the blob gas logic here is agnostic as to if the chain supports + // blobs or not, however the max check panics when called on a chain without + // a defined schedule, so we need to verify it's safe to call. 
+ if miner.chainConfig.IsCancun(env.header.Number, env.header.Time) { + left := eip4844.MaxBlobsPerBlock(miner.chainConfig, env.header.Time) - env.blobs + if left < int(ltx.BlobGas/params.BlobTxBlobGasPerBlob) { + log.Trace("Not enough blob space left for transaction", "hash", ltx.Hash, "left", left, "needed", ltx.BlobGas/params.BlobTxBlobGasPerBlob) + txs.Pop() + continue + } } + // Transaction seems to fit, pull it up from the pool tx := ltx.Resolve() if tx == nil { @@ -379,6 +385,7 @@ func (miner *Miner) commitTransactions(env *environment, plainTxs, blobTxs *tran txs.Pop() continue } + // Error may be ignored here. The error has already been checked // during transaction acceptance in the transaction pool. from, _ := types.Sender(env.signer, tx) @@ -430,7 +437,7 @@ func (miner *Miner) fillTransactions(interrupt *atomic.Int32, env *environment) filter.BaseFee = uint256.MustFromBig(env.header.BaseFee) } if env.header.ExcessBlobGas != nil { - filter.BlobFee = uint256.MustFromBig(eip4844.CalcBlobFee(*env.header.ExcessBlobGas)) + filter.BlobFee = uint256.MustFromBig(eip4844.CalcBlobFee(miner.chainConfig, env.header)) } filter.OnlyPlainTxs, filter.OnlyBlobTxs = true, false pendingPlainTxs := miner.txpool.Pending(filter) diff --git a/params/config.go b/params/config.go index 9269cb2310..0dbe4c509a 100644 --- a/params/config.go +++ b/params/config.go @@ -17,6 +17,7 @@ package params import ( + "errors" "fmt" "math" "math/big" @@ -60,6 +61,9 @@ var ( CancunTime: newUint64(1710338135), DepositContractAddress: common.HexToAddress("0x00000000219ab540356cbb839cbe05303d7705fa"), Ethash: new(EthashConfig), + BlobScheduleConfig: &BlobScheduleConfig{ + Cancun: DefaultCancunBlobConfig, + }, } // HoleskyChainConfig contains the chain parameters to run a node on the Holesky test network. 
HoleskyChainConfig = &ChainConfig{ @@ -84,6 +88,9 @@ var ( ShanghaiTime: newUint64(1696000704), CancunTime: newUint64(1707305664), Ethash: new(EthashConfig), + BlobScheduleConfig: &BlobScheduleConfig{ + Cancun: DefaultCancunBlobConfig, + }, } // SepoliaChainConfig contains the chain parameters to run a node on the Sepolia test network. SepoliaChainConfig = &ChainConfig{ @@ -108,6 +115,9 @@ var ( ShanghaiTime: newUint64(1677557088), CancunTime: newUint64(1706655072), Ethash: new(EthashConfig), + BlobScheduleConfig: &BlobScheduleConfig{ + Cancun: DefaultCancunBlobConfig, + }, } // AllEthashProtocolChanges contains every protocol change (EIPs) introduced // and accepted by the Ethereum core developers into the Ethash consensus. @@ -158,6 +168,10 @@ var ( CancunTime: newUint64(0), TerminalTotalDifficulty: big.NewInt(0), PragueTime: newUint64(0), + BlobScheduleConfig: &BlobScheduleConfig{ + Cancun: DefaultCancunBlobConfig, + Prague: DefaultPragueBlobConfig, + }, } // AllCliqueProtocolChanges contains every protocol change (EIPs) introduced @@ -248,6 +262,10 @@ var ( TerminalTotalDifficulty: big.NewInt(0), Ethash: new(EthashConfig), Clique: nil, + BlobScheduleConfig: &BlobScheduleConfig{ + Cancun: DefaultCancunBlobConfig, + Prague: DefaultPragueBlobConfig, + }, } // NonActivatedConfig defines the chain configuration without activating @@ -282,6 +300,26 @@ var ( TestRules = TestChainConfig.Rules(new(big.Int), false, 0) ) +var ( + // DefaultCancunBlobConfig is the default blob configuration for the Cancun fork. + DefaultCancunBlobConfig = &BlobConfig{ + Target: 3, + Max: 6, + UpdateFraction: 3338477, + } + // DefaultPragueBlobConfig is the default blob configuration for the Prague fork. + DefaultPragueBlobConfig = &BlobConfig{ + Target: 6, + Max: 9, + UpdateFraction: 5007716, + } + // DefaultBlobSchedule is the latest configured blob schedule for test chains. 
+ DefaultBlobSchedule = &BlobScheduleConfig{ + Cancun: DefaultCancunBlobConfig, + Prague: DefaultPragueBlobConfig, + } +) + // NetworkNames are user friendly names to use in the chain spec banner. var NetworkNames = map[string]string{ MainnetChainConfig.ChainID.String(): "mainnet", @@ -346,8 +384,9 @@ type ChainConfig struct { EnableVerkleAtGenesis bool `json:"enableVerkleAtGenesis,omitempty"` // Various consensus engines - Ethash *EthashConfig `json:"ethash,omitempty"` - Clique *CliqueConfig `json:"clique,omitempty"` + Ethash *EthashConfig `json:"ethash,omitempty"` + Clique *CliqueConfig `json:"clique,omitempty"` + BlobScheduleConfig *BlobScheduleConfig `json:"blobSchedule,omitempty"` } // EthashConfig is the consensus engine configs for proof-of-work based sealing. @@ -447,6 +486,20 @@ func (c *ChainConfig) Description() string { return banner } +// BlobConfig specifies the target and max blobs per block for the associated fork. +type BlobConfig struct { + Target int `json:"target"` + Max int `json:"max"` + UpdateFraction uint64 `json:"baseFeeUpdateFraction"` +} + +// BlobScheduleConfig determines target and max number of blobs allow per fork. +type BlobScheduleConfig struct { + Cancun *BlobConfig `json:"cancun,omitempty"` + Prague *BlobConfig `json:"prague,omitempty"` + Verkle *BlobConfig `json:"verkle,omitempty"` +} + // IsHomestead returns whether num is either equal to the homestead block or greater. func (c *ChainConfig) IsHomestead(num *big.Int) bool { return isBlockForked(c.HomesteadBlock, num) @@ -662,6 +715,45 @@ func (c *ChainConfig) CheckConfigForkOrder() error { lastFork = cur } } + + // Check that all forks with blobs explicitly define the blob schedule configuration. 
+ bsc := c.BlobScheduleConfig + if bsc == nil { + bsc = new(BlobScheduleConfig) + } + for _, cur := range []struct { + name string + timestamp *uint64 + config *BlobConfig + }{ + {name: "cancun", timestamp: c.CancunTime, config: bsc.Cancun}, + {name: "prague", timestamp: c.PragueTime, config: bsc.Prague}, + } { + if cur.config != nil { + if err := cur.config.validate(); err != nil { + return fmt.Errorf("invalid blob configuration for fork %s: %v", cur.name, err) + } + } + if cur.timestamp != nil { + // If the fork is configured, a blob schedule must be defined for it. + if cur.config == nil { + return fmt.Errorf("unsupported fork configuration: missing blob configuration entry for %v in schedule", cur.name) + } + } + } + return nil +} + +func (bc *BlobConfig) validate() error { + if bc.Max < 0 { + return errors.New("max < 0") + } + if bc.Target < 0 { + return errors.New("target < 0") + } + if bc.UpdateFraction == 0 { + return errors.New("update fraction must be defined and non-zero") + } return nil } diff --git a/params/protocol_params.go b/params/protocol_params.go index c9179634a5..6b06dadaef 100644 --- a/params/protocol_params.go +++ b/params/protocol_params.go @@ -172,12 +172,8 @@ const ( BlobTxFieldElementsPerBlob = 4096 // Number of field elements stored in a single data blob BlobTxBlobGasPerBlob = 1 << 17 // Gas consumption of a single data blob (== blob byte size) BlobTxMinBlobGasprice = 1 // Minimum gas price for data blobs - BlobTxBlobGaspriceUpdateFraction = 3338477 // Controls the maximum rate of change for blob gas price BlobTxPointEvaluationPrecompileGas = 50000 // Gas price for the point evaluation precompile. - BlobTxTargetBlobGasPerBlock = 3 * BlobTxBlobGasPerBlob // Target consumable blob gas for data blobs per block (for 1559-like pricing) - MaxBlobGasPerBlock = 6 * BlobTxBlobGasPerBlob // Maximum consumable blob gas for data blobs per block - HistoryServeWindow = 8192 // Number of blocks to serve historical block hashes for, EIP-2935. 
) diff --git a/tests/init.go b/tests/init.go index 8429f38e44..ee8d354224 100644 --- a/tests/init.go +++ b/tests/init.go @@ -334,6 +334,9 @@ var Forks = map[string]*params.ChainConfig{ TerminalTotalDifficulty: big.NewInt(0), ShanghaiTime: u64(0), CancunTime: u64(0), + BlobScheduleConfig: ¶ms.BlobScheduleConfig{ + Cancun: params.DefaultCancunBlobConfig, + }, }, "ShanghaiToCancunAtTime15k": { ChainID: big.NewInt(1), @@ -353,6 +356,9 @@ var Forks = map[string]*params.ChainConfig{ TerminalTotalDifficulty: big.NewInt(0), ShanghaiTime: u64(0), CancunTime: u64(15_000), + BlobScheduleConfig: ¶ms.BlobScheduleConfig{ + Cancun: params.DefaultCancunBlobConfig, + }, }, "Prague": { ChainID: big.NewInt(1), @@ -374,6 +380,10 @@ var Forks = map[string]*params.ChainConfig{ CancunTime: u64(0), PragueTime: u64(0), DepositContractAddress: params.MainnetChainConfig.DepositContractAddress, + BlobScheduleConfig: ¶ms.BlobScheduleConfig{ + Cancun: params.DefaultCancunBlobConfig, + Prague: params.DefaultPragueBlobConfig, + }, }, "CancunToPragueAtTime15k": { ChainID: big.NewInt(1), @@ -395,6 +405,10 @@ var Forks = map[string]*params.ChainConfig{ CancunTime: u64(0), PragueTime: u64(15_000), DepositContractAddress: params.MainnetChainConfig.DepositContractAddress, + BlobScheduleConfig: ¶ms.BlobScheduleConfig{ + Cancun: params.DefaultCancunBlobConfig, + Prague: params.DefaultPragueBlobConfig, + }, }, "Osaka": { ChainID: big.NewInt(1), diff --git a/tests/state_test.go b/tests/state_test.go index da4b8ef79d..0c9553c075 100644 --- a/tests/state_test.go +++ b/tests/state_test.go @@ -303,7 +303,7 @@ func runBenchmark(b *testing.B, t *StateTest) { // Prepare the EVM. 
txContext := core.NewEVMTxContext(msg) - context := core.NewEVMBlockContext(block.Header(), nil, &t.json.Env.Coinbase) + context := core.NewEVMBlockContext(block.Header(), &dummyChain{config: config}, &t.json.Env.Coinbase) context.GetHash = vmTestBlockHash context.BaseFee = baseFee evm := vm.NewEVM(context, state.StateDB, config, vmconfig) diff --git a/tests/state_test_util.go b/tests/state_test_util.go index e658b62ebf..24caf41ed9 100644 --- a/tests/state_test_util.go +++ b/tests/state_test_util.go @@ -28,6 +28,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/common/math" + "github.com/ethereum/go-ethereum/consensus" "github.com/ethereum/go-ethereum/consensus/misc/eip4844" "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/rawdb" @@ -275,13 +276,14 @@ func (t *StateTest) RunNoVerify(subtest StateSubtest, vmconfig vm.Config, snapsh return st, common.Hash{}, 0, err } - { // Blob transactions may be present after the Cancun fork. - // In production, - // - the header is verified against the max in eip4844.go:VerifyEIP4844Header - // - the block body is verified against the header in block_validator.go:ValidateBody - // Here, we just do this shortcut smaller fix, since state tests do not - // utilize those codepaths - if len(msg.BlobHashes)*params.BlobTxBlobGasPerBlob > params.MaxBlobGasPerBlock { + // Blob transactions may be present after the Cancun fork. + // In production, + // - the header is verified against the max in eip4844.go:VerifyEIP4844Header + // - the block body is verified against the header in block_validator.go:ValidateBody + // Here, we just do this shortcut smaller fix, since state tests do not + // utilize those codepaths. 
+ if config.IsCancun(new(big.Int), block.Time()) { + if len(msg.BlobHashes) > eip4844.MaxBlobsPerBlock(config, block.Time()) { return st, common.Hash{}, 0, errors.New("blob gas exceeds maximum") } } @@ -299,7 +301,7 @@ func (t *StateTest) RunNoVerify(subtest StateSubtest, vmconfig vm.Config, snapsh } // Prepare the EVM. - context := core.NewEVMBlockContext(block.Header(), nil, &t.json.Env.Coinbase) + context := core.NewEVMBlockContext(block.Header(), &dummyChain{config: config}, &t.json.Env.Coinbase) context.GetHash = vmTestBlockHash context.BaseFee = baseFee context.Random = nil @@ -312,8 +314,13 @@ func (t *StateTest) RunNoVerify(subtest StateSubtest, vmconfig vm.Config, snapsh context.Difficulty = big.NewInt(0) } if config.IsCancun(new(big.Int), block.Time()) && t.json.Env.ExcessBlobGas != nil { - context.BlobBaseFee = eip4844.CalcBlobFee(*t.json.Env.ExcessBlobGas) + header := &types.Header{ + Time: block.Time(), + ExcessBlobGas: t.json.Env.ExcessBlobGas, + } + context.BlobBaseFee = eip4844.CalcBlobFee(config, header) } + evm := vm.NewEVM(context, st.StateDB, config, vmconfig) if tracer := vmconfig.Tracer; tracer != nil && tracer.OnTxStart != nil { @@ -543,3 +550,12 @@ func (st *StateTestState) Close() { st.Snapshots = nil } } + +// dummyChain implements the core.ChainContext interface. +type dummyChain struct { + config *params.ChainConfig +} + +func (d *dummyChain) Engine() consensus.Engine { return nil } +func (d *dummyChain) GetHeader(h common.Hash, n uint64) *types.Header { return nil } +func (d *dummyChain) Config() *params.ChainConfig { return d.config } From e332431cb28c6d374504fc5343fafaf3157a6aee Mon Sep 17 00:00:00 2001 From: Felix Lange Date: Tue, 4 Feb 2025 17:16:26 +0100 Subject: [PATCH 05/17] core: assign default difficulty to zero for chain without ethash (#31067) I hit this case while trying something with the simulated backend. The EVM only enables instruction set forks after the merge when 'Random' is set. 
In the simulated backend, the random value will be set via the engine API for all blocks after genesis. But for the genesis block itself, the random value will not be assigned in the vm.BlockContext because the genesis has a non-zero difficulty. For my case, this meant that estimateGas did not work for the first transaction sent on the simulated chain, since the contract contained a PUSH0 instruction. This could also be fixed by explicitly configuring a zero difficulty in the simulated backend. However, I think that zero difficulty is a better default these days. --------- Co-authored-by: lightclient --- core/genesis.go | 8 ++++++-- core/genesis_test.go | 4 ++-- 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/core/genesis.go b/core/genesis.go index d1ee1b6e92..8f71f9ef1e 100644 --- a/core/genesis.go +++ b/core/genesis.go @@ -464,8 +464,12 @@ func (g *Genesis) toBlockWithRoot(root common.Hash) *types.Block { if g.GasLimit == 0 { head.GasLimit = params.GenesisGasLimit } - if g.Difficulty == nil && g.Mixhash == (common.Hash{}) { - head.Difficulty = params.GenesisDifficulty + if g.Difficulty == nil { + if g.Config != nil && g.Config.Ethash == nil { + head.Difficulty = big.NewInt(0) + } else if g.Mixhash == (common.Hash{}) { + head.Difficulty = params.GenesisDifficulty + } } if g.Config != nil && g.Config.IsLondon(common.Big0) { if g.BaseFee != nil { diff --git a/core/genesis_test.go b/core/genesis_test.go index 3e120a9307..ad75afcd9e 100644 --- a/core/genesis_test.go +++ b/core/genesis_test.go @@ -44,14 +44,14 @@ func testSetupGenesis(t *testing.T, scheme string) { var ( customghash = common.HexToHash("0x89c99d90b79719238d2645c7642f2c9295246e80775b38cfd162b696817fbd50") customg = Genesis{ - Config: ¶ms.ChainConfig{HomesteadBlock: big.NewInt(3)}, + Config: ¶ms.ChainConfig{HomesteadBlock: big.NewInt(3), Ethash: ¶ms.EthashConfig{}}, Alloc: types.GenesisAlloc{ {1}: {Balance: big.NewInt(1), Storage: map[common.Hash]common.Hash{{1}: {1}}}, }, } oldcustomg = 
customg ) - oldcustomg.Config = &params.ChainConfig{HomesteadBlock: big.NewInt(2)} + oldcustomg.Config = &params.ChainConfig{HomesteadBlock: big.NewInt(2), Ethash: &params.EthashConfig{}} tests := []struct { name string From 7c7b7f6ab1b3f56c527ce1fb39db3061a9ec0ac6 Mon Sep 17 00:00:00 2001 From: Martin HS Date: Tue, 4 Feb 2025 17:23:01 +0100 Subject: [PATCH 06/17] core/txpool: remove locals-tracking from txpools (#30559) Replaces #29297, descendant from #27535 --------- This PR removes `locals` as a concept from transaction pools. Therefore, the pool now acts as a very good simulation/approximation of how our peers' pools behave. What this PR does instead is implement a locals-tracker, which basically is a little thing which, from time to time, asks the pool "did you forget this transaction?". If it did, the tracker resubmits it. If the txpool _had_ forgotten it, chances are that the peers had also forgotten it. It will be propagated again. Doing this change means that we can simplify the pool internals, quite a lot. ### The semantics of `local` Historically, there have been two features, or use cases, that have been combined into the concept of `locals`. 1. "I want my local node to remember this transaction indefinitely, and resubmit to the network occasionally" 2. "I want this (valid) transaction included to be top-prio for my miner" This PR splits these features up, let's call them `1: local` and `2: prio`. The `prio` is not actually an individual transaction, but rather a set of `address`es to prioritize. The attribute `local` means it will be tracked, and `prio` means it will be prioritized by the miner. For `local`: any transaction received via the RPC is marked as `local`, and tracked by the tracker. For `prio`: any transactions from this sender are included first, when building a block. The existing commandline-flag `--txpool.locals` sets the set of `prio` addresses.
--------- Co-authored-by: Gary Rong --- core/txpool/blobpool/blobpool.go | 9 +- core/txpool/legacypool/legacypool.go | 385 +++--------- core/txpool/legacypool/legacypool_test.go | 575 +++--------------- core/txpool/legacypool/list.go | 63 +- core/txpool/{legacypool => locals}/journal.go | 2 +- core/txpool/locals/tx_tracker.go | 212 +++++++ core/txpool/subpool.go | 5 +- core/txpool/txpool.go | 21 +- eth/api_backend.go | 5 +- eth/backend.go | 20 +- eth/catalyst/api_test.go | 18 +- eth/catalyst/simulated_beacon_test.go | 52 +- eth/handler.go | 4 +- eth/handler_eth_test.go | 6 +- eth/handler_test.go | 2 +- miner/miner.go | 8 + miner/payload_building_test.go | 2 +- miner/worker.go | 31 +- 18 files changed, 493 insertions(+), 927 deletions(-) rename core/txpool/{legacypool => locals}/journal.go (99%) create mode 100644 core/txpool/locals/tx_tracker.go diff --git a/core/txpool/blobpool/blobpool.go b/core/txpool/blobpool/blobpool.go index 78cf2c05af..51b8b67c61 100644 --- a/core/txpool/blobpool/blobpool.go +++ b/core/txpool/blobpool/blobpool.go @@ -1269,7 +1269,7 @@ func (p *BlobPool) GetBlobs(vhashes []common.Hash) ([]*kzg4844.Blob, []*kzg4844. // Add inserts a set of blob transactions into the pool if they pass validation (both // consensus validity and pool restrictions). -func (p *BlobPool) Add(txs []*types.Transaction, local bool, sync bool) []error { +func (p *BlobPool) Add(txs []*types.Transaction, sync bool) []error { var ( adds = make([]*types.Transaction, 0, len(txs)) errs = make([]error, len(txs)) @@ -1701,13 +1701,6 @@ func (p *BlobPool) ContentFrom(addr common.Address) ([]*types.Transaction, []*ty return []*types.Transaction{}, []*types.Transaction{} } -// Locals retrieves the accounts currently considered local by the pool. -// -// There is no notion of local accounts in the blob pool. 
-func (p *BlobPool) Locals() []common.Address { - return []common.Address{} -} - // Status returns the known status (unknown/pending/queued) of a transaction // identified by their hashes. func (p *BlobPool) Status(hash common.Hash) txpool.TxStatus { diff --git a/core/txpool/legacypool/legacypool.go b/core/txpool/legacypool/legacypool.go index 3d780ad373..1440af5440 100644 --- a/core/txpool/legacypool/legacypool.go +++ b/core/txpool/legacypool/legacypool.go @@ -99,7 +99,6 @@ var ( pendingGauge = metrics.NewRegisteredGauge("txpool/pending", nil) queuedGauge = metrics.NewRegisteredGauge("txpool/queued", nil) - localGauge = metrics.NewRegisteredGauge("txpool/local", nil) slotsGauge = metrics.NewRegisteredGauge("txpool/slots", nil) reheapTimer = metrics.NewRegisteredTimer("txpool/reheap", nil) @@ -159,10 +158,6 @@ var DefaultConfig = Config{ // unreasonable or unworkable. func (config *Config) sanitize() Config { conf := *config - if conf.Rejournal < time.Second { - log.Warn("Sanitizing invalid txpool journal time", "provided", conf.Rejournal, "updated", time.Second) - conf.Rejournal = time.Second - } if conf.PriceLimit < 1 { log.Warn("Sanitizing invalid txpool price limit", "provided", conf.PriceLimit, "updated", DefaultConfig.PriceLimit) conf.PriceLimit = DefaultConfig.PriceLimit @@ -214,9 +209,6 @@ type LegacyPool struct { currentState *state.StateDB // Current state in the blockchain head pendingNonces *noncer // Pending state tracking virtual nonces - locals *accountSet // Set of local transaction to exempt from eviction rules - journal *journal // Journal of local transaction to back up to disk - reserve txpool.AddressReserver // Address reserver to ensure exclusivity across subpools pending map[common.Address]*list // All currently processable transactions queue map[common.Address]*list // Queued but non-processable transactions @@ -262,16 +254,8 @@ func New(config Config, chain BlockChain) *LegacyPool { reorgShutdownCh: make(chan struct{}), initDoneCh: 
make(chan struct{}), } - pool.locals = newAccountSet(pool.signer) - for _, addr := range config.Locals { - log.Info("Setting new local account", "address", addr) - pool.locals.add(addr) - } pool.priced = newPricedList(pool.all) - if !config.NoLocals && config.Journal != "" { - pool.journal = newTxJournal(config.Journal) - } return pool } @@ -287,8 +271,7 @@ func (pool *LegacyPool) Filter(tx *types.Transaction) bool { } // Init sets the gas price needed to keep a transaction in the pool and the chain -// head to allow balance / nonce checks. The transaction journal will be loaded -// from disk and filtered based on the provided starting settings. The internal +// head to allow balance / nonce checks. The internal // goroutines will be spun up and the pool deemed operational afterwards. func (pool *LegacyPool) Init(gasTip uint64, head *types.Header, reserve txpool.AddressReserver) error { // Set the address reserver to request exclusive access to pooled accounts @@ -311,20 +294,9 @@ func (pool *LegacyPool) Init(gasTip uint64, head *types.Header, reserve txpool.A pool.currentState = statedb pool.pendingNonces = newNoncer(statedb) - // Start the reorg loop early, so it can handle requests generated during - // journal loading. 
pool.wg.Add(1) go pool.scheduleReorgLoop() - // If local transactions and journaling is enabled, load from disk - if pool.journal != nil { - if err := pool.journal.load(pool.addLocals); err != nil { - log.Warn("Failed to load transaction journal", "err", err) - } - if err := pool.journal.rotate(pool.local()); err != nil { - log.Warn("Failed to rotate transaction journal", "err", err) - } - } pool.wg.Add(1) go pool.loop() return nil @@ -340,13 +312,11 @@ func (pool *LegacyPool) loop() { prevPending, prevQueued, prevStales int // Start the stats reporting and transaction eviction tickers - report = time.NewTicker(statsReportInterval) - evict = time.NewTicker(evictionInterval) - journal = time.NewTicker(pool.config.Rejournal) + report = time.NewTicker(statsReportInterval) + evict = time.NewTicker(evictionInterval) ) defer report.Stop() defer evict.Stop() - defer journal.Stop() // Notify tests that the init phase is done close(pool.initDoneCh) @@ -372,11 +342,7 @@ func (pool *LegacyPool) loop() { case <-evict.C: pool.mu.Lock() for addr := range pool.queue { - // Skip local transactions from the eviction mechanism - if pool.locals.contains(addr) { - continue - } - // Any non-locals old enough should be removed + // Any old enough should be removed if time.Since(pool.beats[addr]) > pool.config.Lifetime { list := pool.queue[addr].Flatten() for _, tx := range list { @@ -386,16 +352,6 @@ func (pool *LegacyPool) loop() { } } pool.mu.Unlock() - - // Handle local transaction journal rotation - case <-journal.C: - if pool.journal != nil { - pool.mu.Lock() - if err := pool.journal.rotate(pool.local()); err != nil { - log.Warn("Failed to rotate local tx journal", "err", err) - } - pool.mu.Unlock() - } } } } @@ -406,9 +362,6 @@ func (pool *LegacyPool) Close() error { close(pool.reorgShutdownCh) pool.wg.Wait() - if pool.journal != nil { - pool.journal.close() - } log.Info("Transaction pool stopped") return nil } @@ -444,7 +397,7 @@ func (pool *LegacyPool) SetGasTip(tip *big.Int) { 
// If the min miner fee increased, remove transactions below the new threshold if newTip.Cmp(old) > 0 { // pool.priced is sorted by GasFeeCap, so we have to iterate through pool.all instead - drop := pool.all.RemotesBelowTip(tip) + drop := pool.all.TxsBelowTip(tip) for _, tx := range drop { pool.removeTx(tx.Hash(), false, true) } @@ -549,7 +502,7 @@ func (pool *LegacyPool) Pending(filter txpool.PendingFilter) map[common.Address] txs := list.Flatten() // If the miner requests tip enforcement, cap the lists now - if minTipBig != nil && !pool.locals.contains(addr) { + if minTipBig != nil { for i, tx := range txs { if tx.EffectiveGasTipIntCmp(minTipBig, baseFeeBig) < 0 { txs = txs[:i] @@ -577,35 +530,11 @@ func (pool *LegacyPool) Pending(filter txpool.PendingFilter) map[common.Address] return pending } -// Locals retrieves the accounts currently considered local by the pool. -func (pool *LegacyPool) Locals() []common.Address { - pool.mu.Lock() - defer pool.mu.Unlock() - - return pool.locals.flatten() -} - -// local retrieves all currently known local transactions, grouped by origin -// account and sorted by nonce. The returned transaction set is a copy and can be -// freely modified by calling code. -func (pool *LegacyPool) local() map[common.Address]types.Transactions { - txs := make(map[common.Address]types.Transactions) - for addr := range pool.locals.accounts { - if pending := pool.pending[addr]; pending != nil { - txs[addr] = append(txs[addr], pending.Flatten()...) - } - if queued := pool.queue[addr]; queued != nil { - txs[addr] = append(txs[addr], queued.Flatten()...) - } - } - return txs -} - // validateTxBasics checks whether a transaction is valid according to the consensus // rules, but does not check state-dependent validation such as sufficient balance. // This check is meant as an early check which only needs to be performed once, // and does not require the pool mutex to be held. 
-func (pool *LegacyPool) validateTxBasics(tx *types.Transaction, local bool) error { +func (pool *LegacyPool) validateTxBasics(tx *types.Transaction) error { opts := &txpool.ValidationOptions{ Config: pool.chainconfig, Accept: 0 | @@ -615,9 +544,6 @@ func (pool *LegacyPool) validateTxBasics(tx *types.Transaction, local bool) erro MaxSize: txMaxSize, MinTip: pool.gasTip.Load().ToBig(), } - if local { - opts.MinTip = new(big.Int) - } if err := txpool.ValidateTransaction(tx, pool.currentHead.Load(), pool.signer, opts); err != nil { return err } @@ -665,11 +591,7 @@ func (pool *LegacyPool) validateTx(tx *types.Transaction) error { // add validates a transaction and inserts it into the non-executable queue for later // pending promotion and execution. If the transaction is a replacement for an already // pending or queued one, it overwrites the previous transaction if its price is higher. -// -// If a newly added transaction is marked as local, its sending account will be -// added to the allowlist, preventing any associated transaction from being dropped -// out of the pool due to pricing constraints. -func (pool *LegacyPool) add(tx *types.Transaction, local bool) (replaced bool, err error) { +func (pool *LegacyPool) add(tx *types.Transaction) (replaced bool, err error) { // If the transaction is already known, discard it hash := tx.Hash() if pool.all.Get(hash) != nil { @@ -677,9 +599,6 @@ func (pool *LegacyPool) add(tx *types.Transaction, local bool) (replaced bool, e knownTxMeter.Mark(1) return false, txpool.ErrAlreadyKnown } - // Make the local flag. If it's from local source or it's from the network but - // the sender is marked as local previously, treat it as the local transaction. 
- isLocal := local || pool.locals.containsTx(tx) // If the transaction fails basic validation, discard it if err := pool.validateTx(tx); err != nil { @@ -715,7 +634,7 @@ func (pool *LegacyPool) add(tx *types.Transaction, local bool) (replaced bool, e // If the transaction pool is full, discard underpriced transactions if uint64(pool.all.Slots()+numSlots(tx)) > pool.config.GlobalSlots+pool.config.GlobalQueue { // If the new transaction is underpriced, don't accept it - if !isLocal && pool.priced.Underpriced(tx) { + if pool.priced.Underpriced(tx) { log.Trace("Discarding underpriced transaction", "hash", hash, "gasTipCap", tx.GasTipCap(), "gasFeeCap", tx.GasFeeCap()) underpricedTxMeter.Mark(1) return false, txpool.ErrUnderpriced @@ -731,19 +650,18 @@ func (pool *LegacyPool) add(tx *types.Transaction, local bool) (replaced bool, e } // New transaction is better than our worse ones, make room for it. - // If it's a local transaction, forcibly discard all available transactions. - // Otherwise if we can't make enough room for new one, abort the operation. - drop, success := pool.priced.Discard(pool.all.Slots()-int(pool.config.GlobalSlots+pool.config.GlobalQueue)+numSlots(tx), isLocal) + // If we can't make enough room for new one, abort the operation. + drop, success := pool.priced.Discard(pool.all.Slots() - int(pool.config.GlobalSlots+pool.config.GlobalQueue) + numSlots(tx)) // Special case, we still can't make the room for the new remote one. 
- if !isLocal && !success { + if !success { log.Trace("Discarding overflown transaction", "hash", hash) overflowedTxMeter.Mark(1) return false, ErrTxPoolOverflow } // If the new transaction is a future transaction it should never churn pending transactions - if !isLocal && pool.isGapped(from, tx) { + if pool.isGapped(from, tx) { var replacesPending bool for _, dropTx := range drop { dropSender, _ := types.Sender(pool.signer, dropTx) @@ -755,7 +673,7 @@ func (pool *LegacyPool) add(tx *types.Transaction, local bool) (replaced bool, e // Add all transactions back to the priced queue if replacesPending { for _, dropTx := range drop { - pool.priced.Put(dropTx, false) + pool.priced.Put(dropTx) } log.Trace("Discarding future transaction replacing pending tx", "hash", hash) return false, txpool.ErrFutureReplacePending @@ -788,9 +706,8 @@ func (pool *LegacyPool) add(tx *types.Transaction, local bool) (replaced bool, e pool.priced.Removed(1) pendingReplaceMeter.Mark(1) } - pool.all.Add(tx, isLocal) - pool.priced.Put(tx, isLocal) - pool.journalTx(from, tx) + pool.all.Add(tx) + pool.priced.Put(tx) pool.queueTxEvent(tx) log.Trace("Pooled new executable transaction", "hash", hash, "from", from, "to", tx.To()) @@ -799,20 +716,10 @@ func (pool *LegacyPool) add(tx *types.Transaction, local bool) (replaced bool, e return old != nil, nil } // New transaction isn't replacing a pending one, push into queue - replaced, err = pool.enqueueTx(hash, tx, isLocal, true) + replaced, err = pool.enqueueTx(hash, tx, true) if err != nil { return false, err } - // Mark local addresses and journal local transactions - if local && !pool.locals.contains(from) { - log.Info("Setting new local account", "address", from) - pool.locals.add(from) - pool.priced.Removed(pool.all.RemoteToLocals(pool.locals)) // Migrate the remotes if it's marked as local first time. 
- } - if isLocal { - localGauge.Inc(1) - } - pool.journalTx(from, tx) log.Trace("Pooled new future transaction", "hash", hash, "from", from, "to", tx.To()) return replaced, nil @@ -845,7 +752,7 @@ func (pool *LegacyPool) isGapped(from common.Address, tx *types.Transaction) boo // enqueueTx inserts a new transaction into the non-executable transaction queue. // // Note, this method assumes the pool lock is held! -func (pool *LegacyPool) enqueueTx(hash common.Hash, tx *types.Transaction, local bool, addAll bool) (bool, error) { +func (pool *LegacyPool) enqueueTx(hash common.Hash, tx *types.Transaction, addAll bool) (bool, error) { // Try to insert the transaction into the future queue from, _ := types.Sender(pool.signer, tx) // already validated if pool.queue[from] == nil { @@ -872,8 +779,8 @@ func (pool *LegacyPool) enqueueTx(hash common.Hash, tx *types.Transaction, local log.Error("Missing transaction in lookup set, please report the issue", "hash", hash) } if addAll { - pool.all.Add(tx, local) - pool.priced.Put(tx, local) + pool.all.Add(tx) + pool.priced.Put(tx) } // If we never record the heartbeat, do it right now. if _, exist := pool.beats[from]; !exist { @@ -882,18 +789,6 @@ func (pool *LegacyPool) enqueueTx(hash common.Hash, tx *types.Transaction, local return old != nil, nil } -// journalTx adds the specified transaction to the local disk journal if it is -// deemed to have been sent from a local account. -func (pool *LegacyPool) journalTx(from common.Address, tx *types.Transaction) { - // Only journal if it's enabled and the transaction is local - if pool.journal == nil || !pool.locals.contains(from) { - return - } - if err := pool.journal.insert(tx); err != nil { - log.Warn("Failed to journal local transaction", "err", err) - } -} - // promoteTx adds a transaction to the pending (processable) list of transactions // and returns whether it was inserted or an older was better. 
// @@ -930,28 +825,13 @@ func (pool *LegacyPool) promoteTx(addr common.Address, hash common.Hash, tx *typ return true } -// addLocals enqueues a batch of transactions into the pool if they are valid, marking the -// senders as local ones, ensuring they go around the local pricing constraints. -// -// This method is used to add transactions from the RPC API and performs synchronous pool -// reorganization and event propagation. -func (pool *LegacyPool) addLocals(txs []*types.Transaction) []error { - return pool.Add(txs, !pool.config.NoLocals, true) -} - -// addLocal enqueues a single local transaction into the pool if it is valid. This is -// a convenience wrapper around addLocals. -func (pool *LegacyPool) addLocal(tx *types.Transaction) error { - return pool.addLocals([]*types.Transaction{tx})[0] -} - -// addRemotes enqueues a batch of transactions into the pool if they are valid. If the -// senders are not among the locally tracked ones, full pricing constraints will apply. +// addRemotes enqueues a batch of transactions into the pool if they are valid. +// Full pricing constraints will apply. // // This method is used to add transactions from the p2p network and does not wait for pool // reorganization and internal event propagation. func (pool *LegacyPool) addRemotes(txs []*types.Transaction) []error { - return pool.Add(txs, false, false) + return pool.Add(txs, false) } // addRemote enqueues a single transaction into the pool if it is valid. This is a convenience @@ -962,23 +842,19 @@ func (pool *LegacyPool) addRemote(tx *types.Transaction) error { // addRemotesSync is like addRemotes, but waits for pool reorganization. Tests use this method. func (pool *LegacyPool) addRemotesSync(txs []*types.Transaction) []error { - return pool.Add(txs, false, true) + return pool.Add(txs, true) } // This is like addRemotes with a single transaction, but waits for pool reorganization. Tests use this method. 
func (pool *LegacyPool) addRemoteSync(tx *types.Transaction) error { - return pool.Add([]*types.Transaction{tx}, false, true)[0] + return pool.Add([]*types.Transaction{tx}, true)[0] } -// Add enqueues a batch of transactions into the pool if they are valid. Depending -// on the local flag, full pricing constraints will or will not be applied. +// Add enqueues a batch of transactions into the pool if they are valid. // // If sync is set, the method will block until all internal maintenance related // to the add is finished. Only use this during tests for determinism! -func (pool *LegacyPool) Add(txs []*types.Transaction, local, sync bool) []error { - // Do not treat as local if local transactions have been disabled - local = local && !pool.config.NoLocals - +func (pool *LegacyPool) Add(txs []*types.Transaction, sync bool) []error { // Filter out known ones without obtaining the pool lock or recovering signatures var ( errs = make([]error, len(txs)) @@ -994,7 +870,7 @@ func (pool *LegacyPool) Add(txs []*types.Transaction, local, sync bool) []error // Exclude transactions with basic errors, e.g invalid signatures and // insufficient intrinsic gas as soon as possible and cache senders // in transactions before obtaining lock - if err := pool.validateTxBasics(tx, local); err != nil { + if err := pool.validateTxBasics(tx); err != nil { errs[i] = err log.Trace("Discarding invalid transaction", "hash", tx.Hash(), "err", err) invalidTxMeter.Mark(1) @@ -1009,7 +885,7 @@ func (pool *LegacyPool) Add(txs []*types.Transaction, local, sync bool) []error // Process all the new transaction and merge any errors into the original slice pool.mu.Lock() - newErrs, dirtyAddrs := pool.addTxsLocked(news, local) + newErrs, dirtyAddrs := pool.addTxsLocked(news) pool.mu.Unlock() var nilSlot = 0 @@ -1030,11 +906,11 @@ func (pool *LegacyPool) Add(txs []*types.Transaction, local, sync bool) []error // addTxsLocked attempts to queue a batch of transactions if they are valid. 
// The transaction pool lock must be held. -func (pool *LegacyPool) addTxsLocked(txs []*types.Transaction, local bool) ([]error, *accountSet) { +func (pool *LegacyPool) addTxsLocked(txs []*types.Transaction) ([]error, *accountSet) { dirty := newAccountSet(pool.signer) errs := make([]error, len(txs)) for i, tx := range txs { - replaced, err := pool.add(tx, local) + replaced, err := pool.add(tx) errs[i] = err if err == nil && !replaced { dirty.addTx(tx) @@ -1126,9 +1002,6 @@ func (pool *LegacyPool) removeTx(hash common.Hash, outofbound bool, unreserve bo if outofbound { pool.priced.Removed(1) } - if pool.locals.contains(addr) { - localGauge.Dec(1) - } // Remove the transaction from the pending lists and reset the account nonce if pending := pool.pending[addr]; pending != nil { if removed, invalids := pending.Remove(tx); removed { @@ -1139,7 +1012,7 @@ func (pool *LegacyPool) removeTx(hash common.Hash, outofbound bool, unreserve bo // Postpone any invalidated transactions for _, tx := range invalids { // Internal shuffle shouldn't touch the lookup set. - pool.enqueueTx(tx.Hash(), tx, false, false) + pool.enqueueTx(tx.Hash(), tx, false) } // Update the account nonce if needed pool.pendingNonces.setIfLower(addr, tx.Nonce()) @@ -1204,7 +1077,7 @@ func (pool *LegacyPool) scheduleReorgLoop() { launchNextRun bool reset *txpoolResetRequest dirtyAccounts *accountSet - queuedEvents = make(map[common.Address]*sortedMap) + queuedEvents = make(map[common.Address]*SortedMap) ) for { // Launch next background reorg if needed @@ -1217,7 +1090,7 @@ func (pool *LegacyPool) scheduleReorgLoop() { launchNextRun = false reset, dirtyAccounts = nil, nil - queuedEvents = make(map[common.Address]*sortedMap) + queuedEvents = make(map[common.Address]*SortedMap) } select { @@ -1246,7 +1119,7 @@ func (pool *LegacyPool) scheduleReorgLoop() { // request one later if they want the events sent. 
addr, _ := types.Sender(pool.signer, tx) if _, ok := queuedEvents[addr]; !ok { - queuedEvents[addr] = newSortedMap() + queuedEvents[addr] = NewSortedMap() } queuedEvents[addr].Put(tx) @@ -1265,7 +1138,7 @@ func (pool *LegacyPool) scheduleReorgLoop() { } // runReorg runs reset and promoteExecutables on behalf of scheduleReorgLoop. -func (pool *LegacyPool) runReorg(done chan struct{}, reset *txpoolResetRequest, dirtyAccounts *accountSet, events map[common.Address]*sortedMap) { +func (pool *LegacyPool) runReorg(done chan struct{}, reset *txpoolResetRequest, dirtyAccounts *accountSet, events map[common.Address]*SortedMap) { defer func(t0 time.Time) { reorgDurationTimer.Update(time.Since(t0)) }(time.Now()) @@ -1332,7 +1205,7 @@ func (pool *LegacyPool) runReorg(done chan struct{}, reset *txpoolResetRequest, for _, tx := range promoted { addr, _ := types.Sender(pool.signer, tx) if _, ok := events[addr]; !ok { - events[addr] = newSortedMap() + events[addr] = NewSortedMap() } events[addr].Put(tx) } @@ -1441,7 +1314,7 @@ func (pool *LegacyPool) reset(oldHead, newHead *types.Header) { // Inject any transactions discarded due to reorgs log.Debug("Reinjecting stale transactions", "count", len(reinject)) core.SenderCacher().Recover(pool.signer, reinject) - pool.addTxsLocked(reinject, false) + pool.addTxsLocked(reinject) } // promoteExecutables moves transactions that have become processable from the @@ -1486,22 +1359,17 @@ func (pool *LegacyPool) promoteExecutables(accounts []common.Address) []*types.T queuedGauge.Dec(int64(len(readies))) // Drop all transactions over the allowed limit - var caps types.Transactions - if !pool.locals.contains(addr) { - caps = list.Cap(int(pool.config.AccountQueue)) - for _, tx := range caps { - hash := tx.Hash() - pool.all.Remove(hash) - log.Trace("Removed cap-exceeding queued transaction", "hash", hash) - } - queuedRateLimitMeter.Mark(int64(len(caps))) + var caps = list.Cap(int(pool.config.AccountQueue)) + for _, tx := range caps { + hash := 
tx.Hash() + pool.all.Remove(hash) + log.Trace("Removed cap-exceeding queued transaction", "hash", hash) } + queuedRateLimitMeter.Mark(int64(len(caps))) // Mark all the items dropped as removed pool.priced.Removed(len(forwards) + len(drops) + len(caps)) queuedGauge.Dec(int64(len(forwards) + len(drops) + len(caps))) - if pool.locals.contains(addr) { - localGauge.Dec(int64(len(forwards) + len(drops) + len(caps))) - } + // Delete the entire queue entry if it became empty. if list.Empty() { delete(pool.queue, addr) @@ -1531,14 +1399,14 @@ func (pool *LegacyPool) truncatePending() { spammers := prque.New[int64, common.Address](nil) for addr, list := range pool.pending { // Only evict transactions from high rollers - if !pool.locals.contains(addr) && uint64(list.Len()) > pool.config.AccountSlots { + if uint64(list.Len()) > pool.config.AccountSlots { spammers.Push(addr, int64(list.Len())) } } // Gradually drop transactions from offenders offenders := []common.Address{} for pending > pool.config.GlobalSlots && !spammers.Empty() { - // Retrieve the next offender if not local address + // Retrieve the next offender offender, _ := spammers.Pop() offenders = append(offenders, offender) @@ -1564,9 +1432,7 @@ func (pool *LegacyPool) truncatePending() { } pool.priced.Removed(len(caps)) pendingGauge.Dec(int64(len(caps))) - if pool.locals.contains(offenders[i]) { - localGauge.Dec(int64(len(caps))) - } + pending-- } } @@ -1591,9 +1457,6 @@ func (pool *LegacyPool) truncatePending() { } pool.priced.Removed(len(caps)) pendingGauge.Dec(int64(len(caps))) - if pool.locals.contains(addr) { - localGauge.Dec(int64(len(caps))) - } pending-- } } @@ -1614,13 +1477,11 @@ func (pool *LegacyPool) truncateQueue() { // Sort all accounts with queued transactions by heartbeat addresses := make(addressesByHeartbeat, 0, len(pool.queue)) for addr := range pool.queue { - if !pool.locals.contains(addr) { // don't drop locals - addresses = append(addresses, addressByHeartbeat{addr, pool.beats[addr]}) - } + 
addresses = append(addresses, addressByHeartbeat{addr, pool.beats[addr]}) } sort.Sort(sort.Reverse(addresses)) - // Drop transactions until the total is below the limit or only locals remain + // Drop transactions until the total is below the limit for drop := queued - pool.config.GlobalQueue; drop > 0 && len(addresses) > 0; { addr := addresses[len(addresses)-1] list := pool.queue[addr.address] @@ -1680,12 +1541,10 @@ func (pool *LegacyPool) demoteUnexecutables() { log.Trace("Demoting pending transaction", "hash", hash) // Internal shuffle shouldn't touch the lookup set. - pool.enqueueTx(hash, tx, false, false) + pool.enqueueTx(hash, tx, false) } pendingGauge.Dec(int64(len(olds) + len(drops) + len(invalids))) - if pool.locals.contains(addr) { - localGauge.Dec(int64(len(olds) + len(drops) + len(invalids))) - } + // If there's a gap in front, alert (should never happen) and postpone all transactions if list.Len() > 0 && list.txs.Get(nonce) == nil { gapped := list.Cap(0) @@ -1694,7 +1553,7 @@ func (pool *LegacyPool) demoteUnexecutables() { log.Error("Demoting invalidated transaction", "hash", hash) // Internal shuffle shouldn't touch the lookup set. - pool.enqueueTx(hash, tx, false, false) + pool.enqueueTx(hash, tx, false) } pendingGauge.Dec(int64(len(gapped))) } @@ -1741,21 +1600,6 @@ func newAccountSet(signer types.Signer, addrs ...common.Address) *accountSet { return as } -// contains checks if a given address is contained within the set. -func (as *accountSet) contains(addr common.Address) bool { - _, exist := as.accounts[addr] - return exist -} - -// containsTx checks if the sender of a given tx is within the set. If the sender -// cannot be derived, this method returns false. -func (as *accountSet) containsTx(tx *types.Transaction) bool { - if addr, err := types.Sender(as.signer, tx); err == nil { - return as.contains(addr) - } - return false -} - // add inserts a new address into the set to track. 
func (as *accountSet) add(addr common.Address) { as.accounts[addr] = struct{}{} @@ -1793,43 +1637,29 @@ func (as *accountSet) merge(other *accountSet) { // internal mechanisms. The sole purpose of the type is to permit out-of-bound // peeking into the pool in LegacyPool.Get without having to acquire the widely scoped // LegacyPool.mu mutex. -// -// This lookup set combines the notion of "local transactions", which is useful -// to build upper-level structure. type lookup struct { - slots int - lock sync.RWMutex - locals map[common.Hash]*types.Transaction - remotes map[common.Hash]*types.Transaction + slots int + lock sync.RWMutex + txs map[common.Hash]*types.Transaction } // newLookup returns a new lookup structure. func newLookup() *lookup { return &lookup{ - locals: make(map[common.Hash]*types.Transaction), - remotes: make(map[common.Hash]*types.Transaction), + txs: make(map[common.Hash]*types.Transaction), } } // Range calls f on each key and value present in the map. The callback passed // should return the indicator whether the iteration needs to be continued. // Callers need to specify which set (or both) to be iterated. -func (t *lookup) Range(f func(hash common.Hash, tx *types.Transaction, local bool) bool, local bool, remote bool) { +func (t *lookup) Range(f func(hash common.Hash, tx *types.Transaction) bool) { t.lock.RLock() defer t.lock.RUnlock() - if local { - for key, value := range t.locals { - if !f(key, value, true) { - return - } - } - } - if remote { - for key, value := range t.remotes { - if !f(key, value, false) { - return - } + for key, value := range t.txs { + if !f(key, value) { + return } } } @@ -1839,26 +1669,7 @@ func (t *lookup) Get(hash common.Hash) *types.Transaction { t.lock.RLock() defer t.lock.RUnlock() - if tx := t.locals[hash]; tx != nil { - return tx - } - return t.remotes[hash] -} - -// GetLocal returns a transaction if it exists in the lookup, or nil if not found. 
-func (t *lookup) GetLocal(hash common.Hash) *types.Transaction { - t.lock.RLock() - defer t.lock.RUnlock() - - return t.locals[hash] -} - -// GetRemote returns a transaction if it exists in the lookup, or nil if not found. -func (t *lookup) GetRemote(hash common.Hash) *types.Transaction { - t.lock.RLock() - defer t.lock.RUnlock() - - return t.remotes[hash] + return t.txs[hash] } // Count returns the current number of transactions in the lookup. @@ -1866,23 +1677,7 @@ func (t *lookup) Count() int { t.lock.RLock() defer t.lock.RUnlock() - return len(t.locals) + len(t.remotes) -} - -// LocalCount returns the current number of local transactions in the lookup. -func (t *lookup) LocalCount() int { - t.lock.RLock() - defer t.lock.RUnlock() - - return len(t.locals) -} - -// RemoteCount returns the current number of remote transactions in the lookup. -func (t *lookup) RemoteCount() int { - t.lock.RLock() - defer t.lock.RUnlock() - - return len(t.remotes) + return len(t.txs) } // Slots returns the current number of slots used in the lookup. @@ -1894,18 +1689,14 @@ func (t *lookup) Slots() int { } // Add adds a transaction to the lookup. -func (t *lookup) Add(tx *types.Transaction, local bool) { +func (t *lookup) Add(tx *types.Transaction) { t.lock.Lock() defer t.lock.Unlock() t.slots += numSlots(tx) slotsGauge.Update(int64(t.slots)) - if local { - t.locals[tx.Hash()] = tx - } else { - t.remotes[tx.Hash()] = tx - } + t.txs[tx.Hash()] = tx } // Remove removes a transaction from the lookup. 
@@ -1913,10 +1704,7 @@ func (t *lookup) Remove(hash common.Hash) { t.lock.Lock() defer t.lock.Unlock() - tx, ok := t.locals[hash] - if !ok { - tx, ok = t.remotes[hash] - } + tx, ok := t.txs[hash] if !ok { log.Error("No transaction found to be deleted", "hash", hash) return @@ -1924,36 +1712,18 @@ func (t *lookup) Remove(hash common.Hash) { t.slots -= numSlots(tx) slotsGauge.Update(int64(t.slots)) - delete(t.locals, hash) - delete(t.remotes, hash) + delete(t.txs, hash) } -// RemoteToLocals migrates the transactions belongs to the given locals to locals -// set. The assumption is held the locals set is thread-safe to be used. -func (t *lookup) RemoteToLocals(locals *accountSet) int { - t.lock.Lock() - defer t.lock.Unlock() - - var migrated int - for hash, tx := range t.remotes { - if locals.containsTx(tx) { - t.locals[hash] = tx - delete(t.remotes, hash) - migrated += 1 - } - } - return migrated -} - -// RemotesBelowTip finds all remote transactions below the given tip threshold. -func (t *lookup) RemotesBelowTip(threshold *big.Int) types.Transactions { +// TxsBelowTip finds all remote transactions below the given tip threshold. +func (t *lookup) TxsBelowTip(threshold *big.Int) types.Transactions { found := make(types.Transactions, 0, 128) - t.Range(func(hash common.Hash, tx *types.Transaction, local bool) bool { + t.Range(func(hash common.Hash, tx *types.Transaction) bool { if tx.GasTipCapIntCmp(threshold) < 0 { found = append(found, tx) } return true - }, false, true) // Only iterate remotes + }) return found } @@ -1982,24 +1752,13 @@ func (pool *LegacyPool) Clear() { // The transaction addition may attempt to reserve the sender addr which // can't happen until Clear releases the reservation lock. Clear cannot // acquire the subpool lock until the transaction addition is completed. 
- for _, tx := range pool.all.remotes { + for _, tx := range pool.all.txs { senderAddr, _ := types.Sender(pool.signer, tx) pool.reserve(senderAddr, false) } - for localSender := range pool.locals.accounts { - pool.reserve(localSender, false) - } - pool.all = newLookup() pool.priced = newPricedList(pool.all) pool.pending = make(map[common.Address]*list) pool.queue = make(map[common.Address]*list) pool.pendingNonces = newNoncer(pool.currentState) - - if !pool.config.NoLocals && pool.config.Journal != "" { - pool.journal = newTxJournal(pool.config.Journal) - if err := pool.journal.rotate(pool.local()); err != nil { - log.Warn("Failed to rotate transaction journal", "err", err) - } - } } diff --git a/core/txpool/legacypool/legacypool_test.go b/core/txpool/legacypool/legacypool_test.go index abbde8cae3..55699e93ee 100644 --- a/core/txpool/legacypool/legacypool_test.go +++ b/core/txpool/legacypool/legacypool_test.go @@ -23,7 +23,6 @@ import ( "fmt" "math/big" "math/rand" - "os" "sync" "sync/atomic" "testing" @@ -183,7 +182,7 @@ func validatePoolInternals(pool *LegacyPool) error { return fmt.Errorf("total transaction count %d != %d pending + %d queued", total, pending, queued) } pool.priced.Reheap() - priced, remote := pool.priced.urgent.Len()+pool.priced.floating.Len(), pool.all.RemoteCount() + priced, remote := pool.priced.urgent.Len()+pool.priced.floating.Len(), pool.all.Count() if priced != remote { return fmt.Errorf("total priced transaction count %d != %d", priced, remote) } @@ -350,9 +349,6 @@ func TestInvalidTransactions(t *testing.T) { if err, want := pool.addRemote(tx), txpool.ErrUnderpriced; !errors.Is(err, want) { t.Errorf("want %v have %v", want, err) } - if err := pool.addLocal(tx); err != nil { - t.Error("expected", nil, "got", err) - } } func TestQueue(t *testing.T) { @@ -366,7 +362,7 @@ func TestQueue(t *testing.T) { testAddBalance(pool, from, big.NewInt(1000)) <-pool.requestReset(nil, nil) - pool.enqueueTx(tx.Hash(), tx, false, true) + 
pool.enqueueTx(tx.Hash(), tx, true) <-pool.requestPromoteExecutables(newAccountSet(pool.signer, from)) if len(pool.pending) != 1 { t.Error("expected valid txs to be 1 is", len(pool.pending)) @@ -375,7 +371,7 @@ func TestQueue(t *testing.T) { tx = transaction(1, 100, key) from, _ = deriveSender(tx) testSetNonce(pool, from, 2) - pool.enqueueTx(tx.Hash(), tx, false, true) + pool.enqueueTx(tx.Hash(), tx, true) <-pool.requestPromoteExecutables(newAccountSet(pool.signer, from)) if _, ok := pool.pending[from].txs.items[tx.Nonce()]; ok { @@ -399,9 +395,9 @@ func TestQueue2(t *testing.T) { testAddBalance(pool, from, big.NewInt(1000)) pool.reset(nil, nil) - pool.enqueueTx(tx1.Hash(), tx1, false, true) - pool.enqueueTx(tx2.Hash(), tx2, false, true) - pool.enqueueTx(tx3.Hash(), tx3, false, true) + pool.enqueueTx(tx1.Hash(), tx1, true) + pool.enqueueTx(tx2.Hash(), tx2, true) + pool.enqueueTx(tx3.Hash(), tx3, true) pool.promoteExecutables([]common.Address{from}) if len(pool.pending) != 1 { @@ -476,14 +472,14 @@ func TestChainFork(t *testing.T) { resetState() tx := transaction(0, 100000, key) - if _, err := pool.add(tx, false); err != nil { + if _, err := pool.add(tx); err != nil { t.Error("didn't expect error", err) } pool.removeTx(tx.Hash(), true, true) // reset the pool's internal state resetState() - if _, err := pool.add(tx, false); err != nil { + if _, err := pool.add(tx); err != nil { t.Error("didn't expect error", err) } } @@ -510,10 +506,10 @@ func TestDoubleNonce(t *testing.T) { tx3, _ := types.SignTx(types.NewTransaction(0, common.Address{}, big.NewInt(100), 1000000, big.NewInt(1), nil), signer, key) // Add the first two transaction, ensure higher priced stays only - if replace, err := pool.add(tx1, false); err != nil || replace { + if replace, err := pool.add(tx1); err != nil || replace { t.Errorf("first transaction insert failed (%v) or reported replacement (%v)", err, replace) } - if replace, err := pool.add(tx2, false); err != nil || !replace { + if replace, err := 
pool.add(tx2); err != nil || !replace { t.Errorf("second transaction insert failed (%v) or not reported replacement (%v)", err, replace) } <-pool.requestPromoteExecutables(newAccountSet(signer, addr)) @@ -525,7 +521,7 @@ func TestDoubleNonce(t *testing.T) { } // Add the third transaction and ensure it's not saved (smaller price) - pool.add(tx3, false) + pool.add(tx3) <-pool.requestPromoteExecutables(newAccountSet(signer, addr)) if pool.pending[addr].Len() != 1 { t.Error("expected 1 pending transactions, got", pool.pending[addr].Len()) @@ -548,7 +544,7 @@ func TestMissingNonce(t *testing.T) { addr := crypto.PubkeyToAddress(key.PublicKey) testAddBalance(pool, addr, big.NewInt(100000000000000)) tx := transaction(1, 100000, key) - if _, err := pool.add(tx, false); err != nil { + if _, err := pool.add(tx); err != nil { t.Error("didn't expect error", err) } if len(pool.pending) != 0 { @@ -607,21 +603,21 @@ func TestDropping(t *testing.T) { tx11 = transaction(11, 200, key) tx12 = transaction(12, 300, key) ) - pool.all.Add(tx0, false) - pool.priced.Put(tx0, false) + pool.all.Add(tx0) + pool.priced.Put(tx0) pool.promoteTx(account, tx0.Hash(), tx0) - pool.all.Add(tx1, false) - pool.priced.Put(tx1, false) + pool.all.Add(tx1) + pool.priced.Put(tx1) pool.promoteTx(account, tx1.Hash(), tx1) - pool.all.Add(tx2, false) - pool.priced.Put(tx2, false) + pool.all.Add(tx2) + pool.priced.Put(tx2) pool.promoteTx(account, tx2.Hash(), tx2) - pool.enqueueTx(tx10.Hash(), tx10, false, true) - pool.enqueueTx(tx11.Hash(), tx11, false, true) - pool.enqueueTx(tx12.Hash(), tx12, false, true) + pool.enqueueTx(tx10.Hash(), tx10, true) + pool.enqueueTx(tx11.Hash(), tx11, true) + pool.enqueueTx(tx12.Hash(), tx12, true) // Check that pre and post validations leave the pool as is if pool.pending[account].Len() != 3 { @@ -899,13 +895,6 @@ func TestQueueAccountLimiting(t *testing.T) { // This logic should not hold for local transactions, unless the local tracking // mechanism is disabled. 
func TestQueueGlobalLimiting(t *testing.T) { - testQueueGlobalLimiting(t, false) -} -func TestQueueGlobalLimitingNoLocals(t *testing.T) { - testQueueGlobalLimiting(t, true) -} - -func testQueueGlobalLimiting(t *testing.T, nolocals bool) { t.Parallel() // Create the pool to test the limit enforcement with @@ -913,7 +902,7 @@ func testQueueGlobalLimiting(t *testing.T, nolocals bool) { blockchain := newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed)) config := testTxPoolConfig - config.NoLocals = nolocals + config.NoLocals = true config.GlobalQueue = config.AccountQueue*3 - 1 // reduce the queue limits to shorten test time (-1 to make it non divisible) pool := New(config, blockchain) @@ -926,7 +915,6 @@ func testQueueGlobalLimiting(t *testing.T, nolocals bool) { keys[i], _ = crypto.GenerateKey() testAddBalance(pool, crypto.PubkeyToAddress(keys[i].PublicKey), big.NewInt(1000000)) } - local := keys[len(keys)-1] // Generate and queue a batch of transactions nonces := make(map[common.Address]uint64) @@ -952,51 +940,12 @@ func testQueueGlobalLimiting(t *testing.T, nolocals bool) { if queued > int(config.GlobalQueue) { t.Fatalf("total transactions overflow allowance: %d > %d", queued, config.GlobalQueue) } - // Generate a batch of transactions from the local account and import them - txs = txs[:0] - for i := uint64(0); i < 3*config.GlobalQueue; i++ { - txs = append(txs, transaction(i+1, 100000, local)) - } - pool.addLocals(txs) - - // If locals are disabled, the previous eviction algorithm should apply here too - if nolocals { - queued := 0 - for addr, list := range pool.queue { - if list.Len() > int(config.AccountQueue) { - t.Errorf("addr %x: queued accounts overflown allowance: %d > %d", addr, list.Len(), config.AccountQueue) - } - queued += list.Len() - } - if queued > int(config.GlobalQueue) { - t.Fatalf("total transactions overflow allowance: %d > %d", queued, config.GlobalQueue) - } - } else { - // Local exemptions are enabled, make sure the 
local account owned the queue - if len(pool.queue) != 1 { - t.Errorf("multiple accounts in queue: have %v, want %v", len(pool.queue), 1) - } - // Also ensure no local transactions are ever dropped, even if above global limits - if queued := pool.queue[crypto.PubkeyToAddress(local.PublicKey)].Len(); uint64(queued) != 3*config.GlobalQueue { - t.Fatalf("local account queued transaction count mismatch: have %v, want %v", queued, 3*config.GlobalQueue) - } - } } // Tests that if an account remains idle for a prolonged amount of time, any // non-executable transactions queued up are dropped to prevent wasting resources // on shuffling them around. -// -// This logic should not hold for local transactions, unless the local tracking -// mechanism is disabled. func TestQueueTimeLimiting(t *testing.T) { - testQueueTimeLimiting(t, false) -} -func TestQueueTimeLimitingNoLocals(t *testing.T) { - testQueueTimeLimiting(t, true) -} - -func testQueueTimeLimiting(t *testing.T, nolocals bool) { // Reduce the eviction interval to a testable amount defer func(old time.Duration) { evictionInterval = old }(evictionInterval) evictionInterval = time.Millisecond * 100 @@ -1007,23 +956,17 @@ func testQueueTimeLimiting(t *testing.T, nolocals bool) { config := testTxPoolConfig config.Lifetime = time.Second - config.NoLocals = nolocals pool := New(config, blockchain) pool.Init(config.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver()) defer pool.Close() - // Create two test accounts to ensure remotes expire but locals do not - local, _ := crypto.GenerateKey() + // Create a test account to ensure remotes expire remote, _ := crypto.GenerateKey() - testAddBalance(pool, crypto.PubkeyToAddress(local.PublicKey), big.NewInt(1000000000)) testAddBalance(pool, crypto.PubkeyToAddress(remote.PublicKey), big.NewInt(1000000000)) - // Add the two transactions and ensure they both are queued up - if err := pool.addLocal(pricedTransaction(1, 100000, big.NewInt(1), local)); err != nil { - 
t.Fatalf("failed to add local transaction: %v", err) - } + // Add the transaction and ensure it is queued up if err := pool.addRemote(pricedTransaction(1, 100000, big.NewInt(1), remote)); err != nil { t.Fatalf("failed to add remote transaction: %v", err) } @@ -1031,7 +974,7 @@ func testQueueTimeLimiting(t *testing.T, nolocals bool) { if pending != 0 { t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 0) } - if queued != 2 { + if queued != 1 { t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 2) } if err := validatePoolInternals(pool); err != nil { @@ -1046,7 +989,7 @@ func testQueueTimeLimiting(t *testing.T, nolocals bool) { if pending != 0 { t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 0) } - if queued != 2 { + if queued != 1 { t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 2) } if err := validatePoolInternals(pool); err != nil { @@ -1060,14 +1003,8 @@ func testQueueTimeLimiting(t *testing.T, nolocals bool) { if pending != 0 { t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 0) } - if nolocals { - if queued != 0 { - t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 0) - } - } else { - if queued != 1 { - t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 1) - } + if queued != 0 { + t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 0) } if err := validatePoolInternals(pool); err != nil { t.Fatalf("pool internal state corrupted: %v", err) @@ -1075,7 +1012,6 @@ func testQueueTimeLimiting(t *testing.T, nolocals bool) { // remove current transactions and increase nonce to prepare for a reset and cleanup statedb.SetNonce(crypto.PubkeyToAddress(remote.PublicKey), 2) - statedb.SetNonce(crypto.PubkeyToAddress(local.PublicKey), 2) <-pool.requestReset(nil, nil) // make sure queue, pending are cleared @@ -1091,18 +1027,12 @@ func testQueueTimeLimiting(t *testing.T, nolocals bool) { } // Queue gapped transactions - 
if err := pool.addLocal(pricedTransaction(4, 100000, big.NewInt(1), local)); err != nil { - t.Fatalf("failed to add remote transaction: %v", err) - } if err := pool.addRemoteSync(pricedTransaction(4, 100000, big.NewInt(1), remote)); err != nil { t.Fatalf("failed to add remote transaction: %v", err) } time.Sleep(5 * evictionInterval) // A half lifetime pass // Queue executable transactions, the life cycle should be restarted. - if err := pool.addLocal(pricedTransaction(2, 100000, big.NewInt(1), local)); err != nil { - t.Fatalf("failed to add remote transaction: %v", err) - } if err := pool.addRemoteSync(pricedTransaction(2, 100000, big.NewInt(1), remote)); err != nil { t.Fatalf("failed to add remote transaction: %v", err) } @@ -1110,11 +1040,11 @@ func testQueueTimeLimiting(t *testing.T, nolocals bool) { // All gapped transactions shouldn't be kicked out pending, queued = pool.Stats() - if pending != 2 { - t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 2) + if pending != 1 { + t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 1) } - if queued != 2 { - t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 2) + if queued != 1 { + t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 1) } if err := validatePoolInternals(pool); err != nil { t.Fatalf("pool internal state corrupted: %v", err) @@ -1123,17 +1053,11 @@ func testQueueTimeLimiting(t *testing.T, nolocals bool) { // The whole life time pass after last promotion, kick out stale transactions time.Sleep(2 * config.Lifetime) pending, queued = pool.Stats() - if pending != 2 { - t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 2) + if pending != 1 { + t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 1) } - if nolocals { - if queued != 0 { - t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 0) - } - } else { - if queued != 1 { - t.Fatalf("queued transactions mismatched: have %d, want 
%d", queued, 1) - } + if queued != 0 { + t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 0) } if err := validatePoolInternals(pool); err != nil { t.Fatalf("pool internal state corrupted: %v", err) @@ -1363,8 +1287,6 @@ func TestPendingMinimumAllowance(t *testing.T) { // Tests that setting the transaction pool gas price to a higher value correctly // discards everything cheaper than that and moves any gapped transactions back // from the pending pool to the queue. -// -// Note, local transactions are never allowed to be dropped. func TestRepricing(t *testing.T) { t.Parallel() @@ -1382,7 +1304,7 @@ func TestRepricing(t *testing.T) { defer sub.Unsubscribe() // Create a number of test accounts and fund them - keys := make([]*ecdsa.PrivateKey, 4) + keys := make([]*ecdsa.PrivateKey, 3) for i := 0; i < len(keys); i++ { keys[i], _ = crypto.GenerateKey() testAddBalance(pool, crypto.PubkeyToAddress(keys[i].PublicKey), big.NewInt(1000000)) @@ -1402,20 +1324,17 @@ func TestRepricing(t *testing.T) { txs = append(txs, pricedTransaction(2, 100000, big.NewInt(1), keys[2])) txs = append(txs, pricedTransaction(3, 100000, big.NewInt(2), keys[2])) - ltx := pricedTransaction(0, 100000, big.NewInt(1), keys[3]) - // Import the batch and that both pending and queued transactions match up pool.addRemotesSync(txs) - pool.addLocal(ltx) pending, queued := pool.Stats() - if pending != 7 { - t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 7) + if pending != 6 { + t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 6) } if queued != 3 { t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 3) } - if err := validateEvents(events, 7); err != nil { + if err := validateEvents(events, 6); err != nil { t.Fatalf("original event firing failed: %v", err) } if err := validatePoolInternals(pool); err != nil { @@ -1425,8 +1344,8 @@ func TestRepricing(t *testing.T) { pool.SetGasTip(big.NewInt(2)) pending, queued = pool.Stats() - if 
pending != 2 { - t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 2) + if pending != 1 { + t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 1) } if queued != 5 { t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 5) @@ -1453,21 +1372,7 @@ func TestRepricing(t *testing.T) { if err := validatePoolInternals(pool); err != nil { t.Fatalf("pool internal state corrupted: %v", err) } - // However we can add local underpriced transactions - tx := pricedTransaction(1, 100000, big.NewInt(1), keys[3]) - if err := pool.addLocal(tx); err != nil { - t.Fatalf("failed to add underpriced local transaction: %v", err) - } - if pending, _ = pool.Stats(); pending != 3 { - t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 3) - } - if err := validateEvents(events, 1); err != nil { - t.Fatalf("post-reprice local event firing failed: %v", err) - } - if err := validatePoolInternals(pool); err != nil { - t.Fatalf("pool internal state corrupted: %v", err) - } - // And we can fill gaps with properly priced transactions + // we can fill gaps with properly priced transactions if err := pool.addRemote(pricedTransaction(1, 100000, big.NewInt(2), keys[0])); err != nil { t.Fatalf("failed to add pending transaction: %v", err) } @@ -1504,29 +1409,16 @@ func TestMinGasPriceEnforced(t *testing.T) { tx := pricedTransaction(0, 100000, big.NewInt(2), key) pool.SetGasTip(big.NewInt(tx.GasPrice().Int64() + 1)) - if err := pool.addLocal(tx); !errors.Is(err, txpool.ErrUnderpriced) { - t.Fatalf("Min tip not enforced") - } - - if err := pool.Add([]*types.Transaction{tx}, true, false)[0]; !errors.Is(err, txpool.ErrUnderpriced) { + if err := pool.Add([]*types.Transaction{tx}, true)[0]; !errors.Is(err, txpool.ErrUnderpriced) { t.Fatalf("Min tip not enforced") } tx = dynamicFeeTx(0, 100000, big.NewInt(3), big.NewInt(2), key) pool.SetGasTip(big.NewInt(tx.GasTipCap().Int64() + 1)) - if err := pool.addLocal(tx); !errors.Is(err, 
txpool.ErrUnderpriced) { + if err := pool.Add([]*types.Transaction{tx}, true)[0]; !errors.Is(err, txpool.ErrUnderpriced) { t.Fatalf("Min tip not enforced") } - - if err := pool.Add([]*types.Transaction{tx}, true, false)[0]; !errors.Is(err, txpool.ErrUnderpriced) { - t.Fatalf("Min tip not enforced") - } - // Make sure the tx is accepted if locals are enabled - pool.config.NoLocals = false - if err := pool.Add([]*types.Transaction{tx}, true, false)[0]; err != nil { - t.Fatalf("Min tip enforced with locals enabled, error: %v", err) - } } // Tests that setting the transaction pool gas price to a higher value correctly @@ -1567,20 +1459,17 @@ func TestRepricingDynamicFee(t *testing.T) { txs = append(txs, dynamicFeeTx(2, 100000, big.NewInt(1), big.NewInt(1), keys[2])) txs = append(txs, dynamicFeeTx(3, 100000, big.NewInt(2), big.NewInt(2), keys[2])) - ltx := dynamicFeeTx(0, 100000, big.NewInt(2), big.NewInt(1), keys[3]) - // Import the batch and that both pending and queued transactions match up pool.addRemotesSync(txs) - pool.addLocal(ltx) pending, queued := pool.Stats() - if pending != 7 { - t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 7) + if pending != 6 { + t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 6) } if queued != 3 { t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 3) } - if err := validateEvents(events, 7); err != nil { + if err := validateEvents(events, 6); err != nil { t.Fatalf("original event firing failed: %v", err) } if err := validatePoolInternals(pool); err != nil { @@ -1590,8 +1479,8 @@ func TestRepricingDynamicFee(t *testing.T) { pool.SetGasTip(big.NewInt(2)) pending, queued = pool.Stats() - if pending != 2 { - t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 2) + if pending != 1 { + t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 1) } if queued != 5 { t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 5) @@ -1621,20 
+1510,7 @@ func TestRepricingDynamicFee(t *testing.T) { if err := validatePoolInternals(pool); err != nil { t.Fatalf("pool internal state corrupted: %v", err) } - // However we can add local underpriced transactions - tx = dynamicFeeTx(1, 100000, big.NewInt(1), big.NewInt(1), keys[3]) - if err := pool.addLocal(tx); err != nil { - t.Fatalf("failed to add underpriced local transaction: %v", err) - } - if pending, _ = pool.Stats(); pending != 3 { - t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 3) - } - if err := validateEvents(events, 1); err != nil { - t.Fatalf("post-reprice local event firing failed: %v", err) - } - if err := validatePoolInternals(pool); err != nil { - t.Fatalf("pool internal state corrupted: %v", err) - } + // And we can fill gaps with properly priced transactions tx = pricedTransaction(1, 100000, big.NewInt(2), keys[0]) if err := pool.addRemote(tx); err != nil { @@ -1656,77 +1532,6 @@ func TestRepricingDynamicFee(t *testing.T) { } } -// Tests that setting the transaction pool gas price to a higher value does not -// remove local transactions (legacy & dynamic fee). -func TestRepricingKeepsLocals(t *testing.T) { - t.Parallel() - - // Create the pool to test the pricing enforcement with - statedb, _ := state.New(types.EmptyRootHash, state.NewDatabaseForTesting()) - blockchain := newTestBlockChain(eip1559Config, 1000000, statedb, new(event.Feed)) - - pool := New(testTxPoolConfig, blockchain) - pool.Init(testTxPoolConfig.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver()) - defer pool.Close() - - // Create a number of test accounts and fund them - keys := make([]*ecdsa.PrivateKey, 3) - for i := 0; i < len(keys); i++ { - keys[i], _ = crypto.GenerateKey() - testAddBalance(pool, crypto.PubkeyToAddress(keys[i].PublicKey), big.NewInt(100000*1000000)) - } - // Create transaction (both pending and queued) with a linearly growing gasprice - for i := uint64(0); i < 500; i++ { - // Add pending transaction. 
- pendingTx := pricedTransaction(i, 100000, big.NewInt(int64(i)), keys[2]) - if err := pool.addLocal(pendingTx); err != nil { - t.Fatal(err) - } - // Add queued transaction. - queuedTx := pricedTransaction(i+501, 100000, big.NewInt(int64(i)), keys[2]) - if err := pool.addLocal(queuedTx); err != nil { - t.Fatal(err) - } - - // Add pending dynamic fee transaction. - pendingTx = dynamicFeeTx(i, 100000, big.NewInt(int64(i)+1), big.NewInt(int64(i)), keys[1]) - if err := pool.addLocal(pendingTx); err != nil { - t.Fatal(err) - } - // Add queued dynamic fee transaction. - queuedTx = dynamicFeeTx(i+501, 100000, big.NewInt(int64(i)+1), big.NewInt(int64(i)), keys[1]) - if err := pool.addLocal(queuedTx); err != nil { - t.Fatal(err) - } - } - pending, queued := pool.Stats() - expPending, expQueued := 1000, 1000 - validate := func() { - pending, queued = pool.Stats() - if pending != expPending { - t.Fatalf("pending transactions mismatched: have %d, want %d", pending, expPending) - } - if queued != expQueued { - t.Fatalf("queued transactions mismatched: have %d, want %d", queued, expQueued) - } - - if err := validatePoolInternals(pool); err != nil { - t.Fatalf("pool internal state corrupted: %v", err) - } - } - validate() - - // Reprice the pool and check that nothing is dropped - pool.SetGasTip(big.NewInt(2)) - validate() - - pool.SetGasTip(big.NewInt(2)) - pool.SetGasTip(big.NewInt(4)) - pool.SetGasTip(big.NewInt(8)) - pool.SetGasTip(big.NewInt(100)) - validate() -} - // Tests that when the pool reaches its global transaction limit, underpriced // transactions are gradually shifted out for more expensive ones and any gapped // pending transactions are moved into the queue. 
@@ -1756,21 +1561,18 @@ func TestUnderpricing(t *testing.T) { keys := make([]*ecdsa.PrivateKey, 5) for i := 0; i < len(keys); i++ { keys[i], _ = crypto.GenerateKey() - testAddBalance(pool, crypto.PubkeyToAddress(keys[i].PublicKey), big.NewInt(1000000)) + testAddBalance(pool, crypto.PubkeyToAddress(keys[i].PublicKey), big.NewInt(10000000)) } // Generate and queue a batch of transactions, both pending and queued txs := types.Transactions{} - txs = append(txs, pricedTransaction(0, 100000, big.NewInt(1), keys[0])) - txs = append(txs, pricedTransaction(1, 100000, big.NewInt(2), keys[0])) - - txs = append(txs, pricedTransaction(1, 100000, big.NewInt(1), keys[1])) - - ltx := pricedTransaction(0, 100000, big.NewInt(1), keys[2]) + txs = append(txs, pricedTransaction(0, 100000, big.NewInt(1), keys[0])) // pending + txs = append(txs, pricedTransaction(1, 100000, big.NewInt(2), keys[0])) // pending + txs = append(txs, pricedTransaction(0, 100000, big.NewInt(1), keys[2])) // pending + txs = append(txs, pricedTransaction(1, 100000, big.NewInt(1), keys[1])) // queued // Import the batch and that both pending and queued transactions match up - pool.addRemotes(txs) - pool.addLocal(ltx) + pool.addRemotesSync(txs) pending, queued := pool.Stats() if pending != 3 { @@ -1790,7 +1592,7 @@ func TestUnderpricing(t *testing.T) { t.Fatalf("adding underpriced pending transaction error mismatch: have %v, want %v", err, txpool.ErrUnderpriced) } // Replace a future transaction with a future transaction - if err := pool.addRemoteSync(pricedTransaction(1, 100000, big.NewInt(2), keys[1])); err != nil { // +K1:1 => -K1:1 => Pend K0:0, K0:1, K2:0; Que K1:1 + if err := pool.addRemoteSync(pricedTransaction(1, 100000, big.NewInt(5), keys[1])); err != nil { // +K1:1 => -K1:1 => Pend K0:0, K0:1, K2:0; Que K1:1 t.Fatalf("failed to add well priced transaction: %v", err) } // Ensure that adding high priced transactions drops cheap ones, but not own @@ -1800,48 +1602,26 @@ func TestUnderpricing(t *testing.T) 
{ if err := pool.addRemoteSync(pricedTransaction(2, 100000, big.NewInt(4), keys[1])); err != nil { // +K1:2 => -K0:0 => Pend K1:0, K2:0; Que K0:1 K1:2 t.Fatalf("failed to add well priced transaction: %v", err) } - if err := pool.addRemote(pricedTransaction(3, 100000, big.NewInt(5), keys[1])); err != nil { // +K1:3 => -K0:1 => Pend K1:0, K2:0; Que K1:2 K1:3 + if err := pool.addRemoteSync(pricedTransaction(3, 100000, big.NewInt(5), keys[1])); err != nil { // +K1:3 => -K0:1 => Pend K1:0, K2:0; Que K1:2 K1:3 t.Fatalf("failed to add well priced transaction: %v", err) } // Ensure that replacing a pending transaction with a future transaction fails - if err := pool.addRemote(pricedTransaction(5, 100000, big.NewInt(6), keys[1])); err != txpool.ErrFutureReplacePending { + if err := pool.addRemoteSync(pricedTransaction(5, 100000, big.NewInt(6), keys[1])); err != txpool.ErrFutureReplacePending { t.Fatalf("adding future replace transaction error mismatch: have %v, want %v", err, txpool.ErrFutureReplacePending) } pending, queued = pool.Stats() - if pending != 2 { - t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 2) + if pending != 4 { + t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 4) } - if queued != 2 { - t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 2) + if queued != 0 { + t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 0) } - if err := validateEvents(events, 2); err != nil { + if err := validateEvents(events, 4); err != nil { t.Fatalf("additional event firing failed: %v", err) } if err := validatePoolInternals(pool); err != nil { t.Fatalf("pool internal state corrupted: %v", err) } - // Ensure that adding local transactions can push out even higher priced ones - ltx = pricedTransaction(1, 100000, big.NewInt(0), keys[2]) - if err := pool.addLocal(ltx); err != nil { - t.Fatalf("failed to append underpriced local transaction: %v", err) - } - ltx = pricedTransaction(0, 100000, 
big.NewInt(0), keys[3]) - if err := pool.addLocal(ltx); err != nil { - t.Fatalf("failed to add new underpriced local transaction: %v", err) - } - pending, queued = pool.Stats() - if pending != 3 { - t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 3) - } - if queued != 1 { - t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 1) - } - if err := validateEvents(events, 2); err != nil { - t.Fatalf("local event firing failed: %v", err) - } - if err := validatePoolInternals(pool); err != nil { - t.Fatalf("pool internal state corrupted: %v", err) - } } // Tests that more expensive transactions push out cheap ones from the pool, but @@ -1915,8 +1695,6 @@ func TestStableUnderpricing(t *testing.T) { // Tests that when the pool reaches its global transaction limit, underpriced // transactions (legacy & dynamic fee) are gradually shifted out for more // expensive ones and any gapped pending transactions are moved into the queue. -// -// Note, local transactions are never allowed to be dropped. 
func TestUnderpricingDynamicFee(t *testing.T) { t.Parallel() @@ -1941,15 +1719,13 @@ func TestUnderpricingDynamicFee(t *testing.T) { // Generate and queue a batch of transactions, both pending and queued txs := types.Transactions{} - txs = append(txs, dynamicFeeTx(0, 100000, big.NewInt(3), big.NewInt(2), keys[0])) - txs = append(txs, pricedTransaction(1, 100000, big.NewInt(2), keys[0])) - txs = append(txs, dynamicFeeTx(1, 100000, big.NewInt(2), big.NewInt(1), keys[1])) + txs = append(txs, dynamicFeeTx(0, 100000, big.NewInt(3), big.NewInt(2), keys[0])) // pending + txs = append(txs, pricedTransaction(1, 100000, big.NewInt(2), keys[0])) // pending + txs = append(txs, dynamicFeeTx(1, 100000, big.NewInt(2), big.NewInt(1), keys[1])) // queued + txs = append(txs, dynamicFeeTx(0, 100000, big.NewInt(2), big.NewInt(1), keys[2])) // pending - ltx := dynamicFeeTx(0, 100000, big.NewInt(2), big.NewInt(1), keys[2]) - - // Import the batch and that both pending and queued transactions match up - pool.addRemotes(txs) // Pend K0:0, K0:1; Que K1:1 - pool.addLocal(ltx) // +K2:0 => Pend K0:0, K0:1, K2:0; Que K1:1 + // Import the batch and check that both pending and queued transactions match up + pool.addRemotesSync(txs) // Pend K0:0, K0:1; Que K1:1 pending, queued := pool.Stats() if pending != 3 { @@ -1967,13 +1743,13 @@ func TestUnderpricingDynamicFee(t *testing.T) { // Ensure that adding an underpriced transaction fails tx := dynamicFeeTx(0, 100000, big.NewInt(2), big.NewInt(1), keys[1]) - if err := pool.addRemote(tx); !errors.Is(err, txpool.ErrUnderpriced) { // Pend K0:0, K0:1, K2:0; Que K1:1 + if err := pool.addRemoteSync(tx); !errors.Is(err, txpool.ErrUnderpriced) { // Pend K0:0, K0:1, K2:0; Que K1:1 t.Fatalf("adding underpriced pending transaction error mismatch: have %v, want %v", err, txpool.ErrUnderpriced) } // Ensure that adding high priced transactions drops cheap ones, but not own tx = pricedTransaction(0, 100000, big.NewInt(2), keys[1]) - if err := pool.addRemote(tx); 
err != nil { // +K1:0, -K1:1 => Pend K0:0, K0:1, K1:0, K2:0; Que - + if err := pool.addRemoteSync(tx); err != nil { // +K1:0, -K1:1 => Pend K0:0, K0:1, K1:0, K2:0; Que - t.Fatalf("failed to add well priced transaction: %v", err) } @@ -1986,40 +1762,18 @@ func TestUnderpricingDynamicFee(t *testing.T) { t.Fatalf("failed to add well priced transaction: %v", err) } pending, queued = pool.Stats() - if pending != 2 { - t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 2) + if pending != 4 { + t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 4) } - if queued != 2 { - t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 2) + if queued != 0 { + t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 0) } - if err := validateEvents(events, 2); err != nil { + if err := validateEvents(events, 3); err != nil { t.Fatalf("additional event firing failed: %v", err) } if err := validatePoolInternals(pool); err != nil { t.Fatalf("pool internal state corrupted: %v", err) } - // Ensure that adding local transactions can push out even higher priced ones - ltx = dynamicFeeTx(1, 100000, big.NewInt(0), big.NewInt(0), keys[2]) - if err := pool.addLocal(ltx); err != nil { - t.Fatalf("failed to append underpriced local transaction: %v", err) - } - ltx = dynamicFeeTx(0, 100000, big.NewInt(0), big.NewInt(0), keys[3]) - if err := pool.addLocal(ltx); err != nil { - t.Fatalf("failed to add new underpriced local transaction: %v", err) - } - pending, queued = pool.Stats() - if pending != 3 { - t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 3) - } - if queued != 1 { - t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 1) - } - if err := validateEvents(events, 2); err != nil { - t.Fatalf("local event firing failed: %v", err) - } - if err := validatePoolInternals(pool); err != nil { - t.Fatalf("pool internal state corrupted: %v", err) - } } // Tests whether highest fee cap transaction is 
retained after a batch of high effective @@ -2039,7 +1793,7 @@ func TestDualHeapEviction(t *testing.T) { ) check := func(tx *types.Transaction, name string) { - if pool.all.GetRemote(tx.Hash()) == nil { + if pool.all.Get(tx.Hash()) == nil { t.Fatalf("highest %s transaction evicted from the pool", name) } } @@ -2336,122 +2090,6 @@ func TestReplacementDynamicFee(t *testing.T) { } } -// Tests that local transactions are journaled to disk, but remote transactions -// get discarded between restarts. -func TestJournaling(t *testing.T) { testJournaling(t, false) } -func TestJournalingNoLocals(t *testing.T) { testJournaling(t, true) } - -func testJournaling(t *testing.T, nolocals bool) { - t.Parallel() - - // Create a temporary file for the journal - file, err := os.CreateTemp("", "") - if err != nil { - t.Fatalf("failed to create temporary journal: %v", err) - } - journal := file.Name() - defer os.Remove(journal) - - // Clean up the temporary file, we only need the path for now - file.Close() - os.Remove(journal) - - // Create the original pool to inject transaction into the journal - statedb, _ := state.New(types.EmptyRootHash, state.NewDatabaseForTesting()) - blockchain := newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed)) - - config := testTxPoolConfig - config.NoLocals = nolocals - config.Journal = journal - config.Rejournal = time.Second - - pool := New(config, blockchain) - pool.Init(config.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver()) - - // Create two test accounts to ensure remotes expire but locals do not - local, _ := crypto.GenerateKey() - remote, _ := crypto.GenerateKey() - - testAddBalance(pool, crypto.PubkeyToAddress(local.PublicKey), big.NewInt(1000000000)) - testAddBalance(pool, crypto.PubkeyToAddress(remote.PublicKey), big.NewInt(1000000000)) - - // Add three local and a remote transactions and ensure they are queued up - if err := pool.addLocal(pricedTransaction(0, 100000, big.NewInt(1), local)); err != nil { - 
t.Fatalf("failed to add local transaction: %v", err) - } - if err := pool.addLocal(pricedTransaction(1, 100000, big.NewInt(1), local)); err != nil { - t.Fatalf("failed to add local transaction: %v", err) - } - if err := pool.addLocal(pricedTransaction(2, 100000, big.NewInt(1), local)); err != nil { - t.Fatalf("failed to add local transaction: %v", err) - } - if err := pool.addRemoteSync(pricedTransaction(0, 100000, big.NewInt(1), remote)); err != nil { - t.Fatalf("failed to add remote transaction: %v", err) - } - pending, queued := pool.Stats() - if pending != 4 { - t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 4) - } - if queued != 0 { - t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 0) - } - if err := validatePoolInternals(pool); err != nil { - t.Fatalf("pool internal state corrupted: %v", err) - } - // Terminate the old pool, bump the local nonce, create a new pool and ensure relevant transaction survive - pool.Close() - statedb.SetNonce(crypto.PubkeyToAddress(local.PublicKey), 1) - blockchain = newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed)) - - pool = New(config, blockchain) - pool.Init(config.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver()) - - pending, queued = pool.Stats() - if queued != 0 { - t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 0) - } - if nolocals { - if pending != 0 { - t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 0) - } - } else { - if pending != 2 { - t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 2) - } - } - if err := validatePoolInternals(pool); err != nil { - t.Fatalf("pool internal state corrupted: %v", err) - } - // Bump the nonce temporarily and ensure the newly invalidated transaction is removed - statedb.SetNonce(crypto.PubkeyToAddress(local.PublicKey), 2) - <-pool.requestReset(nil, nil) - time.Sleep(2 * config.Rejournal) - pool.Close() - - 
statedb.SetNonce(crypto.PubkeyToAddress(local.PublicKey), 1) - blockchain = newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed)) - pool = New(config, blockchain) - pool.Init(config.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver()) - - pending, queued = pool.Stats() - if pending != 0 { - t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 0) - } - if nolocals { - if queued != 0 { - t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 0) - } - } else { - if queued != 1 { - t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 1) - } - } - if err := validatePoolInternals(pool); err != nil { - t.Fatalf("pool internal state corrupted: %v", err) - } - pool.Close() -} - // TestStatusCheck tests that the pool can correctly retrieve the // pending status of individual transactions. func TestStatusCheck(t *testing.T) { @@ -2566,7 +2204,7 @@ func benchmarkFuturePromotion(b *testing.B, size int) { for i := 0; i < size; i++ { tx := transaction(uint64(1+i), 100000, key) - pool.enqueueTx(tx.Hash(), tx, false, true) + pool.enqueueTx(tx.Hash(), tx, true) } // Benchmark the speed of pool validation b.ResetTimer() @@ -2576,15 +2214,11 @@ func benchmarkFuturePromotion(b *testing.B, size int) { } // Benchmarks the speed of batched transaction insertion. 
-func BenchmarkBatchInsert100(b *testing.B) { benchmarkBatchInsert(b, 100, false) } -func BenchmarkBatchInsert1000(b *testing.B) { benchmarkBatchInsert(b, 1000, false) } -func BenchmarkBatchInsert10000(b *testing.B) { benchmarkBatchInsert(b, 10000, false) } +func BenchmarkBatchInsert100(b *testing.B) { benchmarkBatchInsert(b, 100) } +func BenchmarkBatchInsert1000(b *testing.B) { benchmarkBatchInsert(b, 1000) } +func BenchmarkBatchInsert10000(b *testing.B) { benchmarkBatchInsert(b, 10000) } -func BenchmarkBatchLocalInsert100(b *testing.B) { benchmarkBatchInsert(b, 100, true) } -func BenchmarkBatchLocalInsert1000(b *testing.B) { benchmarkBatchInsert(b, 1000, true) } -func BenchmarkBatchLocalInsert10000(b *testing.B) { benchmarkBatchInsert(b, 10000, true) } - -func benchmarkBatchInsert(b *testing.B, size int, local bool) { +func benchmarkBatchInsert(b *testing.B, size int) { // Generate a batch of transactions to enqueue into the pool pool, key := setupPool() defer pool.Close() @@ -2602,46 +2236,7 @@ func benchmarkBatchInsert(b *testing.B, size int, local bool) { // Benchmark importing the transactions into the queue b.ResetTimer() for _, batch := range batches { - if local { - pool.addLocals(batch) - } else { - pool.addRemotes(batch) - } - } -} - -func BenchmarkInsertRemoteWithAllLocals(b *testing.B) { - // Allocate keys for testing - key, _ := crypto.GenerateKey() - account := crypto.PubkeyToAddress(key.PublicKey) - - remoteKey, _ := crypto.GenerateKey() - remoteAddr := crypto.PubkeyToAddress(remoteKey.PublicKey) - - locals := make([]*types.Transaction, 4096+1024) // Occupy all slots - for i := 0; i < len(locals); i++ { - locals[i] = transaction(uint64(i), 100000, key) - } - remotes := make([]*types.Transaction, 1000) - for i := 0; i < len(remotes); i++ { - remotes[i] = pricedTransaction(uint64(i), 100000, big.NewInt(2), remoteKey) // Higher gasprice - } - // Benchmark importing the transactions into the queue - b.ResetTimer() - for i := 0; i < b.N; i++ { - 
b.StopTimer() - pool, _ := setupPool() - testAddBalance(pool, account, big.NewInt(100000000)) - for _, local := range locals { - pool.addLocal(local) - } - b.StartTimer() - // Assign a high enough balance for testing - testAddBalance(pool, remoteAddr, big.NewInt(100000000)) - for i := 0; i < len(remotes); i++ { - pool.addRemotes([]*types.Transaction{remotes[i]}) - } - pool.Close() + pool.addRemotes(batch) } } diff --git a/core/txpool/legacypool/list.go b/core/txpool/legacypool/list.go index b749db44d4..736c28ec4a 100644 --- a/core/txpool/legacypool/list.go +++ b/core/txpool/legacypool/list.go @@ -52,31 +52,31 @@ func (h *nonceHeap) Pop() interface{} { return x } -// sortedMap is a nonce->transaction hash map with a heap based index to allow +// SortedMap is a nonce->transaction hash map with a heap based index to allow // iterating over the contents in a nonce-incrementing way. -type sortedMap struct { +type SortedMap struct { items map[uint64]*types.Transaction // Hash map storing the transaction data index *nonceHeap // Heap of nonces of all the stored transactions (non-strict mode) cache types.Transactions // Cache of the transactions already sorted cacheMu sync.Mutex // Mutex covering the cache } -// newSortedMap creates a new nonce-sorted transaction map. -func newSortedMap() *sortedMap { - return &sortedMap{ +// NewSortedMap creates a new nonce-sorted transaction map. +func NewSortedMap() *SortedMap { + return &SortedMap{ items: make(map[uint64]*types.Transaction), index: new(nonceHeap), } } // Get retrieves the current transactions associated with the given nonce. -func (m *sortedMap) Get(nonce uint64) *types.Transaction { +func (m *SortedMap) Get(nonce uint64) *types.Transaction { return m.items[nonce] } // Put inserts a new transaction into the map, also updating the map's nonce // index. If a transaction already exists with the same nonce, it's overwritten. 
-func (m *sortedMap) Put(tx *types.Transaction) { +func (m *SortedMap) Put(tx *types.Transaction) { nonce := tx.Nonce() if m.items[nonce] == nil { heap.Push(m.index, nonce) @@ -89,7 +89,7 @@ func (m *sortedMap) Put(tx *types.Transaction) { // Forward removes all transactions from the map with a nonce lower than the // provided threshold. Every removed transaction is returned for any post-removal // maintenance. -func (m *sortedMap) Forward(threshold uint64) types.Transactions { +func (m *SortedMap) Forward(threshold uint64) types.Transactions { var removed types.Transactions // Pop off heap items until the threshold is reached @@ -112,7 +112,7 @@ func (m *sortedMap) Forward(threshold uint64) types.Transactions { // Filter, as opposed to 'filter', re-initialises the heap after the operation is done. // If you want to do several consecutive filterings, it's therefore better to first // do a .filter(func1) followed by .Filter(func2) or reheap() -func (m *sortedMap) Filter(filter func(*types.Transaction) bool) types.Transactions { +func (m *SortedMap) Filter(filter func(*types.Transaction) bool) types.Transactions { removed := m.filter(filter) // If transactions were removed, the heap and cache are ruined if len(removed) > 0 { @@ -121,7 +121,7 @@ func (m *sortedMap) Filter(filter func(*types.Transaction) bool) types.Transacti return removed } -func (m *sortedMap) reheap() { +func (m *SortedMap) reheap() { *m.index = make([]uint64, 0, len(m.items)) for nonce := range m.items { *m.index = append(*m.index, nonce) @@ -134,7 +134,7 @@ func (m *sortedMap) reheap() { // filter is identical to Filter, but **does not** regenerate the heap. 
This method // should only be used if followed immediately by a call to Filter or reheap() -func (m *sortedMap) filter(filter func(*types.Transaction) bool) types.Transactions { +func (m *SortedMap) filter(filter func(*types.Transaction) bool) types.Transactions { var removed types.Transactions // Collect all the transactions to filter out @@ -154,7 +154,7 @@ func (m *sortedMap) filter(filter func(*types.Transaction) bool) types.Transacti // Cap places a hard limit on the number of items, returning all transactions // exceeding that limit. -func (m *sortedMap) Cap(threshold int) types.Transactions { +func (m *SortedMap) Cap(threshold int) types.Transactions { // Short circuit if the number of items is under the limit if len(m.items) <= threshold { return nil @@ -181,7 +181,7 @@ func (m *sortedMap) Cap(threshold int) types.Transactions { // Remove deletes a transaction from the maintained map, returning whether the // transaction was found. -func (m *sortedMap) Remove(nonce uint64) bool { +func (m *SortedMap) Remove(nonce uint64) bool { // Short circuit if no transaction is present _, ok := m.items[nonce] if !ok { @@ -209,7 +209,7 @@ func (m *sortedMap) Remove(nonce uint64) bool { // Note, all transactions with nonces lower than start will also be returned to // prevent getting into an invalid state. This is not something that should ever // happen but better to be self correcting than failing! -func (m *sortedMap) Ready(start uint64) types.Transactions { +func (m *SortedMap) Ready(start uint64) types.Transactions { // Short circuit if no transactions are available if m.index.Len() == 0 || (*m.index)[0] > start { return nil @@ -229,11 +229,11 @@ func (m *sortedMap) Ready(start uint64) types.Transactions { } // Len returns the length of the transaction map. 
-func (m *sortedMap) Len() int { +func (m *SortedMap) Len() int { return len(m.items) } -func (m *sortedMap) flatten() types.Transactions { +func (m *SortedMap) flatten() types.Transactions { m.cacheMu.Lock() defer m.cacheMu.Unlock() // If the sorting was not cached yet, create and cache it @@ -250,7 +250,7 @@ func (m *sortedMap) flatten() types.Transactions { // Flatten creates a nonce-sorted slice of transactions based on the loosely // sorted internal representation. The result of the sorting is cached in case // it's requested again before any modifications are made to the contents. -func (m *sortedMap) Flatten() types.Transactions { +func (m *SortedMap) Flatten() types.Transactions { cache := m.flatten() // Copy the cache to prevent accidental modification txs := make(types.Transactions, len(cache)) @@ -260,7 +260,7 @@ func (m *sortedMap) Flatten() types.Transactions { // LastElement returns the last element of a flattened list, thus, the // transaction with the highest nonce -func (m *sortedMap) LastElement() *types.Transaction { +func (m *SortedMap) LastElement() *types.Transaction { cache := m.flatten() return cache[len(cache)-1] } @@ -271,7 +271,7 @@ func (m *sortedMap) LastElement() *types.Transaction { // executable/future queue, with minor behavioral changes. 
type list struct { strict bool // Whether nonces are strictly continuous or not - txs *sortedMap // Heap indexed sorted hash map of the transactions + txs *SortedMap // Heap indexed sorted hash map of the transactions costcap *uint256.Int // Price of the highest costing transaction (reset only if exceeds balance) gascap uint64 // Gas limit of the highest spending transaction (reset only if exceeds block limit) @@ -283,7 +283,7 @@ type list struct { func newList(strict bool) *list { return &list{ strict: strict, - txs: newSortedMap(), + txs: NewSortedMap(), costcap: new(uint256.Int), totalcost: new(uint256.Int), } @@ -556,10 +556,7 @@ func newPricedList(all *lookup) *pricedList { } // Put inserts a new transaction into the heap. -func (l *pricedList) Put(tx *types.Transaction, local bool) { - if local { - return - } +func (l *pricedList) Put(tx *types.Transaction) { // Insert every new transaction to the urgent heap first; Discard will balance the heaps heap.Push(&l.urgent, tx) } @@ -593,7 +590,7 @@ func (l *pricedList) underpricedFor(h *priceHeap, tx *types.Transaction) bool { // Discard stale price points if found at the heap start for len(h.list) > 0 { head := h.list[0] - if l.all.GetRemote(head.Hash()) == nil { // Removed or migrated + if l.all.Get(head.Hash()) == nil { // Removed or migrated l.stales.Add(-1) heap.Pop(h) continue @@ -612,15 +609,13 @@ func (l *pricedList) underpricedFor(h *priceHeap, tx *types.Transaction) bool { // Discard finds a number of most underpriced transactions, removes them from the // priced list and returns them for further removal from the entire pool. // If noPending is set to true, we will only consider the floating list -// -// Note local transaction won't be considered for eviction. 
-func (l *pricedList) Discard(slots int, force bool) (types.Transactions, bool) { +func (l *pricedList) Discard(slots int) (types.Transactions, bool) { drop := make(types.Transactions, 0, slots) // Remote underpriced transactions to drop for slots > 0 { if len(l.urgent.list)*floatingRatio > len(l.floating.list)*urgentRatio { // Discard stale transactions if found during cleanup tx := heap.Pop(&l.urgent).(*types.Transaction) - if l.all.GetRemote(tx.Hash()) == nil { // Removed or migrated + if l.all.Get(tx.Hash()) == nil { // Removed or migrated l.stales.Add(-1) continue } @@ -633,7 +628,7 @@ func (l *pricedList) Discard(slots int, force bool) (types.Transactions, bool) { } // Discard stale transactions if found during cleanup tx := heap.Pop(&l.floating).(*types.Transaction) - if l.all.GetRemote(tx.Hash()) == nil { // Removed or migrated + if l.all.Get(tx.Hash()) == nil { // Removed or migrated l.stales.Add(-1) continue } @@ -643,7 +638,7 @@ func (l *pricedList) Discard(slots int, force bool) (types.Transactions, bool) { } } // If we still can't make enough room for the new transaction - if slots > 0 && !force { + if slots > 0 { for _, tx := range drop { heap.Push(&l.urgent, tx) } @@ -658,11 +653,11 @@ func (l *pricedList) Reheap() { defer l.reheapMu.Unlock() start := time.Now() l.stales.Store(0) - l.urgent.list = make([]*types.Transaction, 0, l.all.RemoteCount()) - l.all.Range(func(hash common.Hash, tx *types.Transaction, local bool) bool { + l.urgent.list = make([]*types.Transaction, 0, l.all.Count()) + l.all.Range(func(hash common.Hash, tx *types.Transaction) bool { l.urgent.list = append(l.urgent.list, tx) return true - }, false, true) // Only iterate remotes + }) heap.Init(&l.urgent) // balance out the two heaps by moving the worse half of transactions into the diff --git a/core/txpool/legacypool/journal.go b/core/txpool/locals/journal.go similarity index 99% rename from core/txpool/legacypool/journal.go rename to core/txpool/locals/journal.go index 
899ed00bcc..46fd6de346 100644 --- a/core/txpool/legacypool/journal.go +++ b/core/txpool/locals/journal.go @@ -14,7 +14,7 @@ // You should have received a copy of the GNU Lesser General Public License // along with the go-ethereum library. If not, see . -package legacypool +package locals import ( "errors" diff --git a/core/txpool/locals/tx_tracker.go b/core/txpool/locals/tx_tracker.go new file mode 100644 index 0000000000..a24fcb1f4e --- /dev/null +++ b/core/txpool/locals/tx_tracker.go @@ -0,0 +1,212 @@ +// Copyright 2023 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . 
+ +// Package locals implements tracking for "local" transactions +package locals + +import ( + "sync" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/txpool" + "github.com/ethereum/go-ethereum/core/txpool/legacypool" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/metrics" + "github.com/ethereum/go-ethereum/params" + "golang.org/x/exp/slices" +) + +var ( + recheckInterval = time.Minute + localGauge = metrics.GetOrRegisterGauge("txpool/local", nil) +) + +// TxTracker is a struct used to track priority transactions; it will check from +// time to time if the main pool has forgotten about any of the transaction +// it is tracking, and if so, submit it again. +// This is used to track 'locals'. +// This struct does not care about transaction validity, price-bumps or account limits, +// but optimistically accepts transactions. +type TxTracker struct { + all map[common.Hash]*types.Transaction // All tracked transactions + byAddr map[common.Address]*legacypool.SortedMap // Transactions by address + + journal *journal // Journal of local transaction to back up to disk + rejournal time.Duration // How often to rotate journal + pool *txpool.TxPool // The tx pool to interact with + signer types.Signer + + shutdownCh chan struct{} + mu sync.Mutex + wg sync.WaitGroup +} + +// New creates a new TxTracker +func New(journalPath string, journalTime time.Duration, chainConfig *params.ChainConfig, next *txpool.TxPool) *TxTracker { + pool := &TxTracker{ + all: make(map[common.Hash]*types.Transaction), + byAddr: make(map[common.Address]*legacypool.SortedMap), + signer: types.LatestSigner(chainConfig), + shutdownCh: make(chan struct{}), + pool: next, + } + if journalPath != "" { + pool.journal = newTxJournal(journalPath) + pool.rejournal = journalTime + } + return pool +} + +// Track adds a transaction to the tracked set. +// Note: blob-type transactions are ignored. 
+func (tracker *TxTracker) Track(tx *types.Transaction) { + tracker.TrackAll([]*types.Transaction{tx}) +} + +// TrackAll adds a list of transactions to the tracked set. +// Note: blob-type transactions are ignored. +func (tracker *TxTracker) TrackAll(txs []*types.Transaction) { + tracker.mu.Lock() + defer tracker.mu.Unlock() + + for _, tx := range txs { + if tx.Type() == types.BlobTxType { + continue + } + // If we're already tracking it, it's a no-op + if _, ok := tracker.all[tx.Hash()]; ok { + continue + } + addr, err := types.Sender(tracker.signer, tx) + if err != nil { // Ignore this tx + continue + } + tracker.all[tx.Hash()] = tx + if tracker.byAddr[addr] == nil { + tracker.byAddr[addr] = legacypool.NewSortedMap() + } + tracker.byAddr[addr].Put(tx) + + if tracker.journal != nil { + _ = tracker.journal.insert(tx) + } + } + localGauge.Update(int64(len(tracker.all))) +} + +// recheck checks and returns any transactions that needs to be resubmitted. +func (tracker *TxTracker) recheck(journalCheck bool) (resubmits []*types.Transaction, rejournal map[common.Address]types.Transactions) { + tracker.mu.Lock() + defer tracker.mu.Unlock() + + var ( + numStales = 0 + numOk = 0 + ) + for sender, txs := range tracker.byAddr { + // Wipe the stales + stales := txs.Forward(tracker.pool.Nonce(sender)) + for _, tx := range stales { + delete(tracker.all, tx.Hash()) + } + numStales += len(stales) + + // Check the non-stale + for _, tx := range txs.Flatten() { + if tracker.pool.Has(tx.Hash()) { + numOk++ + continue + } + resubmits = append(resubmits, tx) + } + } + + if journalCheck { // rejournal + rejournal = make(map[common.Address]types.Transactions) + for _, tx := range tracker.all { + addr, _ := types.Sender(tracker.signer, tx) + rejournal[addr] = append(rejournal[addr], tx) + } + // Sort them + for _, list := range rejournal { + // cmp(a, b) should return a negative number when a < b, + slices.SortFunc(list, func(a, b *types.Transaction) int { + return int(a.Nonce() - 
b.Nonce()) + }) + } + } + localGauge.Update(int64(len(tracker.all))) + log.Debug("Tx tracker status", "need-resubmit", len(resubmits), "stale", numStales, "ok", numOk) + return resubmits, rejournal +} + +// Start implements node.Lifecycle interface +// Start is called after all services have been constructed and the networking +// layer was also initialized to spawn any goroutines required by the service. +func (tracker *TxTracker) Start() error { + tracker.wg.Add(1) + go tracker.loop() + return nil +} + +// Stop implements node.Lifecycle interface +// Stop terminates all goroutines belonging to the service, blocking until they +// are all terminated. +func (tracker *TxTracker) Stop() error { + close(tracker.shutdownCh) + tracker.wg.Wait() + return nil +} + +func (tracker *TxTracker) loop() { + defer tracker.wg.Done() + + if tracker.journal != nil { + tracker.journal.load(func(transactions []*types.Transaction) []error { + tracker.TrackAll(transactions) + return nil + }) + defer tracker.journal.close() + } + var ( + lastJournal = time.Now() + timer = time.NewTimer(10 * time.Second) // Do initial check after 10 seconds, do rechecks more seldom. 
+ ) + for { + select { + case <-tracker.shutdownCh: + return + case <-timer.C: + checkJournal := tracker.journal != nil && time.Since(lastJournal) > tracker.rejournal + resubmits, rejournal := tracker.recheck(checkJournal) + if len(resubmits) > 0 { + tracker.pool.Add(resubmits, false) + } + if checkJournal { + // Lock to prevent journal.rotate <-> journal.insert (via TrackAll) conflicts + tracker.mu.Lock() + lastJournal = time.Now() + if err := tracker.journal.rotate(rejournal); err != nil { + log.Warn("Transaction journal rotation failed", "err", err) + } + tracker.mu.Unlock() + } + timer.Reset(recheckInterval) + } + } +} diff --git a/core/txpool/subpool.go b/core/txpool/subpool.go index 9ee0a69c0b..5ad0f5b0e0 100644 --- a/core/txpool/subpool.go +++ b/core/txpool/subpool.go @@ -132,7 +132,7 @@ type SubPool interface { // Add enqueues a batch of transactions into the pool if they are valid. Due // to the large transaction churn, add may postpone fully integrating the tx // to a later point to batch multiple ones together. - Add(txs []*types.Transaction, local bool, sync bool) []error + Add(txs []*types.Transaction, sync bool) []error // Pending retrieves all currently processable transactions, grouped by origin // account and sorted by nonce. @@ -162,9 +162,6 @@ type SubPool interface { // pending as well as queued transactions of this address, grouped by nonce. ContentFrom(addr common.Address) ([]*types.Transaction, []*types.Transaction) - // Locals retrieves the accounts currently considered local by the pool. - Locals() []common.Address - // Status returns the known status (unknown/pending/queued) of a transaction // identified by their hashes. 
Status(hash common.Hash) TxStatus diff --git a/core/txpool/txpool.go b/core/txpool/txpool.go index 182706d63c..361dbe8b38 100644 --- a/core/txpool/txpool.go +++ b/core/txpool/txpool.go @@ -328,7 +328,7 @@ func (p *TxPool) GetBlobs(vhashes []common.Hash) ([]*kzg4844.Blob, []*kzg4844.Pr // Add enqueues a batch of transactions into the pool if they are valid. Due // to the large transaction churn, add may postpone fully integrating the tx // to a later point to batch multiple ones together. -func (p *TxPool) Add(txs []*types.Transaction, local bool, sync bool) []error { +func (p *TxPool) Add(txs []*types.Transaction, sync bool) []error { // Split the input transactions between the subpools. It shouldn't really // happen that we receive merged batches, but better graceful than strange // errors. @@ -355,7 +355,7 @@ func (p *TxPool) Add(txs []*types.Transaction, local bool, sync bool) []error { // back the errors into the original sort order. errsets := make([][]error, len(p.subpools)) for i := 0; i < len(p.subpools); i++ { - errsets[i] = p.subpools[i].Add(txsets[i], local, sync) + errsets[i] = p.subpools[i].Add(txsets[i], sync) } errs := make([]error, len(txs)) for i, split := range splits { @@ -456,23 +456,6 @@ func (p *TxPool) ContentFrom(addr common.Address) ([]*types.Transaction, []*type return []*types.Transaction{}, []*types.Transaction{} } -// Locals retrieves the accounts currently considered local by the pool. -func (p *TxPool) Locals() []common.Address { - // Retrieve the locals from each subpool and deduplicate them - locals := make(map[common.Address]struct{}) - for _, subpool := range p.subpools { - for _, local := range subpool.Locals() { - locals[local] = struct{}{} - } - } - // Flatten and return the deduplicated local set - flat := make([]common.Address, 0, len(locals)) - for local := range locals { - flat = append(flat, local) - } - return flat -} - // Status returns the known status (unknown/pending/queued) of a transaction // identified by its hash. 
func (p *TxPool) Status(hash common.Hash) TxStatus { diff --git a/eth/api_backend.go b/eth/api_backend.go index 52ecd91789..66621190dd 100644 --- a/eth/api_backend.go +++ b/eth/api_backend.go @@ -272,7 +272,10 @@ func (b *EthAPIBackend) SubscribeLogsEvent(ch chan<- []*types.Log) event.Subscri } func (b *EthAPIBackend) SendTx(ctx context.Context, signedTx *types.Transaction) error { - return b.eth.txPool.Add([]*types.Transaction{signedTx}, true, false)[0] + if locals := b.eth.localTxTracker; locals != nil { + locals.Track(signedTx) + } + return b.eth.txPool.Add([]*types.Transaction{signedTx}, false)[0] } func (b *EthAPIBackend) GetPoolTransactions() (types.Transactions, error) { diff --git a/eth/backend.go b/eth/backend.go index a3aa0a7b9b..fea7e4e1fe 100644 --- a/eth/backend.go +++ b/eth/backend.go @@ -23,6 +23,7 @@ import ( "math/big" "runtime" "sync" + "time" "github.com/ethereum/go-ethereum/accounts" "github.com/ethereum/go-ethereum/common" @@ -35,6 +36,7 @@ import ( "github.com/ethereum/go-ethereum/core/txpool" "github.com/ethereum/go-ethereum/core/txpool/blobpool" "github.com/ethereum/go-ethereum/core/txpool/legacypool" + "github.com/ethereum/go-ethereum/core/txpool/locals" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/eth/downloader" @@ -67,9 +69,10 @@ type Config = ethconfig.Config // Ethereum implements the Ethereum full node service. 
type Ethereum struct { // core protocol objects - config *ethconfig.Config - txPool *txpool.TxPool - blockchain *core.BlockChain + config *ethconfig.Config + txPool *txpool.TxPool + localTxTracker *locals.TxTracker + blockchain *core.BlockChain handler *handler discmix *enode.FairMix @@ -237,6 +240,16 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) { if err != nil { return nil, err } + + if !config.TxPool.NoLocals { + rejournal := config.TxPool.Rejournal + if rejournal < time.Second { + log.Warn("Sanitizing invalid txpool journal time", "provided", rejournal, "updated", time.Second) + rejournal = time.Second + } + eth.localTxTracker = locals.New(config.TxPool.Journal, rejournal, eth.blockchain.Config(), eth.txPool) + stack.RegisterLifecycle(eth.localTxTracker) + } // Permit the downloader to use the trie cache allowance during fast sync cacheLimit := cacheConfig.TrieCleanLimit + cacheConfig.TrieDirtyLimit + cacheConfig.SnapshotLimit if eth.handler, err = newHandler(&handlerConfig{ @@ -255,6 +268,7 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) { eth.miner = miner.New(eth, config.Miner, eth.engine) eth.miner.SetExtra(makeExtraData(config.Miner.ExtraData)) + eth.miner.SetPrioAddresses(config.TxPool.Locals) eth.APIBackend = &EthAPIBackend{stack.Config().ExtRPCEnabled(), stack.Config().AllowUnprotectedTxs, eth, nil} if eth.APIBackend.allowUnprotectedTxs { diff --git a/eth/catalyst/api_test.go b/eth/catalyst/api_test.go index 9840d9c6ad..8fc3361192 100644 --- a/eth/catalyst/api_test.go +++ b/eth/catalyst/api_test.go @@ -115,7 +115,7 @@ func TestEth2AssembleBlock(t *testing.T) { if err != nil { t.Fatalf("error signing transaction, err=%v", err) } - ethservice.TxPool().Add([]*types.Transaction{tx}, true, true) + ethservice.TxPool().Add([]*types.Transaction{tx}, true) blockParams := engine.PayloadAttributes{ Timestamp: blocks[9].Time() + 5, } @@ -152,7 +152,7 @@ func TestEth2AssembleBlockWithAnotherBlocksTxs(t 
*testing.T) { // Put the 10th block's tx in the pool and produce a new block txs := blocks[9].Transactions() - api.eth.TxPool().Add(txs, false, true) + api.eth.TxPool().Add(txs, true) blockParams := engine.PayloadAttributes{ Timestamp: blocks[8].Time() + 5, } @@ -174,7 +174,7 @@ func TestEth2PrepareAndGetPayload(t *testing.T) { // Put the 10th block's tx in the pool and produce a new block txs := blocks[9].Transactions() - ethservice.TxPool().Add(txs, true, true) + ethservice.TxPool().Add(txs, true) blockParams := engine.PayloadAttributes{ Timestamp: blocks[8].Time() + 5, } @@ -294,7 +294,7 @@ func TestEth2NewBlock(t *testing.T) { statedb, _ := ethservice.BlockChain().StateAt(parent.Root()) nonce := statedb.GetNonce(testAddr) tx, _ := types.SignTx(types.NewContractCreation(nonce, new(big.Int), 1000000, big.NewInt(2*params.InitialBaseFee), logCode), types.LatestSigner(ethservice.BlockChain().Config()), testKey) - ethservice.TxPool().Add([]*types.Transaction{tx}, true, true) + ethservice.TxPool().Add([]*types.Transaction{tx}, true) execData, err := assembleWithTransactions(api, parent.Hash(), &engine.PayloadAttributes{ Timestamp: parent.Time() + 5, @@ -463,7 +463,7 @@ func TestFullAPI(t *testing.T) { statedb, _ := ethservice.BlockChain().StateAt(parent.Root) nonce := statedb.GetNonce(testAddr) tx, _ := types.SignTx(types.NewContractCreation(nonce, new(big.Int), 1000000, big.NewInt(2*params.InitialBaseFee), logCode), types.LatestSigner(ethservice.BlockChain().Config()), testKey) - ethservice.TxPool().Add([]*types.Transaction{tx}, true, false) + ethservice.TxPool().Add([]*types.Transaction{tx}, false) } setupBlocks(t, ethservice, 10, parent, callback, nil, nil) @@ -594,7 +594,7 @@ func TestNewPayloadOnInvalidChain(t *testing.T) { GasPrice: big.NewInt(2 * params.InitialBaseFee), Data: logCode, }) - ethservice.TxPool().Add([]*types.Transaction{tx}, false, true) + ethservice.TxPool().Add([]*types.Transaction{tx}, true) var ( params = engine.PayloadAttributes{ Timestamp: 
parent.Time + 1, @@ -1246,8 +1246,8 @@ func setupBodies(t *testing.T) (*node.Node, *eth.Ethereum, []*types.Block) { // Create tx to trigger deposit generator. tx2, _ = types.SignTx(types.NewTransaction(statedb.GetNonce(testAddr)+1, ethservice.APIBackend.ChainConfig().DepositContractAddress, new(big.Int), 500000, big.NewInt(2*params.InitialBaseFee), nil), types.LatestSigner(ethservice.BlockChain().Config()), testKey) ) - ethservice.TxPool().Add([]*types.Transaction{tx1}, false, false) - ethservice.TxPool().Add([]*types.Transaction{tx2}, false, false) + ethservice.TxPool().Add([]*types.Transaction{tx1}, false) + ethservice.TxPool().Add([]*types.Transaction{tx2}, false) } // Make some withdrawals to include. @@ -1637,7 +1637,7 @@ func TestWitnessCreationAndConsumption(t *testing.T) { // Put the 10th block's tx in the pool and produce a new block txs := blocks[9].Transactions() - ethservice.TxPool().Add(txs, true, true) + ethservice.TxPool().Add(txs, true) blockParams := engine.PayloadAttributes{ Timestamp: blocks[8].Time() + 5, Withdrawals: make([]*types.Withdrawal, 0), diff --git a/eth/catalyst/simulated_beacon_test.go b/eth/catalyst/simulated_beacon_test.go index 79d9ba738e..ea35482896 100644 --- a/eth/catalyst/simulated_beacon_test.go +++ b/eth/catalyst/simulated_beacon_test.go @@ -18,6 +18,7 @@ package catalyst import ( "context" + "fmt" "math/big" "testing" "time" @@ -143,9 +144,14 @@ func TestSimulatedBeaconSendWithdrawals(t *testing.T) { // Tests that zero-period dev mode can handle a lot of simultaneous // transactions/withdrawals func TestOnDemandSpam(t *testing.T) { + // This test is flaky, due to various causes, and the root cause is synchronicity. + // We have optimistic timeouts here and there in the simulated beacon and the worker. + // This test typically fails on 32-bit windows appveyor.
+ t.Skip("flaky test") var ( withdrawals []types.Withdrawal - txs = make(map[common.Hash]*types.Transaction) + txCount = 20000 + wxCount = 20 testKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") testAddr = crypto.PubkeyToAddress(testKey.PublicKey) gasLimit uint64 = 10_000_000 @@ -160,7 +166,7 @@ func TestOnDemandSpam(t *testing.T) { defer sub.Unsubscribe() // generate some withdrawals - for i := 0; i < 20; i++ { + for i := 0; i < wxCount; i++ { withdrawals = append(withdrawals, types.Withdrawal{Index: uint64(i)}) if err := mock.withdrawals.add(&withdrawals[i]); err != nil { t.Fatal("addWithdrawal failed", err) @@ -168,37 +174,37 @@ func TestOnDemandSpam(t *testing.T) { } // generate a bunch of transactions - for i := 0; i < 20000; i++ { - tx, err := types.SignTx(types.NewTransaction(uint64(i), common.Address{byte(i), byte(1)}, big.NewInt(1000), params.TxGas, big.NewInt(params.InitialBaseFee*2), nil), signer, testKey) - if err != nil { - t.Fatal("error signing transaction", err) + go func() { + for i := 0; i < txCount; i++ { + tx, err := types.SignTx(types.NewTransaction(uint64(i), common.Address{byte(i), byte(1)}, big.NewInt(1000), params.TxGas, big.NewInt(params.InitialBaseFee*2), nil), signer, testKey) + if err != nil { + panic(fmt.Sprintf("error signing transaction: %v", err)) + } + if err := eth.TxPool().Add([]*types.Transaction{tx}, false)[0]; err != nil { + panic(fmt.Sprintf("error adding txs to pool: %v", err)) + } } - txs[tx.Hash()] = tx - if err := eth.APIBackend.SendTx(context.Background(), tx); err != nil { - t.Fatal("error adding txs to pool", err) - } - } - + }() var ( - includedTxs = make(map[common.Hash]struct{}) - includedWxs []uint64 + includedTxs int + includedWxs int + abort = time.NewTimer(10 * time.Second) ) + defer abort.Stop() for { select { case ev := <-chainHeadCh: block := eth.BlockChain().GetBlock(ev.Header.Hash(), ev.Header.Number.Uint64()) - for _, itx := range block.Transactions() { - 
includedTxs[itx.Hash()] = struct{}{} - } - for _, iwx := range block.Withdrawals() { - includedWxs = append(includedWxs, iwx.Index) - } + includedTxs += len(block.Transactions()) + includedWxs += len(block.Withdrawals()) // ensure all withdrawals/txs included. this will take two blocks b/c number of withdrawals > 10 - if len(includedTxs) == len(txs) && len(includedWxs) == len(withdrawals) { + if includedTxs == txCount && includedWxs == wxCount { return } - case <-time.After(10 * time.Second): - t.Fatalf("timed out without including all withdrawals/txs: have txs %d, want %d, have wxs %d, want %d", len(includedTxs), len(txs), len(includedWxs), len(withdrawals)) + abort.Reset(10 * time.Second) + case <-abort.C: + t.Fatalf("timed out without including all withdrawals/txs: have txs %d, want %d, have wxs %d, want %d", + includedTxs, txCount, includedWxs, wxCount) } } } diff --git a/eth/handler.go b/eth/handler.go index 8893920497..6ac890902b 100644 --- a/eth/handler.go +++ b/eth/handler.go @@ -68,7 +68,7 @@ type txPool interface { Get(hash common.Hash) *types.Transaction // Add should add the given transactions to the pool. - Add(txs []*types.Transaction, local bool, sync bool) []error + Add(txs []*types.Transaction, sync bool) []error // Pending should return pending transactions. // The slice should be modifiable by the caller. 
@@ -189,7 +189,7 @@ func newHandler(config *handlerConfig) (*handler, error) { return p.RequestTxs(hashes) } addTxs := func(txs []*types.Transaction) []error { - return h.txpool.Add(txs, false, false) + return h.txpool.Add(txs, false) } h.txFetcher = fetcher.NewTxFetcher(h.txpool.Has, addTxs, fetchTx, h.removePeer) return h, nil diff --git a/eth/handler_eth_test.go b/eth/handler_eth_test.go index 622880b097..8d572ca966 100644 --- a/eth/handler_eth_test.go +++ b/eth/handler_eth_test.go @@ -299,8 +299,8 @@ func testSendTransactions(t *testing.T, protocol uint) { tx, _ = types.SignTx(tx, types.HomesteadSigner{}, testKey) insert[nonce] = tx } - go handler.txpool.Add(insert, false, false) // Need goroutine to not block on feed - time.Sleep(250 * time.Millisecond) // Wait until tx events get out of the system (can't use events, tx broadcaster races with peer join) + go handler.txpool.Add(insert, false) // Need goroutine to not block on feed + time.Sleep(250 * time.Millisecond) // Wait until tx events get out of the system (can't use events, tx broadcaster races with peer join) // Create a source handler to send messages through and a sink peer to receive them p2pSrc, p2pSink := p2p.MsgPipe() @@ -419,7 +419,7 @@ func testTransactionPropagation(t *testing.T, protocol uint) { tx, _ = types.SignTx(tx, types.HomesteadSigner{}, testKey) txs[nonce] = tx } - source.txpool.Add(txs, false, false) + source.txpool.Add(txs, false) // Iterate through all the sinks and ensure they all got the transactions for i := range sinks { diff --git a/eth/handler_test.go b/eth/handler_test.go index b63d3e8592..d5d46a3c65 100644 --- a/eth/handler_test.go +++ b/eth/handler_test.go @@ -80,7 +80,7 @@ func (p *testTxPool) Get(hash common.Hash) *types.Transaction { // Add appends a batch of transactions to the pool, and notifies any // listeners if the addition channel is non nil -func (p *testTxPool) Add(txs []*types.Transaction, local bool, sync bool) []error { +func (p *testTxPool) Add(txs 
[]*types.Transaction, sync bool) []error { p.lock.Lock() defer p.lock.Unlock() diff --git a/miner/miner.go b/miner/miner.go index 9892c08ed6..595ef8081c 100644 --- a/miner/miner.go +++ b/miner/miner.go @@ -70,6 +70,7 @@ type Miner struct { chainConfig *params.ChainConfig engine consensus.Engine txpool *txpool.TxPool + prio []common.Address // A list of senders to prioritize chain *core.BlockChain pending *pending pendingMu sync.Mutex // Lock protects the pending block @@ -109,6 +110,13 @@ func (miner *Miner) SetExtra(extra []byte) error { return nil } +// SetPrioAddresses sets a list of addresses to prioritize for transaction inclusion. +func (miner *Miner) SetPrioAddresses(prio []common.Address) { + miner.confMu.Lock() + miner.prio = prio + miner.confMu.Unlock() +} + // SetGasCeil sets the gaslimit to strive for when mining blocks post 1559. // For pre-1559 blocks, it sets the ceiling. func (miner *Miner) SetGasCeil(ceil uint64) { diff --git a/miner/payload_building_test.go b/miner/payload_building_test.go index e5eb0297a1..307024c6bb 100644 --- a/miner/payload_building_test.go +++ b/miner/payload_building_test.go @@ -138,7 +138,7 @@ func (b *testWorkerBackend) TxPool() *txpool.TxPool { return b.txPool } func newTestWorker(t *testing.T, chainConfig *params.ChainConfig, engine consensus.Engine, db ethdb.Database, blocks int) (*Miner, *testWorkerBackend) { backend := newTestWorkerBackend(t, chainConfig, engine, db, blocks) - backend.txPool.Add(pendingTxs, true, true) + backend.txPool.Add(pendingTxs, true) w := New(backend, testConfig, engine) return w, backend } diff --git a/miner/worker.go b/miner/worker.go index f8f4bae833..16ac7de9a9 100644 --- a/miner/worker.go +++ b/miner/worker.go @@ -427,6 +427,7 @@ func (miner *Miner) commitTransactions(env *environment, plainTxs, blobTxs *tran func (miner *Miner) fillTransactions(interrupt *atomic.Int32, env *environment) error { miner.confMu.RLock() tip := miner.config.GasPrice + prio := miner.prio miner.confMu.RUnlock() 
// Retrieve the pending transactions pre-filtered by the 1559/4844 dynamic fees @@ -446,31 +447,31 @@ func (miner *Miner) fillTransactions(interrupt *atomic.Int32, env *environment) pendingBlobTxs := miner.txpool.Pending(filter) // Split the pending transactions into locals and remotes. - localPlainTxs, remotePlainTxs := make(map[common.Address][]*txpool.LazyTransaction), pendingPlainTxs - localBlobTxs, remoteBlobTxs := make(map[common.Address][]*txpool.LazyTransaction), pendingBlobTxs + prioPlainTxs, normalPlainTxs := make(map[common.Address][]*txpool.LazyTransaction), pendingPlainTxs + prioBlobTxs, normalBlobTxs := make(map[common.Address][]*txpool.LazyTransaction), pendingBlobTxs - for _, account := range miner.txpool.Locals() { - if txs := remotePlainTxs[account]; len(txs) > 0 { - delete(remotePlainTxs, account) - localPlainTxs[account] = txs + for _, account := range prio { + if txs := normalPlainTxs[account]; len(txs) > 0 { + delete(normalPlainTxs, account) + prioPlainTxs[account] = txs } - if txs := remoteBlobTxs[account]; len(txs) > 0 { - delete(remoteBlobTxs, account) - localBlobTxs[account] = txs + if txs := normalBlobTxs[account]; len(txs) > 0 { + delete(normalBlobTxs, account) + prioBlobTxs[account] = txs } } // Fill the block with all available pending transactions. 
- if len(localPlainTxs) > 0 || len(localBlobTxs) > 0 { - plainTxs := newTransactionsByPriceAndNonce(env.signer, localPlainTxs, env.header.BaseFee) - blobTxs := newTransactionsByPriceAndNonce(env.signer, localBlobTxs, env.header.BaseFee) + if len(prioPlainTxs) > 0 || len(prioBlobTxs) > 0 { + plainTxs := newTransactionsByPriceAndNonce(env.signer, prioPlainTxs, env.header.BaseFee) + blobTxs := newTransactionsByPriceAndNonce(env.signer, prioBlobTxs, env.header.BaseFee) if err := miner.commitTransactions(env, plainTxs, blobTxs, interrupt); err != nil { return err } } - if len(remotePlainTxs) > 0 || len(remoteBlobTxs) > 0 { - plainTxs := newTransactionsByPriceAndNonce(env.signer, remotePlainTxs, env.header.BaseFee) - blobTxs := newTransactionsByPriceAndNonce(env.signer, remoteBlobTxs, env.header.BaseFee) + if len(normalPlainTxs) > 0 || len(normalBlobTxs) > 0 { + plainTxs := newTransactionsByPriceAndNonce(env.signer, normalPlainTxs, env.header.BaseFee) + blobTxs := newTransactionsByPriceAndNonce(env.signer, normalBlobTxs, env.header.BaseFee) if err := miner.commitTransactions(env, plainTxs, blobTxs, interrupt); err != nil { return err From 59d2eec9fc5177953eb7798b8fa5c9da5ebf95b5 Mon Sep 17 00:00:00 2001 From: Marius van der Wijden Date: Tue, 4 Feb 2025 18:10:40 +0100 Subject: [PATCH 07/17] core/txpool/blobpool: fix incorrect arguments in test (#31127) Fixes the linter on master which was broken by https://github.com/ethereum/go-ethereum/pull/30559 --- core/txpool/blobpool/blobpool_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/txpool/blobpool/blobpool_test.go b/core/txpool/blobpool/blobpool_test.go index 0187e8d9a0..88d68eb307 100644 --- a/core/txpool/blobpool/blobpool_test.go +++ b/core/txpool/blobpool/blobpool_test.go @@ -1108,7 +1108,7 @@ func TestChangingSlotterSize(t *testing.T) { // Try to add the big blob tx. In the initial iteration it should overflow // the pool. On the subsequent iteration it should be accepted. 
- errs := pool.Add([]*types.Transaction{tx3}, false, true) + errs := pool.Add([]*types.Transaction{tx3}, true) if _, ok := pool.index[addr3]; ok && maxBlobs == 6 { t.Errorf("expected insert of oversized blob tx to fail: blobs=24, maxBlobs=%d, err=%v", maxBlobs, errs[0]) } else if !ok && maxBlobs == 10 { From c4ad459bd2d85137e26d91144f733bbb938de12d Mon Sep 17 00:00:00 2001 From: lightclient <14004106+lightclient@users.noreply.github.com> Date: Tue, 4 Feb 2025 13:43:18 -0700 Subject: [PATCH 08/17] consensus/misc/eip4844: use head's target blobs, not parent (#31101) A clarification was made to EIP-7691 stating that at the fork boundary it is required to use the target blob count associated with the head block, rather than the parent as implemented here. See for more: https://github.com/ethereum/EIPs/pull/9249 --- cmd/evm/internal/t8ntool/execution.go | 2 +- consensus/misc/eip4844/eip4844.go | 6 +++--- consensus/misc/eip4844/eip4844_test.go | 5 ++++- core/chain_makers.go | 2 +- core/state_processor_test.go | 2 +- eth/gasprice/feehistory.go | 2 +- eth/tracers/internal/tracetest/util.go | 5 +++-- internal/ethapi/simulate.go | 2 +- miner/worker.go | 2 +- 9 files changed, 16 insertions(+), 12 deletions(-) diff --git a/cmd/evm/internal/t8ntool/execution.go b/cmd/evm/internal/t8ntool/execution.go index 9332f4901b..1613521b0d 100644 --- a/cmd/evm/internal/t8ntool/execution.go +++ b/cmd/evm/internal/t8ntool/execution.go @@ -194,11 +194,11 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig, ExcessBlobGas: pre.Env.ParentExcessBlobGas, BlobGasUsed: pre.Env.ParentBlobGasUsed, } - excessBlobGas = eip4844.CalcExcessBlobGas(chainConfig, parent) header := &types.Header{ Time: pre.Env.Timestamp, ExcessBlobGas: &excessBlobGas, } + excessBlobGas = eip4844.CalcExcessBlobGas(chainConfig, parent, header) vmContext.BlobBaseFee = eip4844.CalcBlobFee(chainConfig, header) } } diff --git a/consensus/misc/eip4844/eip4844.go b/consensus/misc/eip4844/eip4844.go index 
1d76b21b30..148ea60274 100644 --- a/consensus/misc/eip4844/eip4844.go +++ b/consensus/misc/eip4844/eip4844.go @@ -50,7 +50,7 @@ func VerifyEIP4844Header(config *params.ChainConfig, parent, header *types.Heade return fmt.Errorf("blob gas used %d not a multiple of blob gas per blob %d", header.BlobGasUsed, params.BlobTxBlobGasPerBlob) } // Verify the excessBlobGas is correct based on the parent header - expectedExcessBlobGas := CalcExcessBlobGas(config, parent) + expectedExcessBlobGas := CalcExcessBlobGas(config, parent, header) if *header.ExcessBlobGas != expectedExcessBlobGas { return fmt.Errorf("invalid excessBlobGas: have %d, want %d", *header.ExcessBlobGas, expectedExcessBlobGas) } @@ -59,9 +59,9 @@ func VerifyEIP4844Header(config *params.ChainConfig, parent, header *types.Heade // CalcExcessBlobGas calculates the excess blob gas after applying the set of // blobs on top of the excess blob gas. -func CalcExcessBlobGas(config *params.ChainConfig, parent *types.Header) uint64 { +func CalcExcessBlobGas(config *params.ChainConfig, parent, header *types.Header) uint64 { var ( - targetGas = uint64(targetBlobsPerBlock(config, parent.Time)) * params.BlobTxBlobGasPerBlob + targetGas = uint64(targetBlobsPerBlock(config, header.Time)) * params.BlobTxBlobGasPerBlob parentExcessBlobGas uint64 parentBlobGasUsed uint64 ) diff --git a/consensus/misc/eip4844/eip4844_test.go b/consensus/misc/eip4844/eip4844_test.go index 839ea8df22..7d221aa96e 100644 --- a/consensus/misc/eip4844/eip4844_test.go +++ b/consensus/misc/eip4844/eip4844_test.go @@ -57,12 +57,15 @@ func TestCalcExcessBlobGas(t *testing.T) { } for i, tt := range tests { blobGasUsed := uint64(tt.blobs) * params.BlobTxBlobGasPerBlob + head := &types.Header{ + Time: *config.CancunTime, + } parent := &types.Header{ Time: *config.CancunTime, ExcessBlobGas: &tt.excess, BlobGasUsed: &blobGasUsed, } - result := CalcExcessBlobGas(config, parent) + result := CalcExcessBlobGas(config, parent, head) if result != tt.want { 
t.Errorf("test %d: excess blob gas mismatch: have %v, want %v", i, result, tt.want) } diff --git a/core/chain_makers.go b/core/chain_makers.go index 19f433d7b0..2ade70f175 100644 --- a/core/chain_makers.go +++ b/core/chain_makers.go @@ -600,7 +600,7 @@ func (cm *chainMaker) makeHeader(parent *types.Block, state *state.StateDB, engi } } if cm.config.IsCancun(header.Number, header.Time) { - excessBlobGas := eip4844.CalcExcessBlobGas(cm.config, parent.Header()) + excessBlobGas := eip4844.CalcExcessBlobGas(cm.config, parent.Header(), header) header.ExcessBlobGas = &excessBlobGas header.BlobGasUsed = new(uint64) header.ParentBeaconRoot = new(common.Hash) diff --git a/core/state_processor_test.go b/core/state_processor_test.go index 7d62e90ad4..b0258ae37b 100644 --- a/core/state_processor_test.go +++ b/core/state_processor_test.go @@ -407,7 +407,7 @@ func GenerateBadBlock(parent *types.Block, engine consensus.Engine, txs types.Tr } header.Root = common.BytesToHash(hasher.Sum(nil)) if config.IsCancun(header.Number, header.Time) { - excess := eip4844.CalcExcessBlobGas(config, parent.Header()) + excess := eip4844.CalcExcessBlobGas(config, parent.Header(), header) used := uint64(nBlobs * params.BlobTxBlobGasPerBlob) header.ExcessBlobGas = &excess header.BlobGasUsed = &used diff --git a/eth/gasprice/feehistory.go b/eth/gasprice/feehistory.go index 697263f20b..94fc5b35f9 100644 --- a/eth/gasprice/feehistory.go +++ b/eth/gasprice/feehistory.go @@ -97,7 +97,7 @@ func (oracle *Oracle) processBlock(bf *blockFees, percentiles []float64) { // Fill in blob base fee and next blob base fee. 
if excessBlobGas := bf.header.ExcessBlobGas; excessBlobGas != nil { bf.results.blobBaseFee = eip4844.CalcBlobFee(config, bf.header) - excess := eip4844.CalcExcessBlobGas(config, bf.header) + excess := eip4844.CalcExcessBlobGas(config, bf.header, bf.header) next := &types.Header{Number: bf.header.Number, Time: bf.header.Time, ExcessBlobGas: &excess} bf.results.nextBlobBaseFee = eip4844.CalcBlobFee(config, next) } else { diff --git a/eth/tracers/internal/tracetest/util.go b/eth/tracers/internal/tracetest/util.go index e29144e04e..97896213b4 100644 --- a/eth/tracers/internal/tracetest/util.go +++ b/eth/tracers/internal/tracetest/util.go @@ -54,8 +54,9 @@ func (c *callContext) toBlockContext(genesis *core.Genesis) vm.BlockContext { } if genesis.ExcessBlobGas != nil && genesis.BlobGasUsed != nil { - excess := eip4844.CalcExcessBlobGas(genesis.Config, genesis.ToBlock().Header()) - header := &types.Header{ExcessBlobGas: &excess, Number: genesis.Config.LondonBlock, Time: *genesis.Config.CancunTime} + header := &types.Header{Number: genesis.Config.LondonBlock, Time: *genesis.Config.CancunTime} + excess := eip4844.CalcExcessBlobGas(genesis.Config, header, genesis.ToBlock().Header()) + header.ExcessBlobGas = &excess context.BlobBaseFee = eip4844.CalcBlobFee(genesis.Config, header) } return context diff --git a/internal/ethapi/simulate.go b/internal/ethapi/simulate.go index d364b80485..0262f69c02 100644 --- a/internal/ethapi/simulate.go +++ b/internal/ethapi/simulate.go @@ -159,7 +159,7 @@ func (sim *simulator) processBlock(ctx context.Context, block *simBlock, header, if sim.chainConfig.IsCancun(header.Number, header.Time) { var excess uint64 if sim.chainConfig.IsCancun(parent.Number, parent.Time) { - excess = eip4844.CalcExcessBlobGas(sim.chainConfig, parent) + excess = eip4844.CalcExcessBlobGas(sim.chainConfig, parent, header) } header.ExcessBlobGas = &excess } diff --git a/miner/worker.go b/miner/worker.go index 16ac7de9a9..6ec23ec624 100644 --- a/miner/worker.go +++ 
b/miner/worker.go @@ -210,7 +210,7 @@ func (miner *Miner) prepareWork(genParams *generateParams, witness bool) (*envir if miner.chainConfig.IsCancun(header.Number, header.Time) { var excessBlobGas uint64 if miner.chainConfig.IsCancun(parent.Number, parent.Time) { - excessBlobGas = eip4844.CalcExcessBlobGas(miner.chainConfig, parent) + excessBlobGas = eip4844.CalcExcessBlobGas(miner.chainConfig, parent, header) } header.BlobGasUsed = new(uint64) header.ExcessBlobGas = &excessBlobGas From ed1d46b3d31f250e5385a4b8b8f97293acf8adee Mon Sep 17 00:00:00 2001 From: Felix Lange Date: Wed, 5 Feb 2025 10:35:03 +0100 Subject: [PATCH 09/17] consensus/misc/eip4844: more changes for blob gas calculation (#31128) This PR changes the signature of `CalcExcessBlobGas` to take in just the header timestamp instead of the whole object. It also adds a sanity check for the parent->child block order to `VerifyEIP4844Header`. --- cmd/evm/internal/t8ntool/execution.go | 2 +- consensus/misc/eip4844/eip4844.go | 9 ++++++--- consensus/misc/eip4844/eip4844_test.go | 8 ++------ core/chain_makers.go | 7 ++++--- core/state_processor_test.go | 2 +- eth/gasprice/feehistory.go | 2 +- eth/tracers/internal/tracetest/util.go | 2 +- internal/ethapi/simulate.go | 2 +- miner/worker.go | 2 +- 9 files changed, 18 insertions(+), 18 deletions(-) diff --git a/cmd/evm/internal/t8ntool/execution.go b/cmd/evm/internal/t8ntool/execution.go index 1613521b0d..793b2d425b 100644 --- a/cmd/evm/internal/t8ntool/execution.go +++ b/cmd/evm/internal/t8ntool/execution.go @@ -198,7 +198,7 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig, Time: pre.Env.Timestamp, ExcessBlobGas: &excessBlobGas, } - excessBlobGas = eip4844.CalcExcessBlobGas(chainConfig, parent, header) + excessBlobGas = eip4844.CalcExcessBlobGas(chainConfig, parent, header.Time) vmContext.BlobBaseFee = eip4844.CalcBlobFee(chainConfig, header) } } diff --git a/consensus/misc/eip4844/eip4844.go b/consensus/misc/eip4844/eip4844.go 
index 148ea60274..4a2754b55c 100644 --- a/consensus/misc/eip4844/eip4844.go +++ b/consensus/misc/eip4844/eip4844.go @@ -34,6 +34,9 @@ var ( // if the current block contains no transactions, the excessBlobGas is updated // accordingly. func VerifyEIP4844Header(config *params.ChainConfig, parent, header *types.Header) error { + if header.Number.Uint64() != parent.Number.Uint64()+1 { + panic("bad header pair") + } // Verify the header is not malformed if header.ExcessBlobGas == nil { return errors.New("header is missing excessBlobGas") @@ -50,7 +53,7 @@ func VerifyEIP4844Header(config *params.ChainConfig, parent, header *types.Heade return fmt.Errorf("blob gas used %d not a multiple of blob gas per blob %d", header.BlobGasUsed, params.BlobTxBlobGasPerBlob) } // Verify the excessBlobGas is correct based on the parent header - expectedExcessBlobGas := CalcExcessBlobGas(config, parent, header) + expectedExcessBlobGas := CalcExcessBlobGas(config, parent, header.Time) if *header.ExcessBlobGas != expectedExcessBlobGas { return fmt.Errorf("invalid excessBlobGas: have %d, want %d", *header.ExcessBlobGas, expectedExcessBlobGas) } @@ -59,9 +62,8 @@ func VerifyEIP4844Header(config *params.ChainConfig, parent, header *types.Heade // CalcExcessBlobGas calculates the excess blob gas after applying the set of // blobs on top of the excess blob gas. 
-func CalcExcessBlobGas(config *params.ChainConfig, parent, header *types.Header) uint64 { +func CalcExcessBlobGas(config *params.ChainConfig, parent *types.Header, headTimestamp uint64) uint64 { var ( - targetGas = uint64(targetBlobsPerBlock(config, header.Time)) * params.BlobTxBlobGasPerBlob parentExcessBlobGas uint64 parentBlobGasUsed uint64 ) @@ -70,6 +72,7 @@ func CalcExcessBlobGas(config *params.ChainConfig, parent, header *types.Header) parentBlobGasUsed = *parent.BlobGasUsed } excessBlobGas := parentExcessBlobGas + parentBlobGasUsed + targetGas := uint64(targetBlobsPerBlock(config, headTimestamp)) * params.BlobTxBlobGasPerBlob if excessBlobGas < targetGas { return 0 } diff --git a/consensus/misc/eip4844/eip4844_test.go b/consensus/misc/eip4844/eip4844_test.go index 7d221aa96e..f4e3cb3d9a 100644 --- a/consensus/misc/eip4844/eip4844_test.go +++ b/consensus/misc/eip4844/eip4844_test.go @@ -57,15 +57,11 @@ func TestCalcExcessBlobGas(t *testing.T) { } for i, tt := range tests { blobGasUsed := uint64(tt.blobs) * params.BlobTxBlobGasPerBlob - head := &types.Header{ - Time: *config.CancunTime, - } - parent := &types.Header{ - Time: *config.CancunTime, + header := &types.Header{ ExcessBlobGas: &tt.excess, BlobGasUsed: &blobGasUsed, } - result := CalcExcessBlobGas(config, parent, head) + result := CalcExcessBlobGas(config, header, *config.CancunTime) if result != tt.want { t.Errorf("test %d: excess blob gas mismatch: have %v, want %v", i, result, tt.want) } diff --git a/core/chain_makers.go b/core/chain_makers.go index 2ade70f175..8d09390b72 100644 --- a/core/chain_makers.go +++ b/core/chain_makers.go @@ -582,25 +582,26 @@ func GenerateVerkleChainWithGenesis(genesis *Genesis, engine consensus.Engine, n func (cm *chainMaker) makeHeader(parent *types.Block, state *state.StateDB, engine consensus.Engine) *types.Header { time := parent.Time() + 10 // block time is fixed at 10 seconds + parentHeader := parent.Header() header := &types.Header{ Root: 
state.IntermediateRoot(cm.config.IsEIP158(parent.Number())), ParentHash: parent.Hash(), Coinbase: parent.Coinbase(), - Difficulty: engine.CalcDifficulty(cm, time, parent.Header()), + Difficulty: engine.CalcDifficulty(cm, time, parentHeader), GasLimit: parent.GasLimit(), Number: new(big.Int).Add(parent.Number(), common.Big1), Time: time, } if cm.config.IsLondon(header.Number) { - header.BaseFee = eip1559.CalcBaseFee(cm.config, parent.Header()) + header.BaseFee = eip1559.CalcBaseFee(cm.config, parentHeader) if !cm.config.IsLondon(parent.Number()) { parentGasLimit := parent.GasLimit() * cm.config.ElasticityMultiplier() header.GasLimit = CalcGasLimit(parentGasLimit, parentGasLimit) } } if cm.config.IsCancun(header.Number, header.Time) { - excessBlobGas := eip4844.CalcExcessBlobGas(cm.config, parent.Header(), header) + excessBlobGas := eip4844.CalcExcessBlobGas(cm.config, parentHeader, time) header.ExcessBlobGas = &excessBlobGas header.BlobGasUsed = new(uint64) header.ParentBeaconRoot = new(common.Hash) diff --git a/core/state_processor_test.go b/core/state_processor_test.go index b0258ae37b..a6ca2781f8 100644 --- a/core/state_processor_test.go +++ b/core/state_processor_test.go @@ -407,7 +407,7 @@ func GenerateBadBlock(parent *types.Block, engine consensus.Engine, txs types.Tr } header.Root = common.BytesToHash(hasher.Sum(nil)) if config.IsCancun(header.Number, header.Time) { - excess := eip4844.CalcExcessBlobGas(config, parent.Header(), header) + excess := eip4844.CalcExcessBlobGas(config, parent.Header(), header.Time) used := uint64(nBlobs * params.BlobTxBlobGasPerBlob) header.ExcessBlobGas = &excess header.BlobGasUsed = &used diff --git a/eth/gasprice/feehistory.go b/eth/gasprice/feehistory.go index 94fc5b35f9..fe84950c50 100644 --- a/eth/gasprice/feehistory.go +++ b/eth/gasprice/feehistory.go @@ -97,7 +97,7 @@ func (oracle *Oracle) processBlock(bf *blockFees, percentiles []float64) { // Fill in blob base fee and next blob base fee. 
if excessBlobGas := bf.header.ExcessBlobGas; excessBlobGas != nil { bf.results.blobBaseFee = eip4844.CalcBlobFee(config, bf.header) - excess := eip4844.CalcExcessBlobGas(config, bf.header, bf.header) + excess := eip4844.CalcExcessBlobGas(config, bf.header, bf.header.Time) next := &types.Header{Number: bf.header.Number, Time: bf.header.Time, ExcessBlobGas: &excess} bf.results.nextBlobBaseFee = eip4844.CalcBlobFee(config, next) } else { diff --git a/eth/tracers/internal/tracetest/util.go b/eth/tracers/internal/tracetest/util.go index 97896213b4..524a396d5c 100644 --- a/eth/tracers/internal/tracetest/util.go +++ b/eth/tracers/internal/tracetest/util.go @@ -55,7 +55,7 @@ func (c *callContext) toBlockContext(genesis *core.Genesis) vm.BlockContext { if genesis.ExcessBlobGas != nil && genesis.BlobGasUsed != nil { header := &types.Header{Number: genesis.Config.LondonBlock, Time: *genesis.Config.CancunTime} - excess := eip4844.CalcExcessBlobGas(genesis.Config, header, genesis.ToBlock().Header()) + excess := eip4844.CalcExcessBlobGas(genesis.Config, header, genesis.Timestamp) header.ExcessBlobGas = &excess context.BlobBaseFee = eip4844.CalcBlobFee(genesis.Config, header) } diff --git a/internal/ethapi/simulate.go b/internal/ethapi/simulate.go index 0262f69c02..c461b1f0a1 100644 --- a/internal/ethapi/simulate.go +++ b/internal/ethapi/simulate.go @@ -159,7 +159,7 @@ func (sim *simulator) processBlock(ctx context.Context, block *simBlock, header, if sim.chainConfig.IsCancun(header.Number, header.Time) { var excess uint64 if sim.chainConfig.IsCancun(parent.Number, parent.Time) { - excess = eip4844.CalcExcessBlobGas(sim.chainConfig, parent, header) + excess = eip4844.CalcExcessBlobGas(sim.chainConfig, parent, header.Time) } header.ExcessBlobGas = &excess } diff --git a/miner/worker.go b/miner/worker.go index 6ec23ec624..2c54421196 100644 --- a/miner/worker.go +++ b/miner/worker.go @@ -210,7 +210,7 @@ func (miner *Miner) prepareWork(genParams *generateParams, witness bool) (*envir 
if miner.chainConfig.IsCancun(header.Number, header.Time) { var excessBlobGas uint64 if miner.chainConfig.IsCancun(parent.Number, parent.Time) { - excessBlobGas = eip4844.CalcExcessBlobGas(miner.chainConfig, parent, header) + excessBlobGas = eip4844.CalcExcessBlobGas(miner.chainConfig, parent, timestamp) } header.BlobGasUsed = new(uint64) header.ExcessBlobGas = &excessBlobGas From aaaf01d71232d1b7da5ab2ae9258f7fb9f22b1bf Mon Sep 17 00:00:00 2001 From: Sina M <1591639+s1na@users.noreply.github.com> Date: Wed, 5 Feb 2025 13:58:25 +0100 Subject: [PATCH 10/17] core/tracing: state journal wrapper (#30441) Here we add some more changes for live tracing API v1.1: - Hook `OnSystemCallStartV2` was introduced with `VMContext` as parameter. - Hook `OnBlockHashRead` was introduced. - `GetCodeHash` was added to the state interface - The new `WrapWithJournal` construction helps with tracking EVM reverts in the tracer. --------- Co-authored-by: Felix Lange --- cmd/evm/internal/t8ntool/execution.go | 2 +- core/genesis.go | 4 +- core/state/statedb.go | 2 +- core/state/statedb_fuzz_test.go | 2 +- core/state/statedb_hooked.go | 11 +- core/state/statedb_hooked_test.go | 4 +- core/state/statedb_test.go | 6 +- core/state_transition.go | 4 +- core/tracing/CHANGELOG.md | 47 +++ .../gen_balance_change_reason_stringer.go | 5 +- core/tracing/hooks.go | 47 +++ core/tracing/journal.go | 237 +++++++++++++ core/tracing/journal_test.go | 335 ++++++++++++++++++ core/txpool/blobpool/blobpool_test.go | 8 +- core/txpool/legacypool/legacypool_test.go | 6 +- core/verkle_witness_test.go | 3 +- core/vm/evm.go | 4 +- core/vm/instructions.go | 3 + core/vm/interface.go | 2 +- core/vm/runtime/runtime_test.go | 2 +- eth/tracers/live/noop.go | 3 + internal/ethapi/override/override.go | 2 +- tests/state_test_util.go | 2 +- 23 files changed, 709 insertions(+), 32 deletions(-) create mode 100644 core/tracing/journal.go create mode 100644 core/tracing/journal_test.go diff --git 
a/cmd/evm/internal/t8ntool/execution.go b/cmd/evm/internal/t8ntool/execution.go index 793b2d425b..16c8808360 100644 --- a/cmd/evm/internal/t8ntool/execution.go +++ b/cmd/evm/internal/t8ntool/execution.go @@ -419,7 +419,7 @@ func MakePreState(db ethdb.Database, accounts types.GenesisAlloc) *state.StateDB statedb, _ := state.New(types.EmptyRootHash, sdb) for addr, a := range accounts { statedb.SetCode(addr, a.Code) - statedb.SetNonce(addr, a.Nonce) + statedb.SetNonce(addr, a.Nonce, tracing.NonceChangeGenesis) statedb.SetBalance(addr, uint256.MustFromBig(a.Balance), tracing.BalanceIncreaseGenesisBalance) for k, v := range a.Storage { statedb.SetState(addr, k, v) diff --git a/core/genesis.go b/core/genesis.go index 8f71f9ef1e..8546f4e37e 100644 --- a/core/genesis.go +++ b/core/genesis.go @@ -154,7 +154,7 @@ func hashAlloc(ga *types.GenesisAlloc, isVerkle bool) (common.Hash, error) { statedb.AddBalance(addr, uint256.MustFromBig(account.Balance), tracing.BalanceIncreaseGenesisBalance) } statedb.SetCode(addr, account.Code) - statedb.SetNonce(addr, account.Nonce) + statedb.SetNonce(addr, account.Nonce, tracing.NonceChangeGenesis) for key, value := range account.Storage { statedb.SetState(addr, key, value) } @@ -180,7 +180,7 @@ func flushAlloc(ga *types.GenesisAlloc, triedb *triedb.Database) (common.Hash, e statedb.AddBalance(addr, uint256.MustFromBig(account.Balance), tracing.BalanceIncreaseGenesisBalance) } statedb.SetCode(addr, account.Code) - statedb.SetNonce(addr, account.Nonce) + statedb.SetNonce(addr, account.Nonce, tracing.NonceChangeGenesis) for key, value := range account.Storage { statedb.SetState(addr, key, value) } diff --git a/core/state/statedb.go b/core/state/statedb.go index 0310ee6973..efafdc1aa2 100644 --- a/core/state/statedb.go +++ b/core/state/statedb.go @@ -432,7 +432,7 @@ func (s *StateDB) SetBalance(addr common.Address, amount *uint256.Int, reason tr } } -func (s *StateDB) SetNonce(addr common.Address, nonce uint64) { +func (s *StateDB) 
SetNonce(addr common.Address, nonce uint64, reason tracing.NonceChangeReason) { stateObject := s.getOrNewStateObject(addr) if stateObject != nil { stateObject.SetNonce(nonce) diff --git a/core/state/statedb_fuzz_test.go b/core/state/statedb_fuzz_test.go index ed99cf687c..2923a2c224 100644 --- a/core/state/statedb_fuzz_test.go +++ b/core/state/statedb_fuzz_test.go @@ -69,7 +69,7 @@ func newStateTestAction(addr common.Address, r *rand.Rand, index int) testAction { name: "SetNonce", fn: func(a testAction, s *StateDB) { - s.SetNonce(addr, uint64(a.args[0])) + s.SetNonce(addr, uint64(a.args[0]), tracing.NonceChangeUnspecified) }, args: make([]int64, 1), }, diff --git a/core/state/statedb_hooked.go b/core/state/statedb_hooked.go index 25d823cc87..a2fdfe9a21 100644 --- a/core/state/statedb_hooked.go +++ b/core/state/statedb_hooked.go @@ -179,10 +179,13 @@ func (s *hookedStateDB) AddBalance(addr common.Address, amount *uint256.Int, rea return prev } -func (s *hookedStateDB) SetNonce(address common.Address, nonce uint64) { - s.inner.SetNonce(address, nonce) - if s.hooks.OnNonceChange != nil { - s.hooks.OnNonceChange(address, nonce-1, nonce) +func (s *hookedStateDB) SetNonce(address common.Address, nonce uint64, reason tracing.NonceChangeReason) { + prev := s.inner.GetNonce(address) + s.inner.SetNonce(address, nonce, reason) + if s.hooks.OnNonceChangeV2 != nil { + s.hooks.OnNonceChangeV2(address, prev, nonce, reason) + } else if s.hooks.OnNonceChange != nil { + s.hooks.OnNonceChange(address, prev, nonce) } } diff --git a/core/state/statedb_hooked_test.go b/core/state/statedb_hooked_test.go index 874a275993..ce42e96409 100644 --- a/core/state/statedb_hooked_test.go +++ b/core/state/statedb_hooked_test.go @@ -85,7 +85,7 @@ func TestHooks(t *testing.T) { var wants = []string{ "0xaa00000000000000000000000000000000000000.balance: 0->100 (BalanceChangeUnspecified)", "0xaa00000000000000000000000000000000000000.balance: 100->50 (BalanceChangeTransfer)", - 
"0xaa00000000000000000000000000000000000000.nonce: 1336->1337", + "0xaa00000000000000000000000000000000000000.nonce: 0->1337", "0xaa00000000000000000000000000000000000000.code: (0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470) ->0x1325 (0xa12ae05590de0c93a00bc7ac773c2fdb621e44f814985e72194f921c0050f728)", "0xaa00000000000000000000000000000000000000.storage slot 0x0000000000000000000000000000000000000000000000000000000000000001: 0x0000000000000000000000000000000000000000000000000000000000000000 ->0x0000000000000000000000000000000000000000000000000000000000000011", "0xaa00000000000000000000000000000000000000.storage slot 0x0000000000000000000000000000000000000000000000000000000000000001: 0x0000000000000000000000000000000000000000000000000000000000000011 ->0x0000000000000000000000000000000000000000000000000000000000000022", @@ -113,7 +113,7 @@ func TestHooks(t *testing.T) { }) sdb.AddBalance(common.Address{0xaa}, uint256.NewInt(100), tracing.BalanceChangeUnspecified) sdb.SubBalance(common.Address{0xaa}, uint256.NewInt(50), tracing.BalanceChangeTransfer) - sdb.SetNonce(common.Address{0xaa}, 1337) + sdb.SetNonce(common.Address{0xaa}, 1337, tracing.NonceChangeGenesis) sdb.SetCode(common.Address{0xaa}, []byte{0x13, 37}) sdb.SetState(common.Address{0xaa}, common.HexToHash("0x01"), common.HexToHash("0x11")) sdb.SetState(common.Address{0xaa}, common.HexToHash("0x01"), common.HexToHash("0x22")) diff --git a/core/state/statedb_test.go b/core/state/statedb_test.go index 67eb9cbdc6..e740c64faa 100644 --- a/core/state/statedb_test.go +++ b/core/state/statedb_test.go @@ -60,7 +60,7 @@ func TestUpdateLeaks(t *testing.T) { for i := byte(0); i < 255; i++ { addr := common.BytesToAddress([]byte{i}) state.AddBalance(addr, uint256.NewInt(uint64(11*i)), tracing.BalanceChangeUnspecified) - state.SetNonce(addr, uint64(42*i)) + state.SetNonce(addr, uint64(42*i), tracing.NonceChangeUnspecified) if i%2 == 0 { state.SetState(addr, common.BytesToHash([]byte{i, i, i}), 
common.BytesToHash([]byte{i, i, i, i})) } @@ -95,7 +95,7 @@ func TestIntermediateLeaks(t *testing.T) { modify := func(state *StateDB, addr common.Address, i, tweak byte) { state.SetBalance(addr, uint256.NewInt(uint64(11*i)+uint64(tweak)), tracing.BalanceChangeUnspecified) - state.SetNonce(addr, uint64(42*i+tweak)) + state.SetNonce(addr, uint64(42*i+tweak), tracing.NonceChangeUnspecified) if i%2 == 0 { state.SetState(addr, common.Hash{i, i, i, 0}, common.Hash{}) state.SetState(addr, common.Hash{i, i, i, tweak}, common.Hash{i, i, i, i, tweak}) @@ -357,7 +357,7 @@ func newTestAction(addr common.Address, r *rand.Rand) testAction { { name: "SetNonce", fn: func(a testAction, s *StateDB) { - s.SetNonce(addr, uint64(a.args[0])) + s.SetNonce(addr, uint64(a.args[0]), tracing.NonceChangeUnspecified) }, args: make([]int64, 1), }, diff --git a/core/state_transition.go b/core/state_transition.go index e9c88eaedf..e7edd76ced 100644 --- a/core/state_transition.go +++ b/core/state_transition.go @@ -487,7 +487,7 @@ func (st *stateTransition) execute() (*ExecutionResult, error) { ret, _, st.gasRemaining, vmerr = st.evm.Create(sender, msg.Data, st.gasRemaining, value) } else { // Increment the nonce for the next transaction. - st.state.SetNonce(msg.From, st.state.GetNonce(msg.From)+1) + st.state.SetNonce(msg.From, st.state.GetNonce(msg.From)+1, tracing.NonceChangeEoACall) // Apply EIP-7702 authorizations. if msg.SetCodeAuthorizations != nil { @@ -602,7 +602,7 @@ func (st *stateTransition) applyAuthorization(auth *types.SetCodeAuthorization) } // Update nonce and account code. - st.state.SetNonce(authority, auth.Nonce+1) + st.state.SetNonce(authority, auth.Nonce+1, tracing.NonceChangeAuthorization) if auth.Address == (common.Address{}) { // Delegation to zero address means clear. 
st.state.SetCode(authority, nil) diff --git a/core/tracing/CHANGELOG.md b/core/tracing/CHANGELOG.md index 270e0a30bf..a14e123d99 100644 --- a/core/tracing/CHANGELOG.md +++ b/core/tracing/CHANGELOG.md @@ -4,6 +4,53 @@ All notable changes to the tracing interface will be documented in this file. ## [Unreleased] +The tracing interface has been extended with backwards-compatible changes to support more use-cases and simplify tracer code. The most notable change is a state journaling library which emits reverse events when a call is reverted. + +### Deprecated methods + +- `OnSystemCallStart()`: This hook is deprecated in favor of `OnSystemCallStartV2(vm *VMContext)`. +- `OnNonceChange(addr common.Address, prev, new uint64)`: This hook is deprecated in favor of `OnNonceChangeV2(addr common.Address, prev, new uint64, reason NonceChangeReason)`. + +### New methods + +- `OnBlockHashRead(blockNum uint64, hash common.Hash)`: This hook is called when a block hash is read by EVM. +- `OnSystemCallStartV2(vm *VMContext)`. This allows access to EVM context during system calls. It is a successor to `OnSystemCallStart`. +- `OnNonceChangeV2(addr common.Address, prev, new uint64, reason NonceChangeReason)`: This hook is called when a nonce change occurs. It is a successor to `OnNonceChange`. + +### New types + +- `NonceChangeReason` is a new type used to provide a reason for nonce changes. Notably it includes `NonceChangeRevert` which will be emitted by the state journaling library when a nonce change is due to a revert. + +### Modified types + +- `VMContext.StateDB` has been extended with `GetCodeHash(addr common.Address) common.Hash` method used to retrieve the code hash an account. +- `BalanceChangeReason` has been extended with the `BalanceChangeRevert` reason. More on that below. + +### State journaling + +Tracers receive state changes events from the node. 
The tracer was so far expected to keep track of modified accounts and slots and revert those changes when a call frame failed. Now a utility tracer wrapper is provided which will emit "reverse change" events when a call frame fails. To use this feature the hooks have to be wrapped prior to registering the tracer. The following example demonstrates how to use the state journaling library: + +```go +func init() { + tracers.LiveDirectory.Register("test", func (cfg json.RawMessage) (*tracing.Hooks, error) { + hooks, err := newTestTracer(cfg) + if err != nil { + return nil, err + } + return tracing.WrapWithJournal(hooks) + }) +} +``` + +The state changes that are covered by the journaling library are: + +- `OnBalanceChange`. Note that `OnBalanceChange` will carry the `BalanceChangeRevert` reason. +- `OnNonceChange`, `OnNonceChangeV2` +- `OnCodeChange` +- `OnStorageChange` + +## [v1.14.9](https://github.com/ethereum/go-ethereum/releases/tag/v1.14.9) + ### Modified types - `GasChangeReason` has been extended with the following reasons which will be enabled only post-Verkle. There shouldn't be any gas changes with those reasons prior to the fork. 
diff --git a/core/tracing/gen_balance_change_reason_stringer.go b/core/tracing/gen_balance_change_reason_stringer.go index d3a515a12d..4f094efb4f 100644 --- a/core/tracing/gen_balance_change_reason_stringer.go +++ b/core/tracing/gen_balance_change_reason_stringer.go @@ -23,11 +23,12 @@ func _() { _ = x[BalanceIncreaseSelfdestruct-12] _ = x[BalanceDecreaseSelfdestruct-13] _ = x[BalanceDecreaseSelfdestructBurn-14] + _ = x[BalanceChangeRevert-15] } -const _BalanceChangeReason_name = "BalanceChangeUnspecifiedBalanceIncreaseRewardMineUncleBalanceIncreaseRewardMineBlockBalanceIncreaseWithdrawalBalanceIncreaseGenesisBalanceBalanceIncreaseRewardTransactionFeeBalanceDecreaseGasBuyBalanceIncreaseGasReturnBalanceIncreaseDaoContractBalanceDecreaseDaoAccountBalanceChangeTransferBalanceChangeTouchAccountBalanceIncreaseSelfdestructBalanceDecreaseSelfdestructBalanceDecreaseSelfdestructBurn" +const _BalanceChangeReason_name = "BalanceChangeUnspecifiedBalanceIncreaseRewardMineUncleBalanceIncreaseRewardMineBlockBalanceIncreaseWithdrawalBalanceIncreaseGenesisBalanceBalanceIncreaseRewardTransactionFeeBalanceDecreaseGasBuyBalanceIncreaseGasReturnBalanceIncreaseDaoContractBalanceDecreaseDaoAccountBalanceChangeTransferBalanceChangeTouchAccountBalanceIncreaseSelfdestructBalanceDecreaseSelfdestructBalanceDecreaseSelfdestructBurnBalanceChangeRevert" -var _BalanceChangeReason_index = [...]uint16{0, 24, 54, 84, 109, 138, 173, 194, 218, 244, 269, 290, 315, 342, 369, 400} +var _BalanceChangeReason_index = [...]uint16{0, 24, 54, 84, 109, 138, 173, 194, 218, 244, 269, 290, 315, 342, 369, 400, 419} func (i BalanceChangeReason) String() string { if i >= BalanceChangeReason(len(_BalanceChangeReason_index)-1) { diff --git a/core/tracing/hooks.go b/core/tracing/hooks.go index 167bcb5c16..4002b57207 100644 --- a/core/tracing/hooks.go +++ b/core/tracing/hooks.go @@ -14,6 +14,14 @@ // You should have received a copy of the GNU Lesser General Public License // along with the go-ethereum library. 
If not, see . +// Package tracing defines hooks for 'live tracing' of block processing and transaction +// execution. Here we define the low-level [Hooks] object that carries hooks which are +// invoked by the go-ethereum core at various points in the state transition. +// +// To create a tracer that can be invoked with Geth, you need to register it using +// [github.com/ethereum/go-ethereum/eth/tracers.LiveDirectory.Register]. +// +// See https://geth.ethereum.org/docs/developers/evm-tracing/live-tracing for a tutorial. package tracing import ( @@ -163,6 +171,9 @@ type ( // NonceChangeHook is called when the nonce of an account changes. NonceChangeHook = func(addr common.Address, prev, new uint64) + // NonceChangeHookV2 is called when the nonce of an account changes. + NonceChangeHookV2 = func(addr common.Address, prev, new uint64, reason NonceChangeReason) + // CodeChangeHook is called when the code of an account changes. CodeChangeHook = func(addr common.Address, prevCodeHash common.Hash, prevCode []byte, codeHash common.Hash, code []byte) @@ -171,6 +182,9 @@ type ( // LogHook is called when a log is emitted. LogHook = func(log *types.Log) + + // BlockHashReadHook is called when EVM reads the blockhash of a block. + BlockHashReadHook = func(blockNumber uint64, hash common.Hash) ) type Hooks struct { @@ -195,9 +209,12 @@ type Hooks struct { // State events OnBalanceChange BalanceChangeHook OnNonceChange NonceChangeHook + OnNonceChangeV2 NonceChangeHookV2 OnCodeChange CodeChangeHook OnStorageChange StorageChangeHook OnLog LogHook + // Block hash read + OnBlockHashRead BlockHashReadHook } // BalanceChangeReason is used to indicate the reason for a balance change, useful @@ -249,6 +266,10 @@ const ( // account within the same tx (captured at end of tx). // Note it doesn't account for a self-destruct which appoints itself as recipient. 
BalanceDecreaseSelfdestructBurn BalanceChangeReason = 14 + + // BalanceChangeRevert is emitted when the balance is reverted back to a previous value due to call failure. + // It is only emitted when the tracer has opted in to use the journaling wrapper (WrapWithJournal). + BalanceChangeRevert BalanceChangeReason = 15 ) // GasChangeReason is used to indicate the reason for a gas change, useful @@ -321,3 +342,29 @@ const ( // it will be "manually" tracked by a direct emit of the gas change event. GasChangeIgnored GasChangeReason = 0xFF ) + +// NonceChangeReason is used to indicate the reason for a nonce change. +type NonceChangeReason byte + +const ( + NonceChangeUnspecified NonceChangeReason = 0 + + // NonceChangeGenesis is the nonce allocated to accounts at genesis. + NonceChangeGenesis NonceChangeReason = 1 + + // NonceChangeEoACall is the nonce change due to an EoA call. + NonceChangeEoACall NonceChangeReason = 2 + + // NonceChangeContractCreator is the nonce change of an account creating a contract. + NonceChangeContractCreator NonceChangeReason = 3 + + // NonceChangeNewContract is the nonce change of a newly created contract. + NonceChangeNewContract NonceChangeReason = 4 + + // NonceChangeTransaction is the nonce change due to a EIP-7702 authorization. + NonceChangeAuthorization NonceChangeReason = 5 + + // NonceChangeRevert is emitted when the nonce is reverted back to a previous value due to call failure. + // It is only emitted when the tracer has opted in to use the journaling wrapper (WrapWithJournal). + NonceChangeRevert NonceChangeReason = 6 +) diff --git a/core/tracing/journal.go b/core/tracing/journal.go new file mode 100644 index 0000000000..8937d4c5ae --- /dev/null +++ b/core/tracing/journal.go @@ -0,0 +1,237 @@ +// Copyright 2025 The go-ethereum Authors +// This file is part of the go-ethereum library. 
+// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package tracing + +import ( + "fmt" + "math/big" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" +) + +// journal is a state change journal to be wrapped around a tracer. +// It will emit the state change hooks with reverse values when a call reverts. +type journal struct { + hooks *Hooks + entries []entry + revisions []int +} + +type entry interface { + revert(tracer *Hooks) +} + +// WrapWithJournal wraps the given tracer with a journaling layer. +func WrapWithJournal(hooks *Hooks) (*Hooks, error) { + if hooks == nil { + return nil, fmt.Errorf("wrapping nil tracer") + } + // No state change to journal, return the wrapped hooks as is + if hooks.OnBalanceChange == nil && hooks.OnNonceChange == nil && hooks.OnNonceChangeV2 == nil && hooks.OnCodeChange == nil && hooks.OnStorageChange == nil { + return hooks, nil + } + if hooks.OnNonceChange != nil && hooks.OnNonceChangeV2 != nil { + return nil, fmt.Errorf("cannot have both OnNonceChange and OnNonceChangeV2") + } + + // Create a new Hooks instance and copy all hooks + wrapped := *hooks + + // Create journal + j := &journal{hooks: hooks} + // Scope hooks need to be re-implemented. + wrapped.OnTxEnd = j.OnTxEnd + wrapped.OnEnter = j.OnEnter + wrapped.OnExit = j.OnExit + // Wrap state change hooks. 
+ if hooks.OnBalanceChange != nil { + wrapped.OnBalanceChange = j.OnBalanceChange + } + if hooks.OnNonceChange != nil || hooks.OnNonceChangeV2 != nil { + // Regardless of which hook version is used in the tracer, + // the journal will want to capture the nonce change reason. + wrapped.OnNonceChangeV2 = j.OnNonceChangeV2 + // A precaution to ensure EVM doesn't call both hooks. + wrapped.OnNonceChange = nil + } + if hooks.OnCodeChange != nil { + wrapped.OnCodeChange = j.OnCodeChange + } + if hooks.OnStorageChange != nil { + wrapped.OnStorageChange = j.OnStorageChange + } + + return &wrapped, nil +} + +// reset clears the journal, after this operation the journal can be used anew. +// It is semantically similar to calling 'NewJournal', but the underlying slices +// can be reused. +func (j *journal) reset() { + j.entries = j.entries[:0] + j.revisions = j.revisions[:0] +} + +// snapshot records a revision and stores it to the revision stack. +func (j *journal) snapshot() { + rev := len(j.entries) + j.revisions = append(j.revisions, rev) +} + +// revert reverts all state changes up to the last tracked revision. +func (j *journal) revert(hooks *Hooks) { + // Replay the journal entries above the last revision to undo changes, + // then remove the reverted changes from the journal. + rev := j.revisions[len(j.revisions)-1] + for i := len(j.entries) - 1; i >= rev; i-- { + j.entries[i].revert(hooks) + } + j.entries = j.entries[:rev] + j.popRevision() +} + +// popRevision removes an item from the revision stack. This basically forgets about +// the last call to snapshot() and moves to the one prior. +func (j *journal) popRevision() { + j.revisions = j.revisions[:len(j.revisions)-1] +} + +// OnTxEnd resets the journal since each transaction has its own EVM call stack. 
+func (j *journal) OnTxEnd(receipt *types.Receipt, err error) { + j.reset() + if j.hooks.OnTxEnd != nil { + j.hooks.OnTxEnd(receipt, err) + } +} + +// OnEnter is invoked for each EVM call frame and records a journal revision. +func (j *journal) OnEnter(depth int, typ byte, from common.Address, to common.Address, input []byte, gas uint64, value *big.Int) { + j.snapshot() + if j.hooks.OnEnter != nil { + j.hooks.OnEnter(depth, typ, from, to, input, gas, value) + } +} + +// OnExit is invoked when an EVM call frame ends. +// If the call has reverted, all state changes made by that frame are undone. +// If the call did not revert, we forget about changes in that revision. +func (j *journal) OnExit(depth int, output []byte, gasUsed uint64, err error, reverted bool) { + if reverted { + j.revert(j.hooks) + } else { + j.popRevision() + } + if j.hooks.OnExit != nil { + j.hooks.OnExit(depth, output, gasUsed, err, reverted) + } +} + +func (j *journal) OnBalanceChange(addr common.Address, prev, new *big.Int, reason BalanceChangeReason) { + j.entries = append(j.entries, balanceChange{addr: addr, prev: prev, new: new}) + if j.hooks.OnBalanceChange != nil { + j.hooks.OnBalanceChange(addr, prev, new, reason) + } +} + +func (j *journal) OnNonceChangeV2(addr common.Address, prev, new uint64, reason NonceChangeReason) { + // When a contract is created, the nonce of the creator is incremented. + // This change is not reverted when the creation fails. 
+ if reason != NonceChangeContractCreator { + j.entries = append(j.entries, nonceChange{addr: addr, prev: prev, new: new}) + } + if j.hooks.OnNonceChangeV2 != nil { + j.hooks.OnNonceChangeV2(addr, prev, new, reason) + } else if j.hooks.OnNonceChange != nil { + j.hooks.OnNonceChange(addr, prev, new) + } +} + +func (j *journal) OnCodeChange(addr common.Address, prevCodeHash common.Hash, prevCode []byte, codeHash common.Hash, code []byte) { + j.entries = append(j.entries, codeChange{ + addr: addr, + prevCodeHash: prevCodeHash, + prevCode: prevCode, + newCodeHash: codeHash, + newCode: code, + }) + if j.hooks.OnCodeChange != nil { + j.hooks.OnCodeChange(addr, prevCodeHash, prevCode, codeHash, code) + } +} + +func (j *journal) OnStorageChange(addr common.Address, slot common.Hash, prev, new common.Hash) { + j.entries = append(j.entries, storageChange{addr: addr, slot: slot, prev: prev, new: new}) + if j.hooks.OnStorageChange != nil { + j.hooks.OnStorageChange(addr, slot, prev, new) + } +} + +type ( + balanceChange struct { + addr common.Address + prev *big.Int + new *big.Int + } + + nonceChange struct { + addr common.Address + prev uint64 + new uint64 + } + + codeChange struct { + addr common.Address + prevCodeHash common.Hash + prevCode []byte + newCodeHash common.Hash + newCode []byte + } + + storageChange struct { + addr common.Address + slot common.Hash + prev common.Hash + new common.Hash + } +) + +func (b balanceChange) revert(hooks *Hooks) { + if hooks.OnBalanceChange != nil { + hooks.OnBalanceChange(b.addr, b.new, b.prev, BalanceChangeRevert) + } +} + +func (n nonceChange) revert(hooks *Hooks) { + if hooks.OnNonceChangeV2 != nil { + hooks.OnNonceChangeV2(n.addr, n.new, n.prev, NonceChangeRevert) + } else if hooks.OnNonceChange != nil { + hooks.OnNonceChange(n.addr, n.new, n.prev) + } +} + +func (c codeChange) revert(hooks *Hooks) { + if hooks.OnCodeChange != nil { + hooks.OnCodeChange(c.addr, c.newCodeHash, c.newCode, c.prevCodeHash, c.prevCode) + } +} + +func (s 
storageChange) revert(hooks *Hooks) { + if hooks.OnStorageChange != nil { + hooks.OnStorageChange(s.addr, s.slot, s.new, s.prev) + } +} diff --git a/core/tracing/journal_test.go b/core/tracing/journal_test.go new file mode 100644 index 0000000000..d9616a2ce8 --- /dev/null +++ b/core/tracing/journal_test.go @@ -0,0 +1,335 @@ +// Copyright 2025 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package tracing + +import ( + "errors" + "math/big" + "reflect" + "testing" + + "github.com/ethereum/go-ethereum/common" +) + +type testTracer struct { + t *testing.T + bal *big.Int + nonce uint64 + code []byte + storage map[common.Hash]common.Hash +} + +func (t *testTracer) OnBalanceChange(addr common.Address, prev *big.Int, new *big.Int, reason BalanceChangeReason) { + t.t.Logf("OnBalanceChange(%v, %v -> %v, %v)", addr, prev, new, reason) + if t.bal != nil && t.bal.Cmp(prev) != 0 { + t.t.Errorf(" !! 
wrong prev balance (expected %v)", t.bal) + } + t.bal = new +} + +func (t *testTracer) OnNonceChange(addr common.Address, prev uint64, new uint64) { + t.t.Logf("OnNonceChange(%v, %v -> %v)", addr, prev, new) + t.nonce = new +} + +func (t *testTracer) OnNonceChangeV2(addr common.Address, prev uint64, new uint64, reason NonceChangeReason) { + t.t.Logf("OnNonceChangeV2(%v, %v -> %v, %v)", addr, prev, new, reason) + t.nonce = new +} + +func (t *testTracer) OnCodeChange(addr common.Address, prevCodeHash common.Hash, prevCode []byte, codeHash common.Hash, code []byte) { + t.t.Logf("OnCodeChange(%v, %v -> %v)", addr, prevCodeHash, codeHash) + t.code = code +} + +func (t *testTracer) OnStorageChange(addr common.Address, slot common.Hash, prev common.Hash, new common.Hash) { + t.t.Logf("OnStorageCodeChange(%v, %v, %v -> %v)", addr, slot, prev, new) + if t.storage == nil { + t.storage = make(map[common.Hash]common.Hash) + } + if new == (common.Hash{}) { + delete(t.storage, slot) + } else { + t.storage[slot] = new + } +} + +func TestJournalIntegration(t *testing.T) { + tr := &testTracer{t: t} + wr, err := WrapWithJournal(&Hooks{OnBalanceChange: tr.OnBalanceChange, OnNonceChange: tr.OnNonceChange, OnCodeChange: tr.OnCodeChange, OnStorageChange: tr.OnStorageChange}) + if err != nil { + t.Fatalf("failed to wrap test tracer: %v", err) + } + + addr := common.HexToAddress("0x1234") + { + wr.OnEnter(0, 0, addr, addr, nil, 1000, big.NewInt(0)) + wr.OnBalanceChange(addr, nil, big.NewInt(100), BalanceChangeUnspecified) + wr.OnCodeChange(addr, common.Hash{}, nil, common.Hash{}, []byte{1, 2, 3}) + wr.OnStorageChange(addr, common.Hash{1}, common.Hash{}, common.Hash{2}) + { + wr.OnEnter(1, 0, addr, addr, nil, 1000, big.NewInt(0)) + wr.OnNonceChangeV2(addr, 0, 1, NonceChangeUnspecified) + wr.OnBalanceChange(addr, big.NewInt(100), big.NewInt(200), BalanceChangeUnspecified) + wr.OnBalanceChange(addr, big.NewInt(200), big.NewInt(250), BalanceChangeUnspecified) + wr.OnStorageChange(addr, 
common.Hash{1}, common.Hash{2}, common.Hash{3}) + wr.OnStorageChange(addr, common.Hash{2}, common.Hash{}, common.Hash{4}) + wr.OnExit(1, nil, 100, errors.New("revert"), true) + } + wr.OnExit(0, nil, 150, nil, false) + } + + if tr.bal.Cmp(big.NewInt(100)) != 0 { + t.Fatalf("unexpected balance: %v", tr.bal) + } + if tr.nonce != 0 { + t.Fatalf("unexpected nonce: %v", tr.nonce) + } + if len(tr.code) != 3 { + t.Fatalf("unexpected code: %v", tr.code) + } + if len(tr.storage) != 1 { + t.Fatalf("unexpected storage len. want %d, have %d", 1, len(tr.storage)) + } + if tr.storage[common.Hash{1}] != (common.Hash{2}) { + t.Fatalf("unexpected storage. want %v, have %v", common.Hash{2}, tr.storage[common.Hash{1}]) + } +} + +func TestJournalTopRevert(t *testing.T) { + tr := &testTracer{t: t} + wr, err := WrapWithJournal(&Hooks{OnBalanceChange: tr.OnBalanceChange, OnNonceChange: tr.OnNonceChange}) + if err != nil { + t.Fatalf("failed to wrap test tracer: %v", err) + } + + addr := common.HexToAddress("0x1234") + { + wr.OnEnter(0, 0, addr, addr, nil, 1000, big.NewInt(0)) + wr.OnBalanceChange(addr, big.NewInt(0), big.NewInt(100), BalanceChangeUnspecified) + { + wr.OnEnter(1, 0, addr, addr, nil, 1000, big.NewInt(0)) + wr.OnNonceChangeV2(addr, 0, 1, NonceChangeUnspecified) + wr.OnBalanceChange(addr, big.NewInt(100), big.NewInt(200), BalanceChangeUnspecified) + wr.OnBalanceChange(addr, big.NewInt(200), big.NewInt(250), BalanceChangeUnspecified) + wr.OnExit(1, nil, 100, errors.New("revert"), true) + } + wr.OnExit(0, nil, 150, errors.New("revert"), true) + } + + if tr.bal.Cmp(big.NewInt(0)) != 0 { + t.Fatalf("unexpected balance: %v", tr.bal) + } + if tr.nonce != 0 { + t.Fatalf("unexpected nonce: %v", tr.nonce) + } +} + +// This test checks that changes in nested calls are reverted properly. 
+func TestJournalNestedCalls(t *testing.T) { + tr := &testTracer{t: t} + wr, err := WrapWithJournal(&Hooks{OnBalanceChange: tr.OnBalanceChange, OnNonceChange: tr.OnNonceChange}) + if err != nil { + t.Fatalf("failed to wrap test tracer: %v", err) + } + + addr := common.HexToAddress("0x1234") + { + wr.OnEnter(0, 0, addr, addr, nil, 1000, big.NewInt(0)) + wr.OnBalanceChange(addr, big.NewInt(0), big.NewInt(100), BalanceChangeUnspecified) + { + wr.OnEnter(1, 0, addr, addr, nil, 1000, big.NewInt(0)) + wr.OnBalanceChange(addr, big.NewInt(100), big.NewInt(200), BalanceChangeUnspecified) + { + wr.OnEnter(2, 0, addr, addr, nil, 1000, big.NewInt(0)) + wr.OnExit(2, nil, 100, nil, false) + } + { + wr.OnEnter(2, 0, addr, addr, nil, 1000, big.NewInt(0)) + wr.OnBalanceChange(addr, big.NewInt(200), big.NewInt(300), BalanceChangeUnspecified) + wr.OnExit(2, nil, 100, nil, false) + } + { + wr.OnEnter(2, 0, addr, addr, nil, 1000, big.NewInt(0)) + wr.OnExit(2, nil, 100, nil, false) + } + wr.OnBalanceChange(addr, big.NewInt(300), big.NewInt(400), BalanceChangeUnspecified) + { + wr.OnEnter(2, 0, addr, addr, nil, 1000, big.NewInt(0)) + wr.OnBalanceChange(addr, big.NewInt(400), big.NewInt(500), BalanceChangeUnspecified) + wr.OnExit(2, nil, 100, errors.New("revert"), true) + } + { + wr.OnEnter(2, 0, addr, addr, nil, 1000, big.NewInt(0)) + wr.OnExit(2, nil, 100, errors.New("revert"), true) + } + { + wr.OnEnter(2, 0, addr, addr, nil, 1000, big.NewInt(0)) + wr.OnBalanceChange(addr, big.NewInt(400), big.NewInt(600), BalanceChangeUnspecified) + wr.OnExit(2, nil, 100, nil, false) + } + wr.OnExit(1, nil, 100, errors.New("revert"), true) + } + wr.OnExit(0, nil, 150, nil, false) + } + + if tr.bal.Uint64() != 100 { + t.Fatalf("unexpected balance: %v", tr.bal) + } +} + +func TestNonceIncOnCreate(t *testing.T) { + const opCREATE = 0xf0 + + tr := &testTracer{t: t} + wr, err := WrapWithJournal(&Hooks{OnNonceChange: tr.OnNonceChange}) + if err != nil { + t.Fatalf("failed to wrap test tracer: %v", err) + } 
+ + addr := common.HexToAddress("0x1234") + { + wr.OnEnter(0, opCREATE, addr, addr, nil, 1000, big.NewInt(0)) + wr.OnNonceChangeV2(addr, 0, 1, NonceChangeContractCreator) + wr.OnExit(0, nil, 100, errors.New("revert"), true) + } + + if tr.nonce != 1 { + t.Fatalf("unexpected nonce: %v", tr.nonce) + } +} + +func TestOnNonceChangeV2(t *testing.T) { + tr := &testTracer{t: t} + wr, err := WrapWithJournal(&Hooks{OnNonceChangeV2: tr.OnNonceChangeV2}) + if err != nil { + t.Fatalf("failed to wrap test tracer: %v", err) + } + + addr := common.HexToAddress("0x1234") + { + wr.OnEnter(2, 0, addr, addr, nil, 1000, big.NewInt(0)) + wr.OnNonceChangeV2(addr, 0, 1, NonceChangeEoACall) + wr.OnExit(2, nil, 100, nil, true) + } + + if tr.nonce != 0 { + t.Fatalf("unexpected nonce: %v", tr.nonce) + } +} + +func TestAllHooksCalled(t *testing.T) { + tracer := newTracerAllHooks() + hooks := tracer.hooks() + + wrapped, err := WrapWithJournal(hooks) + if err != nil { + t.Fatalf("failed to wrap hooks with journal: %v", err) + } + + // Get the underlying value of the wrapped hooks + wrappedValue := reflect.ValueOf(wrapped).Elem() + wrappedType := wrappedValue.Type() + + // Iterate over all fields of the wrapped hooks + for i := 0; i < wrappedType.NumField(); i++ { + field := wrappedType.Field(i) + + // Skip fields that are not function types + if field.Type.Kind() != reflect.Func { + continue + } + // Skip non-hooks, i.e. Copy + if field.Name == "copy" { + continue + } + // Skip if field is not set + if wrappedValue.Field(i).IsNil() { + continue + } + + // Get the method + method := wrappedValue.Field(i) + + // Call the method with zero values + params := make([]reflect.Value, method.Type().NumIn()) + for j := 0; j < method.Type().NumIn(); j++ { + params[j] = reflect.Zero(method.Type().In(j)) + } + method.Call(params) + } + + // Check if all hooks were called + if tracer.numCalled() != tracer.hooksCount() { + t.Errorf("Not all hooks were called. 
Expected %d, got %d", tracer.hooksCount(), tracer.numCalled()) + } + + for hookName, called := range tracer.hooksCalled { + if !called { + t.Errorf("Hook %s was not called", hookName) + } + } +} + +type tracerAllHooks struct { + hooksCalled map[string]bool +} + +func newTracerAllHooks() *tracerAllHooks { + t := &tracerAllHooks{hooksCalled: make(map[string]bool)} + // Initialize all hooks to false. We will use this to + // get total count of hooks. + hooksType := reflect.TypeOf((*Hooks)(nil)).Elem() + for i := 0; i < hooksType.NumField(); i++ { + t.hooksCalled[hooksType.Field(i).Name] = false + } + delete(t.hooksCalled, "OnNonceChange") + return t +} + +func (t *tracerAllHooks) hooksCount() int { + return len(t.hooksCalled) +} + +func (t *tracerAllHooks) numCalled() int { + count := 0 + for _, called := range t.hooksCalled { + if called { + count++ + } + } + return count +} + +func (t *tracerAllHooks) hooks() *Hooks { + h := &Hooks{} + // Create a function for each hook that sets the + // corresponding hooksCalled field to true. 
+ hooksValue := reflect.ValueOf(h).Elem() + for i := 0; i < hooksValue.NumField(); i++ { + field := hooksValue.Type().Field(i) + if field.Name == "OnNonceChange" { + continue + } + hookMethod := reflect.MakeFunc(field.Type, func(args []reflect.Value) []reflect.Value { + t.hooksCalled[field.Name] = true + return nil + }) + hooksValue.Field(i).Set(hookMethod) + } + return h +} diff --git a/core/txpool/blobpool/blobpool_test.go b/core/txpool/blobpool/blobpool_test.go index 88d68eb307..f7c9e4844b 100644 --- a/core/txpool/blobpool/blobpool_test.go +++ b/core/txpool/blobpool/blobpool_test.go @@ -680,9 +680,9 @@ func TestOpenDrops(t *testing.T) { statedb.AddBalance(crypto.PubkeyToAddress(gapper.PublicKey), uint256.NewInt(1000000), tracing.BalanceChangeUnspecified) statedb.AddBalance(crypto.PubkeyToAddress(dangler.PublicKey), uint256.NewInt(1000000), tracing.BalanceChangeUnspecified) statedb.AddBalance(crypto.PubkeyToAddress(filler.PublicKey), uint256.NewInt(1000000), tracing.BalanceChangeUnspecified) - statedb.SetNonce(crypto.PubkeyToAddress(filler.PublicKey), 3) + statedb.SetNonce(crypto.PubkeyToAddress(filler.PublicKey), 3, tracing.NonceChangeUnspecified) statedb.AddBalance(crypto.PubkeyToAddress(overlapper.PublicKey), uint256.NewInt(1000000), tracing.BalanceChangeUnspecified) - statedb.SetNonce(crypto.PubkeyToAddress(overlapper.PublicKey), 2) + statedb.SetNonce(crypto.PubkeyToAddress(overlapper.PublicKey), 2, tracing.NonceChangeUnspecified) statedb.AddBalance(crypto.PubkeyToAddress(underpayer.PublicKey), uint256.NewInt(1000000), tracing.BalanceChangeUnspecified) statedb.AddBalance(crypto.PubkeyToAddress(outpricer.PublicKey), uint256.NewInt(1000000), tracing.BalanceChangeUnspecified) statedb.AddBalance(crypto.PubkeyToAddress(exceeder.PublicKey), uint256.NewInt(1000000), tracing.BalanceChangeUnspecified) @@ -1526,7 +1526,7 @@ func TestAdd(t *testing.T) { // Seed the state database with this account statedb.AddBalance(addrs[acc], new(uint256.Int).SetUint64(seed.balance), 
tracing.BalanceChangeUnspecified) - statedb.SetNonce(addrs[acc], seed.nonce) + statedb.SetNonce(addrs[acc], seed.nonce, tracing.NonceChangeUnspecified) // Sign the seed transactions and store them in the data store for _, tx := range seed.txs { @@ -1581,7 +1581,7 @@ func TestAdd(t *testing.T) { // Apply the nonce updates to the state db for _, tx := range txs { sender, _ := types.Sender(types.LatestSigner(params.MainnetChainConfig), tx) - chain.statedb.SetNonce(sender, tx.Nonce()+1) + chain.statedb.SetNonce(sender, tx.Nonce()+1, tracing.NonceChangeUnspecified) } pool.Reset(chain.CurrentBlock(), header) verifyPoolInternals(t, pool) diff --git a/core/txpool/legacypool/legacypool_test.go b/core/txpool/legacypool/legacypool_test.go index 55699e93ee..fdbcad3d4f 100644 --- a/core/txpool/legacypool/legacypool_test.go +++ b/core/txpool/legacypool/legacypool_test.go @@ -251,7 +251,7 @@ func (c *testChain) State() (*state.StateDB, error) { if *c.trigger { c.statedb, _ = state.New(types.EmptyRootHash, state.NewDatabaseForTesting()) // simulate that the new head block included tx0 and tx1 - c.statedb.SetNonce(c.address, 2) + c.statedb.SetNonce(c.address, 2, tracing.NonceChangeUnspecified) c.statedb.SetBalance(c.address, new(uint256.Int).SetUint64(params.Ether), tracing.BalanceChangeUnspecified) *c.trigger = false } @@ -312,7 +312,7 @@ func testAddBalance(pool *LegacyPool, addr common.Address, amount *big.Int) { func testSetNonce(pool *LegacyPool, addr common.Address, nonce uint64) { pool.mu.Lock() - pool.currentState.SetNonce(addr, nonce) + pool.currentState.SetNonce(addr, nonce, tracing.NonceChangeUnspecified) pool.mu.Unlock() } @@ -1011,7 +1011,7 @@ func TestQueueTimeLimiting(t *testing.T) { } // remove current transactions and increase nonce to prepare for a reset and cleanup - statedb.SetNonce(crypto.PubkeyToAddress(remote.PublicKey), 2) + statedb.SetNonce(crypto.PubkeyToAddress(remote.PublicKey), 2, tracing.NonceChangeUnspecified) <-pool.requestReset(nil, nil) // make 
sure queue, pending are cleared diff --git a/core/verkle_witness_test.go b/core/verkle_witness_test.go index 4e873eedff..de2280ced1 100644 --- a/core/verkle_witness_test.go +++ b/core/verkle_witness_test.go @@ -29,6 +29,7 @@ import ( "github.com/ethereum/go-ethereum/consensus/ethash" "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/state" + "github.com/ethereum/go-ethereum/core/tracing" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/crypto" @@ -226,7 +227,7 @@ func TestProcessParentBlockHash(t *testing.T) { // block 2 parent hash is 0x0200.... // etc checkBlockHashes := func(statedb *state.StateDB, isVerkle bool) { - statedb.SetNonce(params.HistoryStorageAddress, 1) + statedb.SetNonce(params.HistoryStorageAddress, 1, tracing.NonceChangeUnspecified) statedb.SetCode(params.HistoryStorageAddress, params.HistoryStorageCode) // Process n blocks, from 1 .. num var num = 2 diff --git a/core/vm/evm.go b/core/vm/evm.go index 3f9ae621a3..442441e9ae 100644 --- a/core/vm/evm.go +++ b/core/vm/evm.go @@ -439,7 +439,7 @@ func (evm *EVM) create(caller ContractRef, codeAndHash *codeAndHash, gas uint64, if nonce+1 < nonce { return nil, common.Address{}, gas, ErrNonceUintOverflow } - evm.StateDB.SetNonce(caller.Address(), nonce+1) + evm.StateDB.SetNonce(caller.Address(), nonce+1, tracing.NonceChangeContractCreator) // Charge the contract creation init gas in verkle mode if evm.chainRules.IsEIP4762 { @@ -487,7 +487,7 @@ func (evm *EVM) create(caller ContractRef, codeAndHash *codeAndHash, gas uint64, evm.StateDB.CreateContract(address) if evm.chainRules.IsEIP158 { - evm.StateDB.SetNonce(address, 1) + evm.StateDB.SetNonce(address, 1, tracing.NonceChangeNewContract) } // Charge the contract creation init gas in verkle mode if evm.chainRules.IsEIP4762 { diff --git a/core/vm/instructions.go b/core/vm/instructions.go index 9b9a31a855..c9eea33507 100644 --- a/core/vm/instructions.go +++ 
b/core/vm/instructions.go @@ -448,6 +448,9 @@ func opBlockhash(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ( if witness := interpreter.evm.StateDB.Witness(); witness != nil { witness.AddBlockHash(num64) } + if tracer := interpreter.evm.Config.Tracer; tracer != nil && tracer.OnBlockHashRead != nil { + tracer.OnBlockHashRead(num64, res) + } num.SetBytes(res[:]) } else { num.Clear() diff --git a/core/vm/interface.go b/core/vm/interface.go index 3488526fc4..0d7862a66e 100644 --- a/core/vm/interface.go +++ b/core/vm/interface.go @@ -39,7 +39,7 @@ type StateDB interface { GetBalance(common.Address) *uint256.Int GetNonce(common.Address) uint64 - SetNonce(common.Address, uint64) + SetNonce(common.Address, uint64, tracing.NonceChangeReason) GetCodeHash(common.Address) common.Hash GetCode(common.Address) []byte diff --git a/core/vm/runtime/runtime_test.go b/core/vm/runtime/runtime_test.go index 44f5bc8273..bde230b6da 100644 --- a/core/vm/runtime/runtime_test.go +++ b/core/vm/runtime/runtime_test.go @@ -414,7 +414,7 @@ func benchmarkNonModifyingCode(gas uint64, code []byte, name string, tracerCode eoa := common.HexToAddress("E0") { cfg.State.CreateAccount(eoa) - cfg.State.SetNonce(eoa, 100) + cfg.State.SetNonce(eoa, 100, tracing.NonceChangeUnspecified) } reverting := common.HexToAddress("EE") { diff --git a/eth/tracers/live/noop.go b/eth/tracers/live/noop.go index 46c5700d25..f3def85606 100644 --- a/eth/tracers/live/noop.go +++ b/eth/tracers/live/noop.go @@ -57,6 +57,7 @@ func newNoopTracer(_ json.RawMessage) (*tracing.Hooks, error) { OnCodeChange: t.OnCodeChange, OnStorageChange: t.OnStorageChange, OnLog: t.OnLog, + OnBlockHashRead: t.OnBlockHashRead, }, nil } @@ -108,5 +109,7 @@ func (t *noop) OnLog(l *types.Log) { } +func (t *noop) OnBlockHashRead(number uint64, hash common.Hash) {} + func (t *noop) OnGasChange(old, new uint64, reason tracing.GasChangeReason) { } diff --git a/internal/ethapi/override/override.go b/internal/ethapi/override/override.go 
index 70b6210275..f6a8a94ffd 100644 --- a/internal/ethapi/override/override.go +++ b/internal/ethapi/override/override.go @@ -86,7 +86,7 @@ func (diff *StateOverride) Apply(statedb *state.StateDB, precompiles vm.Precompi } // Override account nonce. if account.Nonce != nil { - statedb.SetNonce(addr, uint64(*account.Nonce)) + statedb.SetNonce(addr, uint64(*account.Nonce), tracing.NonceChangeUnspecified) } // Override account(contract) code. if account.Code != nil { diff --git a/tests/state_test_util.go b/tests/state_test_util.go index 24caf41ed9..a22e470ad8 100644 --- a/tests/state_test_util.go +++ b/tests/state_test_util.go @@ -512,7 +512,7 @@ func MakePreState(db ethdb.Database, accounts types.GenesisAlloc, snapshotter bo statedb, _ := state.New(types.EmptyRootHash, sdb) for addr, a := range accounts { statedb.SetCode(addr, a.Code) - statedb.SetNonce(addr, a.Nonce) + statedb.SetNonce(addr, a.Nonce, tracing.NonceChangeUnspecified) statedb.SetBalance(addr, uint256.MustFromBig(a.Balance), tracing.BalanceChangeUnspecified) for k, v := range a.Storage { statedb.SetState(addr, k, v) From 5d97db8d03a3c55a6d1735543a2d41027333f37b Mon Sep 17 00:00:00 2001 From: Felix Lange Date: Wed, 5 Feb 2025 23:01:17 +0100 Subject: [PATCH 11/17] all: update license comments and AUTHORS (#31133) --- .mailmap | 89 +++++- AUTHORS | 300 +++++++++++++++++- beacon/light/sync/head_sync.go | 2 +- beacon/params/networks.go | 2 +- cmd/abigen/namefilter.go | 16 + cmd/abigen/namefilter_test.go | 16 + cmd/blsync/main.go | 16 +- cmd/evm/eofparse.go | 2 +- cmd/evm/eofparse_test.go | 16 + cmd/geth/attach_test.go | 14 +- cmd/geth/logging_test.go | 4 +- cmd/geth/logtestcmd_active.go | 4 +- cmd/geth/logtestcmd_inactive.go | 4 +- core/rawdb/accessors_trie.go | 4 +- core/state/statedb_fuzz_test.go | 2 +- core/txindexer.go | 2 +- core/txindexer_test.go | 2 +- core/txpool/blobpool/lookup.go | 2 +- core/txpool/legacypool/legacypool2_test.go | 1 + core/txpool/txpool.go | 2 +- core/types/tx_blob_test.go | 16 + 
core/vm/eof_control_flow.go | 14 +- core/vm/eof_test.go | 2 +- core/vm/jump_table_test.go | 2 +- core/vm/memory_test.go | 16 + core/vm/program/program.go | 6 +- core/vm/program/program_test.go | 6 +- eth/tracers/dir.go | 2 +- .../internal/tracetest/flat_calltrace_test.go | 16 + eth/tracers/internal/tracetest/makeTest.js | 16 + .../internal/tracetest/prestate_test.go | 2 +- eth/tracers/internal/tracetest/supply_test.go | 2 +- eth/tracers/internal/tracetest/util.go | 16 + eth/tracers/internal/util.go | 2 +- eth/tracers/internal/util_test.go | 3 +- eth/tracers/native/call_flat.go | 2 +- eth/tracers/tracker_test.go | 2 +- ethclient/simulated/backend_test.go | 2 +- ethclient/simulated/rollback_test.go | 16 + ethdb/pebble/pebble_test.go | 2 +- graphql/internal/graphiql/build.go | 16 + internal/era/accumulator.go | 16 +- internal/era/builder.go | 16 +- internal/era/e2store/e2store.go | 16 +- internal/era/e2store/e2store_test.go | 16 +- internal/era/era.go | 16 +- internal/era/era_test.go | 16 +- internal/era/iterator.go | 16 +- miner/payload_building.go | 2 +- miner/payload_building_test.go | 2 +- miner/pending.go | 2 +- p2p/config.go | 1 + p2p/pipes/pipe.go | 2 +- rpc/client_opt_test.go | 16 + tests/fuzzers/difficulty/difficulty_test.go | 2 +- tests/fuzzers/rangeproof/rangeproof_test.go | 2 +- tests/fuzzers/txfetcher/txfetcher_test.go | 2 +- trie/trie_id.go | 2 +- trie/trienode/node.go | 2 +- trie/utils/verkle.go | 2 +- trie/utils/verkle_test.go | 4 +- trie/verkle.go | 2 +- trie/verkle_test.go | 2 +- triedb/pathdb/errors.go | 2 +- triedb/pathdb/execute.go | 2 +- triedb/pathdb/history.go | 4 +- triedb/pathdb/history_inspect.go | 2 +- triedb/pathdb/history_test.go | 4 +- triedb/pathdb/layertree.go | 4 +- triedb/pathdb/metrics.go | 4 +- triedb/pathdb/nodes.go | 2 +- triedb/pathdb/reader.go | 2 +- triedb/pathdb/states.go | 2 +- triedb/pathdb/states_test.go | 2 +- triedb/states.go | 4 +- version/version.go | 2 +- 76 files changed, 697 insertions(+), 157 deletions(-) diff 
--git a/.mailmap b/.mailmap index 312e51d854..92a9e07748 100644 --- a/.mailmap +++ b/.mailmap @@ -5,6 +5,9 @@ Aaron Kumavis Abel Nieto Abel Nieto +Adrian Sutton +Adrian Sutton + Afri Schoedon <58883403+q9f@users.noreply.github.com> Afri Schoedon <5chdn@users.noreply.github.com> <58883403+q9f@users.noreply.github.com> @@ -22,6 +25,9 @@ Alexey Akhunov Alon Muroch +Andrei Silviu Dragnea +Andrei Silviu Dragnea + Andrey Petrov Andrey Petrov @@ -51,11 +57,17 @@ Chris Ziogas Christoph Jentzsch +Daniel Liu +Daniel Liu <139250065@qq.com> + Diederik Loerakker Dimitry Khokhlov +Ha ĐANG + Domino Valdano +Domino Valdano Edgar Aroutiounian @@ -82,6 +94,9 @@ Gavin Wood Gregg Dourgarian +guangwu +guangwu + Guillaume Ballet Guillaume Ballet <3272758+gballet@users.noreply.github.com> @@ -95,13 +110,21 @@ Heiko Hees Henning Diedrich Henning Diedrich Drake Burroughs +henridf +henridf + Hwanjo Heo <34005989+hwanjo@users.noreply.github.com> +Ikko Eltociear Ashimine + Iskander (Alex) Sharipov Iskander (Alex) Sharipov Jae Kwon +James Prestwich +James Prestwich <10149425+prestwich@users.noreply.github.com> + Janoš Guljaš Janoš Guljaš Janos Guljas @@ -120,23 +143,38 @@ Jeffrey Wilcke Jens Agerberg +Jeremy Schlatter +Jeremy Schlatter + +John Chase <68833933+joohhnnn@users.noreply.github.com> + Joseph Chow Joseph Chow ethers - Joseph Goulden Justin Drake +Karl Bartel +Karl Bartel + Kenso Trabing Kenso Trabing +Liyi Guo <102356659+colinlyguo@users.noreply.github.com> + +lmittmann <3458786+lmittmann@users.noreply.github.com> +lmittmann <3458786+lmittmann@users.noreply.github.com> + Liang Ma Liang Ma Louis Holbrook Louis Holbrook +makcandrov +makcandrov <108467407+makcandrov@users.noreply.github.com> + Maran Hidskes Marian Oancea @@ -144,17 +182,33 @@ Marian Oancea Martin Becze Martin Becze +Martin Holst Swende + Martin Lundfall -Matt Garnett <14004106+lightclient@users.noreply.github.com> +Marius van der Wijden +Marius van der Wijden <115323661+vdwijden@users.noreply.github.com> + +Matt Garnett 
+Matt Garnett <14004106+lightclient@users.noreply.github.com> Matthew Halpern Matthew Halpern +meowsbits +meowsbits <45600330+meowsbits@users.noreply.github.com> + Michael Riabzev +Michael de Hoog +Michael de Hoog + Nchinda Nchinda +Nebojsa Urosevic + +nedifi <103940716+nedifi@users.noreply.github.com> + Nick Dodson Nick Johnson @@ -169,6 +223,9 @@ Olivier Hervieu Pascal Dierich Pascal Dierich +Paweł Bylica +Paweł Bylica + RJ Catalano RJ Catalano @@ -179,8 +236,22 @@ Rene Lubov <41963722+renaynay@users.noreply.github.com> Robert Zaremba Robert Zaremba +Roberto Bayardo +Roberto Bayardo + Roman Mandeleil +Sebastian Stammler +Sebastian Stammler + +Seungbae Yu +Seungbae Yu <72970043+dbadoy@users.noreply.github.com> + +Sina Mahmoodi <1591639+s1na@users.noreply.github.com> + +Steve Milk +Steve Milk <915337710@qq.com> + Sorin Neacsu Sorin Neacsu @@ -191,8 +262,14 @@ Taylor Gerring Thomas Bocek +tianyeyouyou +tianyeyouyou <150894831+tianyeyouyou@users.noreply.github.com> + Tim Cooijmans +ucwong +ucwong + Valentin Wüstholz Valentin Wüstholz @@ -221,6 +298,9 @@ Xudong Liu <33193253+r1cs@users.noreply.github.com> Yohann Léon +yzb <335357057@qq.com> +yzb <335357057@qq.com> + Zachinquarantine Zachinquarantine @@ -228,9 +308,4 @@ Ziyuan Zhong Zsolt Felföldi -meowsbits -meowsbits <45600330+meowsbits@users.noreply.github.com> - -nedifi <103940716+nedifi@users.noreply.github.com> - Максим Чусовлянов diff --git a/AUTHORS b/AUTHORS index 151c85016e..1ec240aeb6 100644 --- a/AUTHORS +++ b/AUTHORS @@ -1,52 +1,81 @@ # This is the official list of go-ethereum authors for copyright purposes. 
+0xbeny <55846654+0xbeny@users.noreply.github.com> +0xbstn +0xe3b0c4 <110295932+0xe3b0c4@users.noreply.github.com> 6543 <6543@obermui.de> +6xiaowu9 <736518585@qq.com> a e r t h Aaron Buchwald +Aaron Chen +Aaron Kumavis +Aayush Rajasekaran Abel Nieto +Abirdcfly Adam Babik Adam Schmideg Aditya Aditya Arora +Adrian Sutton Adrià Cidre Afanasii Kurakin Afri Schoedon <5chdn@users.noreply.github.com> Agustin Armellini Fischer +Ahmet Avci Ahyun Airead Alan Chen Alejandro Isaza Aleksey Smyrnov Ales Katona +alex <152680487+bodhi-crypo@users.noreply.github.com> Alex Beregszaszi +Alex Gartner Alex Leverington Alex Mazalov +Alex Mylonas Alex Pozhilenkov Alex Prut <1648497+alexprut@users.noreply.github.com> +Alex Stokes Alex Wu +Alexander Mint Alexander van der Meij Alexander Yastrebov Alexandre Van de Sande Alexey Akhunov Alexey Shekhirin alexwang <39109351+dipingxian2@users.noreply.github.com> +Alfie John Ali Atiia <42751398+aliatiia@users.noreply.github.com> Ali Hajimirza +Alvaro Sevilla am2rican5 +Amin Talebi +AMIR <31338382+amiremohamadi@users.noreply.github.com> AmitBRD <60668103+AmitBRD@users.noreply.github.com> Anatole <62328077+a2br@users.noreply.github.com> +Andre Patta Andrea Franz +Andrei Kostakov Andrei Maiboroda +Andrei Silviu Dragnea +Andrew Ashikhmin <34320705+yperbasis@users.noreply.github.com> Andrey Petrov +Andryanau Kanstantsin ANOTHEL Antoine Rondelet Antoine Toulme Anton Evangelatov Antonio Salazar Cardozo +Antony Denyer +Anusha <63559942+anusha-ctrl@users.noreply.github.com> Arba Sasmoyo Armani Ferrante Armin Braun Aron Fischer +Arran Schlosberg <519948+ARR4N@users.noreply.github.com> +ArtificialPB +Artyom Aminov atsushi-ishibashi Austin Roberts ayeowch @@ -54,83 +83,135 @@ b00ris b1ackd0t bailantaotao baizhenxuan +Bala Murali Krishna Komatireddy Balaji Shetty Pachai <32358081+balajipachai@users.noreply.github.com> Balint Gabor baptiste-b-pegasys <85155432+baptiste-b-pegasys@users.noreply.github.com> Bas van Kervel Benjamin Brent +Benjamin Prosnitz benma 
Benoit Verkindt +Bin <49082129+songzhibin97@users.noreply.github.com> Binacs +bitcoin-lightning <153181187+AtomicInnovation321@users.noreply.github.com> +bk <5810624+bkellerman@users.noreply.github.com> bloonfield +bnovil Bo Bo Ye Bob Glickstein Boqin Qin +BorkBorked <107079055+BorkBorked@users.noreply.github.com> Brandon Harden +Brandon Liu Brent Brian Schroeder +Brion <4777457+cifer76@users.noreply.github.com> Bruno Škvorc +buddho +bugmaker9371 <167614621+bugmaker9371@users.noreply.github.com> C. Brown Caesar Chad +cam-schultz <78878559+cam-schultz@users.noreply.github.com> Casey Detrio +caseylove CDsigma +Cedrick Ceelog Ceyhun Onur chabashilah changhong +Charles Cooper Chase Wright +Chawin Aiemvaravutigul Chen Quan +chen4903 <108803001+chen4903@users.noreply.github.com> Cheng Li chenglin <910372762@qq.com> chenyufeng +Chirag Garg <38765776+DeVil2O@users.noreply.github.com> +chirag-bgh <76247491+chirag-bgh@users.noreply.github.com> Chris Pacia Chris Ziogas Christian Muehlhaeuser +Christina <156356273+cratiu222@users.noreply.github.com> Christoph Jentzsch +Christopher Harrison <31964100+chrischarlesharrison@users.noreply.github.com> chuwt +cocoyeal <150209682+cocoyeal@users.noreply.github.com> cong Connor Stein Corey Lin <514971757@qq.com> courtier cpusoft +crazeteam <164632007+crazeteam@users.noreply.github.com> Crispin Flowerday croath cui <523516579@qq.com> +cui fliter +cuinix <65650185+cuinix@users.noreply.github.com> +Curith +cygaar <97691933+cygaar@users.noreply.github.com> +Dan Cline <6798349+Rjected@users.noreply.github.com> Dan DeGreef Dan Kinsley +Dan Laine Dan Sosedoff +danceratopz Daniel A. 
Nagy +Daniel Fernandes <711733+daferna@users.noreply.github.com> +Daniel Katzan <108216499+dkatzan@users.noreply.github.com> +Daniel Knopik <107140945+dknopik@users.noreply.github.com> +Daniel Liu Daniel Perez Daniel Sloof +Danno Ferrin +Danyal Prout Darioush Jalali Darrel Herbst +Darren Kelly <107671032+darrenvechain@users.noreply.github.com> +dashangcun <907225865@qq.com> Dave Appleton Dave McGregor David Cai +David Dzhalaev <72649244+DavidRomanovizc@users.noreply.github.com> David Huie +David Murdoch <187813+davidmurdoch@users.noreply.github.com> +David Theodore <29786815+infosecual@users.noreply.github.com> +ddl +Dean Eigenmann <7621705+decanus@users.noreply.github.com> +Delweng Denver Derek Chiang Derek Gottfrid +deterclosed <164524498+deterclosed@users.noreply.github.com> +Devon Bear Di Peng Diederik Loerakker Diego Siqueira Diep Pham +Dimitris Apostolou dipingxian2 <39109351+dipingxian2@users.noreply.github.com> divergencetech <94644849+divergencetech@users.noreply.github.com> +dknopik <107140945+dknopik@users.noreply.github.com> dm4 Dmitrij Koniajev Dmitry Shulyak Dmitry Zenovich Domino Valdano +DongXi Huang <418498589@qq.com> Dragan Milic dragonvslinux <35779158+dragononcrypto@users.noreply.github.com> +Dylan Vassallo +easyfold <137396765+easyfold@users.noreply.github.com> Edgar Aroutiounian Eduard S Egon Elbre Elad Eli Elias Naur +Elias Rad <146735585+nnsW3@users.noreply.github.com> Elliot Shepherd Emil emile @@ -151,11 +232,13 @@ Evgeny Evgeny Danilenko <6655321@bk.ru> evgk Evolution404 <35091674+Evolution404@users.noreply.github.com> +Exca-DK <85954505+Exca-DK@users.noreply.github.com> EXEC Fabian Vogelsteller Fabio Barone Fabio Berger FaceHo +felipe Felipe Strozberg <48066928+FelStroz@users.noreply.github.com> Felix Lange Ferenc Szabo @@ -163,68 +246,102 @@ ferhat elmas Ferran Borreguero Fiisio Fire Man <55934298+basdevelop@users.noreply.github.com> +FletcherMan flowerofdream <775654398@qq.com> fomotrader <82184770+fomotrader@users.noreply.github.com> 
+Ford <153042616+guerrierindien@users.noreply.github.com> ForLina <471133417@qq.com> Frank Szendzielarz <33515470+FrankSzendzielarz@users.noreply.github.com> Frank Wang Franklin +Freeman Jiang Furkan KAMACI Fuyang Deng GagziW Gary Rong Gautam Botrel +Gealber Morales <48373523+Gealber@users.noreply.github.com> +George Ma <164313692+availhang@users.noreply.github.com> George Ornbo +georgehao +gitglorythegreat Giuseppe Bertone Greg Colvin Gregg Dourgarian Gregory Markou <16929357+GregTheGreek@users.noreply.github.com> +guangwu +Guido Vranken Guifel Guilherme Salgado Guillaume Ballet +Guillaume Michel Guillaume Nicolas GuiltyMorishita Guruprasad Kamath <48196632+gurukamath@users.noreply.github.com> Gus Gustav Simonsson +Gustavo Silva Gísli Kristjánsson Ha ĐANG HackyMiner -hadv +Halimao <1065621723@qq.com> Hanjiang Yu Hao Bryan Cheng Hao Duan +haoran <159284258+hr98w@users.noreply.github.com> +Haotian <51777534+tmelhao@users.noreply.github.com> HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com> Harry Dutton +Harry Kalodner haryu703 <34744512+haryu703@users.noreply.github.com> +hattizai Hendrik Hofstadt Henning Diedrich henopied <13500516+henopied@users.noreply.github.com> +henridf +Henry <101552941+henry-0@users.noreply.github.com> hero5512 holisticode Hongbin Mao Hsien-Tang Kao hsyodyssey <47173566+hsyodyssey@users.noreply.github.com> +Hteev Oli Husam Ibrahim <39692071+HusamIbrahim@users.noreply.github.com> Hwanjo Heo <34005989+hwanjo@users.noreply.github.com> hydai +hyhnet +hyunchel <3271191+hyunchel@users.noreply.github.com> Hyung-Kyu Hqueue Choi +Hyunsoo Shin (Lake) +hzysvilla Håvard Anda Estensen Ian Macalinao Ian Norden +Icarus Wu icodezjb -Ikko Ashimine +ids +Ignacio Hagopian +Ikko Eltociear Ashimine Ilan Gitter <8359193+gitteri@users.noreply.github.com> +imalasong <55082705+imalasong@users.noreply.github.com> ImanSharaf <78227895+ImanSharaf@users.noreply.github.com> +imulmat4 <117636097+imulmat4@users.noreply.github.com> +Inphi +int88 
<106391185+int88@users.noreply.github.com> Isidoro Ghezzi Iskander (Alex) Sharipov +Ivan Aracki Ivan Bogatyy Ivan Daniluk Ivo Georgiev +j2gg0s jacksoom +jackyin <648588267@qq.com> Jae Kwon -James Prestwich <10149425+prestwich@users.noreply.github.com> +Jakub Freebit <49676311+jakub-freebit@users.noreply.github.com> +James Prestwich Jamie Pitts +Janko Simonovic Janoš Guljaš Jared Wasinger Jason Carver @@ -239,42 +356,63 @@ Jeff Wentworth Jeffery Robert Walsh Jeffrey Wilcke Jens Agerberg +Jens W <8270201+DragonDev1906@users.noreply.github.com> Jeremy McNevin Jeremy Schlatter Jerzy Lasyk Jesse Tane Jia Chenhui Jim McDonald +jin <35813306+lochjin@users.noreply.github.com> jk-jeongkyun <45347815+jeongkyun-oh@users.noreply.github.com> jkcomment +Joe Netti JoeGruffins <34998433+JoeGruffins@users.noreply.github.com> Joel Burget John C. Vernaleo +John Chase <68833933+joohhnnn@users.noreply.github.com> John Difool +John Hilliard +John Xu Johns Beharry +Jolly Zhao Jonas Jonathan Brown Jonathan Chappelow Jonathan Gimeno +Jonathan Otto JoranHonig Jordan Krage +Jorge Jorropo Joseph Chow +Joseph Cook <33655003+jmcook1186@users.noreply.github.com> Joshua Colvin Joshua Gutow jovijovi +jp-imx <109574657+jp-imx@users.noreply.github.com> jtakalai JU HYEONG PARK Julian Y Justin Clark-Casey +Justin Dhillon Justin Drake +Justin Traglia <95511699+jtraglia@users.noreply.github.com> Justus +KAI <35927054+ThreeAndTwo@users.noreply.github.com> +kaliubuntu0206 <139627505+kaliubuntu0206@users.noreply.github.com> +Karl Bartel +Karol Chojnowski Kawashima <91420903+sscodereth@users.noreply.github.com> +kazak ken10100147 Kenji Siu Kenso Trabing +Kero +kevaundray Kevin kevin.xu +Kiarash Hajian <133909368+kiarash8112@users.noreply.github.com> KibGzr kiel barry kilic @@ -282,8 +420,10 @@ kimmylin <30611210+kimmylin@users.noreply.github.com> Kitten King <53072918+kittenking@users.noreply.github.com> knarfeh Kobi Gurkan +Koichi Shiraishi komika Konrad Feldmeier +Kosuke Taniguchi 
<73885532+TaniguchiKosuke@users.noreply.github.com> Kris Shinn Kristofer Peterson Kumar Anirudha @@ -296,6 +436,8 @@ Lefteris Karapetsas Leif Jurvetson Leo Shklovskii LeoLiao +Leon <316032931@qq.com> +levisyin <150114626+levisyin@users.noreply.github.com> Lewis Marshall lhendre Li Dongwei @@ -305,36 +447,58 @@ libby kent libotony LieutenantRoger ligi +lilasxie +Lindlof Lio李欧 -lmittmann +Liyi Guo <102356659+colinlyguo@users.noreply.github.com> +llkhacquan <3724362+llkhacquan@users.noreply.github.com> +lmittmann <3458786+lmittmann@users.noreply.github.com> +lorenzo <31852651+lorenzo-dev1@users.noreply.github.com> Lorenzo Manacorda Louis Holbrook Luca Zeug +Lucas Lucas Hendren +Luozhu <70309026+LuozhuZhang@users.noreply.github.com> +lwh lzhfromustc <43191155+lzhfromustc@users.noreply.github.com> +Maciej Kulawik <10907694+magicxyyz@users.noreply.github.com> +Madhur Shrimal Magicking +makcandrov manlio +Manoj Kumar Maran Hidskes +Marcin Sobczak <77129288+marcindsobczak@users.noreply.github.com> +Marcus Baldassarre Marek Kotewicz Mariano Cortesi +Mario Vega +Marius G <90795310+bearpebble@users.noreply.github.com> +Marius Kjærstad Marius van der Wijden Mark Mark Rushakoff +Mark Tyneway mark.lin +markus <55011443+mdymalla@users.noreply.github.com> +Marquis Shanahan <29431502+9547@users.noreply.github.com> Martin Alex Philip Dawson Martin Holst Swende Martin Klepsch Martin Lundfall Martin Michlmayr Martin Redmond <21436+reds@users.noreply.github.com> +maskpp Mason Fischer Mateusz Morusiewicz <11313015+Ruteri@users.noreply.github.com> Mats Julian Olsen -Matt Garnett <14004106+lightclient@users.noreply.github.com> +Matt Garnett Matt K <1036969+mkrump@users.noreply.github.com> Matthew Di Ferrante Matthew Halpern Matthew Wampler-Doty +Matthieu Vachon Max Sistemich Maxim Zhiburt Maximilian Meister @@ -342,34 +506,55 @@ me020523 Melvin Junhee Woo meowsbits Micah Zoltu +Michael de Hoog Michael Forney Michael Riabzev Michael Ruminer michael1011 Miguel Mota Mike Burr +Mikel Cortes 
<45786396+cortze@users.noreply.github.com> Mikhail Mikheev +Mikhail Vazhnov +miles <66052478+miles-six@users.noreply.github.com> +Miles Chen milesvant +minh-bq <97180373+minh-bq@users.noreply.github.com> +Mio Miro Miya Chen +mmsqe +Mobin Mohanan <47410557+tr1sm0s1n@users.noreply.github.com> Mohanson +moomin <67548026+nothingmin@users.noreply.github.com> mr_franklin +Mskxn <118117161+Mskxn@users.noreply.github.com> Mudit Gupta Mymskmkt <1847234666@qq.com> Nalin Bhardwaj +nand2 +Nathan +Nathan Jo <162083209+qqqeck@users.noreply.github.com> Natsu Kagami +Naveen <116692862+naveen-imtb@users.noreply.github.com> Nchinda Nchinda -nebojsa94 +Nebojsa Urosevic necaremus nedifi <103940716+nedifi@users.noreply.github.com> needkane <604476380@qq.com> +Newt6611 <45097780+Newt6611@users.noreply.github.com> +Ng Wei Han <47109095+weiihann@users.noreply.github.com> Nguyen Kien Trung Nguyen Sy Thanh Son Nic Jansma +Nicholas Nick Dodson Nick Johnson +Nicola Cocchiaro <3538109+ncocchiaro@users.noreply.github.com> Nicolas Feignon +Nicolas Gotchac Nicolas Guillaume +Nikhil Suri Nikita Kozhemyakin Nikola Madjarevic Nilesh Trivedi @@ -379,32 +564,47 @@ njupt-moon <1015041018@njupt.edu.cn> nkbai noam-alchemy <76969113+noam-alchemy@users.noreply.github.com> nobody +noel <72006780+0x00Duke@users.noreply.github.com> Noman +norwnd <112318969+norwnd@users.noreply.github.com> nujabes403 Nye Liu +Obtuse7772 <117080049+Obtuse7772@users.noreply.github.com> Oleg Kovalov Oli Bye Oliver Tale-Yazdi Olivier Hervieu +openex Or Neeman +oseau Osoro Bironga Osuke +panicalways <113693386+panicalways@users.noreply.github.com> Pantelis Peslis +Parithosh Jayanthi +Park Changwan Pascal Dierich Patrick O'Grady Pau +Paul <41552663+molecula451@users.noreply.github.com> Paul Berg +Paul Lange Paul Litvak Paul-Armand Verhaegen Paulo L F Casaretto +Pawan Dhananjay Paweł Bylica Pedro Gomes Pedro Pombeiro +persmor <166146971+persmor@users.noreply.github.com> +Peter (bitfly) <1674920+peterbitfly@users.noreply.github.com> 
Peter Broadhurst peter cresswell Peter Pratscher Peter Simard +Peter Straus <153843855+krauspt@users.noreply.github.com> Petr Mikusek +phenix3443 Philip Schlump Pierre Neter Pierre R @@ -412,15 +612,24 @@ piersy PilkyuJung Piotr Dyraga ploui <64719999+ploui@users.noreply.github.com> +PolyMa <151764357+polymaer@users.noreply.github.com> Preston Van Loon Prince Sinha +psogv0308 +puhtaytow <18026645+puhtaytow@users.noreply.github.com> Péter Szilágyi +qcrao qd-ethan <31876119+qdgogogo@users.noreply.github.com> Qian Bin +qiuhaohao +Qt +Quentin McGaw Quest Henkart +Rachel Bousfield Rachel Franks Rafael Matias Raghav Sood +Rajaram Gaunker Ralph Caraveo Ramesh Nair rangzen @@ -430,45 +639,65 @@ rhaps107 Ricardo Catalinas Jiménez Ricardo Domingos Richard Hart +RichΛrd Rick RJ Catalano Rob Rob Mulholand Robert Zaremba +Roberto Bayardo Roc Yu +Roman Krasiuk Roman Mazalov <83914728+gopherxyz@users.noreply.github.com> Ross <9055337+Chadsr@users.noreply.github.com> +Rossen Krastev +Roy Crihfield Runchao Han +Ruohui Wang Russ Cox Ryan Schneider +Ryan Tinianov ryanc414 Rémy Roy S. Matthew English salanfe Sam <39165351+Xia-Sam@users.noreply.github.com> +Saman H. 
Pasha <51169592+saman-pasha@users.noreply.github.com> Sammy Libre <7374093+sammy007@users.noreply.github.com> Samuel Marks +Sanghee Choi <32831939+pengin7384@users.noreply.github.com> +SangIlMo <156392700+SangIlMo@users.noreply.github.com> sanskarkhare +SanYe Sarlor Sasuke1964 Satpal <28562234+SatpalSandhu61@users.noreply.github.com> Saulius Grigaitis Sean +seayyyy <163325936+seay404@users.noreply.github.com> +Sebastian Stammler Serhat Şevki Dinçer +Seungbae Yu +Seungmin Kim Shane Bammel shawn <36943337+lxex@users.noreply.github.com> shigeyuki azuchi Shihao Xia Shiming +Shiming Zhang Shintaro Kaneko shiqinfeng1 <150627601@qq.com> +Shivam Sandbhor +shivhg Shuai Qi Shude Li Shunsuke Watanabe +shuo silence Simon Jentzsch Sina Mahmoodi <1591639+s1na@users.noreply.github.com> sixdays +sjlee1125 <47561537+sjlee1125@users.noreply.github.com> SjonHortensius Slava Karpenko slumber1122 @@ -477,17 +706,29 @@ soc1c Sorin Neacsu Sparty Stein Dekker +Stephen Flynn +Stephen Guo Steve Gattuso +Steve Milk Steve Ruckdashel Steve Waldman Steven E. 
Harris Steven Roose stompesi stormpang +storyicon +strykerin +sudeep +SuiYuan <165623542+suiyuan1314@users.noreply.github.com> +Sungwoo Kim sunxiaojun2014 Suriyaa Sundararuban Sylvain Laurent +Szupingwang +tactical_retreat +Taeguk Kwon Taeik Lim +taiking tamirms Tangui Clairet Tatsuya Shimoda @@ -495,21 +736,35 @@ Taylor Gerring TColl <38299499+TColl@users.noreply.github.com> terasum tgyKomgo <52910426+tgyKomgo@users.noreply.github.com> +Thabokani <149070269+Thabokani@users.noreply.github.com> Thad Guidry +therainisme Thomas Bocek thomasmodeneis thumb8432 Ti Zhou tia-99 <67107070+tia-99@users.noreply.github.com> +tianyeyouyou +Tien Nguyen <116023870+htiennv@users.noreply.github.com> Tim Cooijmans +TinyFoxy Tobias Hildebrandt <79341166+tobias-hildebrandt@users.noreply.github.com> +tokikuch +Tom <45168162+tomdever@users.noreply.github.com> Tosh Camille +trillo +Tristan-Wilson <87238672+Tristan-Wilson@users.noreply.github.com> +trocher tsarpaul +TY <45994721+tylerK1294@users.noreply.github.com> Tyler Chambers <2775339+tylerchambers@users.noreply.github.com> +tylerni7 tzapu -ucwong +ucwong uji <49834542+uji@users.noreply.github.com> ult-bobonovski +Undefinedor +Ursulafe <152976968+Ursulafe@users.noreply.github.com> Valentin Trinqué Valentin Wüstholz Vedhavyas Singareddi @@ -528,39 +783,60 @@ Vitaly V Vivek Anand Vlad Bokov Vlad Gluhovsky +VM <112189277+sysvm@users.noreply.github.com> +vuittont60 <81072379+vuittont60@users.noreply.github.com> +wangjingcun +wangyifan Ward Bradt Water <44689567+codeoneline@users.noreply.github.com> wbt +Wei Tang weimumu <934657014@qq.com> Wenbiao Zheng Wenshao Zhong +Wihan de Beer Will Villanueva William Morriss William Setzer williambannas +willian.eth +winniehere +winterjihwan <113398351+winterjihwan@users.noreply.github.com> wuff1996 <33193253+wuff1996@users.noreply.github.com> Wuxiang Xiaobing Jiang +xiaodong <81516175+javaandfly@users.noreply.github.com> xiekeyang +xinbenlv xincaosu xinluyin 
<31590468+xinluyin@users.noreply.github.com> +xiyang <90125263+JBossBC@users.noreply.github.com> Xudong Liu <33193253+r1cs@users.noreply.github.com> xwjack yahtoo Yang Hau YaoZengzeng +ycyraum YH-Zhou +Yier <90763233+yierx@users.noreply.github.com> Yihau Chen +yihuang Yohann Léon Yoichi Hirai Yole <007yuyue@gmail.com> Yondon Fu +yong <33920876+yzhaoyu@users.noreply.github.com> YOSHIDA Masanori yoza +ysh0566 +yudrywet <166895665+yudrywet@users.noreply.github.com> +yujinpark +yukionfire yumiel yoomee1313 Yusup yutianwu ywzqwwt <39263032+ywzqwwt@users.noreply.github.com> +yzb <335357057@qq.com> zaccoding Zach Zachinquarantine @@ -568,24 +844,34 @@ zah Zahoor Mohamed Zak Cole zcheng9 +zeim839 <50573884+zeim839@users.noreply.github.com> zer0to0ne <36526113+zer0to0ne@users.noreply.github.com> zgfzgf <48779939+zgfzgf@users.noreply.github.com> Zhang Zhuo zhangsoledad <787953403@qq.com> zhaochonghe <41711151+zhaochonghe@users.noreply.github.com> +zhen peng <505380967@qq.com> Zhenguo Niu +Zheyuan He +Zhihao Lin <3955922+kkqy@users.noreply.github.com> zhiqiangxu <652732310@qq.com> Zhou Zhiyao Ziyuan Zhong Zoe Nolan +zoereco <158379334+zoereco@users.noreply.github.com> +Zoo +Zoro <40222601+BabyHalimao@users.noreply.github.com> Zou Guangxian Zsolt Felföldi Łukasz Kurowski Łukasz Zimnoch ΞTHΞЯSPHΞЯΞ <{viktor.tron,nagydani,zsfelfoldi}@gmail.com> Максим Чусовлянов +かげ <47621124+ronething-bot@users.noreply.github.com> +スパイク <1311798+spkjp@users.noreply.github.com> 大彬 沉风 +牛晓婕 <30611384+niuxiaojie81@users.noreply.github.com> 贺鹏飞 陈佳 유용환 <33824408+eric-yoo@users.noreply.github.com> diff --git a/beacon/light/sync/head_sync.go b/beacon/light/sync/head_sync.go index dd05d39588..5e41258053 100644 --- a/beacon/light/sync/head_sync.go +++ b/beacon/light/sync/head_sync.go @@ -1,4 +1,4 @@ -// Copyright 2023 The go-ethereum Authors +// Copyright 2024 The go-ethereum Authors // This file is part of the go-ethereum library. 
// // The go-ethereum library is free software: you can redistribute it and/or modify diff --git a/beacon/params/networks.go b/beacon/params/networks.go index 5b00b27953..1204e0176f 100644 --- a/beacon/params/networks.go +++ b/beacon/params/networks.go @@ -1,4 +1,4 @@ -// Copyright 2016 The go-ethereum Authors +// Copyright 2024 The go-ethereum Authors // This file is part of the go-ethereum library. // // The go-ethereum library is free software: you can redistribute it and/or modify diff --git a/cmd/abigen/namefilter.go b/cmd/abigen/namefilter.go index eea5c643c4..00945ef471 100644 --- a/cmd/abigen/namefilter.go +++ b/cmd/abigen/namefilter.go @@ -1,3 +1,19 @@ +// Copyright 2022 The go-ethereum Authors +// This file is part of go-ethereum. +// +// go-ethereum is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// go-ethereum is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with go-ethereum. If not, see . + package main import ( diff --git a/cmd/abigen/namefilter_test.go b/cmd/abigen/namefilter_test.go index ccee712018..254b3302e2 100644 --- a/cmd/abigen/namefilter_test.go +++ b/cmd/abigen/namefilter_test.go @@ -1,3 +1,19 @@ +// Copyright 2022 The go-ethereum Authors +// This file is part of go-ethereum. +// +// go-ethereum is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+// +// go-ethereum is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with go-ethereum. If not, see . + package main import ( diff --git a/cmd/blsync/main.go b/cmd/blsync/main.go index d74e1496cd..57bc46c367 100644 --- a/cmd/blsync/main.go +++ b/cmd/blsync/main.go @@ -1,18 +1,18 @@ -// Copyright 2022 The go-ethereum Authors -// This file is part of the go-ethereum library. +// Copyright 2024 The go-ethereum Authors +// This file is part of go-ethereum. // -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by +// go-ethereum is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. // -// The go-ethereum library is distributed in the hope that it will be useful, +// go-ethereum is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. +// GNU General Public License for more details. // -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . +// You should have received a copy of the GNU General Public License +// along with go-ethereum. If not, see . 
package main diff --git a/cmd/evm/eofparse.go b/cmd/evm/eofparse.go index 92182a53b3..9710735576 100644 --- a/cmd/evm/eofparse.go +++ b/cmd/evm/eofparse.go @@ -1,4 +1,4 @@ -// Copyright 2023 The go-ethereum Authors +// Copyright 2024 The go-ethereum Authors // This file is part of go-ethereum. // // go-ethereum is free software: you can redistribute it and/or modify diff --git a/cmd/evm/eofparse_test.go b/cmd/evm/eofparse_test.go index cda4b38fc9..a9119916a5 100644 --- a/cmd/evm/eofparse_test.go +++ b/cmd/evm/eofparse_test.go @@ -1,3 +1,19 @@ +// Copyright 2024 The go-ethereum Authors +// This file is part of go-ethereum. +// +// go-ethereum is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// go-ethereum is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with go-ethereum. If not, see . + package main import ( diff --git a/cmd/geth/attach_test.go b/cmd/geth/attach_test.go index ceae3a122e..1df25359c5 100644 --- a/cmd/geth/attach_test.go +++ b/cmd/geth/attach_test.go @@ -1,18 +1,18 @@ // Copyright 2022 The go-ethereum Authors -// This file is part of the go-ethereum library. +// This file is part of go-ethereum. 
// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by +// go-ethereum is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. // -// The go-ethereum library is distributed in the hope that it will be useful, +// go-ethereum is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. +// GNU General Public License for more details. // -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . +// You should have received a copy of the GNU General Public License +// along with go-ethereum. If not, see . package main diff --git a/cmd/geth/logging_test.go b/cmd/geth/logging_test.go index 4293a860ec..37fffecc30 100644 --- a/cmd/geth/logging_test.go +++ b/cmd/geth/logging_test.go @@ -1,5 +1,3 @@ -//go:build integrationtests - // Copyright 2023 The go-ethereum Authors // This file is part of go-ethereum. // @@ -16,6 +14,8 @@ // You should have received a copy of the GNU General Public License // along with go-ethereum. If not, see . +//go:build integrationtests + package main import ( diff --git a/cmd/geth/logtestcmd_active.go b/cmd/geth/logtestcmd_active.go index f2a2c5ded5..f9bbc3d4ec 100644 --- a/cmd/geth/logtestcmd_active.go +++ b/cmd/geth/logtestcmd_active.go @@ -1,5 +1,3 @@ -//go:build integrationtests - // Copyright 2023 The go-ethereum Authors // This file is part of go-ethereum. // @@ -16,6 +14,8 @@ // You should have received a copy of the GNU General Public License // along with go-ethereum. If not, see . 
+//go:build integrationtests + package main import ( diff --git a/cmd/geth/logtestcmd_inactive.go b/cmd/geth/logtestcmd_inactive.go index 691ab5bcd8..8dda1c47e4 100644 --- a/cmd/geth/logtestcmd_inactive.go +++ b/cmd/geth/logtestcmd_inactive.go @@ -1,5 +1,3 @@ -//go:build !integrationtests - // Copyright 2023 The go-ethereum Authors // This file is part of go-ethereum. // @@ -16,6 +14,8 @@ // You should have received a copy of the GNU General Public License // along with go-ethereum. If not, see . +//go:build !integrationtests + package main import "github.com/urfave/cli/v2" diff --git a/core/rawdb/accessors_trie.go b/core/rawdb/accessors_trie.go index bfe20767d9..8bd6b71eee 100644 --- a/core/rawdb/accessors_trie.go +++ b/core/rawdb/accessors_trie.go @@ -1,4 +1,4 @@ -// Copyright 2022 The go-ethereum Authors +// Copyright 2023 The go-ethereum Authors // This file is part of the go-ethereum library. // // The go-ethereum library is free software: you can redistribute it and/or modify @@ -12,7 +12,7 @@ // GNU Lesser General Public License for more details. // // You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see +// along with the go-ethereum library. If not, see . package rawdb diff --git a/core/state/statedb_fuzz_test.go b/core/state/statedb_fuzz_test.go index 2923a2c224..7dada63d45 100644 --- a/core/state/statedb_fuzz_test.go +++ b/core/state/statedb_fuzz_test.go @@ -12,7 +12,7 @@ // GNU Lesser General Public License for more details. // // You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see +// along with the go-ethereum library. If not, see . package state diff --git a/core/txindexer.go b/core/txindexer.go index b2f2188595..293124f681 100644 --- a/core/txindexer.go +++ b/core/txindexer.go @@ -12,7 +12,7 @@ // GNU Lesser General Public License for more details. 
// // You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see +// along with the go-ethereum library. If not, see . package core diff --git a/core/txindexer_test.go b/core/txindexer_test.go index 246aec8b89..4425f0d9a5 100644 --- a/core/txindexer_test.go +++ b/core/txindexer_test.go @@ -12,7 +12,7 @@ // GNU Lesser General Public License for more details. // // You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see +// along with the go-ethereum library. If not, see . package core diff --git a/core/txpool/blobpool/lookup.go b/core/txpool/blobpool/lookup.go index 2d8d0fd2bf..b5cf4d3799 100644 --- a/core/txpool/blobpool/lookup.go +++ b/core/txpool/blobpool/lookup.go @@ -1,4 +1,4 @@ -// Copyright 2022 The go-ethereum Authors +// Copyright 2024 The go-ethereum Authors // This file is part of the go-ethereum library. // // The go-ethereum library is free software: you can redistribute it and/or modify diff --git a/core/txpool/legacypool/legacypool2_test.go b/core/txpool/legacypool/legacypool2_test.go index 8af9624994..d55e85d74f 100644 --- a/core/txpool/legacypool/legacypool2_test.go +++ b/core/txpool/legacypool/legacypool2_test.go @@ -13,6 +13,7 @@ // // You should have received a copy of the GNU Lesser General Public License // along with the go-ethereum library. If not, see . + package legacypool import ( diff --git a/core/txpool/txpool.go b/core/txpool/txpool.go index 361dbe8b38..0ebf4c7e4b 100644 --- a/core/txpool/txpool.go +++ b/core/txpool/txpool.go @@ -1,4 +1,4 @@ -// Copyright 2023 The go-ethereum Authors +// Copyright 2014 The go-ethereum Authors // This file is part of the go-ethereum library. 
// // The go-ethereum library is free software: you can redistribute it and/or modify diff --git a/core/types/tx_blob_test.go b/core/types/tx_blob_test.go index 6bd0f183b7..b9e6dcb0bb 100644 --- a/core/types/tx_blob_test.go +++ b/core/types/tx_blob_test.go @@ -1,3 +1,19 @@ +// Copyright 2023 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + package types import ( diff --git a/core/vm/eof_control_flow.go b/core/vm/eof_control_flow.go index 8230780773..c0a4459906 100644 --- a/core/vm/eof_control_flow.go +++ b/core/vm/eof_control_flow.go @@ -1,18 +1,18 @@ // Copyright 2024 The go-ethereum Authors -// This file is part of go-ethereum. +// This file is part of the go-ethereum library. // -// go-ethereum is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. 
// -// go-ethereum is distributed in the hope that it will be useful, +// The go-ethereum library is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. +// GNU Lesser General Public License for more details. // -// You should have received a copy of the GNU General Public License -// along with go-ethereum. If not, see . +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . package vm diff --git a/core/vm/eof_test.go b/core/vm/eof_test.go index 8106a29728..0a9cf638ce 100644 --- a/core/vm/eof_test.go +++ b/core/vm/eof_test.go @@ -1,4 +1,4 @@ -// Copyright 2022 The go-ethereum Authors +// Copyright 2024 The go-ethereum Authors // This file is part of the go-ethereum library. // // The go-ethereum library is free software: you can redistribute it and/or modify diff --git a/core/vm/jump_table_test.go b/core/vm/jump_table_test.go index 02558035c0..a4f9759ed2 100644 --- a/core/vm/jump_table_test.go +++ b/core/vm/jump_table_test.go @@ -1,4 +1,4 @@ -// Copyright 2022 The go-ethereum Authors +// Copyright 2016 The go-ethereum Authors // This file is part of the go-ethereum library. // // The go-ethereum library is free software: you can redistribute it and/or modify diff --git a/core/vm/memory_test.go b/core/vm/memory_test.go index ba36f8023c..41389b729a 100644 --- a/core/vm/memory_test.go +++ b/core/vm/memory_test.go @@ -1,3 +1,19 @@ +// Copyright 2023 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + package vm import ( diff --git a/core/vm/program/program.go b/core/vm/program/program.go index 3b00bbae6f..5b9cfdcc5f 100644 --- a/core/vm/program/program.go +++ b/core/vm/program/program.go @@ -1,18 +1,18 @@ // Copyright 2024 The go-ethereum Authors // This file is part of the go-ethereum library. // -// The library is free software: you can redistribute it and/or modify +// The go-ethereum library is free software: you can redistribute it and/or modify // it under the terms of the GNU Lesser General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. // -// This library is distributed in the hope that it will be useful, +// The go-ethereum library is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU Lesser General Public License for more details. // // You should have received a copy of the GNU Lesser General Public License -// along with the goevmlab library. If not, see . +// along with the go-ethereum library. If not, see . // package program is a utility to create EVM bytecode for testing, but _not_ for production. As such: // diff --git a/core/vm/program/program_test.go b/core/vm/program/program_test.go index 0b34210067..ceb7d8be64 100644 --- a/core/vm/program/program_test.go +++ b/core/vm/program/program_test.go @@ -1,18 +1,18 @@ // Copyright 2024 The go-ethereum Authors // This file is part of the go-ethereum library. 
// -// The library is free software: you can redistribute it and/or modify +// The go-ethereum library is free software: you can redistribute it and/or modify // it under the terms of the GNU Lesser General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. // -// This library is distributed in the hope that it will be useful, +// The go-ethereum library is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU Lesser General Public License for more details. // // You should have received a copy of the GNU Lesser General Public License -// along with the goevmlab library. If not, see . +// along with the go-ethereum library. If not, see . package program diff --git a/eth/tracers/dir.go b/eth/tracers/dir.go index 1cdfab5454..05c76bceb7 100644 --- a/eth/tracers/dir.go +++ b/eth/tracers/dir.go @@ -1,4 +1,4 @@ -// Copyright 2017 The go-ethereum Authors +// Copyright 2024 The go-ethereum Authors // This file is part of the go-ethereum library. // // The go-ethereum library is free software: you can redistribute it and/or modify diff --git a/eth/tracers/internal/tracetest/flat_calltrace_test.go b/eth/tracers/internal/tracetest/flat_calltrace_test.go index 553eaf1b57..4390a62b4b 100644 --- a/eth/tracers/internal/tracetest/flat_calltrace_test.go +++ b/eth/tracers/internal/tracetest/flat_calltrace_test.go @@ -1,3 +1,19 @@ +// Copyright 2023 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + package tracetest import ( diff --git a/eth/tracers/internal/tracetest/makeTest.js b/eth/tracers/internal/tracetest/makeTest.js index 7a50748930..826c91f639 100644 --- a/eth/tracers/internal/tracetest/makeTest.js +++ b/eth/tracers/internal/tracetest/makeTest.js @@ -1,3 +1,19 @@ +// Copyright 2024 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + // makeTest generates a test for the configured tracer by running // a prestate reassembled and a call trace run, assembling all the // gathered information into a test case. 
diff --git a/eth/tracers/internal/tracetest/prestate_test.go b/eth/tracers/internal/tracetest/prestate_test.go index ad3d75d8de..680645970a 100644 --- a/eth/tracers/internal/tracetest/prestate_test.go +++ b/eth/tracers/internal/tracetest/prestate_test.go @@ -1,4 +1,4 @@ -// Copyright 2021 The go-ethereum Authors +// Copyright 2022 The go-ethereum Authors // This file is part of the go-ethereum library. // // The go-ethereum library is free software: you can redistribute it and/or modify diff --git a/eth/tracers/internal/tracetest/supply_test.go b/eth/tracers/internal/tracetest/supply_test.go index dc46b6d5ef..3d54ab1868 100644 --- a/eth/tracers/internal/tracetest/supply_test.go +++ b/eth/tracers/internal/tracetest/supply_test.go @@ -1,4 +1,4 @@ -// Copyright 2021 The go-ethereum Authors +// Copyright 2024 The go-ethereum Authors // This file is part of the go-ethereum library. // // The go-ethereum library is free software: you can redistribute it and/or modify diff --git a/eth/tracers/internal/tracetest/util.go b/eth/tracers/internal/tracetest/util.go index 524a396d5c..85727e29ed 100644 --- a/eth/tracers/internal/tracetest/util.go +++ b/eth/tracers/internal/tracetest/util.go @@ -1,3 +1,19 @@ +// Copyright 2022 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . 
+ package tracetest import ( diff --git a/eth/tracers/internal/util.go b/eth/tracers/internal/util.go index cff6295566..88a9f5db44 100644 --- a/eth/tracers/internal/util.go +++ b/eth/tracers/internal/util.go @@ -1,4 +1,4 @@ -// Copyright 2023 The go-ethereum Authors +// Copyright 2024 The go-ethereum Authors // This file is part of the go-ethereum library. // // The go-ethereum library is free software: you can redistribute it and/or modify diff --git a/eth/tracers/internal/util_test.go b/eth/tracers/internal/util_test.go index 6a467314cc..d6b51c4d86 100644 --- a/eth/tracers/internal/util_test.go +++ b/eth/tracers/internal/util_test.go @@ -1,4 +1,4 @@ -// Copyright 2023 The go-ethereum Authors +// Copyright 2024 The go-ethereum Authors // This file is part of the go-ethereum library. // // The go-ethereum library is free software: you can redistribute it and/or modify @@ -13,6 +13,7 @@ // // You should have received a copy of the GNU Lesser General Public License // along with the go-ethereum library. If not, see . + package internal import ( diff --git a/eth/tracers/native/call_flat.go b/eth/tracers/native/call_flat.go index e56d011139..4e7fc31a9c 100644 --- a/eth/tracers/native/call_flat.go +++ b/eth/tracers/native/call_flat.go @@ -1,4 +1,4 @@ -// Copyright 2022 The go-ethereum Authors +// Copyright 2023 The go-ethereum Authors // This file is part of the go-ethereum library. // // The go-ethereum library is free software: you can redistribute it and/or modify diff --git a/eth/tracers/tracker_test.go b/eth/tracers/tracker_test.go index 46f6ac8e51..447ab4e6b6 100644 --- a/eth/tracers/tracker_test.go +++ b/eth/tracers/tracker_test.go @@ -12,7 +12,7 @@ // GNU Lesser General Public License for more details. // // You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see +// along with the go-ethereum library. If not, see . 
package tracers diff --git a/ethclient/simulated/backend_test.go b/ethclient/simulated/backend_test.go index 8efe93e243..5bf3c6be27 100644 --- a/ethclient/simulated/backend_test.go +++ b/ethclient/simulated/backend_test.go @@ -1,4 +1,4 @@ -// Copyright 2019 The go-ethereum Authors +// Copyright 2024 The go-ethereum Authors // This file is part of the go-ethereum library. // // The go-ethereum library is free software: you can redistribute it and/or modify diff --git a/ethclient/simulated/rollback_test.go b/ethclient/simulated/rollback_test.go index 8fc9f5bc86..57c59496d5 100644 --- a/ethclient/simulated/rollback_test.go +++ b/ethclient/simulated/rollback_test.go @@ -1,3 +1,19 @@ +// Copyright 2025 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + package simulated import ( diff --git a/ethdb/pebble/pebble_test.go b/ethdb/pebble/pebble_test.go index 1d5611f211..3265491d4a 100644 --- a/ethdb/pebble/pebble_test.go +++ b/ethdb/pebble/pebble_test.go @@ -1,4 +1,4 @@ -// Copyright 2023 The go-ethereum Authors +// Copyright 2019 The go-ethereum Authors // This file is part of the go-ethereum library. 
// // The go-ethereum library is free software: you can redistribute it and/or modify diff --git a/graphql/internal/graphiql/build.go b/graphql/internal/graphiql/build.go index 0065c1112f..1daa406a88 100644 --- a/graphql/internal/graphiql/build.go +++ b/graphql/internal/graphiql/build.go @@ -1,3 +1,19 @@ +// Copyright 2023 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + package graphiql import ( diff --git a/internal/era/accumulator.go b/internal/era/accumulator.go index 2ece2755e1..cb383d8e63 100644 --- a/internal/era/accumulator.go +++ b/internal/era/accumulator.go @@ -1,18 +1,18 @@ -// Copyright 2023 The go-ethereum Authors -// This file is part of go-ethereum. +// Copyright 2024 The go-ethereum Authors +// This file is part of the go-ethereum library. // -// go-ethereum is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. 
// -// go-ethereum is distributed in the hope that it will be useful, +// The go-ethereum library is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. +// GNU Lesser General Public License for more details. // -// You should have received a copy of the GNU General Public License -// along with go-ethereum. If not, see . +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . package era diff --git a/internal/era/builder.go b/internal/era/builder.go index 33261555ba..975561564c 100644 --- a/internal/era/builder.go +++ b/internal/era/builder.go @@ -1,18 +1,18 @@ -// Copyright 2023 The go-ethereum Authors -// This file is part of go-ethereum. +// Copyright 2024 The go-ethereum Authors +// This file is part of the go-ethereum library. // -// go-ethereum is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. // -// go-ethereum is distributed in the hope that it will be useful, +// The go-ethereum library is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. +// GNU Lesser General Public License for more details. // -// You should have received a copy of the GNU General Public License -// along with go-ethereum. If not, see . 
+// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . package era diff --git a/internal/era/e2store/e2store.go b/internal/era/e2store/e2store.go index 8e4d5dd24a..9832b72d48 100644 --- a/internal/era/e2store/e2store.go +++ b/internal/era/e2store/e2store.go @@ -1,18 +1,18 @@ -// Copyright 2023 The go-ethereum Authors -// This file is part of go-ethereum. +// Copyright 2024 The go-ethereum Authors +// This file is part of the go-ethereum library. // -// go-ethereum is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. // -// go-ethereum is distributed in the hope that it will be useful, +// The go-ethereum library is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. +// GNU Lesser General Public License for more details. // -// You should have received a copy of the GNU General Public License -// along with go-ethereum. If not, see . +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . package e2store diff --git a/internal/era/e2store/e2store_test.go b/internal/era/e2store/e2store_test.go index 353942f0bd..cdaefc484e 100644 --- a/internal/era/e2store/e2store_test.go +++ b/internal/era/e2store/e2store_test.go @@ -1,18 +1,18 @@ -// Copyright 2023 The go-ethereum Authors -// This file is part of go-ethereum. +// Copyright 2024 The go-ethereum Authors +// This file is part of the go-ethereum library. 
// -// go-ethereum is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. // -// go-ethereum is distributed in the hope that it will be useful, +// The go-ethereum library is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. +// GNU Lesser General Public License for more details. // -// You should have received a copy of the GNU General Public License -// along with go-ethereum. If not, see . +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . package e2store diff --git a/internal/era/era.go b/internal/era/era.go index 6ad7339b36..daf337963d 100644 --- a/internal/era/era.go +++ b/internal/era/era.go @@ -1,18 +1,18 @@ -// Copyright 2023 The go-ethereum Authors -// This file is part of go-ethereum. +// Copyright 2024 The go-ethereum Authors +// This file is part of the go-ethereum library. // -// go-ethereum is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. 
// -// go-ethereum is distributed in the hope that it will be useful, +// The go-ethereum library is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. +// GNU Lesser General Public License for more details. // -// You should have received a copy of the GNU General Public License -// along with go-ethereum. If not, see . +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . package era diff --git a/internal/era/era_test.go b/internal/era/era_test.go index d0f56b6f88..e0bebfa449 100644 --- a/internal/era/era_test.go +++ b/internal/era/era_test.go @@ -1,18 +1,18 @@ -// Copyright 2023 The go-ethereum Authors -// This file is part of go-ethereum. +// Copyright 2024 The go-ethereum Authors +// This file is part of the go-ethereum library. // -// go-ethereum is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. // -// go-ethereum is distributed in the hope that it will be useful, +// The go-ethereum library is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. +// GNU Lesser General Public License for more details. // -// You should have received a copy of the GNU General Public License -// along with go-ethereum. If not, see . 
+// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . package era diff --git a/internal/era/iterator.go b/internal/era/iterator.go index f48aab46b4..3c4f82d850 100644 --- a/internal/era/iterator.go +++ b/internal/era/iterator.go @@ -1,18 +1,18 @@ -// Copyright 2023 The go-ethereum Authors -// This file is part of go-ethereum. +// Copyright 2024 The go-ethereum Authors +// This file is part of the go-ethereum library. // -// go-ethereum is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. // -// go-ethereum is distributed in the hope that it will be useful, +// The go-ethereum library is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. +// GNU Lesser General Public License for more details. // -// You should have received a copy of the GNU General Public License -// along with go-ethereum. If not, see . +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . package era diff --git a/miner/payload_building.go b/miner/payload_building.go index 3090de5d4b..6b010186bf 100644 --- a/miner/payload_building.go +++ b/miner/payload_building.go @@ -12,7 +12,7 @@ // GNU Lesser General Public License for more details. // // You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see +// along with the go-ethereum library. If not, see . 
package miner diff --git a/miner/payload_building_test.go b/miner/payload_building_test.go index 307024c6bb..e0791921d6 100644 --- a/miner/payload_building_test.go +++ b/miner/payload_building_test.go @@ -12,7 +12,7 @@ // GNU Lesser General Public License for more details. // // You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see +// along with the go-ethereum library. If not, see . package miner diff --git a/miner/pending.go b/miner/pending.go index bb91fe8969..9480f9cae8 100644 --- a/miner/pending.go +++ b/miner/pending.go @@ -1,4 +1,4 @@ -// Copyright 2024 The go-ethereum Authors +// Copyright 2016 The go-ethereum Authors // This file is part of the go-ethereum library. // // The go-ethereum library is free software: you can redistribute it and/or modify diff --git a/p2p/config.go b/p2p/config.go index 14492a2e55..5ea62e12f5 100644 --- a/p2p/config.go +++ b/p2p/config.go @@ -13,6 +13,7 @@ // // You should have received a copy of the GNU Lesser General Public License // along with the go-ethereum library. If not, see . + package p2p import ( diff --git a/p2p/pipes/pipe.go b/p2p/pipes/pipe.go index cf1f3e2a80..a1423687bb 100644 --- a/p2p/pipes/pipe.go +++ b/p2p/pipes/pipe.go @@ -1,4 +1,4 @@ -// Copyright 2024 The go-ethereum Authors +// Copyright 2018 The go-ethereum Authors // This file is part of the go-ethereum library. // // The go-ethereum library is free software: you can redistribute it and/or modify diff --git a/rpc/client_opt_test.go b/rpc/client_opt_test.go index d7cc2572a7..f62f689f6a 100644 --- a/rpc/client_opt_test.go +++ b/rpc/client_opt_test.go @@ -1,3 +1,19 @@ +// Copyright 2022 The go-ethereum Authors +// This file is part of the go-ethereum library. 
+// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + package rpc_test import ( diff --git a/tests/fuzzers/difficulty/difficulty_test.go b/tests/fuzzers/difficulty/difficulty_test.go index 49beedb486..c906089f8a 100644 --- a/tests/fuzzers/difficulty/difficulty_test.go +++ b/tests/fuzzers/difficulty/difficulty_test.go @@ -1,4 +1,4 @@ -// Copyright 2023 The go-ethereum Authors +// Copyright 2017 The go-ethereum Authors // This file is part of the go-ethereum library. // // The go-ethereum library is free software: you can redistribute it and/or modify diff --git a/tests/fuzzers/rangeproof/rangeproof_test.go b/tests/fuzzers/rangeproof/rangeproof_test.go index bc7badc5b3..6b8d140e9c 100644 --- a/tests/fuzzers/rangeproof/rangeproof_test.go +++ b/tests/fuzzers/rangeproof/rangeproof_test.go @@ -1,4 +1,4 @@ -// Copyright 2023 The go-ethereum Authors +// Copyright 2017 The go-ethereum Authors // This file is part of the go-ethereum library. 
// // The go-ethereum library is free software: you can redistribute it and/or modify diff --git a/tests/fuzzers/txfetcher/txfetcher_test.go b/tests/fuzzers/txfetcher/txfetcher_test.go index ac2e6b1c67..c6647f62d4 100644 --- a/tests/fuzzers/txfetcher/txfetcher_test.go +++ b/tests/fuzzers/txfetcher/txfetcher_test.go @@ -1,4 +1,4 @@ -// Copyright 2023 The go-ethereum Authors +// Copyright 2017 The go-ethereum Authors // This file is part of the go-ethereum library. // // The go-ethereum library is free software: you can redistribute it and/or modify diff --git a/trie/trie_id.go b/trie/trie_id.go index 8ab490ca3b..7ac371b53e 100644 --- a/trie/trie_id.go +++ b/trie/trie_id.go @@ -12,7 +12,7 @@ // GNU Lesser General Public License for more details. // // You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see +// along with the go-ethereum library. If not, see . package trie diff --git a/trie/trienode/node.go b/trie/trienode/node.go index 047a7a4bd8..b09ec66374 100644 --- a/trie/trienode/node.go +++ b/trie/trienode/node.go @@ -12,7 +12,7 @@ // GNU Lesser General Public License for more details. // // You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see +// along with the go-ethereum library. If not, see . package trienode diff --git a/trie/utils/verkle.go b/trie/utils/verkle.go index 1d33f6c3e5..dea210c046 100644 --- a/trie/utils/verkle.go +++ b/trie/utils/verkle.go @@ -1,4 +1,4 @@ -// Copyright 2023 go-ethereum Authors +// Copyright 2023 The go-ethereum Authors // This file is part of the go-ethereum library. 
// // The go-ethereum library is free software: you can redistribute it and/or modify diff --git a/trie/utils/verkle_test.go b/trie/utils/verkle_test.go index dbec29685b..44c67ba03e 100644 --- a/trie/utils/verkle_test.go +++ b/trie/utils/verkle_test.go @@ -1,4 +1,4 @@ -// Copyright 2023 go-ethereum Authors +// Copyright 2023 The go-ethereum Authors // This file is part of the go-ethereum library. // // The go-ethereum library is free software: you can redistribute it and/or modify @@ -12,7 +12,7 @@ // GNU Lesser General Public License for more details. // // You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see +// along with the go-ethereum library. If not, see . package utils diff --git a/trie/verkle.go b/trie/verkle.go index 2e4d62cd10..015b8f6590 100644 --- a/trie/verkle.go +++ b/trie/verkle.go @@ -1,4 +1,4 @@ -// Copyright 2023 go-ethereum Authors +// Copyright 2023 The go-ethereum Authors // This file is part of the go-ethereum library. // // The go-ethereum library is free software: you can redistribute it and/or modify diff --git a/trie/verkle_test.go b/trie/verkle_test.go index 84eec2ed30..f31ab02df9 100644 --- a/trie/verkle_test.go +++ b/trie/verkle_test.go @@ -1,4 +1,4 @@ -// Copyright 2023 go-ethereum Authors +// Copyright 2023 The go-ethereum Authors // This file is part of the go-ethereum library. // // The go-ethereum library is free software: you can redistribute it and/or modify diff --git a/triedb/pathdb/errors.go b/triedb/pathdb/errors.go index 498bc9ec81..49e9c3ca64 100644 --- a/triedb/pathdb/errors.go +++ b/triedb/pathdb/errors.go @@ -12,7 +12,7 @@ // GNU Lesser General Public License for more details. // // You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see +// along with the go-ethereum library. If not, see . 
package pathdb diff --git a/triedb/pathdb/execute.go b/triedb/pathdb/execute.go index 80cecb82e7..2400f280a3 100644 --- a/triedb/pathdb/execute.go +++ b/triedb/pathdb/execute.go @@ -12,7 +12,7 @@ // GNU Lesser General Public License for more details. // // You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see +// along with the go-ethereum library. If not, see . package pathdb diff --git a/triedb/pathdb/history.go b/triedb/pathdb/history.go index d8fb99ade4..6ba9aaf1bb 100644 --- a/triedb/pathdb/history.go +++ b/triedb/pathdb/history.go @@ -1,4 +1,4 @@ -// Copyright 2022 The go-ethereum Authors +// Copyright 2023 The go-ethereum Authors // This file is part of the go-ethereum library. // // The go-ethereum library is free software: you can redistribute it and/or modify @@ -12,7 +12,7 @@ // GNU Lesser General Public License for more details. // // You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see +// along with the go-ethereum library. If not, see . package pathdb diff --git a/triedb/pathdb/history_inspect.go b/triedb/pathdb/history_inspect.go index 7dbe5959dc..9458e2478b 100644 --- a/triedb/pathdb/history_inspect.go +++ b/triedb/pathdb/history_inspect.go @@ -12,7 +12,7 @@ // GNU Lesser General Public License for more details. // // You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . package pathdb diff --git a/triedb/pathdb/history_test.go b/triedb/pathdb/history_test.go index 953f023530..2928d19d74 100644 --- a/triedb/pathdb/history_test.go +++ b/triedb/pathdb/history_test.go @@ -1,4 +1,4 @@ -// Copyright 2022 The go-ethereum Authors +// Copyright 2023 The go-ethereum Authors // This file is part of the go-ethereum library. 
// // The go-ethereum library is free software: you can redistribute it and/or modify @@ -12,7 +12,7 @@ // GNU Lesser General Public License for more details. // // You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see +// along with the go-ethereum library. If not, see . package pathdb diff --git a/triedb/pathdb/layertree.go b/triedb/pathdb/layertree.go index 0bd086c2f3..85a5e470e7 100644 --- a/triedb/pathdb/layertree.go +++ b/triedb/pathdb/layertree.go @@ -1,4 +1,4 @@ -// Copyright 2022 The go-ethereum Authors +// Copyright 2023 The go-ethereum Authors // This file is part of the go-ethereum library. // // The go-ethereum library is free software: you can redistribute it and/or modify @@ -12,7 +12,7 @@ // GNU Lesser General Public License for more details. // // You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see +// along with the go-ethereum library. If not, see . package pathdb diff --git a/triedb/pathdb/metrics.go b/triedb/pathdb/metrics.go index 1a2559e38b..45dad6f1ae 100644 --- a/triedb/pathdb/metrics.go +++ b/triedb/pathdb/metrics.go @@ -1,4 +1,4 @@ -// Copyright 2022 The go-ethereum Authors +// Copyright 2023 The go-ethereum Authors // This file is part of the go-ethereum library. // // The go-ethereum library is free software: you can redistribute it and/or modify @@ -12,7 +12,7 @@ // GNU Lesser General Public License for more details. // // You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see +// along with the go-ethereum library. If not, see . package pathdb diff --git a/triedb/pathdb/nodes.go b/triedb/pathdb/nodes.go index dee8c872ac..c56e38066b 100644 --- a/triedb/pathdb/nodes.go +++ b/triedb/pathdb/nodes.go @@ -12,7 +12,7 @@ // GNU Lesser General Public License for more details. 
// // You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see +// along with the go-ethereum library. If not, see . package pathdb diff --git a/triedb/pathdb/reader.go b/triedb/pathdb/reader.go index a404409035..30f75d1058 100644 --- a/triedb/pathdb/reader.go +++ b/triedb/pathdb/reader.go @@ -12,7 +12,7 @@ // GNU Lesser General Public License for more details. // // You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see +// along with the go-ethereum library. If not, see . package pathdb diff --git a/triedb/pathdb/states.go b/triedb/pathdb/states.go index 969782e3c4..873a7ba618 100644 --- a/triedb/pathdb/states.go +++ b/triedb/pathdb/states.go @@ -12,7 +12,7 @@ // GNU Lesser General Public License for more details. // // You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see +// along with the go-ethereum library. If not, see . package pathdb diff --git a/triedb/pathdb/states_test.go b/triedb/pathdb/states_test.go index 30eb6ad6c8..4d181cc914 100644 --- a/triedb/pathdb/states_test.go +++ b/triedb/pathdb/states_test.go @@ -12,7 +12,7 @@ // GNU Lesser General Public License for more details. // // You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see +// along with the go-ethereum library. If not, see . package pathdb diff --git a/triedb/states.go b/triedb/states.go index 9fabdb088d..c5aa7c64b4 100644 --- a/triedb/states.go +++ b/triedb/states.go @@ -1,4 +1,4 @@ -// Copyright 2023 The go-ethereum Authors +// Copyright 2024 The go-ethereum Authors // This file is part of the go-ethereum library. // // The go-ethereum library is free software: you can redistribute it and/or modify @@ -12,7 +12,7 @@ // GNU Lesser General Public License for more details. 
// // You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see +// along with the go-ethereum library. If not, see . package triedb diff --git a/version/version.go b/version/version.go index 01bc97df51..9153e0308a 100644 --- a/version/version.go +++ b/version/version.go @@ -1,4 +1,4 @@ -// Copyright 2016 The go-ethereum Authors +// Copyright 2024 The go-ethereum Authors // This file is part of the go-ethereum library. // // The go-ethereum library is free software: you can redistribute it and/or modify From 9e33b29c740cce33b0b8747e8a772e7995cbd022 Mon Sep 17 00:00:00 2001 From: ericxtheodore Date: Thu, 6 Feb 2025 06:19:09 +0800 Subject: [PATCH 12/17] build: update to Go 1.23.6 (#31130) Co-authored-by: Felix Lange --- build/checksums.txt | 92 +++++++++++++++++++++------------------------ 1 file changed, 43 insertions(+), 49 deletions(-) diff --git a/build/checksums.txt b/build/checksums.txt index 7270492ec9..9a7101a3d5 100644 --- a/build/checksums.txt +++ b/build/checksums.txt @@ -5,56 +5,50 @@ # https://github.com/ethereum/execution-spec-tests/releases/download/v2.1.0/ ca89c76851b0900bfcc3cbb9a26cbece1f3d7c64a3bed38723e914713290df6c fixtures_develop.tar.gz -# version:golang 1.23.5 +# version:golang 1.23.6 # https://go.dev/dl/ -a6f3f4bbd3e6bdd626f79b668f212fbb5649daf75084fb79b678a0ae4d97423b go1.23.5.src.tar.gz -8d8bc7d1b362dd91426da9352741db298ff73e3e0a3ccbe6f607f80ba17647a4 go1.23.5.aix-ppc64.tar.gz -d8b310b0b6bd6a630307579165cfac8a37571483c7d6804a10dd73bbefb0827f go1.23.5.darwin-amd64.tar.gz -d2b06bf0b8299e0187dfe2d8ad39bd3dd96a6d93fe4d1cfd42c7872452f4a0a2 go1.23.5.darwin-amd64.pkg -047bfce4fbd0da6426bd30cd19716b35a466b1c15a45525ce65b9824acb33285 go1.23.5.darwin-arm64.tar.gz -f819ed94939e08a5016b9a607ec84ebbde6cb3fe59750c59d97aa300c3fd02df go1.23.5.darwin-arm64.pkg -2dec52821e1f04a538d00b2cafe70fa506f2eea94a551bfe3ce1238f1bd4966f go1.23.5.dragonfly-amd64.tar.gz 
-7204e7bc62913b12f18c61afe0bc1a92fd192c0e45a54125978592296cb84e49 go1.23.5.freebsd-386.tar.gz -90a119995ebc3e36082874df5fa8fe6da194946679d01ae8bef33c87aab99391 go1.23.5.freebsd-amd64.tar.gz -255d26d873e41ff2fc278013bb2e5f25cf2ebe8d0ec84c07e3bb1436216020d3 go1.23.5.freebsd-arm.tar.gz -2785d9122654980b59ca38305a11b34f2a1e12d9f7eb41d52efc137c1fc29e61 go1.23.5.freebsd-arm64.tar.gz -8f66a94018ab666d56868f61c579aa81e549ac9700979ce6004445d315be2d37 go1.23.5.freebsd-riscv64.tar.gz -4b7a69928385ec512a4e77a547e24118adbb92301d2be36187ff0852ba9e6303 go1.23.5.illumos-amd64.tar.gz -6ecf6a41d0925358905fa2641db0e1c9037aa5b5bcd26ca6734caf50d9196417 go1.23.5.linux-386.tar.gz -cbcad4a6482107c7c7926df1608106c189417163428200ce357695cc7e01d091 go1.23.5.linux-amd64.tar.gz -47c84d332123883653b70da2db7dd57d2a865921ba4724efcdf56b5da7021db0 go1.23.5.linux-arm64.tar.gz -04e0b5cf5c216f0aa1bf8204d49312ad0845800ab0702dfe4357c0b1241027a3 go1.23.5.linux-armv6l.tar.gz -e1d14ac2207c78d52b76ba086da18a004c70aeb58cba72cd9bef0da7d1602786 go1.23.5.linux-loong64.tar.gz -d9e937f2fac4fc863850fb4cc31ae76d5495029a62858ef09c78604472d354c0 go1.23.5.linux-mips.tar.gz -59710d0782abafd47e40d1cf96aafa596bbdee09ac7c61062404604f49bd523e go1.23.5.linux-mips64.tar.gz -bc528cd836b4aa6701a42093ed390ef9929639a0e2818759887dc5539e517cab go1.23.5.linux-mips64le.tar.gz -a0404764ea1fd4a175dc5193622b15be6ed1ab59cbfa478f5ae24531bafb6cbd go1.23.5.linux-mipsle.tar.gz -db110284a0c91d4545273f210ca95b9f89f6e3ac90f39eb819033a6b96f25897 go1.23.5.linux-ppc64.tar.gz -db268bf5710b5b1b82ab38722ba6e4427d9e4942aed78c7d09195a9dff329613 go1.23.5.linux-ppc64le.tar.gz -d9da15778442464f32acfa777ac731fd4d47362b233b83a0932380cb6d2d5dc8 go1.23.5.linux-riscv64.tar.gz -14924b917d35311eb130e263f34931043d4f9dc65f20684301bf8f60a72edcdf go1.23.5.linux-s390x.tar.gz -7b8074102e7f039bd6473c44f58cb323c98dcda48df98ad1f78aaa2664769c8f go1.23.5.netbsd-386.tar.gz -1a466b9c8900e66664b15c07548ecb156e8274cf1028ac5da84134728e6dbbed go1.23.5.netbsd-amd64.tar.gz 
-901c9e72038926e37a4dbde8f03d1d81fcb9992850901a3da1da5a25ef93e65b go1.23.5.netbsd-arm.tar.gz -221f69a7c3a920e3666633ee0b4e5c810176982e74339ba4693226996dc636e4 go1.23.5.netbsd-arm64.tar.gz -42e46cbf73febb8e6ddf848765ce1c39573736383b132402cdc487eb6be3ad06 go1.23.5.openbsd-386.tar.gz -f49e81fce17aab21800fab7c4b10c97ab02f8a9c807fdf8641ccf2f87d69289f go1.23.5.openbsd-amd64.tar.gz -d8bd7269d4670a46e702b64822254a654824347c35923ef1c444d2e8687381ea go1.23.5.openbsd-arm.tar.gz -9cb259adff431d4d28b18e3348e26fe07ea10380675051dcfd740934b5e8b9f2 go1.23.5.openbsd-arm64.tar.gz -72a03223c98fcecfb06e57c3edd584f99fb7f6574a42f59348473f354be1f379 go1.23.5.openbsd-ppc64.tar.gz -c06432b859afb36657207382b7bac03f961b8fafc18176b501d239575a9ace64 go1.23.5.openbsd-riscv64.tar.gz -b1f9b12b269ab5cd4aa7ae3dd3075c2407c1ea8bb1211e6835261f98931201cc go1.23.5.plan9-386.tar.gz -45b4026a103e2f6cd436e2b7ad24b24a40dd22c9903519b98b45c535574fa01a go1.23.5.plan9-amd64.tar.gz -6e28e26f8c1e8620006490260aa5743198843aa0003c400cb65cbf5e743b21c7 go1.23.5.plan9-arm.tar.gz -0496c9969f208bd597f3e63fb27068ce1c7ed776618da1007fcc1c8be83ca413 go1.23.5.solaris-amd64.tar.gz -8441605a005ea74c28d8c02ca5f2708c17b4df7e91796148b9f8760caafb05c1 go1.23.5.windows-386.zip -39962346d8d0cb0cc8716489ee33b08d7a220c24a9e45423487876dd4acbdac6 go1.23.5.windows-386.msi -96d74945d7daeeb98a7978d0cf099321d7eb821b45f5c510373d545162d39c20 go1.23.5.windows-amd64.zip -03e11a988a18ad7e3f9038cef836330af72ba0a454a502cda7b7faee07a0dd8a go1.23.5.windows-amd64.msi -0005b31dcf9732c280a5cceb6aa1c5ab8284bc2541d0256c221256080acf2a09 go1.23.5.windows-arm.zip -a8442de35cbac230db8c4b20e363055671f2295dc4d6b2b2dfec66b89a3c4bce go1.23.5.windows-arm.msi -4f20c2d8a5a387c227e3ef48c5506b22906139d8afd8d66a78ef3de8dda1d1c3 go1.23.5.windows-arm64.zip -6f54fb46b669345c734936c521f7e0f55555e63ed6e11efbbaaed06f9514773c go1.23.5.windows-arm64.msi +039c5b04e65279daceee8a6f71e70bd05cf5b801782b6f77c6e19e2ed0511222 go1.23.6.src.tar.gz 
+adec10f4ba56591f523aa04851f7f6900b1c61508dfa6b80e62717a8e6684a5c go1.23.6.aix-ppc64.tar.gz +782da50ce8ec5e98fac2cd3cdc6a1d7130d093294fc310038f651444232a3fb0 go1.23.6.darwin-amd64.tar.gz +5cae2450a1708aeb0333237a155640d5562abaf195defebc4306054565536221 go1.23.6.darwin-arm64.tar.gz +d52efb3020d9332477ade98163c03d2f2fe3e051b0e7e01f0e167412c66de0cb go1.23.6.dragonfly-amd64.tar.gz +d3287706b5823712ac6cf7dff684a556cff98163ef60e7b275abe3388c17aac7 go1.23.6.freebsd-386.tar.gz +ebb4c6a9b0673dbdabc439877779ed6add16575e21bd0a7955c33f692789aef6 go1.23.6.freebsd-amd64.tar.gz +b7241584afb0b161c09148f8fde16171bb743e47b99d451fbc5f5217ec7a88b6 go1.23.6.freebsd-arm.tar.gz +004718b53cedd7955d1b1dc4053539fcd1053c031f5f3374334a22befd1f8310 go1.23.6.freebsd-arm64.tar.gz +ca026ec8a30dd0c18164f40e1ce21bd725e2445f11699177d05815189a38de7a go1.23.6.freebsd-riscv64.tar.gz +7db973efa3fb2e48e45059b855721550fce8e90803e7373d3efd37b88dd821e8 go1.23.6.illumos-amd64.tar.gz +e61f87693169c0bbcc43363128f1e929b9dff0b7f448573f1bdd4e4a0b9687ba go1.23.6.linux-386.tar.gz +9379441ea310de000f33a4dc767bd966e72ab2826270e038e78b2c53c2e7802d go1.23.6.linux-amd64.tar.gz +561c780e8f4a8955d32bf72e46af0b5ee5e0debe1e4633df9a03781878219202 go1.23.6.linux-arm64.tar.gz +27a4611010c16b8c4f37ade3aada55bd5781998f02f348b164302fd5eea4eb74 go1.23.6.linux-armv6l.tar.gz +c459226424372abc2b35957cc8955dad348330714f7605093325dbb73e33c750 go1.23.6.linux-loong64.tar.gz +e2a0aff70b958a3463a7d47132a2d0238369f64578d4f7f95e679e3a5af05622 go1.23.6.linux-mips.tar.gz +7d30ec7db056311d420bf930c16abcae13c0f41c26a202868f279721ec3c2f2f go1.23.6.linux-mips64.tar.gz +74ca7bc475bcc084c6718b74df024d7de9612932cea8a6dc75e29d3a5315a23a go1.23.6.linux-mips64le.tar.gz +09bf935a14e9f59a20499989438b1655453480016bdbcb10406acf4df2678ccb go1.23.6.linux-mipsle.tar.gz +5cb2f6a5090276c72c5eda8a55896f5a3d6ea0f28d10fa1a50e8318640f02d6c go1.23.6.linux-ppc64.tar.gz +0f817201e83d78ddbfa27f5f78d9b72450b92cc21d5e045145efacd0d3244a99 go1.23.6.linux-ppc64le.tar.gz 
+f95f7f817ab22ecab4503d0704d6449ea1aa26a595f57bf9b9f94ddf2aa7c1f3 go1.23.6.linux-riscv64.tar.gz +321e7ed0d5416f731479c52fa7610b52b8079a8061967bd48cec6d66f671a60e go1.23.6.linux-s390x.tar.gz +92d678fb8e1eeeb8c6af6f22e4e5494652dcbb4a320113fc08325cb9956a2d4c go1.23.6.netbsd-386.tar.gz +86ba51e7bb26b30ea6a8d88ddb79d8e8c83b4116200040ecb7a5a44cf90a8c5c go1.23.6.netbsd-amd64.tar.gz +4b974c35345100f0be6ea66afab2781de91ee9882117314126eaf0ae90fd3816 go1.23.6.netbsd-arm.tar.gz +53e3589fc38e787a493ea038961f8e40803714dbb42754c1713b00099c12e9b9 go1.23.6.netbsd-arm64.tar.gz +6d2317b3a8505ccebff8f72d943f2ac9b82c115632e54a53a786eff24ced56d9 go1.23.6.openbsd-386.tar.gz +f699e707d95a984fcc00361d91aecdb413d3c75e18235156ffba7a89edf68aae go1.23.6.openbsd-amd64.tar.gz +3c1cf6ab893657d0bf1942e40ce115acfd27cbce1ccb9bc88fd9cd21ca3d489f go1.23.6.openbsd-arm.tar.gz +cc0875535d14001f2da23ae9af89025b28c466e8f4f4c63f991ebb6f4b02f66c go1.23.6.openbsd-arm64.tar.gz +64de80e29ca66cb566cbf8be030bf8599953af4e48402eab724cbe0a08b40602 go1.23.6.openbsd-ppc64.tar.gz +c398a6b43c569f34bb4a2d16b52f8010eaac9a2a82ecac0602b4338e35cef377 go1.23.6.openbsd-riscv64.tar.gz +10998b6b130bb7b542b407f0db42b86a913b111f8fa86d44394beaace4d45f01 go1.23.6.plan9-386.tar.gz +9fbe8065436d8d12c02f19f64f51c9107da3a7a4ac46ab5777e182e9fe88c32f go1.23.6.plan9-amd64.tar.gz +8e3c826b884daee2de37e3b070d7eac4cea5d68edab8db09910e22201c75db83 go1.23.6.plan9-arm.tar.gz +b619eff63fec86daaea92ca170559e448a58b8ba0b92eef1971bc14e92ea86a7 go1.23.6.solaris-amd64.tar.gz +96820c0f5d464dd694543329e9b4d413b17c821c03a055717a29e6735b44c2d8 go1.23.6.windows-386.zip +53fec1586850b2cf5ad6438341ff7adc5f6700dd3ec1cfa3f5e8b141df190243 go1.23.6.windows-amd64.zip +22c2518c45c20018afa20d5376dc9fd7a7e74367240ed7b5209e79a30b5c4218 go1.23.6.windows-arm.zip +a2d2ec1b3759552bdd9cdf58858f91dfbfd6ab3a472f00b5255acbed30b1aa41 go1.23.6.windows-arm64.zip # version:golangci 1.63.4 # https://github.com/golangci/golangci-lint/releases/ From 
1847b5f63552edeff1c5bbf6a281527d1b119269 Mon Sep 17 00:00:00 2001 From: Sina M <1591639+s1na@users.noreply.github.com> Date: Thu, 6 Feb 2025 12:33:40 +0100 Subject: [PATCH 13/17] build: update EEST fixtures to prague devnet-6 (#31088) Co-authored-by: lightclient --- build/checksums.txt | 6 +- build/ci.go | 4 +- tests/init_test.go | 21 ++-- tests/transaction_test.go | 32 ++++++ tests/transaction_test_util.go | 173 +++++++++++++++++++-------------- 5 files changed, 148 insertions(+), 88 deletions(-) diff --git a/build/checksums.txt b/build/checksums.txt index 9a7101a3d5..76f05e5b63 100644 --- a/build/checksums.txt +++ b/build/checksums.txt @@ -1,9 +1,9 @@ # This file contains sha256 checksums of optional build dependencies. -# version:spec-tests 2.1.0 +# version:spec-tests pectra-devnet-6@v1.0.0 # https://github.com/ethereum/execution-spec-tests/releases -# https://github.com/ethereum/execution-spec-tests/releases/download/v2.1.0/ -ca89c76851b0900bfcc3cbb9a26cbece1f3d7c64a3bed38723e914713290df6c fixtures_develop.tar.gz +# https://github.com/ethereum/execution-spec-tests/releases/download/pectra-devnet-6%40v1.0.0/fixtures_pectra-devnet-6.tar.gz +b69211752a3029083c020dc635fe12156ca1a6725a08559da540a0337586a77e fixtures_pectra-devnet-6.tar.gz # version:golang 1.23.6 # https://go.dev/dl/ diff --git a/build/ci.go b/build/ci.go index 02a4d771f6..6aa57f9cd5 100644 --- a/build/ci.go +++ b/build/ci.go @@ -338,8 +338,8 @@ func downloadSpecTestFixtures(csdb *build.ChecksumDB, cachedir string) string { log.Fatal(err) } ext := ".tar.gz" - base := "fixtures_develop" // TODO(MariusVanDerWijden) rename once the version becomes part of the filename - url := fmt.Sprintf("https://github.com/ethereum/execution-spec-tests/releases/download/v%s/%s%s", executionSpecTestsVersion, base, ext) + base := "fixtures_pectra-devnet-6" // TODO(s1na) rename once the version becomes part of the filename + url := fmt.Sprintf("https://github.com/ethereum/execution-spec-tests/releases/download/%s/%s%s", 
executionSpecTestsVersion, base, ext) archivePath := filepath.Join(cachedir, base+ext) if err := csdb.DownloadFile(url, archivePath); err != nil { log.Fatal(err) diff --git a/tests/init_test.go b/tests/init_test.go index effeec2b86..b933c9808c 100644 --- a/tests/init_test.go +++ b/tests/init_test.go @@ -34,16 +34,17 @@ import ( ) var ( - baseDir = filepath.Join(".", "testdata") - blockTestDir = filepath.Join(baseDir, "BlockchainTests") - stateTestDir = filepath.Join(baseDir, "GeneralStateTests") - legacyStateTestDir = filepath.Join(baseDir, "LegacyTests", "Constantinople", "GeneralStateTests") - transactionTestDir = filepath.Join(baseDir, "TransactionTests") - rlpTestDir = filepath.Join(baseDir, "RLPTests") - difficultyTestDir = filepath.Join(baseDir, "BasicTests") - executionSpecBlockchainTestDir = filepath.Join(".", "spec-tests", "fixtures", "blockchain_tests") - executionSpecStateTestDir = filepath.Join(".", "spec-tests", "fixtures", "state_tests") - benchmarksDir = filepath.Join(".", "evm-benchmarks", "benchmarks") + baseDir = filepath.Join(".", "testdata") + blockTestDir = filepath.Join(baseDir, "BlockchainTests") + stateTestDir = filepath.Join(baseDir, "GeneralStateTests") + legacyStateTestDir = filepath.Join(baseDir, "LegacyTests", "Constantinople", "GeneralStateTests") + transactionTestDir = filepath.Join(baseDir, "TransactionTests") + rlpTestDir = filepath.Join(baseDir, "RLPTests") + difficultyTestDir = filepath.Join(baseDir, "BasicTests") + executionSpecBlockchainTestDir = filepath.Join(".", "spec-tests", "fixtures", "blockchain_tests") + executionSpecStateTestDir = filepath.Join(".", "spec-tests", "fixtures", "state_tests") + executionSpecTransactionTestDir = filepath.Join(".", "spec-tests", "fixtures", "transaction_tests") + benchmarksDir = filepath.Join(".", "evm-benchmarks", "benchmarks") ) func readJSON(reader io.Reader, value interface{}) error { diff --git a/tests/transaction_test.go b/tests/transaction_test.go index 5179fc9afe..8147173905 100644 
--- a/tests/transaction_test.go +++ b/tests/transaction_test.go @@ -19,6 +19,7 @@ package tests import ( "testing" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/params" ) @@ -42,6 +43,20 @@ func TestTransaction(t *testing.T) { // Geth accepts it, which is not a consensus issue since we use big.Int's // internally to calculate the cost txt.skipLoad("^ttValue/TransactionWithHighValueOverflow.json") + + // The size of a create tx's initcode is only checked during the state + // transition + txt.skipLoad("^ttEIP3860/DataTestInitCodeTooBig.json") + + // The following tests require the tx precheck to be performed + // TODO(s1na): expose stateTransition.precheck publicly to be able to run these tests + txt.skipLoad("^ttEIP1559/maxPriorityFeePerGass32BytesValue.json") + txt.skipLoad("^ttEIP1559/maxPriorityFeePerGasOverflow.json") + txt.skipLoad("^ttEIP1559/maxFeePerGas32BytesValue.json") + txt.skipLoad("^ttEIP1559/maxFeePerGasOverflow.json") + txt.skipLoad("^ttEIP1559/GasLimitPriceProductPlusOneOverflow.json") + txt.skipLoad("^ttEIP1559/GasLimitPriceProductOverflow.json") + txt.walk(t, transactionTestDir, func(t *testing.T, name string, test *TransactionTest) { cfg := params.MainnetChainConfig if err := txt.checkFailure(t, test.Run(cfg)); err != nil { @@ -49,3 +64,20 @@ func TestTransaction(t *testing.T) { } }) } + +func TestExecutionSpecTransaction(t *testing.T) { + if !common.FileExist(executionSpecStateTestDir) { + t.Skipf("directory %s does not exist", executionSpecStateTestDir) + } + st := new(testMatcher) + + // Emptiness of authorization list is only validated during the tx precheck + st.skipLoad("^prague/eip7702_set_code_tx/invalid_tx/empty_authorization_list.json") + + st.walk(t, executionSpecTransactionTestDir, func(t *testing.T, name string, test *TransactionTest) { + cfg := params.MainnetChainConfig + if err := st.checkFailure(t, test.Run(cfg)); err != nil { + t.Error(err) + } + }) +} diff --git a/tests/transaction_test_util.go 
b/tests/transaction_test_util.go index 55b76df89c..3f91a82ec5 100644 --- a/tests/transaction_test_util.go +++ b/tests/transaction_test_util.go @@ -21,93 +21,120 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/common/math" "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/params" - "github.com/ethereum/go-ethereum/rlp" ) // TransactionTest checks RLP decoding and sender derivation of transactions. type TransactionTest struct { Txbytes hexutil.Bytes `json:"txbytes"` - Result ttResult -} - -type ttResult struct { - Byzantium ttFork - Constantinople ttFork - Istanbul ttFork - EIP150 ttFork - EIP158 ttFork - Frontier ttFork - Homestead ttFork + Result map[string]*ttFork } type ttFork struct { - Sender common.UnprefixedAddress `json:"sender"` - Hash common.UnprefixedHash `json:"hash"` + Sender *common.UnprefixedAddress `json:"sender"` + Hash *common.UnprefixedHash `json:"hash"` + Exception *string `json:"exception"` + IntrinsicGas math.HexOrDecimal64 `json:"intrinsicGas"` } -func (tt *TransactionTest) Run(config *params.ChainConfig) error { - validateTx := func(rlpData hexutil.Bytes, signer types.Signer, isHomestead bool, isIstanbul bool) (*common.Address, *common.Hash, error) { - tx := new(types.Transaction) - if err := rlp.DecodeBytes(rlpData, tx); err != nil { - return nil, nil, err - } - sender, err := types.Sender(signer, tx) - if err != nil { - return nil, nil, err - } - // Intrinsic gas - requiredGas, err := core.IntrinsicGas(tx.Data(), tx.AccessList(), tx.SetCodeAuthorizations(), tx.To() == nil, isHomestead, isIstanbul, false) - if err != nil { - return nil, nil, err - } - if requiredGas > tx.Gas() { - return nil, nil, fmt.Errorf("insufficient gas ( %d < %d )", tx.Gas(), requiredGas) - } - h := tx.Hash() - return &sender, &h, nil +func (tt *TransactionTest) validate() error { + if tt.Txbytes == nil { + return 
fmt.Errorf("missing txbytes") } - - for _, testcase := range []struct { - name string - signer types.Signer - fork ttFork - isHomestead bool - isIstanbul bool - }{ - {"Frontier", types.FrontierSigner{}, tt.Result.Frontier, false, false}, - {"Homestead", types.HomesteadSigner{}, tt.Result.Homestead, true, false}, - {"EIP150", types.HomesteadSigner{}, tt.Result.EIP150, true, false}, - {"EIP158", types.NewEIP155Signer(config.ChainID), tt.Result.EIP158, true, false}, - {"Byzantium", types.NewEIP155Signer(config.ChainID), tt.Result.Byzantium, true, false}, - {"Constantinople", types.NewEIP155Signer(config.ChainID), tt.Result.Constantinople, true, false}, - {"Istanbul", types.NewEIP155Signer(config.ChainID), tt.Result.Istanbul, true, true}, - } { - sender, txhash, err := validateTx(tt.Txbytes, testcase.signer, testcase.isHomestead, testcase.isIstanbul) - - if testcase.fork.Sender == (common.UnprefixedAddress{}) { - if err == nil { - return fmt.Errorf("expected error, got none (address %v)[%v]", sender.String(), testcase.name) - } - continue - } - // Should resolve the right address - if err != nil { - return fmt.Errorf("got error, expected none: %v", err) - } - if sender == nil { - return fmt.Errorf("sender was nil, should be %x", common.Address(testcase.fork.Sender)) - } - if *sender != common.Address(testcase.fork.Sender) { - return fmt.Errorf("sender mismatch: got %x, want %x", sender, testcase.fork.Sender) - } - if txhash == nil { - return fmt.Errorf("txhash was nil, should be %x", common.Hash(testcase.fork.Hash)) - } - if *txhash != common.Hash(testcase.fork.Hash) { - return fmt.Errorf("hash mismatch: got %x, want %x", *txhash, testcase.fork.Hash) + for name, fork := range tt.Result { + if err := tt.validateFork(fork); err != nil { + return fmt.Errorf("invalid %s: %v", name, err) + } + } + return nil +} + +func (tt *TransactionTest) validateFork(fork *ttFork) error { + if fork == nil { + return nil + } + if fork.Hash == nil && fork.Exception == nil { + return 
fmt.Errorf("missing hash and exception") + } + if fork.Hash != nil && fork.Sender == nil { + return fmt.Errorf("missing sender") + } + return nil +} + +func (tt *TransactionTest) Run(config *params.ChainConfig) error { + if err := tt.validate(); err != nil { + return err + } + validateTx := func(rlpData hexutil.Bytes, signer types.Signer, isHomestead, isIstanbul, isShanghai bool) (sender common.Address, hash common.Hash, requiredGas uint64, err error) { + tx := new(types.Transaction) + if err = tx.UnmarshalBinary(rlpData); err != nil { + return + } + sender, err = types.Sender(signer, tx) + if err != nil { + return + } + // Intrinsic gas + requiredGas, err = core.IntrinsicGas(tx.Data(), tx.AccessList(), tx.SetCodeAuthorizations(), tx.To() == nil, isHomestead, isIstanbul, isShanghai) + if err != nil { + return + } + if requiredGas > tx.Gas() { + return sender, hash, 0, fmt.Errorf("insufficient gas ( %d < %d )", tx.Gas(), requiredGas) + } + hash = tx.Hash() + return sender, hash, requiredGas, nil + } + for _, testcase := range []struct { + name string + signer types.Signer + fork *ttFork + isHomestead bool + isIstanbul bool + isShanghai bool + }{ + {"Frontier", types.FrontierSigner{}, tt.Result["Frontier"], false, false, false}, + {"Homestead", types.HomesteadSigner{}, tt.Result["Homestead"], true, false, false}, + {"EIP150", types.HomesteadSigner{}, tt.Result["EIP150"], true, false, false}, + {"EIP158", types.NewEIP155Signer(config.ChainID), tt.Result["EIP158"], true, false, false}, + {"Byzantium", types.NewEIP155Signer(config.ChainID), tt.Result["Byzantium"], true, false, false}, + {"Constantinople", types.NewEIP155Signer(config.ChainID), tt.Result["Constantinople"], true, false, false}, + {"Istanbul", types.NewEIP155Signer(config.ChainID), tt.Result["Istanbul"], true, true, false}, + {"Berlin", types.NewEIP2930Signer(config.ChainID), tt.Result["Berlin"], true, true, false}, + {"London", types.NewLondonSigner(config.ChainID), tt.Result["London"], true, true, 
false}, + {"Paris", types.NewLondonSigner(config.ChainID), tt.Result["Paris"], true, true, false}, + {"Shanghai", types.NewLondonSigner(config.ChainID), tt.Result["Shanghai"], true, true, true}, + {"Cancun", types.NewCancunSigner(config.ChainID), tt.Result["Cancun"], true, true, true}, + {"Prague", types.NewPragueSigner(config.ChainID), tt.Result["Prague"], true, true, true}, + } { + if testcase.fork == nil { + continue + } + sender, hash, gas, err := validateTx(tt.Txbytes, testcase.signer, testcase.isHomestead, testcase.isIstanbul, testcase.isShanghai) + if err != nil { + if testcase.fork.Hash != nil { + return fmt.Errorf("unexpected error: %v", err) + } + continue + } + if testcase.fork.Exception != nil { + return fmt.Errorf("expected error %v, got none (%v)", *testcase.fork.Exception, err) + } + if common.Hash(*testcase.fork.Hash) != hash { + return fmt.Errorf("hash mismatch: got %x, want %x", hash, common.Hash(*testcase.fork.Hash)) + } + if common.Address(*testcase.fork.Sender) != sender { + return fmt.Errorf("sender mismatch: got %x, want %x", sender, testcase.fork.Sender) + } + if hash != common.Hash(*testcase.fork.Hash) { + return fmt.Errorf("hash mismatch: got %x, want %x", hash, testcase.fork.Hash) + } + if uint64(testcase.fork.IntrinsicGas) != gas { + return fmt.Errorf("intrinsic gas mismatch: got %d, want %d", gas, uint64(testcase.fork.IntrinsicGas)) } } return nil From 756cca7c6f1324a5acf6d39d94033a6a5c7ce4c4 Mon Sep 17 00:00:00 2001 From: Felix Lange Date: Thu, 6 Feb 2025 15:05:41 +0100 Subject: [PATCH 14/17] version: release go-ethereum v1.15.0 --- version/version.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/version/version.go b/version/version.go index 9153e0308a..b6dfc70b12 100644 --- a/version/version.go +++ b/version/version.go @@ -17,8 +17,8 @@ package version const ( - Major = 1 // Major version component of the current release - Minor = 15 // Minor version component of the current release - Patch = 0 // Patch 
version component of the current release - Meta = "unstable" // Version metadata to append to the version string + Major = 1 // Major version component of the current release + Minor = 15 // Minor version component of the current release + Patch = 0 // Patch version component of the current release + Meta = "stable" // Version metadata to append to the version string ) From d74c47f8dbaf5af43c1a58e932a6849089d3fdf3 Mon Sep 17 00:00:00 2001 From: Felix Lange Date: Thu, 6 Feb 2025 15:06:46 +0100 Subject: [PATCH 15/17] version: begin v1.15.1 release cycle --- version/version.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/version/version.go b/version/version.go index b6dfc70b12..f700f9e973 100644 --- a/version/version.go +++ b/version/version.go @@ -17,8 +17,8 @@ package version const ( - Major = 1 // Major version component of the current release - Minor = 15 // Minor version component of the current release - Patch = 0 // Patch version component of the current release - Meta = "stable" // Version metadata to append to the version string + Major = 1 // Major version component of the current release + Minor = 15 // Minor version component of the current release + Patch = 1 // Patch version component of the current release + Meta = "unstable" // Version metadata to append to the version string ) From d11e9c0b517364ae3a372af48f0b34f754e1dbc0 Mon Sep 17 00:00:00 2001 From: Marcin Sobczak <77129288+marcindsobczak@users.noreply.github.com> Date: Fri, 7 Feb 2025 02:59:12 +0100 Subject: [PATCH 16/17] cmd/devp2p/internal/ethtest: remove TD from status validation (#31137) After recent changes in Geth (removing TD): https://github.com/ethereum/go-ethereum/commit/39638c81c56db2b2dfe6f51999ffd3029ee212cb#diff-d70a44d4b7a0e84fe9dcca25d368f626ae6c9bc0b8fe9690074ba92d298bcc0d Non-Geth clients are failing many devp2p tests with an error: `peering failed: status exchange failed: wrong TD in status: have 1 want 0` Right now only Geth is passing it - all 
other clients are affected by this change. I think there should be no validation of TD when checking `Status` message in hive tests. Now Geth has 0 (and hive tests requires 0) and all other clients have actual TD. And on real networks there is no validation of TD when peering --- cmd/devp2p/internal/ethtest/conn.go | 3 --- 1 file changed, 3 deletions(-) diff --git a/cmd/devp2p/internal/ethtest/conn.go b/cmd/devp2p/internal/ethtest/conn.go index 757b137aa1..b555b14784 100644 --- a/cmd/devp2p/internal/ethtest/conn.go +++ b/cmd/devp2p/internal/ethtest/conn.go @@ -316,9 +316,6 @@ loop: return fmt.Errorf("wrong head block in status, want: %#x (block %d) have %#x", want, chain.blocks[chain.Len()-1].NumberU64(), have) } - if have, want := msg.TD.Cmp(chain.TD()), 0; have != want { - return fmt.Errorf("wrong TD in status: have %v want %v", have, want) - } if have, want := msg.ForkID, chain.ForkID(); !reflect.DeepEqual(have, want) { return fmt.Errorf("wrong fork ID in status: have %v, want %v", have, want) } From 1846cc9ef48ce364da57839c83f2d54f92d8d389 Mon Sep 17 00:00:00 2001 From: Marius van der Wijden Date: Fri, 7 Feb 2025 09:28:50 +0100 Subject: [PATCH 17/17] core/tracing: add InvalidEoF hook --- core/state_transition.go | 2 +- core/tracing/hooks.go | 4 ++++ 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/core/state_transition.go b/core/state_transition.go index ee5ec85a27..bbc94ed0a3 100644 --- a/core/state_transition.go +++ b/core/state_transition.go @@ -491,7 +491,7 @@ func (st *stateTransition) execute() (*ExecutionResult, error) { // gas for initcode execution is not consumed. // Only intrinsic creation transaction costs are charged. if errors.Is(vmerr, vm.ErrInvalidEOFInitcode) { - st.state.SetNonce(msg.From, st.state.GetNonce(sender.Address())+1) + st.state.SetNonce(msg.From, st.state.GetNonce(sender.Address())+1, tracing.NonceChangeInvalidEOF) } } else { // Increment the nonce for the next transaction. 
diff --git a/core/tracing/hooks.go b/core/tracing/hooks.go index 4002b57207..68bfb2fedc 100644 --- a/core/tracing/hooks.go +++ b/core/tracing/hooks.go @@ -367,4 +367,8 @@ const ( // NonceChangeRevert is emitted when the nonce is reverted back to a previous value due to call failure. // It is only emitted when the tracer has opted in to use the journaling wrapper (WrapWithJournal). NonceChangeRevert NonceChangeReason = 6 + + // NonceChangeInvalidEOF is emitted when the nonce is changed when a new contract is created, + // but the creation fails because of an EOF error. + NonceChangeInvalidEOF NonceChangeReason = 7 )