mirror of
https://github.com/ethereum/go-ethereum.git
synced 2026-03-03 10:03:47 +00:00
The total difficulty is the sum of all block difficulties from genesis to a certain block. This value was used in PoW for deciding which chain is heavier, and thus which chain to select. Since PoS has a different fork selection algorithm, all blocks since the merge have a difficulty of 0, and all total difficulties are the same for the past 2 years. Whilst the TDs are mostly useless nowadays, there was never really a reason to mess around removing them since they are so tiny. This reasoning changes when we go down the path of pruned chain history. In order to reconstruct any TD, we **must** retrieve all the headers from chain head to genesis and then iterate all the difficulties to compute the TD. In a world where we completely prune past chain segments (bodies, receipts, headers), it is not possible to reconstruct the TD at all. In a world where we still keep chain headers and prune only the rest, reconstructing it is possible as long as we process (or download) the chain forward from genesis, but trying to snap sync the head first and backfill later hits the same issue, the TD becomes impossible to calculate until genesis is backfilled. All in all, the TD is a messy out-of-state, out-of-consensus computed field that is overall useless nowadays, but code relying on it forces the client into certain modes of operation and prevents other modes or other optimizations. This PR completely nukes out the TD from the node. It doesn't compute it, it doesn't operate on it, it's as if it didn't even exist. Caveats: - Whenever we have APIs that return TD (devp2p handshake, tracer, etc.) we return a TD of 0. - For era files, we recompute the TD during export time (fairly quick) to retain the format content. - It is not possible to "verify" the merge point (i.e. with TD gone, TTD is useless). Since we're not verifying PoW any more, just blindly trust it, not verifying but blindly trusting the many year old merge point seems just the same trust model. 
- Our tests still need to be able to generate pre and post merge blocks, so they need a new way to split the merge without TTD. The PR introduces a settable ttdBlock field on the consensus object which is used by tests as the block where originally the TTD happened. This is not needed for live nodes, we never want to generate old blocks. - One merge transition consensus test was disabled. With a non-operational TD, testing how the client reacts to TTD is useless, it cannot react. Questions: - Should we also drop total terminal difficulty from the genesis json? It's a number we cannot react on any more, so maybe it would be cleaner to get rid of even more concepts. --------- Co-authored-by: Gary Rong <garyrong0905@gmail.com>
378 lines
12 KiB
Go
378 lines
12 KiB
Go
// Copyright 2020 The go-ethereum Authors
|
|
// This file is part of the go-ethereum library.
|
|
//
|
|
// The go-ethereum library is free software: you can redistribute it and/or modify
|
|
// it under the terms of the GNU Lesser General Public License as published by
|
|
// the Free Software Foundation, either version 3 of the License, or
|
|
// (at your option) any later version.
|
|
//
|
|
// The go-ethereum library is distributed in the hope that it will be useful,
|
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
// GNU Lesser General Public License for more details.
|
|
//
|
|
// You should have received a copy of the GNU Lesser General Public License
|
|
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
|
|
|
package eth
|
|
|
|
import (
|
|
"math/rand"
|
|
|
|
mapset "github.com/deckarep/golang-set/v2"
|
|
"github.com/ethereum/go-ethereum/common"
|
|
"github.com/ethereum/go-ethereum/core/types"
|
|
"github.com/ethereum/go-ethereum/p2p"
|
|
"github.com/ethereum/go-ethereum/rlp"
|
|
)
|
|
|
|
const (
	// maxKnownTxs is the maximum transaction hashes to keep in the known list
	// before starting to randomly evict them.
	maxKnownTxs = 32768

	// maxQueuedTxs is the maximum number of transactions to queue up before dropping
	// older broadcasts.
	maxQueuedTxs = 4096

	// maxQueuedTxAnns is the maximum number of transaction announcements to queue up
	// before dropping older announcements.
	maxQueuedTxAnns = 4096
)
|
|
|
|
// Peer is a collection of relevant information we have about a `eth` peer.
type Peer struct {
	id string // Unique ID for the peer, cached

	*p2p.Peer // The embedded P2P package peer
	rw        p2p.MsgReadWriter // Input/output streams for eth
	version   uint              // Protocol version negotiated

	txpool      TxPool             // Transaction pool used by the broadcasters for liveness checks
	knownTxs    *knownCache        // Set of transaction hashes known to be known by this peer
	txBroadcast chan []common.Hash // Channel used to queue transaction propagation requests
	txAnnounce  chan []common.Hash // Channel used to queue transaction announcement requests

	reqDispatch chan *request  // Dispatch channel to send requests and track them until fulfillment
	reqCancel   chan *cancel   // Dispatch channel to cancel pending requests and untrack them
	resDispatch chan *response // Dispatch channel to fulfil pending requests and untrack them

	term chan struct{} // Termination channel to stop the broadcasters
}
|
|
|
|
// NewPeer creates a wrapper for a network connection and negotiated protocol
|
|
// version.
|
|
func NewPeer(version uint, p *p2p.Peer, rw p2p.MsgReadWriter, txpool TxPool) *Peer {
|
|
peer := &Peer{
|
|
id: p.ID().String(),
|
|
Peer: p,
|
|
rw: rw,
|
|
version: version,
|
|
knownTxs: newKnownCache(maxKnownTxs),
|
|
txBroadcast: make(chan []common.Hash),
|
|
txAnnounce: make(chan []common.Hash),
|
|
reqDispatch: make(chan *request),
|
|
reqCancel: make(chan *cancel),
|
|
resDispatch: make(chan *response),
|
|
txpool: txpool,
|
|
term: make(chan struct{}),
|
|
}
|
|
// Start up all the broadcasters
|
|
go peer.broadcastTransactions()
|
|
go peer.announceTransactions()
|
|
go peer.dispatcher()
|
|
|
|
return peer
|
|
}
|
|
|
|
// Close signals the broadcast goroutine to terminate. Only ever call this if
|
|
// you created the peer yourself via NewPeer. Otherwise let whoever created it
|
|
// clean it up!
|
|
func (p *Peer) Close() {
|
|
close(p.term)
|
|
}
|
|
|
|
// ID retrieves the peer's unique identifier.
|
|
func (p *Peer) ID() string {
|
|
return p.id
|
|
}
|
|
|
|
// Version retrieves the peer's negotiated `eth` protocol version.
|
|
func (p *Peer) Version() uint {
|
|
return p.version
|
|
}
|
|
|
|
// KnownTransaction returns whether peer is known to already have a transaction.
|
|
func (p *Peer) KnownTransaction(hash common.Hash) bool {
|
|
return p.knownTxs.Contains(hash)
|
|
}
|
|
|
|
// markTransaction marks a transaction as known for the peer, ensuring that it
|
|
// will never be propagated to this particular peer.
|
|
func (p *Peer) markTransaction(hash common.Hash) {
|
|
// If we reached the memory allowance, drop a previously known transaction hash
|
|
p.knownTxs.Add(hash)
|
|
}
|
|
|
|
// SendTransactions sends transactions to the peer and includes the hashes
|
|
// in its transaction hash set for future reference.
|
|
//
|
|
// This method is a helper used by the async transaction sender. Don't call it
|
|
// directly as the queueing (memory) and transmission (bandwidth) costs should
|
|
// not be managed directly.
|
|
//
|
|
// The reasons this is public is to allow packages using this protocol to write
|
|
// tests that directly send messages without having to do the async queueing.
|
|
func (p *Peer) SendTransactions(txs types.Transactions) error {
|
|
// Mark all the transactions as known, but ensure we don't overflow our limits
|
|
for _, tx := range txs {
|
|
p.knownTxs.Add(tx.Hash())
|
|
}
|
|
return p2p.Send(p.rw, TransactionsMsg, txs)
|
|
}
|
|
|
|
// AsyncSendTransactions queues a list of transactions (by hash) to eventually
|
|
// propagate to a remote peer. The number of pending sends are capped (new ones
|
|
// will force old sends to be dropped)
|
|
func (p *Peer) AsyncSendTransactions(hashes []common.Hash) {
|
|
select {
|
|
case p.txBroadcast <- hashes:
|
|
// Mark all the transactions as known, but ensure we don't overflow our limits
|
|
p.knownTxs.Add(hashes...)
|
|
case <-p.term:
|
|
p.Log().Debug("Dropping transaction propagation", "count", len(hashes))
|
|
}
|
|
}
|
|
|
|
// sendPooledTransactionHashes sends transaction hashes (tagged with their type
|
|
// and size) to the peer and includes them in its transaction hash set for future
|
|
// reference.
|
|
//
|
|
// This method is a helper used by the async transaction announcer. Don't call it
|
|
// directly as the queueing (memory) and transmission (bandwidth) costs should
|
|
// not be managed directly.
|
|
func (p *Peer) sendPooledTransactionHashes(hashes []common.Hash, types []byte, sizes []uint32) error {
|
|
// Mark all the transactions as known, but ensure we don't overflow our limits
|
|
p.knownTxs.Add(hashes...)
|
|
return p2p.Send(p.rw, NewPooledTransactionHashesMsg, NewPooledTransactionHashesPacket{Types: types, Sizes: sizes, Hashes: hashes})
|
|
}
|
|
|
|
// AsyncSendPooledTransactionHashes queues a list of transactions hashes to eventually
|
|
// announce to a remote peer. The number of pending sends are capped (new ones
|
|
// will force old sends to be dropped)
|
|
func (p *Peer) AsyncSendPooledTransactionHashes(hashes []common.Hash) {
|
|
select {
|
|
case p.txAnnounce <- hashes:
|
|
// Mark all the transactions as known, but ensure we don't overflow our limits
|
|
p.knownTxs.Add(hashes...)
|
|
case <-p.term:
|
|
p.Log().Debug("Dropping transaction announcement", "count", len(hashes))
|
|
}
|
|
}
|
|
|
|
// ReplyPooledTransactionsRLP is the response to RequestTxs.
|
|
func (p *Peer) ReplyPooledTransactionsRLP(id uint64, hashes []common.Hash, txs []rlp.RawValue) error {
|
|
// Mark all the transactions as known, but ensure we don't overflow our limits
|
|
p.knownTxs.Add(hashes...)
|
|
|
|
// Not packed into PooledTransactionsResponse to avoid RLP decoding
|
|
return p2p.Send(p.rw, PooledTransactionsMsg, &PooledTransactionsRLPPacket{
|
|
RequestId: id,
|
|
PooledTransactionsRLPResponse: txs,
|
|
})
|
|
}
|
|
|
|
// ReplyBlockHeadersRLP is the response to GetBlockHeaders.
|
|
func (p *Peer) ReplyBlockHeadersRLP(id uint64, headers []rlp.RawValue) error {
|
|
return p2p.Send(p.rw, BlockHeadersMsg, &BlockHeadersRLPPacket{
|
|
RequestId: id,
|
|
BlockHeadersRLPResponse: headers,
|
|
})
|
|
}
|
|
|
|
// ReplyBlockBodiesRLP is the response to GetBlockBodies.
|
|
func (p *Peer) ReplyBlockBodiesRLP(id uint64, bodies []rlp.RawValue) error {
|
|
// Not packed into BlockBodiesResponse to avoid RLP decoding
|
|
return p2p.Send(p.rw, BlockBodiesMsg, &BlockBodiesRLPPacket{
|
|
RequestId: id,
|
|
BlockBodiesRLPResponse: bodies,
|
|
})
|
|
}
|
|
|
|
// ReplyReceiptsRLP is the response to GetReceipts.
|
|
func (p *Peer) ReplyReceiptsRLP(id uint64, receipts []rlp.RawValue) error {
|
|
return p2p.Send(p.rw, ReceiptsMsg, &ReceiptsRLPPacket{
|
|
RequestId: id,
|
|
ReceiptsRLPResponse: receipts,
|
|
})
|
|
}
|
|
|
|
// RequestOneHeader is a wrapper around the header query functions to fetch a
|
|
// single header. It is used solely by the fetcher.
|
|
func (p *Peer) RequestOneHeader(hash common.Hash, sink chan *Response) (*Request, error) {
|
|
p.Log().Debug("Fetching single header", "hash", hash)
|
|
id := rand.Uint64()
|
|
|
|
req := &Request{
|
|
id: id,
|
|
sink: sink,
|
|
code: GetBlockHeadersMsg,
|
|
want: BlockHeadersMsg,
|
|
data: &GetBlockHeadersPacket{
|
|
RequestId: id,
|
|
GetBlockHeadersRequest: &GetBlockHeadersRequest{
|
|
Origin: HashOrNumber{Hash: hash},
|
|
Amount: uint64(1),
|
|
Skip: uint64(0),
|
|
Reverse: false,
|
|
},
|
|
},
|
|
}
|
|
if err := p.dispatchRequest(req); err != nil {
|
|
return nil, err
|
|
}
|
|
return req, nil
|
|
}
|
|
|
|
// RequestHeadersByHash fetches a batch of blocks' headers corresponding to the
|
|
// specified header query, based on the hash of an origin block.
|
|
func (p *Peer) RequestHeadersByHash(origin common.Hash, amount int, skip int, reverse bool, sink chan *Response) (*Request, error) {
|
|
p.Log().Debug("Fetching batch of headers", "count", amount, "fromhash", origin, "skip", skip, "reverse", reverse)
|
|
id := rand.Uint64()
|
|
|
|
req := &Request{
|
|
id: id,
|
|
sink: sink,
|
|
code: GetBlockHeadersMsg,
|
|
want: BlockHeadersMsg,
|
|
data: &GetBlockHeadersPacket{
|
|
RequestId: id,
|
|
GetBlockHeadersRequest: &GetBlockHeadersRequest{
|
|
Origin: HashOrNumber{Hash: origin},
|
|
Amount: uint64(amount),
|
|
Skip: uint64(skip),
|
|
Reverse: reverse,
|
|
},
|
|
},
|
|
}
|
|
if err := p.dispatchRequest(req); err != nil {
|
|
return nil, err
|
|
}
|
|
return req, nil
|
|
}
|
|
|
|
// RequestHeadersByNumber fetches a batch of blocks' headers corresponding to the
|
|
// specified header query, based on the number of an origin block.
|
|
func (p *Peer) RequestHeadersByNumber(origin uint64, amount int, skip int, reverse bool, sink chan *Response) (*Request, error) {
|
|
p.Log().Debug("Fetching batch of headers", "count", amount, "fromnum", origin, "skip", skip, "reverse", reverse)
|
|
id := rand.Uint64()
|
|
|
|
req := &Request{
|
|
id: id,
|
|
sink: sink,
|
|
code: GetBlockHeadersMsg,
|
|
want: BlockHeadersMsg,
|
|
data: &GetBlockHeadersPacket{
|
|
RequestId: id,
|
|
GetBlockHeadersRequest: &GetBlockHeadersRequest{
|
|
Origin: HashOrNumber{Number: origin},
|
|
Amount: uint64(amount),
|
|
Skip: uint64(skip),
|
|
Reverse: reverse,
|
|
},
|
|
},
|
|
}
|
|
if err := p.dispatchRequest(req); err != nil {
|
|
return nil, err
|
|
}
|
|
return req, nil
|
|
}
|
|
|
|
// RequestBodies fetches a batch of blocks' bodies corresponding to the hashes
|
|
// specified.
|
|
func (p *Peer) RequestBodies(hashes []common.Hash, sink chan *Response) (*Request, error) {
|
|
p.Log().Debug("Fetching batch of block bodies", "count", len(hashes))
|
|
id := rand.Uint64()
|
|
|
|
req := &Request{
|
|
id: id,
|
|
sink: sink,
|
|
code: GetBlockBodiesMsg,
|
|
want: BlockBodiesMsg,
|
|
data: &GetBlockBodiesPacket{
|
|
RequestId: id,
|
|
GetBlockBodiesRequest: hashes,
|
|
},
|
|
}
|
|
if err := p.dispatchRequest(req); err != nil {
|
|
return nil, err
|
|
}
|
|
return req, nil
|
|
}
|
|
|
|
// RequestReceipts fetches a batch of transaction receipts from a remote node.
|
|
func (p *Peer) RequestReceipts(hashes []common.Hash, sink chan *Response) (*Request, error) {
|
|
p.Log().Debug("Fetching batch of receipts", "count", len(hashes))
|
|
id := rand.Uint64()
|
|
|
|
req := &Request{
|
|
id: id,
|
|
sink: sink,
|
|
code: GetReceiptsMsg,
|
|
want: ReceiptsMsg,
|
|
data: &GetReceiptsPacket{
|
|
RequestId: id,
|
|
GetReceiptsRequest: hashes,
|
|
},
|
|
}
|
|
if err := p.dispatchRequest(req); err != nil {
|
|
return nil, err
|
|
}
|
|
return req, nil
|
|
}
|
|
|
|
// RequestTxs fetches a batch of transactions from a remote node.
|
|
func (p *Peer) RequestTxs(hashes []common.Hash) error {
|
|
p.Log().Debug("Fetching batch of transactions", "count", len(hashes))
|
|
id := rand.Uint64()
|
|
|
|
requestTracker.Track(p.id, p.version, GetPooledTransactionsMsg, PooledTransactionsMsg, id)
|
|
return p2p.Send(p.rw, GetPooledTransactionsMsg, &GetPooledTransactionsPacket{
|
|
RequestId: id,
|
|
GetPooledTransactionsRequest: hashes,
|
|
})
|
|
}
|
|
|
|
// knownCache is a cache for known hashes. Once the capacity cap is reached,
// random existing entries are evicted to make room for new ones.
type knownCache struct {
	hashes mapset.Set[common.Hash] // Set of hashes currently tracked
	max    int                     // Maximum number of hashes to keep before eviction
}
|
|
|
|
// newKnownCache creates a new knownCache with a max capacity.
|
|
func newKnownCache(max int) *knownCache {
|
|
return &knownCache{
|
|
max: max,
|
|
hashes: mapset.NewSet[common.Hash](),
|
|
}
|
|
}
|
|
|
|
// Add adds a list of elements to the set.
|
|
func (k *knownCache) Add(hashes ...common.Hash) {
|
|
for k.hashes.Cardinality() > max(0, k.max-len(hashes)) {
|
|
k.hashes.Pop()
|
|
}
|
|
for _, hash := range hashes {
|
|
k.hashes.Add(hash)
|
|
}
|
|
}
|
|
|
|
// Contains returns whether the given item is in the set.
|
|
func (k *knownCache) Contains(hash common.Hash) bool {
|
|
return k.hashes.Contains(hash)
|
|
}
|
|
|
|
// Cardinality returns the number of elements in the set.
|
|
func (k *knownCache) Cardinality() int {
|
|
return k.hashes.Cardinality()
|
|
}
|