eth/catalyst: implement getBlobsV3 (#33404)
Some checks are pending
/ Linux Build (arm) (push) Waiting to run
/ Keeper Build (push) Waiting to run
/ Linux Build (push) Waiting to run
/ Windows Build (push) Waiting to run
/ Docker Image (push) Waiting to run

This is used by cell-level dissemination (aka partial messages) to give
the CL all blobs the EL knows about and let the CL communicate efficiently
about any other missing blobs. In other words, partial responses from
the EL are useful now.

See the related (closed) PR:
https://github.com/ethereum/execution-apis/pull/674 and the new PR:
https://github.com/ethereum/execution-apis/pull/719
This commit is contained in:
Marco Munizaga 2025-12-30 17:48:50 -08:00 committed by GitHub
parent 25439aac04
commit b2843a11d6
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
3 changed files with 67 additions and 30 deletions

View file

@ -553,6 +553,23 @@ func (api *ConsensusAPI) GetBlobsV2(hashes []common.Hash) ([]*engine.BlobAndProo
if api.config().LatestFork(head.Time) < forks.Osaka { if api.config().LatestFork(head.Time) < forks.Osaka {
return nil, nil return nil, nil
} }
return api.getBlobs(hashes, true)
}
// GetBlobsV3 returns a set of blobs from the transaction pool. It behaves
// like GetBlobsV2, except that a partial response is returned when one or
// more of the requested blobs are missing.
func (api *ConsensusAPI) GetBlobsV3(hashes []common.Hash) ([]*engine.BlobAndProofV2, error) {
	// Blob queries are only answered once the Osaka fork is active; before
	// that, respond with null as the engine API requires.
	head := api.eth.BlockChain().CurrentHeader()
	if api.config().LatestFork(head.Time) < forks.Osaka {
		return nil, nil
	}
	// v2=false: allow the shared helper to return a partial result.
	return api.getBlobs(hashes, false)
}
// getBlobs returns all available blobs. In v2, partial responses are not allowed,
// while v3 supports partial responses.
func (api *ConsensusAPI) getBlobs(hashes []common.Hash, v2 bool) ([]*engine.BlobAndProofV2, error) {
if len(hashes) > 128 { if len(hashes) > 128 {
return nil, engine.TooLargeRequest.With(fmt.Errorf("requested blob count too large: %v", len(hashes))) return nil, engine.TooLargeRequest.With(fmt.Errorf("requested blob count too large: %v", len(hashes)))
} }
@ -560,28 +577,30 @@ func (api *ConsensusAPI) GetBlobsV2(hashes []common.Hash) ([]*engine.BlobAndProo
getBlobsRequestedCounter.Inc(int64(len(hashes))) getBlobsRequestedCounter.Inc(int64(len(hashes)))
getBlobsAvailableCounter.Inc(int64(available)) getBlobsAvailableCounter.Inc(int64(available))
// Optimization: check first if all blobs are available, if not, return empty response // Short circuit if partial response is not allowed
if available != len(hashes) { if v2 && available != len(hashes) {
getBlobsV2RequestMiss.Inc(1) getBlobsRequestMiss.Inc(1)
return nil, nil return nil, nil
} }
// Retrieve blobs from the pool. This operation is expensive and may involve
// heavy disk I/O.
blobs, _, proofs, err := api.eth.BlobTxPool().GetBlobs(hashes, types.BlobSidecarVersion1) blobs, _, proofs, err := api.eth.BlobTxPool().GetBlobs(hashes, types.BlobSidecarVersion1)
if err != nil { if err != nil {
return nil, engine.InvalidParams.With(err) return nil, engine.InvalidParams.With(err)
} }
// Validate the blobs from the pool and assemble the response
// To comply with API spec, check again that we really got all data needed
for _, blob := range blobs {
if blob == nil {
getBlobsV2RequestMiss.Inc(1)
return nil, nil
}
}
getBlobsV2RequestHit.Inc(1)
res := make([]*engine.BlobAndProofV2, len(hashes)) res := make([]*engine.BlobAndProofV2, len(hashes))
for i := 0; i < len(blobs); i++ { for i := range blobs {
// The blob has been evicted since the last AvailableBlobs call.
// Return null if partial response is not allowed.
if blobs[i] == nil {
if !v2 {
continue
} else {
getBlobsRequestMiss.Inc(1)
return nil, nil
}
}
var cellProofs []hexutil.Bytes var cellProofs []hexutil.Bytes
for _, proof := range proofs[i] { for _, proof := range proofs[i] {
cellProofs = append(cellProofs, proof[:]) cellProofs = append(cellProofs, proof[:])
@ -591,6 +610,13 @@ func (api *ConsensusAPI) GetBlobsV2(hashes []common.Hash) ([]*engine.BlobAndProo
CellProofs: cellProofs, CellProofs: cellProofs,
} }
} }
if len(res) == len(hashes) {
getBlobsRequestCompleteHit.Inc(1)
} else if len(res) > 0 {
getBlobsRequestPartialHit.Inc(1)
} else {
getBlobsRequestMiss.Inc(1)
}
return res, nil return res, nil
} }

View file

@ -2016,7 +2016,7 @@ func TestGetBlobsV1AfterOsakaFork(t *testing.T) {
} }
} }
func TestGetBlobsV2(t *testing.T) { func TestGetBlobsV2And3(t *testing.T) {
n, api := newGetBlobEnv(t, 1) n, api := newGetBlobEnv(t, 1)
defer n.Close() defer n.Close()
@ -2045,7 +2045,8 @@ func TestGetBlobsV2(t *testing.T) {
}, },
} }
for i, suite := range suites { for i, suite := range suites {
runGetBlobsV2(t, api, suite.start, suite.limit, suite.fillRandom, fmt.Sprintf("suite=%d", i)) runGetBlobs(t, api.GetBlobsV2, suite.start, suite.limit, suite.fillRandom, false, fmt.Sprintf("GetBlobsV2 suite=%d", i))
runGetBlobs(t, api.GetBlobsV3, suite.start, suite.limit, suite.fillRandom, true, fmt.Sprintf("GetBlobsV3 suite=%d %v", i, suite))
} }
} }
@ -2060,22 +2061,20 @@ func BenchmarkGetBlobsV2(b *testing.B) {
name := fmt.Sprintf("blobs=%d", blobs) name := fmt.Sprintf("blobs=%d", blobs)
b.Run(name, func(b *testing.B) { b.Run(name, func(b *testing.B) {
for b.Loop() { for b.Loop() {
runGetBlobsV2(b, api, 0, blobs, false, name) runGetBlobs(b, api.GetBlobsV2, 0, blobs, false, false, name)
} }
}) })
} }
} }
func runGetBlobsV2(t testing.TB, api *ConsensusAPI, start, limit int, fillRandom bool, name string) { type getBlobsFn func(hashes []common.Hash) ([]*engine.BlobAndProofV2, error)
func runGetBlobs(t testing.TB, getBlobs getBlobsFn, start, limit int, fillRandom bool, expectPartialResponse bool, name string) {
// Fill the request for retrieving blobs // Fill the request for retrieving blobs
var ( var (
vhashes []common.Hash vhashes []common.Hash
expect []*engine.BlobAndProofV2 expect []*engine.BlobAndProofV2
) )
// fill missing blob
if fillRandom {
vhashes = append(vhashes, testrand.Hash())
}
for j := start; j < limit; j++ { for j := start; j < limit; j++ {
vhashes = append(vhashes, testBlobVHashes[j]) vhashes = append(vhashes, testBlobVHashes[j])
var cellProofs []hexutil.Bytes var cellProofs []hexutil.Bytes
@ -2087,13 +2086,21 @@ func runGetBlobsV2(t testing.TB, api *ConsensusAPI, start, limit int, fillRandom
CellProofs: cellProofs, CellProofs: cellProofs,
}) })
} }
result, err := api.GetBlobsV2(vhashes) // fill missing blob
if fillRandom {
vhashes = append(vhashes, testrand.Hash())
}
result, err := getBlobs(vhashes)
if err != nil { if err != nil {
t.Errorf("Unexpected error for case %s, %v", name, err) t.Errorf("Unexpected error for case %s, %v", name, err)
} }
// null is responded if any blob is missing
if fillRandom { if fillRandom {
expect = nil if expectPartialResponse {
expect = append(expect, nil)
} else {
// Nil is expected if getBlobs can not return a partial response
expect = nil
}
} }
if !reflect.DeepEqual(result, expect) { if !reflect.DeepEqual(result, expect) {
t.Fatalf("Unexpected result for case %s", name) t.Fatalf("Unexpected result for case %s", name)

View file

@ -25,9 +25,13 @@ var (
// Number of blobs requested via getBlobsV2 that are present in the blobpool // Number of blobs requested via getBlobsV2 that are present in the blobpool
getBlobsAvailableCounter = metrics.NewRegisteredCounter("engine/getblobs/available", nil) getBlobsAvailableCounter = metrics.NewRegisteredCounter("engine/getblobs/available", nil)
// Number of times getBlobsV2 responded with “hit” // Number of times getBlobsV2/V3 responded with all blobs
getBlobsV2RequestHit = metrics.NewRegisteredCounter("engine/getblobs/hit", nil) getBlobsRequestCompleteHit = metrics.NewRegisteredCounter("engine/getblobs/hit", nil)
// Number of times getBlobsV2 responded with “miss” // Number of times getBlobsV2/V3 responded with no blobs. V2 will return no
getBlobsV2RequestMiss = metrics.NewRegisteredCounter("engine/getblobs/miss", nil) // blobs if it doesn't have all the blobs (all or nothing).
getBlobsRequestMiss = metrics.NewRegisteredCounter("engine/getblobs/miss", nil)
// Number of times getBlobsV3 responded with some, but not all, blobs
getBlobsRequestPartialHit = metrics.NewRegisteredCounter("engine/getblobs/partial", nil)
) )