1
0
Fork 0
forked from forks/go-ethereum

cmd/workload: fixed filter test request error handling (#31424)

This PR fixes the broken request error handling of the workload filter
tests. Until now `validateHistoryPruneErr` was invoked with `fq.Err` as
its input, which was always nil, so a timeout or HTTP error was reported
as a result content mismatch.
Also, in the case of `errPrunedHistory` it is wrong to return here without
setting an error, because the result would then look like a valid empty
result and a later check would fail. Instead, `errPrunedHistory` is now
always returned (without printing an error message), and the callers of
`run` should handle this special case (typically by ignoring it silently).
This commit is contained in:
Felföldi Zsolt 2025-03-20 09:23:10 +01:00 committed by GitHub
parent 8e3cd41b04
commit 03cc2942c2
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
3 changed files with 49 additions and 35 deletions

View file

@ -109,6 +109,9 @@ func (s *filterTestSuite) filterFullRange(t *utesting.T) {
func (s *filterTestSuite) queryAndCheck(t *utesting.T, query *filterQuery) {
query.run(s.cfg.client, s.cfg.historyPruneBlock)
if query.Err == errPrunedHistory {
return
}
if query.Err != nil {
t.Errorf("Filter query failed (fromBlock: %d toBlock: %d addresses: %v topics: %v error: %v)", query.FromBlock, query.ToBlock, query.Address, query.Topics, query.Err)
return
@ -126,6 +129,9 @@ func (s *filterTestSuite) fullRangeQueryAndCheck(t *utesting.T, query *filterQue
Topics: query.Topics,
}
frQuery.run(s.cfg.client, s.cfg.historyPruneBlock)
if frQuery.Err == errPrunedHistory {
return
}
if frQuery.Err != nil {
t.Errorf("Full range filter query failed (addresses: %v topics: %v error: %v)", frQuery.Address, frQuery.Topics, frQuery.Err)
return
@ -206,14 +212,11 @@ func (fq *filterQuery) run(client *client, historyPruneBlock *uint64) {
Addresses: fq.Address,
Topics: fq.Topics,
})
if err != nil {
if err = validateHistoryPruneErr(fq.Err, uint64(fq.FromBlock), historyPruneBlock); err == errPrunedHistory {
return
} else if err != nil {
fmt.Printf("Filter query failed: fromBlock: %d toBlock: %d addresses: %v topics: %v error: %v\n",
fq.FromBlock, fq.ToBlock, fq.Address, fq.Topics, err)
}
fq.Err = err
}
fq.results = logs
fq.Err = validateHistoryPruneErr(err, uint64(fq.FromBlock), historyPruneBlock)
}
// printError writes a human-readable description of the failed query to
// standard output, including its parameters and the stored fq.Err.
func (fq *filterQuery) printError() {
	const format = "Filter query failed: fromBlock: %d toBlock: %d addresses: %v topics: %v error: %v\n"
	fmt.Printf(format, fq.FromBlock, fq.ToBlock, fq.Address, fq.Topics, fq.Err)
}

View file

@ -40,7 +40,6 @@ var (
Action: filterGenCmd,
Flags: []cli.Flag{
filterQueryFileFlag,
filterErrorFileFlag,
},
}
filterQueryFileFlag = &cli.StringFlag{
@ -72,8 +71,8 @@ func filterGenCmd(ctx *cli.Context) error {
query := f.newQuery()
query.run(f.client, nil)
if query.Err != nil {
f.errors = append(f.errors, query)
continue
query.printError()
exit("filter query failed")
}
if len(query.results) > 0 && len(query.results) <= maxFilterResultSize {
for {
@ -90,8 +89,8 @@ func filterGenCmd(ctx *cli.Context) error {
)
}
if extQuery.Err != nil {
f.errors = append(f.errors, extQuery)
break
extQuery.printError()
exit("filter query failed")
}
if len(extQuery.results) > maxFilterResultSize {
break
@ -101,7 +100,6 @@ func filterGenCmd(ctx *cli.Context) error {
f.storeQuery(query)
if time.Since(lastWrite) > time.Second*10 {
f.writeQueries()
f.writeErrors()
lastWrite = time.Now()
}
}
@ -112,18 +110,15 @@ func filterGenCmd(ctx *cli.Context) error {
type filterTestGen struct {
client *client
queryFile string
errorFile string
finalizedBlock int64
queries [filterBuckets][]*filterQuery
errors []*filterQuery
}
func newFilterTestGen(ctx *cli.Context) *filterTestGen {
return &filterTestGen{
client: makeClient(ctx),
queryFile: ctx.String(filterQueryFileFlag.Name),
errorFile: ctx.String(filterErrorFileFlag.Name),
}
}
@ -360,17 +355,6 @@ func (s *filterTestGen) writeQueries() {
file.Close()
}
// writeErrors serializes the generated errors to the error file.
func (s *filterTestGen) writeErrors() {
file, err := os.Create(s.errorFile)
if err != nil {
exit(fmt.Errorf("Error creating filter error file %s: %v", s.errorFile, err))
return
}
defer file.Close()
json.NewEncoder(file).Encode(s.errors)
}
func mustGetFinalizedBlock(client *client) int64 {
ctx, cancel := context.WithTimeout(context.Background(), time.Second*5)
defer cancel()

View file

@ -17,8 +17,10 @@
package main
import (
"encoding/json"
"fmt"
"math/rand"
"os"
"slices"
"sort"
"time"
@ -41,7 +43,7 @@ var (
}
)
const passCount = 1
const passCount = 3
func filterPerfCmd(ctx *cli.Context) error {
cfg := testConfigFromCLI(ctx)
@ -61,7 +63,10 @@ func filterPerfCmd(ctx *cli.Context) error {
}
// Run test queries.
var failed, mismatch int
var (
failed, pruned, mismatch int
errors []*filterQuery
)
for i := 1; i <= passCount; i++ {
fmt.Println("Performance test pass", i, "/", passCount)
for len(queries) > 0 {
@ -71,27 +76,35 @@ func filterPerfCmd(ctx *cli.Context) error {
queries = queries[:len(queries)-1]
start := time.Now()
qt.query.run(cfg.client, cfg.historyPruneBlock)
if qt.query.Err == errPrunedHistory {
pruned++
continue
}
qt.runtime = append(qt.runtime, time.Since(start))
slices.Sort(qt.runtime)
qt.medianTime = qt.runtime[len(qt.runtime)/2]
if qt.query.Err != nil {
qt.query.printError()
errors = append(errors, qt.query)
failed++
continue
}
if rhash := qt.query.calculateHash(); *qt.query.ResultHash != rhash {
fmt.Printf("Filter query result mismatch: fromBlock: %d toBlock: %d addresses: %v topics: %v expected hash: %064x calculated hash: %064x\n", qt.query.FromBlock, qt.query.ToBlock, qt.query.Address, qt.query.Topics, *qt.query.ResultHash, rhash)
errors = append(errors, qt.query)
mismatch++
continue
}
processed = append(processed, qt)
if len(processed)%50 == 0 {
fmt.Println(" processed:", len(processed), "remaining", len(queries), "failed:", failed, "result mismatch:", mismatch)
fmt.Println(" processed:", len(processed), "remaining", len(queries), "failed:", failed, "pruned:", pruned, "result mismatch:", mismatch)
}
}
queries, processed = processed, nil
}
// Show results and stats.
fmt.Println("Performance test finished; processed:", len(queries), "failed:", failed, "result mismatch:", mismatch)
fmt.Println("Performance test finished; processed:", len(queries), "failed:", failed, "pruned:", pruned, "result mismatch:", mismatch)
stats := make([]bucketStats, len(f.queries))
var wildcardStats bucketStats
for _, qt := range queries {
@ -114,11 +127,14 @@ func filterPerfCmd(ctx *cli.Context) error {
sort.Slice(queries, func(i, j int) bool {
return queries[i].medianTime > queries[j].medianTime
})
for i := 0; i < 10; i++ {
q := queries[i]
for i, q := range queries {
if i >= 10 {
break
}
fmt.Printf("Most expensive query #%-2d median runtime: %13v max runtime: %13v result count: %4d fromBlock: %9d toBlock: %9d addresses: %v topics: %v\n",
i+1, q.medianTime, q.runtime[len(q.runtime)-1], len(q.query.results), q.query.FromBlock, q.query.ToBlock, q.query.Address, q.query.Topics)
}
writeErrors(ctx.String(filterErrorFileFlag.Name), errors)
return nil
}
@ -135,3 +151,14 @@ func (st *bucketStats) print(name string) {
fmt.Printf("%-20s queries: %4d average block length: %12.2f average log count: %7.2f average runtime: %13v\n",
name, st.count, float64(st.blocks)/float64(st.count), float64(st.logs)/float64(st.count), st.runtime/time.Duration(st.count))
}
// writeErrors serializes the collected failing queries to the error file
// so they can be inspected or replayed later.
func writeErrors(errorFile string, errors []*filterQuery) {
	file, err := os.Create(errorFile)
	if err != nil {
		exit(fmt.Errorf("Error creating filter error file %s: %v", errorFile, err))
		return
	}
	defer file.Close()
	// Report encoding failures instead of silently discarding them.
	if err := json.NewEncoder(file).Encode(errors); err != nil {
		exit(fmt.Errorf("Error writing filter error file %s: %v", errorFile, err))
	}
}