eth/filters, cmd: add config of eth_getLogs address limit (#32327)

Add cli configurable limit for the number of addresses allowed in
eth_getLogs filter criteria:
https://github.com/ethereum/go-ethereum/issues/32264
 
 Key changes:
- Added --rpc.logquerylimit CLI flag (default: 1000) to configure the
maximum number of addresses or topics allowed per search position
- Updated ethconfig.Config with a LogQueryLimit field for
configuration management
- Modified filter system to use the configurable limit instead of the
hardcoded maxAddresses constant
- Enhanced test coverage with new test cases for address limit
validation
- Removed hardcoded validation from JSON unmarshaling, moving it to
runtime validation

Please note that I removed the check in FilterCriteria's UnmarshalJSON
because the runtime configuration cannot be applied inside that validation.

Please help review this change!

---------

Co-authored-by: zsfelfoldi <zsfelfoldi@gmail.com>
Co-authored-by: rjl493456442 <garyrong0905@gmail.com>
This commit is contained in:
Long Vu 2025-09-19 05:53:40 +07:00 committed by GitHub
parent 2a82964727
commit dce511c1e5
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
8 changed files with 148 additions and 39 deletions

View file

@ -182,6 +182,7 @@ var (
utils.RPCGlobalGasCapFlag,
utils.RPCGlobalEVMTimeoutFlag,
utils.RPCGlobalTxFeeCapFlag,
utils.RPCGlobalLogQueryLimit,
utils.AllowUnprotectedTxs,
utils.BatchRequestLimit,
utils.BatchResponseMaxSize,

View file

@ -600,6 +600,12 @@ var (
Value: ethconfig.Defaults.RPCTxFeeCap,
Category: flags.APICategory,
}
RPCGlobalLogQueryLimit = &cli.IntFlag{
Name: "rpc.logquerylimit",
Usage: "Maximum number of alternative addresses or topics allowed per search position in eth_getLogs filter criteria (0 = no cap)",
Value: ethconfig.Defaults.LogQueryLimit,
Category: flags.APICategory,
}
// Authenticated RPC HTTP settings
AuthListenFlag = &cli.StringFlag{
Name: "authrpc.addr",
@ -1699,6 +1705,9 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) {
if ctx.IsSet(CacheLogSizeFlag.Name) {
cfg.FilterLogCacheSize = ctx.Int(CacheLogSizeFlag.Name)
}
if ctx.IsSet(RPCGlobalLogQueryLimit.Name) {
cfg.LogQueryLimit = ctx.Int(RPCGlobalLogQueryLimit.Name)
}
if !ctx.Bool(SnapshotFlag.Name) || cfg.SnapshotCache == 0 {
// If snap-sync is requested, this flag is also required
if cfg.SyncMode == ethconfig.SnapSync {
@ -2017,7 +2026,8 @@ func RegisterGraphQLService(stack *node.Node, backend ethapi.Backend, filterSyst
// RegisterFilterAPI adds the eth log filtering RPC API to the node.
func RegisterFilterAPI(stack *node.Node, backend ethapi.Backend, ethcfg *ethconfig.Config) *filters.FilterSystem {
filterSystem := filters.NewFilterSystem(backend, filters.Config{
LogCacheSize: ethcfg.FilterLogCacheSize,
LogCacheSize: ethcfg.FilterLogCacheSize,
LogQueryLimit: ethcfg.LogQueryLimit,
})
stack.RegisterAPIs([]rpc.API{{
Namespace: "eth",

View file

@ -62,6 +62,7 @@ var Defaults = Config{
TrieTimeout: 60 * time.Minute,
SnapshotCache: 102,
FilterLogCacheSize: 32,
LogQueryLimit: 1000,
Miner: miner.DefaultConfig,
TxPool: legacypool.DefaultConfig,
BlobPool: blobpool.DefaultConfig,
@ -131,6 +132,10 @@ type Config struct {
// This is the number of blocks for which logs will be cached in the filter system.
FilterLogCacheSize int
// This is the maximum number of addresses or topics allowed in filter criteria
// for eth_getLogs.
LogQueryLimit int
// Mining options
Miner miner.Config

View file

@ -44,6 +44,7 @@ func (c Config) MarshalTOML() (interface{}, error) {
SnapshotCache int
Preimages bool
FilterLogCacheSize int
LogQueryLimit int
Miner miner.Config
TxPool legacypool.Config
BlobPool blobpool.Config
@ -88,6 +89,7 @@ func (c Config) MarshalTOML() (interface{}, error) {
enc.SnapshotCache = c.SnapshotCache
enc.Preimages = c.Preimages
enc.FilterLogCacheSize = c.FilterLogCacheSize
enc.LogQueryLimit = c.LogQueryLimit
enc.Miner = c.Miner
enc.TxPool = c.TxPool
enc.BlobPool = c.BlobPool
@ -136,6 +138,7 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error {
SnapshotCache *int
Preimages *bool
FilterLogCacheSize *int
LogQueryLimit *int
Miner *miner.Config
TxPool *legacypool.Config
BlobPool *blobpool.Config
@ -237,6 +240,9 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error {
if dec.FilterLogCacheSize != nil {
c.FilterLogCacheSize = *dec.FilterLogCacheSize
}
if dec.LogQueryLimit != nil {
c.LogQueryLimit = *dec.LogQueryLimit
}
if dec.Miner != nil {
c.Miner = *dec.Miner
}

View file

@ -42,12 +42,10 @@ var (
errBlockHashWithRange = errors.New("can't specify fromBlock/toBlock with blockHash")
errPendingLogsUnsupported = errors.New("pending logs are not supported")
errExceedMaxTopics = errors.New("exceed max topics")
errExceedMaxAddresses = errors.New("exceed max addresses")
errExceedLogQueryLimit = errors.New("exceed max addresses or topics per search position")
)
const (
// The maximum number of addresses allowed in a filter criteria
maxAddresses = 1000
// The maximum number of topic criteria allowed, vm.LOG4 - vm.LOG0
maxTopics = 4
// The maximum number of allowed topics within a topic criteria
@ -70,20 +68,22 @@ type filter struct {
// FilterAPI offers support to create and manage filters. This will allow external clients to retrieve various
// information related to the Ethereum protocol such as blocks, transactions and logs.
type FilterAPI struct {
sys *FilterSystem
events *EventSystem
filtersMu sync.Mutex
filters map[rpc.ID]*filter
timeout time.Duration
sys *FilterSystem
events *EventSystem
filtersMu sync.Mutex
filters map[rpc.ID]*filter
timeout time.Duration
logQueryLimit int
}
// NewFilterAPI returns a new FilterAPI instance.
func NewFilterAPI(system *FilterSystem) *FilterAPI {
api := &FilterAPI{
sys: system,
events: NewEventSystem(system),
filters: make(map[rpc.ID]*filter),
timeout: system.cfg.Timeout,
sys: system,
events: NewEventSystem(system),
filters: make(map[rpc.ID]*filter),
timeout: system.cfg.Timeout,
logQueryLimit: system.cfg.LogQueryLimit,
}
go api.timeoutLoop(system.cfg.Timeout)
@ -347,8 +347,15 @@ func (api *FilterAPI) GetLogs(ctx context.Context, crit FilterCriteria) ([]*type
if len(crit.Topics) > maxTopics {
return nil, errExceedMaxTopics
}
if len(crit.Addresses) > maxAddresses {
return nil, errExceedMaxAddresses
if api.logQueryLimit != 0 {
if len(crit.Addresses) > api.logQueryLimit {
return nil, errExceedLogQueryLimit
}
for _, topics := range crit.Topics {
if len(topics) > api.logQueryLimit {
return nil, errExceedLogQueryLimit
}
}
}
var filter *Filter
@ -545,9 +552,6 @@ func (args *FilterCriteria) UnmarshalJSON(data []byte) error {
// raw.Address can contain a single address or an array of addresses
switch rawAddr := raw.Addresses.(type) {
case []interface{}:
if len(rawAddr) > maxAddresses {
return errExceedMaxAddresses
}
for i, addr := range rawAddr {
if strAddr, ok := addr.(string); ok {
addr, err := decodeAddress(strAddr)

View file

@ -19,7 +19,6 @@ package filters
import (
"encoding/json"
"fmt"
"strings"
"testing"
"github.com/ethereum/go-ethereum/common"
@ -183,15 +182,4 @@ func TestUnmarshalJSONNewFilterArgs(t *testing.T) {
if len(test7.Topics[2]) != 0 {
t.Fatalf("expected 0 topics, got %d topics", len(test7.Topics[2]))
}
// multiple address exceeding max
var test8 FilterCriteria
addresses := make([]string, maxAddresses+1)
for i := 0; i < maxAddresses+1; i++ {
addresses[i] = fmt.Sprintf(`"%s"`, common.HexToAddress(fmt.Sprintf("0x%x", i)).Hex())
}
vector = fmt.Sprintf(`{"address": [%s]}`, strings.Join(addresses, ", "))
if err := json.Unmarshal([]byte(vector), &test8); err != errExceedMaxAddresses {
t.Fatal("expected errExceedMaxAddresses, got", err)
}
}

View file

@ -41,8 +41,9 @@ import (
// Config represents the configuration of the filter system.
type Config struct {
LogCacheSize int // maximum number of cached blocks (default: 32)
Timeout time.Duration // how long filters stay active (default: 5min)
LogCacheSize int // maximum number of cached blocks (default: 32)
Timeout time.Duration // how long filters stay active (default: 5min)
LogQueryLimit int // maximum number of addresses allowed in filter criteria (default: 1000)
}
func (cfg Config) withDefaults() Config {
@ -291,8 +292,15 @@ func (es *EventSystem) SubscribeLogs(crit ethereum.FilterQuery, logs chan []*typ
if len(crit.Topics) > maxTopics {
return nil, errExceedMaxTopics
}
if len(crit.Addresses) > maxAddresses {
return nil, errExceedMaxAddresses
if es.sys.cfg.LogQueryLimit != 0 {
if len(crit.Addresses) > es.sys.cfg.LogQueryLimit {
return nil, errExceedLogQueryLimit
}
for _, topics := range crit.Topics {
if len(topics) > es.sys.cfg.LogQueryLimit {
return nil, errExceedLogQueryLimit
}
}
}
var from, to rpc.BlockNumber
if crit.FromBlock == nil {

View file

@ -36,6 +36,7 @@ import (
"github.com/ethereum/go-ethereum/internal/ethapi"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rpc"
"github.com/ethereum/go-ethereum/triedb"
)
type testBackend struct {
@ -424,7 +425,7 @@ func TestInvalidLogFilterCreation(t *testing.T) {
var (
db = rawdb.NewMemoryDatabase()
_, sys = newTestFilterSystem(db, Config{})
_, sys = newTestFilterSystem(db, Config{LogQueryLimit: 1000})
api = NewFilterAPI(sys)
)
@ -435,7 +436,7 @@ func TestInvalidLogFilterCreation(t *testing.T) {
1: {FromBlock: big.NewInt(rpc.PendingBlockNumber.Int64()), ToBlock: big.NewInt(100)},
2: {FromBlock: big.NewInt(rpc.LatestBlockNumber.Int64()), ToBlock: big.NewInt(100)},
3: {Topics: [][]common.Hash{{}, {}, {}, {}, {}}},
4: {Addresses: make([]common.Address, maxAddresses+1)},
4: {Addresses: make([]common.Address, api.logQueryLimit+1)},
}
for i, test := range testCases {
@ -455,7 +456,7 @@ func TestInvalidGetLogsRequest(t *testing.T) {
BaseFee: big.NewInt(params.InitialBaseFee),
}
db, blocks, _ = core.GenerateChainWithGenesis(genesis, ethash.NewFaker(), 10, func(i int, gen *core.BlockGen) {})
_, sys = newTestFilterSystem(db, Config{})
_, sys = newTestFilterSystem(db, Config{LogQueryLimit: 10})
api = NewFilterAPI(sys)
blockHash = blocks[0].Hash()
unknownBlockHash = common.HexToHash("0x1111111111111111111111111111111111111111111111111111111111111111")
@ -500,8 +501,8 @@ func TestInvalidGetLogsRequest(t *testing.T) {
err: errExceedMaxTopics,
},
{
f: FilterCriteria{BlockHash: &blockHash, Addresses: make([]common.Address, maxAddresses+1)},
err: errExceedMaxAddresses,
f: FilterCriteria{BlockHash: &blockHash, Addresses: make([]common.Address, api.logQueryLimit+1)},
err: errExceedLogQueryLimit,
},
}
@ -528,6 +529,92 @@ func TestInvalidGetRangeLogsRequest(t *testing.T) {
}
}
// TestExceedLogQueryLimit tests getLogs with too many addresses or topics.
// It exercises the runtime LogQueryLimit check in FilterAPI.GetLogs: requests
// at the limit must succeed, and requests one past the limit must fail with
// errExceedLogQueryLimit, both for addresses and for topics at one position.
func TestExceedLogQueryLimit(t *testing.T) {
t.Parallel()
// Test with custom config (LogQueryLimit = 5 for easier testing)
var (
db = rawdb.NewMemoryDatabase()
backend, sys = newTestFilterSystem(db, Config{LogQueryLimit: 5})
api = NewFilterAPI(sys)
gspec = &core.Genesis{
Config: params.TestChainConfig,
Alloc: types.GenesisAlloc{},
BaseFee: big.NewInt(params.InitialBaseFee),
}
)
// Commit the genesis state first so chain generation below can build on it.
_, err := gspec.Commit(db, triedb.NewDatabase(db, nil))
if err != nil {
t.Fatal(err)
}
// Generate a 1000-block empty chain; only the first 600 blocks are
// inserted below, which is enough for the queried range [0, 100].
chain, _ := core.GenerateChain(gspec.Config, gspec.ToBlock(), ethash.NewFaker(), db, 1000, func(i int, gen *core.BlockGen) {})
options := core.DefaultConfig().WithStateScheme(rawdb.HashScheme)
options.TxLookupLimit = 0 // index all txs
bc, err := core.NewBlockChain(db, gspec, ethash.NewFaker(), options)
if err != nil {
t.Fatal(err)
}
_, err = bc.InsertChain(chain[:600])
if err != nil {
t.Fatal(err)
}
// Start the log indexer so range queries can be served; stopped on exit.
// NOTE(review): parameters appear to be test-scaled (filtermaps.RangeTestParams) — confirm.
backend.startFilterMaps(200, false, filtermaps.RangeTestParams)
defer backend.stopFilterMaps()
// Build 6 entries each: one more than the configured limit of 5. The
// actual values don't matter for the limit check, so duplicates are fine.
addresses := make([]common.Address, 6)
for i := range addresses {
addresses[i] = common.HexToAddress("0x1234567890123456789012345678901234567890")
}
topics := make([]common.Hash, 6)
for i := range topics {
topics[i] = common.HexToHash("0x123456789012345678901234567890123456789001234567890012345678901234")
}
// Test that 5 addresses do not result in error
// Add FromBlock and ToBlock to make it similar to other invalid tests
if _, err := api.GetLogs(context.Background(), FilterCriteria{
FromBlock: big.NewInt(0),
ToBlock: big.NewInt(100),
Addresses: addresses[:5],
}); err != nil {
t.Errorf("Expected GetLogs with 5 addresses to return with no error, got: %v", err)
}
// Test that 6 addresses fails with correct error
if _, err := api.GetLogs(context.Background(), FilterCriteria{
FromBlock: big.NewInt(0),
ToBlock: big.NewInt(100),
Addresses: addresses,
}); err != errExceedLogQueryLimit {
t.Errorf("Expected GetLogs with 6 addresses to return errExceedLogQueryLimit, got: %v", err)
}
// Test that 5 topics at one position do not result in error
if _, err := api.GetLogs(context.Background(), FilterCriteria{
FromBlock: big.NewInt(0),
ToBlock: big.NewInt(100),
Addresses: addresses[:1],
Topics: [][]common.Hash{topics[:5]},
}); err != nil {
t.Errorf("Expected GetLogs with 5 topics at one position to return with no error, got: %v", err)
}
// Test that 6 topics at one position fails with correct error
if _, err := api.GetLogs(context.Background(), FilterCriteria{
FromBlock: big.NewInt(0),
ToBlock: big.NewInt(100),
Addresses: addresses[:1],
Topics: [][]common.Hash{topics},
}); err != errExceedLogQueryLimit {
t.Errorf("Expected GetLogs with 6 topics at one position to return errExceedLogQueryLimit, got: %v", err)
}
}
// TestLogFilter tests whether log filters match the correct logs that are posted to the event feed.
func TestLogFilter(t *testing.T) {
t.Parallel()