core/filtermaps: define APIs for map, epoch calculation (#31659)

This pull request refines the filtermap implementation, defining key
APIs for map and
epoch calculations to improve readability.

This pull request doesn't change any logic; it is a pure cleanup.

---------

Co-authored-by: zsfelfoldi <zsfelfoldi@gmail.com>
This commit is contained in:
rjl493456442 2025-07-01 22:31:09 +08:00 committed by GitHub
parent cc8d58fdc0
commit cbd6ed9e0b
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
9 changed files with 160 additions and 85 deletions

View file

@ -185,11 +185,14 @@ type filterMapsRange struct {
initialized bool
headIndexed bool
headDelimiter uint64 // zero if headIndexed is false
// if initialized then all maps are rendered in the maps range
maps common.Range[uint32]
// if tailPartialEpoch > 0 then maps between firstRenderedMap-mapsPerEpoch and
// firstRenderedMap-mapsPerEpoch+tailPartialEpoch-1 are rendered
tailPartialEpoch uint32
// if initialized then all log values in the blocks range are fully
// rendered
// blockLvPointers are available in the blocks range
@ -223,13 +226,15 @@ type Config struct {
}
// NewFilterMaps creates a new FilterMaps and starts the indexer.
func NewFilterMaps(db ethdb.KeyValueStore, initView *ChainView, historyCutoff, finalBlock uint64, params Params, config Config) *FilterMaps {
func NewFilterMaps(db ethdb.KeyValueStore, initView *ChainView, historyCutoff, finalBlock uint64, params Params, config Config) (*FilterMaps, error) {
rs, initialized, err := rawdb.ReadFilterMapsRange(db)
if err != nil || (initialized && rs.Version != databaseVersion) {
rs, initialized = rawdb.FilterMapsRange{}, false
log.Warn("Invalid log index database version; resetting log index")
}
params.deriveFields()
if err := params.sanitize(); err != nil {
return nil, err
}
f := &FilterMaps{
db: db,
closeCh: make(chan struct{}),
@ -254,15 +259,14 @@ func NewFilterMaps(db ethdb.KeyValueStore, initView *ChainView, historyCutoff, f
},
// deleting last unindexed epoch might have been interrupted by shutdown
cleanedEpochsBefore: max(rs.MapsFirst>>params.logMapsPerEpoch, 1) - 1,
historyCutoff: historyCutoff,
finalBlock: finalBlock,
matcherSyncCh: make(chan *FilterMapsMatcherBackend),
matchers: make(map[*FilterMapsMatcherBackend]struct{}),
filterMapCache: lru.NewCache[uint32, filterMap](cachedFilterMaps),
lastBlockCache: lru.NewCache[uint32, lastBlockOfMap](cachedLastBlocks),
lvPointerCache: lru.NewCache[uint64, uint64](cachedLvPointers),
renderSnapshots: lru.NewCache[uint64, *renderedMap](cachedRenderSnapshots),
historyCutoff: historyCutoff,
finalBlock: finalBlock,
matcherSyncCh: make(chan *FilterMapsMatcherBackend),
matchers: make(map[*FilterMapsMatcherBackend]struct{}),
filterMapCache: lru.NewCache[uint32, filterMap](cachedFilterMaps),
lastBlockCache: lru.NewCache[uint32, lastBlockOfMap](cachedLastBlocks),
lvPointerCache: lru.NewCache[uint64, uint64](cachedLvPointers),
renderSnapshots: lru.NewCache[uint64, *renderedMap](cachedRenderSnapshots),
}
f.checkRevertRange() // revert maps that are inconsistent with the current chain view
@ -272,7 +276,7 @@ func NewFilterMaps(db ethdb.KeyValueStore, initView *ChainView, historyCutoff, f
"firstmap", f.indexedRange.maps.First(), "lastmap", f.indexedRange.maps.Last(),
"headindexed", f.indexedRange.headIndexed)
}
return f
return f, nil
}
// Start starts the indexer.
@ -399,7 +403,7 @@ func (f *FilterMaps) init() error {
batch := f.db.NewBatch()
for epoch := range bestLen {
cp := checkpoints[bestIdx][epoch]
f.storeLastBlockOfMap(batch, (uint32(epoch+1)<<f.logMapsPerEpoch)-1, cp.BlockNumber, cp.BlockId)
f.storeLastBlockOfMap(batch, f.lastEpochMap(uint32(epoch)), cp.BlockNumber, cp.BlockId)
f.storeBlockLvPointer(batch, cp.BlockNumber, cp.FirstIndex)
}
fmr := filterMapsRange{
@ -408,7 +412,7 @@ func (f *FilterMaps) init() error {
if bestLen > 0 {
cp := checkpoints[bestIdx][bestLen-1]
fmr.blocks = common.NewRange(cp.BlockNumber+1, 0)
fmr.maps = common.NewRange(uint32(bestLen)<<f.logMapsPerEpoch, 0)
fmr.maps = common.NewRange(f.firstEpochMap(uint32(bestLen)), 0)
}
f.setRange(batch, f.targetView, fmr, false)
return batch.Write()
@ -578,9 +582,11 @@ func (f *FilterMaps) getFilterMapRows(mapIndices []uint32, rowIndex uint32, base
rows := make([]FilterRow, len(mapIndices))
var ptr int
for len(mapIndices) > ptr {
baseRowGroup := mapIndices[ptr] / f.baseRowGroupLength
groupLength := 1
for ptr+groupLength < len(mapIndices) && mapIndices[ptr+groupLength]/f.baseRowGroupLength == baseRowGroup {
var (
groupIndex = f.mapGroupIndex(mapIndices[ptr])
groupLength = 1
)
for ptr+groupLength < len(mapIndices) && f.mapGroupIndex(mapIndices[ptr+groupLength]) == groupIndex {
groupLength++
}
if err := f.getFilterMapRowsOfGroup(rows[ptr:ptr+groupLength], mapIndices[ptr:ptr+groupLength], rowIndex, baseLayerOnly); err != nil {
@ -594,17 +600,19 @@ func (f *FilterMaps) getFilterMapRows(mapIndices []uint32, rowIndex uint32, base
// getFilterMapRowsOfGroup fetches a set of filter map rows at map indices
// belonging to the same base row group.
func (f *FilterMaps) getFilterMapRowsOfGroup(target []FilterRow, mapIndices []uint32, rowIndex uint32, baseLayerOnly bool) error {
baseRowGroup := mapIndices[0] / f.baseRowGroupLength
baseMapRowIndex := f.mapRowIndex(baseRowGroup*f.baseRowGroupLength, rowIndex)
baseRows, err := rawdb.ReadFilterMapBaseRows(f.db, baseMapRowIndex, f.baseRowGroupLength, f.logMapWidth)
var (
groupIndex = f.mapGroupIndex(mapIndices[0])
mapRowIndex = f.mapRowIndex(groupIndex, rowIndex)
)
baseRows, err := rawdb.ReadFilterMapBaseRows(f.db, mapRowIndex, f.baseRowGroupSize, f.logMapWidth)
if err != nil {
return fmt.Errorf("failed to retrieve base row group %d of row %d: %v", baseRowGroup, rowIndex, err)
return fmt.Errorf("failed to retrieve base row group %d of row %d: %v", groupIndex, rowIndex, err)
}
for i, mapIndex := range mapIndices {
if mapIndex/f.baseRowGroupLength != baseRowGroup {
panic("mapIndices are not in the same base row group")
if f.mapGroupIndex(mapIndex) != groupIndex {
return fmt.Errorf("maps are not in the same base row group, index: %d, group: %d", mapIndex, groupIndex)
}
row := baseRows[mapIndex&(f.baseRowGroupLength-1)]
row := baseRows[f.mapGroupOffset(mapIndex)]
if !baseLayerOnly {
extRow, err := rawdb.ReadFilterMapExtRow(f.db, f.mapRowIndex(mapIndex, rowIndex), f.logMapWidth)
if err != nil {
@ -621,15 +629,17 @@ func (f *FilterMaps) getFilterMapRowsOfGroup(target []FilterRow, mapIndices []ui
// indices and a shared row index.
func (f *FilterMaps) storeFilterMapRows(batch ethdb.Batch, mapIndices []uint32, rowIndex uint32, rows []FilterRow) error {
for len(mapIndices) > 0 {
baseRowGroup := mapIndices[0] / f.baseRowGroupLength
groupLength := 1
for groupLength < len(mapIndices) && mapIndices[groupLength]/f.baseRowGroupLength == baseRowGroup {
groupLength++
var (
pos = 1
groupIndex = f.mapGroupIndex(mapIndices[0])
)
for pos < len(mapIndices) && f.mapGroupIndex(mapIndices[pos]) == groupIndex {
pos++
}
if err := f.storeFilterMapRowsOfGroup(batch, mapIndices[:groupLength], rowIndex, rows[:groupLength]); err != nil {
if err := f.storeFilterMapRowsOfGroup(batch, mapIndices[:pos], rowIndex, rows[:pos]); err != nil {
return err
}
mapIndices, rows = mapIndices[groupLength:], rows[groupLength:]
mapIndices, rows = mapIndices[pos:], rows[pos:]
}
return nil
}
@ -637,21 +647,23 @@ func (f *FilterMaps) storeFilterMapRows(batch ethdb.Batch, mapIndices []uint32,
// storeFilterMapRowsOfGroup stores a set of filter map rows at map indices
// belonging to the same base row group.
func (f *FilterMaps) storeFilterMapRowsOfGroup(batch ethdb.Batch, mapIndices []uint32, rowIndex uint32, rows []FilterRow) error {
baseRowGroup := mapIndices[0] / f.baseRowGroupLength
baseMapRowIndex := f.mapRowIndex(baseRowGroup*f.baseRowGroupLength, rowIndex)
var baseRows [][]uint32
if uint32(len(mapIndices)) != f.baseRowGroupLength { // skip base rows read if all rows are replaced
var (
baseRows [][]uint32
groupIndex = f.mapGroupIndex(mapIndices[0])
mapRowIndex = f.mapRowIndex(groupIndex, rowIndex)
)
if uint32(len(mapIndices)) != f.baseRowGroupSize { // skip base rows read if all rows are replaced
var err error
baseRows, err = rawdb.ReadFilterMapBaseRows(f.db, baseMapRowIndex, f.baseRowGroupLength, f.logMapWidth)
baseRows, err = rawdb.ReadFilterMapBaseRows(f.db, mapRowIndex, f.baseRowGroupSize, f.logMapWidth)
if err != nil {
return fmt.Errorf("failed to retrieve base row group %d of row %d for modification: %v", baseRowGroup, rowIndex, err)
return fmt.Errorf("failed to retrieve filter map %d base rows %d for modification: %v", groupIndex, rowIndex, err)
}
} else {
baseRows = make([][]uint32, f.baseRowGroupLength)
baseRows = make([][]uint32, f.baseRowGroupSize)
}
for i, mapIndex := range mapIndices {
if mapIndex/f.baseRowGroupLength != baseRowGroup {
panic("mapIndices are not in the same base row group")
if f.mapGroupIndex(mapIndex) != groupIndex {
return fmt.Errorf("maps are not in the same base row group, index: %d, group: %d", mapIndex, groupIndex)
}
baseRow := []uint32(rows[i])
var extRow FilterRow
@ -659,10 +671,10 @@ func (f *FilterMaps) storeFilterMapRowsOfGroup(batch ethdb.Batch, mapIndices []u
extRow = baseRow[f.baseRowLength:]
baseRow = baseRow[:f.baseRowLength]
}
baseRows[mapIndex&(f.baseRowGroupLength-1)] = baseRow
baseRows[f.mapGroupOffset(mapIndex)] = baseRow
rawdb.WriteFilterMapExtRow(batch, f.mapRowIndex(mapIndex, rowIndex), extRow, f.logMapWidth)
}
rawdb.WriteFilterMapBaseRows(batch, baseMapRowIndex, baseRows, f.logMapWidth)
rawdb.WriteFilterMapBaseRows(batch, mapRowIndex, baseRows, f.logMapWidth)
return nil
}
@ -747,12 +759,12 @@ func (f *FilterMaps) deleteTailEpoch(epoch uint32) (bool, error) {
defer f.indexLock.Unlock()
// determine epoch boundaries
firstMap := epoch << f.logMapsPerEpoch
lastBlock, _, err := f.getLastBlockOfMap(firstMap + f.mapsPerEpoch - 1)
lastBlock, _, err := f.getLastBlockOfMap(f.lastEpochMap(epoch))
if err != nil {
return false, fmt.Errorf("failed to retrieve last block of deleted epoch %d: %v", epoch, err)
}
var firstBlock uint64
firstMap := f.firstEpochMap(epoch)
if epoch > 0 {
firstBlock, _, err = f.getLastBlockOfMap(firstMap - 1)
if err != nil {
@ -763,8 +775,8 @@ func (f *FilterMaps) deleteTailEpoch(epoch uint32) (bool, error) {
// update rendered range if necessary
var (
fmr = f.indexedRange
firstEpoch = f.indexedRange.maps.First() >> f.logMapsPerEpoch
afterLastEpoch = (f.indexedRange.maps.AfterLast() + f.mapsPerEpoch - 1) >> f.logMapsPerEpoch
firstEpoch = f.mapEpoch(f.indexedRange.maps.First())
afterLastEpoch = f.mapEpoch(f.indexedRange.maps.AfterLast() + f.mapsPerEpoch - 1)
)
if f.indexedRange.tailPartialEpoch != 0 && firstEpoch > 0 {
firstEpoch--
@ -776,7 +788,7 @@ func (f *FilterMaps) deleteTailEpoch(epoch uint32) (bool, error) {
// first fully or partially rendered epoch and there is at least one
// rendered map in the next epoch; remove from indexed range
fmr.tailPartialEpoch = 0
fmr.maps.SetFirst((epoch + 1) << f.logMapsPerEpoch)
fmr.maps.SetFirst(f.firstEpochMap(epoch + 1))
fmr.blocks.SetFirst(lastBlock + 1)
f.setRange(f.db, f.indexedView, fmr, false)
default:
@ -857,7 +869,7 @@ func (f *FilterMaps) exportCheckpoints() {
w.WriteString("[\n")
comma := ","
for epoch := uint32(0); epoch < epochCount; epoch++ {
lastBlock, lastBlockId, err := f.getLastBlockOfMap((epoch+1)<<f.logMapsPerEpoch - 1)
lastBlock, lastBlockId, err := f.getLastBlockOfMap(f.lastEpochMap(epoch))
if err != nil {
log.Error("Error fetching last block of epoch", "epoch", epoch, "error", err)
return

View file

@ -281,7 +281,7 @@ func (f *FilterMaps) tryIndexHead() error {
// is changed.
func (f *FilterMaps) tryIndexTail() (bool, error) {
for {
firstEpoch := f.indexedRange.maps.First() >> f.logMapsPerEpoch
firstEpoch := f.mapEpoch(f.indexedRange.maps.First())
if firstEpoch == 0 || !f.needTailEpoch(firstEpoch-1) {
break
}
@ -359,7 +359,7 @@ func (f *FilterMaps) tryIndexTail() (bool, error) {
// Note that unindexing is very quick as it only removes continuous ranges of
// data from the database and is also called while running head indexing.
func (f *FilterMaps) tryUnindexTail() (bool, error) {
firstEpoch := f.indexedRange.maps.First() >> f.logMapsPerEpoch
firstEpoch := f.mapEpoch(f.indexedRange.maps.First())
if f.indexedRange.tailPartialEpoch > 0 && firstEpoch > 0 {
firstEpoch--
}
@ -392,11 +392,11 @@ func (f *FilterMaps) tryUnindexTail() (bool, error) {
// needTailEpoch returns true if the given tail epoch needs to be kept
// according to the current tail target, false if it can be removed.
func (f *FilterMaps) needTailEpoch(epoch uint32) bool {
firstEpoch := f.indexedRange.maps.First() >> f.logMapsPerEpoch
firstEpoch := f.mapEpoch(f.indexedRange.maps.First())
if epoch > firstEpoch {
return true
}
if (epoch+1)<<f.logMapsPerEpoch >= f.indexedRange.maps.AfterLast() {
if f.firstEpochMap(epoch+1) >= f.indexedRange.maps.AfterLast() {
return true
}
if epoch+1 < firstEpoch {
@ -405,7 +405,7 @@ func (f *FilterMaps) needTailEpoch(epoch uint32) bool {
var lastBlockOfPrevEpoch uint64
if epoch > 0 {
var err error
lastBlockOfPrevEpoch, _, err = f.getLastBlockOfMap(epoch<<f.logMapsPerEpoch - 1)
lastBlockOfPrevEpoch, _, err = f.getLastBlockOfMap(f.lastEpochMap(epoch - 1))
if err != nil {
log.Error("Could not get last block of previous epoch", "epoch", epoch-1, "error", err)
return epoch >= firstEpoch
@ -414,7 +414,7 @@ func (f *FilterMaps) needTailEpoch(epoch uint32) bool {
if f.historyCutoff > lastBlockOfPrevEpoch {
return false
}
lastBlockOfEpoch, _, err := f.getLastBlockOfMap((epoch+1)<<f.logMapsPerEpoch - 1)
lastBlockOfEpoch, _, err := f.getLastBlockOfMap(f.lastEpochMap(epoch))
if err != nil {
log.Error("Could not get last block of epoch", "epoch", epoch, "error", err)
return epoch >= firstEpoch

View file

@ -41,7 +41,7 @@ var testParams = Params{
logMapWidth: 24,
logMapsPerEpoch: 4,
logValuesPerMap: 4,
baseRowGroupLength: 4,
baseRowGroupSize: 4,
baseRowLengthRatio: 2,
logLayerDiff: 2,
}
@ -370,7 +370,7 @@ func (ts *testSetup) setHistory(history uint64, noHistory bool) {
History: history,
Disabled: noHistory,
}
ts.fm = NewFilterMaps(ts.db, view, 0, 0, ts.params, config)
ts.fm, _ = NewFilterMaps(ts.db, view, 0, 0, ts.params, config)
ts.fm.testDisableSnapshots = ts.testDisableSnapshots
ts.fm.Start()
}

View file

@ -284,7 +284,7 @@ func (r *mapRenderer) run(stopCb func() bool, writeCb func()) (bool, error) {
// map finished
r.finishedMaps[r.currentMap.mapIndex] = r.currentMap
r.finished.SetLast(r.finished.AfterLast())
if len(r.finishedMaps) >= maxMapsPerBatch || r.finished.AfterLast()&(r.f.baseRowGroupLength-1) == 0 {
if len(r.finishedMaps) >= maxMapsPerBatch || r.f.mapGroupOffset(r.finished.AfterLast()) == 0 {
if err := r.writeFinishedMaps(stopCb); err != nil {
return false, err
}

View file

@ -844,7 +844,7 @@ func (m *matchSequenceInstance) dropNext(mapIndex uint32) bool {
// results at mapIndex and mapIndex+1. Note that acquiring nextNextRes may be
// skipped and it can be substituted with an empty list if baseRes has no potential
// matches that could be sequence matched with anything that could be in nextNextRes.
func (params *Params) matchResults(mapIndex uint32, offset uint64, baseRes, nextRes potentialMatches) potentialMatches {
func (p *Params) matchResults(mapIndex uint32, offset uint64, baseRes, nextRes potentialMatches) potentialMatches {
if nextRes == nil || (baseRes != nil && len(baseRes) == 0) {
// if nextRes is a wild card or baseRes is empty then the sequence matcher
// result equals baseRes.
@ -854,7 +854,7 @@ func (params *Params) matchResults(mapIndex uint32, offset uint64, baseRes, next
// if baseRes is a wild card or nextRes is empty then the sequence matcher
// result is the items of nextRes with a negative offset applied.
result := make(potentialMatches, 0, len(nextRes))
min := (uint64(mapIndex) << params.logValuesPerMap) + offset
min := (uint64(mapIndex) << p.logValuesPerMap) + offset
for _, v := range nextRes {
if v >= min {
result = append(result, v-offset)

View file

@ -19,6 +19,7 @@ package filtermaps
import (
"crypto/sha256"
"encoding/binary"
"fmt"
"hash/fnv"
"math"
"sort"
@ -28,19 +29,36 @@ import (
// Params defines the basic parameters of the log index structure.
type Params struct {
logMapHeight uint // log2(mapHeight)
logMapWidth uint // log2(mapWidth)
logMapsPerEpoch uint // log2(mapsPerEpoch)
logValuesPerMap uint // log2(logValuesPerMap)
baseRowLengthRatio uint // baseRowLength / average row length
logLayerDiff uint // maxRowLength log2 growth per layer
// derived fields
mapHeight uint32 // filter map height (number of rows)
mapsPerEpoch uint32 // number of maps in an epoch
logMapHeight uint // The number of bits required to represent the map height
logMapWidth uint // The number of bits required to represent the map width
logMapsPerEpoch uint // The number of bits required to represent the number of maps per epoch
logValuesPerMap uint // The number of bits required to represent the number of log values per map
// baseRowLengthRatio represents the ratio of base row length
// to the average row length.
baseRowLengthRatio uint
// logLayerDiff defines the logarithmic growth factor (base 2) of
// the maximum row length per layer. It indicates how much the maximum
// row length increases as the layer depth increases.
//
// Specifically:
// - the row length in base layer (layer == 0) is baseRowLength
// - the row length in layer x is baseRowLength << (logLayerDiff * x)
logLayerDiff uint
// These fields can be derived with the information above
mapHeight uint32 // The number of rows in the filter map
mapsPerEpoch uint32 // The number of maps in an epoch
valuesPerMap uint64 // The number of log values marked on each filter map
baseRowLength uint32 // maximum number of log values per row on layer 0
valuesPerMap uint64 // number of log values marked on each filter map
// not affecting consensus
baseRowGroupLength uint32 // length of base row groups in local database
// baseRowGroupSize defines the number of base row entries grouped together
// as a single database entry in the local database to optimize storage
// and retrieval efficiency.
//
// This value can be configured based on the specific implementation.
baseRowGroupSize uint32
}
// DefaultParams is the set of parameters used on mainnet.
@ -49,7 +67,7 @@ var DefaultParams = Params{
logMapWidth: 24,
logMapsPerEpoch: 10,
logValuesPerMap: 16,
baseRowGroupLength: 32,
baseRowGroupSize: 32,
baseRowLengthRatio: 8,
logLayerDiff: 4,
}
@ -60,7 +78,7 @@ var RangeTestParams = Params{
logMapWidth: 24,
logMapsPerEpoch: 0,
logValuesPerMap: 0,
baseRowGroupLength: 32,
baseRowGroupSize: 32,
baseRowLengthRatio: 16, // baseRowLength >= 1
logLayerDiff: 4,
}
@ -91,6 +109,44 @@ func topicValue(topic common.Hash) common.Hash {
return result
}
// sanitize derives the dependent parameter fields and then validates the
// configured values, returning a descriptive error if any are unusable.
func (p *Params) sanitize() error {
	p.deriveFields()
	if p.logMapWidth%8 != 0 {
		return fmt.Errorf("invalid configuration: logMapWidth (%d) must be a multiple of 8", p.logMapWidth)
	}
	// A zero or non-power-of-2 group size would break the masking arithmetic
	// used by mapGroupIndex / mapGroupOffset.
	size := p.baseRowGroupSize
	if size == 0 || size&(size-1) != 0 {
		return fmt.Errorf("invalid configuration: baseRowGroupSize (%d) must be a power of 2", size)
	}
	return nil
}
// mapGroupIndex returns the start index of the base row group that contains
// the given map index. Assumes baseRowGroupSize is a power of 2 (enforced by
// sanitize), so clearing the low bits yields the first map of the group.
func (p *Params) mapGroupIndex(index uint32) uint32 {
	// Use Go's AND-NOT operator instead of the equivalent `& ^(...)` form;
	// the result is identical but the bit-clearing intent is explicit.
	return index &^ (p.baseRowGroupSize - 1)
}
// mapGroupOffset returns the position of the given map index relative to the
// start of its base row group.
func (p *Params) mapGroupOffset(mapIndex uint32) uint32 {
	// baseRowGroupSize is a power of 2, so the mask keeps the in-group bits.
	mask := p.baseRowGroupSize - 1
	return mapIndex & mask
}
// mapEpoch returns the number of the epoch containing the given map index.
func (p *Params) mapEpoch(mapIndex uint32) uint32 {
	// Epochs hold 2^logMapsPerEpoch maps, so the epoch is the high bits.
	return mapIndex >> p.logMapsPerEpoch
}
// firstEpochMap returns the map index at which the given epoch begins.
func (p *Params) firstEpochMap(epoch uint32) uint32 {
	// The first map of an epoch is the epoch number scaled by the epoch size.
	return epoch << p.logMapsPerEpoch
}
// lastEpochMap returns the index of the last map belonging to the specified
// epoch (inclusive).
func (p *Params) lastEpochMap(epoch uint32) uint32 {
	// One before the first map of the following epoch.
	next := (epoch + 1) << p.logMapsPerEpoch
	return next - 1
}
// rowIndex returns the row index in which the given log value should be marked
// on the given map and mapping layer. Note that row assignments are re-shuffled
// with a different frequency on each mapping layer, allowing efficient disk
@ -125,17 +181,20 @@ func (p *Params) columnIndex(lvIndex uint64, logValue *common.Hash) uint32 {
return uint32(lvIndex%p.valuesPerMap)<<hashBits + (uint32(hash>>(64-hashBits)) ^ uint32(hash)>>(32-hashBits))
}
// maxRowLength returns the maximum length filter rows are populated up to
// when using the given mapping layer. A log value can be marked on the map
// according to a given mapping layer if the row mapping on that layer points
// to a row that has not yet reached the maxRowLength belonging to that layer.
// maxRowLength returns the maximum length filter rows are populated up to when
// using the given mapping layer.
//
// A log value can be marked on the map according to a given mapping layer if
// the row mapping on that layer points to a row that has not yet reached the
// maxRowLength belonging to that layer.
//
// This means that a row that is considered full on a given layer may still be
// extended further on a higher order layer.
//
// Each value is marked on the lowest order layer possible, assuming that marks
// are added in ascending log value index order.
// When searching for a log value one should consider all layers and process
// corresponding rows up until the first one where the row mapped to the given
// layer is not full.
// are added in ascending log value index order. When searching for a log value
// one should consider all layers and process corresponding rows up until the
// first one where the row mapped to the given layer is not full.
func (p *Params) maxRowLength(layerIndex uint32) uint32 {
logLayerDiff := uint(layerIndex) * p.logLayerDiff
if logLayerDiff > p.logMapsPerEpoch {

View file

@ -388,7 +388,7 @@ func ReadFilterMapBaseRows(db ethdb.KeyValueReader, mapRowIndex uint64, rowCount
headerBits--
}
if headerLen+byteLength*entryCount > encLen {
return nil, errors.New("Invalid encoded base filter rows length")
return nil, errors.New("invalid encoded base filter rows length")
}
if entriesInRow > 0 {
rows[rowIndex] = make([]uint32, entriesInRow)
@ -405,8 +405,8 @@ func ReadFilterMapBaseRows(db ethdb.KeyValueReader, mapRowIndex uint64, rowCount
return rows, nil
}
// WriteFilterMapExtRow stores a filter map row at the given mapRowIndex or deletes
// any existing entry if the row is empty.
// WriteFilterMapExtRow stores an extended filter map row at the given mapRowIndex
// or deletes any existing entry if the row is empty.
func WriteFilterMapExtRow(db ethdb.KeyValueWriter, mapRowIndex uint64, row []uint32, bitLength uint) {
byteLength := int(bitLength) / 8
if int(bitLength) != byteLength*8 {

View file

@ -278,7 +278,11 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) {
if fb := eth.blockchain.CurrentFinalBlock(); fb != nil {
finalBlock = fb.Number.Uint64()
}
eth.filterMaps = filtermaps.NewFilterMaps(chainDb, chainView, historyCutoff, finalBlock, filtermaps.DefaultParams, fmConfig)
filterMaps, err := filtermaps.NewFilterMaps(chainDb, chainView, historyCutoff, finalBlock, filtermaps.DefaultParams, fmConfig)
if err != nil {
return nil, err
}
eth.filterMaps = filterMaps
eth.closeFilterMaps = make(chan chan struct{})
// TxPool

View file

@ -175,7 +175,7 @@ func (b *testBackend) startFilterMaps(history uint64, disabled bool, params filt
Disabled: disabled,
ExportFileName: "",
}
b.fm = filtermaps.NewFilterMaps(b.db, chainView, 0, 0, params, config)
b.fm, _ = filtermaps.NewFilterMaps(b.db, chainView, 0, 0, params, config)
b.fm.Start()
b.fm.WaitIdle()
}