1
0
Fork 0
forked from forks/go-ethereum

core/filtermaps: FilterMaps log index generator and search logic (#31079)

This PR is #1 of a 3-part series that implements the new log index
intended to replace core/bloombits.
Replaces https://github.com/ethereum/go-ethereum/pull/30370

This part implements the new data structure, the log index generator and
the search logic. This PR has most of the complexity but it does not
affect any existing code yet so maybe it is easier to review separately.

FilterMaps data structure explanation:
https://gist.github.com/zsfelfoldi/a60795f9da7ae6422f28c7a34e02a07e

Log index generator code overview:
https://gist.github.com/zsfelfoldi/97105dff0b1a4f5ed557924a24b9b9e7

Search pattern matcher code overview:
https://gist.github.com/zsfelfoldi/5981735641c956afb18065e84f8aff34

Note that the possibility of a tree hashing scheme and remote proof
protocol are mentioned in the documents above but they are not exactly
specified yet. These specs are WIP and will be finalized after the local
log indexer/filter code is finalized and merged.

---------

Co-authored-by: Felix Lange <fjl@twurst.com>
This commit is contained in:
Felföldi Zsolt 2025-03-13 19:04:16 +01:00 committed by GitHub
parent 78be413ca9
commit f9f1172d59
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
17 changed files with 4825 additions and 0 deletions

View file

@ -0,0 +1,156 @@
// Copyright 2025 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package filtermaps
import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/log"
)
// blockchain represents the underlying blockchain of ChainView. It provides
// the minimal read-only access to headers, canonical hashes and receipts
// that the chain view logic needs.
type blockchain interface {
	// GetHeader retrieves a block header by hash and number.
	GetHeader(hash common.Hash, number uint64) *types.Header
	// GetCanonicalHash returns the canonical block hash for the given number.
	GetCanonicalHash(number uint64) common.Hash
	// GetReceiptsByHash returns the receipts of the block with the given hash.
	GetReceiptsByHash(hash common.Hash) types.Receipts
}
// ChainView represents an immutable view of a chain with a block id and a set
// of receipts associated to each block number and a block hash associated with
// all block numbers except the head block. This is because in the future
// ChainView might represent a view where the head block is currently being
// created. Block id is a unique identifier that can also be calculated for the
// head block.
// Note that the view's head does not have to be the current canonical head
// of the underlying blockchain, it should only possess the block headers
// and receipts up until the expected chain view head.
type ChainView struct {
	chain      blockchain // read-only access to the underlying chain data
	headNumber uint64     // block number of the view's head
	// hashes holds block hashes stored backwards starting from headNumber,
	// down to the most recent block that is canonical in the underlying
	// chain; hashes of older blocks are resolved via GetCanonicalHash.
	hashes []common.Hash // block hashes starting backwards from headNumber until first canonical hash
}
// NewChainView creates a new ChainView with the given head block number and
// hash on top of the given blockchain backend. The reverse hash list is
// immediately extended back to the first canonical ancestor.
func NewChainView(chain blockchain, number uint64, hash common.Hash) *ChainView {
	view := &ChainView{
		chain:      chain,
		headNumber: number,
		hashes:     []common.Hash{hash},
	}
	view.extendNonCanonical()
	return view
}
// getBlockHash returns the block hash belonging to the given block number.
// Note that the hash of the head block is not returned because ChainView might
// represent a view where the head block is currently being created.
func (cv *ChainView) getBlockHash(number uint64) common.Hash {
	if number < cv.headNumber {
		return cv.blockHash(number)
	}
	panic("invalid block number")
}
// getBlockId returns the unique block id belonging to the given block number.
// Note that it is currently equal to the block hash. In the future it might
// be a different id for future blocks if the log index root becomes part of
// consensus and therefore rendering the index with the new head will happen
// before the hash of that new head is available.
func (cv *ChainView) getBlockId(number uint64) common.Hash {
	if number <= cv.headNumber {
		return cv.blockHash(number)
	}
	panic("invalid block number")
}
// getReceipts returns the set of receipts belonging to the block at the given
// block number.
func (cv *ChainView) getReceipts(number uint64) types.Receipts {
	if number > cv.headNumber {
		panic("invalid block number")
	}
	hash := cv.blockHash(number)
	return cv.chain.GetReceiptsByHash(hash)
}
// limitedView returns a new chain view that is a truncated version of the
// parent view. If the requested head is not below the current one, the view
// itself is returned unchanged.
func (cv *ChainView) limitedView(newHead uint64) *ChainView {
	if newHead < cv.headNumber {
		return NewChainView(cv.chain, newHead, cv.blockHash(newHead))
	}
	return cv
}
// equalViews returns true if the two chain views are equivalent, meaning they
// share both the same head number and the same head block id. A nil view is
// never considered equal to anything.
func equalViews(a, b *ChainView) bool {
	if a == nil || b == nil {
		return false
	}
	if a.headNumber != b.headNumber {
		return false
	}
	return a.getBlockId(a.headNumber) == b.getBlockId(b.headNumber)
}
// matchViews returns true if the two chain views are equivalent up until the
// specified block number. If the specified number is higher than one of the
// heads then false is returned.
func matchViews(a, b *ChainView, number uint64) bool {
	switch {
	case a == nil || b == nil:
		return false
	case a.headNumber < number || b.headNumber < number:
		return false
	case a.headNumber == number || b.headNumber == number:
		// At a head position only the block id is defined, not the hash.
		return a.getBlockId(number) == b.getBlockId(number)
	default:
		return a.getBlockHash(number) == b.getBlockHash(number)
	}
}
// extendNonCanonical checks whether the previously known reverse list of head
// hashes still ends with one that is canonical on the underlying blockchain.
// If necessary then it traverses further back on the header chain and adds
// more hashes to the list.
// It returns false if the traversal cannot be completed, i.e. a required
// header is missing from the backend or the list reaches a genesis hash that
// is not the canonical one.
func (cv *ChainView) extendNonCanonical() bool {
	for {
		// Oldest entry of the reverse list and its block number; entry i of
		// cv.hashes belongs to block headNumber-i.
		hash, number := cv.hashes[len(cv.hashes)-1], cv.headNumber-uint64(len(cv.hashes)-1)
		if cv.chain.GetCanonicalHash(number) == hash {
			// The list now ends on a canonical block; no further traversal needed.
			return true
		}
		if number == 0 {
			log.Error("Unknown genesis block hash found")
			return false
		}
		header := cv.chain.GetHeader(hash, number)
		if header == nil {
			log.Error("Header not found", "number", number, "hash", hash)
			return false
		}
		// Step one block further back along the non-canonical chain.
		cv.hashes = append(cv.hashes, header.ParentHash)
	}
}
// blockHash returns the given block hash without doing the head number check.
//
// Hashes in cv.hashes cover the view's non-canonical tail, stored backwards
// from headNumber; block numbers older than that tail are resolved through
// the backend's canonical hash mapping. Returns the zero hash if the reverse
// hash list cannot be extended back to a canonical block.
func (cv *ChainView) blockHash(number uint64) common.Hash {
	if number+uint64(len(cv.hashes)) <= cv.headNumber {
		// number is older than the stored reverse list; look it up in the
		// canonical chain mapping.
		hash := cv.chain.GetCanonicalHash(number)
		// Re-extend the reverse list and re-check the range afterwards:
		// presumably this guards against the underlying chain reorging
		// between the lookup above and now — NOTE(review): confirm intent.
		if !cv.extendNonCanonical() {
			return common.Hash{}
		}
		if number+uint64(len(cv.hashes)) <= cv.headNumber {
			// number still falls into the canonical range; the looked-up
			// hash is valid for this view.
			return hash
		}
	}
	// Resolve from the reverse-ordered non-canonical hash list.
	return cv.hashes[cv.headNumber-number]
}

View file

@ -0,0 +1,63 @@
// Copyright 2025 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package filtermaps
import (
_ "embed"
"encoding/json"
"github.com/ethereum/go-ethereum/common"
)
// checkpointList lists checkpoints for finalized epochs of a given chain.
// This allows the indexer to start indexing from the latest available
// checkpoint and then index tail epochs in reverse order.
type checkpointList []epochCheckpoint
// epochCheckpoint specifies the last block of the epoch and the first log
// value index where that block starts. This allows a log value iterator to
// be initialized at the epoch boundary.
type epochCheckpoint struct {
	BlockNumber uint64      // block that generated the last log value of the given epoch
	BlockId     common.Hash // id (hash) of that block
	FirstIndex  uint64      // first log value index of the given block
}
// Raw JSON checkpoint data bundled into the binary at compile time.
//
//go:embed checkpoints_mainnet.json
var checkpointsMainnetJSON []byte

//go:embed checkpoints_sepolia.json
var checkpointsSepoliaJSON []byte

//go:embed checkpoints_holesky.json
var checkpointsHoleskyJSON []byte
// checkpoints lists sets of checkpoints for multiple chains. The matching
// checkpoint set is autodetected by the indexer once the canonical chain is
// known.
// Note that decoding happens at package initialization time; a malformed
// embedded file panics on startup.
var checkpoints = []checkpointList{
	decodeCheckpoints(checkpointsMainnetJSON),
	decodeCheckpoints(checkpointsSepoliaJSON),
	decodeCheckpoints(checkpointsHoleskyJSON),
}
// decodeCheckpoints parses a JSON-encoded checkpoint list. It panics on
// malformed input; this is acceptable because it is only applied to embedded
// compile-time data, where a decoding failure is a build defect.
func decodeCheckpoints(encoded []byte) checkpointList {
	var list checkpointList
	if err := json.Unmarshal(encoded, &list); err != nil {
		panic(err)
	}
	return list
}

View file

@ -0,0 +1,20 @@
[
{"blockNumber": 814411, "blockId": "0xf763e96fc3920359c5f706803024b78e83796a3a8563bb5a83c3ddd7cbfde287", "firstIndex": 67107637},
{"blockNumber": 914278, "blockId": "0x0678cf8d53c0d6d27896df657d98cc73bc63ca468b6295068003938ef9b0f927", "firstIndex": 134217671},
{"blockNumber": 1048874, "blockId": "0x3620c3d52a40ff4d9fc58c3104cfa2f327f55592caf6a2394c207a5e00b4f740", "firstIndex": 201326382},
{"blockNumber": 1144441, "blockId": "0x438fb42850f5a0d8e1666de598a4d0106b62da0f7448c62fe029b8cbad35d08d", "firstIndex": 268435440},
{"blockNumber": 1230411, "blockId": "0xf0ee07e60a93910723b259473a253dd9cf674e8b78c4f153b32ad7032efffeeb", "firstIndex": 335543079},
{"blockNumber": 1309112, "blockId": "0xc1646e5ef4b4343880a85b1a4111e3321d609a1225e9cebbe10d1c7abf99e58d", "firstIndex": 402653100},
{"blockNumber": 1380522, "blockId": "0x1617cae91989d97ac6335c4217aa6cc7f7f4c2837e20b3b5211d98d6f9e97e44", "firstIndex": 469761917},
{"blockNumber": 1476962, "blockId": "0xd978455d2618d093dfc685d7f43f61be6dae0fa8a9cb915ae459aa6e0a5525f0", "firstIndex": 536870773},
{"blockNumber": 1533518, "blockId": "0xe7d39d71bd9d5f1f3157c35e0329531a7950a19e3042407e38948b89b5384f78", "firstIndex": 603979664},
{"blockNumber": 1613787, "blockId": "0xa793168d135c075732a618ec367faaed5f359ffa81898c73cb4ec54ec2caa696", "firstIndex": 671088003},
{"blockNumber": 1719099, "blockId": "0xc4394c71a8a24efe64c5ff2afcdd1594f3708524e6084aa7dadd862bd704ab03", "firstIndex": 738196914},
{"blockNumber": 1973165, "blockId": "0xee3a9e959a437c707a3036736ec8d42a9261ac6100972c26f65eedcde315a81d", "firstIndex": 805306333},
{"blockNumber": 2274844, "blockId": "0x76e2d33653ed9282c63ad09d721e1f2e29064aa9c26202e20fc4cc73e8dfe5f6", "firstIndex": 872415141},
{"blockNumber": 2530503, "blockId": "0x59f4e45345f8b8f848be5004fe75c4a28f651864256c3aa9b2da63369432b718", "firstIndex": 939523693},
{"blockNumber": 2781903, "blockId": "0xc981e91c6fb69c5e8146ead738fcfc561831f11d7786d39c7fa533966fc37675", "firstIndex": 1006632906},
{"blockNumber": 3101713, "blockId": "0xc7baa577c91d8439e3fc79002d2113d07ca54a4724bf2f1f5af937b7ba8e1f32", "firstIndex": 1073741382},
{"blockNumber": 3221770, "blockId": "0xa6b8240b7883fcc71aa5001b5ba66c889975c5217e14c16edebdd6f6e23a9424", "firstIndex": 1140850360}
]

View file

@ -0,0 +1,256 @@
[
{"blockNumber": 4166218, "blockId": "0xdd767e0426256179125551e8e40f33565a96d1c94076c7746fa79d767ed4ad65", "firstIndex": 67108680},
{"blockNumber": 4514014, "blockId": "0x33a0879bdabea4a7a3f2b424388cbcbf2fbd519bddadf13752a259049c78e95d", "firstIndex": 134217343},
{"blockNumber": 4817415, "blockId": "0x4f0e8c7dd04fbe0985b9394575b19f13ea66a2a628fa5b08178ce4b138c6db80", "firstIndex": 201326352},
{"blockNumber": 5087733, "blockId": "0xc84cd5e9cda999c919803c7a53a23bb77a18827fbde401d3463f1e9e52536424", "firstIndex": 268435343},
{"blockNumber": 5306107, "blockId": "0x13f028b5fc055d23f55a92a2eeecfbcfbda8a08e4cd519ce451ba2e70428f5f9", "firstIndex": 335544094},
{"blockNumber": 5509918, "blockId": "0x1ed770a58a7b4d4a828b7bb44c8820a674d562b23a6a0139981abe4c489d4dad", "firstIndex": 402652853},
{"blockNumber": 5670390, "blockId": "0x3923ee6a62e6cc5132afdadf1851ae4e73148e6fbe0a8319cafd2a120c98efa3", "firstIndex": 469761897},
{"blockNumber": 5826139, "blockId": "0xe61bc6ef03c333805f26319e1688f82553f98aa5e902b200e0621a3371b69050", "firstIndex": 536870853},
{"blockNumber": 5953029, "blockId": "0x43d710b1b7243b848400975048ccefdfaba091c692c7f01c619d988886cc160f", "firstIndex": 603979580},
{"blockNumber": 6102846, "blockId": "0xa100b2018f6545cc689656b4b846677b138955b7efd30e850cd14c246430ba18", "firstIndex": 671088291},
{"blockNumber": 6276718, "blockId": "0xb832ac448b06c104ba50faefd58b0b94d53c0fba5cb268086adad4db99c2f35f", "firstIndex": 738197399},
{"blockNumber": 6448696, "blockId": "0x48e8ae6f729ad6c76b6cf632bd52a6df7886ed55be09d43c5004fcc1463e533b", "firstIndex": 805305988},
{"blockNumber": 6655974, "blockId": "0xac395971a6ffc30f807848f68b97b2834f8ea13478a7615860b6a69e3d0823ca", "firstIndex": 872415033},
{"blockNumber": 6873949, "blockId": "0xc522ddb1113b1e9a87b2bdcb11ce78756beba6454a890122f121a032b5769354", "firstIndex": 939523784},
{"blockNumber": 7080953, "blockId": "0x3606de577d80120d1edbb64bad7fa6795e788bae342866a98cc58ce2f7575045", "firstIndex": 1006632796},
{"blockNumber": 7267002, "blockId": "0xad770882a69d216e955e34fef84851e56c0de82deacd6187a7a41f6170cd6c6d", "firstIndex": 1073741045},
{"blockNumber": 7466708, "blockId": "0x17a48817b3a65aba333a5b56f3ff2e86fbcc19e184b046a5305a5182fdd8eb8a", "firstIndex": 1140850680},
{"blockNumber": 7661807, "blockId": "0xa74731ee775fbd3f4d9313c68562737dd7c8d2c9eb968791d8abe167e16ddc96", "firstIndex": 1207959112},
{"blockNumber": 7834556, "blockId": "0xe4b4812448075508cb05a0e3257f91b49509dc78cd963676a633864db6e78956", "firstIndex": 1275068095},
{"blockNumber": 7990068, "blockId": "0x07bd4ca38abb4584a6209e04035646aa545ebbb6c948d438d4c25bfd9cb205fa", "firstIndex": 1342176620},
{"blockNumber": 8143032, "blockId": "0x0e3149e9637290b044ee693b8fcb66e23d22db3ad0bdda32962138ba18e59f3f", "firstIndex": 1409285949},
{"blockNumber": 8297660, "blockId": "0x34cd24f80247f7dfaf316b2e637f4b62f72ecc90703014fb25cb98ad044fc2c0", "firstIndex": 1476394911},
{"blockNumber": 8465137, "blockId": "0x4452fa296498248d7f10c9dc6ec1e4ae7503aa07f491e6d38b21aea5d2c658a8", "firstIndex": 1543503744},
{"blockNumber": 8655820, "blockId": "0x7bdb9008b30be420f7152cc294ac6e5328eed5b4abd954a34105de3da24f3cc6", "firstIndex": 1610612619},
{"blockNumber": 8807187, "blockId": "0xde03e3bfddc722c019f0b59bc55efabcd5ab68c6711f4c08d0390a56f396590d", "firstIndex": 1677721589},
{"blockNumber": 8911171, "blockId": "0xe44f342de74ab05a2a994f8841bdf88f720b9dc260177ba4030d0f7077901324", "firstIndex": 1744830310},
{"blockNumber": 8960320, "blockId": "0x79764f9ff6e0fe4848eda1805687872021076e4e603112861af84181395ac559", "firstIndex": 1811938893},
{"blockNumber": 9085994, "blockId": "0x24a101d1c8a63367a0953d10dc79c3b587a93bd7fd382084708adefce0b8363f", "firstIndex": 1879047965},
{"blockNumber": 9230924, "blockId": "0xb176a98d3acd855cbb75265fb6be955a8d51abc771e021e13275d5b3ecb07eeb", "firstIndex": 1946156668},
{"blockNumber": 9390535, "blockId": "0x640f5e2d511a5141878d57ae7a619f19b72a2bd3ef019cf0a22d74d93d9acf07", "firstIndex": 2013265733},
{"blockNumber": 9515674, "blockId": "0xff4a7b6b21aeaeb6e1a75ecd22b1f34c058a0ce1477ce90a8ce78165fd1d0941", "firstIndex": 2080374553},
{"blockNumber": 9659426, "blockId": "0xc351455249343b41e9171e183612b68c3c895271c62bd2c53d9e3ab1aa865aa1", "firstIndex": 2147483567},
{"blockNumber": 9794018, "blockId": "0xde98035b4b7f9449c256239b65c7ff2c0330de44dee190106d0a96fb6f683238", "firstIndex": 2214592213},
{"blockNumber": 9923840, "blockId": "0x881da313a1e2b6fab58a1d6fa65b5dacfdc9d68a3112a647104955b5233f84e3", "firstIndex": 2281701302},
{"blockNumber": 10042435, "blockId": "0x451f6459640a6f54e2a535cc3a49cfc469861da3ddc101840ab3aef9e17fa424", "firstIndex": 2348810174},
{"blockNumber": 10168883, "blockId": "0x5d16ff5adf0df1e4dc810da60af37399ef733be7870f21112b8c2cfff4995dd9", "firstIndex": 2415918783},
{"blockNumber": 10289554, "blockId": "0x85d5690f15a787c43b9a49e8dd6e324f0b3e0c9796d07c0cfb128e5c168f5488", "firstIndex": 2483027930},
{"blockNumber": 10386676, "blockId": "0x20f675ea72db448024a8a0b8e3ec180cac37a5910575bc32f8d9f5cdfe3c2649", "firstIndex": 2550136212},
{"blockNumber": 10479675, "blockId": "0x014abb07acf2330cc78800ca1f564928f2daccca4b389bf5c59f4b840d843ec0", "firstIndex": 2617245218},
{"blockNumber": 10562661, "blockId": "0xd437607a3f81ce8b7c605e167ce5e52bf8a3e02cdc646997bd0ccc57a50ad7d1", "firstIndex": 2684354520},
{"blockNumber": 10641508, "blockId": "0x2e8ab6470c29f90ac23dcfc58310f0208f5d0e752a0c7982a77a223eca104082", "firstIndex": 2751462730},
{"blockNumber": 10717156, "blockId": "0x8820447b6429dd12be603c1c130be532e9db065bb4bc6b2a9d4551794d63789a", "firstIndex": 2818571831},
{"blockNumber": 10784549, "blockId": "0xc557daab80a7cdc963d62aa881faf3ab1baceff8e027046bcd203e432e0983b3", "firstIndex": 2885680800},
{"blockNumber": 10848651, "blockId": "0xede1b0de5db6685a6f589096ceb8fccb08d3ff60e8b00a93caa4a775b48e07fc", "firstIndex": 2952789740},
{"blockNumber": 10909166, "blockId": "0x989db675899d13323006a4d6174557e3c5501c672afd60d8bd902fc98d37e92e", "firstIndex": 3019897599},
{"blockNumber": 10972902, "blockId": "0x5484050cc2c7d774bc5cd6af1c2ef8c19d1de12dabe25867c9b365924ea10434", "firstIndex": 3087007422},
{"blockNumber": 11036597, "blockId": "0x1e3686e19056587c385262d5b0a07b3ec04e804c2d59e9aaca1e5876e78f69ae", "firstIndex": 3154116231},
{"blockNumber": 11102520, "blockId": "0x339cf302fe813cce3bb9318b860dfa8be7f688413f38a6ea1987a1b84d742b4b", "firstIndex": 3221224863},
{"blockNumber": 11168162, "blockId": "0xc0fa21ea090627610bcac4732dff702633f310cabafc42bc500d3d4805198fe0", "firstIndex": 3288334273},
{"blockNumber": 11233707, "blockId": "0x491c37a479b8cf22eaa3654ae34c5ddc4627df8c58ca8a6979159e1710428576", "firstIndex": 3355442691},
{"blockNumber": 11300526, "blockId": "0xb7366d2a24df99002cffe0c9a00959c93ef0dcfc3fd17389e2020bf5caa788eb", "firstIndex": 3422551480},
{"blockNumber": 11367621, "blockId": "0xce53df5080c5b5238bb7717dfbfd88c2f574cfbb3d91f92b57171a00e9776cd2", "firstIndex": 3489660710},
{"blockNumber": 11431881, "blockId": "0x2a08ff9c4f6fd152166213d902f0870822429f01d5f90e384ac54a3eac0ceb3a", "firstIndex": 3556768626},
{"blockNumber": 11497107, "blockId": "0x1f99c6b65f2b1cb06ed1786c6a0274ff1b9eacab6cb729fcd386f10ebbd88123", "firstIndex": 3623878389},
{"blockNumber": 11560104, "blockId": "0xebe6924817bbdfe52af49667da1376bae5a2994b375d4b996e8ff2683744e37a", "firstIndex": 3690986640},
{"blockNumber": 11625129, "blockId": "0xbe6eee325329ee2fe632d8576864c29dd1c79bab891dc0a22d5b2ac87618d26e", "firstIndex": 3758095773},
{"blockNumber": 11690397, "blockId": "0xc28bf55f858ddf5b82d1ceb3b5258b90a9ca34df8863a1c652c4d359f5748fdf", "firstIndex": 3825204492},
{"blockNumber": 11755087, "blockId": "0x0c10cde6ce1bbe24dc57347fe4aaebc17b7d8e8d7d97e3db573133477f494740", "firstIndex": 3892314051},
{"blockNumber": 11819674, "blockId": "0x36b694a1776c94e4c6ae4a410931b2086de47a83e437517040e3290ce9afff67", "firstIndex": 3959422445},
{"blockNumber": 11883358, "blockId": "0x21f447aca9ddf94ed71df9fa3648a12acc2ba603f89f24c4784936864c41945f", "firstIndex": 4026531743},
{"blockNumber": 11948524, "blockId": "0x71a52b6cce80d3a552b0daa18beb952facf81a89bc7ca769d08ac297f317507a", "firstIndex": 4093640009},
{"blockNumber": 12013168, "blockId": "0x9a7fb369b8d8cd0edd0d890d636096f20c63abb7eb5798ad1e578cac599e3db8", "firstIndex": 4160748475},
{"blockNumber": 12078711, "blockId": "0x5de09329413b0c2f58d926f225197552a335ba3d5544d7bdb45e7574f78c9b8d", "firstIndex": 4227858275},
{"blockNumber": 12143640, "blockId": "0xbeafc0e1e0586f5a95f00f2a796d7df122c79c187aa2d917129297f24b8306bd", "firstIndex": 4294967145},
{"blockNumber": 12208005, "blockId": "0x052487095cdd4a604808e6c14e30fb68b3fa546d35585b315f287219d38ef77c", "firstIndex": 4362075289},
{"blockNumber": 12272465, "blockId": "0x82c8a50413bd67a0d6f53b085adcd9ae8c25ecc07ed766fa80297a8dcae63b29", "firstIndex": 4429184610},
{"blockNumber": 12329418, "blockId": "0x294c147e48d32c217ff3f27a3c8c989f15eee57a911408ec4c28d4f13a36bb3b", "firstIndex": 4496292968},
{"blockNumber": 12382388, "blockId": "0x8c2555965ff735690d2d94ececc48df4700e079c7b21b8e601a30d4e99bc4b5b", "firstIndex": 4563401809},
{"blockNumber": 12437052, "blockId": "0x2e38362031f36a0f3394da619dcc03be03c19700594cbd1df84c2c476a87de63", "firstIndex": 4630511012},
{"blockNumber": 12490026, "blockId": "0x122749c02a55c9c2a1e69068f54b6c1d25419eb743e3553aba91acf1daeadc35", "firstIndex": 4697619920},
{"blockNumber": 12541747, "blockId": "0xfb9f12aa2902da798ac05fab425434f8c7ce98050d67d416dbb32f98c21f66f7", "firstIndex": 4764728267},
{"blockNumber": 12597413, "blockId": "0x9a7a399c2904ac8d0fec580550525e7e1a73d8f65f739bf7c05d86e389d0d3f7", "firstIndex": 4831837757},
{"blockNumber": 12651950, "blockId": "0xb78dcb572cdafb9c4e2f3863ef518a3b2df0cd4f76faa26a423b2ca0c1cde734", "firstIndex": 4898946491},
{"blockNumber": 12706472, "blockId": "0xfd21f41ec6b0c39287d7d48c134d1212a261c53d65db99739994b003150bbad1", "firstIndex": 4966054796},
{"blockNumber": 12762929, "blockId": "0xc94d994bc40b2ae7dc23cf2b92cc01e84915f090bb57c0d9a67584bd564d3916", "firstIndex": 5033164307},
{"blockNumber": 12816689, "blockId": "0x7770c72f22cbf6ccf7ab85d203088f7ede89632cf0042c690102f926a90bd09d", "firstIndex": 5100273412},
{"blockNumber": 12872408, "blockId": "0x2e008b8c952d828875d777f7912f472af96ffc977f2ceae884006682cab6b8ed", "firstIndex": 5167381625},
{"blockNumber": 12929718, "blockId": "0x85eb0ed3c5910c6a01b65ef0a5b76c59c2cdb5094e6e27eb87c751d77bcc2c88", "firstIndex": 5234491305},
{"blockNumber": 12988757, "blockId": "0xdf12045bea73af18d4e71f8be8e334160f78b85f96a3535a4056409d8b61355a", "firstIndex": 5301600237},
{"blockNumber": 13049172, "blockId": "0xf07608d97a101cd9a95fee9d9062a15bcb333263e555f8cfa31da037e0468f30", "firstIndex": 5368709080},
{"blockNumber": 13108936, "blockId": "0x42739341db582d2f39b91ec9e8cc758777ca3f6ff9f25cd98883619fd5f026a7", "firstIndex": 5435817013},
{"blockNumber": 13175495, "blockId": "0x564f25eacb229350b7c648b5828169e7a0344ae62e866206828e2cfad8947f10", "firstIndex": 5502926476},
{"blockNumber": 13237721, "blockId": "0x0973425abec0fa6319701b46e07c2373b0580e3adbed6900aad27d5bf26dcb95", "firstIndex": 5570035419},
{"blockNumber": 13298771, "blockId": "0xf3a16fec5be808c9f7782fb578dc8cef7f8e2110f7289bd03c0cc13977dd1518", "firstIndex": 5637143840},
{"blockNumber": 13361281, "blockId": "0x3c0b6364201ca9221b61af3de27a3a87e111870b8c7efc43a6d8496e98c68690", "firstIndex": 5704253046},
{"blockNumber": 13421819, "blockId": "0x2f472e57997b95558b99e3e5e7e0e8d4dbf8b71c081aac6536c9ff5925dac2ce", "firstIndex": 5771361231},
{"blockNumber": 13480620, "blockId": "0xc4d689e87464a0c83c661c8e3a0614c370631de857f7e385b161dfe8bacd3e71", "firstIndex": 5838469468},
{"blockNumber": 13535793, "blockId": "0xe7674bacc8edce9fb3efd59b92c97da48fe7ace1de314b4a67d7d032fc3bb680", "firstIndex": 5905578026},
{"blockNumber": 13590588, "blockId": "0x6a3e86bdce7dd7d8792e1af9156edd8c3ffee7c20fed97001f58a9a2699f6594", "firstIndex": 5972687757},
{"blockNumber": 13646707, "blockId": "0xab404a5d3709cf571b04e9493f37116eeb5dd2bc9dc10c48387c1e0199013d69", "firstIndex": 6039797165},
{"blockNumber": 13703025, "blockId": "0x20e2fde15b8fe56f5dd7ab0f324c552038167ed44864bf3978e531ae68d6d138", "firstIndex": 6106905803},
{"blockNumber": 13761024, "blockId": "0x2ae49275e13e780f1d29aea8507b2a708ff7bfe977efac93e050273b8b3a8164", "firstIndex": 6174015107},
{"blockNumber": 13819468, "blockId": "0xb9d19cb31dedb1128b11cad9ffd6e58c70fe7ba65ba68f1ac63668ac5160ad85", "firstIndex": 6241124350},
{"blockNumber": 13877932, "blockId": "0x80b1ff0bb069a8479360a15eaa84ba30da02cfacadc564837f4b1c90478addb8", "firstIndex": 6308232256},
{"blockNumber": 13935384, "blockId": "0xe1f5469a559a6114dd469af61b118b9d9551a69bbd49a4e88f2a2d724830c871", "firstIndex": 6375341632},
{"blockNumber": 13994042, "blockId": "0x25188fb75f2328c870ade7c38ef42ff5fddef9c4e364eebe4c5d8d9cc3ecabab", "firstIndex": 6442449799},
{"blockNumber": 14051123, "blockId": "0xf4ef2bce9ee9222bdcf6b3a0c204676d9345e211e10c983e523930274e041ef1", "firstIndex": 6509559107},
{"blockNumber": 14109189, "blockId": "0x80b730c28f75d8cb5ec2fb736341cd87cb4ecb2c9c614e0a4ecc0f9812675d50", "firstIndex": 6576667347},
{"blockNumber": 14166822, "blockId": "0xf662a24b91684fa8ac462b31071f406de8d6183dba46d30d690f4407bc6af36f", "firstIndex": 6643777079},
{"blockNumber": 14222488, "blockId": "0x7333e324c96b12f11a38d1fc2ddb4860e018b90f5dc10f3dbe19f7679bb95535", "firstIndex": 6710885890},
{"blockNumber": 14277180, "blockId": "0x4373c1000e8e10179657689e2f0e42f88bd1601ecb4a5d83970d10287f6654cc", "firstIndex": 6777994595},
{"blockNumber": 14331080, "blockId": "0x9c708a750a3f284ec0ee950110b36fd488cb1ec24cd0c2ea72c19551ec5c42a5", "firstIndex": 6845103719},
{"blockNumber": 14384243, "blockId": "0x34ce7503b76335aa18dec880b0cefd388a29e0fcff6f2e1ddda8fb8c0ac1daf0", "firstIndex": 6912212376},
{"blockNumber": 14437670, "blockId": "0x79842efd3e406b41f51935fe2e6ad20a7dd5a9db2280ebd7f602ed93da1e3c24", "firstIndex": 6979320543},
{"blockNumber": 14489204, "blockId": "0xcd12addf0afdc229e9fe3bd0a34677a3826c5e78d4baf715f8ed36b736d6627a", "firstIndex": 7046430591},
{"blockNumber": 14541688, "blockId": "0x55f617abf208a73fc467e8cb5feead586b671dbb0f6281570b3c44b8eabb2b9e", "firstIndex": 7113538755},
{"blockNumber": 14594551, "blockId": "0xc7211bf772e93c8c2f945fcb6098b47c3455604cb8b94a505cb5cb720914c369", "firstIndex": 7180646025},
{"blockNumber": 14645065, "blockId": "0x6d5b0326f4b22e2b0196986a514f23ec6e9a62f70f53300a22b21ff661a6ef7e", "firstIndex": 7247756883},
{"blockNumber": 14695926, "blockId": "0x0a77272250e43b4bb46c02eb76944881a3c6b00a21bb9086a8229199bd62d97a", "firstIndex": 7314865843},
{"blockNumber": 14746330, "blockId": "0xd677fdbaf8efb1bfdc138ac6b2bd5d0e890a29acb1f52f40169181ad517b0d31", "firstIndex": 7381974956},
{"blockNumber": 14798546, "blockId": "0xbb277e8623acd2ce2340cf32f6c0ddab70fd95d862287f68a3c37250a70619cd", "firstIndex": 7449082890},
{"blockNumber": 14848230, "blockId": "0x587b39f11bdaa2091291c7c3947e88df2e91e7997f2375dfd43b6e310a538582", "firstIndex": 7516192636},
{"blockNumber": 14897646, "blockId": "0xf5b5c9d0c024ca0c0f0c6171871f609687f4ccb064ededbd61176cf23a9011e8", "firstIndex": 7583299602},
{"blockNumber": 14950782, "blockId": "0x50549486afaf92a4c3520012b325e914ef77a82e4d6530a71f9b1cca31bfae18", "firstIndex": 7650409868},
{"blockNumber": 15004101, "blockId": "0x7edac55dea3ee4308db60b9bc0524836226fe301e085b3ce39105bd145ba7fc3", "firstIndex": 7717517503},
{"blockNumber": 15056903, "blockId": "0xb4cfd02d435718598179cdba3f5c11eb8653fe97ec8d89c60673e3e07b8dfc94", "firstIndex": 7784627997},
{"blockNumber": 15108302, "blockId": "0x53c77a7de4515e9e93467a76f04cc401834bcdd64e9dfa03cf6d2844a6930293", "firstIndex": 7851736988},
{"blockNumber": 15159526, "blockId": "0x1a31ad84b423254d7ff24e7eca54048ed8cc13cec5eb7289bf3f98ed4de9f724", "firstIndex": 7918844431},
{"blockNumber": 15211013, "blockId": "0xe5d491e1d6cc5322454143b915c106be1bf28114a41b054ba5e5cfe0abecafba", "firstIndex": 7985953942},
{"blockNumber": 15264389, "blockId": "0xd9939bb9e58e95d2672c1148b4ec5730204527d3f3fc98ca03a67dc85cf3d710", "firstIndex": 8053063187},
{"blockNumber": 15315862, "blockId": "0x7254f99c4bb05235d5b437984c9132164e33182d4ce11a3847999da5c28b4092", "firstIndex": 8120172147},
{"blockNumber": 15364726, "blockId": "0x11b57547579d9009679e327f57e308fe86856391805bc3c86e7b39daae890f52", "firstIndex": 8187281042},
{"blockNumber": 15412886, "blockId": "0xbe3602b1dbef9015a3ec7968ac7652edf4424934b6bf7b713b99d8556f1d9444", "firstIndex": 8254390023},
{"blockNumber": 15462792, "blockId": "0x3348ca4e14ac8d3c6ac6df676deaf3e3b5e0a11b599f73bd9739b74ebd693efe", "firstIndex": 8321499024},
{"blockNumber": 15509914, "blockId": "0xbc98fd6b71438d5a169f9373172fea799fa3d22a8e6fe648d35e1070f2261113", "firstIndex": 8388606521},
{"blockNumber": 15558748, "blockId": "0x5fa2cf499276ae74a5b8618990e71ed11a063619afe25c01b46e6252eba14c19", "firstIndex": 8455716577},
{"blockNumber": 15604217, "blockId": "0x78a608e13d2eb3c5fed81a19b829ede88071cf01ea9ff58112a7472435f97c30", "firstIndex": 8522825668},
{"blockNumber": 15651869, "blockId": "0xd465d861d925d1475440782ff16c2b3361ba3c8e169d7cc90eb8dfc0f31b0aac", "firstIndex": 8589934080},
{"blockNumber": 15700968, "blockId": "0x71e3def131271e02c06ca945d14a995703a48faac1334a9e2e2321edd0b504d0", "firstIndex": 8657043390},
{"blockNumber": 15762986, "blockId": "0x9b1b51dca2eae29162ca66968a77b45175f134b44aea3defadcb924f83e0b944", "firstIndex": 8724151376},
{"blockNumber": 15814455, "blockId": "0x3c04a509cb6304d3df4bef57e0119d9e615ab737ec0b4a7deada6e5f57d9f873", "firstIndex": 8791260562},
{"blockNumber": 15865639, "blockId": "0x9e9e26148c774518ecf362c0e7c65a5c1b054a8a3e4e36036c70e273fac6147c", "firstIndex": 8858368894},
{"blockNumber": 15920564, "blockId": "0x9efe1d4dbfd9aa891ac0cffd3e1422a27ba2ea4add211b6900a2242cdb0f0ca0", "firstIndex": 8925477950},
{"blockNumber": 15974371, "blockId": "0xc63ccef7bc35a0b431a411f99fe581b322d00cfc6422d078696808a5658a32ac", "firstIndex": 8992587107},
{"blockNumber": 16032913, "blockId": "0x3e60957224964669a8646914e3166553b9f4256d5be160b17995d838af3ef137", "firstIndex": 9059696632},
{"blockNumber": 16091057, "blockId": "0x12b346047bb49063ab6d9e737775924cf05c52114202ddb1a2bdaf9caabbfe0c", "firstIndex": 9126804912},
{"blockNumber": 16150977, "blockId": "0x49318a32ff0ce979c4061c1c34db2a94fb06e7669c93742b75aff14a134fa598", "firstIndex": 9193913896},
{"blockNumber": 16207432, "blockId": "0xf7870865edf81be4389a0be01468da959de703df0d431610814d16ed480176e4", "firstIndex": 9261019778},
{"blockNumber": 16262582, "blockId": "0x25818e0f4d54af6c44ef7b23add34409a47de3ab1c905889478f3ec8ad173ec3", "firstIndex": 9328131320},
{"blockNumber": 16319695, "blockId": "0x25de4b1c18cc503f5d12b4fa9072d33a11fa503a3dbeb9ab3d016b57c1e5cd4d", "firstIndex": 9395240790},
{"blockNumber": 16373605, "blockId": "0x3794a5e0d2aa10baf1e6a5ec623d6089fdd39799eff633017d8df5144526939f", "firstIndex": 9462349509},
{"blockNumber": 16423494, "blockId": "0xe0217d947ba3865dfc9288e0c890b0996457bb9d18467bd125e86bbb0052b57f", "firstIndex": 9529458033},
{"blockNumber": 16474853, "blockId": "0xd454f033d190f22f9e56f0209ea1eeb3b6257805d5d88650d2759eb4d24821b7", "firstIndex": 9596567055},
{"blockNumber": 16525689, "blockId": "0x8a23cbbf3e258e13f5a1ada434366796cb4a3e5b1062455582fb2bc3ab991541", "firstIndex": 9663674943},
{"blockNumber": 16574203, "blockId": "0xc1a5b7d26e8222bd2d56ef4108f75d69f7c116707d348950834e00962241a4f8", "firstIndex": 9730785112},
{"blockNumber": 16622622, "blockId": "0x3ddb3ef7a4309bd788258fb0d62613c89a0b4de715f4e12f6017a194d19d6481", "firstIndex": 9797893665},
{"blockNumber": 16672585, "blockId": "0x8aa5e9f72b261f9e2a9eb768483d1bbd84d3a88fdb1346f6a9a7f262fd28ba41", "firstIndex": 9865002893},
{"blockNumber": 16720124, "blockId": "0x2128f8baf264166e37554d5c31a06de58d9ccfb663117358251da548a23a060f", "firstIndex": 9932111275},
{"blockNumber": 16769162, "blockId": "0x6b3e849482d3222032740ad6b8f98e24636c82682a6a3572b1ef76dfebc66821", "firstIndex": 9999217824},
{"blockNumber": 16818311, "blockId": "0xe45f57381978a2bfc85bd20af1c41e2b630412642ac4f606b477f05f030ef5d9", "firstIndex": 10066328668},
{"blockNumber": 16869531, "blockId": "0xa154555266d24dc1f4885af5fafcf8cab3de788998cf69e1d28f56aa13a40c43", "firstIndex": 10133437302},
{"blockNumber": 16921611, "blockId": "0xf1f829b4ab5eec6e243916dd530993fa11eef5510fd730e8d09ead6b380355a1", "firstIndex": 10200547185},
{"blockNumber": 16974870, "blockId": "0x1a33202b95926ae4cb8e6e99d8d150f3c50d817b3a316452bdf428c971dabde5", "firstIndex": 10267655914},
{"blockNumber": 17031277, "blockId": "0x706c9dd0dc81e7ac29d2ea0f826e6b8a1dcb5adb1b904ff6e43260729c9fd0a7", "firstIndex": 10334764934},
{"blockNumber": 17086330, "blockId": "0x085a80cafe96b520105b9a1f8e7a2bbc9474da24da7e6344ca7c4d32db822f92", "firstIndex": 10401871892},
{"blockNumber": 17141311, "blockId": "0x33ec6513dfa515bc5f6356476b4eb075a8064181d6aaf6aa1a1e18887e342f74", "firstIndex": 10468982364},
{"blockNumber": 17190907, "blockId": "0x6f41273d3bf30d3347e7eb68872a49b3ac947f314543478be7a28a55e5c41a3c", "firstIndex": 10536090817},
{"blockNumber": 17237199, "blockId": "0x9a87a14a128c0345a366940f821a14f16719de628658ac0628e410a72d723e90", "firstIndex": 10603200178},
{"blockNumber": 17287181, "blockId": "0x9c6e78adcf562ac63c103e3e5a02f025023079aca79bdd6ef18f7bd2a6271c29", "firstIndex": 10670309183},
{"blockNumber": 17338652, "blockId": "0x1b747da97b2397a293602af57514dab4ca1010bb6c601ff05cb2012dd1124ebb", "firstIndex": 10737418023},
{"blockNumber": 17389337, "blockId": "0xbc3c0ca1e5989605b9b59c94b418562eb17ccbce30e45ac8531cf0b3867a6b2c", "firstIndex": 10804522857},
{"blockNumber": 17442261, "blockId": "0x1ec341be1cbd09f559bfa3d3e39a341d8e21052eeb7880931d43d086651733b7", "firstIndex": 10871635535},
{"blockNumber": 17497787, "blockId": "0x6069880d486f2548599df1e14e12752d3eb9bc99843a98cd6631c22be1b58554", "firstIndex": 10938744657},
{"blockNumber": 17554322, "blockId": "0x69b2564bc00b1f310f6b416912869d7530d7864bf7d70d55c7ace554f129b989", "firstIndex": 11005852829},
{"blockNumber": 17608492, "blockId": "0x7d590653d5fa52c0d3ee453a77d2088504f57adcef35cd57c567afb554608457", "firstIndex": 11072961972},
{"blockNumber": 17664272, "blockId": "0xdc16159d3500cdc7410873102f41fc55de2a8a41e3779c4b70e6224a541e2b9e", "firstIndex": 11140070967},
{"blockNumber": 17715101, "blockId": "0x655e33c4e81182464ea0b0e1fdbc53ce53902431db5107326b816091a4564652", "firstIndex": 11207179487},
{"blockNumber": 17764042, "blockId": "0x54439184f31cd83ba06b48b6dbfdd744ae7246355be1327b44744058711d05c0", "firstIndex": 11274287303},
{"blockNumber": 17814383, "blockId": "0xfb453bc951360c76fb09bb1b9a3e39d23ececa0adb93368cc3f41f0457845089", "firstIndex": 11341397984},
{"blockNumber": 17864648, "blockId": "0x32a68823ef4ec0cbab2fe50c97e3f462b575e8b117da40d00c710b4c66ee1d6d", "firstIndex": 11408505657},
{"blockNumber": 17913366, "blockId": "0x04b944aab8a4ff91b77c2191817cf051766100c227616a3746af53407e740124", "firstIndex": 11475614351},
{"blockNumber": 17961690, "blockId": "0x08bee7cc0b764106ca01dd5370b617879487ffb423688c96e948dce125990f45", "firstIndex": 11542723488},
{"blockNumber": 18011048, "blockId": "0x94c39d3a64f3e9a91b1d98554cd29e1390e30fa61cfa4e909c503eee2fd9f165", "firstIndex": 11609833142},
{"blockNumber": 18061209, "blockId": "0x2ee9ade68955c030488c8a30537bdf948355f7dd5ae64942b5bfce1be6650e19", "firstIndex": 11676941316},
{"blockNumber": 18111692, "blockId": "0xd6c4fd0c1cc20ed5e7960bb5043e9e5e9c66a4d2ec5709ac9797fff678435640", "firstIndex": 11744050346},
{"blockNumber": 18166212, "blockId": "0x3262588c2ef79a3b3f6a3db6435202d22f5667cd48c136b0797404901525c9ff", "firstIndex": 11811159686},
{"blockNumber": 18218743, "blockId": "0x935bd9a4164ff7ecd09a37b916ce5bf78487bd19377b5b17be153e39318aee74", "firstIndex": 11878268593},
{"blockNumber": 18271236, "blockId": "0xe58ebb821f27e3665898f390802a3d129d217b3a3ee36d890a85cf22a0a8aa33", "firstIndex": 11945376750},
{"blockNumber": 18323007, "blockId": "0x3997a841468efa1bc614bfc3de4502274901b04b428f87a1f3086dfd78cda1eb", "firstIndex": 12012485748},
{"blockNumber": 18372443, "blockId": "0xc44a13a5d02e8dc39f355de5e21ce7bb311ce7f4d9114ff480dce235a169e416", "firstIndex": 12079595370},
{"blockNumber": 18421829, "blockId": "0x7da63a0b613d8745597b2ac64fd5cc8b2fb14b24d163b12a0a39d7d3d4ff7b5c", "firstIndex": 12146703582},
{"blockNumber": 18471706, "blockId": "0xd632a1893f415ff618f4b612a7687e6af1f12feeed81f46f0022090829c1eb4c", "firstIndex": 12213812677},
{"blockNumber": 18522301, "blockId": "0x44fa2cf08145ae40e8e42f4e6b4ab7df360a17c5a065ce45fcc41b51bee011f4", "firstIndex": 12280921639},
{"blockNumber": 18572935, "blockId": "0x72b8ab4c78c90425ee054b4806a8be703da0febdf1d51866358ec2bd21ba9529", "firstIndex": 12348029751},
{"blockNumber": 18623431, "blockId": "0x8c4cb2f13501d9788820280c6f16692d0737258c3896f1e4bded32d838febf7f", "firstIndex": 12415138965},
{"blockNumber": 18675470, "blockId": "0x523b73c19ea8b3ae32ef141a83ef9855e667ebf51443cfcabd1a06659359062a", "firstIndex": 12482247454},
{"blockNumber": 18725728, "blockId": "0x0cfbd131eb5dad51488238079fba29a63eebb5c32d1a543cb072e48dc2104ef3", "firstIndex": 12549356369},
{"blockNumber": 18778387, "blockId": "0xc4906c77af8058b9f172a4f0e8788c7887f05caa5ac752b38b5387080f74ae49", "firstIndex": 12616465992},
{"blockNumber": 18835044, "blockId": "0x49c5e07f409a841dc81f3ef8417f1951f8fcc13c90134f9d2a0cd11938f9fa36", "firstIndex": 12683575082},
{"blockNumber": 18883308, "blockId": "0x386a58dd5f79a419eeb05075b07b3ff3bc836a265c9688854a504223b1d6a830", "firstIndex": 12750683753},
{"blockNumber": 18933635, "blockId": "0xd3881292147589bd2e192769e5c9175b5d03a453fe1ef3c4b5b6858ac9402a2f", "firstIndex": 12817792470},
{"blockNumber": 18988254, "blockId": "0xcbe72dfa15428ac21b9c59c703ceaa0eb4b2205927687261d7aaed3dbb3783ea", "firstIndex": 12884882858},
{"blockNumber": 19041325, "blockId": "0x92b077e1c2f8819da728f0307c914fdcd57eba14ea07d9a45c28d1ed8ffff576", "firstIndex": 12952010530},
{"blockNumber": 19089163, "blockId": "0x43f8ab2d3dfc34c8e18cba903074d54e235dc546f19c4eb78245a522c266c84e", "firstIndex": 13019119228},
{"blockNumber": 19140629, "blockId": "0xab7b7ae5424b18105a13b657fa6099d4ab67fde5baff39fe6e4de707397e995c", "firstIndex": 13086228236},
{"blockNumber": 19192118, "blockId": "0x451327e6a5cf6ce1c8c14c01687dc5f719f3c2176f46bac4f264616256e30d1c", "firstIndex": 13153337116},
{"blockNumber": 19237836, "blockId": "0x9b260d6be369557d1dc88aca423e2697e697d941d1b726c183015b5649e248c8", "firstIndex": 13220445421},
{"blockNumber": 19291271, "blockId": "0x4878c28d79e1f71bc11e062eb61cb52ae6a18b670b0f9bea38b477944615078e", "firstIndex": 13287554254},
{"blockNumber": 19344448, "blockId": "0x56243b9ad863bf90953fe9aa6e64a426629384db1190e70dce79575d30595f7e", "firstIndex": 13354663659},
{"blockNumber": 19394948, "blockId": "0x195173b64dda7908d6aa39a63c8bdd29ec181d401e369d513be1308550d0ddcb", "firstIndex": 13421771935},
{"blockNumber": 19443075, "blockId": "0xd39c1d60996475e65d1ab5b4e755f510ca466564a8155d35db6667988d6c0e44", "firstIndex": 13488880427},
{"blockNumber": 19488383, "blockId": "0x28956eb8856fa8db59c02585016b8baf43bc44bc35b00bdaf8a6babe51101c5c", "firstIndex": 13555977105},
{"blockNumber": 19534584, "blockId": "0x2421c97b0f140185d4c20943cd4ed7d7424468482feb76e3003a1cc69da3fd7b", "firstIndex": 13623097580},
{"blockNumber": 19579602, "blockId": "0x25f96529028e9f51c59aec9ce8de282b7dd67066fd46a1694130698ed0f40d8b", "firstIndex": 13690207623},
{"blockNumber": 19621517, "blockId": "0x4f6f6e0a0488f3d51823bc4b07c292348c259b1866968f77ee76b66b37101c75", "firstIndex": 13757315529},
{"blockNumber": 19665085, "blockId": "0x00f9315f89681b44bff46f1bad8894bc6dfae1c459d3d6520f9881861304a496", "firstIndex": 13824425382},
{"blockNumber": 19709229, "blockId": "0x24e022b21ae1ba8a3e8c87cb9734aa1d1810fc4a69fe147d3ebb1ff0df8bcc15", "firstIndex": 13891534799},
{"blockNumber": 19755387, "blockId": "0x77f184b7183b1a351760d242041249464b42cfaa6fbc4326f352b06bb3b21a02", "firstIndex": 13958642483},
{"blockNumber": 19803894, "blockId": "0xf37eb1b054a6d61272940361f386eb744cded84d15c3250a7eabadede257371c", "firstIndex": 14025751618},
{"blockNumber": 19847885, "blockId": "0x4659649fa8a3b4bbe8978673ba9a22ae20352c7052b676d373b5a51b1967ffa4", "firstIndex": 14092848654},
{"blockNumber": 19894193, "blockId": "0x15606bdc0f1a710bd69443c7154d4979aece9329977b65990c9b39d6df84ed5c", "firstIndex": 14159970181},
{"blockNumber": 19938551, "blockId": "0x6a8f4571924ed902bd8e71bf8ed9cc9d72cabeabc410277c8f0fc2b477d00eb7", "firstIndex": 14227077892},
{"blockNumber": 19985354, "blockId": "0x7b6fb6376410b4d9e5d7ee02f78b2054e005dd2976eea47fc714f66b967dc285", "firstIndex": 14294187965},
{"blockNumber": 20028440, "blockId": "0x9b37440b71c24756b8855b8012432b84276ae94c80aa1ccc8b70a7705992103c", "firstIndex": 14361296503},
{"blockNumber": 20071780, "blockId": "0xa2ed129f343f3d60419772ec5635edcd36b8680c9419b6626e2bc84b230c709b", "firstIndex": 14428405230},
{"blockNumber": 20113832, "blockId": "0xe7a610e8bcbf8ded141ebc7142de03dfc54b1bcc79e3bf8d07fad4e42b665bba", "firstIndex": 14495512019},
{"blockNumber": 20156854, "blockId": "0xbe09704f65a70ef8843d9c8e511ddc989ea139dbe94cdfe37f52b03620d62385", "firstIndex": 14562622430},
{"blockNumber": 20200135, "blockId": "0x9a58c34d5f77342e94065d119905c000223cd988c4b11f1539fff20737159630", "firstIndex": 14629731923},
{"blockNumber": 20244389, "blockId": "0x1e733f0db9ef21183107259b3c2408c78fa5a01469928cd295f3ea7e8eedda45", "firstIndex": 14696840011},
{"blockNumber": 20288489, "blockId": "0xb5ad7edd86b181226c8c7be0a08069e3955234e797426843fff9de0f57ec59cc", "firstIndex": 14763949714},
{"blockNumber": 20333582, "blockId": "0x8040c209f5cd1738ee0f85c2f1db7c43a420d148680c7390fd1701b9f0bb671a", "firstIndex": 14831058335},
{"blockNumber": 20377087, "blockId": "0x08fdc4cd246b6ae9d4a45646b0aed6af3bb330eb6cd4c8b93646157e7b002b84", "firstIndex": 14898167722},
{"blockNumber": 20421699, "blockId": "0x5a2912b5fc2f02df33b655155990f92dcaacda5b75427fe3d87fb38f36b1c17d", "firstIndex": 14965275691},
{"blockNumber": 20467194, "blockId": "0x3deaf4325c461004b090b0261996c645ab529c1471feaf7dc2bbe1f128180297", "firstIndex": 15032385211},
{"blockNumber": 20512397, "blockId": "0x37e39697ec1b7683a6202be250ffaee7a1102e8030f87550b94af05ec66cec83", "firstIndex": 15099493973},
{"blockNumber": 20557443, "blockId": "0x8e9c04468f3111eab8b1f6a58b277862c624861c237cadecc53ec249bd811bda", "firstIndex": 15166602882},
{"blockNumber": 20595899, "blockId": "0x9787555fe57e4650002257eb2c88f1ef435b99d406e33fe2f889be180123ef25", "firstIndex": 15233709908},
{"blockNumber": 20638606, "blockId": "0x70681cffd159ce2e580dbbbe8fa6b5343dbcb081429cdda6c577e615bef4ef05", "firstIndex": 15300820678},
{"blockNumber": 20683605, "blockId": "0xb32662d5e241132ffe2249caea67f5746a6f4382297b2ac87c81e2794faf1f7a", "firstIndex": 15367929350},
{"blockNumber": 20728630, "blockId": "0x15a817c846928b673032d5eacd0cff7a04217d268457aa30a322ecca32be4d49", "firstIndex": 15435037830},
{"blockNumber": 20771519, "blockId": "0x542bc7b9804bbc45f4be470f4dc56f215a4dec71fed71eba2ffc804afd262b95", "firstIndex": 15502145990},
{"blockNumber": 20815097, "blockId": "0x798cdd51c964fcf18561d70095d9613b84ba836817972799c9dfd0bfbe1e042b", "firstIndex": 15569256033},
{"blockNumber": 20857859, "blockId": "0xfb5bb066d419a651d8e0186569eb4e8d8bcd5181d8f02e0d578b5dfe2fc738dd", "firstIndex": 15636364671},
{"blockNumber": 20896890, "blockId": "0x834b8d6fad779e4cf8214128f6c93d7387b6d6279e517f6f0a284b5d831cc3ae", "firstIndex": 15703472902},
{"blockNumber": 20939387, "blockId": "0x7adee7c78420c711efa216c61e0b561e581d7ff0331efd91ee16a609b34cfdc2", "firstIndex": 15770582325},
{"blockNumber": 20981303, "blockId": "0x6f5d7b0cc6dad5eb258176e07de21795a8347d68f7303f06934046e0236bea6d", "firstIndex": 15837691713},
{"blockNumber": 21023216, "blockId": "0x96cfe35a45df1297a36f42c59ebe706ab0473dfbf59ce910b5c5a8dbf696de1c", "firstIndex": 15904799667},
{"blockNumber": 21068378, "blockId": "0x93753875ff330d922b23f823203198f3b1bb8833367c6b6a8f896ff54be2c12d", "firstIndex": 15971909040},
{"blockNumber": 21112445, "blockId": "0x6ac02fa6ae486b86aba562eaf6f3d883befaa8ebedcfd8d74bdb7368d42deee3", "firstIndex": 16039003625},
{"blockNumber": 21155992, "blockId": "0x25f76896b4b693bafb79e9a535e2bf00ed62a577e35209749346e8e79a60bb71", "firstIndex": 16106126344},
{"blockNumber": 21200962, "blockId": "0x725f2befe913cb2659d262e2d3b6f79a706b31c557d52669471da22347ec8287", "firstIndex": 16173235265},
{"blockNumber": 21244663, "blockId": "0x6778c4194f54e70939da38853daddb22bfaf160d35617ab05d0f5c476741147b", "firstIndex": 16240344735},
{"blockNumber": 21290273, "blockId": "0x433ac819c40bd3061205fe0ece0645eec73f54a0a5c1559c981f983345bc0154", "firstIndex": 16307453543},
{"blockNumber": 21336156, "blockId": "0x261dc8c1639d505624150d2388d15ed10bfb4c3ce9c0c327a4ec26531689a097", "firstIndex": 16374562466},
{"blockNumber": 21378880, "blockId": "0x5c78b2b70553140dfdfdd4f415b98f88e74f74662315834038fd99042277d917", "firstIndex": 16441671104},
{"blockNumber": 21421613, "blockId": "0x854532f9d1c77627b763f9cbc7099a653d59554ed57fa763bc218834c82955fe", "firstIndex": 16508780351},
{"blockNumber": 21466875, "blockId": "0xb8b83cc62084e948235ef4b5973bf7fd988fa28bcaa72f7d38ad8e50de729618", "firstIndex": 16575888599},
{"blockNumber": 21511942, "blockId": "0xe806a28bc1b7f8cd752c8ceedbe081d49773d4558a9fb95e3357c0c07172522d", "firstIndex": 16642996907},
{"blockNumber": 21550291, "blockId": "0x1f3e26d303e7a2a9b0614f12f62b189da365b3947c5fe2d99ed2711b37fe7daa", "firstIndex": 16710106826},
{"blockNumber": 21592690, "blockId": "0xa1408cfbc693faee4425e8fd9e83a181be535c33f874b56c3a7a114404c4f686", "firstIndex": 16777215566},
{"blockNumber": 21636275, "blockId": "0x704734c2d0351f8ccd38721a9a4b80c063368afaaa857518d98498180a502bba", "firstIndex": 16844323959},
{"blockNumber": 21681066, "blockId": "0x1e738568ed393395c498b109ad61c0286747318aae0364936f19a7b6aba94aef", "firstIndex": 16911433076},
{"blockNumber": 21725592, "blockId": "0xee87b7948e25a7498a247c616a0fbaa27f21b004e11fc56f2a20c03791ed8122", "firstIndex": 16978540993}
]

View file

@ -0,0 +1,63 @@
[
{"blockNumber": 3246675, "blockId": "0x36bf7de9e1f151963088ca3efa206b6e78411d699d2f64f3bf86895294275e0b", "firstIndex": 67107286},
{"blockNumber": 3575582, "blockId": "0x08931012467636d3b67ae187790951daed2bb6423f9cd94e166df787b856788d", "firstIndex": 134217672},
{"blockNumber": 3694264, "blockId": "0x1f35f276a3c78e5942ee285fcbd0c687691853c599a2f5b174ea88f653bc9514", "firstIndex": 201326578},
{"blockNumber": 3725632, "blockId": "0x3bcb264c56c3eeab6c8588145f09dff3fb5f821d9fc1e7b92264b14314dae553", "firstIndex": 268433636},
{"blockNumber": 3795390, "blockId": "0x2d1ef2815bb8e018b275fa65540b98265285016aff12596bd89a3b1442d248eb", "firstIndex": 335542953},
{"blockNumber": 3856683, "blockId": "0x8a9a46d6f53975cd9ec829c3c307a99fb62b8428cefb63ffe06d17143649c3ee", "firstIndex": 402648835},
{"blockNumber": 3869370, "blockId": "0x2e8c04e7e5e96d09260b65d77b1770b4105b0db2ee7d638c48f086b8afac17db", "firstIndex": 469759276},
{"blockNumber": 3938357, "blockId": "0xf20f2cdbcc412d5340e31955d14a6526ea748ba99b5ec70b6615bdb18bcd4cfb", "firstIndex": 536868027},
{"blockNumber": 3984894, "blockId": "0x0bcd886b3cebb884d5beeaf5ad15ee1514968b5ad07177297c7d9c00f27aa406", "firstIndex": 603968430},
{"blockNumber": 4002664, "blockId": "0x7d3575b6ca685468fa5a5fa9ff9d5fac4415b0a67a3ed87d3530f127db32fff4", "firstIndex": 671088417},
{"blockNumber": 4113187, "blockId": "0x3a5313ac5b602134bb73535b22801261e891ccb7bd660ab20e0a536dc46d3e13", "firstIndex": 738197016},
{"blockNumber": 4260758, "blockId": "0xe30fb9a304d3602896a5716d310f67ba34ccef7f809a3ead4b2d991cb9ee4eb0", "firstIndex": 805306270},
{"blockNumber": 4391131, "blockId": "0x3958478c1c3be9b7caedbcc96230ed446d711e56580e324bc2fcf903fc87c90f", "firstIndex": 872415115},
{"blockNumber": 4515650, "blockId": "0x46a3a7b97a9dff4ef4dc2c1cc5cd501f2182d9548655b77b5e07a2dbb41071a4", "firstIndex": 939523930},
{"blockNumber": 4634818, "blockId": "0x2197d0dd3925c1d7ba3e2c4eef20035b68efc0a2506f76ddd9e481e0ce8ca6e1", "firstIndex": 1006628557},
{"blockNumber": 4718295, "blockId": "0xcce7bb4af1a41e6056ef68192e60c738be01ac3e071ed1ec52cead08a39995ce", "firstIndex": 1073734698},
{"blockNumber": 4753438, "blockId": "0xa60e043728a369cdf39a399bd7a903085ee9386f38176947578e5692b4b01f65", "firstIndex": 1140843192},
{"blockNumber": 4786522, "blockId": "0x10629cadc00e65f193fa4d10ecd2bf1855e442814c4a409d19aae9eb895dce13", "firstIndex": 1207956586},
{"blockNumber": 4811706, "blockId": "0xf1e94111f0086733bdcb4a653486a8b94ec998b61dda0af0fd465c9b4e344f87", "firstIndex": 1275058221},
{"blockNumber": 4841796, "blockId": "0xa530f7dd72881ac831affdc579c9d75f6d4b6853b1f1894d320bd9047df5f9eb", "firstIndex": 1342177155},
{"blockNumber": 4914835, "blockId": "0xbd8321e354f72c4190225f8ed63d4aba794b3b568677d985e099cb62d9d36bae", "firstIndex": 1409286143},
{"blockNumber": 4992519, "blockId": "0x4a06a5a4aa5bc52151937cc1c0f8da691a0282e94aab8b73b9faa87da8d028de", "firstIndex": 1476384367},
{"blockNumber": 5088668, "blockId": "0xb7d5ee03c08ed3936348eeb3931be8f804e61f2b09debf305967c6a7bbf007e0", "firstIndex": 1543502599},
{"blockNumber": 5155029, "blockId": "0x84f590dfc2e11f1ca53c1757ac3c508d56f55ee24d6ca5501895974be4250d76", "firstIndex": 1610605837},
{"blockNumber": 5204413, "blockId": "0xeaf2c3fb6f927c16d38fab08b34303867b87470558612404c7f9e3256b80c5b9", "firstIndex": 1677720841},
{"blockNumber": 5269957, "blockId": "0x596e0b2e8e4c18c803b61767320fe32c063153d870c94e4a08e9a68cbaa582a9", "firstIndex": 1744825147},
{"blockNumber": 5337678, "blockId": "0x7b2d54f8af1ecaaaab994e137d4421d8236c1c10d9a7bdcb9e5500db7a3fe9a3", "firstIndex": 1811939316},
{"blockNumber": 5399058, "blockId": "0xb61ef16d55c96682fb62b0110a2dbc50d8eff2526be4121ece3690700611c71b", "firstIndex": 1879046044},
{"blockNumber": 5422707, "blockId": "0xdabcab7c0cc9cb9f22f7507a1076c87831cb1afed9d0aa5bcd93f22266720c91", "firstIndex": 1946156915},
{"blockNumber": 5454264, "blockId": "0xe1bde812906605ce662f5fd9f01b49c7331fb25f52ab5b12d35ea2b4da5458fe", "firstIndex": 2013259168},
{"blockNumber": 5498898, "blockId": "0x9533d9c5353d22f8a235e95831cfbf4d5a7220a430ca23494526a9d3aa866fe8", "firstIndex": 2080374321},
{"blockNumber": 5554801, "blockId": "0xe7b320bbecb19f1e99dd6ce4aed1efc754d7b2022e1f80389e8a21413c465f55", "firstIndex": 2147476253},
{"blockNumber": 5594725, "blockId": "0xce6750be4a5b3e0fe152dd02308e94f7d56b254852a7e9acef6e14105053d7d1", "firstIndex": 2214591591},
{"blockNumber": 5645198, "blockId": "0x5d42d39999c546f37001d5f613732fb54032384dd71a686d3664d2c8a1337752", "firstIndex": 2281696503},
{"blockNumber": 5687659, "blockId": "0x3ed941be39a33ffa69cf3531a67f5a25f712ba05db890ff377f60d26842e4b1c", "firstIndex": 2348801751},
{"blockNumber": 5727823, "blockId": "0xaf699b6c4cd58181bd609a66990b8edb5d1b94d5ff1ab732ded35ce7b8373353", "firstIndex": 2415917178},
{"blockNumber": 5784505, "blockId": "0x621c740d04ea41f70a2f0537e21e5b96169aea8a8efee0ae5527717e5c40aa64", "firstIndex": 2483024581},
{"blockNumber": 5843958, "blockId": "0xec122204a4e4698748f55a1c9f8582c46bacda029aee4de1a234e67e3288e6b1", "firstIndex": 2550136761},
{"blockNumber": 5906359, "blockId": "0x8af5ce73fbd7a6110fb8b19b75a7322456ece88fcfa1614c745f1a65f4e915c1", "firstIndex": 2617245617},
{"blockNumber": 5977944, "blockId": "0xbc8186258298a4f376124989cfb7b22c2bea6603a5245bb6c505c5fc45844bbd", "firstIndex": 2684350982},
{"blockNumber": 6051571, "blockId": "0x54f9df9d9d73d1aa1cfcd6f372377c6013ecba2a1ed158d3c304f4fca51dae58", "firstIndex": 2751463209},
{"blockNumber": 6118461, "blockId": "0xfea757fad3f763c62a514a9c24924934539ca56620bd811f83e9cc2e671f0cf0", "firstIndex": 2818572283},
{"blockNumber": 6174385, "blockId": "0x2d8d0226e58f7516c13f9e1c9cf3ea65bb520fa1dfd7249dc9ea34a4e1fd430d", "firstIndex": 2885681036},
{"blockNumber": 6276318, "blockId": "0xa922e9d54fd062b658c4866ed218632ddd51f250d671628a42968bb912d3ed5d", "firstIndex": 2952789983},
{"blockNumber": 6368452, "blockId": "0x8d3d7466a7c9ca7298f82c37c38b0f64ec04522d2ed2e2349f8edc020c57f2c4", "firstIndex": 3019898695},
{"blockNumber": 6470810, "blockId": "0x9887c35542835ee81153fa0e4d8a9e6f170b6e14fc78d8c7f3d900d0a70434f1", "firstIndex": 3087007578},
{"blockNumber": 6553334, "blockId": "0x7b0d89a0282c18785fcc108dbdc9d45dd9d63b7084ddc676df9e9504585a5969", "firstIndex": 3154115987},
{"blockNumber": 6663825, "blockId": "0xff6cec99324a89d6d36275c17a4569f0cba203fe5b0350f155a7d5445e0ed419", "firstIndex": 3221224775},
{"blockNumber": 6767082, "blockId": "0xe10a96a7194f98bf262f0cb1cdfb4d3b9a2072139dfcbe3f1eb01419e353044e", "firstIndex": 3288334139},
{"blockNumber": 6886709, "blockId": "0x20f6a5d986913025ad5b6b6387d818e49a3caf838326f4002c1439ca61313be5", "firstIndex": 3355442979},
{"blockNumber": 6978948, "blockId": "0xd7c3024765245ec49e6a48b076d540bc91f57f2ccc125e17d60dd37bb669f843", "firstIndex": 3422551908},
{"blockNumber": 7098891, "blockId": "0x05114c037e1b4d69a46d74a974be9bce45e87ad2226a59b44dd17f98dd2fd0d1", "firstIndex": 3489659530},
{"blockNumber": 7203157, "blockId": "0xc0f610014fcd9f2850274b58179d474f0947676fd0639b2884316467c631811d", "firstIndex": 3556769512},
{"blockNumber": 7256735, "blockId": "0x0324c15b3b23fd82c2962dd167618e77e60ebeac5a2c87f672caddc9732337b3", "firstIndex": 3623876508},
{"blockNumber": 7307851, "blockId": "0x8e23280d1a3aec877d7758413ed20299d381aa43e7e2fc6f381ad96e8ff0acef", "firstIndex": 3690987098},
{"blockNumber": 7369389, "blockId": "0xbf6436eb2b88539945d6673141a14cb79ffc1e7db2b57176acf8e02ff3b6fcd3", "firstIndex": 3758096287},
{"blockNumber": 7445220, "blockId": "0x147619f74815283d834ac08ff494fb4791207b3949c64b2623f11ff6141ee7a7", "firstIndex": 3825204992},
{"blockNumber": 7511632, "blockId": "0x5094d64868f419e6ac3d253d19d5feda76564a0d56d7bbf8a822dff1c2261b30", "firstIndex": 3892314047},
{"blockNumber": 7557280, "blockId": "0x54aba9351a1ba51873645221aa7c991024da1fe468a600ddb6e2559351d9c28f", "firstIndex": 3959422859},
{"blockNumber": 7606304, "blockId": "0xbbe2fed08cf0b0ff2cb6ae9fd7257843f77a04a7d4cafb06d7a4bedea6ab0c98", "firstIndex": 4026531690}
]

View file

@ -0,0 +1,726 @@
// Copyright 2024 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package filtermaps
import (
"bytes"
"errors"
"fmt"
"os"
"slices"
"sync"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/lru"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/ethdb/leveldb"
"github.com/ethereum/go-ethereum/log"
)
// Sizes of the LRU caches used by FilterMaps.
const (
	cachedLastBlocks      = 1000 // last block of map pointers
	cachedLvPointers      = 1000 // first log value pointer of block pointers
	cachedBaseRows        = 100  // groups of base layer filter row data
	cachedFilterMaps      = 3    // complete filter maps (cached by map renderer)
	cachedRenderSnapshots = 8    // saved map renderer data at block boundaries
)
// FilterMaps is the in-memory representation of the log index structure that is
// responsible for building and updating the index according to the canonical
// chain.
//
// Note that FilterMaps implements the same data structure as proposed in EIP-7745
// without the tree hashing and consensus changes:
// https://eips.ethereum.org/EIPS/eip-7745
type FilterMaps struct {
	// If disabled is set, log indexing is fully disabled.
	// This is configured by the --history.logs.disable Geth flag.
	// We chose to implement disabling this way because it requires less special
	// case logic in eth/filters.
	disabled bool

	closeCh        chan struct{}  // closed by Stop to shut down the indexer
	closeWg        sync.WaitGroup // tracks the indexer goroutine started by Start
	history        uint64         // number of historical blocks to index (from Config.History)
	exportFileName string         // checkpoint JSON export target (from Config.ExportFileName)
	Params                        // embedded log index parameters (deriveFields is called in NewFilterMaps)

	db ethdb.KeyValueStore

	// fields written by the indexer and read by matcher backend. Indexer can
	// read them without a lock and write them under indexLock write lock.
	// Matcher backend can read them under indexLock read lock.
	indexLock    sync.RWMutex
	indexedRange filterMapsRange
	indexedView  *ChainView // always consistent with the log index

	// also accessed by indexer and matcher backend but no locking needed.
	filterMapCache *lru.Cache[uint32, filterMap]       // map index -> filter map
	lastBlockCache *lru.Cache[uint32, lastBlockOfMap]  // map index -> last block of map
	lvPointerCache *lru.Cache[uint64, uint64]          // block number -> first log value pointer
	baseRowsCache  *lru.Cache[uint64, [][]uint32]      // groups of base layer filter rows

	// the matchers set and the fields of FilterMapsMatcherBackend instances are
	// read and written both by exported functions and the indexer.
	// Note that if both indexLock and matchersLock needs to be locked then
	// indexLock should be locked first.
	matchersLock sync.Mutex
	matchers     map[*FilterMapsMatcherBackend]struct{}

	// fields only accessed by the indexer (no mutex required).
	renderSnapshots                                              *lru.Cache[uint64, *renderedMap]
	startedHeadIndex, startedTailIndex, startedTailUnindex       bool
	startedHeadIndexAt, startedTailIndexAt, startedTailUnindexAt time.Time
	loggedHeadIndex, loggedTailIndex                             bool
	lastLogHeadIndex, lastLogTailIndex                           time.Time
	ptrHeadIndex, ptrTailIndex, ptrTailUnindexBlock              uint64
	ptrTailUnindexMap                                            uint32

	targetView            *ChainView // chain view the indexer is trying to reach
	matcherSyncRequest    *FilterMapsMatcherBackend
	finalBlock, lastFinal uint64
	lastFinalEpoch        uint32
	stop                  bool
	targetViewCh          chan *ChainView
	finalBlockCh          chan uint64
	blockProcessingCh     chan bool
	blockProcessing       bool
	matcherSyncCh         chan *FilterMapsMatcherBackend
	waitIdleCh            chan chan bool
	tailRenderer          *mapRenderer

	// test hooks
	testDisableSnapshots, testSnapshotUsed bool
}
// filterMap is a full or partial in-memory representation of a filter map where
// rows are allowed to have a nil value meaning the row is not stored in the
// structure. Note that therefore a known empty row should be represented with
// a zero-length slice.
// It can be used as a memory cache or an overlay while preparing a batch of
// changes to the structure. In either case a nil value should be interpreted
// as transparent (uncached/unchanged).
type filterMap []FilterRow
// copy returns a shallow copy of the filter map: the slice of rows is
// duplicated while the row contents are shared with the original. This
// permits extending the rows further (which happens during map rendering)
// without affecting the validity of copies made for snapshots during
// rendering.
func (fm filterMap) copy() filterMap {
	dup := make(filterMap, len(fm))
	for i, row := range fm {
		dup[i] = row
	}
	return dup
}
// FilterRow encodes a single row of a filter map as a list of column indices.
// Note that the values are always stored in the same order as they were added
// and if the same column index is added twice, it is also stored twice.
// Order of column indices and potential duplications do not matter when searching
// for a value but leaving the original order makes reverting to a previous state
// simpler.
type FilterRow []uint32
// Equal returns true if the given filter rows are equivalent, i.e. they
// contain the same column indices in the same order.
func (a FilterRow) Equal(b FilterRow) bool {
	if len(a) != len(b) {
		return false
	}
	for i, v := range a {
		if v != b[i] {
			return false
		}
	}
	return true
}
// filterMapsRange describes the rendered range of filter maps and the range
// of fully rendered blocks.
type filterMapsRange struct {
	initialized        bool   // false means the index is empty (nothing rendered)
	headBlockIndexed   bool   // true if the current chain head is fully indexed
	headBlockDelimiter uint64 // zero if afterLastIndexedBlock != targetBlockNumber

	// if initialized then all maps are rendered between firstRenderedMap and
	// afterLastRenderedMap-1
	firstRenderedMap, afterLastRenderedMap uint32

	// if tailPartialEpoch > 0 then maps between firstRenderedMap-mapsPerEpoch and
	// firstRenderedMap-mapsPerEpoch+tailPartialEpoch-1 are rendered
	tailPartialEpoch uint32

	// if initialized then all log values belonging to blocks between
	// firstIndexedBlock and afterLastIndexedBlock are fully rendered
	// blockLvPointers are available between firstIndexedBlock and afterLastIndexedBlock-1
	firstIndexedBlock, afterLastIndexedBlock uint64
}
// hasIndexedBlocks returns true if the range has at least one fully indexed block.
func (fmr *filterMapsRange) hasIndexedBlocks() bool {
	if !fmr.initialized {
		return false
	}
	return fmr.afterLastIndexedBlock > fmr.firstIndexedBlock
}
// lastBlockOfMap is used for caching the (number, id) pairs belonging to the
// last block of each map.
type lastBlockOfMap struct {
	number uint64      // number of the last block of the map
	id     common.Hash // id (hash) of the same block
}
// Config contains the configuration options for NewFilterMaps.
type Config struct {
	History  uint64 // number of historical blocks to index
	Disabled bool   // disables indexing completely

	// This option enables the checkpoint JSON file generator.
	// If set, the given file will be updated with checkpoint information.
	ExportFileName string
}
// NewFilterMaps creates a new FilterMaps instance, restoring any previously
// indexed range from the database. Note that the indexer goroutine is not
// launched here; Start has to be called separately.
//
// The restored indexed view is truncated to the longest prefix of initView
// that is still consistent with the stored maps; if the chain head is no
// longer covered by the index, the head-indexed state is cleared accordingly.
func NewFilterMaps(db ethdb.KeyValueStore, initView *ChainView, params Params, config Config) *FilterMaps {
	// Load the previously rendered range; a read error is logged but treated
	// as an uninitialized index.
	rs, initialized, err := rawdb.ReadFilterMapsRange(db)
	if err != nil {
		log.Error("Error reading log index range", "error", err)
	}
	params.deriveFields()
	f := &FilterMaps{
		db:                db,
		closeCh:           make(chan struct{}),
		waitIdleCh:        make(chan chan bool),
		targetViewCh:      make(chan *ChainView, 1),
		finalBlockCh:      make(chan uint64, 1),
		blockProcessingCh: make(chan bool, 1),
		history:           config.History,
		disabled:          config.Disabled,
		exportFileName:    config.ExportFileName,
		Params:            params,
		indexedRange: filterMapsRange{
			initialized:           initialized,
			headBlockIndexed:      rs.HeadBlockIndexed,
			headBlockDelimiter:    rs.HeadBlockDelimiter,
			firstIndexedBlock:     rs.FirstIndexedBlock,
			afterLastIndexedBlock: rs.AfterLastIndexedBlock,
			firstRenderedMap:      rs.FirstRenderedMap,
			afterLastRenderedMap:  rs.AfterLastRenderedMap,
			tailPartialEpoch:      rs.TailPartialEpoch,
		},
		matcherSyncCh:   make(chan *FilterMapsMatcherBackend),
		matchers:        make(map[*FilterMapsMatcherBackend]struct{}),
		filterMapCache:  lru.NewCache[uint32, filterMap](cachedFilterMaps),
		lastBlockCache:  lru.NewCache[uint32, lastBlockOfMap](cachedLastBlocks),
		lvPointerCache:  lru.NewCache[uint64, uint64](cachedLvPointers),
		baseRowsCache:   lru.NewCache[uint64, [][]uint32](cachedBaseRows),
		renderSnapshots: lru.NewCache[uint64, *renderedMap](cachedRenderSnapshots),
	}
	// Set initial indexer target.
	f.targetView = initView
	if f.indexedRange.initialized {
		// Truncate the indexed view to the part of the chain that is still
		// consistent with the stored maps.
		f.indexedView = f.initChainView(f.targetView)
		f.indexedRange.headBlockIndexed = f.indexedRange.afterLastIndexedBlock == f.indexedView.headNumber+1
		if !f.indexedRange.headBlockIndexed {
			// The head is not indexed anymore, so the stored delimiter is stale.
			f.indexedRange.headBlockDelimiter = 0
		}
	}
	if f.indexedRange.hasIndexedBlocks() {
		log.Info("Initialized log indexer",
			"first block", f.indexedRange.firstIndexedBlock, "last block", f.indexedRange.afterLastIndexedBlock-1,
			"first map", f.indexedRange.firstRenderedMap, "last map", f.indexedRange.afterLastRenderedMap-1,
			"head indexed", f.indexedRange.headBlockIndexed)
	}
	return f
}
// Start starts the indexer. If the previous target head was fully rendered,
// the last filter map is first loaded from the database as a snapshot so
// rendering can resume at the head.
func (f *FilterMaps) Start() {
	headRendered := f.indexedRange.initialized && f.indexedRange.headBlockIndexed &&
		f.indexedRange.firstRenderedMap < f.indexedRange.afterLastRenderedMap
	if headRendered && !f.testDisableSnapshots {
		if err := f.loadHeadSnapshot(); err != nil {
			log.Error("Could not load head filter map snapshot", "error", err)
		}
	}
	f.closeWg.Add(1)
	go f.indexerLoop()
}
// Stop ensures that the indexer is fully stopped before returning.
func (f *FilterMaps) Stop() {
	close(f.closeCh)  // signal shutdown to the indexer
	f.closeWg.Wait()  // block until the indexer goroutine has exited
}
// initChainView returns a chain view consistent with both the current target
// view and the current state of the log index as found in the database, based
// on the last block of stored maps.
// Note that the returned view might be shorter than the existing index if
// the latest maps are not consistent with targetView.
func (f *FilterMaps) initChainView(chainView *ChainView) *ChainView {
	mapIndex := f.indexedRange.afterLastRenderedMap
	for {
		var ok bool
		// Walk backwards over the stored map boundaries, newest first.
		mapIndex, ok = f.lastMapBoundaryBefore(mapIndex)
		if !ok {
			break
		}
		lastBlockNumber, lastBlockId, err := f.getLastBlockOfMap(mapIndex)
		if err != nil {
			log.Error("Could not initialize indexed chain view", "error", err)
			break
		}
		// If the last block of this map is still part of chainView then the
		// index up to this block is consistent with it.
		if lastBlockNumber <= chainView.headNumber && chainView.getBlockId(lastBlockNumber) == lastBlockId {
			return chainView.limitedView(lastBlockNumber)
		}
	}
	// No stored map boundary matched chainView; start from an empty view.
	return chainView.limitedView(0)
}
// reset un-initializes the FilterMaps structure and removes all related data from
// the database. The function returns true if everything was successfully removed.
func (f *FilterMaps) reset() bool {
	// Clear the in-memory state under the index write lock so that matcher
	// backends never observe a partially reset index.
	f.indexLock.Lock()
	f.indexedRange = filterMapsRange{}
	f.indexedView = nil
	f.filterMapCache.Purge()
	f.renderSnapshots.Purge()
	f.lastBlockCache.Purge()
	f.lvPointerCache.Purge()
	f.baseRowsCache.Purge()
	f.indexLock.Unlock()
	// deleting the range first ensures that resetDb will be called again at next
	// startup and any leftover data will be removed even if it cannot finish now.
	rawdb.DeleteFilterMapsRange(f.db)
	return f.removeDbWithPrefix([]byte(rawdb.FilterMapsPrefix), "Resetting log index database")
}
// init initializes an empty log index according to the current targetView.
// If known checkpoints match the target chain then the index is bootstrapped
// from the most recent matching epoch boundary, so that only history after
// that boundary needs to be rendered.
func (f *FilterMaps) init() error {
	f.indexLock.Lock()
	defer f.indexLock.Unlock()
	// Select the checkpoint list with the longest prefix of epoch boundaries
	// that belong to the target chain view.
	var bestIdx, bestLen int
	for idx, checkpointList := range checkpoints {
		// binary search for the last matching epoch head
		min, max := 0, len(checkpointList)
		for min < max {
			// Upper-biased midpoint; cp is the candidate epoch head.
			mid := (min + max + 1) / 2
			cp := checkpointList[mid-1]
			if cp.BlockNumber <= f.targetView.headNumber && f.targetView.getBlockId(cp.BlockNumber) == cp.BlockId {
				min = mid
			} else {
				max = mid - 1
			}
		}
		if max > bestLen {
			bestIdx, bestLen = idx, max
		}
	}
	// Store last-block and log value pointers for each matching epoch boundary.
	batch := f.db.NewBatch()
	for epoch := range bestLen {
		cp := checkpoints[bestIdx][epoch]
		f.storeLastBlockOfMap(batch, (uint32(epoch+1)<<f.logMapsPerEpoch)-1, cp.BlockNumber, cp.BlockId)
		f.storeBlockLvPointer(batch, cp.BlockNumber, cp.FirstIndex)
	}
	fmr := filterMapsRange{
		initialized: true,
	}
	if bestLen > 0 {
		// Start with an empty range (first == afterLast) positioned right
		// after the last matching checkpoint.
		cp := checkpoints[bestIdx][bestLen-1]
		fmr.firstIndexedBlock = cp.BlockNumber + 1
		fmr.afterLastIndexedBlock = cp.BlockNumber + 1
		fmr.firstRenderedMap = uint32(bestLen) << f.logMapsPerEpoch
		fmr.afterLastRenderedMap = uint32(bestLen) << f.logMapsPerEpoch
	}
	f.setRange(batch, f.targetView, fmr)
	return batch.Write()
}
// removeDbWithPrefix removes data with the given prefix from the database and
// returns true if everything was successfully removed.
// Deletion is retried as long as the backend reports that too many keys were
// involved in a single pass; the retry loop aborts on shutdown.
func (f *FilterMaps) removeDbWithPrefix(prefix []byte, action string) bool {
	it := f.db.NewIterator(prefix, nil)
	hasData := it.Next()
	it.Release()
	if !hasData {
		return true
	}
	// End of the deleted key range: the prefix with its last byte incremented.
	// NOTE(review): assumes the last prefix byte never overflows (0xff) — confirm for all used prefixes.
	end := bytes.Clone(prefix)
	end[len(end)-1]++
	start := time.Now()
	var retry bool
	for {
		err := f.db.DeleteRange(prefix, end)
		if err == nil {
			log.Info(action+" finished", "elapsed", time.Since(start))
			return true
		}
		if err != leveldb.ErrTooManyKeys {
			log.Error(action+" failed", "error", err)
			return false
		}
		// Too many keys for one pass; bail out if shutting down, otherwise retry.
		select {
		case <-f.closeCh:
			return false
		default:
		}
		if !retry {
			log.Info(action + " in progress...")
			retry = true
		}
	}
}
// setRange updates the indexed chain view and covered range and also adds the
// changes to the given batch.
// Note that this function assumes that the index write lock is being held.
func (f *FilterMaps) setRange(batch ethdb.KeyValueWriter, newView *ChainView, newRange filterMapsRange) {
	f.indexedView = newView
	f.indexedRange = newRange
	f.updateMatchersValidRange()
	if newRange.initialized {
		// Persist the new range so it survives a restart.
		rs := rawdb.FilterMapsRange{
			HeadBlockIndexed:      newRange.headBlockIndexed,
			HeadBlockDelimiter:    newRange.headBlockDelimiter,
			FirstIndexedBlock:     newRange.firstIndexedBlock,
			AfterLastIndexedBlock: newRange.afterLastIndexedBlock,
			FirstRenderedMap:      newRange.firstRenderedMap,
			AfterLastRenderedMap:  newRange.afterLastRenderedMap,
			TailPartialEpoch:      newRange.tailPartialEpoch,
		}
		rawdb.WriteFilterMapsRange(batch, rs)
	} else {
		// An uninitialized range marks the index as reset; drop the stored range.
		rawdb.DeleteFilterMapsRange(batch)
	}
}
// getLogByLvIndex returns the log at the given log value index. If the index does
// not point to the first log value entry of a log then no log and no error are
// returned as this can happen when the log value index was a false positive.
// Note that this function assumes that the log index structure is consistent
// with the canonical chain at the point where the given log value index points.
// If this is not the case then an invalid result or an error may be returned.
// Note that this function assumes that the indexer read lock is being held when
// called from outside the indexerLoop goroutine.
func (f *FilterMaps) getLogByLvIndex(lvIndex uint64) (*types.Log, error) {
	mapIndex := uint32(lvIndex >> f.logValuesPerMap)
	if mapIndex < f.indexedRange.firstRenderedMap || mapIndex >= f.indexedRange.afterLastRenderedMap {
		// The containing map is not rendered; cannot be a true match.
		return nil, nil
	}
	// find possible block range based on map to block pointers
	lastBlockNumber, _, err := f.getLastBlockOfMap(mapIndex)
	if err != nil {
		return nil, fmt.Errorf("failed to retrieve last block of map %d containing searched log value index %d: %v", mapIndex, lvIndex, err)
	}
	var firstBlockNumber uint64
	if mapIndex > 0 {
		firstBlockNumber, _, err = f.getLastBlockOfMap(mapIndex - 1)
		if err != nil {
			return nil, fmt.Errorf("failed to retrieve last block of map %d before searched log value index %d: %v", mapIndex, lvIndex, err)
		}
	}
	if firstBlockNumber < f.indexedRange.firstIndexedBlock {
		firstBlockNumber = f.indexedRange.firstIndexedBlock
	}
	// find block with binary search based on block to log value index pointers
	for firstBlockNumber < lastBlockNumber {
		// Upper-biased midpoint: keep the last block whose pointer is <= lvIndex.
		midBlockNumber := (firstBlockNumber + lastBlockNumber + 1) / 2
		midLvPointer, err := f.getBlockLvPointer(midBlockNumber)
		if err != nil {
			return nil, fmt.Errorf("failed to retrieve log value pointer of block %d while binary searching log value index %d: %v", midBlockNumber, lvIndex, err)
		}
		if lvIndex < midLvPointer {
			lastBlockNumber = midBlockNumber - 1
		} else {
			firstBlockNumber = midBlockNumber
		}
	}
	// get block receipts
	receipts := f.indexedView.getReceipts(firstBlockNumber)
	if receipts == nil {
		// Note: err is always nil at this point (every earlier failure has
		// already returned), so it must not be appended to the message.
		return nil, fmt.Errorf("failed to retrieve receipts for block %d containing searched log value index %d", firstBlockNumber, lvIndex)
	}
	lvPointer, err := f.getBlockLvPointer(firstBlockNumber)
	if err != nil {
		return nil, fmt.Errorf("failed to retrieve log value pointer of block %d containing searched log value index %d: %v", firstBlockNumber, lvIndex, err)
	}
	// iterate through receipts to find the exact log starting at lvIndex
	for _, receipt := range receipts {
		for _, log := range receipt.Logs {
			if lvPointer > lvIndex {
				// lvIndex does not point to the first log value (address value)
				// generated by a log as true matches should always do, so it
				// is considered a false positive (no log and no error returned).
				return nil, nil
			}
			if lvPointer == lvIndex {
				return log, nil // potential match
			}
			// Each log occupies one address value plus one value per topic.
			lvPointer += uint64(len(log.Topics) + 1)
		}
	}
	return nil, nil
}
// getFilterMap fetches an entire filter map from the database.
// Results are cached; on a miss every row of the map is loaded (including
// extended rows) and the assembled map is added to the cache.
func (f *FilterMaps) getFilterMap(mapIndex uint32) (filterMap, error) {
	if fm, ok := f.filterMapCache.Get(mapIndex); ok {
		return fm, nil
	}
	fm := make(filterMap, f.mapHeight)
	for rowIndex := range fm {
		var err error
		fm[rowIndex], err = f.getFilterMapRow(mapIndex, uint32(rowIndex), false)
		if err != nil {
			return nil, fmt.Errorf("failed to load filter map %d from database: %v", mapIndex, err)
		}
	}
	f.filterMapCache.Add(mapIndex, fm)
	return fm, nil
}
// getFilterMapRow fetches the given filter map row. If baseLayerOnly is true
// then only the first baseRowLength entries are returned.
// Base rows are stored and cached per base row group while the extended part
// is stored separately per map.
func (f *FilterMaps) getFilterMapRow(mapIndex, rowIndex uint32, baseLayerOnly bool) (FilterRow, error) {
	// mapIndex&-f.baseRowGroupLength is the first map index of the group.
	baseMapRowIndex := f.mapRowIndex(mapIndex&-f.baseRowGroupLength, rowIndex)
	baseRows, ok := f.baseRowsCache.Get(baseMapRowIndex)
	if !ok {
		var err error
		baseRows, err = rawdb.ReadFilterMapBaseRows(f.db, baseMapRowIndex, f.baseRowGroupLength, f.logMapWidth)
		if err != nil {
			return nil, fmt.Errorf("failed to retrieve filter map %d base rows %d: %v", mapIndex, rowIndex, err)
		}
		f.baseRowsCache.Add(baseMapRowIndex, baseRows)
	}
	// Select this map's base row within the group.
	baseRow := baseRows[mapIndex&(f.baseRowGroupLength-1)]
	if baseLayerOnly {
		return baseRow, nil
	}
	extRow, err := rawdb.ReadFilterMapExtRow(f.db, f.mapRowIndex(mapIndex, rowIndex), f.logMapWidth)
	if err != nil {
		return nil, fmt.Errorf("failed to retrieve filter map %d extended row %d: %v", mapIndex, rowIndex, err)
	}
	return FilterRow(append(baseRow, extRow...)), nil
}
// storeFilterMapRows stores a set of filter map rows at the corresponding map
// indices and a shared row index. The (sorted) indices are processed in
// consecutive runs belonging to the same base row group, one group write per
// run.
func (f *FilterMaps) storeFilterMapRows(batch ethdb.Batch, mapIndices []uint32, rowIndex uint32, rows []FilterRow) error {
	for start := 0; start < len(mapIndices); {
		// Find the end of the run sharing the base row group of mapIndices[start].
		group := mapIndices[start] & -f.baseRowGroupLength
		end := start + 1
		for end < len(mapIndices) && mapIndices[end]&-f.baseRowGroupLength == group {
			end++
		}
		if err := f.storeFilterMapRowsOfGroup(batch, mapIndices[start:end], rowIndex, rows[start:end]); err != nil {
			return err
		}
		start = end
	}
	return nil
}
// storeFilterMapRowsOfGroup stores a set of filter map rows at map indices
// belonging to the same base row group.
// Each row is split into a base part (first baseRowLength entries, stored
// together with the rest of the group) and an optional extended part (stored
// individually per map).
func (f *FilterMaps) storeFilterMapRowsOfGroup(batch ethdb.Batch, mapIndices []uint32, rowIndex uint32, rows []FilterRow) error {
	baseMapIndex := mapIndices[0] & -f.baseRowGroupLength
	baseMapRowIndex := f.mapRowIndex(baseMapIndex, rowIndex)
	var baseRows [][]uint32
	if uint32(len(mapIndices)) != f.baseRowGroupLength { // skip base rows read if all rows are replaced
		var ok bool
		baseRows, ok = f.baseRowsCache.Get(baseMapRowIndex)
		if !ok {
			var err error
			baseRows, err = rawdb.ReadFilterMapBaseRows(f.db, baseMapRowIndex, f.baseRowGroupLength, f.logMapWidth)
			if err != nil {
				return fmt.Errorf("failed to retrieve filter map %d base rows %d for modification: %v", mapIndices[0]&-f.baseRowGroupLength, rowIndex, err)
			}
		}
	} else {
		// All rows of the group are replaced; start from an empty group.
		baseRows = make([][]uint32, f.baseRowGroupLength)
	}
	for i, mapIndex := range mapIndices {
		if mapIndex&-f.baseRowGroupLength != baseMapIndex {
			panic("mapIndices are not in the same base row group")
		}
		// Split the row into base and extended parts at baseRowLength.
		baseRow := []uint32(rows[i])
		var extRow FilterRow
		if uint32(len(rows[i])) > f.baseRowLength {
			extRow = baseRow[f.baseRowLength:]
			baseRow = baseRow[:f.baseRowLength]
		}
		baseRows[mapIndex&(f.baseRowGroupLength-1)] = baseRow
		rawdb.WriteFilterMapExtRow(batch, f.mapRowIndex(mapIndex, rowIndex), extRow, f.logMapWidth)
	}
	// Update the cache before writing the whole group back to the database.
	f.baseRowsCache.Add(baseMapRowIndex, baseRows)
	rawdb.WriteFilterMapBaseRows(batch, baseMapRowIndex, baseRows, f.logMapWidth)
	return nil
}
// mapRowIndex calculates the unified storage index where the given row of the
// given map is stored. Note that this indexing scheme is the same as the one
// proposed in EIP-7745 for tree-hashing the filter map structure and for the
// same data proximity reasons it is also suitable for database representation.
// See also:
// https://eips.ethereum.org/EIPS/eip-7745#hash-tree-structure
func (f *FilterMaps) mapRowIndex(mapIndex, rowIndex uint32) uint64 {
	// Decompose the map index into its epoch and its position inside the epoch.
	epoch := uint64(mapIndex >> f.logMapsPerEpoch)
	subIndex := uint64(mapIndex & (f.mapsPerEpoch - 1))
	// Rows of the same epoch and row index are laid out next to each other.
	rowGroup := epoch<<f.logMapHeight + uint64(rowIndex)
	return rowGroup<<f.logMapsPerEpoch + subIndex
}
// getBlockLvPointer returns the starting log value index where the log values
// generated by the given block are located. If blockNumber is beyond the current
// head then the first unoccupied log value index is returned.
// Note that this function assumes that the indexer read lock is being held when
// called from outside the indexerLoop goroutine.
func (f *FilterMaps) getBlockLvPointer(blockNumber uint64) (uint64, error) {
	if blockNumber >= f.indexedRange.afterLastIndexedBlock && f.indexedRange.headBlockIndexed {
		// Past the indexed head: the head delimiter is the first free index.
		return f.indexedRange.headBlockDelimiter, nil
	}
	if lvPointer, ok := f.lvPointerCache.Get(blockNumber); ok {
		return lvPointer, nil
	}
	lvPointer, err := rawdb.ReadBlockLvPointer(f.db, blockNumber)
	if err != nil {
		return 0, fmt.Errorf("failed to retrieve log value pointer of block %d: %v", blockNumber, err)
	}
	f.lvPointerCache.Add(blockNumber, lvPointer)
	return lvPointer, nil
}
// storeBlockLvPointer stores the starting log value index where the log values
// generated by the given block are located. The cache is updated alongside the
// batched database write.
func (f *FilterMaps) storeBlockLvPointer(batch ethdb.Batch, blockNumber, lvPointer uint64) {
	f.lvPointerCache.Add(blockNumber, lvPointer)
	rawdb.WriteBlockLvPointer(batch, blockNumber, lvPointer)
}
// deleteBlockLvPointer deletes the starting log value index where the log values
// generated by the given block are located, from both cache and database batch.
func (f *FilterMaps) deleteBlockLvPointer(batch ethdb.Batch, blockNumber uint64) {
	f.lvPointerCache.Remove(blockNumber)
	rawdb.DeleteBlockLvPointer(batch, blockNumber)
}
// getLastBlockOfMap returns the number and id of the block that generated the
// last log value entry of the given map. Results are cached.
func (f *FilterMaps) getLastBlockOfMap(mapIndex uint32) (uint64, common.Hash, error) {
	if lastBlock, ok := f.lastBlockCache.Get(mapIndex); ok {
		return lastBlock.number, lastBlock.id, nil
	}
	number, id, err := rawdb.ReadFilterMapLastBlock(f.db, mapIndex)
	if err != nil {
		return 0, common.Hash{}, fmt.Errorf("failed to retrieve last block of map %d: %v", mapIndex, err)
	}
	f.lastBlockCache.Add(mapIndex, lastBlockOfMap{number: number, id: id})
	return number, id, nil
}
// storeLastBlockOfMap stores the number and id of the block that generated the
// last log value entry of the given map, updating the cache alongside the
// batched database write.
func (f *FilterMaps) storeLastBlockOfMap(batch ethdb.Batch, mapIndex uint32, number uint64, id common.Hash) {
	f.lastBlockCache.Add(mapIndex, lastBlockOfMap{number: number, id: id})
	rawdb.WriteFilterMapLastBlock(batch, mapIndex, number, id)
}
// deleteLastBlockOfMap deletes the stored last-block pointer of the given map,
// from both cache and database batch.
func (f *FilterMaps) deleteLastBlockOfMap(batch ethdb.Batch, mapIndex uint32) {
	f.lastBlockCache.Remove(mapIndex)
	rawdb.DeleteFilterMapLastBlock(batch, mapIndex)
}
// deleteTailEpoch deletes index data from the earliest, either fully or partially
// indexed epoch. The last block pointer for the last map of the epoch and the
// corresponding block log value pointer are retained as these are always assumed
// to be available for each epoch.
func (f *FilterMaps) deleteTailEpoch(epoch uint32) error {
	f.indexLock.Lock()
	defer f.indexLock.Unlock()
	firstMap := epoch << f.logMapsPerEpoch
	lastBlock, _, err := f.getLastBlockOfMap(firstMap + f.mapsPerEpoch - 1)
	if err != nil {
		return fmt.Errorf("failed to retrieve last block of deleted epoch %d: %v", epoch, err)
	}
	var firstBlock uint64
	if epoch > 0 {
		firstBlock, _, err = f.getLastBlockOfMap(firstMap - 1)
		if err != nil {
			return fmt.Errorf("failed to retrieve last block before deleted epoch %d: %v", epoch, err)
		}
		firstBlock++
	}
	// Adjust the indexed range: either the fully indexed tail epoch is cut off
	// or a partially rendered tail epoch is dropped.
	fmr := f.indexedRange
	if f.indexedRange.firstRenderedMap == firstMap &&
		f.indexedRange.afterLastRenderedMap > firstMap+f.mapsPerEpoch &&
		f.indexedRange.tailPartialEpoch == 0 {
		// Deleting the first fully rendered epoch.
		fmr.firstRenderedMap = firstMap + f.mapsPerEpoch
		fmr.firstIndexedBlock = lastBlock + 1
	} else if f.indexedRange.firstRenderedMap == firstMap+f.mapsPerEpoch {
		// Deleting the partially rendered epoch before the first full one.
		fmr.tailPartialEpoch = 0
	} else {
		return errors.New("invalid tail epoch number")
	}
	f.setRange(f.db, f.indexedView, fmr)
	// Remove rows, last-block pointers and log value pointers of the epoch,
	// keeping the epoch boundary entries.
	rawdb.DeleteFilterMapRows(f.db, f.mapRowIndex(firstMap, 0), f.mapRowIndex(firstMap+f.mapsPerEpoch, 0))
	for mapIndex := firstMap; mapIndex < firstMap+f.mapsPerEpoch; mapIndex++ {
		f.filterMapCache.Remove(mapIndex)
	}
	rawdb.DeleteFilterMapLastBlocks(f.db, firstMap, firstMap+f.mapsPerEpoch-1) // keep last entry
	for mapIndex := firstMap; mapIndex < firstMap+f.mapsPerEpoch-1; mapIndex++ {
		f.lastBlockCache.Remove(mapIndex)
	}
	rawdb.DeleteBlockLvPointers(f.db, firstBlock, lastBlock) // keep last entry
	for blockNumber := firstBlock; blockNumber < lastBlock; blockNumber++ {
		f.lvPointerCache.Remove(blockNumber)
	}
	return nil
}
// exportCheckpoints exports epoch checkpoints in the format used by checkpoints.go.
// Only epochs entirely before the finalized block are exported; the export is
// skipped if the epoch count has not grown since the last export.
func (f *FilterMaps) exportCheckpoints() {
	finalLvPtr, err := f.getBlockLvPointer(f.finalBlock + 1)
	if err != nil {
		log.Error("Error fetching log value pointer of finalized block", "block", f.finalBlock, "error", err)
		return
	}
	// Number of epochs fully covered by finalized log values.
	epochCount := uint32(finalLvPtr >> (f.logValuesPerMap + f.logMapsPerEpoch))
	if epochCount == f.lastFinalEpoch {
		return
	}
	w, err := os.Create(f.exportFileName)
	if err != nil {
		log.Error("Error creating checkpoint export file", "name", f.exportFileName, "error", err)
		return
	}
	defer w.Close()
	// NOTE(review): WriteString errors are not checked; a partial/failed write
	// would go unnoticed until the file is consumed.
	log.Info("Exporting log index checkpoints", "epochs", epochCount, "file", f.exportFileName)
	w.WriteString("[\n")
	comma := ","
	for epoch := uint32(0); epoch < epochCount; epoch++ {
		// Each checkpoint is the last block of the epoch's last map.
		lastBlock, lastBlockId, err := f.getLastBlockOfMap((epoch+1)<<f.logMapsPerEpoch - 1)
		if err != nil {
			log.Error("Error fetching last block of epoch", "epoch", epoch, "error", err)
			return
		}
		lvPtr, err := f.getBlockLvPointer(lastBlock)
		if err != nil {
			log.Error("Error fetching log value pointer of last block", "block", lastBlock, "error", err)
			return
		}
		// No trailing comma after the final entry.
		if epoch == epochCount-1 {
			comma = ""
		}
		w.WriteString(fmt.Sprintf("{\"blockNumber\": %d, \"blockId\": \"0x%064x\", \"firstIndex\": %d}%s\n", lastBlock, lastBlockId, lvPtr, comma))
	}
	w.WriteString("]\n")
	f.lastFinalEpoch = epochCount
}

395
core/filtermaps/indexer.go Normal file
View file

@ -0,0 +1,395 @@
// Copyright 2024 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package filtermaps
import (
"math"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/log"
)
// Timing parameters for indexer progress logging.
const (
	logFrequency = time.Second * 20 // log info frequency during long indexing/unindexing process
	headLogDelay = time.Second      // head indexing log info delay (do not log if finished faster)
)
// indexerLoop initializes and updates the log index structure according to the
// current targetView. It runs until Stop is called, alternating between head
// indexing, tail indexing/unindexing and waiting for relevant events.
func (f *FilterMaps) indexerLoop() {
	defer f.closeWg.Done()
	if f.disabled {
		// Indexing disabled: wipe any existing index data and exit.
		f.reset()
		return
	}
	log.Info("Started log indexer")
	for !f.stop {
		if !f.indexedRange.initialized {
			if err := f.init(); err != nil {
				log.Error("Error initializing log index", "error", err)
				f.waitForEvent()
				continue
			}
		}
		if !f.targetHeadIndexed() {
			// Head not rendered yet; render it or wait for a new event on failure.
			if !f.tryIndexHead() {
				f.waitForEvent()
			}
		} else {
			// Head is up to date: export checkpoints if the finalized block
			// changed, then work on the tail.
			if f.finalBlock != f.lastFinal {
				if f.exportFileName != "" {
					f.exportCheckpoints()
				}
				f.lastFinal = f.finalBlock
			}
			if f.tryIndexTail() && f.tryUnindexTail() {
				f.waitForEvent()
			}
		}
	}
}
// SetTargetView sets a new target chain view for the indexer to render.
// Note that SetTargetView never blocks: if a previously sent view has not been
// consumed yet it is drained first, so only the latest view is delivered.
func (f *FilterMaps) SetTargetView(targetView *ChainView) {
	if targetView == nil {
		panic("nil targetView")
	}
	for {
		select {
		case <-f.targetViewCh:
			// Discard the stale, not-yet-consumed view and retry the send.
		case f.targetViewCh <- targetView:
			return
		}
	}
}
// SetFinalBlock sets the finalized block number used for exporting checkpoints.
// Note that SetFinalBlock never blocks: a stale, not-yet-consumed value is
// drained before the new one is sent.
func (f *FilterMaps) SetFinalBlock(finalBlock uint64) {
	for {
		select {
		case <-f.finalBlockCh:
			// Discard the stale value and retry the send.
		case f.finalBlockCh <- finalBlock:
			return
		}
	}
}
// SetBlockProcessing sets the block processing flag that temporarily suspends
// log index rendering.
// Note that SetBlockProcessing never blocks: a stale, not-yet-consumed flag is
// drained before the new one is sent.
func (f *FilterMaps) SetBlockProcessing(blockProcessing bool) {
	for {
		select {
		case <-f.blockProcessingCh:
			// Discard the stale flag and retry the send.
		case f.blockProcessingCh <- blockProcessing:
			return
		}
	}
}
// WaitIdle blocks until the indexer is in an idle state while synced up to the
// latest targetView.
func (f *FilterMaps) WaitIdle() {
	if f.disabled {
		// Indexer loop exits immediately when disabled; just wait for it.
		f.closeWg.Wait()
		return
	}
	for {
		// The indexer replies on ch whether it is currently idle; retry until
		// it reports true.
		ch := make(chan bool)
		f.waitIdleCh <- ch
		if <-ch {
			return
		}
	}
}
// waitForEvent blocks until an event happens that the indexer might react to.
// It keeps processing events in blocking mode while there is nothing to do
// (block processing in progress or the target head already indexed).
func (f *FilterMaps) waitForEvent() {
	for !f.stop && (f.blockProcessing || f.targetHeadIndexed()) {
		f.processSingleEvent(true)
	}
}
// processEvents processes all events, blocking only if a block processing is
// happening and indexing should be suspended.
func (f *FilterMaps) processEvents() {
	for !f.stop && f.processSingleEvent(f.blockProcessing) {
	}
}
// processSingleEvent processes a single event either in a blocking or
// non-blocking manner. It returns true if an event was processed (always true
// in blocking mode).
func (f *FilterMaps) processSingleEvent(blocking bool) bool {
	// A pending matcher sync request can be answered as soon as the target
	// head is indexed.
	if f.matcherSyncRequest != nil && f.targetHeadIndexed() {
		f.matcherSyncRequest.synced()
		f.matcherSyncRequest = nil
	}
	if blocking {
		select {
		case targetView := <-f.targetViewCh:
			f.setTargetView(targetView)
		case f.finalBlock = <-f.finalBlockCh:
		case f.matcherSyncRequest = <-f.matcherSyncCh:
		case f.blockProcessing = <-f.blockProcessingCh:
		case <-f.closeCh:
			f.stop = true
		case ch := <-f.waitIdleCh:
			// Pick up a pending target view first so the idle answer reflects
			// the latest known target.
			select {
			case targetView := <-f.targetViewCh:
				f.setTargetView(targetView)
			default:
			}
			ch <- !f.blockProcessing && f.targetHeadIndexed()
		}
	} else {
		select {
		case targetView := <-f.targetViewCh:
			f.setTargetView(targetView)
		case f.finalBlock = <-f.finalBlockCh:
		case f.matcherSyncRequest = <-f.matcherSyncCh:
		case f.blockProcessing = <-f.blockProcessingCh:
		case <-f.closeCh:
			f.stop = true
		default:
			// No pending event.
			return false
		}
	}
	return true
}
// setTargetView updates the target chain view of the indexer.
func (f *FilterMaps) setTargetView(targetView *ChainView) {
	f.targetView = targetView
}
// tryIndexHead tries to render head maps according to the current targetView
// and returns true if successful.
func (f *FilterMaps) tryIndexHead() bool {
	headRenderer, err := f.renderMapsBefore(math.MaxUint32)
	if err != nil {
		log.Error("Error creating log index head renderer", "error", err)
		return false
	}
	if headRenderer == nil {
		// Nothing to render; head is already up to date.
		return true
	}
	// Record progress-logging baselines on the first attempt of this run.
	if !f.startedHeadIndex {
		f.lastLogHeadIndex = time.Now()
		f.startedHeadIndexAt = f.lastLogHeadIndex
		f.startedHeadIndex = true
		f.ptrHeadIndex = f.indexedRange.afterLastIndexedBlock
	}
	if _, err := headRenderer.run(func() bool {
		// Stop callback: keep processing events, abort on shutdown.
		f.processEvents()
		return f.stop
	}, func() {
		// Progress callback: unindex the tail opportunistically and emit
		// throttled progress logs.
		f.tryUnindexTail()
		if f.indexedRange.hasIndexedBlocks() && f.indexedRange.afterLastIndexedBlock >= f.ptrHeadIndex &&
			((!f.loggedHeadIndex && time.Since(f.startedHeadIndexAt) > headLogDelay) ||
				time.Since(f.lastLogHeadIndex) > logFrequency) {
			log.Info("Log index head rendering in progress",
				"first block", f.indexedRange.firstIndexedBlock, "last block", f.indexedRange.afterLastIndexedBlock-1,
				"processed", f.indexedRange.afterLastIndexedBlock-f.ptrHeadIndex,
				"remaining", f.indexedView.headNumber+1-f.indexedRange.afterLastIndexedBlock,
				"elapsed", common.PrettyDuration(time.Since(f.startedHeadIndexAt)))
			f.loggedHeadIndex = true
			f.lastLogHeadIndex = time.Now()
		}
	}); err != nil {
		log.Error("Log index head rendering failed", "error", err)
		return false
	}
	// Only log completion if progress was logged during the run.
	if f.loggedHeadIndex {
		log.Info("Log index head rendering finished",
			"first block", f.indexedRange.firstIndexedBlock, "last block", f.indexedRange.afterLastIndexedBlock-1,
			"processed", f.indexedRange.afterLastIndexedBlock-f.ptrHeadIndex,
			"elapsed", common.PrettyDuration(time.Since(f.startedHeadIndexAt)))
	}
	f.loggedHeadIndex, f.startedHeadIndex = false, false
	return true
}
// tryIndexTail tries to render tail epochs until the tail target block is
// indexed and returns true if successful.
// Note that tail indexing is only started if the log index head is fully
// rendered according to targetView and is suspended as soon as the targetView
// is changed.
func (f *FilterMaps) tryIndexTail() bool {
	for firstEpoch := f.indexedRange.firstRenderedMap >> f.logMapsPerEpoch; firstEpoch > 0 && f.needTailEpoch(firstEpoch-1); {
		f.processEvents()
		if f.stop || !f.targetHeadIndexed() {
			return false
		}
		// resume process if tail rendering was interrupted because of head rendering
		tailRenderer := f.tailRenderer
		f.tailRenderer = nil
		if tailRenderer != nil && tailRenderer.afterLastMap != f.indexedRange.firstRenderedMap {
			// The saved renderer no longer matches the current range; discard it.
			tailRenderer = nil
		}
		if tailRenderer == nil {
			var err error
			tailRenderer, err = f.renderMapsBefore(f.indexedRange.firstRenderedMap)
			if err != nil {
				log.Error("Error creating log index tail renderer", "error", err)
				return false
			}
		}
		if tailRenderer == nil {
			// Nothing left to render before the first rendered map.
			return true
		}
		// Record progress-logging baselines on the first attempt of this run.
		if !f.startedTailIndex {
			f.lastLogTailIndex = time.Now()
			f.startedTailIndexAt = f.lastLogTailIndex
			f.startedTailIndex = true
			f.ptrTailIndex = f.indexedRange.firstIndexedBlock - f.tailPartialBlocks()
		}
		done, err := tailRenderer.run(func() bool {
			// Stop callback: abort on shutdown or when the target head changes.
			f.processEvents()
			return f.stop || !f.targetHeadIndexed()
		}, func() {
			// Progress callback: emit throttled progress logs.
			tpb, ttb := f.tailPartialBlocks(), f.tailTargetBlock()
			remaining := uint64(1)
			if f.indexedRange.firstIndexedBlock > ttb+tpb {
				remaining = f.indexedRange.firstIndexedBlock - ttb - tpb
			}
			if f.indexedRange.hasIndexedBlocks() && f.ptrTailIndex >= f.indexedRange.firstIndexedBlock &&
				(!f.loggedTailIndex || time.Since(f.lastLogTailIndex) > logFrequency) {
				log.Info("Log index tail rendering in progress",
					"first block", f.indexedRange.firstIndexedBlock, "last block", f.indexedRange.afterLastIndexedBlock-1,
					"processed", f.ptrTailIndex-f.indexedRange.firstIndexedBlock+tpb,
					"remaining", remaining,
					"next tail epoch percentage", f.indexedRange.tailPartialEpoch*100/f.mapsPerEpoch,
					"elapsed", common.PrettyDuration(time.Since(f.startedTailIndexAt)))
				f.loggedTailIndex = true
				f.lastLogTailIndex = time.Now()
			}
		})
		if err != nil {
			log.Error("Log index tail rendering failed", "error", err)
		}
		if !done {
			f.tailRenderer = tailRenderer // only keep tail renderer if interrupted by stopCb
			return false
		}
	}
	if f.loggedTailIndex {
		log.Info("Log index tail rendering finished",
			"first block", f.indexedRange.firstIndexedBlock, "last block", f.indexedRange.afterLastIndexedBlock-1,
			"processed", f.ptrTailIndex-f.indexedRange.firstIndexedBlock,
			"elapsed", common.PrettyDuration(time.Since(f.startedTailIndexAt)))
		f.loggedTailIndex = false
	}
	return true
}
// tryUnindexTail removes entire epochs of log index data as long as the first
// fully indexed block is at least as old as the tail target.
// Note that unindexing is very quick as it only removes continuous ranges of
// data from the database and is also called while running head indexing.
func (f *FilterMaps) tryUnindexTail() bool {
	for {
		// First epoch that still has (possibly partial) rendered data.
		firstEpoch := (f.indexedRange.firstRenderedMap - f.indexedRange.tailPartialEpoch) >> f.logMapsPerEpoch
		if f.needTailEpoch(firstEpoch) {
			break
		}
		f.processEvents()
		if f.stop {
			return false
		}
		// Record baselines for the summary log on the first removed epoch.
		if !f.startedTailUnindex {
			f.startedTailUnindexAt = time.Now()
			f.startedTailUnindex = true
			f.ptrTailUnindexMap = f.indexedRange.firstRenderedMap - f.indexedRange.tailPartialEpoch
			f.ptrTailUnindexBlock = f.indexedRange.firstIndexedBlock - f.tailPartialBlocks()
		}
		if err := f.deleteTailEpoch(firstEpoch); err != nil {
			log.Error("Log index tail epoch unindexing failed", "error", err)
			return false
		}
	}
	if f.startedTailUnindex {
		log.Info("Log index tail unindexing finished",
			"first block", f.indexedRange.firstIndexedBlock, "last block", f.indexedRange.afterLastIndexedBlock-1,
			"removed maps", f.indexedRange.firstRenderedMap-f.ptrTailUnindexMap,
			"removed blocks", f.indexedRange.firstIndexedBlock-f.tailPartialBlocks()-f.ptrTailUnindexBlock,
			"elapsed", common.PrettyDuration(time.Since(f.startedTailUnindexAt)))
		f.startedTailUnindex = false
	}
	return true
}
// needTailEpoch returns true if the given tail epoch needs to be kept
// according to the current tail target, false if it can be removed.
func (f *FilterMaps) needTailEpoch(epoch uint32) bool {
	firstEpoch := f.indexedRange.firstRenderedMap >> f.logMapsPerEpoch
	if epoch > firstEpoch {
		// Epochs after the first rendered one are always needed.
		return true
	}
	if epoch+1 < firstEpoch {
		// Epochs well before the rendered range are never needed.
		return false
	}
	tailTarget := f.tailTargetBlock()
	if tailTarget < f.indexedRange.firstIndexedBlock {
		// Tail target is older than what is indexed; keep the epoch.
		return true
	}
	tailLvIndex, err := f.getBlockLvPointer(tailTarget)
	if err != nil {
		log.Error("Could not get log value index of tail block", "error", err)
		// Err on the side of keeping data if the pointer cannot be read.
		return true
	}
	// Keep the epoch if it ends at or after the tail target's log value index.
	return uint64(epoch+1)<<(f.logValuesPerMap+f.logMapsPerEpoch) >= tailLvIndex
}
// tailTargetBlock returns the target value for the tail block number according
// to the log history parameter and the current index head.
func (f *FilterMaps) tailTargetBlock() uint64 {
	head := f.indexedView.headNumber
	if f.history == 0 || head < f.history {
		// Unlimited history, or the chain is shorter than the history window.
		return 0
	}
	return head - f.history + 1
}
// tailPartialBlocks returns the number of rendered blocks in the partially
// rendered next tail epoch.
// NOTE(review): read errors are only logged here; the affected boundary then
// contributes zero to the result — confirm this best-effort behavior is intended.
func (f *FilterMaps) tailPartialBlocks() uint64 {
	if f.indexedRange.tailPartialEpoch == 0 {
		return 0
	}
	// Last block covered by the partially rendered maps.
	end, _, err := f.getLastBlockOfMap(f.indexedRange.firstRenderedMap - f.mapsPerEpoch + f.indexedRange.tailPartialEpoch - 1)
	if err != nil {
		log.Error("Error fetching last block of map", "mapIndex", f.indexedRange.firstRenderedMap-f.mapsPerEpoch+f.indexedRange.tailPartialEpoch-1, "error", err)
	}
	// Last block before the partial epoch (zero for the very first epoch).
	var start uint64
	if f.indexedRange.firstRenderedMap-f.mapsPerEpoch > 0 {
		start, _, err = f.getLastBlockOfMap(f.indexedRange.firstRenderedMap - f.mapsPerEpoch - 1)
		if err != nil {
			log.Error("Error fetching last block of map", "mapIndex", f.indexedRange.firstRenderedMap-f.mapsPerEpoch-1, "error", err)
		}
	}
	return end - start
}
// targetHeadIndexed returns true if the current log index is consistent with
// targetView with its head block fully rendered.
func (f *FilterMaps) targetHeadIndexed() bool {
	return equalViews(f.targetView, f.indexedView) && f.indexedRange.headBlockIndexed
}

View file

@ -0,0 +1,444 @@
// Copyright 2024 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package filtermaps
import (
crand "crypto/rand"
"crypto/sha256"
"math/big"
"math/rand"
"sync"
"testing"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/consensus/ethash"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/params"
)
// testParams uses deliberately small filter map dimensions so that tests can
// exercise many maps and epoch boundaries with relatively few blocks.
var testParams = Params{
	logMapHeight:       2,
	logMapWidth:        24,
	logMapsPerEpoch:    4,
	logValuesPerMap:    4,
	baseRowGroupLength: 4,
	baseRowLengthRatio: 2,
	logLayerDiff:       2,
}
// TestIndexerRandomRange drives the indexer through random sequences of
// history-setting changes, reorgs to random forks and head extensions, and
// after each step verifies the indexed head, head delimiter and tail block
// against expectations computed from the known per-block log value count.
func TestIndexerRandomRange(t *testing.T) {
	ts := newTestSetup(t)
	defer ts.close()
	// Build ten forks of length 1000, each branching off at a random block.
	forks := make([][]common.Hash, 10)
	ts.chain.addBlocks(1000, 5, 2, 4, false) // 51 log values per block
	for i := range forks {
		if i != 0 {
			forkBlock := rand.Intn(1000)
			ts.chain.setHead(forkBlock)
			ts.chain.addBlocks(1000-forkBlock, 5, 2, 4, false) // 51 log values per block
		}
		forks[i] = ts.chain.getCanonicalChain()
	}
	lvPerBlock := uint64(51)
	ts.setHistory(0, false)
	var (
		history       int
		noHistory     bool
		fork, head    = len(forks) - 1, 1000
		checkSnapshot bool
	)
	ts.fm.WaitIdle()
	for i := 0; i < 200; i++ {
		switch rand.Intn(3) {
		case 0:
			// change history settings
			switch rand.Intn(10) {
			case 0:
				history, noHistory = 0, false
			case 1:
				history, noHistory = 0, true
			default:
				history, noHistory = rand.Intn(1000)+1, false
			}
			ts.testDisableSnapshots = rand.Intn(2) == 0
			ts.setHistory(uint64(history), noHistory)
		case 1:
			// change head to random position of random fork
			fork, head = rand.Intn(len(forks)), rand.Intn(1001)
			ts.chain.setCanonicalChain(forks[fork][:head+1])
		case 2:
			if head < 1000 {
				checkSnapshot = !noHistory && head != 0 // no snapshot generated for block 0
				// add blocks after the current head
				head += rand.Intn(1000-head) + 1
				ts.fm.testSnapshotUsed = false
				ts.chain.setCanonicalChain(forks[fork][:head+1])
			}
		}
		ts.fm.WaitIdle()
		if checkSnapshot {
			// Snapshot usage must match the disable flag after a head extension.
			if ts.fm.testSnapshotUsed == ts.fm.testDisableSnapshots {
				ts.t.Fatalf("Invalid snapshot used state after head extension (used: %v, disabled: %v)", ts.fm.testSnapshotUsed, ts.fm.testDisableSnapshots)
			}
			checkSnapshot = false
		}
		if noHistory {
			if ts.fm.indexedRange.initialized {
				t.Fatalf("filterMapsRange initialized while indexing is disabled")
			}
			continue
		}
		if !ts.fm.indexedRange.initialized {
			t.Fatalf("filterMapsRange not initialized while indexing is enabled")
		}
		// Compute the expected tail block from the history setting, rounded
		// to the epoch boundary that unindexing operates on.
		var tailBlock uint64
		if history > 0 && history <= head {
			tailBlock = uint64(head + 1 - history)
		}
		var tailEpoch uint32
		if tailBlock > 0 {
			tailLvPtr := (tailBlock - 1) * lvPerBlock // no logs in genesis block, only delimiter
			tailEpoch = uint32(tailLvPtr >> (testParams.logValuesPerMap + testParams.logMapsPerEpoch))
		}
		var expTailBlock uint64
		if tailEpoch > 0 {
			tailLvPtr := uint64(tailEpoch) << (testParams.logValuesPerMap + testParams.logMapsPerEpoch) // first available lv ptr
			// (expTailBlock-1)*lvPerBlock >= tailLvPtr
			expTailBlock = (tailLvPtr + lvPerBlock*2 - 1) / lvPerBlock
		}
		if ts.fm.indexedRange.afterLastIndexedBlock != uint64(head+1) {
			ts.t.Fatalf("Invalid index head (expected #%d, got #%d)", head, ts.fm.indexedRange.afterLastIndexedBlock-1)
		}
		if ts.fm.indexedRange.headBlockDelimiter != uint64(head)*lvPerBlock {
			ts.t.Fatalf("Invalid index head delimiter pointer (expected %d, got %d)", uint64(head)*lvPerBlock, ts.fm.indexedRange.headBlockDelimiter)
		}
		if ts.fm.indexedRange.firstIndexedBlock != expTailBlock {
			ts.t.Fatalf("Invalid index tail block (expected #%d, got #%d)", expTailBlock, ts.fm.indexedRange.firstIndexedBlock)
		}
	}
}
// TestIndexerCompareDb checks that the database content is deterministic:
// after reorgs, history changes and reindexing, returning to a previously
// seen logical chain/history state must reproduce the exact same database
// hash, and distinct states must never collide.
func TestIndexerCompareDb(t *testing.T) {
	ts := newTestSetup(t)
	defer ts.close()
	ts.chain.addBlocks(500, 10, 3, 4, true)
	ts.setHistory(0, false)
	ts.fm.WaitIdle()
	// revert points are stored after block 500
	ts.chain.addBlocks(500, 10, 3, 4, true)
	ts.fm.WaitIdle()
	chain1 := ts.chain.getCanonicalChain()
	ts.storeDbHash("chain 1 [0, 1000]")
	ts.chain.setHead(600)
	ts.fm.WaitIdle()
	ts.storeDbHash("chain 1/2 [0, 600]")
	ts.chain.addBlocks(600, 10, 3, 4, true)
	ts.fm.WaitIdle()
	chain2 := ts.chain.getCanonicalChain()
	ts.storeDbHash("chain 2 [0, 1200]")
	// rolling back to the common prefix must restore the earlier db state
	ts.chain.setHead(600)
	ts.fm.WaitIdle()
	ts.checkDbHash("chain 1/2 [0, 600]")
	ts.setHistory(800, false)
	ts.chain.setCanonicalChain(chain1)
	ts.fm.WaitIdle()
	ts.storeDbHash("chain 1 [201, 1000]")
	// re-enabling full history must reproduce the full-index hash
	ts.setHistory(0, false)
	ts.fm.WaitIdle()
	ts.checkDbHash("chain 1 [0, 1000]")
	ts.setHistory(800, false)
	ts.chain.setCanonicalChain(chain2)
	ts.fm.WaitIdle()
	ts.storeDbHash("chain 2 [401, 1200]")
	// disabling indexing entirely has its own canonical db state
	ts.setHistory(0, true)
	ts.fm.WaitIdle()
	ts.storeDbHash("no index")
	ts.chain.setCanonicalChain(chain2[:501])
	ts.setHistory(0, false)
	ts.fm.WaitIdle()
	ts.chain.setCanonicalChain(chain2)
	ts.fm.WaitIdle()
	ts.checkDbHash("chain 2 [0, 1200]")
	ts.chain.setCanonicalChain(chain1)
	ts.fm.WaitIdle()
	ts.setHistory(800, false)
	ts.fm.WaitIdle()
	ts.checkDbHash("chain 1 [201, 1000]")
	ts.chain.setCanonicalChain(chain2)
	ts.fm.WaitIdle()
	ts.checkDbHash("chain 2 [401, 1200]")
	ts.setHistory(0, true)
	ts.fm.WaitIdle()
	ts.checkDbHash("no index")
}
// testSetup bundles a FilterMaps instance under test together with a
// simulated chain backend, a shared database and bookkeeping for database
// content hash comparisons.
type testSetup struct {
	t                    *testing.T
	fm                   *FilterMaps            // indexer under test; recreated by setHistory
	db                   ethdb.Database         // backing database, shared across indexer restarts
	chain                *testChain             // simulated chain backend
	params               Params                 // derived copy of testParams
	dbHashes             map[string]common.Hash // named database content hashes for later comparison
	testDisableSnapshots bool                   // propagated to each newly created FilterMaps instance
}
// newTestSetup creates a testSetup with an in-memory database, derived test
// parameters and a fresh simulated chain.
func newTestSetup(t *testing.T) *testSetup {
	p := testParams
	p.deriveFields()
	setup := &testSetup{
		t:        t,
		db:       rawdb.NewMemoryDatabase(),
		params:   p,
		dbHashes: make(map[string]common.Hash),
	}
	setup.chain = setup.newTestChain()
	return setup
}
// setHistory stops the current FilterMaps instance (if any) and starts a new
// one on the same database with the given history length; noHistory disables
// indexing entirely.
func (ts *testSetup) setHistory(history uint64, noHistory bool) {
	if ts.fm != nil {
		ts.fm.Stop()
	}
	current := ts.chain.CurrentBlock()
	chainView := NewChainView(ts.chain, current.Number.Uint64(), current.Hash())
	ts.fm = NewFilterMaps(ts.db, chainView, ts.params, Config{
		History:  history,
		Disabled: noHistory,
	})
	ts.fm.testDisableSnapshots = ts.testDisableSnapshots
	ts.fm.Start()
}
// storeDbHash records the current database content hash under id, failing the
// test if an identical hash has already been stored under a different id.
func (ts *testSetup) storeDbHash(id string) {
	hash := ts.fmDbHash()
	for storedId, storedHash := range ts.dbHashes {
		if storedHash == hash {
			ts.t.Fatalf("Unexpected equal database hashes `%s` and `%s`", id, storedId)
		}
	}
	ts.dbHashes[id] = hash
}
// checkDbHash fails the test if the current database content hash differs
// from the one previously stored under id.
func (ts *testSetup) checkDbHash(id string) {
	expected := ts.dbHashes[id]
	if actual := ts.fmDbHash(); actual != expected {
		ts.t.Fatalf("Database `%s` hash mismatch", id)
	}
}
// fmDbHash returns a SHA-256 hash over all key/value pairs in the database,
// used to compare database contents across reorgs and reindexing.
func (ts *testSetup) fmDbHash() common.Hash {
	var hash common.Hash
	hasher := sha256.New()
	iter := ts.db.NewIterator(nil, nil)
	defer iter.Release()
	for iter.Next() {
		hasher.Write(iter.Key())
		hasher.Write(iter.Value())
	}
	hasher.Sum(hash[:0])
	return hash
}
// close shuts down the indexer (if running) and closes both databases.
func (ts *testSetup) close() {
	if fm := ts.fm; fm != nil {
		fm.Stop()
	}
	ts.db.Close()
	ts.chain.db.Close()
}
// testChain is a minimal simulated blockchain backend supporting the chain
// reader interface used by FilterMaps, including head changes and reorgs.
type testChain struct {
	ts        *testSetup
	db        ethdb.Database
	lock      sync.RWMutex  // protects canonical, blocks and receipts
	canonical []common.Hash // canonical block hashes, indexed by block number
	blocks    map[common.Hash]*types.Block
	receipts  map[common.Hash]types.Receipts
}
// newTestChain creates an empty simulated chain attached to the test setup.
func (ts *testSetup) newTestChain() *testChain {
	chain := new(testChain)
	chain.ts = ts
	chain.blocks = make(map[common.Hash]*types.Block)
	chain.receipts = make(map[common.Hash]types.Receipts)
	return chain
}
// CurrentBlock returns the header of the current chain head, or nil if the
// chain is empty.
func (tc *testChain) CurrentBlock() *types.Header {
	tc.lock.RLock()
	defer tc.lock.RUnlock()
	last := len(tc.canonical) - 1
	if last < 0 {
		return nil
	}
	return tc.blocks[tc.canonical[last]].Header()
}
// GetHeader returns the header with the given hash, or nil if unknown; the
// number argument is ignored by this simulated backend.
func (tc *testChain) GetHeader(hash common.Hash, number uint64) *types.Header {
	tc.lock.RLock()
	defer tc.lock.RUnlock()
	block, ok := tc.blocks[hash]
	if !ok || block == nil {
		return nil
	}
	return block.Header()
}
// GetCanonicalHash returns the canonical block hash at the given number, or
// the zero hash if the number is beyond the current head.
func (tc *testChain) GetCanonicalHash(number uint64) common.Hash {
	tc.lock.RLock()
	defer tc.lock.RUnlock()
	if number >= uint64(len(tc.canonical)) {
		return common.Hash{}
	}
	return tc.canonical[number]
}
// GetReceiptsByHash returns the receipts belonging to the block with the
// given hash, or nil if the block is unknown.
func (tc *testChain) GetReceiptsByHash(hash common.Hash) types.Receipts {
	tc.lock.RLock()
	defer tc.lock.RUnlock()
	receipts := tc.receipts[hash]
	return receipts
}
// addBlocks extends the canonical chain with count new blocks. Each block
// holds up to maxTxPerBlock transactions, each receipt up to
// maxLogsPerReceipt logs and each log up to maxTopicsPerLog topics. If random
// is true the actual counts are drawn uniformly up to the given maximum,
// otherwise the maximum is always used. Log addresses and topics are filled
// with cryptographically random bytes.
func (tc *testChain) addBlocks(count, maxTxPerBlock, maxLogsPerReceipt, maxTopicsPerLog int, random bool) {
	tc.lock.Lock()
	blockGen := func(i int, gen *core.BlockGen) {
		var txCount int
		if random {
			txCount = rand.Intn(maxTxPerBlock + 1)
		} else {
			txCount = maxTxPerBlock
		}
		for k := txCount; k > 0; k-- {
			receipt := types.NewReceipt(nil, false, 0)
			var logCount int
			if random {
				logCount = rand.Intn(maxLogsPerReceipt + 1)
			} else {
				logCount = maxLogsPerReceipt
			}
			receipt.Logs = make([]*types.Log, logCount)
			for i := range receipt.Logs { // note: intentionally shadows the unused block index parameter i
				log := &types.Log{}
				receipt.Logs[i] = log
				crand.Read(log.Address[:])
				var topicCount int
				if random {
					topicCount = rand.Intn(maxTopicsPerLog + 1)
				} else {
					topicCount = maxTopicsPerLog
				}
				log.Topics = make([]common.Hash, topicCount)
				for j := range log.Topics {
					crand.Read(log.Topics[j][:])
				}
			}
			gen.AddUncheckedReceipt(receipt)
			gen.AddUncheckedTx(types.NewTransaction(999, common.HexToAddress("0x999"), big.NewInt(999), 999, gen.BaseFee(), nil))
		}
	}
	var (
		blocks   []*types.Block
		receipts []types.Receipts
		engine   = ethash.NewFaker()
	)
	if len(tc.canonical) == 0 {
		// empty chain: create the genesis block and database first
		gspec := &core.Genesis{
			Alloc:   types.GenesisAlloc{},
			BaseFee: big.NewInt(params.InitialBaseFee),
			Config:  params.TestChainConfig,
		}
		tc.db, blocks, receipts = core.GenerateChainWithGenesis(gspec, engine, count, blockGen)
		gblock := gspec.ToBlock()
		ghash := gblock.Hash()
		tc.canonical = []common.Hash{ghash}
		tc.blocks[ghash] = gblock
		tc.receipts[ghash] = types.Receipts{}
	} else {
		blocks, receipts = core.GenerateChain(params.TestChainConfig, tc.blocks[tc.canonical[len(tc.canonical)-1]], engine, tc.db, count, blockGen)
	}
	for i, block := range blocks {
		num, hash := int(block.NumberU64()), block.Hash()
		if len(tc.canonical) != num {
			panic("canonical chain length mismatch")
		}
		tc.canonical = append(tc.canonical, hash)
		tc.blocks[hash] = block
		if receipts[i] != nil {
			tc.receipts[hash] = receipts[i]
		} else {
			tc.receipts[hash] = types.Receipts{}
		}
	}
	// release the write lock before setTargetHead, which re-acquires the
	// lock for reading via CurrentBlock
	tc.lock.Unlock()
	tc.setTargetHead()
}
// setHead truncates the canonical chain to the given head block number and
// notifies the indexer of the new target head.
func (tc *testChain) setHead(headNum int) {
	tc.lock.Lock()
	tc.canonical = tc.canonical[:headNum+1]
	// unlock before setTargetHead, which re-acquires the lock for reading
	tc.lock.Unlock()
	tc.setTargetHead()
}
// setTargetHead propagates the current chain head to the indexer as the new
// target view, unless no indexer is attached or indexing is disabled.
func (tc *testChain) setTargetHead() {
	fm := tc.ts.fm
	if fm == nil || fm.disabled {
		return
	}
	head := tc.CurrentBlock()
	fm.SetTargetView(NewChainView(tc, head.Number.Uint64(), head.Hash()))
}
// getCanonicalChain returns a copy of the current canonical hash list.
func (tc *testChain) getCanonicalChain() []common.Hash {
	tc.lock.RLock()
	defer tc.lock.RUnlock()
	return append([]common.Hash(nil), tc.canonical...)
}
// setCanonicalChain restores an earlier state of the chain by replacing the
// canonical hash list with a copy of cc, then updates the indexer target.
func (tc *testChain) setCanonicalChain(cc []common.Hash) {
	tc.lock.Lock()
	tc.canonical = append([]common.Hash(nil), cc...)
	tc.lock.Unlock()
	tc.setTargetHead()
}

View file

@ -0,0 +1,768 @@
// Copyright 2024 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package filtermaps
import (
"errors"
"fmt"
"math"
"sort"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/lru"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/log"
)
// Tuning constants for the map renderer.
const (
	maxMapsPerBatch   = 32    // maximum number of maps rendered in memory
	valuesPerCallback = 1024  // log values processed per event process callback
	cachedRowMappings = 10000 // log value to row mappings cached during rendering
	// Number of rows written to db in a single batch.
	// The map renderer splits up writes like this to ensure that regular
	// block processing latency is not affected by large batch writes.
	rowsPerBatch = 1024
)
var (
	// errChainUpdate is returned when the section of the chain being rendered
	// changed (e.g. was reorged) while rendering was in progress.
	errChainUpdate = errors.New("rendered section of chain updated")
)
// mapRenderer represents a process that renders filter maps in a specified
// range according to the actual targetView.
type mapRenderer struct {
	f                                *FilterMaps
	afterLastMap                     uint32                  // first map index that is not to be rendered
	currentMap                       *renderedMap            // map currently being rendered
	finishedMaps                     map[uint32]*renderedMap // rendered maps not yet written to the database
	firstFinished, afterLastFinished uint32                  // index range covered by finishedMaps
	iterator                         *logIterator
}
// renderedMap represents a single filter map that is being rendered in memory.
type renderedMap struct {
	filterMap     filterMap
	mapIndex      uint32
	lastBlock     uint64      // number of the last block with log values in this map
	lastBlockId   common.Hash // id of lastBlock in the view the map was rendered against
	blockLvPtrs   []uint64    // start pointers of blocks starting in this map; last one is lastBlock
	finished      bool        // iterator finished; all values rendered
	headDelimiter uint64      // if finished then points to the future block delimiter of the head block
}
// firstBlock returns the first block number that starts in the given map.
func (r *renderedMap) firstBlock() uint64 {
	blocksInMap := uint64(len(r.blockLvPtrs))
	return r.lastBlock + 1 - blocksInMap
}
// renderMapsBefore creates a mapRenderer that renders the log index until the
// specified map index boundary, starting from the latest available starting
// point that is consistent with the current targetView.
// The renderer ensures that filterMapsRange, indexedView and the actual map
// data are always consistent with each other. If afterLastMap is greater than
// the latest existing rendered map then indexedView is updated to targetView,
// otherwise it is checked that the rendered range is consistent with both
// views.
func (f *FilterMaps) renderMapsBefore(afterLastMap uint32) (*mapRenderer, error) {
	firstMap, lastBlock, lastLvPtr, err := f.lastCanonicalMapBoundaryBefore(afterLastMap)
	if err != nil {
		return nil, err
	}
	// prefer resuming from a cached snapshot when it covers at least as much
	// of the index as the best map boundary found
	if snapshot := f.lastCanonicalSnapshotBefore(afterLastMap); snapshot != nil && snapshot.mapIndex >= firstMap {
		return f.renderMapsFromSnapshot(snapshot)
	}
	if firstMap >= afterLastMap {
		return nil, nil // nothing left to render
	}
	return f.renderMapsFromMapBoundary(firstMap, afterLastMap, lastBlock, lastLvPtr)
}
// renderMapsFromSnapshot creates a mapRenderer that starts rendering from a
// snapshot made at a block boundary.
func (f *FilterMaps) renderMapsFromSnapshot(cp *renderedMap) (*mapRenderer, error) {
	f.testSnapshotUsed = true
	iter, err := f.newLogIteratorFromBlockDelimiter(cp.lastBlock)
	if err != nil {
		return nil, fmt.Errorf("failed to create log iterator from block delimiter %d: %v", cp.lastBlock, err)
	}
	// resume from a deep copy of the snapshot's filter map so that the
	// cached snapshot itself stays untouched
	resumed := &renderedMap{
		filterMap:   cp.filterMap.copy(),
		mapIndex:    cp.mapIndex,
		lastBlock:   cp.lastBlock,
		blockLvPtrs: cp.blockLvPtrs,
	}
	return &mapRenderer{
		f:                 f,
		currentMap:        resumed,
		finishedMaps:      make(map[uint32]*renderedMap),
		firstFinished:     cp.mapIndex,
		afterLastFinished: cp.mapIndex,
		afterLastMap:      math.MaxUint32,
		iterator:          iter,
	}, nil
}
// renderMapsFromMapBoundary creates a mapRenderer that starts rendering at a
// map boundary.
func (f *FilterMaps) renderMapsFromMapBoundary(firstMap, afterLastMap uint32, startBlock, startLvPtr uint64) (*mapRenderer, error) {
	iter, err := f.newLogIteratorFromMapBoundary(firstMap, startBlock, startLvPtr)
	if err != nil {
		return nil, fmt.Errorf("failed to create log iterator from map boundary %d: %v", firstMap, err)
	}
	// the first map starts out empty; its last block is wherever the
	// iterator is positioned after seeking to the boundary
	fresh := &renderedMap{
		filterMap: f.emptyFilterMap(),
		mapIndex:  firstMap,
		lastBlock: iter.blockNumber,
	}
	return &mapRenderer{
		f:                 f,
		currentMap:        fresh,
		finishedMaps:      make(map[uint32]*renderedMap),
		firstFinished:     firstMap,
		afterLastFinished: firstMap,
		afterLastMap:      afterLastMap,
		iterator:          iter,
	}, nil
}
// lastCanonicalSnapshotBefore returns the latest cached snapshot that matches
// the current targetView and belongs to a map before afterLastMap.
func (f *FilterMaps) lastCanonicalSnapshotBefore(afterLastMap uint32) *renderedMap {
	var best *renderedMap
	for _, blockNumber := range f.renderSnapshots.Keys() {
		cp, _ := f.renderSnapshots.Get(blockNumber)
		if cp == nil {
			continue
		}
		// the snapshot block has to be indexed and within the target view
		if blockNumber >= f.indexedRange.afterLastIndexedBlock || blockNumber > f.targetView.headNumber {
			continue
		}
		// it also has to be canonical in the target view, before the
		// rendering boundary and newer than the best candidate so far
		if f.targetView.getBlockId(blockNumber) != cp.lastBlockId || cp.mapIndex >= afterLastMap {
			continue
		}
		if best == nil || blockNumber > best.lastBlock {
			best = cp
		}
	}
	return best
}
// lastCanonicalMapBoundaryBefore returns the latest map boundary before the
// specified map index that matches the current targetView. This can either
// be a checkpoint (hardcoded or left from a previously unindexed tail epoch)
// or the boundary of a currently rendered map.
// Along with the next map index where the rendering can be started, the number
// and starting log value pointer of the last block is also returned.
func (f *FilterMaps) lastCanonicalMapBoundaryBefore(afterLastMap uint32) (nextMap uint32, startBlock, startLvPtr uint64, err error) {
	if !f.indexedRange.initialized {
		return 0, 0, 0, nil
	}
	// iterate map boundaries backwards until one is found whose last block
	// is a full map boundary still canonical in the target view
	mapIndex := afterLastMap
	for {
		var ok bool
		if mapIndex, ok = f.lastMapBoundaryBefore(mapIndex); !ok {
			return 0, 0, 0, nil
		}
		lastBlock, lastBlockId, err := f.getLastBlockOfMap(mapIndex)
		if err != nil {
			return 0, 0, 0, fmt.Errorf("failed to retrieve last block of reverse iterated map %d: %v", mapIndex, err)
		}
		if lastBlock >= f.indexedView.headNumber || lastBlock >= f.targetView.headNumber ||
			lastBlockId != f.targetView.getBlockId(lastBlock) {
			// map is not full or inconsistent with targetView; roll back
			continue
		}
		lvPtr, err := f.getBlockLvPointer(lastBlock)
		if err != nil {
			return 0, 0, 0, fmt.Errorf("failed to retrieve log value pointer of last canonical boundary block %d: %v", lastBlock, err)
		}
		return mapIndex + 1, lastBlock, lvPtr, nil
	}
}
// lastMapBoundaryBefore returns the latest map boundary before the specified
// map index. The second return value is false if no such boundary exists.
func (f *FilterMaps) lastMapBoundaryBefore(mapIndex uint32) (uint32, bool) {
	if !f.indexedRange.initialized || f.indexedRange.afterLastRenderedMap == 0 {
		return 0, false
	}
	if mapIndex > f.indexedRange.afterLastRenderedMap {
		mapIndex = f.indexedRange.afterLastRenderedMap
	}
	if mapIndex > f.indexedRange.firstRenderedMap {
		// boundary lies inside the continuously rendered range
		return mapIndex - 1, true
	}
	if mapIndex+f.mapsPerEpoch > f.indexedRange.firstRenderedMap {
		// within one epoch before the rendered range; clamp to the end of
		// the partially rendered tail epoch, if any
		if mapIndex > f.indexedRange.firstRenderedMap-f.mapsPerEpoch+f.indexedRange.tailPartialEpoch {
			mapIndex = f.indexedRange.firstRenderedMap - f.mapsPerEpoch + f.indexedRange.tailPartialEpoch
		}
	} else {
		// further back; round down to an epoch boundary
		mapIndex = (mapIndex >> f.logMapsPerEpoch) << f.logMapsPerEpoch
	}
	if mapIndex == 0 {
		return 0, false
	}
	return mapIndex - 1, true
}
// emptyFilterMap returns a new filter map with all rows empty.
func (f *FilterMaps) emptyFilterMap() filterMap {
	fm := make(filterMap, f.mapHeight)
	return fm
}
// loadHeadSnapshot loads the last rendered map from the database and creates
// a snapshot so that head rendering can later be resumed without replaying
// the whole map.
func (f *FilterMaps) loadHeadSnapshot() error {
	fm, err := f.getFilterMap(f.indexedRange.afterLastRenderedMap - 1)
	if err != nil {
		return fmt.Errorf("failed to load head snapshot map %d: %v", f.indexedRange.afterLastRenderedMap-1, err)
	}
	lastBlock, _, err := f.getLastBlockOfMap(f.indexedRange.afterLastRenderedMap - 1)
	if err != nil {
		return fmt.Errorf("failed to retrieve last block of head snapshot map %d: %v", f.indexedRange.afterLastRenderedMap-1, err)
	}
	// the first block starting in the head map follows the last block of the
	// previous map; it is block 0 if there is no previous map
	var firstBlock uint64
	if f.indexedRange.afterLastRenderedMap > 1 {
		prevLastBlock, _, err := f.getLastBlockOfMap(f.indexedRange.afterLastRenderedMap - 2)
		if err != nil {
			return fmt.Errorf("failed to retrieve last block of map %d before head snapshot: %v", f.indexedRange.afterLastRenderedMap-2, err)
		}
		firstBlock = prevLastBlock + 1
	}
	// collect the log value start pointers of all blocks starting in the map
	lvPtrs := make([]uint64, lastBlock+1-firstBlock)
	for i := range lvPtrs {
		lvPtrs[i], err = f.getBlockLvPointer(firstBlock + uint64(i))
		if err != nil {
			return fmt.Errorf("failed to retrieve log value pointer of head snapshot block %d: %v", firstBlock+uint64(i), err)
		}
	}
	f.renderSnapshots.Add(f.indexedRange.afterLastIndexedBlock-1, &renderedMap{
		filterMap:     fm,
		mapIndex:      f.indexedRange.afterLastRenderedMap - 1,
		lastBlock:     f.indexedRange.afterLastIndexedBlock - 1,
		lastBlockId:   f.indexedView.getBlockId(f.indexedRange.afterLastIndexedBlock - 1),
		blockLvPtrs:   lvPtrs,
		finished:      true,
		headDelimiter: f.indexedRange.headBlockDelimiter,
	})
	return nil
}
// makeSnapshot creates a snapshot of the current state of the rendered map
// and caches it under the iterator's current block number.
func (r *mapRenderer) makeSnapshot() {
	r.f.renderSnapshots.Add(r.iterator.blockNumber, &renderedMap{
		filterMap: r.currentMap.filterMap.copy(),
		mapIndex:  r.currentMap.mapIndex,
		lastBlock: r.iterator.blockNumber,
		// NOTE(review): lastBlockId is derived from currentMap.lastBlock
		// while lastBlock above comes from the iterator; at a block delimiter
		// these should coincide, but this is worth double-checking against
		// the iterator/renderer block accounting.
		lastBlockId:   r.f.targetView.getBlockId(r.currentMap.lastBlock),
		blockLvPtrs:   r.currentMap.blockLvPtrs,
		finished:      true,
		headDelimiter: r.iterator.lvIndex,
	})
}
// run does the actual map rendering. It periodically calls the stopCb callback
// and if it returns true the process is interrupted and can be resumed later
// by calling run again. The writeCb callback is called after new maps have
// been written to disk and the index range has been updated accordingly.
func (r *mapRenderer) run(stopCb func() bool, writeCb func()) (bool, error) {
	for {
		if done, err := r.renderCurrentMap(stopCb); !done {
			return done, err // stopped or failed
		}
		// map finished
		r.finishedMaps[r.currentMap.mapIndex] = r.currentMap
		r.afterLastFinished++
		// flush when enough maps have accumulated or a base row group
		// boundary has been reached
		if len(r.finishedMaps) >= maxMapsPerBatch || r.afterLastFinished&(r.f.baseRowGroupLength-1) == 0 {
			if err := r.writeFinishedMaps(stopCb); err != nil {
				return false, err
			}
			writeCb()
		}
		if r.afterLastFinished == r.afterLastMap || r.iterator.finished {
			// rendering complete; flush any remaining maps
			if err := r.writeFinishedMaps(stopCb); err != nil {
				return false, err
			}
			writeCb()
			return true, nil
		}
		r.currentMap = &renderedMap{
			filterMap: r.f.emptyFilterMap(),
			mapIndex:  r.afterLastFinished,
		}
	}
}
// renderCurrentMap renders a single map. It returns false if rendering was
// stopped by the callback or the target chain view changed underneath it;
// in the latter case errChainUpdate is returned.
func (r *mapRenderer) renderCurrentMap(stopCb func() bool) (bool, error) {
	if !r.iterator.updateChainView(r.f.targetView) {
		return false, errChainUpdate
	}
	var waitCnt int
	if r.iterator.lvIndex == 0 {
		// the very first block starts at log value index zero
		r.currentMap.blockLvPtrs = []uint64{0}
	}
	type lvPos struct{ rowIndex, layerIndex uint32 }
	// cache log value hash -> row position mappings so that frequently
	// occurring values do not require repeated row index calculation
	rowMappingCache := lru.NewCache[common.Hash, lvPos](cachedRowMappings)
	defer rowMappingCache.Purge()
	for r.iterator.lvIndex < uint64(r.currentMap.mapIndex+1)<<r.f.logValuesPerMap && !r.iterator.finished {
		waitCnt++
		if waitCnt >= valuesPerCallback {
			// periodically give the caller a chance to pause/stop and
			// re-validate the chain view
			if stopCb() {
				return false, nil
			}
			if !r.iterator.updateChainView(r.f.targetView) {
				return false, errChainUpdate
			}
			waitCnt = 0
		}
		r.currentMap.lastBlock = r.iterator.blockNumber
		if r.iterator.delimiter {
			// a block delimiter marks the start pointer of the next block
			r.currentMap.lastBlock++
			r.currentMap.blockLvPtrs = append(r.currentMap.blockLvPtrs, r.iterator.lvIndex+1)
		}
		if logValue := r.iterator.getValueHash(); logValue != (common.Hash{}) {
			lvp, cached := rowMappingCache.Get(logValue)
			if !cached {
				lvp = lvPos{rowIndex: r.f.rowIndex(r.currentMap.mapIndex, 0, logValue)}
			}
			// move to higher mapping layers while the current row is full
			for uint32(len(r.currentMap.filterMap[lvp.rowIndex])) >= r.f.maxRowLength(lvp.layerIndex) {
				lvp.layerIndex++
				lvp.rowIndex = r.f.rowIndex(r.currentMap.mapIndex, lvp.layerIndex, logValue)
				cached = false
			}
			r.currentMap.filterMap[lvp.rowIndex] = append(r.currentMap.filterMap[lvp.rowIndex], r.f.columnIndex(r.iterator.lvIndex, &logValue))
			if !cached {
				rowMappingCache.Add(logValue, lvp)
			}
		}
		if err := r.iterator.next(); err != nil {
			return false, fmt.Errorf("failed to advance log iterator at %d while rendering map %d: %v", r.iterator.lvIndex, r.currentMap.mapIndex, err)
		}
		// take snapshots at block boundaries while rendering the head range
		if !r.f.testDisableSnapshots && r.afterLastMap >= r.f.indexedRange.afterLastRenderedMap &&
			(r.iterator.delimiter || r.iterator.finished) {
			r.makeSnapshot()
		}
	}
	if r.iterator.finished {
		r.currentMap.finished = true
		r.currentMap.headDelimiter = r.iterator.lvIndex
	}
	r.currentMap.lastBlockId = r.f.targetView.getBlockId(r.currentMap.lastBlock)
	return true, nil
}
// writeFinishedMaps writes rendered maps to the database and updates
// filterMapsRange and indexedView accordingly. The valid range is first
// reverted to a temporary safe range, the map data is written in small
// batches (calling pauseCb between batches), and finally the full updated
// range is committed.
func (r *mapRenderer) writeFinishedMaps(pauseCb func() bool) error {
	if len(r.finishedMaps) == 0 {
		return nil
	}
	r.f.indexLock.Lock()
	defer r.f.indexLock.Unlock()
	oldRange := r.f.indexedRange
	tempRange, err := r.getTempRange()
	if err != nil {
		return fmt.Errorf("failed to get temporary rendered range: %v", err)
	}
	newRange, err := r.getUpdatedRange()
	if err != nil {
		return fmt.Errorf("failed to get updated rendered range: %v", err)
	}
	renderedView := r.f.targetView // stopCb callback might still change targetView while writing finished maps
	batch := r.f.db.NewBatch()
	var writeCnt int
	// checkWriteCnt flushes the batch every rowsPerBatch writes so that a
	// single huge batch does not block regular block processing
	checkWriteCnt := func() {
		writeCnt++
		if writeCnt == rowsPerBatch {
			writeCnt = 0
			if err := batch.Write(); err != nil {
				log.Crit("Error writing log index update batch", "error", err)
			}
			// do not exit while in partially written state but do allow processing
			// events and pausing while block processing is in progress
			pauseCb()
			batch = r.f.db.NewBatch()
		}
	}
	// revert to the safe temporary range before writing any map data
	r.f.setRange(batch, r.f.indexedView, tempRange)
	// add or update filter rows
	for rowIndex := uint32(0); rowIndex < r.f.mapHeight; rowIndex++ {
		var (
			mapIndices []uint32
			rows       []FilterRow
		)
		for mapIndex := r.firstFinished; mapIndex < r.afterLastFinished; mapIndex++ {
			row := r.finishedMaps[mapIndex].filterMap[rowIndex]
			// skip writing rows that are unchanged according to the cache
			if fm, _ := r.f.filterMapCache.Get(mapIndex); fm != nil && row.Equal(fm[rowIndex]) {
				continue
			}
			mapIndices = append(mapIndices, mapIndex)
			rows = append(rows, row)
		}
		if newRange.afterLastRenderedMap == r.afterLastFinished { // head updated; remove future entries
			for mapIndex := r.afterLastFinished; mapIndex < oldRange.afterLastRenderedMap; mapIndex++ {
				if fm, _ := r.f.filterMapCache.Get(mapIndex); fm != nil && len(fm[rowIndex]) == 0 {
					continue
				}
				mapIndices = append(mapIndices, mapIndex)
				rows = append(rows, nil)
			}
		}
		if err := r.f.storeFilterMapRows(batch, mapIndices, rowIndex, rows); err != nil {
			return fmt.Errorf("failed to store filter maps %v row %d: %v", mapIndices, rowIndex, err)
		}
		checkWriteCnt()
	}
	// update filter map cache
	if newRange.afterLastRenderedMap == r.afterLastFinished {
		// head updated; cache new head maps and remove future entries
		for mapIndex := r.firstFinished; mapIndex < r.afterLastFinished; mapIndex++ {
			r.f.filterMapCache.Add(mapIndex, r.finishedMaps[mapIndex].filterMap)
		}
		for mapIndex := r.afterLastFinished; mapIndex < oldRange.afterLastRenderedMap; mapIndex++ {
			r.f.filterMapCache.Remove(mapIndex)
		}
	} else {
		// head not updated; do not cache maps during tail rendering because we
		// need head maps to be available in the cache
		for mapIndex := r.firstFinished; mapIndex < r.afterLastFinished; mapIndex++ {
			r.f.filterMapCache.Remove(mapIndex)
		}
	}
	// add or update block pointers
	blockNumber := r.finishedMaps[r.firstFinished].firstBlock()
	for mapIndex := r.firstFinished; mapIndex < r.afterLastFinished; mapIndex++ {
		renderedMap := r.finishedMaps[mapIndex]
		r.f.storeLastBlockOfMap(batch, mapIndex, renderedMap.lastBlock, renderedMap.lastBlockId)
		checkWriteCnt()
		if blockNumber != renderedMap.firstBlock() {
			panic("non-continuous block numbers")
		}
		for _, lvPtr := range renderedMap.blockLvPtrs {
			r.f.storeBlockLvPointer(batch, blockNumber, lvPtr)
			checkWriteCnt()
			blockNumber++
		}
	}
	if newRange.afterLastRenderedMap == r.afterLastFinished { // head updated; remove future entries
		for mapIndex := r.afterLastFinished; mapIndex < oldRange.afterLastRenderedMap; mapIndex++ {
			r.f.deleteLastBlockOfMap(batch, mapIndex)
			checkWriteCnt()
		}
		for ; blockNumber < oldRange.afterLastIndexedBlock; blockNumber++ {
			r.f.deleteBlockLvPointer(batch, blockNumber)
			checkWriteCnt()
		}
	}
	r.finishedMaps = make(map[uint32]*renderedMap)
	r.firstFinished = r.afterLastFinished
	// commit the final, fully written range
	r.f.setRange(batch, renderedView, newRange)
	if err := batch.Write(); err != nil {
		log.Crit("Error writing log index update batch", "error", err)
	}
	return nil
}
// getTempRange returns a temporary filterMapsRange that is committed to the
// database while the newly rendered maps are partially written. Writing all
// processed maps in a single database batch would be a serious hit on db
// performance so instead safety is ensured by first reverting the valid map
// range to the unchanged region until all new map data is committed.
func (r *mapRenderer) getTempRange() (filterMapsRange, error) {
	tempRange := r.f.indexedRange
	// note: passing firstFinished for both boundaries reverts the range to
	// just before the first map that is about to be (re)written
	if err := tempRange.addRenderedRange(r.firstFinished, r.firstFinished, r.afterLastMap, r.f.mapsPerEpoch); err != nil {
		return filterMapsRange{}, fmt.Errorf("failed to update temporary rendered range: %v", err)
	}
	if tempRange.firstRenderedMap != r.f.indexedRange.firstRenderedMap {
		// first rendered map changed; update first indexed block
		if tempRange.firstRenderedMap > 0 {
			lastBlock, _, err := r.f.getLastBlockOfMap(tempRange.firstRenderedMap - 1)
			if err != nil {
				return filterMapsRange{}, fmt.Errorf("failed to retrieve last block of map %d before temporary range: %v", tempRange.firstRenderedMap-1, err)
			}
			tempRange.firstIndexedBlock = lastBlock + 1
		} else {
			tempRange.firstIndexedBlock = 0
		}
	}
	if tempRange.afterLastRenderedMap != r.f.indexedRange.afterLastRenderedMap {
		// last rendered map changed; update last indexed block
		if tempRange.afterLastRenderedMap > 0 {
			lastBlock, _, err := r.f.getLastBlockOfMap(tempRange.afterLastRenderedMap - 1)
			if err != nil {
				return filterMapsRange{}, fmt.Errorf("failed to retrieve last block of map %d at the end of temporary range: %v", tempRange.afterLastRenderedMap-1, err)
			}
			tempRange.afterLastIndexedBlock = lastBlock
		} else {
			tempRange.afterLastIndexedBlock = 0
		}
		tempRange.headBlockDelimiter = 0
	}
	return tempRange, nil
}
// getUpdatedRange returns the updated filterMapsRange after writing the newly
// rendered maps.
func (r *mapRenderer) getUpdatedRange() (filterMapsRange, error) {
	// update filterMapsRange
	newRange := r.f.indexedRange
	if err := newRange.addRenderedRange(r.firstFinished, r.afterLastFinished, r.afterLastMap, r.f.mapsPerEpoch); err != nil {
		return filterMapsRange{}, fmt.Errorf("failed to update rendered range: %v", err)
	}
	if newRange.firstRenderedMap != r.f.indexedRange.firstRenderedMap {
		// first rendered map changed; update first indexed block
		if newRange.firstRenderedMap > 0 {
			lastBlock, _, err := r.f.getLastBlockOfMap(newRange.firstRenderedMap - 1)
			if err != nil {
				return filterMapsRange{}, fmt.Errorf("failed to retrieve last block of map %d before rendered range: %v", newRange.firstRenderedMap-1, err)
			}
			newRange.firstIndexedBlock = lastBlock + 1
		} else {
			newRange.firstIndexedBlock = 0
		}
	}
	if newRange.afterLastRenderedMap == r.afterLastFinished {
		// last rendered map changed; update last indexed block and head pointers
		lm := r.finishedMaps[r.afterLastFinished-1]
		newRange.headBlockIndexed = lm.finished
		if lm.finished {
			// the iterator reached the head; the whole head block is indexed
			newRange.afterLastIndexedBlock = r.f.targetView.headNumber + 1
			if lm.lastBlock != r.f.targetView.headNumber {
				panic("map rendering finished but last block != head block")
			}
			newRange.headBlockDelimiter = lm.headDelimiter
		} else {
			// the last block of the last map may be partially rendered
			newRange.afterLastIndexedBlock = lm.lastBlock
			newRange.headBlockDelimiter = 0
		}
	} else {
		// last rendered map not replaced; ensure that target chain view matches
		// indexed chain view on the rendered section
		if lastBlock := r.finishedMaps[r.afterLastFinished-1].lastBlock; !matchViews(r.f.indexedView, r.f.targetView, lastBlock) {
			return filterMapsRange{}, errChainUpdate
		}
	}
	return newRange, nil
}
// addRenderedRange adds the range [firstRendered, afterLastRendered) and
// removes [afterLastRendered, afterLastRemoved) from the set of rendered maps.
func (fmr *filterMapsRange) addRenderedRange(firstRendered, afterLastRendered, afterLastRemoved, mapsPerEpoch uint32) error {
	if !fmr.initialized {
		return errors.New("log index not initialized")
	}
	// Here we create a slice of endpoints for the rendered sections. There are two endpoints
	// for each section: the index of the first map, and the index after the last map in the
	// section. We then iterate the endpoints -- adding d values -- to determine whether the
	// sections are contiguous or whether they have a gap.
	type endpoint struct {
		m uint32
		d int
	}
	// The asymmetric weights -101/+100 make the running sum strictly negative
	// inside the removed range [afterLastRendered, afterLastRemoved), even
	// where it overlaps the previously rendered sections (+1 weights), so
	// removed maps can never appear in the merged result.
	endpoints := []endpoint{{fmr.firstRenderedMap, 1}, {fmr.afterLastRenderedMap, -1}, {firstRendered, 1}, {afterLastRendered, -101}, {afterLastRemoved, 100}}
	if fmr.tailPartialEpoch > 0 {
		endpoints = append(endpoints, []endpoint{{fmr.firstRenderedMap - mapsPerEpoch, 1}, {fmr.firstRenderedMap - mapsPerEpoch + fmr.tailPartialEpoch, -1}}...)
	}
	sort.Slice(endpoints, func(i, j int) bool { return endpoints[i].m < endpoints[j].m })
	var (
		sum    int
		merged []uint32
		last   bool
	)
	// sweep over the sorted endpoints; a positive running sum means the
	// current position is inside a rendered section
	for i, e := range endpoints {
		sum += e.d
		if i < len(endpoints)-1 && endpoints[i+1].m == e.m {
			continue
		}
		if (sum > 0) != last {
			merged = append(merged, e.m)
			last = !last
		}
	}
	switch len(merged) {
	case 0:
		// Initialized database, but no finished maps yet.
		fmr.tailPartialEpoch = 0
		fmr.firstRenderedMap = firstRendered
		fmr.afterLastRenderedMap = firstRendered
	case 2:
		// One rendered section (no partial tail epoch).
		fmr.tailPartialEpoch = 0
		fmr.firstRenderedMap = merged[0]
		fmr.afterLastRenderedMap = merged[1]
	case 4:
		// Two rendered sections (with a gap).
		// First section (merged[0]-merged[1]) is for the partial tail epoch,
		// and it has to start exactly one epoch before the main section.
		if merged[2] != merged[0]+mapsPerEpoch {
			return fmt.Errorf("invalid tail partial epoch: %v", merged)
		}
		fmr.tailPartialEpoch = merged[1] - merged[0]
		fmr.firstRenderedMap = merged[2]
		fmr.afterLastRenderedMap = merged[3]
	default:
		return fmt.Errorf("invalid number of rendered sections: %v", merged)
	}
	return nil
}
// logIterator iterates on the linear log value index range.
type logIterator struct {
	chainView   *ChainView
	blockNumber uint64         // block the iterator is currently positioned in
	receipts    types.Receipts // receipts of blockNumber
	// blockStart is set at the first position of a block, delimiter at the
	// per-block delimiter entry, finished once iteration has passed the head.
	blockStart, delimiter, finished bool
	txIndex, logIndex, topicIndex   int    // position inside the block's receipts
	lvIndex                         uint64 // current linear log value index
}

// errUnindexedRange is returned when an iterator is requested for a block
// outside of the currently indexed range.
var errUnindexedRange = errors.New("unindexed range")
// newLogIteratorFromBlockDelimiter creates a logIterator starting at the
// given block's first log value entry (the block delimiter), according to the
// current targetView.
func (f *FilterMaps) newLogIteratorFromBlockDelimiter(blockNumber uint64) (*logIterator, error) {
	if blockNumber > f.targetView.headNumber {
		return nil, fmt.Errorf("iterator entry point %d after target chain head block %d", blockNumber, f.targetView.headNumber)
	}
	if blockNumber < f.indexedRange.firstIndexedBlock || blockNumber >= f.indexedRange.afterLastIndexedBlock {
		return nil, errUnindexedRange
	}
	// locate the delimiter's log value index: for the head block it is the
	// stored head delimiter, otherwise it is one before the next block's
	// start pointer
	var lvIndex uint64
	if f.indexedRange.headBlockIndexed && blockNumber+1 == f.indexedRange.afterLastIndexedBlock {
		lvIndex = f.indexedRange.headBlockDelimiter
	} else {
		var err error
		lvIndex, err = f.getBlockLvPointer(blockNumber + 1)
		if err != nil {
			return nil, fmt.Errorf("failed to retrieve log value pointer of block %d after delimiter: %v", blockNumber+1, err)
		}
		lvIndex--
	}
	// if the delimiter belongs to the head block there is nothing after it
	finished := blockNumber == f.targetView.headNumber
	return &logIterator{
		chainView:   f.targetView,
		blockNumber: blockNumber,
		finished:    finished,
		delimiter:   !finished,
		lvIndex:     lvIndex,
	}, nil
}
// newLogIteratorFromMapBoundary creates a logIterator starting at the given
// map boundary, according to the current targetView.
func (f *FilterMaps) newLogIteratorFromMapBoundary(mapIndex uint32, startBlock, startLvPtr uint64) (*logIterator, error) {
	if startBlock > f.targetView.headNumber {
		return nil, fmt.Errorf("iterator entry point %d after target chain head block %d", startBlock, f.targetView.headNumber)
	}
	// get block receipts
	receipts := f.targetView.getReceipts(startBlock)
	if receipts == nil {
		return nil, fmt.Errorf("receipts not found for start block %d", startBlock)
	}
	// initialize iterator at block start
	l := &logIterator{
		chainView:   f.targetView,
		blockNumber: startBlock,
		receipts:    receipts,
		blockStart:  true,
		lvIndex:     startLvPtr,
	}
	l.nextValid()
	targetIndex := uint64(mapIndex) << f.logValuesPerMap
	if l.lvIndex > targetIndex {
		return nil, fmt.Errorf("log value pointer %d of last block of map is after map boundary %d", l.lvIndex, targetIndex)
	}
	// iterate to map boundary
	for l.lvIndex < targetIndex {
		if l.finished {
			return nil, fmt.Errorf("iterator already finished at %d before map boundary target %d", l.lvIndex, targetIndex)
		}
		if err := l.next(); err != nil {
			return nil, fmt.Errorf("failed to advance log iterator at %d before map boundary target %d: %v", l.lvIndex, targetIndex, err)
		}
	}
	return l, nil
}
// updateChainView updates the iterator's chain view if it still matches the
// previous view at the current position. Returns true if successful.
func (l *logIterator) updateChainView(cv *ChainView) bool {
	if matchViews(cv, l.chainView, l.blockNumber) {
		// the new view agrees with the old one up to the current block, so
		// switching over is safe
		l.chainView = cv
		return true
	}
	return false
}
// getValueHash returns the log value hash at the current position.
// The zero hash is returned at a block delimiter or after the iteration
// has finished.
func (l *logIterator) getValueHash() common.Hash {
	if l.finished || l.delimiter {
		return common.Hash{}
	}
	currentLog := l.receipts[l.txIndex].Logs[l.logIndex]
	if l.topicIndex > 0 {
		// positions 1..len(Topics) map to the log's topic values
		return topicValue(currentLog.Topics[l.topicIndex-1])
	}
	// position 0 is the log emitter's address value
	return addressValue(currentLog.Address)
}
// next moves the iterator to the next log value index.
//
// Advancing past a block delimiter loads the receipts of the following block
// and resets the intra-block pointers; advancing within a block steps to the
// next topic position and lets nextValid skip to the next existing log value.
func (l *logIterator) next() error {
	if l.finished {
		// stay in finished state; lvIndex is not advanced any further
		return nil
	}
	if l.delimiter {
		// a delimiter is followed by the first log value of the next block
		l.delimiter = false
		l.blockNumber++
		l.receipts = l.chainView.getReceipts(l.blockNumber)
		if l.receipts == nil {
			return fmt.Errorf("receipts not found for block %d", l.blockNumber)
		}
		l.txIndex, l.logIndex, l.topicIndex, l.blockStart = 0, 0, 0, true
	} else {
		l.topicIndex++
		l.blockStart = false
	}
	l.lvIndex++
	// skip to the next valid position (or delimiter/finished state)
	l.nextValid()
	return nil
}
// nextValid updates the internal transaction, log and topic index pointers
// to the next existing log value of the given block if necessary.
// Note that nextValid does not advance the log value index pointer.
func (l *logIterator) nextValid() {
	for ; l.txIndex < len(l.receipts); l.txIndex++ {
		receipt := l.receipts[l.txIndex]
		for ; l.logIndex < len(receipt.Logs); l.logIndex++ {
			log := receipt.Logs[l.logIndex]
			if l.topicIndex <= len(log.Topics) {
				// position 0 (address) up to len(Topics) (last topic) are
				// valid for this log, so the current position stands
				return
			}
			// past the last topic of this log; continue with the next log
			l.topicIndex = 0
		}
		l.logIndex = 0
	}
	// no more log values in this block: either finish (head block reached)
	// or emit a block delimiter next
	if l.blockNumber == l.chainView.headNumber {
		l.finished = true
	} else {
		l.delimiter = true
	}
}

934
core/filtermaps/matcher.go Normal file
View file

@ -0,0 +1,934 @@
// Copyright 2024 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package filtermaps
import (
"context"
"errors"
"fmt"
"math"
"sync"
"sync/atomic"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/mclock"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/log"
)
// doRuntimeStats enables collection and printing of matcher runtime
// statistics; intended for manual performance analysis only.
const doRuntimeStats = false

// ErrMatchAll is returned when the specified filter matches everything.
// Handling this case in filtermaps would require an extra special case and
// would actually be slower than reverting to legacy filter.
var ErrMatchAll = errors.New("match all patterns not supported")
// MatcherBackend defines the functions required for searching in the log index
// data structure. It is currently implemented by FilterMapsMatcherBackend but
// once EIP-7745 is implemented and active, these functions can also be trustlessly
// served by a remote prover.
type MatcherBackend interface {
	// GetParams returns the filter map parameters of the backing log index.
	GetParams() *Params
	// GetBlockLvPointer returns the log value index pointer belonging to the
	// given block number.
	GetBlockLvPointer(ctx context.Context, blockNumber uint64) (uint64, error)
	// GetFilterMapRow returns the given row of the given filter map; if
	// baseLayerOnly is true, only the base mapping layer part is required.
	GetFilterMapRow(ctx context.Context, mapIndex, rowIndex uint32, baseLayerOnly bool) (FilterRow, error)
	// GetLogByLvIndex returns the log located at the given log value index.
	GetLogByLvIndex(ctx context.Context, lvIndex uint64) (*types.Log, error)
	// SyncLogIndex synchronizes with the indexer and reports the current
	// head and the valid/indexed block ranges.
	SyncLogIndex(ctx context.Context) (SyncRange, error)
	// Close releases the matcher backend.
	Close()
}
// SyncRange is returned by MatcherBackend.SyncLogIndex. It contains the latest
// chain head, the indexed range that is currently consistent with the chain
// and the valid range that has not been changed and has been consistent with
// all states of the chain since the previous SyncLogIndex or the creation of
// the matcher backend.
type SyncRange struct {
	HeadNumber uint64 // number of the latest chain head block
	// block range where the index has not changed since the last matcher sync
	// and therefore the set of matches found in this region is guaranteed to
	// be valid and complete.
	Valid                 bool
	FirstValid, LastValid uint64
	// block range indexed according to the given chain head.
	Indexed                    bool
	FirstIndexed, LastIndexed uint64
}
// GetPotentialMatches returns a list of logs that are potential matches for the
// given filter criteria. If parts of the log index in the searched range are
// missing or changed during the search process then the resulting logs belonging
// to that block range might be missing or incorrect.
// Also note that the returned list may contain false positives.
//
// Epochs are processed by a fixed-size pool of worker goroutines while the
// main loop below dispatches tasks and collects finished results strictly in
// epoch order, so the returned logs are ordered by log value index.
func GetPotentialMatches(ctx context.Context, backend MatcherBackend, firstBlock, lastBlock uint64, addresses []common.Address, topics [][]common.Hash) ([]*types.Log, error) {
	params := backend.GetParams()
	var getLogStats runtimeStats
	// find the log value index range to search
	firstIndex, err := backend.GetBlockLvPointer(ctx, firstBlock)
	if err != nil {
		return nil, fmt.Errorf("failed to retrieve log value pointer for first block %d: %v", firstBlock, err)
	}
	lastIndex, err := backend.GetBlockLvPointer(ctx, lastBlock+1)
	if err != nil {
		return nil, fmt.Errorf("failed to retrieve log value pointer after last block %d: %v", lastBlock, err)
	}
	if lastIndex > 0 {
		// the search range is inclusive; step back from the next block's
		// first entry to the last entry belonging to lastBlock
		lastIndex--
	}
	firstMap, lastMap := uint32(firstIndex>>params.logValuesPerMap), uint32(lastIndex>>params.logValuesPerMap)
	firstEpoch, lastEpoch := firstMap>>params.logMapsPerEpoch, lastMap>>params.logMapsPerEpoch
	// build matcher according to the given filter criteria
	matchers := make([]matcher, len(topics)+1)
	// matchAddress signals a match when there is a match for any of the given
	// addresses.
	// If the list of addresses is empty then it creates a "wild card" matcher
	// that signals every index as a potential match.
	matchAddress := make(matchAny, len(addresses))
	for i, address := range addresses {
		matchAddress[i] = &singleMatcher{backend: backend, value: addressValue(address)}
	}
	matchers[0] = matchAddress
	for i, topicList := range topics {
		// matchTopic signals a match when there is a match for any of the topics
		// specified for the given position (topicList).
		// If topicList is empty then it creates a "wild card" matcher that signals
		// every index as a potential match.
		matchTopic := make(matchAny, len(topicList))
		for j, topic := range topicList {
			matchTopic[j] = &singleMatcher{backend: backend, value: topicValue(topic)}
		}
		matchers[i+1] = matchTopic
	}
	// matcher is the final sequence matcher that signals a match when all underlying
	// matchers signal a match for consecutive log value indices.
	matcher := newMatchSequence(params, matchers)
	// processEpoch returns the potentially matching logs from the given epoch.
	processEpoch := func(epochIndex uint32) ([]*types.Log, error) {
		var logs []*types.Log
		// create a list of map indices to process
		fm, lm := epochIndex<<params.logMapsPerEpoch, (epochIndex+1)<<params.logMapsPerEpoch-1
		if fm < firstMap {
			fm = firstMap
		}
		if lm > lastMap {
			lm = lastMap
		}
		// the epoch's map range clamped to the searched range
		mapIndices := make([]uint32, lm+1-fm)
		for i := range mapIndices {
			mapIndices[i] = fm + uint32(i)
		}
		// find potential matches
		matches, err := getAllMatches(ctx, matcher, mapIndices)
		if err != nil {
			return logs, err
		}
		// get the actual logs located at the matching log value indices
		var st int
		getLogStats.setState(&st, stGetLog)
		defer getLogStats.setState(&st, stNone)
		for _, m := range matches {
			if m == nil {
				// a nil result is a wild card match for a whole map, meaning
				// the filter criteria match everything
				return nil, ErrMatchAll
			}
			mlogs, err := getLogsFromMatches(ctx, backend, firstIndex, lastIndex, m)
			if err != nil {
				return logs, err
			}
			logs = append(logs, mlogs...)
		}
		getLogStats.addAmount(st, int64(len(logs)))
		return logs, nil
	}
	// task represents the processing of a single epoch by a worker goroutine;
	// done is closed once logs/err have been filled in.
	type task struct {
		epochIndex uint32
		logs       []*types.Log
		err        error
		done       chan struct{}
	}
	taskCh := make(chan *task)
	var wg sync.WaitGroup
	defer func() {
		// stop the workers and wait for them before returning
		close(taskCh)
		wg.Wait()
	}()
	worker := func() {
		for task := range taskCh {
			if task == nil {
				// nil task signals that no more work will be dispatched
				break
			}
			task.logs, task.err = processEpoch(task.epochIndex)
			close(task.done)
		}
		wg.Done()
	}
	start := time.Now()
	// four parallel epoch-processing workers
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go worker()
	}
	var logs []*types.Log
	// startEpoch is the next task to send whenever a worker can accept it.
	// waitEpoch is the next task we are waiting for to finish in order to append
	// results in the correct order.
	startEpoch, waitEpoch := firstEpoch, firstEpoch
	tasks := make(map[uint32]*task)
	tasks[startEpoch] = &task{epochIndex: startEpoch, done: make(chan struct{})}
	for waitEpoch <= lastEpoch {
		select {
		case taskCh <- tasks[startEpoch]:
			// note: once startEpoch passes lastEpoch, tasks[startEpoch] is nil
			// and sending it retires one idle worker
			startEpoch++
			if startEpoch <= lastEpoch {
				if tasks[startEpoch] == nil {
					tasks[startEpoch] = &task{epochIndex: startEpoch, done: make(chan struct{})}
				}
			}
		case <-tasks[waitEpoch].done:
			// collect results strictly in epoch order
			logs = append(logs, tasks[waitEpoch].logs...)
			if err := tasks[waitEpoch].err; err != nil {
				if err == ErrMatchAll {
					return logs, err
				}
				return logs, fmt.Errorf("failed to process log index epoch %d: %v", waitEpoch, err)
			}
			delete(tasks, waitEpoch)
			waitEpoch++
			if waitEpoch <= lastEpoch {
				if tasks[waitEpoch] == nil {
					tasks[waitEpoch] = &task{epochIndex: waitEpoch, done: make(chan struct{})}
				}
			}
		}
	}
	if doRuntimeStats {
		log.Info("Log search finished", "elapsed", time.Since(start))
		for i, ma := range matchers {
			for j, m := range ma.(matchAny) {
				log.Info("Single matcher stats", "matchSequence", i, "matchAny", j)
				m.(*singleMatcher).stats.print()
			}
		}
		log.Info("Get log stats")
		getLogStats.print()
	}
	return logs, nil
}
// getLogsFromMatches returns the list of potentially matching logs located at
// the given list of matching log indices. Matches outside the firstIndex to
// lastIndex range are not returned.
func getLogsFromMatches(ctx context.Context, backend MatcherBackend, firstIndex, lastIndex uint64, matches potentialMatches) ([]*types.Log, error) {
	var logs []*types.Log
	for _, lvIndex := range matches {
		// skip matches outside the requested inclusive range
		if lvIndex < firstIndex || lvIndex > lastIndex {
			continue
		}
		logEntry, err := backend.GetLogByLvIndex(ctx, lvIndex)
		if err != nil {
			return logs, fmt.Errorf("failed to retrieve log at index %d: %v", lvIndex, err)
		}
		if logEntry == nil {
			continue
		}
		logs = append(logs, logEntry)
	}
	return logs, nil
}
// matcher defines a general abstraction for any matcher configuration that
// can instantiate a matcherInstance.
type matcher interface {
	// newInstance creates an instance operating on the given set of map indices.
	newInstance(mapIndices []uint32) matcherInstance
}

// matcherInstance defines a general abstraction for a matcher configuration
// working on a specific set of map indices and eventually returning a list of
// potentially matching log value indices.
// Note that processing happens per mapping layer, each call returning a set
// of results for the maps where the processing has been finished at the given
// layer. Map indices can also be dropped before a result is returned for them
// in case the result is no longer interesting. Dropping indices twice or after
// a result has been returned has no effect. Exactly one matcherResult is
// returned per requested map index unless dropped.
type matcherInstance interface {
	// getMatchesForLayer processes the given mapping layer and returns results
	// for the maps finished at that layer.
	getMatchesForLayer(ctx context.Context, layerIndex uint32) ([]matcherResult, error)
	// dropIndices abandons processing of the given map indices.
	dropIndices(mapIndices []uint32)
}
// matcherResult contains the list of potentially matching log value indices
// for a given map index.
type matcherResult struct {
	mapIndex uint32           // index of the processed filter map
	matches  potentialMatches // sorted match list; nil means "wild card" (every index matches)
}
// getAllMatches creates an instance for a given matcher and set of map indices,
// iterates through mapping layers and collects all results, then returns all
// results in the same order as the map indices were specified.
//
// The loop relies on the matcherInstance contract that exactly one result is
// eventually returned per requested map index, so it terminates once the
// result map has an entry for each requested index.
func getAllMatches(ctx context.Context, matcher matcher, mapIndices []uint32) ([]potentialMatches, error) {
	instance := matcher.newInstance(mapIndices)
	resultsMap := make(map[uint32]potentialMatches)
	for layerIndex := uint32(0); len(resultsMap) < len(mapIndices); layerIndex++ {
		results, err := instance.getMatchesForLayer(ctx, layerIndex)
		if err != nil {
			return nil, err
		}
		for _, result := range results {
			resultsMap[result.mapIndex] = result.matches
		}
	}
	// restore the caller's ordering
	matches := make([]potentialMatches, len(mapIndices))
	for i, mapIndex := range mapIndices {
		matches[i] = resultsMap[mapIndex]
	}
	return matches, nil
}
// singleMatcher implements matcher by returning matches for a single log value hash.
type singleMatcher struct {
	backend MatcherBackend // log index data source
	value   common.Hash    // the log value hash to search for
	stats   runtimeStats   // collected only when doRuntimeStats is enabled
}

// singleMatcherInstance is an instance of singleMatcher.
type singleMatcherInstance struct {
	*singleMatcher
	mapIndices []uint32 // map indices not yet finished or dropped
	// filterRows collects the row fragments fetched per map index across
	// layers until the row is complete and can be evaluated.
	filterRows map[uint32][]FilterRow
}
// newInstance creates a new instance of singleMatcher.
// The supplied index list is copied so that later in-place filtering does not
// affect the caller's slice.
func (m *singleMatcher) newInstance(mapIndices []uint32) matcherInstance {
	rows := make(map[uint32][]FilterRow, len(mapIndices))
	for _, mapIndex := range mapIndices {
		rows[mapIndex] = []FilterRow{}
	}
	indices := append(make([]uint32, 0, len(mapIndices)), mapIndices...)
	return &singleMatcherInstance{
		singleMatcher: m,
		mapIndices:    indices,
		filterRows:    rows,
	}
}
// getMatchesForLayer implements matcherInstance.
//
// For each still-active map index it fetches the relevant filter map row at
// the given layer. A row shorter than the layer's maximum length means no
// further layers exist for that row, so the accumulated row fragments can be
// evaluated and a result emitted; otherwise the fragments are kept and the
// map index stays active for the next layer.
func (m *singleMatcherInstance) getMatchesForLayer(ctx context.Context, layerIndex uint32) (results []matcherResult, err error) {
	var st int
	m.stats.setState(&st, stOther)
	params := m.backend.GetParams()
	maskedMapIndex, rowIndex := uint32(math.MaxUint32), uint32(0)
	for _, mapIndex := range m.mapIndices {
		filterRows, ok := m.filterRows[mapIndex]
		if !ok {
			// already finished or dropped
			continue
		}
		if mm := params.maskedMapIndex(mapIndex, layerIndex); mm != maskedMapIndex {
			// only recalculate rowIndex when necessary
			maskedMapIndex = mm
			rowIndex = params.rowIndex(mapIndex, layerIndex, m.value)
		}
		if layerIndex == 0 {
			m.stats.setState(&st, stFetchFirst)
		} else {
			m.stats.setState(&st, stFetchMore)
		}
		// base-layer-only data is sufficient for layer zero
		filterRow, err := m.backend.GetFilterMapRow(ctx, mapIndex, rowIndex, layerIndex == 0)
		if err != nil {
			m.stats.setState(&st, stNone)
			return nil, fmt.Errorf("failed to retrieve filter map %d row %d: %v", mapIndex, rowIndex, err)
		}
		m.stats.addAmount(st, int64(len(filterRow)))
		m.stats.setState(&st, stOther)
		filterRows = append(filterRows, filterRow)
		if uint32(len(filterRow)) < params.maxRowLength(layerIndex) {
			// row is not full at this layer, so it is complete; evaluate it
			m.stats.setState(&st, stProcess)
			matches := params.potentialMatches(filterRows, mapIndex, m.value)
			m.stats.addAmount(st, int64(len(matches)))
			results = append(results, matcherResult{
				mapIndex: mapIndex,
				matches:  matches,
			})
			m.stats.setState(&st, stOther)
			delete(m.filterRows, mapIndex)
		} else {
			// row may continue on the next layer; keep the fragments
			m.filterRows[mapIndex] = filterRows
		}
	}
	m.cleanMapIndices()
	m.stats.setState(&st, stNone)
	return results, nil
}
// dropIndices implements matcherInstance.
// Dropping removes the per-map row state; cleanMapIndices then prunes the
// active index list accordingly.
func (m *singleMatcherInstance) dropIndices(dropIndices []uint32) {
	for _, idx := range dropIndices {
		delete(m.filterRows, idx)
	}
	m.cleanMapIndices()
}
// cleanMapIndices removes map indices from the list if there is no matching
// filterRows entry because a result has been returned or the index has been
// dropped.
func (m *singleMatcherInstance) cleanMapIndices() {
	// filter in place, reusing the backing array
	kept := m.mapIndices[:0]
	for _, mapIndex := range m.mapIndices {
		if _, ok := m.filterRows[mapIndex]; ok {
			kept = append(kept, mapIndex)
		}
	}
	m.mapIndices = kept
}
// matchAny combines a set of matchers and returns a match for every position
// where any of the underlying matchers signaled a match. A zero-length matchAny
// acts as a "wild card" that signals a potential match at every position.
type matchAny []matcher

// matchAnyInstance is an instance of matchAny.
type matchAnyInstance struct {
	matchAny
	childInstances []matcherInstance // one instance per child matcher
	// childResults collects per-map partial results until every child has
	// reported for that map.
	childResults map[uint32]matchAnyResults
}

// matchAnyResults is used by matchAnyInstance to collect results from all
// child matchers for a specific map index. Once all results has been received
// a merged result is returned for the given map and this structure is discarded.
type matchAnyResults struct {
	matches  []potentialMatches // per-child match lists, indexed by child position
	done     []bool             // per-child flag: result already received
	needMore int                // number of children that have not reported yet
}
// newInstance creates a new instance of matchAny.
//
// A single-child matchAny is transparent: the child's instance is returned
// directly, avoiding the merge bookkeeping.
func (m matchAny) newInstance(mapIndices []uint32) matcherInstance {
	if len(m) == 1 {
		return m[0].newInstance(mapIndices)
	}
	childResults := make(map[uint32]matchAnyResults)
	for _, idx := range mapIndices {
		childResults[idx] = matchAnyResults{
			matches:  make([]potentialMatches, len(m)),
			done:     make([]bool, len(m)),
			needMore: len(m),
		}
	}
	childInstances := make([]matcherInstance, len(m))
	for i, matcher := range m {
		childInstances[i] = matcher.newInstance(mapIndices)
	}
	return &matchAnyInstance{
		matchAny:       m,
		childInstances: childInstances,
		childResults:   childResults,
	}
}
// getMatchesForLayer implements matcherInstance.
//
// Results of the child matchers are collected per map index; once every child
// has reported for a map (or any child reported a wild card, which dominates
// the union), a merged result is emitted and the per-map state is discarded.
func (m *matchAnyInstance) getMatchesForLayer(ctx context.Context, layerIndex uint32) (mergedResults []matcherResult, err error) {
	if len(m.matchAny) == 0 {
		// return "wild card" results (potentialMatches(nil) is interpreted as a
		// potential match at every log value index of the map).
		mergedResults = make([]matcherResult, len(m.childResults))
		var i int
		for mapIndex := range m.childResults {
			mergedResults[i] = matcherResult{mapIndex: mapIndex, matches: nil}
			i++
		}
		return mergedResults, nil
	}
	for i, childInstance := range m.childInstances {
		results, err := childInstance.getMatchesForLayer(ctx, layerIndex)
		if err != nil {
			return nil, fmt.Errorf("failed to evaluate child matcher on layer %d: %v", layerIndex, err)
		}
		for _, result := range results {
			mr, ok := m.childResults[result.mapIndex]
			if !ok || mr.done[i] {
				// map already merged/dropped, or duplicate child result
				continue
			}
			mr.done[i] = true
			mr.matches[i] = result.matches
			mr.needMore--
			if mr.needMore == 0 || result.matches == nil {
				mergedResults = append(mergedResults, matcherResult{
					mapIndex: result.mapIndex,
					matches:  mergeResults(mr.matches),
				})
				delete(m.childResults, result.mapIndex)
			} else {
				// mr is a value copy; write the updated bookkeeping back
				m.childResults[result.mapIndex] = mr
			}
		}
	}
	return mergedResults, nil
}
// dropIndices implements matcherInstance.
// The drop is propagated to every child instance before the local merge state
// is discarded.
func (m *matchAnyInstance) dropIndices(dropIndices []uint32) {
	for _, child := range m.childInstances {
		child.dropIndices(dropIndices)
	}
	for _, idx := range dropIndices {
		delete(m.childResults, idx)
	}
}
// mergeResults merges multiple lists of matches into a single one, preserving
// ascending order and filtering out any duplicates.
// A nil input list acts as a wild card and makes the merged result nil too.
func mergeResults(results []potentialMatches) potentialMatches {
	if len(results) == 0 {
		return nil
	}
	totalLen := 0
	for _, list := range results {
		if list == nil {
			// nil is a wild card; all indices in map range are potential matches
			return nil
		}
		totalLen += len(list)
	}
	out := make(potentialMatches, 0, totalLen)
	for {
		// pick the list whose head element is smallest
		pick := -1
		for i, list := range results {
			if len(list) > 0 && (pick < 0 || list[0] < results[pick][0]) {
				pick = i
			}
		}
		if pick < 0 {
			// all lists exhausted
			return out
		}
		next := results[pick][0]
		results[pick] = results[pick][1:]
		// skip duplicates of the previously emitted value
		if len(out) == 0 || next > out[len(out)-1] {
			out = append(out, next)
		}
	}
}
// matchSequence combines two matchers, a "base" and a "next" matcher with a
// positive integer offset so that the resulting matcher signals a match at log
// value index X when the base matcher returns a match at X and the next matcher
// gives a match at X+offset. Note that matchSequence can be used recursively to
// detect any log value sequence.
type matchSequence struct {
	params     *Params
	base, next matcher // base matches at X, next at X+offset
	offset     uint64
	// statsLock guards baseStats/nextStats, which are shared between
	// concurrently running instances of this matcher.
	statsLock            sync.Mutex
	baseStats, nextStats matchOrderStats
}
// newInstance creates a new instance of matchSequence.
//
// For each base map index both that index and its successor are requested
// from the next matcher, since matches of the next matcher belonging to base
// matches near the end of a map can fall into the following map. The
// nextRequested set is used to deduplicate consecutive index runs.
func (m *matchSequence) newInstance(mapIndices []uint32) matcherInstance {
	// determine set of indices to request from next matcher
	nextIndices := make([]uint32, 0, len(mapIndices)*3/2)
	needMatched := make(map[uint32]struct{})
	baseRequested := make(map[uint32]struct{})
	nextRequested := make(map[uint32]struct{})
	for _, mapIndex := range mapIndices {
		needMatched[mapIndex] = struct{}{}
		baseRequested[mapIndex] = struct{}{}
		if _, ok := nextRequested[mapIndex]; !ok {
			// not already added as the successor of the previous index
			nextIndices = append(nextIndices, mapIndex)
			nextRequested[mapIndex] = struct{}{}
		}
		nextIndices = append(nextIndices, mapIndex+1)
		nextRequested[mapIndex+1] = struct{}{}
	}
	return &matchSequenceInstance{
		matchSequence: m,
		baseInstance:  m.base.newInstance(mapIndices),
		nextInstance:  m.next.newInstance(nextIndices),
		needMatched:   needMatched,
		baseRequested: baseRequested,
		nextRequested: nextRequested,
		baseResults:   make(map[uint32]potentialMatches),
		nextResults:   make(map[uint32]potentialMatches),
	}
}
// matchOrderStats collects statistics about the evaluating cost and the
// occurrence of empty result sets from both base and next child matchers.
// This allows the optimization of the evaluation order by evaluating the
// child first that is cheaper and/or gives empty results more often and not
// evaluating the other child in most cases.
// Note that matchOrderStats is specific to matchSequence and the results are
// carried over to future instances as the results are mostly useful when
// evaluating layer zero of each instance. For this reason it should be used
// in a thread safe way as is may be accessed from multiple worker goroutines.
type matchOrderStats struct {
	totalCount, nonEmptyCount, totalCost uint64
}

// add collects statistics after a child has been evaluated for a certain layer.
// The per-result cost is approximated as layerIndex+1, i.e. the number of
// layers that had to be processed to obtain the result.
func (ms *matchOrderStats) add(empty bool, layerIndex uint32) {
	if empty && layerIndex != 0 {
		// matchers may be evaluated for higher layers after all results have
		// been returned. Also, empty results are not relevant when previous
		// layers yielded matches already, so these cases can be ignored.
		return
	}
	ms.totalCount++
	if !empty {
		ms.nonEmptyCount++
	}
	ms.totalCost += uint64(layerIndex + 1)
}
// mergeStats merges two sets of matchOrderStats by summing each counter.
// The caller is responsible for any locking required.
func (ms *matchOrderStats) mergeStats(add matchOrderStats) {
	ms.totalCount += add.totalCount
	ms.nonEmptyCount += add.nonEmptyCount
	ms.totalCost += add.totalCost
}
// baseFirst returns true if the base child matcher should be evaluated first.
//
// It compares the expected cost of the two evaluation orders. Evaluating base
// first costs baseCost/baseCount plus, whenever base is non-empty
// (probability nonEmptyCount/totalCount), the next matcher's average cost;
// and symmetrically for next-first. Both sides of the comparison below are
// those expected costs multiplied by baseStats.totalCount*nextStats.totalCount
// to avoid divisions.
func (m *matchSequence) baseFirst() bool {
	m.statsLock.Lock()
	bf := float64(m.baseStats.totalCost)*float64(m.nextStats.totalCount)+
		float64(m.baseStats.nonEmptyCount)*float64(m.nextStats.totalCost) <
		float64(m.baseStats.totalCost)*float64(m.nextStats.nonEmptyCount)+
		float64(m.nextStats.totalCost)*float64(m.baseStats.totalCount)
	m.statsLock.Unlock()
	return bf
}
// mergeBaseStats merges a set of matchOrderStats into the base matcher stats.
func (m *matchSequence) mergeBaseStats(stats matchOrderStats) {
	m.statsLock.Lock()
	defer m.statsLock.Unlock()
	m.baseStats.mergeStats(stats)
}

// mergeNextStats merges a set of matchOrderStats into the next matcher stats.
func (m *matchSequence) mergeNextStats(stats matchOrderStats) {
	m.statsLock.Lock()
	defer m.statsLock.Unlock()
	m.nextStats.mergeStats(stats)
}
// newMatchSequence creates a recursive sequence matcher from a list of underlying
// matchers. The resulting matcher signals a match at log value index X when each
// underlying matcher matchers[i] returns a match at X+i.
func newMatchSequence(params *Params, matchers []matcher) matcher {
	switch len(matchers) {
	case 0:
		panic("zero length sequence matchers are not allowed")
	case 1:
		// a single matcher needs no sequence wrapper
		return matchers[0]
	}
	// recursively chain all but the last matcher as the base, with the last
	// one as the next matcher at the appropriate offset
	last := len(matchers) - 1
	return &matchSequence{
		params: params,
		base:   newMatchSequence(params, matchers[:last]),
		next:   matchers[last],
		offset: uint64(last),
	}
}
// matchSequenceInstance is an instance of matchSequence.
type matchSequenceInstance struct {
	*matchSequence
	baseInstance, nextInstance matcherInstance
	// needMatched holds map indices for which a final result is still owed;
	// baseRequested/nextRequested track indices still pending from the
	// respective child instance.
	baseRequested, nextRequested, needMatched map[uint32]struct{}
	// per-map results already received from the child instances
	baseResults, nextResults map[uint32]potentialMatches
}
// getMatchesForLayer implements matcherInstance.
//
// Both children are evaluated for the layer, in the statistically cheaper
// order (see baseFirst). A final result for a map index can only be emitted
// once the base result for that index and the next results for both that
// index and its successor are available (no longer pending).
func (m *matchSequenceInstance) getMatchesForLayer(ctx context.Context, layerIndex uint32) (matchedResults []matcherResult, err error) {
	// decide whether to evaluate base or next matcher first
	baseFirst := m.baseFirst()
	if baseFirst {
		if err := m.evalBase(ctx, layerIndex); err != nil {
			return nil, err
		}
	}
	if err := m.evalNext(ctx, layerIndex); err != nil {
		return nil, err
	}
	if !baseFirst {
		if err := m.evalBase(ctx, layerIndex); err != nil {
			return nil, err
		}
	}
	// evaluate and return matched results where possible
	for mapIndex := range m.needMatched {
		if _, ok := m.baseRequested[mapIndex]; ok {
			continue
		}
		if _, ok := m.nextRequested[mapIndex]; ok {
			continue
		}
		if _, ok := m.nextRequested[mapIndex+1]; ok {
			continue
		}
		matchedResults = append(matchedResults, matcherResult{
			mapIndex: mapIndex,
			matches:  m.params.matchResults(mapIndex, m.offset, m.baseResults[mapIndex], m.nextResults[mapIndex], m.nextResults[mapIndex+1]),
		})
		delete(m.needMatched, mapIndex)
	}
	return matchedResults, nil
}
// dropIndices implements matcherInstance.
//
// needMatched entries are removed first so that the subsequent dropBase/
// dropNext checks no longer consider the dropped indices as required, making
// the child drops possible.
func (m *matchSequenceInstance) dropIndices(dropIndices []uint32) {
	for _, mapIndex := range dropIndices {
		delete(m.needMatched, mapIndex)
	}
	var dropBase, dropNext []uint32
	for _, mapIndex := range dropIndices {
		if m.dropBase(mapIndex) {
			dropBase = append(dropBase, mapIndex)
		}
	}
	m.baseInstance.dropIndices(dropBase)
	// each dropped index may release both the same and the successor index
	// from the next matcher
	for _, mapIndex := range dropIndices {
		if m.dropNext(mapIndex) {
			dropNext = append(dropNext, mapIndex)
		}
		if m.dropNext(mapIndex + 1) {
			dropNext = append(dropNext, mapIndex+1)
		}
	}
	m.nextInstance.dropIndices(dropNext)
}
// evalBase evaluates the base child matcher and drops map indices from the
// next matcher if possible.
//
// An empty (non-nil, zero-length) base result can make the corresponding
// next matcher requests unnecessary; dropNext performs that check.
func (m *matchSequenceInstance) evalBase(ctx context.Context, layerIndex uint32) error {
	results, err := m.baseInstance.getMatchesForLayer(ctx, layerIndex)
	if err != nil {
		return fmt.Errorf("failed to evaluate base matcher on layer %d: %v", layerIndex, err)
	}
	var (
		dropIndices []uint32
		stats       matchOrderStats
	)
	for _, r := range results {
		m.baseResults[r.mapIndex] = r.matches
		delete(m.baseRequested, r.mapIndex)
		// "empty" means a definite no-match result, not a wild card (nil)
		stats.add(r.matches != nil && len(r.matches) == 0, layerIndex)
	}
	m.mergeBaseStats(stats)
	for _, r := range results {
		if m.dropNext(r.mapIndex) {
			dropIndices = append(dropIndices, r.mapIndex)
		}
		if m.dropNext(r.mapIndex + 1) {
			dropIndices = append(dropIndices, r.mapIndex+1)
		}
	}
	if len(dropIndices) > 0 {
		m.nextInstance.dropIndices(dropIndices)
	}
	return nil
}
// evalNext evaluates the next child matcher and drops map indices from the
// base matcher if possible.
//
// A next result at mapIndex may render the base requests at both mapIndex-1
// and mapIndex unnecessary; dropBase performs that check.
func (m *matchSequenceInstance) evalNext(ctx context.Context, layerIndex uint32) error {
	results, err := m.nextInstance.getMatchesForLayer(ctx, layerIndex)
	if err != nil {
		return fmt.Errorf("failed to evaluate next matcher on layer %d: %v", layerIndex, err)
	}
	var (
		dropIndices []uint32
		stats       matchOrderStats
	)
	for _, r := range results {
		m.nextResults[r.mapIndex] = r.matches
		delete(m.nextRequested, r.mapIndex)
		// "empty" means a definite no-match result, not a wild card (nil)
		stats.add(r.matches != nil && len(r.matches) == 0, layerIndex)
	}
	m.mergeNextStats(stats)
	for _, r := range results {
		if r.mapIndex > 0 && m.dropBase(r.mapIndex-1) {
			dropIndices = append(dropIndices, r.mapIndex-1)
		}
		if m.dropBase(r.mapIndex) {
			dropIndices = append(dropIndices, r.mapIndex)
		}
	}
	if len(dropIndices) > 0 {
		m.baseInstance.dropIndices(dropIndices)
	}
	return nil
}
// dropBase checks whether the given map index can be dropped from the base
// matcher based on the known results from the next matcher and removes it
// from the internal requested set and returns true if possible.
//
// The base result is only needed while its map is still in needMatched and
// the next results for both mapIndex and mapIndex+1 could still contain
// entries that sequence-match against base entries of mapIndex.
func (m *matchSequenceInstance) dropBase(mapIndex uint32) bool {
	if _, ok := m.baseRequested[mapIndex]; !ok {
		// not pending (already received or dropped)
		return false
	}
	if _, ok := m.needMatched[mapIndex]; ok {
		// nil means the next result is unknown or a wild card; a non-empty
		// tail reaching into this map's shifted range also keeps base needed
		if next := m.nextResults[mapIndex]; next == nil ||
			(len(next) > 0 && next[len(next)-1] >= (uint64(mapIndex)<<m.params.logValuesPerMap)+m.offset) {
			return false
		}
		if nextNext := m.nextResults[mapIndex+1]; nextNext == nil ||
			(len(nextNext) > 0 && nextNext[0] < (uint64(mapIndex+1)<<m.params.logValuesPerMap)+m.offset) {
			return false
		}
	}
	delete(m.baseRequested, mapIndex)
	return true
}
// dropNext checks whether the given map index can be dropped from the next
// matcher based on the known results from the base matcher and removes it
// from the internal requested set and returns true if possible.
//
// The next result at mapIndex can feed sequence matches of two base maps:
// mapIndex-1 (base matches near the end of that map) and mapIndex itself, so
// both are checked while still in needMatched. (For mapIndex==0 the
// mapIndex-1 lookup wraps around and simply misses the map.)
func (m *matchSequenceInstance) dropNext(mapIndex uint32) bool {
	if _, ok := m.nextRequested[mapIndex]; !ok {
		// not pending (already received or dropped)
		return false
	}
	if _, ok := m.needMatched[mapIndex-1]; ok {
		// nil means the base result is unknown or a wild card; a non-empty
		// tail whose shifted position reaches this map also keeps next needed
		if prevBase := m.baseResults[mapIndex-1]; prevBase == nil ||
			(len(prevBase) > 0 && prevBase[len(prevBase)-1]+m.offset >= (uint64(mapIndex)<<m.params.logValuesPerMap)) {
			return false
		}
	}
	if _, ok := m.needMatched[mapIndex]; ok {
		if base := m.baseResults[mapIndex]; base == nil ||
			(len(base) > 0 && base[0]+m.offset < (uint64(mapIndex+1)<<m.params.logValuesPerMap)) {
			return false
		}
	}
	delete(m.nextRequested, mapIndex)
	return true
}
// matchResults returns a list of sequence matches for the given mapIndex and
// offset based on the base matcher's results at mapIndex and the next matcher's
// results at mapIndex and mapIndex+1. Note that acquiring nextNextRes may be
// skipped and it can be substituted with an empty list if baseRes has no potential
// matches that could be sequence matched with anything that could be in nextNextRes.
func (params *Params) matchResults(mapIndex uint32, offset uint64, baseRes, nextRes, nextNextRes potentialMatches) potentialMatches {
	if nextRes == nil || (baseRes != nil && len(baseRes) == 0) {
		// if nextRes is a wild card or baseRes is empty then the sequence matcher
		// result equals baseRes.
		return baseRes
	}
	if len(nextRes) > 0 {
		// discard items from nextRes whose corresponding base matcher results
		// with the negative offset applied would be located at mapIndex-1.
		start := 0
		for start < len(nextRes) && nextRes[start] < uint64(mapIndex)<<params.logValuesPerMap+offset {
			start++
		}
		nextRes = nextRes[start:]
	}
	if len(nextNextRes) > 0 {
		// discard items from nextNextRes whose corresponding base matcher results
		// with the negative offset applied would still be located at mapIndex+1.
		stop := 0
		for stop < len(nextNextRes) && nextNextRes[stop] < uint64(mapIndex+1)<<params.logValuesPerMap+offset {
			stop++
		}
		nextNextRes = nextNextRes[:stop]
	}
	// the result size is bounded both by the number of remaining next matches
	// and (unless baseRes is a wild card) by the number of base matches
	maxLen := len(nextRes) + len(nextNextRes)
	if maxLen == 0 {
		return nextRes
	}
	if len(baseRes) < maxLen {
		maxLen = len(baseRes)
	}
	// iterate through baseRes, nextRes and nextNextRes and collect matching results.
	matchedRes := make(potentialMatches, 0, maxLen)
	for _, nextRes := range []potentialMatches{nextRes, nextNextRes} {
		if baseRes != nil {
			// two-pointer intersection of baseRes+offset with nextRes
			for len(nextRes) > 0 && len(baseRes) > 0 {
				if nextRes[0] > baseRes[0]+offset {
					baseRes = baseRes[1:]
				} else if nextRes[0] < baseRes[0]+offset {
					nextRes = nextRes[1:]
				} else {
					matchedRes = append(matchedRes, baseRes[0])
					baseRes = baseRes[1:]
					nextRes = nextRes[1:]
				}
			}
		} else {
			// baseRes is a wild card so just return next matcher results with
			// negative offset.
			for len(nextRes) > 0 {
				matchedRes = append(matchedRes, nextRes[0]-offset)
				nextRes = nextRes[1:]
			}
		}
	}
	return matchedRes
}
// runtimeStats collects processing time statistics while searching in the log
// index. Used only when the doRuntimeStats global flag is true.
type runtimeStats struct {
dt, cnt, amount [stCount]int64
}
// Processing state identifiers for runtimeStats. The meanings follow the
// constant names; stCount is the number of tracked states and sizes the
// per-state stat arrays.
const (
	stNone = iota
	stFetchFirst
	stFetchMore
	stProcess
	stGetLog
	stOther
	stCount
)
var stNames = []string{"", "fetchFirst", "fetchMore", "process", "getLog", "other"}
// setState sets the processing state to one of the pre-defined constants.
// Processing time spent in each state is measured separately.
// The accounting trick: the current time is added to the state being left and
// subtracted from the state being entered, so dt[s] accumulates the sum of
// (exit time - entry time) over every visit of state s.
func (ts *runtimeStats) setState(state *int, newState int) {
	if !doRuntimeStats || newState == *state {
		// stats collection disabled, or no actual transition
		return
	}
	now := int64(mclock.Now())
	atomic.AddInt64(&ts.dt[*state], now)
	atomic.AddInt64(&ts.dt[newState], -now)
	atomic.AddInt64(&ts.cnt[newState], 1)
	*state = newState
}
// addAmount atomically adds the given amount to the free-form per-state
// counter reported by print alongside the timing stats.
func (ts *runtimeStats) addAmount(state int, amount int64) {
	atomic.AddInt64(&ts.amount[state], amount)
}
// print logs the collected statistics, one line per state (the unnamed
// stNone entry at index 0 is skipped).
func (ts *runtimeStats) print() {
	for i, name := range stNames {
		if i == 0 {
			continue
		}
		log.Info("Matcher stats", "name", name, "dt", time.Duration(ts.dt[i]), "count", ts.cnt[i], "amount", ts.amount[i])
	}
}

View file

@ -0,0 +1,206 @@
// Copyright 2024 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package filtermaps
import (
"context"
"github.com/ethereum/go-ethereum/core/types"
)
// FilterMapsMatcherBackend implements MatcherBackend.
type FilterMapsMatcherBackend struct {
	f *FilterMaps
	// these fields should be accessed under f.matchersLock mutex.
	valid                 bool   // false once the previously reported range has been invalidated
	firstValid, lastValid uint64 // inclusive block range that has stayed consistent since the last sync
	syncCh                chan SyncRange // non-nil while a SyncLogIndex call is waiting for a result
}
// NewMatcherBackend returns a FilterMapsMatcherBackend after registering it in
// the active matcher set.
// Note that Close should always be called when the matcher is no longer used.
func (f *FilterMaps) NewMatcherBackend() *FilterMapsMatcherBackend {
	f.indexLock.RLock()
	defer f.indexLock.RUnlock()
	f.matchersLock.Lock()
	defer f.matchersLock.Unlock()

	// the initially valid range is the currently indexed block range
	fm := &FilterMapsMatcherBackend{
		f:          f,
		valid:      f.indexedRange.initialized && f.indexedRange.afterLastIndexedBlock > f.indexedRange.firstIndexedBlock,
		firstValid: f.indexedRange.firstIndexedBlock,
		lastValid:  f.indexedRange.afterLastIndexedBlock - 1,
	}
	f.matchers[fm] = struct{}{}
	return fm
}
// GetParams returns the filtermaps parameters.
// Note that the returned pointer references the backend's live parameter set.
// GetParams implements MatcherBackend.
func (fm *FilterMapsMatcherBackend) GetParams() *Params {
	return &fm.f.Params
}
// Close removes the matcher from the set of active matchers and ensures that
// any SyncLogIndex calls are cancelled.
// Close implements MatcherBackend.
func (fm *FilterMapsMatcherBackend) Close() {
	f := fm.f
	f.matchersLock.Lock()
	delete(f.matchers, fm)
	f.matchersLock.Unlock()
}
// GetFilterMapRow returns the given row of the given map. If the row is empty
// then a non-nil zero length row is returned. If baseLayerOnly is true then
// only the first baseRowLength entries of the row are guaranteed to be
// returned.
// Note that the returned slices should not be modified, they should be copied
// on write.
// Note: the ctx parameter is currently unused by this implementation.
// GetFilterMapRow implements MatcherBackend.
func (fm *FilterMapsMatcherBackend) GetFilterMapRow(ctx context.Context, mapIndex, rowIndex uint32, baseLayerOnly bool) (FilterRow, error) {
	return fm.f.getFilterMapRow(mapIndex, rowIndex, baseLayerOnly)
}
// GetBlockLvPointer returns the starting log value index where the log values
// generated by the given block are located. If blockNumber is beyond the current
// head then the first unoccupied log value index is returned.
// The index read lock is held for the duration of the lookup.
// GetBlockLvPointer implements MatcherBackend.
func (fm *FilterMapsMatcherBackend) GetBlockLvPointer(ctx context.Context, blockNumber uint64) (uint64, error) {
	fm.f.indexLock.RLock()
	defer fm.f.indexLock.RUnlock()
	return fm.f.getBlockLvPointer(blockNumber)
}
// GetLogByLvIndex returns the log at the given log value index.
// Note that this function assumes that the log index structure is consistent
// with the canonical chain at the point where the given log value index points.
// If this is not the case then an invalid result may be returned or certain
// logs might not be returned at all.
// No error is returned though because of an inconsistency between the chain and
// the log index. It is the caller's responsibility to verify this consistency
// using SyncLogIndex and re-process certain blocks if necessary.
// GetLogByLvIndex implements MatcherBackend.
func (fm *FilterMapsMatcherBackend) GetLogByLvIndex(ctx context.Context, lvIndex uint64) (*types.Log, error) {
	fm.f.indexLock.RLock()
	defer fm.f.indexLock.RUnlock()
	return fm.f.getLogByLvIndex(lvIndex)
}
// synced signals to the matcher that has triggered a synchronisation that it
// has been finished and the log index is consistent with the chain head passed
// as a parameter.
// Note that if the log index head was far behind the chain head then it might not
// be synced up to the given head in a single step. Still, the latest chain head
// should be passed as a parameter and the existing log index should be consistent
// with that chain.
func (fm *FilterMapsMatcherBackend) synced() {
	fm.f.indexLock.RLock()
	fm.f.matchersLock.Lock()
	defer func() {
		fm.f.matchersLock.Unlock()
		fm.f.indexLock.RUnlock()
	}()
	var (
		indexed bool
		lastIndexed, subLastIndexed uint64
	)
	// if the head block itself is not indexed yet, exclude it from the
	// reported indexed range
	if !fm.f.indexedRange.headBlockIndexed {
		subLastIndexed = 1
	}
	if fm.f.indexedRange.afterLastIndexedBlock-subLastIndexed > fm.f.indexedRange.firstIndexedBlock {
		indexed, lastIndexed = true, fm.f.indexedRange.afterLastIndexedBlock-subLastIndexed-1
	}
	// deliver the result to the waiting SyncLogIndex call; syncCh is buffered
	// (capacity 1) so this send does not block
	fm.syncCh <- SyncRange{
		HeadNumber:   fm.f.indexedView.headNumber,
		Valid:        fm.valid,
		FirstValid:   fm.firstValid,
		LastValid:    fm.lastValid,
		Indexed:      indexed,
		FirstIndexed: fm.f.indexedRange.firstIndexedBlock,
		LastIndexed:  lastIndexed,
	}
	// the indexed range just reported becomes the new valid range
	fm.valid = indexed
	fm.firstValid = fm.f.indexedRange.firstIndexedBlock
	fm.lastValid = lastIndexed
	fm.syncCh = nil
}
// SyncLogIndex ensures that the log index is consistent with the current state
// of the chain and is synced up to the current head. It blocks until this state
// is achieved or the context is cancelled.
// If successful, it returns a SyncRange that contains the latest chain head,
// the indexed range that is currently consistent with the chain and the valid
// range that has not been changed and has been consistent with all states of the
// chain since the previous SyncLogIndex or the creation of the matcher backend.
func (fm *FilterMapsMatcherBackend) SyncLogIndex(ctx context.Context) (SyncRange, error) {
	if fm.f.disabled {
		return SyncRange{HeadNumber: fm.f.targetView.headNumber}, nil
	}
	// register a buffered result channel so that synced() never blocks on it
	ch := make(chan SyncRange, 1)
	fm.f.matchersLock.Lock()
	fm.syncCh = ch
	fm.f.matchersLock.Unlock()
	// request synchronisation from the indexer
	select {
	case <-ctx.Done():
		return SyncRange{}, ctx.Err()
	case fm.f.matcherSyncCh <- fm:
	}
	// wait for the result
	select {
	case <-ctx.Done():
		return SyncRange{}, ctx.Err()
	case vr := <-ch:
		return vr, nil
	}
}
// updateMatchersValidRange iterates through active matchers and limits their
// valid range with the current indexed range. This function should be called
// whenever a part of the log index has been removed, before adding new blocks
// to it.
// Note that this function assumes that the index read lock is being held.
func (f *FilterMaps) updateMatchersValidRange() {
	f.matchersLock.Lock()
	defer f.matchersLock.Unlock()
	for fm := range f.matchers {
		// an empty indexed range invalidates every matcher
		if !f.indexedRange.hasIndexedBlocks() {
			fm.valid = false
			continue
		}
		if !fm.valid {
			continue
		}
		// clamp the remembered valid range to the currently indexed range
		if fm.firstValid < f.indexedRange.firstIndexedBlock {
			fm.firstValid = f.indexedRange.firstIndexedBlock
		}
		if fm.lastValid >= f.indexedRange.afterLastIndexedBlock {
			fm.lastValid = f.indexedRange.afterLastIndexedBlock - 1
		}
		// nothing left of the previously valid range
		if fm.firstValid > fm.lastValid {
			fm.valid = false
		}
	}
}

View file

@ -0,0 +1,87 @@
// Copyright 2024 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package filtermaps
import (
"context"
crand "crypto/rand"
"math/rand"
"testing"
"github.com/ethereum/go-ethereum/common"
)
// TestMatcher generates random filter patterns that are guaranteed to match a
// randomly picked existing log and checks that the log search returns it.
func TestMatcher(t *testing.T) {
	ts := newTestSetup(t)
	defer ts.close()
	ts.chain.addBlocks(100, 10, 10, 4, true)
	ts.setHistory(0, false)
	ts.fm.WaitIdle()
	for i := 0; i < 2000; i++ {
		// pick a random existing log from a random canonical block
		bhash := ts.chain.canonical[rand.Intn(len(ts.chain.canonical))]
		receipts := ts.chain.receipts[bhash]
		if len(receipts) == 0 {
			continue
		}
		receipt := receipts[rand.Intn(len(receipts))]
		if len(receipt.Logs) == 0 {
			continue
		}
		log := receipt.Logs[rand.Intn(len(receipt.Logs))]
		// build a random filter; ok tracks whether at least one position is
		// constrained (an all-wildcard pattern cannot be searched for)
		var ok bool
		addresses := make([]common.Address, rand.Intn(3))
		for i := range addresses {
			crand.Read(addresses[i][:])
		}
		if len(addresses) > 0 {
			// ensure the picked log's address is among the filter addresses
			addresses[rand.Intn(len(addresses))] = log.Address
			ok = true
		}
		topics := make([][]common.Hash, rand.Intn(len(log.Topics)+1))
		for j := range topics {
			topics[j] = make([]common.Hash, rand.Intn(3))
			for i := range topics[j] {
				crand.Read(topics[j][i][:])
			}
			if len(topics[j]) > 0 {
				// ensure the picked log's topic is among the filter topics
				topics[j][rand.Intn(len(topics[j]))] = log.Topics[j]
				ok = true
			}
		}
		if !ok {
			continue // cannot search for match-all pattern
		}
		mb := ts.fm.NewMatcherBackend()
		logs, err := GetPotentialMatches(context.Background(), mb, 0, 1000, addresses, topics)
		mb.Close()
		if err != nil {
			t.Fatalf("Log search error: %v", err)
		}
		// the results must contain the expected log (pointer identity)
		var found bool
		for _, l := range logs {
			if l == log {
				found = true
				break
			}
		}
		if !found {
			t.Fatalf("Log search did not return expected log (addresses: %v, topics: %v, expected log: %v)", addresses, topics, *log)
		}
	}
}

212
core/filtermaps/math.go Normal file
View file

@ -0,0 +1,212 @@
// Copyright 2024 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package filtermaps
import (
"crypto/sha256"
"encoding/binary"
"hash/fnv"
"math"
"sort"
"github.com/ethereum/go-ethereum/common"
)
// Params defines the basic parameters of the log index structure.
type Params struct {
	logMapHeight       uint // log2(mapHeight)
	logMapWidth        uint // log2(mapWidth)
	logMapsPerEpoch    uint // log2(mapsPerEpoch)
	logValuesPerMap    uint // log2(valuesPerMap)
	baseRowLengthRatio uint // baseRowLength / average row length
	logLayerDiff       uint // maxRowLength log2 growth per layer
	// derived fields (filled in by deriveFields)
	mapHeight     uint32 // filter map height (number of rows)
	mapsPerEpoch  uint32 // number of maps in an epoch
	baseRowLength uint32 // maximum number of log values per row on layer 0
	valuesPerMap  uint64 // number of log values marked on each filter map
	// not affecting consensus
	baseRowGroupLength uint32 // length of base row groups in local database
}
// DefaultParams is the set of parameters used on mainnet.
// Note that only the base parameters are set here; the derived fields are
// calculated by calling deriveFields before use.
var DefaultParams = Params{
	logMapHeight:       16,
	logMapWidth:        24,
	logMapsPerEpoch:    10,
	logValuesPerMap:    16,
	baseRowGroupLength: 32,
	baseRowLengthRatio: 8,
	logLayerDiff:       4,
}
// RangeTestParams puts one log value per epoch, ensuring block exact tail
// unindexing for testing (logValuesPerMap == 0 and logMapsPerEpoch == 0 yield
// a single log value per map and a single map per epoch).
var RangeTestParams = Params{
	logMapHeight:       4,
	logMapWidth:        24,
	logMapsPerEpoch:    0,
	logValuesPerMap:    0,
	baseRowGroupLength: 32,
	baseRowLengthRatio: 16, // baseRowLength >= 1
	logLayerDiff:       4,
}
// deriveFields calculates the derived fields of the parameter set from the
// log2 base parameters.
func (p *Params) deriveFields() {
	p.valuesPerMap = 1 << p.logValuesPerMap
	p.mapsPerEpoch = 1 << p.logMapsPerEpoch
	p.mapHeight = 1 << p.logMapHeight
	// base row length is proportional to the average number of entries per row
	p.baseRowLength = uint32(p.valuesPerMap * uint64(p.baseRowLengthRatio) / uint64(p.mapHeight))
}
// addressValue returns the log value hash of a log emitting address.
func addressValue(address common.Address) common.Hash {
	// sha256.Sum256 returns a [32]byte which converts directly to common.Hash;
	// this replaces the equivalent manual hasher.New/Write/Sum sequence.
	return common.Hash(sha256.Sum256(address[:]))
}
// topicValue returns the log value hash of a log topic.
func topicValue(topic common.Hash) common.Hash {
	// sha256.Sum256 returns a [32]byte which converts directly to common.Hash;
	// this replaces the equivalent manual hasher.New/Write/Sum sequence.
	return common.Hash(sha256.Sum256(topic[:]))
}
// rowIndex returns the row index in which the given log value should be marked
// on the given map and mapping layer. Note that row assignments are re-shuffled
// with a different frequency on each mapping layer, allowing efficient disk
// access and Merkle proofs for long sections of short rows on lower order
// layers while avoiding putting too many heavy rows next to each other on
// higher order layers.
func (p *Params) rowIndex(mapIndex, layerIndex uint32, logValue common.Hash) uint32 {
	h := sha256.New()
	h.Write(logValue[:])
	// mix in the (masked) map index and the layer index
	var enc [8]byte
	binary.LittleEndian.PutUint32(enc[:4], p.maskedMapIndex(mapIndex, layerIndex))
	binary.LittleEndian.PutUint32(enc[4:], layerIndex)
	h.Write(enc[:])
	var sum common.Hash
	h.Sum(sum[:0])
	return binary.LittleEndian.Uint32(sum[:4]) % p.mapHeight
}
// columnIndex returns the column index where the given log value at the given
// position should be marked.
// The column encodes the sub-map position (lvIndex % valuesPerMap) in its high
// bits and a hash-derived value in the low (logMapWidth - logValuesPerMap)
// bits, which is what makes the reverse transform in potentialMatches possible.
func (p *Params) columnIndex(lvIndex uint64, logValue *common.Hash) uint32 {
	var indexEnc [8]byte
	binary.LittleEndian.PutUint64(indexEnc[:], lvIndex)
	// Note: reusing the hasher brings practically no performance gain and would
	// require passing it through the entire matcher logic because of multi-thread
	// matching
	hasher := fnv.New64a()
	hasher.Write(indexEnc[:])
	hasher.Write(logValue[:])
	hash := hasher.Sum64()
	hashBits := p.logMapWidth - p.logValuesPerMap
	return uint32(lvIndex%p.valuesPerMap)<<hashBits + (uint32(hash>>(64-hashBits)) ^ uint32(hash)>>(32-hashBits))
}
// maxRowLength returns the maximum length filter rows are populated up to
// when using the given mapping layer. A log value can be marked on the map
// according to a given mapping layer if the row mapping on that layer points
// to a row that has not yet reached the maxRowLength belonging to that layer.
// This means that a row that is considered full on a given layer may still be
// extended further on a higher order layer.
// Each value is marked on the lowest order layer possible, assuming that marks
// are added in ascending log value index order.
// When searching for a log value one should consider all layers and process
// corresponding rows up until the first one where the row mapped to the given
// layer is not full.
func (p *Params) maxRowLength(layerIndex uint32) uint32 {
	// growth per layer is capped at one epoch's worth of re-mapping
	diff := uint(layerIndex) * p.logLayerDiff
	if diff > p.logMapsPerEpoch {
		diff = p.logMapsPerEpoch
	}
	return p.baseRowLength << diff
}
// maskedMapIndex returns the index used for row mapping calculation on the
// given layer. On layer zero the mapping changes once per epoch, then the
// frequency of re-mapping increases with every new layer until it reaches
// the frequency where it is different for every mapIndex.
func (p *Params) maskedMapIndex(mapIndex, layerIndex uint32) uint32 {
	diff := uint(layerIndex) * p.logLayerDiff
	if diff > p.logMapsPerEpoch {
		diff = p.logMapsPerEpoch
	}
	// zero out the map index bits that stay constant within the re-mapping period
	return mapIndex & (uint32(math.MaxUint32) << (p.logMapsPerEpoch - diff))
}
// potentialMatches returns the list of log value indices potentially matching
// the given log value hash in the range of the filter map the row belongs to.
// Note that the list of indices is always sorted and potential duplicates are
// removed. Though the column indices are stored in the same order they were
// added and therefore the true matches are automatically reverse transformed
// in the right order, false positives can ruin this property. Since these can
// only be separated from true matches after the combined pattern matching of the
// outputs of individual log value matchers and this pattern matcher assumes a
// sorted and duplicate-free list of indices, we should ensure these properties
// here.
func (p *Params) potentialMatches(rows []FilterRow, mapIndex uint32, logValue common.Hash) potentialMatches {
	results := make(potentialMatches, 0, 8)
	mapFirst := uint64(mapIndex) << p.logValuesPerMap
	// the outer i selects the mapping layer; rows[i] is the row mapped to this
	// log value on layer i
	for i, row := range rows {
		rowLen, maxLen := len(row), int(p.maxRowLength(uint32(i)))
		if rowLen > maxLen {
			rowLen = maxLen // any additional entries are generated by another log value on a higher mapping layer
		}
		// note: the inner i deliberately shadows the outer layer index
		for i := 0; i < rowLen; i++ {
			// reverse transform: the high bits of the column encode the
			// sub-map position; verify by re-computing the full column index
			if potentialMatch := mapFirst + uint64(row[i]>>(p.logMapWidth-p.logValuesPerMap)); row[i] == p.columnIndex(potentialMatch, &logValue) {
				results = append(results, potentialMatch)
			}
		}
		if rowLen < maxLen {
			// the row is not full on this layer, so no higher layer was used
			break
		}
		if i == len(rows)-1 {
			panic("potentialMatches: insufficient list of row alternatives")
		}
	}
	sort.Sort(results)
	// remove duplicates
	j := 0
	for i, match := range results {
		if i == 0 || match != results[i-1] {
			results[j] = results[i]
			j++
		}
	}
	return results[:j]
}
// potentialMatches is a strictly monotonically increasing list of log value
// indices in the range of a filter map that are potential matches for certain
// filter criteria.
// potentialMatches implements sort.Interface.
// Note that nil is used as a wildcard and therefore means that all log value
// indices in the filter map range are potential matches. If there are no
// potential matches in the given map's range then an empty slice should be used.
type potentialMatches []uint64

// Len implements sort.Interface.
func (p potentialMatches) Len() int { return len(p) }

// Less implements sort.Interface; indices are ordered ascending.
func (p potentialMatches) Less(i, j int) bool { return p[j] > p[i] }

// Swap implements sort.Interface.
func (p potentialMatches) Swap(i, j int) { p[i], p[j] = p[j], p[i] }

View file

@ -0,0 +1,149 @@
// Copyright 2024 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package filtermaps
import (
crand "crypto/rand"
"math/rand"
"testing"
"github.com/ethereum/go-ethereum/common"
)
// TestSingleMatch checks that a single marked entry is reverse transformed
// back to exactly its original log value index.
func TestSingleMatch(t *testing.T) {
	params := DefaultParams
	params.deriveFields()
	for count := 0; count < 100000; count++ {
		// generate a row with a single random entry
		mapIndex := rand.Uint32()
		lvIndex := uint64(mapIndex)<<params.logValuesPerMap + uint64(rand.Intn(int(params.valuesPerMap)))
		var lvHash common.Hash
		crand.Read(lvHash[:])
		row := FilterRow{params.columnIndex(lvIndex, &lvHash)}
		matches := params.potentialMatches([]FilterRow{row}, mapIndex, lvHash)
		// check if it has been reverse transformed correctly
		if len(matches) != 1 {
			t.Fatalf("Invalid length of matches (got %d, expected 1)", len(matches))
		}
		// Bug fix: the original wrapped this Fatalf in a redundant
		// `if len(matches) != 1` which was always false here (the length was
		// checked just above), so an incorrect match value was silently accepted.
		if matches[0] != lvIndex {
			t.Fatalf("Incorrect match returned (got %d, expected %d)", matches[0], lvIndex)
		}
	}
}
// Dimensions for TestPotentialMatches: testPmCount independent random rows are
// generated, each with testPmLen single entries plus a testPmLen long series
// of entries belonging to one shared log value hash.
const (
	testPmCount = 50
	testPmLen   = 1000
)
// TestPotentialMatches builds randomized, shuffled, duplicated rows and checks
// that potentialMatches returns every true match in sorted, de-duplicated
// order, and that the false positive rate stays in the statistically expected
// range.
func TestPotentialMatches(t *testing.T) {
	params := DefaultParams
	params.deriveFields()
	var falsePositives int
	for count := 0; count < testPmCount; count++ {
		mapIndex := rand.Uint32()
		lvStart := uint64(mapIndex) << params.logValuesPerMap
		var row FilterRow
		lvIndices := make([]uint64, testPmLen)
		lvHashes := make([]common.Hash, testPmLen+1)
		for i := range lvIndices {
			// add testPmLen single entries with different log value hashes at different indices
			lvIndices[i] = lvStart + uint64(rand.Intn(int(params.valuesPerMap)))
			crand.Read(lvHashes[i][:])
			row = append(row, params.columnIndex(lvIndices[i], &lvHashes[i]))
		}
		// add the same log value hash at the first testPmLen log value indices of the map's range
		crand.Read(lvHashes[testPmLen][:])
		for lvIndex := lvStart; lvIndex < lvStart+testPmLen; lvIndex++ {
			row = append(row, params.columnIndex(lvIndex, &lvHashes[testPmLen]))
		}
		// randomly duplicate some entries
		for i := 0; i < testPmLen; i++ {
			row = append(row, row[rand.Intn(len(row))])
		}
		// randomly mix up order of elements
		for i := len(row) - 1; i > 0; i-- {
			j := rand.Intn(i)
			row[i], row[j] = row[j], row[i]
		}
		// split up into a list of rows if longer than allowed
		var rows []FilterRow
		for layerIndex := uint32(0); row != nil; layerIndex++ {
			maxLen := int(params.maxRowLength(layerIndex))
			if len(row) > maxLen {
				rows = append(rows, row[:maxLen])
				row = row[maxLen:]
			} else {
				rows = append(rows, row)
				row = nil
			}
		}
		// check retrieved matches while also counting false positives
		for i, lvHash := range lvHashes {
			matches := params.potentialMatches(rows, mapIndex, lvHash)
			if i < testPmLen {
				// check single entry match
				if len(matches) < 1 {
					t.Fatalf("Invalid length of matches (got %d, expected >=1)", len(matches))
				}
				var found bool
				for _, lvi := range matches {
					if lvi == lvIndices[i] {
						found = true
					} else {
						falsePositives++
					}
				}
				if !found {
					t.Fatalf("Expected match not found (got %v, expected %d)", matches, lvIndices[i])
				}
			} else {
				// check "long series" match
				if len(matches) < testPmLen {
					t.Fatalf("Invalid length of matches (got %d, expected >=%d)", len(matches), testPmLen)
				}
				// since results are ordered, first testPmLen entries should always match exactly
				for j := 0; j < testPmLen; j++ {
					if matches[j] != lvStart+uint64(j) {
						t.Fatalf("Incorrect match at index %d (got %d, expected %d)", j, matches[j], lvStart+uint64(j))
					}
				}
				// the rest are false positives
				falsePositives += len(matches) - testPmLen
			}
		}
	}
	// Whenever looking for a certain log value hash, each entry in the row that
	// was generated by another log value hash (a "foreign entry") has a
	// valuesPerMap / 2^32 chance of yielding a false positive if the reverse
	// transformed 32 bit integer is by random chance less than valuesPerMap and
	// is therefore considered a potentially valid match.
	// We have testPmLen unique hash entries and a testPmLen long series of entries
	// for the same hash. For each of the testPmLen unique hash entries there are
	// testPmLen*2-1 foreign entries while for the long series there are testPmLen
	// foreign entries. This means that after performing all these filtering runs,
	// we have processed 2*testPmLen^2 foreign entries, which gives us an estimate
	// of how many false positives to expect.
	expFalse := int(uint64(testPmCount*testPmLen*testPmLen*2) * params.valuesPerMap >> params.logMapWidth)
	if falsePositives < expFalse/2 || falsePositives > expFalse*3/2 {
		t.Fatalf("False positive rate out of expected range (got %d, expected %d +-50%%)", falsePositives, expFalse)
	}
}

View file

@ -18,6 +18,8 @@ package rawdb
import (
"bytes"
"encoding/binary"
"errors"
"math/big"
"github.com/ethereum/go-ethereum/common"
@ -179,3 +181,306 @@ func DeleteBloombits(db ethdb.Database, bit uint, from uint64, to uint64) {
log.Crit("Failed to delete bloom bits", "err", it.Error())
}
}
// ReadFilterMapExtRow retrieves the extended part of a filter map row at the
// given mapRowIndex (see filtermaps.mapRowIndex for the storage index encoding).
// Note that zero length rows are not stored in the database and therefore all
// non-existent entries are interpreted as empty rows and return no error.
// Also note that the mapRowIndex indexing scheme is the same as the one
// proposed in EIP-7745 for tree-hashing the filter map structure and for the
// same data proximity reasons it is also suitable for database representation.
// See also:
// https://eips.ethereum.org/EIPS/eip-7745#hash-tree-structure
func ReadFilterMapExtRow(db ethdb.KeyValueReader, mapRowIndex uint64, bitLength uint) ([]uint32, error) {
	byteLength := int(bitLength) / 8
	if int(bitLength) != byteLength*8 {
		panic("invalid bit length")
	}
	key := filterMapRowKey(mapRowIndex, false)
	has, err := db.Has(key)
	if err != nil {
		return nil, err
	}
	if !has {
		// missing entry means an empty row
		return nil, nil
	}
	encRow, err := db.Get(key)
	if err != nil {
		return nil, err
	}
	if len(encRow)%byteLength != 0 {
		// error string lowercased per Go convention (staticcheck ST1005)
		return nil, errors.New("invalid encoded extended filter row length")
	}
	// decode fixed-width little endian column entries
	row := make([]uint32, len(encRow)/byteLength)
	var b [4]byte
	for i := range row {
		copy(b[:byteLength], encRow[i*byteLength:(i+1)*byteLength])
		row[i] = binary.LittleEndian.Uint32(b[:])
	}
	return row, nil
}
// ReadFilterMapBaseRows retrieves a group of rowCount base layer filter rows
// stored at the given mapRowIndex. The stored encoding is a header bitmap
// followed by fixed-width little endian column entries: in the header a 1 bit
// assigns the next entry to the current row while a 0 bit closes the current
// row and moves on to the next one. A missing database entry is interpreted
// as a group of empty rows and returns no error.
func ReadFilterMapBaseRows(db ethdb.KeyValueReader, mapRowIndex uint64, rowCount uint32, bitLength uint) ([][]uint32, error) {
	byteLength := int(bitLength) / 8
	if int(bitLength) != byteLength*8 {
		panic("invalid bit length")
	}
	key := filterMapRowKey(mapRowIndex, true)
	has, err := db.Has(key)
	if err != nil {
		return nil, err
	}
	rows := make([][]uint32, rowCount)
	if !has {
		return rows, nil
	}
	encRows, err := db.Get(key)
	if err != nil {
		return nil, err
	}
	encLen := len(encRows)
	var (
		entryCount, entriesInRow, rowIndex, headerLen, headerBits int
		headerByte byte
	)
	// first pass: walk the header bitmap to determine the length of each row
	for headerLen+byteLength*entryCount < encLen {
		if headerBits == 0 {
			headerByte = encRows[headerLen]
			headerLen++
			headerBits = 8
		}
		if headerByte&1 > 0 {
			entriesInRow++
			entryCount++
		} else {
			if entriesInRow > 0 {
				rows[rowIndex] = make([]uint32, entriesInRow)
				entriesInRow = 0
			}
			rowIndex++
		}
		headerByte >>= 1
		headerBits--
	}
	if headerLen+byteLength*entryCount > encLen {
		// error string lowercased per Go convention (staticcheck ST1005)
		return nil, errors.New("invalid encoded base filter rows length")
	}
	if entriesInRow > 0 {
		rows[rowIndex] = make([]uint32, entriesInRow)
	}
	// second pass: fill the rows with the decoded column entries
	nextEntry := headerLen
	for _, row := range rows {
		for i := range row {
			var b [4]byte
			copy(b[:byteLength], encRows[nextEntry:nextEntry+byteLength])
			row[i] = binary.LittleEndian.Uint32(b[:])
			nextEntry += byteLength
		}
	}
	return rows, nil
}
// WriteFilterMapExtRow stores the extended part of a filter map row at the
// given mapRowIndex or deletes any existing entry if the row is empty.
func WriteFilterMapExtRow(db ethdb.KeyValueWriter, mapRowIndex uint64, row []uint32, bitLength uint) {
	byteLength := int(bitLength) / 8
	if int(bitLength) != byteLength*8 {
		panic("invalid bit length")
	}
	key := filterMapRowKey(mapRowIndex, false)
	if len(row) == 0 {
		// empty rows are represented by the absence of a database entry
		if err := db.Delete(key); err != nil {
			log.Crit("Failed to store extended filter map row", "err", err)
		}
		return
	}
	// encode fixed-width little endian column entries
	encRow := make([]byte, len(row)*byteLength)
	for i, c := range row {
		var b [4]byte
		binary.LittleEndian.PutUint32(b[:], c)
		copy(encRow[i*byteLength:(i+1)*byteLength], b[:byteLength])
	}
	if err := db.Put(key, encRow); err != nil {
		log.Crit("Failed to store extended filter map row", "err", err)
	}
}
// WriteFilterMapBaseRows stores a group of base layer filter rows at the given
// mapRowIndex or deletes any existing entry if all rows are empty. The encoding
// is a header bitmap (1 bit: next column entry belongs to the current row,
// 0 bit: close the current row) followed by fixed-width little endian column
// entries; trailing empty rows are not encoded at all.
func WriteFilterMapBaseRows(db ethdb.KeyValueWriter, mapRowIndex uint64, rows [][]uint32, bitLength uint) {
	byteLength := int(bitLength) / 8
	if int(bitLength) != byteLength*8 {
		panic("invalid bit length")
	}
	// zeroBits ends up as the index of the last non-empty row, i.e. the number
	// of row-closing zero bits needed in the header
	var entryCount, zeroBits int
	for i, row := range rows {
		if len(row) > 0 {
			entryCount += len(row)
			zeroBits = i
		}
	}
	var err error
	if entryCount > 0 {
		headerLen := (zeroBits + entryCount + 7) / 8
		encRows := make([]byte, headerLen+entryCount*byteLength)
		nextEntry := headerLen
		// addHeaderBit appends one bit to the header bitmap, LSB first
		headerPtr, headerByte := 0, byte(1)
		addHeaderBit := func(bit bool) {
			if bit {
				encRows[headerPtr] += headerByte
			}
			if headerByte += headerByte; headerByte == 0 {
				headerPtr++
				headerByte = 1
			}
		}
		for _, row := range rows {
			for _, entry := range row {
				var b [4]byte
				binary.LittleEndian.PutUint32(b[:], entry)
				copy(encRows[nextEntry:nextEntry+byteLength], b[:byteLength])
				nextEntry += byteLength
				addHeaderBit(true)
			}
			if zeroBits == 0 {
				// all remaining rows are empty; stop encoding here
				break
			}
			addHeaderBit(false)
			zeroBits--
		}
		err = db.Put(filterMapRowKey(mapRowIndex, true), encRows)
	} else {
		// an all-empty group is represented by the absence of a database entry
		err = db.Delete(filterMapRowKey(mapRowIndex, true))
	}
	if err != nil {
		log.Crit("Failed to store base filter map rows", "err", err)
	}
}
// DeleteFilterMapRows deletes the filter map rows in the mapRowIndex range
// [firstMapRowIndex, afterLastMapRowIndex).
func DeleteFilterMapRows(db ethdb.KeyValueRangeDeleter, firstMapRowIndex, afterLastMapRowIndex uint64) {
	if err := db.DeleteRange(filterMapRowKey(firstMapRowIndex, false), filterMapRowKey(afterLastMapRowIndex, false)); err != nil {
		log.Crit("Failed to delete range of filter map rows", "err", err)
	}
}
// ReadFilterMapLastBlock retrieves the number and id of the block that
// generated the last log value entry of the given map. The stored format is
// the 8 byte big endian block number followed by the 32 byte id.
func ReadFilterMapLastBlock(db ethdb.KeyValueReader, mapIndex uint32) (uint64, common.Hash, error) {
	enc, err := db.Get(filterMapLastBlockKey(mapIndex))
	if err != nil {
		return 0, common.Hash{}, err
	}
	if len(enc) != 40 {
		// error string lowercased per Go convention (staticcheck ST1005)
		return 0, common.Hash{}, errors.New("invalid block number and id encoding")
	}
	var id common.Hash
	copy(id[:], enc[8:])
	return binary.BigEndian.Uint64(enc[:8]), id, nil
}
// WriteFilterMapLastBlock stores the number and id of the block that generated
// the last log value entry of the given map, encoded as the 8 byte big endian
// block number followed by the 32 byte id.
func WriteFilterMapLastBlock(db ethdb.KeyValueWriter, mapIndex uint32, blockNumber uint64, id common.Hash) {
	var buf [40]byte
	binary.BigEndian.PutUint64(buf[:8], blockNumber)
	copy(buf[8:], id[:])
	err := db.Put(filterMapLastBlockKey(mapIndex), buf[:])
	if err != nil {
		log.Crit("Failed to store filter map last block pointer", "err", err)
	}
}
// DeleteFilterMapLastBlock deletes the number of the block that generated the
// last log value entry of the given map.
func DeleteFilterMapLastBlock(db ethdb.KeyValueWriter, mapIndex uint32) {
	err := db.Delete(filterMapLastBlockKey(mapIndex))
	if err != nil {
		log.Crit("Failed to delete filter map last block pointer", "err", err)
	}
}
// DeleteFilterMapLastBlocks deletes the last block pointers for the maps in
// the index range [firstMapIndex, afterLastMapIndex).
func DeleteFilterMapLastBlocks(db ethdb.KeyValueRangeDeleter, firstMapIndex, afterLastMapIndex uint32) {
	if err := db.DeleteRange(filterMapLastBlockKey(firstMapIndex), filterMapLastBlockKey(afterLastMapIndex)); err != nil {
		log.Crit("Failed to delete range of filter map last block pointers", "err", err)
	}
}
// ReadBlockLvPointer retrieves the starting log value index where the log values
// generated by the given block are located, stored as an 8 byte big endian value.
func ReadBlockLvPointer(db ethdb.KeyValueReader, blockNumber uint64) (uint64, error) {
	encPtr, err := db.Get(filterMapBlockLVKey(blockNumber))
	if err != nil {
		return 0, err
	}
	if len(encPtr) != 8 {
		// error string lowercased per Go convention (staticcheck ST1005)
		return 0, errors.New("invalid log value pointer encoding")
	}
	return binary.BigEndian.Uint64(encPtr), nil
}
// WriteBlockLvPointer stores the starting log value index where the log values
// generated by the given block are located, as an 8 byte big endian value.
func WriteBlockLvPointer(db ethdb.KeyValueWriter, blockNumber, lvPointer uint64) {
	var enc [8]byte
	binary.BigEndian.PutUint64(enc[:], lvPointer)
	err := db.Put(filterMapBlockLVKey(blockNumber), enc[:])
	if err != nil {
		log.Crit("Failed to store block log value pointer", "err", err)
	}
}
// DeleteBlockLvPointer deletes the starting log value index where the log values
// generated by the given block are located.
func DeleteBlockLvPointer(db ethdb.KeyValueWriter, blockNumber uint64) {
	err := db.Delete(filterMapBlockLVKey(blockNumber))
	if err != nil {
		log.Crit("Failed to delete block log value pointer", "err", err)
	}
}
// DeleteBlockLvPointers removes the log value pointers of all blocks in the
// number range [firstBlockNumber, afterLastBlockNumber).
func DeleteBlockLvPointers(db ethdb.KeyValueRangeDeleter, firstBlockNumber, afterLastBlockNumber uint64) {
	start := filterMapBlockLVKey(firstBlockNumber)
	end := filterMapBlockLVKey(afterLastBlockNumber)
	if err := db.DeleteRange(start, end); err != nil {
		log.Crit("Failed to delete range of block log value pointers", "err", err)
	}
}
// FilterMapsRange is a storage representation of the block range covered by the
// filter maps structure and the corresponding log value index range.
type FilterMapsRange struct {
	// HeadBlockIndexed reports whether the current head block is covered by
	// the index; HeadBlockDelimiter is presumably the log value index of the
	// head block's delimiter entry — TODO(review): confirm against the
	// filtermaps package.
	HeadBlockIndexed bool
	HeadBlockDelimiter uint64
	// Half-open range [FirstIndexedBlock, AfterLastIndexedBlock) of blocks
	// whose logs are present in the index.
	FirstIndexedBlock, AfterLastIndexedBlock uint64
	// Half-open range [FirstRenderedMap, AfterLastRenderedMap) of fully
	// rendered filter maps; TailPartialEpoch presumably marks a partially
	// rendered tail epoch — TODO(review): confirm semantics.
	FirstRenderedMap, AfterLastRenderedMap, TailPartialEpoch uint32
}
// ReadFilterMapsRange retrieves the filter maps range data. Note that if the
// database entry is not present, that is interpreted as a valid non-initialized
// state and returns a blank range structure and no error.
func ReadFilterMapsRange(db ethdb.KeyValueReader) (FilterMapsRange, bool, error) {
	// Probe for existence first: a missing entry is the valid
	// "not initialized" state, not an error.
	if has, err := db.Has(filterMapsRangeKey); !has || err != nil {
		return FilterMapsRange{}, false, err
	}
	encRange, err := db.Get(filterMapsRangeKey)
	if err != nil {
		return FilterMapsRange{}, false, err
	}
	var fmRange FilterMapsRange
	if err := rlp.DecodeBytes(encRange, &fmRange); err != nil {
		return FilterMapsRange{}, false, err
	}
	// Return an explicit nil instead of the stale outer err variable (which is
	// provably nil here) to keep the success path unambiguous.
	return fmRange, true, nil
}
// WriteFilterMapsRange stores the filter maps range data in RLP encoded form.
func WriteFilterMapsRange(db ethdb.KeyValueWriter, fmRange FilterMapsRange) {
	enc, err := rlp.EncodeToBytes(&fmRange)
	if err != nil {
		log.Crit("Failed to encode filter maps range", "err", err)
	}
	if err = db.Put(filterMapsRangeKey, enc); err != nil {
		log.Crit("Failed to store filter maps range", "err", err)
	}
}
// DeleteFilterMapsRange deletes the filter maps range data which is interpreted
// as reverting to the un-initialized state.
func DeleteFilterMapsRange(db ethdb.KeyValueWriter) {
	err := db.Delete(filterMapsRangeKey)
	if err != nil {
		log.Crit("Failed to delete filter maps range", "err", err)
	}
}

View file

@ -376,6 +376,7 @@ func InspectDatabase(db ethdb.Database, keyPrefix, keyStart []byte) error {
storageSnaps stat
preimages stat
bloomBits stat
filterMaps stat
beaconHeaders stat
cliqueSnaps stat
@ -440,6 +441,8 @@ func InspectDatabase(db ethdb.Database, keyPrefix, keyStart []byte) error {
bloomBits.Add(size)
case bytes.HasPrefix(key, BloomBitsIndexPrefix):
bloomBits.Add(size)
case bytes.HasPrefix(key, []byte(FilterMapsPrefix)):
filterMaps.Add(size)
case bytes.HasPrefix(key, skeletonHeaderPrefix) && len(key) == (len(skeletonHeaderPrefix)+8):
beaconHeaders.Add(size)
case bytes.HasPrefix(key, CliqueSnapshotPrefix) && len(key) == 7+common.HashLength:
@ -505,6 +508,7 @@ func InspectDatabase(db ethdb.Database, keyPrefix, keyStart []byte) error {
{"Key-Value store", "Block hash->number", hashNumPairings.Size(), hashNumPairings.Count()},
{"Key-Value store", "Transaction index", txLookups.Size(), txLookups.Count()},
{"Key-Value store", "Bloombit index", bloomBits.Size(), bloomBits.Count()},
{"Key-Value store", "Log search index", filterMaps.Size(), filterMaps.Count()},
{"Key-Value store", "Contract codes", codes.Size(), codes.Count()},
{"Key-Value store", "Hash trie nodes", legacyTries.Size(), legacyTries.Count()},
{"Key-Value store", "Path trie state lookups", stateLookups.Size(), stateLookups.Count()},

View file

@ -145,6 +145,12 @@ var (
FixedCommitteeRootKey = []byte("fixedRoot-") // bigEndian64(syncPeriod) -> committee root hash
SyncCommitteeKey = []byte("committee-") // bigEndian64(syncPeriod) -> serialized committee
FilterMapsPrefix = "fm-"
filterMapsRangeKey = []byte(FilterMapsPrefix + "R")
filterMapRowPrefix = []byte(FilterMapsPrefix + "r") // filterMapRowPrefix + mapRowIndex (uint64 big endian) -> filter row
filterMapLastBlockPrefix = []byte(FilterMapsPrefix + "b") // filterMapLastBlockPrefix + mapIndex (uint32 big endian) -> block number (uint64 big endian)
filterMapBlockLVPrefix = []byte(FilterMapsPrefix + "p") // filterMapBlockLVPrefix + num (uint64 big endian) -> log value pointer (uint64 big endian)
preimageCounter = metrics.NewRegisteredCounter("db/preimage/total", nil)
preimageHitsCounter = metrics.NewRegisteredCounter("db/preimage/hits", nil)
preimageMissCounter = metrics.NewRegisteredCounter("db/preimage/miss", nil)
@ -342,3 +348,34 @@ func IsStorageTrieNode(key []byte) bool {
ok, _, _ := ResolveStorageTrieNode(key)
return ok
}
// filterMapRowKey = filterMapRowPrefix + mapRowIndex (uint64 big endian).
// When base is true the key carries one extra trailing byte, which make()
// leaves at zero; this keeps the 9-byte base-row keys distinct from the plain
// 8-byte form.
func filterMapRowKey(mapRowIndex uint64, base bool) []byte {
	prefixLen := len(filterMapRowPrefix)
	size := prefixLen + 8
	if base {
		size++ // trailing zero byte, implicitly zeroed by make
	}
	key := make([]byte, size)
	copy(key, filterMapRowPrefix)
	binary.BigEndian.PutUint64(key[prefixLen:prefixLen+8], mapRowIndex)
	return key
}
// filterMapLastBlockKey = filterMapLastBlockPrefix + mapIndex (uint32 big endian)
func filterMapLastBlockKey(mapIndex uint32) []byte {
	prefixLen := len(filterMapLastBlockPrefix)
	key := make([]byte, prefixLen+4)
	copy(key, filterMapLastBlockPrefix)
	binary.BigEndian.PutUint32(key[prefixLen:], mapIndex)
	return key
}
// filterMapBlockLVKey = filterMapBlockLVPrefix + num (uint64 big endian)
func filterMapBlockLVKey(number uint64) []byte {
	prefixLen := len(filterMapBlockLVPrefix)
	key := make([]byte, prefixLen+8)
	copy(key, filterMapBlockLVPrefix)
	binary.BigEndian.PutUint64(key[prefixLen:], number)
	return key
}