mirror of
https://github.com/ethereum/go-ethereum.git
synced 2026-03-04 10:25:04 +00:00
core/rawdb: improve database stats output (#31463)
Instead of reporting all filtermaps stuff in one line, I'm breaking it down into the three separate kinds of entries here. ``` +-----------------------+-----------------------------+------------+------------+ | DATABASE | CATEGORY | SIZE | ITEMS | +-----------------------+-----------------------------+------------+------------+ | Key-Value store | Log index filter-map rows | 59.21 GiB | 616077345 | | Key-Value store | Log index last-block-of-map | 12.35 MiB | 269755 | | Key-Value store | Log index block-lv | 421.70 MiB | 22109169 | ``` Also added some other changes to make it easier to debug: - restored bloombits into the inspect output, so we notice if it doesn't get deleted for some reason - tracking of unaccounted key examples
This commit is contained in:
parent
b0b2b76509
commit
fd4049dc1e
3 changed files with 94 additions and 54 deletions
|
|
@ -502,5 +502,5 @@ func DeleteBloomBitsDb(db ethdb.KeyValueRangeDeleter) error {
|
|||
if err := deletePrefixRange(db, bloomBitsPrefix); err != nil {
|
||||
return err
|
||||
}
|
||||
return deletePrefixRange(db, bloomBitsIndexPrefix)
|
||||
return deletePrefixRange(db, bloomBitsMetaPrefix)
|
||||
}
|
||||
|
|
|
|||
|
|
@ -20,8 +20,10 @@ import (
|
|||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"maps"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"slices"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
|
|
@ -360,24 +362,27 @@ func InspectDatabase(db ethdb.Database, keyPrefix, keyStart []byte) error {
|
|||
logged = time.Now()
|
||||
|
||||
// Key-value store statistics
|
||||
headers stat
|
||||
bodies stat
|
||||
receipts stat
|
||||
tds stat
|
||||
numHashPairings stat
|
||||
hashNumPairings stat
|
||||
legacyTries stat
|
||||
stateLookups stat
|
||||
accountTries stat
|
||||
storageTries stat
|
||||
codes stat
|
||||
txLookups stat
|
||||
accountSnaps stat
|
||||
storageSnaps stat
|
||||
preimages stat
|
||||
filterMaps stat
|
||||
beaconHeaders stat
|
||||
cliqueSnaps stat
|
||||
headers stat
|
||||
bodies stat
|
||||
receipts stat
|
||||
tds stat
|
||||
numHashPairings stat
|
||||
hashNumPairings stat
|
||||
legacyTries stat
|
||||
stateLookups stat
|
||||
accountTries stat
|
||||
storageTries stat
|
||||
codes stat
|
||||
txLookups stat
|
||||
accountSnaps stat
|
||||
storageSnaps stat
|
||||
preimages stat
|
||||
beaconHeaders stat
|
||||
cliqueSnaps stat
|
||||
bloomBits stat
|
||||
filterMapRows stat
|
||||
filterMapLastBlock stat
|
||||
filterMapBlockLV stat
|
||||
|
||||
// Verkle statistics
|
||||
verkleTries stat
|
||||
|
|
@ -393,6 +398,11 @@ func InspectDatabase(db ethdb.Database, keyPrefix, keyStart []byte) error {
|
|||
|
||||
// Totals
|
||||
total common.StorageSize
|
||||
|
||||
// This map tracks example keys for unaccounted data.
|
||||
// For each unique two-byte prefix, the first unaccounted key encountered
|
||||
// by the iterator will be stored.
|
||||
unaccountedKeys = make(map[[2]byte][]byte)
|
||||
)
|
||||
// Inspect key-value database first.
|
||||
for it.Next() {
|
||||
|
|
@ -436,19 +446,33 @@ func InspectDatabase(db ethdb.Database, keyPrefix, keyStart []byte) error {
|
|||
metadata.Add(size)
|
||||
case bytes.HasPrefix(key, genesisPrefix) && len(key) == (len(genesisPrefix)+common.HashLength):
|
||||
metadata.Add(size)
|
||||
case bytes.HasPrefix(key, []byte(filterMapsPrefix)):
|
||||
filterMaps.Add(size)
|
||||
case bytes.HasPrefix(key, skeletonHeaderPrefix) && len(key) == (len(skeletonHeaderPrefix)+8):
|
||||
beaconHeaders.Add(size)
|
||||
case bytes.HasPrefix(key, CliqueSnapshotPrefix) && len(key) == 7+common.HashLength:
|
||||
cliqueSnaps.Add(size)
|
||||
case bytes.HasPrefix(key, ChtTablePrefix) ||
|
||||
bytes.HasPrefix(key, ChtIndexTablePrefix) ||
|
||||
bytes.HasPrefix(key, ChtPrefix): // Canonical hash trie
|
||||
|
||||
// new log index
|
||||
case bytes.HasPrefix(key, filterMapRowPrefix) && len(key) <= len(filterMapRowPrefix)+9:
|
||||
filterMapRows.Add(size)
|
||||
case bytes.HasPrefix(key, filterMapLastBlockPrefix) && len(key) == len(filterMapLastBlockPrefix)+4:
|
||||
filterMapLastBlock.Add(size)
|
||||
case bytes.HasPrefix(key, filterMapBlockLVPrefix) && len(key) == len(filterMapBlockLVPrefix)+8:
|
||||
filterMapBlockLV.Add(size)
|
||||
|
||||
// old log index (deprecated)
|
||||
case bytes.HasPrefix(key, bloomBitsPrefix) && len(key) == (len(bloomBitsPrefix)+10+common.HashLength):
|
||||
bloomBits.Add(size)
|
||||
case bytes.HasPrefix(key, bloomBitsMetaPrefix) && len(key) < len(bloomBitsMetaPrefix)+8:
|
||||
bloomBits.Add(size)
|
||||
|
||||
// LES indexes (deprecated)
|
||||
case bytes.HasPrefix(key, chtTablePrefix) ||
|
||||
bytes.HasPrefix(key, chtIndexTablePrefix) ||
|
||||
bytes.HasPrefix(key, chtPrefix): // Canonical hash trie
|
||||
chtTrieNodes.Add(size)
|
||||
case bytes.HasPrefix(key, BloomTrieTablePrefix) ||
|
||||
bytes.HasPrefix(key, BloomTrieIndexPrefix) ||
|
||||
bytes.HasPrefix(key, BloomTriePrefix): // Bloomtrie sub
|
||||
case bytes.HasPrefix(key, bloomTrieTablePrefix) ||
|
||||
bytes.HasPrefix(key, bloomTrieIndexPrefix) ||
|
||||
bytes.HasPrefix(key, bloomTriePrefix): // Bloomtrie sub
|
||||
bloomTrieNodes.Add(size)
|
||||
|
||||
// Verkle trie data is detected, determine the sub-category
|
||||
|
|
@ -468,24 +492,19 @@ func InspectDatabase(db ethdb.Database, keyPrefix, keyStart []byte) error {
|
|||
default:
|
||||
unaccounted.Add(size)
|
||||
}
|
||||
|
||||
// Metadata keys
|
||||
case slices.ContainsFunc(knownMetadataKeys, func(x []byte) bool { return bytes.Equal(x, key) }):
|
||||
metadata.Add(size)
|
||||
|
||||
default:
|
||||
var accounted bool
|
||||
for _, meta := range [][]byte{
|
||||
databaseVersionKey, headHeaderKey, headBlockKey, headFastBlockKey, headFinalizedBlockKey,
|
||||
lastPivotKey, fastTrieProgressKey, snapshotDisabledKey, SnapshotRootKey, snapshotJournalKey,
|
||||
snapshotGeneratorKey, snapshotRecoveryKey, txIndexTailKey, fastTxLookupLimitKey,
|
||||
uncleanShutdownKey, badBlockKey, transitionStatusKey, skeletonSyncStatusKey,
|
||||
persistentStateIDKey, trieJournalKey, snapshotSyncStatusKey, snapSyncStatusFlagKey,
|
||||
} {
|
||||
if bytes.Equal(key, meta) {
|
||||
metadata.Add(size)
|
||||
accounted = true
|
||||
break
|
||||
unaccounted.Add(size)
|
||||
if len(key) >= 2 {
|
||||
prefix := [2]byte(key[:2])
|
||||
if _, ok := unaccountedKeys[prefix]; !ok {
|
||||
unaccountedKeys[prefix] = bytes.Clone(key)
|
||||
}
|
||||
}
|
||||
if !accounted {
|
||||
unaccounted.Add(size)
|
||||
}
|
||||
}
|
||||
count++
|
||||
if count%1000 == 0 && time.Since(logged) > 8*time.Second {
|
||||
|
|
@ -502,7 +521,10 @@ func InspectDatabase(db ethdb.Database, keyPrefix, keyStart []byte) error {
|
|||
{"Key-Value store", "Block number->hash", numHashPairings.Size(), numHashPairings.Count()},
|
||||
{"Key-Value store", "Block hash->number", hashNumPairings.Size(), hashNumPairings.Count()},
|
||||
{"Key-Value store", "Transaction index", txLookups.Size(), txLookups.Count()},
|
||||
{"Key-Value store", "Log search index", filterMaps.Size(), filterMaps.Count()},
|
||||
{"Key-Value store", "Log index filter-map rows", filterMapRows.Size(), filterMapRows.Count()},
|
||||
{"Key-Value store", "Log index last-block-of-map", filterMapLastBlock.Size(), filterMapLastBlock.Count()},
|
||||
{"Key-Value store", "Log index block-lv", filterMapBlockLV.Size(), filterMapBlockLV.Count()},
|
||||
{"Key-Value store", "Log bloombits (deprecated)", bloomBits.Size(), bloomBits.Count()},
|
||||
{"Key-Value store", "Contract codes", codes.Size(), codes.Count()},
|
||||
{"Key-Value store", "Hash trie nodes", legacyTries.Size(), legacyTries.Count()},
|
||||
{"Key-Value store", "Path trie state lookups", stateLookups.Size(), stateLookups.Count()},
|
||||
|
|
@ -543,10 +565,23 @@ func InspectDatabase(db ethdb.Database, keyPrefix, keyStart []byte) error {
|
|||
|
||||
if unaccounted.size > 0 {
|
||||
log.Error("Database contains unaccounted data", "size", unaccounted.size, "count", unaccounted.count)
|
||||
for _, e := range slices.SortedFunc(maps.Values(unaccountedKeys), bytes.Compare) {
|
||||
log.Error(fmt.Sprintf(" example key: %x", e))
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// This is the list of known 'metadata' keys stored in the database.
|
||||
var knownMetadataKeys = [][]byte{
|
||||
databaseVersionKey, headHeaderKey, headBlockKey, headFastBlockKey, headFinalizedBlockKey,
|
||||
lastPivotKey, fastTrieProgressKey, snapshotDisabledKey, SnapshotRootKey, snapshotJournalKey,
|
||||
snapshotGeneratorKey, snapshotRecoveryKey, txIndexTailKey, fastTxLookupLimitKey,
|
||||
uncleanShutdownKey, badBlockKey, transitionStatusKey, skeletonSyncStatusKey,
|
||||
persistentStateIDKey, trieJournalKey, snapshotSyncStatusKey, snapSyncStatusFlagKey,
|
||||
filterMapsRangeKey,
|
||||
}
|
||||
|
||||
// printChainMetadata prints out chain metadata to stderr.
|
||||
func printChainMetadata(db ethdb.KeyValueStore) {
|
||||
fmt.Fprintf(os.Stderr, "Chain metadata\n")
|
||||
|
|
@ -566,6 +601,7 @@ func ReadChainMetadata(db ethdb.KeyValueStore) [][]string {
|
|||
}
|
||||
return fmt.Sprintf("%d (%#x)", *val, *val)
|
||||
}
|
||||
|
||||
data := [][]string{
|
||||
{"databaseVersion", pp(ReadDatabaseVersion(db))},
|
||||
{"headBlockHash", fmt.Sprintf("%v", ReadHeadBlockHash(db))},
|
||||
|
|
@ -582,5 +618,8 @@ func ReadChainMetadata(db ethdb.KeyValueStore) [][]string {
|
|||
if b := ReadSkeletonSyncStatus(db); b != nil {
|
||||
data = append(data, []string{"SkeletonSyncStatus", string(b)})
|
||||
}
|
||||
if fmr, ok, _ := ReadFilterMapsRange(db); ok {
|
||||
data = append(data, []string{"filterMapsRange", fmt.Sprintf("%+v", fmr)})
|
||||
}
|
||||
return data
|
||||
}
|
||||
|
|
|
|||
|
|
@ -128,29 +128,30 @@ var (
|
|||
configPrefix = []byte("ethereum-config-") // config prefix for the db
|
||||
genesisPrefix = []byte("ethereum-genesis-") // genesis state prefix for the db
|
||||
|
||||
// bloomBitsIndexPrefix is the data table of a chain indexer to track its progress
|
||||
bloomBitsIndexPrefix = []byte("iB")
|
||||
|
||||
ChtPrefix = []byte("chtRootV2-") // ChtPrefix + chtNum (uint64 big endian) -> trie root hash
|
||||
ChtTablePrefix = []byte("cht-")
|
||||
ChtIndexTablePrefix = []byte("chtIndexV2-")
|
||||
|
||||
BloomTriePrefix = []byte("bltRoot-") // BloomTriePrefix + bloomTrieNum (uint64 big endian) -> trie root hash
|
||||
BloomTrieTablePrefix = []byte("blt-")
|
||||
BloomTrieIndexPrefix = []byte("bltIndex-")
|
||||
|
||||
CliqueSnapshotPrefix = []byte("clique-")
|
||||
|
||||
BestUpdateKey = []byte("update-") // bigEndian64(syncPeriod) -> RLP(types.LightClientUpdate) (nextCommittee only referenced by root hash)
|
||||
FixedCommitteeRootKey = []byte("fixedRoot-") // bigEndian64(syncPeriod) -> committee root hash
|
||||
SyncCommitteeKey = []byte("committee-") // bigEndian64(syncPeriod) -> serialized committee
|
||||
|
||||
// new log index
|
||||
filterMapsPrefix = "fm-"
|
||||
filterMapsRangeKey = []byte(filterMapsPrefix + "R")
|
||||
filterMapRowPrefix = []byte(filterMapsPrefix + "r") // filterMapRowPrefix + mapRowIndex (uint64 big endian) -> filter row
|
||||
filterMapLastBlockPrefix = []byte(filterMapsPrefix + "b") // filterMapLastBlockPrefix + mapIndex (uint32 big endian) -> block number (uint64 big endian)
|
||||
filterMapBlockLVPrefix = []byte(filterMapsPrefix + "p") // filterMapBlockLVPrefix + num (uint64 big endian) -> log value pointer (uint64 big endian)
|
||||
|
||||
// old log index
|
||||
bloomBitsMetaPrefix = []byte("iB")
|
||||
|
||||
// LES indexes
|
||||
chtPrefix = []byte("chtRootV2-") // ChtPrefix + chtNum (uint64 big endian) -> trie root hash
|
||||
chtTablePrefix = []byte("cht-")
|
||||
chtIndexTablePrefix = []byte("chtIndexV2-")
|
||||
bloomTriePrefix = []byte("bltRoot-") // BloomTriePrefix + bloomTrieNum (uint64 big endian) -> trie root hash
|
||||
bloomTrieTablePrefix = []byte("blt-")
|
||||
bloomTrieIndexPrefix = []byte("bltIndex-")
|
||||
|
||||
preimageCounter = metrics.NewRegisteredCounter("db/preimage/total", nil)
|
||||
preimageHitsCounter = metrics.NewRegisteredCounter("db/preimage/hits", nil)
|
||||
preimageMissCounter = metrics.NewRegisteredCounter("db/preimage/miss", nil)
|
||||
|
|
|
|||
Loading…
Reference in a new issue